[Midnightbsd-cvs] src [10169] trunk/sys/cam: sync with FreeBSD 10-stable

laffer1 at midnightbsd.org
Fri Jun 1 18:51:19 EDT 2018


Revision: 10169
          http://svnweb.midnightbsd.org/src/?rev=10169
Author:   laffer1
Date:     2018-06-01 18:51:18 -0400 (Fri, 01 Jun 2018)
Log Message:
-----------
Sync sys/cam with FreeBSD 10-stable. Notable pieces: CAM compatibility
shims (cam_compat.c/h), new CTL iSCSI and ioctl frontends, CTL HA and
third-party-copy (ctl_tpc*) support, sbuf-based ATA command decoding,
and unmapped-BIO support in ada(4).

Modified Paths:
--------------
    trunk/sys/cam/ata/ata_all.c
    trunk/sys/cam/ata/ata_all.h
    trunk/sys/cam/ata/ata_da.c
    trunk/sys/cam/ata/ata_pmp.c
    trunk/sys/cam/ata/ata_xpt.c
    trunk/sys/cam/cam.c
    trunk/sys/cam/cam.h
    trunk/sys/cam/cam_ccb.h
    trunk/sys/cam/cam_debug.h
    trunk/sys/cam/cam_periph.c
    trunk/sys/cam/cam_periph.h
    trunk/sys/cam/cam_queue.c
    trunk/sys/cam/cam_queue.h
    trunk/sys/cam/cam_sim.c
    trunk/sys/cam/cam_sim.h
    trunk/sys/cam/cam_xpt.c
    trunk/sys/cam/cam_xpt.h
    trunk/sys/cam/cam_xpt_internal.h
    trunk/sys/cam/cam_xpt_periph.h
    trunk/sys/cam/cam_xpt_sim.h
    trunk/sys/cam/ctl/README.ctl.txt
    trunk/sys/cam/ctl/ctl.c
    trunk/sys/cam/ctl/ctl.h
    trunk/sys/cam/ctl/ctl_backend.c
    trunk/sys/cam/ctl/ctl_backend.h
    trunk/sys/cam/ctl/ctl_backend_block.c
    trunk/sys/cam/ctl/ctl_backend_ramdisk.c
    trunk/sys/cam/ctl/ctl_cmd_table.c
    trunk/sys/cam/ctl/ctl_debug.h
    trunk/sys/cam/ctl/ctl_error.c
    trunk/sys/cam/ctl/ctl_error.h
    trunk/sys/cam/ctl/ctl_frontend.c
    trunk/sys/cam/ctl/ctl_frontend.h
    trunk/sys/cam/ctl/ctl_frontend_cam_sim.c
    trunk/sys/cam/ctl/ctl_ha.h
    trunk/sys/cam/ctl/ctl_io.h
    trunk/sys/cam/ctl/ctl_ioctl.h
    trunk/sys/cam/ctl/ctl_private.h
    trunk/sys/cam/ctl/ctl_scsi_all.c
    trunk/sys/cam/ctl/ctl_scsi_all.h
    trunk/sys/cam/ctl/ctl_ser_table.c
    trunk/sys/cam/ctl/ctl_util.c
    trunk/sys/cam/ctl/ctl_util.h
    trunk/sys/cam/ctl/scsi_ctl.c
    trunk/sys/cam/scsi/scsi_all.c
    trunk/sys/cam/scsi/scsi_all.h
    trunk/sys/cam/scsi/scsi_cd.c
    trunk/sys/cam/scsi/scsi_cd.h
    trunk/sys/cam/scsi/scsi_ch.c
    trunk/sys/cam/scsi/scsi_ch.h
    trunk/sys/cam/scsi/scsi_da.c
    trunk/sys/cam/scsi/scsi_da.h
    trunk/sys/cam/scsi/scsi_dvcfg.h
    trunk/sys/cam/scsi/scsi_enc.c
    trunk/sys/cam/scsi/scsi_enc.h
    trunk/sys/cam/scsi/scsi_enc_internal.h
    trunk/sys/cam/scsi/scsi_enc_safte.c
    trunk/sys/cam/scsi/scsi_enc_ses.c
    trunk/sys/cam/scsi/scsi_iu.h
    trunk/sys/cam/scsi/scsi_low.c
    trunk/sys/cam/scsi/scsi_low.h
    trunk/sys/cam/scsi/scsi_message.h
    trunk/sys/cam/scsi/scsi_pass.c
    trunk/sys/cam/scsi/scsi_pass.h
    trunk/sys/cam/scsi/scsi_pt.c
    trunk/sys/cam/scsi/scsi_pt.h
    trunk/sys/cam/scsi/scsi_sa.c
    trunk/sys/cam/scsi/scsi_sa.h
    trunk/sys/cam/scsi/scsi_ses.h
    trunk/sys/cam/scsi/scsi_sg.c
    trunk/sys/cam/scsi/scsi_sg.h
    trunk/sys/cam/scsi/scsi_targ_bh.c
    trunk/sys/cam/scsi/scsi_target.c
    trunk/sys/cam/scsi/scsi_targetio.h
    trunk/sys/cam/scsi/scsi_xpt.c

Added Paths:
-----------
    trunk/sys/cam/cam_compat.c
    trunk/sys/cam/cam_compat.h
    trunk/sys/cam/ctl/ctl_frontend_ioctl.c
    trunk/sys/cam/ctl/ctl_frontend_iscsi.c
    trunk/sys/cam/ctl/ctl_frontend_iscsi.h
    trunk/sys/cam/ctl/ctl_ha.c
    trunk/sys/cam/ctl/ctl_tpc.c
    trunk/sys/cam/ctl/ctl_tpc.h
    trunk/sys/cam/ctl/ctl_tpc_local.c

Property Changed:
----------------
    trunk/sys/cam/ctl/README.ctl.txt

Modified: trunk/sys/cam/ata/ata_all.c
===================================================================
--- trunk/sys/cam/ata/ata_all.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ata/ata_all.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2009 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ata/ata_all.c 308075 2016-10-29 08:42:43Z mav $");
 
 #include <sys/param.h>
 
@@ -75,14 +76,20 @@
 	if (cmd->control & 0x04)
 		return ("SOFT_RESET");
 	switch (cmd->command) {
-	case 0x00: return ("NOP");
+	case 0x00:
+		switch (cmd->features) {
+		case 0x00: return ("NOP FLUSHQUEUE");
+		case 0x01: return ("NOP AUTOPOLL");
+		}
+		return ("NOP");
 	case 0x03: return ("CFA_REQUEST_EXTENDED_ERROR");
 	case 0x06:
 		switch (cmd->features) {
-	        case 0x01: return ("DSM TRIM");
-	        }
-	        return "DSM";
+		case 0x01: return ("DSM TRIM");
+		}
+		return "DSM";
 	case 0x08: return ("DEVICE_RESET");
+	case 0x0b: return ("REQUEST_SENSE_DATA_EXT");
 	case 0x20: return ("READ");
 	case 0x24: return ("READ48");
 	case 0x25: return ("READ_DMA48");
@@ -105,9 +112,24 @@
 	case 0x3f: return ("WRITE_LOG_EXT");
 	case 0x40: return ("READ_VERIFY");
 	case 0x42: return ("READ_VERIFY48");
+	case 0x45:
+		switch (cmd->features) {
+		case 0x55: return ("WRITE_UNCORRECTABLE48 PSEUDO");
+		case 0xaa: return ("WRITE_UNCORRECTABLE48 FLAGGED");
+		}
+		return "WRITE_UNCORRECTABLE48";
 	case 0x51: return ("CONFIGURE_STREAM");
+	case 0x57: return ("WRITE_LOG_DMA_EXT");
+	case 0x5b: return ("TRUSTED_NON_DATA");
+	case 0x5c: return ("TRUSTED_RECEIVE");
+	case 0x5d: return ("TRUSTED_RECEIVE_DMA");
+	case 0x5e: return ("TRUSTED_SEND");
+	case 0x5f: return ("TRUSTED_SEND_DMA");
 	case 0x60: return ("READ_FPDMA_QUEUED");
 	case 0x61: return ("WRITE_FPDMA_QUEUED");
+	case 0x63: return ("NCQ_NON_DATA");
+	case 0x64: return ("SEND_FPDMA_QUEUED");
+	case 0x65: return ("RECEIVE_FPDMA_QUEUED");
 	case 0x67:
 		if (cmd->features == 0xec)
 			return ("SEP_ATTN IDENTIFY");
@@ -119,14 +141,30 @@
 		}
 		return ("SEP_ATTN");
 	case 0x70: return ("SEEK");
+	case 0x77: return ("SET_DATE_TIME_EXT");
+	case 0x78: return ("ACCESSIBLE_MAX_ADDRESS_CONFIGURATION");
 	case 0x87: return ("CFA_TRANSLATE_SECTOR");
 	case 0x90: return ("EXECUTE_DEVICE_DIAGNOSTIC");
 	case 0x92: return ("DOWNLOAD_MICROCODE");
+	case 0x93: return ("DOWNLOAD_MICROCODE_DMA");
+	case 0x9a: return ("ZAC_MANAGEMENT_OUT");
 	case 0xa0: return ("PACKET");
 	case 0xa1: return ("ATAPI_IDENTIFY");
 	case 0xa2: return ("SERVICE");
-	case 0xb0: return ("SMART");
+	case 0xb0:
+		switch(cmd->features) {
+		case 0xd0: return ("SMART READ ATTR VALUES");
+		case 0xd1: return ("SMART READ ATTR THRESHOLDS");
+		case 0xd3: return ("SMART SAVE ATTR VALUES");
+		case 0xd4: return ("SMART EXECUTE OFFLINE IMMEDIATE");
+		case 0xd5: return ("SMART READ LOG DATA");
+		case 0xd8: return ("SMART ENABLE OPERATION");
+		case 0xd9: return ("SMART DISABLE OPERATION");
+		case 0xda: return ("SMART RETURN STATUS");
+		}
+		return ("SMART");
 	case 0xb1: return ("DEVICE CONFIGURATION");
+	case 0xb4: return ("SANITIZE_DEVICE");
 	case 0xc0: return ("CFA_ERASE");
 	case 0xc4: return ("READ_MUL");
 	case 0xc5: return ("WRITE_MUL");
@@ -155,18 +193,22 @@
 	case 0xed: return ("MEDIA_EJECT");
 	case 0xef:
 		switch (cmd->features) {
-	        case 0x03: return ("SETFEATURES SET TRANSFER MODE");
-	        case 0x02: return ("SETFEATURES ENABLE WCACHE");
-	        case 0x82: return ("SETFEATURES DISABLE WCACHE");
-	        case 0x06: return ("SETFEATURES ENABLE PUIS");
-	        case 0x86: return ("SETFEATURES DISABLE PUIS");
-	        case 0x07: return ("SETFEATURES SPIN-UP");
-	        case 0x10: return ("SETFEATURES ENABLE SATA FEATURE");
-	        case 0x90: return ("SETFEATURES DISABLE SATA FEATURE");
-	        case 0xaa: return ("SETFEATURES ENABLE RCACHE");
-	        case 0x55: return ("SETFEATURES DISABLE RCACHE");
-	        }
-	        return "SETFEATURES";
+		case 0x03: return ("SETFEATURES SET TRANSFER MODE");
+		case 0x02: return ("SETFEATURES ENABLE WCACHE");
+		case 0x82: return ("SETFEATURES DISABLE WCACHE");
+		case 0x06: return ("SETFEATURES ENABLE PUIS");
+		case 0x86: return ("SETFEATURES DISABLE PUIS");
+		case 0x07: return ("SETFEATURES SPIN-UP");
+		case 0x10: return ("SETFEATURES ENABLE SATA FEATURE");
+		case 0x90: return ("SETFEATURES DISABLE SATA FEATURE");
+		case 0xaa: return ("SETFEATURES ENABLE RCACHE");
+		case 0x55: return ("SETFEATURES DISABLE RCACHE");
+		case 0x5d: return ("SETFEATURES ENABLE RELIRQ");
+		case 0xdd: return ("SETFEATURES DISABLE RELIRQ");
+		case 0x5e: return ("SETFEATURES ENABLE SRVIRQ");
+		case 0xde: return ("SETFEATURES DISABLE SRVIRQ");
+		}
+		return "SETFEATURES";
 	case 0xf1: return ("SECURITY_SET_PASSWORD");
 	case 0xf2: return ("SECURITY_UNLOCK");
 	case 0xf3: return ("SECURITY_ERASE_PREPARE");
@@ -182,15 +224,31 @@
 char *
 ata_cmd_string(struct ata_cmd *cmd, char *cmd_string, size_t len)
 {
+	struct sbuf sb;
+	int error;
 
-	snprintf(cmd_string, len, "%02x %02x %02x %02x "
+	if (len == 0)
+		return ("");
+
+	sbuf_new(&sb, cmd_string, len, SBUF_FIXEDLEN);
+	ata_cmd_sbuf(cmd, &sb);
+
+	error = sbuf_finish(&sb);
+	if (error != 0 && error != ENOMEM)
+		return ("");
+
+	return(sbuf_data(&sb));
+}
+
+void
+ata_cmd_sbuf(struct ata_cmd *cmd, struct sbuf *sb)
+{
+	sbuf_printf(sb, "%02x %02x %02x %02x "
 	    "%02x %02x %02x %02x %02x %02x %02x %02x",
 	    cmd->command, cmd->features,
 	    cmd->lba_low, cmd->lba_mid, cmd->lba_high, cmd->device,
 	    cmd->lba_low_exp, cmd->lba_mid_exp, cmd->lba_high_exp,
 	    cmd->features_exp, cmd->sector_count, cmd->sector_count_exp);
-
-	return(cmd_string);
 }
 
 char *
@@ -204,7 +262,7 @@
 	    res->lba_low_exp, res->lba_mid_exp, res->lba_high_exp,
 	    res->sector_count, res->sector_count_exp);
 
-	return(res_string);
+	return (res_string);
 }
 
 /*
@@ -213,11 +271,10 @@
 int
 ata_command_sbuf(struct ccb_ataio *ataio, struct sbuf *sb)
 {
-	char cmd_str[(12 * 3) + 1];
 
-	sbuf_printf(sb, "%s. ACB: %s",
-	    ata_op_string(&ataio->cmd),
-	    ata_cmd_string(&ataio->cmd, cmd_str, sizeof(cmd_str)));
+	sbuf_printf(sb, "%s. ACB: ",
+	    ata_op_string(&ataio->cmd));
+	ata_cmd_sbuf(&ataio->cmd, sb);
 
 	return(0);
 }
@@ -272,31 +329,53 @@
 void
 ata_print_ident(struct ata_params *ident_data)
 {
-	char product[48], revision[16];
+	const char *proto;
+	char product[48], revision[16], ata[12], sata[12];
 
 	cam_strvis(product, ident_data->model, sizeof(ident_data->model),
 		   sizeof(product));
 	cam_strvis(revision, ident_data->revision, sizeof(ident_data->revision),
 		   sizeof(revision));
-	printf("<%s %s> %s-%d",
-	    product, revision,
-	    (ident_data->config == ATA_PROTO_CFA) ? "CFA" :
-	    (ident_data->config & ATA_PROTO_ATAPI) ? "ATAPI" : "ATA",
-	    ata_version(ident_data->version_major));
+	proto = (ident_data->config == ATA_PROTO_CFA) ? "CFA" :
+		(ident_data->config & ATA_PROTO_ATAPI) ? "ATAPI" : "ATA";
+	if (ata_version(ident_data->version_major) == 0) {
+		snprintf(ata, sizeof(ata), "%s", proto);
+	} else if (ata_version(ident_data->version_major) <= 7) {
+		snprintf(ata, sizeof(ata), "%s-%d", proto,
+		    ata_version(ident_data->version_major));
+	} else if (ata_version(ident_data->version_major) == 8) {
+		snprintf(ata, sizeof(ata), "%s8-ACS", proto);
+	} else {
+		snprintf(ata, sizeof(ata), "ACS-%d %s",
+		    ata_version(ident_data->version_major) - 7, proto);
+	}
 	if (ident_data->satacapabilities && ident_data->satacapabilities != 0xffff) {
 		if (ident_data->satacapabilities & ATA_SATA_GEN3)
-			printf(" SATA 3.x");
+			snprintf(sata, sizeof(sata), " SATA 3.x");
 		else if (ident_data->satacapabilities & ATA_SATA_GEN2)
-			printf(" SATA 2.x");
+			snprintf(sata, sizeof(sata), " SATA 2.x");
 		else if (ident_data->satacapabilities & ATA_SATA_GEN1)
-			printf(" SATA 1.x");
+			snprintf(sata, sizeof(sata), " SATA 1.x");
 		else
-			printf(" SATA");
-	}
-	printf(" device\n");
+			snprintf(sata, sizeof(sata), " SATA");
+	} else
+		sata[0] = 0;
+	printf("<%s %s> %s%s device\n", product, revision, ata, sata);
 }
 
 void
+ata_print_ident_short(struct ata_params *ident_data)
+{
+	char product[48], revision[16];
+
+	cam_strvis(product, ident_data->model, sizeof(ident_data->model),
+		   sizeof(product));
+	cam_strvis(revision, ident_data->revision, sizeof(ident_data->revision),
+		   sizeof(revision));
+	printf("<%s %s>", product, revision);
+}
+
+void
 semb_print_ident(struct sep_identify_data *ident_data)
 {
 	char vendor[9], product[17], revision[5], fw[5], in[7], ins[5];
@@ -311,13 +390,25 @@
 	    vendor, product, revision, fw, in, ins);
 }
 
+void
+semb_print_ident_short(struct sep_identify_data *ident_data)
+{
+	char vendor[9], product[17], revision[5], fw[5];
+
+	cam_strvis(vendor, ident_data->vendor_id, 8, sizeof(vendor));
+	cam_strvis(product, ident_data->product_id, 16, sizeof(product));
+	cam_strvis(revision, ident_data->product_rev, 4, sizeof(revision));
+	cam_strvis(fw, ident_data->firmware_rev, 4, sizeof(fw));
+	printf("<%s %s %s %s>", vendor, product, revision, fw);
+}
+
 uint32_t
 ata_logical_sector_size(struct ata_params *ident_data)
 {
-	if ((ident_data->pss & 0xc000) == 0x4000 &&
+	if ((ident_data->pss & ATA_PSS_VALID_MASK) == ATA_PSS_VALID_VALUE &&
 	    (ident_data->pss & ATA_PSS_LSSABOVE512)) {
-		return ((u_int32_t)ident_data->lss_1 |
-		    ((u_int32_t)ident_data->lss_2 << 16));
+		return (((u_int32_t)ident_data->lss_1 |
+		    ((u_int32_t)ident_data->lss_2 << 16)) * 2);
 	}
 	return (512);
 }
@@ -325,10 +416,13 @@
 uint64_t
 ata_physical_sector_size(struct ata_params *ident_data)
 {
-	if ((ident_data->pss & 0xc000) == 0x4000 &&
-	    (ident_data->pss & ATA_PSS_MULTLS)) {
-		return ((uint64_t)ata_logical_sector_size(ident_data) *
-		    (1 << (ident_data->pss & ATA_PSS_LSPPS)));
+	if ((ident_data->pss & ATA_PSS_VALID_MASK) == ATA_PSS_VALID_VALUE) {
+		if (ident_data->pss & ATA_PSS_MULTLS) {
+			return ((uint64_t)ata_logical_sector_size(ident_data) *
+			    (1 << (ident_data->pss & ATA_PSS_LSPPS)));
+		} else {
+			return (uint64_t)ata_logical_sector_size(ident_data);
+		}
 	}
 	return (512);
 }
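
The notable change above is the split of ata_cmd_string() into an
sbuf-based ata_cmd_sbuf() plus a thin string wrapper, so callers such as
ata_command_sbuf() can format the ACB straight into an existing sbuf
without a stack buffer. Below is a minimal kernel-style sketch of that
fixed-buffer sbuf pattern; the hexdump names are stand-ins for
ata_cmd_sbuf()/ata_cmd_string(), and note that userland sbuf_finish()
returns -1 and sets errno rather than returning the error.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sbuf.h>

static void
hexdump_sbuf(struct sbuf *sb, const uint8_t *p, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		sbuf_printf(sb, "%s%02x", i > 0 ? " " : "", p[i]);
}

static char *
hexdump_string(const uint8_t *p, size_t n, char *buf, size_t len)
{
	struct sbuf sb;
	int error;

	if (len == 0)
		return ("");
	/* Wrap the caller's buffer; SBUF_FIXEDLEN forbids reallocation. */
	sbuf_new(&sb, buf, len, SBUF_FIXEDLEN);
	hexdump_sbuf(&sb, p, n);
	/* On a fixed-length sbuf, ENOMEM only signals truncation. */
	error = sbuf_finish(&sb);
	if (error != 0 && error != ENOMEM)
		return ("");
	return (sbuf_data(&sb));
}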

Modified: trunk/sys/cam/ata/ata_all.h
===================================================================
--- trunk/sys/cam/ata/ata_all.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ata/ata_all.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2009 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
@@ -23,7 +24,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/ata/ata_all.h 298100 2016-04-16 05:14:55Z scottl $
  */
 
 #ifndef	CAM_ATA_ALL_H
@@ -103,6 +104,7 @@
 
 char *	ata_op_string(struct ata_cmd *cmd);
 char *	ata_cmd_string(struct ata_cmd *cmd, char *cmd_string, size_t len);
+void	ata_cmd_sbuf(struct ata_cmd *cmd, struct sbuf *sb);
 char *	ata_res_string(struct ata_res *res, char *res_string, size_t len);
 int	ata_command_sbuf(struct ccb_ataio *ataio, struct sbuf *sb);
 int	ata_status_sbuf(struct ccb_ataio *ataio, struct sbuf *sb);
@@ -109,6 +111,7 @@
 int	ata_res_sbuf(struct ccb_ataio *ataio, struct sbuf *sb);
 
 void	ata_print_ident(struct ata_params *ident_data);
+void	ata_print_ident_short(struct ata_params *ident_data);
 
 uint32_t	ata_logical_sector_size(struct ata_params *ident_data);
 uint64_t	ata_physical_sector_size(struct ata_params *ident_data);
@@ -143,6 +146,7 @@
 int	ata_static_identify_match(caddr_t identbuffer, caddr_t table_entry);
 
 void	semb_print_ident(struct sep_identify_data *ident_data);
+void	semb_print_ident_short(struct sep_identify_data *ident_data);
 
 void semb_receive_diagnostic_results(struct ccb_ataio *ataio,
 	u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*),

Modified: trunk/sys/cam/ata/ata_da.c
===================================================================
--- trunk/sys/cam/ata/ata_da.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ata/ata_da.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2009 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
@@ -25,10 +26,9 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ata/ata_da.c 308081 2016-10-29 08:48:01Z mav $");
 
 #include "opt_ada.h"
-#include "opt_ata.h"
 
 #include <sys/param.h>
 
@@ -45,6 +45,7 @@
 #include <sys/eventhandler.h>
 #include <sys/malloc.h>
 #include <sys/cons.h>
+#include <sys/proc.h>
 #include <sys/reboot.h>
 #include <geom/geom_disk.h>
 #endif /* _KERNEL */
@@ -75,19 +76,19 @@
 } ada_state;
 
 typedef enum {
-	ADA_FLAG_PACK_INVALID	= 0x0001,
 	ADA_FLAG_CAN_48BIT	= 0x0002,
 	ADA_FLAG_CAN_FLUSHCACHE	= 0x0004,
 	ADA_FLAG_CAN_NCQ	= 0x0008,
 	ADA_FLAG_CAN_DMA	= 0x0010,
 	ADA_FLAG_NEED_OTAG	= 0x0020,
-	ADA_FLAG_WENT_IDLE	= 0x0040,
+	ADA_FLAG_WAS_OTAG	= 0x0040,
 	ADA_FLAG_CAN_TRIM	= 0x0080,
 	ADA_FLAG_OPEN		= 0x0100,
 	ADA_FLAG_SCTX_INIT	= 0x0200,
 	ADA_FLAG_CAN_CFA        = 0x0400,
 	ADA_FLAG_CAN_POWERMGT   = 0x0800,
-	ADA_FLAG_CAN_DMA48	= 0x1000
+	ADA_FLAG_CAN_DMA48	= 0x1000,
+	ADA_FLAG_DIRTY		= 0x2000
 } ada_flags;
 
 typedef enum {
@@ -103,7 +104,6 @@
 	ADA_CCB_RAHEAD		= 0x01,
 	ADA_CCB_WCACHE		= 0x02,
 	ADA_CCB_BUFFER_IO	= 0x03,
-	ADA_CCB_WAITING		= 0x04,
 	ADA_CCB_DUMP		= 0x05,
 	ADA_CCB_TRIM		= 0x06,
 	ADA_CCB_TYPE_MASK	= 0x0F,
@@ -123,21 +123,20 @@
 
 #define TRIM_MAX_BLOCKS	8
 #define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES)
-#define TRIM_MAX_BIOS	(TRIM_MAX_RANGES * 4)
 struct trim_request {
 	uint8_t		data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE];
-	struct bio	*bps[TRIM_MAX_BIOS];
+	TAILQ_HEAD(, bio) bps;
 };
 
 struct ada_softc {
 	struct	 bio_queue_head bio_queue;
 	struct	 bio_queue_head trim_queue;
+	int	 outstanding_cmds;	/* Number of active commands */
+	int	 refcount;		/* Active xpt_action() calls */
 	ada_state state;
-	ada_flags flags;	
+	ada_flags flags;
 	ada_quirks quirks;
 	int	 sort_io_queue;
-	int	 ordered_tag_count;
-	int	 outstanding_cmds;
 	int	 trim_max_ranges;
 	int	 trim_running;
 	int	 read_ahead;
@@ -452,14 +451,6 @@
 	},
 	{
 		/*
-		 * Samsung 750 Series SSDs
-		 * 4k optimised
-		 */
-		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 750*", "*" },
-		/*quirks*/ADA_Q_4K
-	},
-	{
-		/*
 		 * Samsung 830 Series SSDs
 		 * 4k optimised
 		 */
@@ -476,14 +467,6 @@
 	},
 	{
 		/*
-		 * Samsung 843T Series SSDs
-		 * 4k optimised
-		 */
-		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7WD*", "*" },
-		/*quirks*/ADA_Q_4K
-	},
- 	{
- 		/*
 		 * Samsung 850 SSDs
 		 * 4k optimised
 		 */
@@ -492,10 +475,13 @@
 	},
 	{
 		/*
-		 * Samsung PM853T Series SSDs
+		 * Samsung 843T Series SSDs (MZ7WD*)
+		 * Samsung PM851 Series SSDs (MZ7TE*)
+		 * Samsung PM853T Series SSDs (MZ7GE*)
+		 * Samsung SM863 Series SSDs (MZ7KM*)
 		 * 4k optimised
 		 */
-		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7GE*", "*" },
+		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7*", "*" },
 		/*quirks*/ADA_Q_4K
 	},
 	{
@@ -508,22 +494,6 @@
 	},
 	{
 		/*
-		 * Toshiba SSDs
-		 * 4k optimised  (some of these are 8k, but this is what we support now)
-		 */
-		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "TOSHIBA THNSNH*", "*" },
-		/*quirks*/ADA_Q_4K
-	},
-	{
-		/*
-		 * Toshiba SSDs
-		 * 4k optimised
-		 */
-		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "TOSHIBA THNSNJ???GCSU*", "*" },
-		/*quirks*/ADA_Q_4K
-	},
-	{
-		/*
 		 * XceedIOPS SATA SSDs
 		 * 4k optimised
 		 */
@@ -562,12 +532,8 @@
 static void		adaresume(void *arg);
 
 #ifndef	ADA_DEFAULT_LEGACY_ALIASES
-#ifdef ATA_CAM
 #define	ADA_DEFAULT_LEGACY_ALIASES	1
-#else
-#define	ADA_DEFAULT_LEGACY_ALIASES	0
 #endif
-#endif
 
 #ifndef ADA_DEFAULT_TIMEOUT
 #define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
@@ -672,8 +638,6 @@
 
 PERIPHDRIVER_DECLARE(ada, adadriver);
 
-static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
-
 static int
 adaopen(struct disk *dp)
 {
@@ -693,16 +657,11 @@
 		return (error);
 	}
 
-	softc = (struct ada_softc *)periph->softc;
-	softc->flags |= ADA_FLAG_OPEN;
-
 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
 	    ("adaopen\n"));
 
-	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
-		/* Invalidate our pack information. */
-		softc->flags &= ~ADA_FLAG_PACK_INVALID;
-	}
+	softc = (struct ada_softc *)periph->softc;
+	softc->flags |= ADA_FLAG_OPEN;
 
 	cam_periph_unhold(periph);
 	cam_periph_unlock(periph);
@@ -715,23 +674,20 @@
 	struct	cam_periph *periph;
 	struct	ada_softc *softc;
 	union ccb *ccb;
+	int error;
 
 	periph = (struct cam_periph *)dp->d_drv1;
+	softc = (struct ada_softc *)periph->softc;
 	cam_periph_lock(periph);
-	if (cam_periph_hold(periph, PRIBIO) != 0) {
-		cam_periph_unlock(periph);
-		cam_periph_release(periph);
-		return (0);
-	}
 
-	softc = (struct ada_softc *)periph->softc;
-
 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
 	    ("adaclose\n"));
 
 	/* We only sync the cache if the drive is capable of it. */
-	if ((softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 &&
-	    (softc->flags & ADA_FLAG_PACK_INVALID) == 0) {
+	if ((softc->flags & ADA_FLAG_DIRTY) != 0 &&
+	    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 &&
+	    (periph->flags & CAM_PERIPH_INVALID) == 0 &&
+	    cam_periph_hold(periph, PRIBIO) == 0) {
 
 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 		cam_fill_ataio(&ccb->ataio,
@@ -747,16 +703,20 @@
 			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
 		else
 			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
-		cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
+		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
 		    /*sense_flags*/0, softc->disk->d_devstat);
 
-		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
+		if (error != 0)
 			xpt_print(periph->path, "Synchronize cache failed\n");
+		softc->flags &= ~ADA_FLAG_DIRTY;
 		xpt_release_ccb(ccb);
+		cam_periph_unhold(periph);
 	}
 
 	softc->flags &= ~ADA_FLAG_OPEN;
-	cam_periph_unhold(periph);
+
+	while (softc->refcount != 0)
+		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1);
 	cam_periph_unlock(periph);
 	cam_periph_release(periph);
 	return (0);	
@@ -766,20 +726,15 @@
 adaschedule(struct cam_periph *periph)
 {
 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
-	uint32_t prio;
 
-	/* Check if cam_periph_getccb() was called. */
-	prio = periph->immediate_priority;
+	if (softc->state != ADA_STATE_NORMAL)
+		return;
 
 	/* Check if we have more work to do. */
 	if (bioq_first(&softc->bio_queue) ||
 	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
-		prio = CAM_PRIORITY_NORMAL;
+		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 	}
-
-	/* Schedule CCB if any of above is true. */
-	if (prio != CAM_PRIORITY_NONE)
-		xpt_schedule(periph, prio);
 }
 
 /*
@@ -803,7 +758,7 @@
 	/*
 	 * If the device has been made invalid, error out
 	 */
-	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
+	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
 		cam_periph_unlock(periph);
 		biofinish(bp, NULL, ENXIO);
 		return;
@@ -812,12 +767,8 @@
 	/*
 	 * Place it in the queue of disk activities for this disk
 	 */
-	if (bp->bio_cmd == BIO_DELETE &&
-	    (softc->flags & ADA_FLAG_CAN_TRIM)) {
-		if (ADA_SIO)
-		    bioq_disksort(&softc->trim_queue, bp);
-		else
-		    bioq_insert_tail(&softc->trim_queue, bp);
+	if (bp->bio_cmd == BIO_DELETE) {
+		bioq_disksort(&softc->trim_queue, bp);
 	} else {
 		if (ADA_SIO)
 		    bioq_disksort(&softc->bio_queue, bp);
@@ -854,7 +805,7 @@
 	lba = offset / secsize;
 	count = length / secsize;
 	
-	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
+	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
 		cam_periph_unlock(periph);
 		return (ENXIO);
 	}
@@ -979,8 +930,6 @@
 	 */
 	xpt_register_async(0, adaasync, periph, periph->path);
 
-	softc->flags |= ADA_FLAG_PACK_INVALID;
-
 	/*
 	 * Return all queued I/O with ENXIO.
 	 * XXX Handle any transactions queued to the card
@@ -990,7 +939,6 @@
 	bioq_flush(&softc->trim_queue, NULL, ENXIO);
 
 	disk_gone(softc->disk);
-	xpt_print(periph->path, "lost device\n");
 }
 
 static void
@@ -1000,7 +948,6 @@
 
 	softc = (struct ada_softc *)periph->softc;
 
-	xpt_print(periph->path, "removing device entry\n");
 	cam_periph_unlock(periph);
 
 	/*
@@ -1047,7 +994,7 @@
 		status = cam_periph_alloc(adaregister, adaoninvalidate,
 					  adacleanup, adastart,
 					  "ada", CAM_PERIPH_BIO,
-					  cgd->ccb_h.path, adaasync,
+					  path, adaasync,
 					  AC_FOUND_DEVICE, cgd);
 
 		if (status != CAM_REQ_CMP
@@ -1123,10 +1070,10 @@
 			softc->state = ADA_STATE_WCACHE;
 		else
 		    break;
-		cam_periph_acquire(periph);
-		cam_freeze_devq_arg(periph->path,
-		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
-		xpt_schedule(periph, CAM_PRIORITY_DEV);
+		if (cam_periph_acquire(periph) != CAM_REQ_CMP)
+			softc->state = ADA_STATE_NORMAL;
+		else
+			xpt_schedule(periph, CAM_PRIORITY_DEV);
 	}
 	default:
 		cam_periph_async(periph, code, path, arg);
@@ -1144,7 +1091,7 @@
 	periph = (struct cam_periph *)context;
 
 	/* periph was held for us when this task was enqueued */
-	if (periph->flags & CAM_PERIPH_INVALID) {
+	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
 		cam_periph_release(periph);
 		return;
 	}
@@ -1310,12 +1257,13 @@
 	    "kern.cam.ada.%d.write_cache", periph->unit_number);
 	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
 	/* Disable queue sorting for non-rotational media by default. */
-	if (cgd->ident_data.media_rotation_rate == 1)
+	if (cgd->ident_data.media_rotation_rate == ATA_RATE_NON_ROTATING)
 		softc->sort_io_queue = 0;
 	else
 		softc->sort_io_queue = -1;
 	adagetparams(periph, cgd);
 	softc->disk = disk_alloc();
+	softc->disk->d_rotation_rate = cgd->ident_data.media_rotation_rate;
 	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
 			  periph->unit_number, softc->params.secsize,
 			  DEVSTAT_ALL_SUPPORTED,
@@ -1341,15 +1289,22 @@
 		maxio = min(maxio, 256 * softc->params.secsize);
 	softc->disk->d_maxsize = maxio;
 	softc->disk->d_unit = periph->unit_number;
-	softc->disk->d_flags = 0;
+	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION;
 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
 		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
 	if (softc->flags & ADA_FLAG_CAN_TRIM) {
 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
+		softc->disk->d_delmaxsize = softc->params.secsize *
+					    ATA_DSM_RANGE_MAX *
+					    softc->trim_max_ranges;
 	} else if ((softc->flags & ADA_FLAG_CAN_CFA) &&
 	    !(softc->flags & ADA_FLAG_CAN_48BIT)) {
 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
-	}
+		softc->disk->d_delmaxsize = 256 * softc->params.secsize;
+	} else
+		softc->disk->d_delmaxsize = maxio;
+	if ((cpi.hba_misc & PIM_UNMAPPED) != 0)
+		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
 	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
 	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
 	strlcpy(softc->disk->d_ident, cgd->ident_data.serial,
@@ -1410,12 +1365,9 @@
 
 	dp = &softc->params;
 	snprintf(announce_buf, sizeof(announce_buf),
-		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
-		(uintmax_t)(((uintmax_t)dp->secsize *
-		dp->sectors) / (1024*1024)),
-		(uintmax_t)dp->sectors,
-		dp->secsize, dp->heads,
-		dp->secs_per_track, dp->cylinders);
+	    "%juMB (%ju %u byte sectors)",
+	    ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024),
+	    (uintmax_t)dp->sectors, dp->secsize);
 	xpt_announce_periph(periph, announce_buf);
 	xpt_announce_quirks(periph, softc->quirks, ADA_Q_BIT_STRING);
 	if (legacy_id >= 0)
@@ -1426,8 +1378,8 @@
 	 * Create our sysctl variables, now that we know
 	 * we have successfully attached.
 	 */
-	cam_periph_acquire(periph);
-	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
+	if (cam_periph_acquire(periph) == CAM_REQ_CMP)
+		taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
 
 	/*
 	 * Add async callbacks for bus reset and
@@ -1445,7 +1397,7 @@
 	 * Schedule a periodic event to occasionally send an
 	 * ordered tag to a device.
 	 */
-	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
+	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
 	callout_reset(&softc->sendordered_c,
 	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
 	    adasendorderedtag, softc);
@@ -1453,24 +1405,117 @@
 	if (ADA_RA >= 0 &&
 	    cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) {
 		softc->state = ADA_STATE_RAHEAD;
-		cam_periph_acquire(periph);
-		cam_freeze_devq_arg(periph->path,
-		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
-		xpt_schedule(periph, CAM_PRIORITY_DEV);
 	} else if (ADA_WC >= 0 &&
 	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
 		softc->state = ADA_STATE_WCACHE;
-		cam_periph_acquire(periph);
-		cam_freeze_devq_arg(periph->path,
-		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
+	} else {
+		softc->state = ADA_STATE_NORMAL;
+		return(CAM_REQ_CMP);
+	}
+	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
+		softc->state = ADA_STATE_NORMAL;
+	else
 		xpt_schedule(periph, CAM_PRIORITY_DEV);
-	} else
-		softc->state = ADA_STATE_NORMAL;
-
 	return(CAM_REQ_CMP);
 }
 
 static void
+ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
+{
+	struct trim_request *req = &softc->trim_req;
+	uint64_t lastlba = (uint64_t)-1;
+	int c, lastcount = 0, off, ranges = 0;
+
+	bzero(req, sizeof(*req));
+	TAILQ_INIT(&req->bps);
+	do {
+		uint64_t lba = bp->bio_pblkno;
+		int count = bp->bio_bcount / softc->params.secsize;
+
+		bioq_remove(&softc->trim_queue, bp);
+
+		/* Try to extend the previous range. */
+		if (lba == lastlba) {
+			c = min(count, ATA_DSM_RANGE_MAX - lastcount);
+			lastcount += c;
+			off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
+			req->data[off + 6] = lastcount & 0xff;
+			req->data[off + 7] =
+				(lastcount >> 8) & 0xff;
+			count -= c;
+			lba += c;
+		}
+
+		while (count > 0) {
+			c = min(count, ATA_DSM_RANGE_MAX);
+			off = ranges * ATA_DSM_RANGE_SIZE;
+			req->data[off + 0] = lba & 0xff;
+			req->data[off + 1] = (lba >> 8) & 0xff;
+			req->data[off + 2] = (lba >> 16) & 0xff;
+			req->data[off + 3] = (lba >> 24) & 0xff;
+			req->data[off + 4] = (lba >> 32) & 0xff;
+			req->data[off + 5] = (lba >> 40) & 0xff;
+			req->data[off + 6] = c & 0xff;
+			req->data[off + 7] = (c >> 8) & 0xff;
+			lba += c;
+			count -= c;
+			lastcount = c;
+			ranges++;
+			/*
+			 * Its the caller's responsibility to ensure the
+			 * request will fit so we don't need to check for
+			 * overrun here
+			 */
+		}
+		lastlba = lba;
+		TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
+		bp = bioq_first(&softc->trim_queue);
+		if (bp == NULL ||
+		    bp->bio_bcount / softc->params.secsize >
+		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX)
+			break;
+	} while (1);
+	cam_fill_ataio(ataio,
+	    ada_retry_count,
+	    adadone,
+	    CAM_DIR_OUT,
+	    0,
+	    req->data,
+	    ((ranges + ATA_DSM_BLK_RANGES - 1) /
+	    ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
+	    ada_default_timeout * 1000);
+	ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
+	    ATA_DSM_TRIM, 0, (ranges + ATA_DSM_BLK_RANGES -
+	    1) / ATA_DSM_BLK_RANGES);
+}
+
+static void
+ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
+{
+	struct trim_request *req = &softc->trim_req;
+	uint64_t lba = bp->bio_pblkno;
+	uint16_t count = bp->bio_bcount / softc->params.secsize;
+
+	bzero(req, sizeof(*req));
+	TAILQ_INIT(&req->bps);
+	bioq_remove(&softc->trim_queue, bp);
+	TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
+
+	cam_fill_ataio(ataio,
+	    ada_retry_count,
+	    adadone,
+	    CAM_DIR_NONE,
+	    0,
+	    NULL,
+	    0,
+	    ada_default_timeout*1000);
+
+	if (count >= 256)
+		count = 0;
+	ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
+}
+
+static void
 adastart(struct cam_periph *periph, union ccb *start_ccb)
 {
 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
@@ -1484,93 +1529,25 @@
 		struct bio *bp;
 		u_int8_t tag_code;
 
-		/* Execute immediate CCB if waiting. */
-		if (periph->immediate_priority <= periph->pinfo.priority) {
-			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
-					("queuing for immediate ccb\n"));
-			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
-			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-					  periph_links.sle);
-			periph->immediate_priority = CAM_PRIORITY_NONE;
-			wakeup(&periph->ccb_list);
-			/* Have more work to do, so ensure we stay scheduled */
-			adaschedule(periph);
-			break;
-		}
 		/* Run TRIM if not running yet. */
 		if (!softc->trim_running &&
 		    (bp = bioq_first(&softc->trim_queue)) != 0) {
-			struct trim_request *req = &softc->trim_req;
-			struct bio *bp1;
-			uint64_t lastlba = (uint64_t)-1;
-			int bps = 0, c, lastcount = 0, off, ranges = 0;
-
+			if (softc->flags & ADA_FLAG_CAN_TRIM) {
+				ada_dsmtrim(softc, bp, ataio);
+			} else if ((softc->flags & ADA_FLAG_CAN_CFA) &&
+			    !(softc->flags & ADA_FLAG_CAN_48BIT)) {
+				ada_cfaerase(softc, bp, ataio);
+			} else {
+				/* This can happen if DMA was disabled. */
+				bioq_remove(&softc->trim_queue, bp);
+				biofinish(bp, NULL, EOPNOTSUPP);
+				xpt_release_ccb(start_ccb);
+				adaschedule(periph);
+				return;
+			}
 			softc->trim_running = 1;
-			bzero(req, sizeof(*req));
-			bp1 = bp;
-			do {
-				uint64_t lba = bp1->bio_pblkno;
-				int count = bp1->bio_bcount /
-				    softc->params.secsize;
-
-				bioq_remove(&softc->trim_queue, bp1);
-
-				/* Try to extend the previous range. */
-				if (lba == lastlba) {
-					c = min(count, ATA_DSM_RANGE_MAX - lastcount);
-					lastcount += c;
-					off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
-					req->data[off + 6] = lastcount & 0xff;
-					req->data[off + 7] =
-					    (lastcount >> 8) & 0xff;
-					count -= c;
-					lba += c;
-				}
-
-				while (count > 0) {
-					c = min(count, ATA_DSM_RANGE_MAX);
-					off = ranges * ATA_DSM_RANGE_SIZE;
-					req->data[off + 0] = lba & 0xff;
-					req->data[off + 1] = (lba >> 8) & 0xff;
-					req->data[off + 2] = (lba >> 16) & 0xff;
-					req->data[off + 3] = (lba >> 24) & 0xff;
-					req->data[off + 4] = (lba >> 32) & 0xff;
-					req->data[off + 5] = (lba >> 40) & 0xff;
-					req->data[off + 6] = c & 0xff;
-					req->data[off + 7] = (c >> 8) & 0xff;
-					lba += c;
-					count -= c;
-					lastcount = c;
-					ranges++;
-					/*
-					 * Its the caller's responsibility to ensure the
-					 * request will fit so we don't need to check for
-					 * overrun here
-					 */
-				}
-				lastlba = lba;
-				req->bps[bps++] = bp1;
-				bp1 = bioq_first(&softc->trim_queue);
-				if (bps >= TRIM_MAX_BIOS ||
-				    bp1 == NULL ||
-				    bp1->bio_bcount / softc->params.secsize >
-				    (softc->trim_max_ranges - ranges) *
-				    ATA_DSM_RANGE_MAX)
-					break;
-			} while (1);
-			cam_fill_ataio(ataio,
-			    ada_retry_count,
-			    adadone,
-			    CAM_DIR_OUT,
-			    0,
-			    req->data,
-			    ((ranges + ATA_DSM_BLK_RANGES - 1) /
-			        ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
-			    ada_default_timeout * 1000);
-			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
-			    ATA_DSM_TRIM, 0, (ranges + ATA_DSM_BLK_RANGES -
-			    1) / ATA_DSM_BLK_RANGES);
 			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
+			start_ccb->ccb_h.flags |= CAM_UNLOCKED;
 			goto out;
 		}
 		/* Run regular command. */
@@ -1584,17 +1561,33 @@
 		if ((bp->bio_flags & BIO_ORDERED) != 0
 		 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
 			softc->flags &= ~ADA_FLAG_NEED_OTAG;
-			softc->ordered_tag_count++;
+			softc->flags |= ADA_FLAG_WAS_OTAG;
 			tag_code = 0;
 		} else {
 			tag_code = 1;
 		}
 		switch (bp->bio_cmd) {
+		case BIO_WRITE:
 		case BIO_READ:
-		case BIO_WRITE:
 		{
 			uint64_t lba = bp->bio_pblkno;
 			uint16_t count = bp->bio_bcount / softc->params.secsize;
+			void *data_ptr;
+			int rw_op;
+
+			if (bp->bio_cmd == BIO_WRITE) {
+				softc->flags |= ADA_FLAG_DIRTY;
+				rw_op = CAM_DIR_OUT;
+			} else {
+				rw_op = CAM_DIR_IN;
+			}
+
+			data_ptr = bp->bio_data;
+			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
+				rw_op |= CAM_DATA_BIO;
+				data_ptr = bp;
+			}
+
 #ifdef ADA_TEST_FAILURE
 			int fail = 0;
 
@@ -1626,21 +1619,22 @@
 				}
 			}
 			if (fail) {
-				bp->bio_error = EIO;
-				bp->bio_flags |= BIO_ERROR;
-				biodone(bp);
+				biofinish(bp, NULL, EIO);
 				xpt_release_ccb(start_ccb);
 				adaschedule(periph);
 				return;
 			}
 #endif
+			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
+			    round_page(bp->bio_bcount + bp->bio_ma_offset) /
+			    PAGE_SIZE == bp->bio_ma_n,
+			    ("Short bio %p", bp));
 			cam_fill_ataio(ataio,
 			    ada_retry_count,
 			    adadone,
-			    bp->bio_cmd == BIO_READ ?
-			        CAM_DIR_IN : CAM_DIR_OUT,
+			    rw_op,
 			    tag_code,
-			    bp->bio_data,
+			    data_ptr,
 			    bp->bio_bcount,
 			    ada_default_timeout*1000);
 
@@ -1695,25 +1689,6 @@
 			}
 			break;
 		}
-		case BIO_DELETE:
-		{
-			uint64_t lba = bp->bio_pblkno;
-			uint16_t count = bp->bio_bcount / softc->params.secsize;
-
-			cam_fill_ataio(ataio,
-			    ada_retry_count,
-			    adadone,
-			    CAM_DIR_NONE,
-			    0,
-			    NULL,
-			    0,
-			    ada_default_timeout*1000);
-
-			if (count >= 256)
-				count = 0;
-			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
-			break;
-		}
 		case BIO_FLUSH:
 			cam_fill_ataio(ataio,
 			    1,
@@ -1731,10 +1706,15 @@
 			break;
 		}
 		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
+		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
 out:
 		start_ccb->ccb_h.ccb_bp = bp;
 		softc->outstanding_cmds++;
+		softc->refcount++;
+		cam_periph_unlock(periph);
 		xpt_action(start_ccb);
+		cam_periph_lock(periph);
+		softc->refcount--;
 
 		/* May have more work to do, so ensure we stay scheduled */
 		adaschedule(periph);
@@ -1743,16 +1723,6 @@
 	case ADA_STATE_RAHEAD:
 	case ADA_STATE_WCACHE:
 	{
-		if (softc->flags & ADA_FLAG_PACK_INVALID) {
-			softc->state = ADA_STATE_NORMAL;
-			xpt_release_ccb(start_ccb);
-			cam_release_devq(periph->path,
-			    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
-			adaschedule(periph);
-			cam_periph_release_locked(periph);
-			return;
-		}
-
 		cam_fill_ataio(ataio,
 		    1,
 		    adadone,
@@ -1771,6 +1741,7 @@
 			    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
 			start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
 		}
+		start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 		xpt_action(start_ccb);
 		break;
 	}
@@ -1783,53 +1754,33 @@
 	struct ada_softc *softc;
 	struct ccb_ataio *ataio;
 	struct ccb_getdev *cgd;
+	struct cam_path *path;
+	int state;
 
 	softc = (struct ada_softc *)periph->softc;
 	ataio = &done_ccb->ataio;
+	path = done_ccb->ccb_h.path;
 
-	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adadone\n"));
+	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n"));
 
-	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
+	state = ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK;
+	switch (state) {
 	case ADA_CCB_BUFFER_IO:
 	case ADA_CCB_TRIM:
 	{
 		struct bio *bp;
+		int error;
 
-		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
+		cam_periph_lock(periph);
 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
-			int error;
-			
 			error = adaerror(done_ccb, 0, 0);
 			if (error == ERESTART) {
 				/* A retry was scheduled, so just return. */
+				cam_periph_unlock(periph);
 				return;
 			}
-			if (error != 0) {
-				if (error == ENXIO &&
-				    (softc->flags & ADA_FLAG_PACK_INVALID) == 0) {
-					/*
-					 * Catastrophic error.  Mark our pack as
-					 * invalid.
-					 */
-					/*
-					 * XXX See if this is really a media
-					 * XXX change first?
-					 */
-					xpt_print(periph->path,
-					    "Invalidating pack\n");
-					softc->flags |= ADA_FLAG_PACK_INVALID;
-				}
-				bp->bio_error = error;
-				bp->bio_resid = bp->bio_bcount;
-				bp->bio_flags |= BIO_ERROR;
-			} else {
-				bp->bio_resid = ataio->resid;
-				bp->bio_error = 0;
-				if (bp->bio_resid != 0)
-					bp->bio_flags |= BIO_ERROR;
-			}
 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
-				cam_release_devq(done_ccb->ccb_h.path,
+				cam_release_devq(path,
 						 /*relsim_flags*/0,
 						 /*reduction*/0,
 						 /*timeout*/0,
@@ -1837,42 +1788,70 @@
 		} else {
 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 				panic("REQ_CMP with QFRZN");
-			bp->bio_resid = ataio->resid;
-			if (ataio->resid > 0)
+			error = 0;
+		}
+		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
+		bp->bio_error = error;
+		if (error != 0) {
+			bp->bio_resid = bp->bio_bcount;
+			bp->bio_flags |= BIO_ERROR;
+		} else {
+			if (state == ADA_CCB_TRIM)
+				bp->bio_resid = 0;
+			else
+				bp->bio_resid = ataio->resid;
+			if (bp->bio_resid > 0)
 				bp->bio_flags |= BIO_ERROR;
 		}
 		softc->outstanding_cmds--;
 		if (softc->outstanding_cmds == 0)
-			softc->flags |= ADA_FLAG_WENT_IDLE;
-		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
-		    ADA_CCB_TRIM) {
-			struct trim_request *req =
-			    (struct trim_request *)ataio->data_ptr;
-			int i;
+			softc->flags |= ADA_FLAG_WAS_OTAG;
+		xpt_release_ccb(done_ccb);
+		if (state == ADA_CCB_TRIM) {
+			TAILQ_HEAD(, bio) queue;
+			struct bio *bp1;
 
-			for (i = 1; i < TRIM_MAX_BIOS && req->bps[i]; i++) {
-				struct bio *bp1 = req->bps[i];
-				
-				bp1->bio_resid = bp->bio_resid;
-				bp1->bio_error = bp->bio_error;
-				if (bp->bio_flags & BIO_ERROR)
+			TAILQ_INIT(&queue);
+			TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue);
+			/*
+			 * Normally, the xpt_release_ccb() above would make sure
+			 * that when we have more work to do, that work would
+			 * get kicked off. However, we specifically keep
+			 * trim_running set to 0 before the call above to allow
+			 * other I/O to progress when many BIO_DELETE requests
+			 * are pushed down. We set trim_running to 0 and call
+			 * daschedule again so that we don't stall if there are
+			 * no other I/Os pending apart from BIO_DELETEs.
+			 */
+			softc->trim_running = 0;
+			adaschedule(periph);
+			cam_periph_unlock(periph);
+			while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
+				TAILQ_REMOVE(&queue, bp1, bio_queue);
+				bp1->bio_error = error;
+				if (error != 0) {
 					bp1->bio_flags |= BIO_ERROR;
+					bp1->bio_resid = bp1->bio_bcount;
+				} else
+					bp1->bio_resid = 0;
 				biodone(bp1);
 			}
-			softc->trim_running = 0;
+		} else {
+			cam_periph_unlock(periph);
 			biodone(bp);
-			adaschedule(periph);
-		} else
-			biodone(bp);
-		break;
+		}
+		return;
 	}
 	case ADA_CCB_RAHEAD:
 	{
 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 			if (adaerror(done_ccb, 0, 0) == ERESTART) {
+out:
+				/* Drop freeze taken due to CAM_DEV_QFREEZE */
+				cam_release_devq(path, 0, 0, 0, FALSE);
 				return;
 			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
-				cam_release_devq(done_ccb->ccb_h.path,
+				cam_release_devq(path,
 				    /*relsim_flags*/0,
 				    /*reduction*/0,
 				    /*timeout*/0,
@@ -1889,7 +1868,7 @@
 		 * operation.
 		 */
 		cgd = (struct ccb_getdev *)done_ccb;
-		xpt_setup_ccb(&cgd->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
+		xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
 		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
 		xpt_action((union ccb *)cgd);
 		if (ADA_WC >= 0 &&
@@ -1897,12 +1876,12 @@
 			softc->state = ADA_STATE_WCACHE;
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, CAM_PRIORITY_DEV);
-			return;
+			goto out;
 		}
 		softc->state = ADA_STATE_NORMAL;
 		xpt_release_ccb(done_ccb);
-		cam_release_devq(periph->path,
-		    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
+		/* Drop freeze taken due to CAM_DEV_QFREEZE */
+		cam_release_devq(path, 0, 0, 0, FALSE);
 		adaschedule(periph);
 		cam_periph_release_locked(periph);
 		return;
@@ -1911,9 +1890,9 @@
 	{
 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 			if (adaerror(done_ccb, 0, 0) == ERESTART) {
-				return;
+				goto out;
 			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
-				cam_release_devq(done_ccb->ccb_h.path,
+				cam_release_devq(path,
 				    /*relsim_flags*/0,
 				    /*reduction*/0,
 				    /*timeout*/0,
@@ -1931,18 +1910,12 @@
 		 * operation.
 		 */
 		xpt_release_ccb(done_ccb);
-		cam_release_devq(periph->path,
-		    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
+		/* Drop freeze taken due to CAM_DEV_QFREEZE */
+		cam_release_devq(path, 0, 0, 0, FALSE);
 		adaschedule(periph);
 		cam_periph_release_locked(periph);
 		return;
 	}
-	case ADA_CCB_WAITING:
-	{
-		/* Caller will release the CCB */
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
-	}
 	case ADA_CCB_DUMP:
 		/* No-op.  We're polling */
 		return;
@@ -2004,14 +1977,11 @@
 	struct ada_softc *softc = arg;
 
 	if (ada_send_ordered) {
-		if ((softc->ordered_tag_count == 0) 
-		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
-			softc->flags |= ADA_FLAG_NEED_OTAG;
+		if (softc->outstanding_cmds > 0) {
+			if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0)
+				softc->flags |= ADA_FLAG_NEED_OTAG;
+			softc->flags &= ~ADA_FLAG_WAS_OTAG;
 		}
-		if (softc->outstanding_cmds > 0)
-			softc->flags &= ~ADA_FLAG_WENT_IDLE;
-
-		softc->ordered_tag_count = 0;
 	}
 	/* Queue us up again */
 	callout_reset(&softc->sendordered_c,
@@ -2032,11 +2002,16 @@
 	int error;
 
 	CAM_PERIPH_FOREACH(periph, &adadriver) {
-		/* If we paniced with lock held - not recurse here. */
-		if (cam_periph_owned(periph))
+		softc = (struct ada_softc *)periph->softc;
+		if (SCHEDULER_STOPPED()) {
+			/* If we paniced with the lock held, do not recurse. */
+			if (!cam_periph_owned(periph) &&
+			    (softc->flags & ADA_FLAG_OPEN)) {
+				adadump(softc->disk, NULL, 0, 0, 0);
+			}
 			continue;
+		}
 		cam_periph_lock(periph);
-		softc = (struct ada_softc *)periph->softc;
 		/*
 		 * We only sync the cache if the drive is still open, and
 		 * if the drive is capable of it..
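
Two things stand out in the ata_da.c delta above: BIO_DELETE handling
moved out of adastart() into ada_dsmtrim()/ada_cfaerase(), with pending
bios kept on a TAILQ instead of the old fixed bps[] array, and adadone()
now completes TRIM bios only after dropping the periph lock. The DSM
payload ada_dsmtrim() builds is a packed array of 8-byte ranges: a
48-bit LBA in bytes 0-5 (little-endian) and a 16-bit sector count in
bytes 6-7. A self-contained sketch of that encoding, with RANGE_MAX
mirroring ATA_DSM_RANGE_MAX and the names purely illustrative:

#include <stdint.h>
#include <stddef.h>

#define RANGE_SIZE	8
#define RANGE_MAX	0xffff	/* max sectors per 8-byte range entry */

/* Encode [lba, lba + count) into range entries; returns entries used. */
static size_t
dsm_encode(uint8_t *data, uint64_t lba, uint64_t count)
{
	size_t ranges = 0;

	while (count > 0) {
		uint16_t c = count > RANGE_MAX ? RANGE_MAX : (uint16_t)count;
		uint8_t *p = data + ranges * RANGE_SIZE;

		p[0] = lba & 0xff;		/* LBA, little-endian */
		p[1] = (lba >> 8) & 0xff;
		p[2] = (lba >> 16) & 0xff;
		p[3] = (lba >> 24) & 0xff;
		p[4] = (lba >> 32) & 0xff;
		p[5] = (lba >> 40) & 0xff;
		p[6] = c & 0xff;		/* sector count */
		p[7] = (c >> 8) & 0xff;
		lba += c;
		count -= c;
		ranges++;
	}
	return (ranges);
}

A 512-byte DSM block holds 64 such entries, which is where the
(ranges + ATA_DSM_BLK_RANGES - 1) / ATA_DSM_BLK_RANGES rounding in the
cam_fill_ataio()/ata_48bit_cmd() calls comes from.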

Modified: trunk/sys/cam/ata/ata_pmp.c
===================================================================
--- trunk/sys/cam/ata/ata_pmp.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ata/ata_pmp.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2009 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ata/ata_pmp.c 290768 2015-11-13 10:34:14Z mav $");
 
 #include <sys/param.h>
 
@@ -158,8 +159,6 @@
 
 PERIPHDRIVER_DECLARE(pmp, pmpdriver);
 
-static MALLOC_DEFINE(M_ATPMP, "ata_pmp", "ata_pmp buffers");
-
 static void
 pmpinit(void)
 {
@@ -193,8 +192,7 @@
 		    i, 0) == CAM_REQ_CMP) {
 			softc->frozen |= (1 << i);
 			xpt_acquire_device(dpath->device);
-			cam_freeze_devq_arg(dpath,
-			    RELSIM_RELEASE_RUNLEVEL, CAM_RL_BUS + 1);
+			cam_freeze_devq(dpath);
 			xpt_free_path(dpath);
 		}
 	}
@@ -215,8 +213,7 @@
 		    xpt_path_path_id(periph->path),
 		    i, 0) == CAM_REQ_CMP) {
 			softc->frozen &= ~(1 << i);
-			cam_release_devq(dpath,
-			    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_BUS + 1, FALSE);
+			cam_release_devq(dpath, 0, 0, 0, FALSE);
 			xpt_release_device(dpath->device);
 			xpt_free_path(dpath);
 		}
@@ -243,7 +240,6 @@
 		}
 	}
 	pmprelease(periph, -1);
-	xpt_print(periph->path, "lost device\n");
 }
 
 static void
@@ -253,7 +249,6 @@
 
 	softc = (struct pmp_softc *)periph->softc;
 
-	xpt_print(periph->path, "removing device entry\n");
 	cam_periph_unlock(periph);
 
 	/*
@@ -297,7 +292,7 @@
 		status = cam_periph_alloc(pmpregister, pmponinvalidate,
 					  pmpcleanup, pmpstart,
 					  "pmp", CAM_PERIPH_BIO,
-					  cgd->ccb_h.path, pmpasync,
+					  path, pmpasync,
 					  AC_FOUND_DEVICE, cgd);
 
 		if (status != CAM_REQ_CMP
@@ -322,13 +317,17 @@
 		if (code == AC_SENT_BDR || code == AC_BUS_RESET)
 			softc->found = 0; /* We have to reset everything. */
 		if (softc->state == PMP_STATE_NORMAL) {
-			if (softc->pm_pid == 0x37261095 ||
-			    softc->pm_pid == 0x38261095)
-				softc->state = PMP_STATE_PM_QUIRKS_1;
-			else
-				softc->state = PMP_STATE_PRECONFIG;
-			cam_periph_acquire(periph);
-			xpt_schedule(periph, CAM_PRIORITY_DEV);
+			if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
+				if (softc->pm_pid == 0x37261095 ||
+				    softc->pm_pid == 0x38261095)
+					softc->state = PMP_STATE_PM_QUIRKS_1;
+				else
+					softc->state = PMP_STATE_PRECONFIG;
+				xpt_schedule(periph, CAM_PRIORITY_DEV);
+			} else {
+				pmprelease(periph, softc->found);
+				xpt_release_boot();
+			}
 		} else
 			softc->restart = 1;
 		break;
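
ata_pmp.c picks up the same hardening as ada(4) above:
cam_periph_acquire() results are now checked, and the state machine only
advances (and xpt_schedule() runs) when the reference was actually
taken, with an explicit rollback otherwise. A toy model of that
checked-acquire shape; struct periph below is a stand-in, not the CAM
structure:

#include <stdbool.h>
#include <stdio.h>

struct periph {
	int	refcount;
	bool	invalid;	/* mirrors CAM_PERIPH_INVALID */
};

/* Fails, like cam_periph_acquire(), once the periph is invalidated. */
static bool
periph_acquire(struct periph *p)
{
	if (p->invalid)
		return (false);
	p->refcount++;
	return (true);
}

static void
schedule_or_rollback(struct periph *p)
{
	if (periph_acquire(p))
		printf("scheduled with refcount %d\n", p->refcount);
	else
		printf("acquire failed; rolling back state\n");
}

int
main(void)
{
	struct periph p = { 0, false };

	schedule_or_rollback(&p);	/* takes the reference */
	p.invalid = true;
	schedule_or_rollback(&p);	/* rolls back instead */
	return (0);
}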

Modified: trunk/sys/cam/ata/ata_xpt.c
===================================================================
--- trunk/sys/cam/ata/ata_xpt.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ata/ata_xpt.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2009 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ata/ata_xpt.c 323737 2017-09-19 07:39:39Z avg $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -40,6 +41,7 @@
 #include <sys/interrupt.h>
 #include <sys/sbuf.h>
 
+#include <sys/eventhandler.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/sysctl.h>
@@ -182,7 +184,7 @@
 static void	 ata_device_transport(struct cam_path *path);
 static void	 ata_get_transfer_settings(struct ccb_trans_settings *cts);
 static void	 ata_set_transfer_settings(struct ccb_trans_settings *cts,
-					    struct cam_ed *device,
+					    struct cam_path *path,
 					    int async_update);
 static void	 ata_dev_async(u_int32_t async_code,
 				struct cam_eb *bus,
@@ -249,12 +251,7 @@
 		return (status);
 	}
 	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));
-
-	/*
-	 * Ensure nobody slip in until probe finish.
-	 */
-	cam_freeze_devq_arg(periph->path,
-	    RELSIM_RELEASE_RUNLEVEL, CAM_RL_XPT + 1);
+	ata_device_transport(periph->path);
 	probeschedule(periph);
 	return(CAM_REQ_CMP);
 }
@@ -661,6 +658,7 @@
 	default:
 		panic("probestart: invalid action state 0x%x\n", softc->action);
 	}
+	start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 	xpt_action(start_ccb);
 }
 
@@ -708,12 +706,15 @@
 	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 		if (cam_periph_error(done_ccb,
 		    0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0,
-		    NULL) == ERESTART)
+		    NULL) == ERESTART) {
+out:
+			/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
+			cam_release_devq(path, 0, 0, 0, FALSE);
 			return;
+		}
 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 			/* Don't wedge the queue */
-			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
-					 /*run_queue*/TRUE);
+			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
 		}
 		status = done_ccb->ccb_h.status & CAM_STATUS_MASK;
 		if (softc->restart) {
@@ -743,18 +744,20 @@
 			goto noerror;
 
 		/*
-		 * Some HP SATA disks report supported DMA Auto-Activation,
-		 * but return ABORT on attempt to enable it.
+		 * Some old WD SATA disks have broken SPINUP handling.
+		 * If we really fail to spin up the disk, then there will be
+		 * some media access errors later on, but at least we will
+		 * have a device to interact with for recovery attempts.
 		 */
-		} else if (softc->action == PROBE_SETDMAAA &&
+		} else if (softc->action == PROBE_SPINUP &&
 		    status == CAM_ATA_STATUS_ERROR) {
 			goto noerror;
 
 		/*
-		 * Some Samsung SSDs report supported Asynchronous Notification,
+		 * Some HP SATA disks report supported DMA Auto-Activation,
 		 * but return ABORT on attempt to enable it.
 		 */
-		} else if (softc->action == PROBE_SETAN &&
+		} else if (softc->action == PROBE_SETDMAAA &&
 		    status == CAM_ATA_STATUS_ERROR) {
 			goto noerror;
 
@@ -768,7 +771,7 @@
 			PROBE_SET_ACTION(softc, PROBE_IDENTIFY_SAFTE);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 
 		/*
@@ -830,12 +833,13 @@
 		}
 		xpt_release_ccb(done_ccb);
 		xpt_schedule(periph, priority);
-		return;
+		goto out;
 	}
 	case PROBE_IDENTIFY:
 	{
 		struct ccb_pathinq cpi;
 		int16_t *ptr;
+		int veto = 0;
 
 		ident_buf = &softc->ident_data;
 		for (ptr = (int16_t *)ident_buf;
@@ -842,6 +846,17 @@
 		     ptr < (int16_t *)ident_buf + sizeof(struct ata_params)/2; ptr++) {
 			*ptr = le16toh(*ptr);
 		}
+
+		/*
+		 * Allow others to veto this ATA disk attachment.  This
+		 * is mainly used by VMs, whose disk controllers may
+		 * share the disks with the simulated ATA controllers.
+		 */
+		EVENTHANDLER_INVOKE(ada_probe_veto, path, ident_buf, &veto);
+		if (veto) {
+			goto device_fail;
+		}
+
 		if (strncmp(ident_buf->model, "FX", 2) &&
 		    strncmp(ident_buf->model, "NEC", 3) &&
 		    strncmp(ident_buf->model, "Pioneer", 7) &&
@@ -864,7 +879,7 @@
 			PROBE_SET_ACTION(softc, PROBE_SPINUP);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		ident_buf = &path->device->ident_data;
 		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
@@ -917,6 +932,7 @@
 					    path->device->device_id, 8);
 					bcopy(ident_buf->wwn,
 					    path->device->device_id + 8, 8);
+					ata_bswap(path->device->device_id + 8, 8);
 				}
 			}
 
@@ -954,7 +970,7 @@
 		PROBE_SET_ACTION(softc, PROBE_SETMODE);
 		xpt_release_ccb(done_ccb);
 		xpt_schedule(periph, priority);
-		return;
+		goto out;
 	}
 	case PROBE_SPINUP:
 		if (bootverbose)
@@ -963,7 +979,7 @@
 		PROBE_SET_ACTION(softc, PROBE_IDENTIFY);
 		xpt_release_ccb(done_ccb);
 		xpt_schedule(periph, priority);
-		return;
+		goto out;
 	case PROBE_SETMODE:
 		/* Set supported bits. */
 		bzero(&cts, sizeof(cts));
@@ -1034,7 +1050,7 @@
 			PROBE_SET_ACTION(softc, PROBE_SETPM);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		/* FALLTHROUGH */
 	case PROBE_SETPM:
@@ -1045,7 +1061,7 @@
 			PROBE_SET_ACTION(softc, PROBE_SETAPST);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		/* FALLTHROUGH */
 	case PROBE_SETAPST:
@@ -1055,17 +1071,18 @@
 			PROBE_SET_ACTION(softc, PROBE_SETDMAAA);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		/* FALLTHROUGH */
 	case PROBE_SETDMAAA:
-		if ((ident_buf->satasupport & ATA_SUPPORT_ASYNCNOTIF) &&
+		if (path->device->protocol != PROTO_ATA &&
+		    (ident_buf->satasupport & ATA_SUPPORT_ASYNCNOTIF) &&
 		    (!(softc->caps & CTS_SATA_CAPS_H_AN)) !=
 		    (!(ident_buf->sataenabled & ATA_SUPPORT_ASYNCNOTIF))) {
 			PROBE_SET_ACTION(softc, PROBE_SETAN);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		/* FALLTHROUGH */
 	case PROBE_SETAN:
@@ -1077,7 +1094,7 @@
 		}
 		xpt_release_ccb(done_ccb);
 		xpt_schedule(periph, priority);
-		return;
+		goto out;
 	case PROBE_SET_MULTI:
 		if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
 			path->device->flags &= ~CAM_DEV_UNCONFIGURED;
@@ -1084,8 +1101,7 @@
 			xpt_acquire_device(path->device);
 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 			xpt_action(done_ccb);
-			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
-			    done_ccb);
+			xpt_async(AC_FOUND_DEVICE, path, done_ccb);
 		}
 		PROBE_SET_ACTION(softc, PROBE_DONE);
 		break;
@@ -1098,7 +1114,8 @@
 
 		periph_qual = SID_QUAL(inq_buf);
 
-		if (periph_qual != SID_QUAL_LU_CONNECTED)
+		if (periph_qual != SID_QUAL_LU_CONNECTED &&
+		    periph_qual != SID_QUAL_LU_OFFLINE)
 			break;
 
 		/*
@@ -1118,7 +1135,7 @@
 			PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 
 		ata_device_transport(path);
@@ -1127,7 +1144,7 @@
 			xpt_acquire_device(path->device);
 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 			xpt_action(done_ccb);
-			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb);
+			xpt_async(AC_FOUND_DEVICE, path, done_ccb);
 		}
 		PROBE_SET_ACTION(softc, PROBE_DONE);
 		break;
@@ -1145,7 +1162,7 @@
 		PROBE_SET_ACTION(softc, PROBE_PM_PRV);
 		xpt_release_ccb(done_ccb);
 		xpt_schedule(periph, priority);
-		return;
+		goto out;
 	case PROBE_PM_PRV:
 		softc->pm_prv = (done_ccb->ataio.res.lba_high << 24) +
 		    (done_ccb->ataio.res.lba_mid << 16) +
@@ -1181,7 +1198,7 @@
 		else
 			caps = 0;
 		/* Remember what transport thinks about AEN. */
-		if (caps & CTS_SATA_CAPS_H_AN)
+		if ((caps & CTS_SATA_CAPS_H_AN) && path->device->protocol != PROTO_ATA)
 			path->device->inq_flags |= SID_AEN;
 		else
 			path->device->inq_flags &= ~SID_AEN;
@@ -1200,12 +1217,11 @@
 			xpt_acquire_device(path->device);
 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 			xpt_action(done_ccb);
-			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
-			    done_ccb);
+			xpt_async(AC_FOUND_DEVICE, path, done_ccb);
 		} else {
 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 			xpt_action(done_ccb);
-			xpt_async(AC_SCSI_AEN, done_ccb->ccb_h.path, done_ccb);
+			xpt_async(AC_SCSI_AEN, path, done_ccb);
 		}
 		PROBE_SET_ACTION(softc, PROBE_DONE);
 		break;
@@ -1250,8 +1266,7 @@
 			xpt_acquire_device(path->device);
 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 			xpt_action(done_ccb);
-			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
-			    done_ccb);
+			xpt_async(AC_FOUND_DEVICE, path, done_ccb);
 		}
 		PROBE_SET_ACTION(softc, PROBE_DONE);
 		break;
@@ -1263,7 +1278,7 @@
 		softc->restart = 0;
 		xpt_release_ccb(done_ccb);
 		probeschedule(periph);
-		return;
+		goto out;
 	}
 	xpt_release_ccb(done_ccb);
 	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
@@ -1273,9 +1288,9 @@
 		done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR;
 		xpt_done(done_ccb);
 	}
+	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
+	cam_release_devq(path, 0, 0, 0, FALSE);
 	cam_periph_invalidate(periph);
-	cam_release_devq(periph->path,
-	    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_XPT + 1, FALSE);
 	cam_periph_release_locked(periph);
 }
 
@@ -1324,6 +1339,7 @@
 	struct	cam_path *path;
 	ata_scan_bus_info *scan_info;
 	union	ccb *work_ccb, *reset_ccb;
+	struct mtx *mtx;
 	cam_status status;
 
 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
@@ -1399,11 +1415,14 @@
 			xpt_done(request_ccb);
 			break;
 		}
+		mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
 		goto scan_next;
 	case XPT_SCAN_LUN:
 		work_ccb = request_ccb;
 		/* Reuse the same CCB to query if a device was really found */
 		scan_info = (ata_scan_bus_info *)work_ccb->ccb_h.ppriv_ptr0;
+		mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
+		mtx_lock(mtx);
 		/* If there is PMP... */
 		if ((scan_info->cpi->hba_inquiry & PI_SATAPM) &&
 		    (scan_info->counter == scan_info->cpi->max_target)) {
@@ -1432,6 +1451,7 @@
 		    ((scan_info->cpi->hba_inquiry & PI_SATAPM) ?
 		    0 : scan_info->cpi->max_target)) {
 done:
+			mtx_unlock(mtx);
 			xpt_free_ccb(work_ccb);
 			xpt_free_ccb((union ccb *)scan_info->cpi);
 			request_ccb = scan_info->request_ccb;
@@ -1444,10 +1464,12 @@
 		scan_info->counter = (scan_info->counter + 1 ) %
 		    (scan_info->cpi->max_target + 1);
 scan_next:
-		status = xpt_create_path(&path, xpt_periph,
+		status = xpt_create_path(&path, NULL,
 		    scan_info->request_ccb->ccb_h.path_id,
 		    scan_info->counter, 0);
 		if (status != CAM_REQ_CMP) {
+			if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN)
+				mtx_unlock(mtx);
 			printf("xpt_scan_bus: xpt_create_path failed"
 			    " with status %#x, bus scan halted\n",
 			    status);
@@ -1463,9 +1485,15 @@
 		    scan_info->request_ccb->ccb_h.pinfo.priority);
 		work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 		work_ccb->ccb_h.cbfcnp = ata_scan_bus;
+		work_ccb->ccb_h.flags |= CAM_UNLOCKED;
 		work_ccb->ccb_h.ppriv_ptr0 = scan_info;
 		work_ccb->crcn.flags = scan_info->request_ccb->crcn.flags;
+		mtx_unlock(mtx);
+		if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN)
+			mtx = NULL;
 		xpt_action(work_ccb);
+		if (mtx != NULL)
+			mtx_lock(mtx);
 		break;
 	default:
 		break;
@@ -1480,6 +1508,7 @@
 	cam_status status;
 	struct cam_path *new_path;
 	struct cam_periph *old_periph;
+	int lock;
 
 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_scan_lun\n"));
 
@@ -1502,7 +1531,7 @@
 			    "can't continue\n");
 			return;
 		}
-		status = xpt_create_path(&new_path, xpt_periph,
+		status = xpt_create_path(&new_path, NULL,
 					  path->bus->path_id,
 					  path->target->target_id,
 					  path->device->lun_id);
@@ -1514,10 +1543,14 @@
 		}
 		xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT);
 		request_ccb->ccb_h.cbfcnp = xptscandone;
+		request_ccb->ccb_h.flags |= CAM_UNLOCKED;
 		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 		request_ccb->crcn.flags = flags;
 	}
 
+	lock = (xpt_path_owned(path) == 0);
+	if (lock)
+		xpt_path_lock(path);
 	if ((old_periph = cam_periph_find(path, "aprobe")) != NULL) {
 		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
 			probe_softc *softc;
@@ -1544,6 +1577,8 @@
 			xpt_done(request_ccb);
 		}
 	}
+	if (lock)
+		xpt_path_unlock(path);
 }
 
 static void
@@ -1557,7 +1592,6 @@
 static struct cam_ed *
 ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 {
-	struct cam_path path;
 	struct ata_quirk_entry *quirk;
 	struct cam_ed *device;
 
@@ -1578,22 +1612,6 @@
 	device->queue_flags = 0;
 	device->serial_num = NULL;
 	device->serial_num_len = 0;
-
-	/*
-	 * XXX should be limited by number of CCBs this bus can
-	 * do.
-	 */
-	bus->sim->max_ccbs += device->ccbq.devq_openings;
-	if (lun_id != CAM_LUN_WILDCARD) {
-		xpt_compile_path(&path,
-				 NULL,
-				 bus->path_id,
-				 target->target_id,
-				 lun_id);
-		ata_device_transport(&path);
-		xpt_release_path(&path);
-	}
-
 	return (device);
 }
 
@@ -1716,15 +1734,8 @@
 	start_ccb->ccb_h.status = CAM_REQ_CMP;
 
 	if (cdai->flags & CDAI_FLAG_STORE) {
-		int owned;
-
-		owned = mtx_owned(start_ccb->ccb_h.path->bus->sim->mtx);
-		if (owned == 0)
-			mtx_lock(start_ccb->ccb_h.path->bus->sim->mtx);
 		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
 			  (void *)(uintptr_t)cdai->buftype);
-		if (owned == 0)
-			mtx_unlock(start_ccb->ccb_h.path->bus->sim->mtx);
 	}
 }
 
@@ -1736,7 +1747,7 @@
 	case XPT_SET_TRAN_SETTINGS:
 	{
 		ata_set_transfer_settings(&start_ccb->cts,
-					   start_ccb->ccb_h.path->device,
+					   start_ccb->ccb_h.path,
 					   /*async_update*/FALSE);
 		break;
 	}
@@ -1795,11 +1806,9 @@
 	struct	ccb_trans_settings_ata *ata;
 	struct	ccb_trans_settings_scsi *scsi;
 	struct	cam_ed *device;
-	struct	cam_sim *sim;
 
 	device = cts->ccb_h.path->device;
-	sim = cts->ccb_h.path->bus->sim;
-	(*(sim->sim_action))(sim, (union ccb *)cts);
+	xpt_action_default((union ccb *)cts);
 
 	if (cts->protocol == PROTO_UNKNOWN ||
 	    cts->protocol == PROTO_UNSPECIFIED) {
@@ -1836,17 +1845,17 @@
 }
 
 static void
-ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
+ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path,
 			   int async_update)
 {
 	struct	ccb_pathinq cpi;
 	struct	ccb_trans_settings_ata *ata;
 	struct	ccb_trans_settings_scsi *scsi;
-	struct	cam_sim *sim;
 	struct	ata_params *ident_data;
 	struct	scsi_inquiry_data *inq_data;
+	struct	cam_ed *device;
 
-	if (device == NULL) {
+	if (path == NULL || (device = path->device) == NULL) {
 		cts->ccb_h.status = CAM_PATH_INVALID;
 		xpt_done((union ccb *)cts);
 		return;
@@ -1863,7 +1872,7 @@
 		cts->protocol_version = device->protocol_version;
 
 	if (cts->protocol != device->protocol) {
-		xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
+		xpt_print(path, "Uninitialized Protocol %x:%x?\n",
 		       cts->protocol, device->protocol);
 		cts->protocol = device->protocol;
 	}
@@ -1870,7 +1879,7 @@
 
 	if (cts->protocol_version > device->protocol_version) {
 		if (bootverbose) {
-			xpt_print(cts->ccb_h.path, "Down reving Protocol "
+			xpt_print(path, "Down reving Protocol "
 			    "Version from %d to %d?\n", cts->protocol_version,
 			    device->protocol_version);
 		}
@@ -1888,7 +1897,7 @@
 		cts->transport_version = device->transport_version;
 
 	if (cts->transport != device->transport) {
-		xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
+		xpt_print(path, "Uninitialized Transport %x:%x?\n",
 		    cts->transport, device->transport);
 		cts->transport = device->transport;
 	}
@@ -1895,7 +1904,7 @@
 
 	if (cts->transport_version > device->transport_version) {
 		if (bootverbose) {
-			xpt_print(cts->ccb_h.path, "Down reving Transport "
+			xpt_print(path, "Down reving Transport "
 			    "Version from %d to %d?\n", cts->transport_version,
 			    device->transport_version);
 		}
@@ -1902,7 +1911,6 @@
 		cts->transport_version = device->transport_version;
 	}
 
-	sim = cts->ccb_h.path->bus->sim;
 	ident_data = &device->ident_data;
 	inq_data = &device->inq_data;
 	if (cts->protocol == PROTO_ATA)
@@ -1913,7 +1921,7 @@
 		scsi = &cts->proto_specific.scsi;
 	else
 		scsi = NULL;
-	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE);
+	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
 	cpi.ccb_h.func_code = XPT_PATH_INQ;
 	xpt_action((union ccb *)&cpi);
 
@@ -1957,11 +1965,11 @@
 			device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 			device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 		} else if (nowt && !newt)
-			xpt_stop_tags(cts->ccb_h.path);
+			xpt_stop_tags(path);
 	}
 
 	if (async_update == FALSE)
-		(*(sim->sim_action))(sim, (union ccb *)cts);
+		xpt_action_default((union ccb *)cts);
 }
 
 /*
@@ -2018,10 +2026,14 @@
 		xpt_release_device(device);
 	} else if (async_code == AC_TRANSFER_NEG) {
 		struct ccb_trans_settings *settings;
+		struct cam_path path;
 
 		settings = (struct ccb_trans_settings *)async_arg;
-		ata_set_transfer_settings(settings, device,
+		xpt_compile_path(&path, NULL, bus->path_id, target->target_id,
+				 device->lun_id);
+		ata_set_transfer_settings(settings, &path,
 					  /*async_update*/TRUE);
+		xpt_release_path(&path);
 	}
 }
 
@@ -2034,7 +2046,7 @@
 	u_int	speed;
 	u_int	mb;
 
-	mtx_assert(periph->sim->mtx, MA_OWNED);
+	cam_periph_assert(periph, MA_OWNED);
 
 	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;

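The probe and scan paths above now serialize on the per-path mutex from
xpt_path_mtx() instead of a single SIM lock, dropping it around xpt_action()
on the rescheduled scan CCB.  A minimal sketch of the pattern, assuming a
valid struct cam_path *path and a work_ccb to dispatch:

    struct mtx *mtx;

    mtx = xpt_path_mtx(path);      /* per-path lock, replaces sim->mtx */
    mtx_lock(mtx);
    /* ... examine or update scan state reachable from the path ... */
    mtx_unlock(mtx);               /* drop before re-entering the XPT */
    xpt_action(work_ccb);
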
Modified: trunk/sys/cam/cam.c
===================================================================
--- trunk/sys/cam/cam.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Generic utility routines for the Common Access Method layer.
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/cam.c 284435 2015-06-16 02:31:11Z ken $");
 
 #include <sys/param.h>
 #ifdef _KERNEL
@@ -158,6 +159,56 @@
 	*dst = '\0';
 }
 
+void
+cam_strvis_sbuf(struct sbuf *sb, const u_int8_t *src, int srclen,
+		uint32_t flags)
+{
+
+	/* Trim leading/trailing spaces, nulls. */
+	while (srclen > 0 && src[0] == ' ')
+		src++, srclen--;
+	while (srclen > 0
+	    && (src[srclen-1] == ' ' || src[srclen-1] == '\0'))
+		srclen--;
+
+	while (srclen > 0) {
+		if (*src < 0x20 || *src >= 0x80) {
+			/* SCSI-II specifies that these should never occur. */
+			/* non-printable character */
+			switch (flags & CAM_STRVIS_FLAG_NONASCII_MASK) {
+			case CAM_STRVIS_FLAG_NONASCII_ESC:
+				sbuf_printf(sb, "\\%c%c%c", 
+				    ((*src & 0300) >> 6) + '0',
+				    ((*src & 0070) >> 3) + '0',
+				    ((*src & 0007) >> 0) + '0');
+				break;
+			case CAM_STRVIS_FLAG_NONASCII_RAW:
+				/*
+				 * If we run into a NUL, just transform it
+				 * into a space.
+				 */
+				if (*src != 0x00)
+					sbuf_putc(sb, *src);
+				else
+					sbuf_putc(sb, ' ');
+				break;
+			case CAM_STRVIS_FLAG_NONASCII_SPC:
+				sbuf_putc(sb, ' ');
+				break;
+			case CAM_STRVIS_FLAG_NONASCII_TRIM:
+			default:
+				break;
+			}
+		} else {
+			/* normal character */
+			sbuf_putc(sb, *src);
+		}
+		src++;
+		srclen--;
+	}
+}
+
+
 /*
  * Compare string with pattern, returning 0 on match.
  * Short pattern matches trailing blanks in name,

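A minimal usage sketch for the new cam_strvis_sbuf(); the fixed-length sbuf
and the inquiry buffer are caller-side assumptions, not part of this change:

    struct sbuf sb;
    char buf[64];

    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    cam_strvis_sbuf(&sb, inq_data->vendor, sizeof(inq_data->vendor),
        CAM_STRVIS_FLAG_NONASCII_ESC);   /* octal-escape non-ASCII bytes */
    sbuf_finish(&sb);
    printf("vendor: %s\n", sbuf_data(&sb));

Unlike cam_strvis(), the sbuf variant takes no destination length and leaves
sizing policy to the sbuf supplied by the caller.
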
Modified: trunk/sys/cam/cam.h
===================================================================
--- trunk/sys/cam/cam.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Data structures and definitions for the CAM system.
  *
@@ -25,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam.h 311402 2017-01-05 11:20:31Z mav $
  */
 
 #ifndef _CAM_CAM_H
@@ -40,6 +41,10 @@
 typedef u_int path_id_t;
 typedef u_int target_id_t;
 typedef u_int lun_id_t;
+typedef union {
+	u_int64_t	lun64;
+	u_int8_t	lun[8];
+} lun64_id_t;
 
 #define	CAM_XPT_PATH_ID	((path_id_t)~0)
 #define	CAM_BUS_WILDCARD ((path_id_t)~0)
@@ -46,6 +51,12 @@
 #define	CAM_TARGET_WILDCARD ((target_id_t)~0)
 #define	CAM_LUN_WILDCARD ((lun_id_t)~0)
 
+#define CAM_EXTLUN_BYTE_SWIZZLE(lun) (	\
+	((((u_int64_t)lun) & 0xffff000000000000L) >> 48) | \
+	((((u_int64_t)lun) & 0x0000ffff00000000L) >> 16) | \
+	((((u_int64_t)lun) & 0x00000000ffff0000L) << 16) | \
+	((((u_int64_t)lun) & 0x000000000000ffffL) << 48))
+
 /*
  * Maximum length for a CAM CDB.  
  */
@@ -71,7 +82,7 @@
     CAM_RL_VALUES
 } cam_rl;
 /*
- * The generation number is incremented everytime a new entry is entered into
+ * The generation number is incremented every time a new entry is entered into
  * the queue giving round robin per priority level scheduling.
  */
 typedef struct {
@@ -80,15 +91,15 @@
 #define CAM_PRIORITY_BUS	((CAM_RL_BUS << 8) + 0x80)
 #define CAM_PRIORITY_XPT	((CAM_RL_XPT << 8) + 0x80)
 #define CAM_PRIORITY_DEV	((CAM_RL_DEV << 8) + 0x80)
+#define CAM_PRIORITY_OOB	(CAM_RL_DEV << 8)
 #define CAM_PRIORITY_NORMAL	((CAM_RL_NORMAL << 8) + 0x80)
 #define CAM_PRIORITY_NONE	(u_int32_t)-1
-#define CAM_PRIORITY_TO_RL(x)	((x) >> 8)
-#define CAM_RL_TO_PRIORITY(x)	((x) << 8)
 	u_int32_t generation;
 	int       index;
 #define CAM_UNQUEUED_INDEX	-1
 #define CAM_ACTIVE_INDEX	-2	
 #define CAM_DONEQ_INDEX		-3	
+#define CAM_EXTRAQ_INDEX	INT_MAX
 } cam_pinfo;
 
 /*
@@ -112,77 +123,193 @@
 enum {
 	SF_RETRY_UA		= 0x01,	/* Retry UNIT ATTENTION conditions. */
 	SF_NO_PRINT		= 0x02,	/* Never print error status. */
-	SF_QUIET_IR		= 0x04,	/* Be quiet about Illegal Request reponses */
+	SF_QUIET_IR		= 0x04,	/* Be quiet about Illegal Request responses */
 	SF_PRINT_ALWAYS		= 0x08,	/* Always print error status. */
 	SF_NO_RECOVERY		= 0x10,	/* Don't do active error recovery. */
-	SF_NO_RETRY		= 0x20	/* Don't do any retries. */
+	SF_NO_RETRY		= 0x20,	/* Don't do any retries. */
+	SF_RETRY_BUSY		= 0x40	/* Retry BUSY status. */
 };
 
 /* CAM  Status field values */
 typedef enum {
-	CAM_REQ_INPROG,		/* CCB request is in progress */
-	CAM_REQ_CMP,		/* CCB request completed without error */
-	CAM_REQ_ABORTED,	/* CCB request aborted by the host */
-	CAM_UA_ABORT,		/* Unable to abort CCB request */
-	CAM_REQ_CMP_ERR,	/* CCB request completed with an error */
-	CAM_BUSY,		/* CAM subsystem is busy */
-	CAM_REQ_INVALID,	/* CCB request was invalid */
-	CAM_PATH_INVALID,	/* Supplied Path ID is invalid */
-	CAM_DEV_NOT_THERE,	/* SCSI Device Not Installed/there */
-	CAM_UA_TERMIO,		/* Unable to terminate I/O CCB request */
-	CAM_SEL_TIMEOUT,	/* Target Selection Timeout */
-	CAM_CMD_TIMEOUT,	/* Command timeout */
-	CAM_SCSI_STATUS_ERROR,	/* SCSI error, look at error code in CCB */
-	CAM_MSG_REJECT_REC,	/* Message Reject Received */
-	CAM_SCSI_BUS_RESET,	/* SCSI Bus Reset Sent/Received */
-	CAM_UNCOR_PARITY,	/* Uncorrectable parity error occurred */
-	CAM_AUTOSENSE_FAIL = 0x10,/* Autosense: request sense cmd fail */
-	CAM_NO_HBA,		/* No HBA Detected error */
-	CAM_DATA_RUN_ERR,	/* Data Overrun error */
-	CAM_UNEXP_BUSFREE,	/* Unexpected Bus Free */
-	CAM_SEQUENCE_FAIL,	/* Target Bus Phase Sequence Failure */
-	CAM_CCB_LEN_ERR,	/* CCB length supplied is inadequate */
-	CAM_PROVIDE_FAIL,	/* Unable to provide requested capability */
-	CAM_BDR_SENT,		/* A SCSI BDR msg was sent to target */
-	CAM_REQ_TERMIO,		/* CCB request terminated by the host */
-	CAM_UNREC_HBA_ERROR,	/* Unrecoverable Host Bus Adapter Error */
-	CAM_REQ_TOO_BIG,	/* The request was too large for this host */
-	CAM_REQUEUE_REQ,	/*
-				 * This request should be requeued to preserve
-				 * transaction ordering.  This typically occurs
-				 * when the SIM recognizes an error that should
-				 * freeze the queue and must place additional
-				 * requests for the target at the sim level
-				 * back into the XPT queue.
-				 */
-	CAM_ATA_STATUS_ERROR,	/* ATA error, look at error code in CCB */
-	CAM_SCSI_IT_NEXUS_LOST,	/* Initiator/Target Nexus lost. */
-	CAM_SMP_STATUS_ERROR,	/* SMP error, look at error code in CCB */
-	CAM_IDE = 0x33,		/* Initiator Detected Error */
-	CAM_RESRC_UNAVAIL,	/* Resource Unavailable */
-	CAM_UNACKED_EVENT,	/* Unacknowledged Event by Host */
-	CAM_MESSAGE_RECV,	/* Message Received in Host Target Mode */
-	CAM_INVALID_CDB,	/* Invalid CDB received in Host Target Mode */
-	CAM_LUN_INVALID,	/* Lun supplied is invalid */
-	CAM_TID_INVALID,	/* Target ID supplied is invalid */
-	CAM_FUNC_NOTAVAIL,	/* The requested function is not available */
-	CAM_NO_NEXUS,		/* Nexus is not established */
-	CAM_IID_INVALID,	/* The initiator ID is invalid */
-	CAM_CDB_RECVD,		/* The SCSI CDB has been received */
-	CAM_LUN_ALRDY_ENA,	/* The LUN is already enabled for target mode */
-	CAM_SCSI_BUSY,		/* SCSI Bus Busy */
+	/* CCB request is in progress */
+	CAM_REQ_INPROG		= 0x00,
 
-	CAM_DEV_QFRZN = 0x40,	/* The DEV queue is frozen w/this err */
+	/* CCB request completed without error */
+	CAM_REQ_CMP		= 0x01,
 
-				/* Autosense data valid for target */
-	CAM_AUTOSNS_VALID = 0x80,
-	CAM_RELEASE_SIMQ = 0x100,/* SIM ready to take more commands */
-	CAM_SIM_QUEUED   = 0x200,/* SIM has this command in it's queue */
+	/* CCB request aborted by the host */
+	CAM_REQ_ABORTED		= 0x02,
 
-	CAM_STATUS_MASK = 0x3F,	/* Mask bits for just the status # */
+	/* Unable to abort CCB request */
+	CAM_UA_ABORT		= 0x03,
 
-				/* Target Specific Adjunct Status */
-	CAM_SENT_SENSE = 0x40000000	/* sent sense with status */
+	/* CCB request completed with an error */
+	CAM_REQ_CMP_ERR		= 0x04,
+
+	/* CAM subsystem is busy */
+	CAM_BUSY		= 0x05,
+
+	/* CCB request was invalid */
+	CAM_REQ_INVALID		= 0x06,
+
+	/* Supplied Path ID is invalid */
+	CAM_PATH_INVALID	= 0x07,
+
+	/* SCSI Device Not Installed/there */
+	CAM_DEV_NOT_THERE	= 0x08,
+
+	/* Unable to terminate I/O CCB request */
+	CAM_UA_TERMIO		= 0x09,
+
+	/* Target Selection Timeout */
+	CAM_SEL_TIMEOUT		= 0x0a,
+
+	/* Command timeout */
+	CAM_CMD_TIMEOUT		= 0x0b,
+
+	/* SCSI error, look at error code in CCB */
+	CAM_SCSI_STATUS_ERROR	= 0x0c,
+
+	/* Message Reject Received */
+	CAM_MSG_REJECT_REC	= 0x0d,
+
+	/* SCSI Bus Reset Sent/Received */
+	CAM_SCSI_BUS_RESET	= 0x0e,
+
+	/* Uncorrectable parity error occurred */
+	CAM_UNCOR_PARITY	= 0x0f,
+
+	/* Autosense: request sense cmd fail */
+	CAM_AUTOSENSE_FAIL	= 0x10,
+
+	/* No HBA Detected error */
+	CAM_NO_HBA		= 0x11,
+
+	/* Data Overrun error */
+	CAM_DATA_RUN_ERR	= 0x12,
+
+	/* Unexpected Bus Free */
+	CAM_UNEXP_BUSFREE	= 0x13,
+
+	/* Target Bus Phase Sequence Failure */
+	CAM_SEQUENCE_FAIL	= 0x14,
+
+	/* CCB length supplied is inadequate */
+	CAM_CCB_LEN_ERR		= 0x15,
+
+	/* Unable to provide requested capability */
+	CAM_PROVIDE_FAIL	= 0x16,
+
+	/* A SCSI BDR msg was sent to target */
+	CAM_BDR_SENT		= 0x17,
+
+	/* CCB request terminated by the host */
+	CAM_REQ_TERMIO		= 0x18,
+
+	/* Unrecoverable Host Bus Adapter Error */
+	CAM_UNREC_HBA_ERROR	= 0x19,
+
+	/* Request was too large for this host */
+	CAM_REQ_TOO_BIG		= 0x1a,
+
+	/*
+	 * This request should be requeued to preserve
+	 * transaction ordering.  This typically occurs
+	 * when the SIM recognizes an error that should
+	 * freeze the queue and must place additional
+	 * requests for the target at the sim level
+	 * back into the XPT queue.
+	 */
+	CAM_REQUEUE_REQ		= 0x1b,
+
+	/* ATA error, look at error code in CCB */
+	CAM_ATA_STATUS_ERROR	= 0x1c,
+
+	/* Initiator/Target Nexus lost. */
+	CAM_SCSI_IT_NEXUS_LOST	= 0x1d,
+
+	/* SMP error, look at error code in CCB */
+	CAM_SMP_STATUS_ERROR	= 0x1e,
+
+	/*
+	 * Command completed without error but exceeded the soft
+	 * timeout threshold.
+	 */
+	CAM_REQ_SOFTTIMEOUT	= 0x1f,
+
+	/*
+	 * 0x20 - 0x32 are unassigned
+	 */
+
+	/* Initiator Detected Error */
+	CAM_IDE			= 0x33,
+
+	/* Resource Unavailable */
+	CAM_RESRC_UNAVAIL	= 0x34,
+
+	/* Unacknowledged Event by Host */
+	CAM_UNACKED_EVENT	= 0x35,
+
+	/* Message Received in Host Target Mode */
+	CAM_MESSAGE_RECV	= 0x36,
+
+	/* Invalid CDB received in Host Target Mode */
+	CAM_INVALID_CDB		= 0x37,
+
+	/* Lun supplied is invalid */
+	CAM_LUN_INVALID		= 0x38,
+
+	/* Target ID supplied is invalid */
+	CAM_TID_INVALID		= 0x39,
+
+	/* The requested function is not available */
+	CAM_FUNC_NOTAVAIL	= 0x3a,
+
+	/* Nexus is not established */
+	CAM_NO_NEXUS		= 0x3b,
+
+	/* The initiator ID is invalid */
+	CAM_IID_INVALID		= 0x3c,
+
+	/* The SCSI CDB has been received */
+	CAM_CDB_RECVD		= 0x3d,
+
+	/* The LUN is already enabled for target mode */
+	CAM_LUN_ALRDY_ENA	= 0x3e,
+
+	/* SCSI Bus Busy */
+	CAM_SCSI_BUSY		= 0x3f,
+
+
+	/*
+	 * Flags
+	 */
+
+	/* The DEV queue is frozen w/this err */
+	CAM_DEV_QFRZN		= 0x40,
+
+	/* Autosense data valid for target */
+	CAM_AUTOSNS_VALID	= 0x80,
+
+	/* SIM ready to take more commands */
+	CAM_RELEASE_SIMQ	= 0x100,
+
+	/* SIM has this command in it's queue */
+	CAM_SIM_QUEUED		= 0x200,
+
+	/* Quality of service data is valid */
+	CAM_QOS_VALID		= 0x400,
+
+	/* Mask bits for just the status # */
+	CAM_STATUS_MASK = 0x3F,
+
+	/*
+	 * Target Specific Adjunct Status
+	 */
+	
+	/* sent sense with status */
+	CAM_SENT_SENSE		= 0x40000000
 } cam_status;
 
 typedef enum {
@@ -220,6 +347,15 @@
 	CAM_EAF_PRINT_RESULT	= 0x20
 } cam_error_ata_flags;
 
+typedef enum {
+	CAM_STRVIS_FLAG_NONE		= 0x00,
+	CAM_STRVIS_FLAG_NONASCII_MASK	= 0x03,
+	CAM_STRVIS_FLAG_NONASCII_TRIM	= 0x00,
+	CAM_STRVIS_FLAG_NONASCII_RAW	= 0x01,
+	CAM_STRVIS_FLAG_NONASCII_SPC	= 0x02,
+	CAM_STRVIS_FLAG_NONASCII_ESC	= 0x03
+} cam_strvis_flags;
+
 struct cam_status_entry
 {
 	cam_status  status_code;
@@ -232,6 +368,7 @@
 extern int cam_sort_io_queues;
 #endif
 union ccb;
+struct sbuf;
 
 #ifdef SYSCTL_DECL	/* from sysctl.h */
 SYSCTL_DECL(_kern_cam);
@@ -244,6 +381,8 @@
 		       int entry_size, cam_quirkmatch_t *comp_func);
 
 void	cam_strvis(u_int8_t *dst, const u_int8_t *src, int srclen, int dstlen);
+void	cam_strvis_sbuf(struct sbuf *sb, const u_int8_t *src, int srclen,
+			uint32_t flags);
 
 int	cam_strmatch(const u_int8_t *str, const u_int8_t *pattern, int str_len);
 const struct cam_status_entry*

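CAM_EXTLUN_BYTE_SWIZZLE() reverses the order of the four 16-bit words in a
64-bit extended LUN.  A worked example with an assumed input value:

    u_int64_t lun = 0x0001000200030004ULL;

    /* words 0001 0002 0003 0004 become 0004 0003 0002 0001 */
    lun = CAM_EXTLUN_BYTE_SWIZZLE(lun);  /* now 0x0004000300020001 */
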
Modified: trunk/sys/cam/cam_ccb.h
===================================================================
--- trunk/sys/cam/cam_ccb.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_ccb.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Data structures and definitions for CAM Control Blocks (CCBs).
  *
@@ -25,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam_ccb.h 312850 2017-01-26 21:35:58Z mav $
  */
 
 #ifndef _CAM_CAM_CCB_H
@@ -42,7 +43,6 @@
 #include <cam/scsi/scsi_all.h>
 #include <cam/ata/ata_all.h>
 
-
 /* General allocation length definitions for CCB structures */
 #define	IOCDBLEN	CAM_MAX_CDBLEN	/* Space for CDB bytes/pointer */
 #define	VUHBALEN	14		/* Vendor Unique HBA length */
@@ -64,7 +64,7 @@
 					      * Perform transport negotiation
 					      * with this command.
 					      */
-	CAM_SCATTER_VALID	= 0x00000010,/* Scatter/gather list is valid  */
+	CAM_DATA_ISPHYS		= 0x00000010,/* Data type with physical addrs */
 	CAM_DIS_AUTOSENSE	= 0x00000020,/* Disable autosense feature     */
 	CAM_DIR_BOTH		= 0x00000000,/* Data direction (00:IN/OUT)    */
 	CAM_DIR_IN		= 0x00000040,/* Data direction (01:DATA IN)   */
@@ -71,6 +71,12 @@
 	CAM_DIR_OUT		= 0x00000080,/* Data direction (10:DATA OUT)  */
 	CAM_DIR_NONE		= 0x000000C0,/* Data direction (11:no data)   */
 	CAM_DIR_MASK		= 0x000000C0,/* Data direction Mask	      */
+	CAM_DATA_VADDR		= 0x00000000,/* Data type (000:Virtual)       */
+	CAM_DATA_PADDR		= 0x00000010,/* Data type (001:Physical)      */
+	CAM_DATA_SG		= 0x00040000,/* Data type (010:sglist)        */
+	CAM_DATA_SG_PADDR	= 0x00040010,/* Data type (011:sglist phys)   */
+	CAM_DATA_BIO		= 0x00200000,/* Data type (100:bio)           */
+	CAM_DATA_MASK		= 0x00240010,/* Data type mask                */
 	CAM_SOFT_RST_OP		= 0x00000100,/* Use Soft reset alternative    */
 	CAM_ENG_SYNC		= 0x00000200,/* Flush resid bytes on complete */
 	CAM_DEV_QFRZDIS		= 0x00000400,/* Disable DEV Q freezing	      */
@@ -81,10 +87,8 @@
 	CAM_TAG_ACTION_VALID	= 0x00008000,/* Use the tag action in this ccb*/
 	CAM_PASS_ERR_RECOVER	= 0x00010000,/* Pass driver does err. recovery*/
 	CAM_DIS_DISCONNECT	= 0x00020000,/* Disable disconnect	      */
-	CAM_SG_LIST_PHYS	= 0x00040000,/* SG list has physical addrs.   */
 	CAM_MSG_BUF_PHYS	= 0x00080000,/* Message buffer ptr is physical*/
 	CAM_SNS_BUF_PHYS	= 0x00100000,/* Autosense data ptr is physical*/
-	CAM_DATA_PHYS		= 0x00200000,/* SG/Buffer data ptrs are phys. */
 	CAM_CDB_PHYS		= 0x00400000,/* CDB pointer is physical	      */
 	CAM_ENG_SGLIST		= 0x00800000,/* SG list is for the HBA engine */
 
@@ -96,14 +100,23 @@
 	CAM_MSGB_VALID		= 0x10000000,/* Message buffer valid	      */
 	CAM_STATUS_VALID	= 0x20000000,/* Status buffer valid	      */
 	CAM_DATAB_VALID		= 0x40000000,/* Data buffer valid	      */
-	
+
 /* Host target Mode flags */
 	CAM_SEND_SENSE		= 0x08000000,/* Send sense data with status   */
 	CAM_TERM_IO		= 0x10000000,/* Terminate I/O Message sup.    */
 	CAM_DISCONNECT		= 0x20000000,/* Disconnects are mandatory     */
-	CAM_SEND_STATUS		= 0x40000000 /* Send status after data phase  */
+	CAM_SEND_STATUS		= 0x40000000,/* Send status after data phase  */
+
+	CAM_UNLOCKED		= 0x80000000 /* Call callback without lock.   */
 } ccb_flags;
 
+typedef enum {
+	CAM_EXTLUN_VALID	= 0x00000001,/* 64bit lun field is valid      */
+	CAM_USER_DATA_ADDR	= 0x00000002,/* Userspace data pointers */
+	CAM_SG_FORMAT_IOVEC	= 0x00000004,/* iovec instead of busdma S/G*/
+	CAM_UNMAPPED_BUF	= 0x00000008 /* use unmapped I/O */
+} ccb_xflags;
+
 /* XPT Opcodes for xpt_action */
 typedef enum {
 /* Function code flags are bits greater than 0xff */
@@ -142,10 +155,11 @@
 				/* Path statistics (error counts, etc.) */
 	XPT_GDEV_STATS		= 0x0c,
 				/* Device statistics (error counts, etc.) */
-	XPT_FREEZE_QUEUE	= 0x0d,
-				/* Freeze device queue */
 	XPT_DEV_ADVINFO		= 0x0e,
 				/* Get/Set Device advanced information */
+	XPT_ASYNC		= 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB
+				       | XPT_FC_XPT_ONLY,
+				/* Asynchronous event */
 /* SCSI Control Functions: 0x10->0x1F */
 	XPT_ABORT		= 0x10,
 				/* Abort the specified CCB */
@@ -217,6 +231,8 @@
 				/* Notify Host Target driver of event */
 	XPT_NOTIFY_ACKNOWLEDGE	= 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
 				/* Acknowledgement of event */
+	XPT_REPROBE_LUN		= 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
+				/* Query device capacity and notify GEOM */
 
 /* Vendor Unique codes: 0x80->0x8F */
 	XPT_VUNIQUE		= 0x80
@@ -257,6 +273,7 @@
 	XPORT_SAS,	/* Serial Attached SCSI */
 	XPORT_SATA,	/* Serial AT Attachment */
 	XPORT_ISCSI,	/* iSCSI */
+	XPORT_SRP,	/* SCSI RDMA Protocol */
 } cam_xport;
 
 #define XPORT_IS_ATA(t)		((t) == XPORT_ATA || (t) == XPORT_SATA)
@@ -295,6 +312,12 @@
 	u_int8_t	bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)];
 } ccb_spriv_area;
 
+typedef struct {
+	struct timeval	*etime;
+	uintptr_t	sim_data;
+	uintptr_t	periph_data;
+} ccb_qos_area;
+
 struct ccb_hdr {
 	cam_pinfo	pinfo;		/* Info for priority scheduling */
 	camq_entry	xpt_links;	/* For chaining in the XPT layer */	
@@ -309,16 +332,14 @@
 	path_id_t	path_id;	/* Path ID for the request */
 	target_id_t	target_id;	/* Target device ID */
 	lun_id_t	target_lun;	/* Target LUN number */
+	lun64_id_t	ext_lun;	/* 64bit extended/multi-level LUNs */
 	u_int32_t	flags;		/* ccb_flags */
+	u_int32_t	xflags;		/* Extended flags */
 	ccb_ppriv_area	periph_priv;
 	ccb_spriv_area	sim_priv;
-	u_int32_t	timeout;	/* Timeout value */
-
-	/*
-	 * Deprecated, only for use by non-MPSAFE SIMs.  All others must
-	 * allocate and initialize their own callout storage.
-	 */
-	struct		callout_handle timeout_ch;
+	ccb_qos_area	qos;
+	u_int32_t	timeout;	/* Hard timeout value in mseconds */
+	struct timeval	softtimeout;	/* Soft timeout value in sec + usec */
 };
 
 /* Get Device Information CCB */
@@ -337,8 +358,8 @@
 	struct	ccb_hdr	ccb_h;
 	int	dev_openings;	/* Space left for more work on device*/	
 	int	dev_active;	/* Transactions running on the device */
-	int	devq_openings;	/* Space left for more queued work */
-	int	devq_queued;	/* Transactions queued to be sent */
+	int	allocated;	/* CCBs allocated for the device */
+	int	queued;		/* CCBs queued to be sent to the device */
 	int	held;		/*
 				 * CCBs held by peripheral drivers
 				 * for this device
@@ -540,7 +561,7 @@
 /*
  * Definitions for the path inquiry CCB fields.
  */
-#define CAM_VERSION	0x16	/* Hex value for current version */
+#define CAM_VERSION	0x18	/* Hex value for current version */
 
 typedef enum {
 	PI_MDP_ABLE	= 0x80,	/* Supports MDP message */
@@ -563,12 +584,15 @@
 } pi_tmflag;
 
 typedef enum {
+	PIM_EXTLUNS	= 0x100,/* 64bit extended LUNs supported */
 	PIM_SCANHILO	= 0x80,	/* Bus scans from high ID to low ID */
 	PIM_NOREMOVE	= 0x40,	/* Removable devices not included in scan */
 	PIM_NOINITIATOR	= 0x20,	/* Initiator role not supported. */
 	PIM_NOBUSRESET	= 0x10,	/* User has disabled initial BUS RESET */
 	PIM_NO_6_BYTE	= 0x08,	/* Do not send 6-byte commands */
-	PIM_SEQSCAN	= 0x04	/* Do bus scans sequentially, not in parallel */
+	PIM_SEQSCAN	= 0x04,	/* Do bus scans sequentially, not in parallel */
+	PIM_UNMAPPED	= 0x02,
+	PIM_NOSCAN	= 0x01	/* SIM does its own scanning */
 } pi_miscflag;
 
 /* Path Inquiry CCB */
@@ -592,8 +616,8 @@
 	struct 	    ccb_hdr ccb_h;
 	u_int8_t    version_num;	/* Version number for the SIM/HBA */
 	u_int8_t    hba_inquiry;	/* Mimic of INQ byte 7 for the HBA */
-	u_int8_t    target_sprt;	/* Flags for target mode support */
-	u_int8_t    hba_misc;		/* Misc HBA features */
+	u_int16_t   target_sprt;	/* Flags for target mode support */
+	u_int32_t   hba_misc;		/* Misc HBA features */
 	u_int16_t   hba_eng_cnt;	/* HBA engine count */
 					/* Vendor Unique capabilities */
 	u_int8_t    vuhba_flags[VUHBALEN];
@@ -704,6 +728,13 @@
 	u_int	   init_id;		/* initiator id of who selected */
 };
 
+static __inline uint8_t *
+scsiio_cdb_ptr(struct ccb_scsiio *ccb)
+{
+	return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
+	    ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
+}
+
 /*
  * ATA I/O Request CCB used for the XPT_ATA_IO function code.
  */
@@ -737,6 +768,13 @@
 	struct     scsi_sense_data sense_data;
 };
 
+static __inline uint8_t *
+atio_cdb_ptr(struct ccb_accept_tio *ccb)
+{
+	return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
+	    ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
+}
+
 /* Release SIM Queue */
 struct ccb_relsim {
 	struct ccb_hdr ccb_h;
@@ -745,7 +783,6 @@
 #define RELSIM_RELEASE_AFTER_TIMEOUT	0x02
 #define RELSIM_RELEASE_AFTER_CMDCMPLT	0x04
 #define RELSIM_RELEASE_AFTER_QEMPTY	0x08
-#define RELSIM_RELEASE_RUNLEVEL		0x10
 	u_int32_t      openings;
 	u_int32_t      release_timeout;	/* Abstract argument. */
 	u_int32_t      qfrozen_cnt;
@@ -1062,7 +1099,17 @@
 	u_int     tag_id;		/* Tag for immediate notify */
 	u_int     seq_id;		/* Tag for target of notify */
 	u_int     initiator_id;		/* Initiator Identifier */
-	u_int     arg;			/* Function specific */
+	u_int     arg;			/* Response information */
+	/*
+	 * Lower byte of arg is one of RESPONSE CODE values defined below
+	 * (subset of response codes from SPL-4 and FCP-4 specifications),
+	 * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION.
+	 */
+#define	CAM_RSP_TMF_COMPLETE		0x00
+#define	CAM_RSP_TMF_REJECTED		0x04
+#define	CAM_RSP_TMF_FAILED		0x05
+#define	CAM_RSP_TMF_SUCCEEDED		0x08
+#define	CAM_RSP_TMF_INCORRECT_LUN	0x09
 };
 
 /* HBA engine structures. */
@@ -1128,6 +1175,7 @@
 struct ccb_dev_advinfo {
 	struct ccb_hdr ccb_h;
 	uint32_t flags;
+#define	CDAI_FLAG_NONE		0x0	/* No flags set */
 #define	CDAI_FLAG_STORE		0x1	/* If set, action becomes store */
 	uint32_t buftype;		/* IN: Type of data being requested */
 	/* NB: buftype is interpreted on a per-transport basis */
@@ -1135,6 +1183,7 @@
 #define	CDAI_TYPE_SERIAL_NUM	2
 #define	CDAI_TYPE_PHYS_PATH	3
 #define	CDAI_TYPE_RCAPLONG	4
+#define	CDAI_TYPE_EXT_INQ	5
 	off_t bufsiz;			/* IN: Size of external buffer */
 #define	CAM_SCSI_DEVID_MAXLEN	65536	/* length in buffer is an uint16_t */
 	off_t provsiz;			/* OUT: Size required/used */
@@ -1142,6 +1191,16 @@
 };
 
 /*
+ * CCB for sending async events
+ */
+struct ccb_async {
+	struct ccb_hdr ccb_h;
+	uint32_t async_code;
+	off_t async_arg_size;
+	void *async_arg_ptr;
+};
+
+/*
  * Union of all CCB types for kernel space allocation.  This union should
  * never be used for manipulating CCBs - its only use is for the allocation
  * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc
@@ -1180,8 +1239,13 @@
 	struct  ccb_debug		cdbg;
 	struct	ccb_ataio		ataio;
 	struct	ccb_dev_advinfo		cdai;
+	struct	ccb_async		casync;
 };
 
+#define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp)			\
+	bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h),	\
+	    sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h))
+
 __BEGIN_DECLS
 static __inline void
 cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
@@ -1222,6 +1286,7 @@
 {
 	csio->ccb_h.func_code = XPT_SCSI_IO;
 	csio->ccb_h.flags = flags;
+	csio->ccb_h.xflags = 0;
 	csio->ccb_h.retry_count = retries;	
 	csio->ccb_h.cbfcnp = cbfcnp;
 	csio->ccb_h.timeout = timeout;
@@ -1241,6 +1306,7 @@
 {
 	csio->ccb_h.func_code = XPT_CONT_TARGET_IO;
 	csio->ccb_h.flags = flags;
+	csio->ccb_h.xflags = 0;
 	csio->ccb_h.retry_count = retries;	
 	csio->ccb_h.cbfcnp = cbfcnp;
 	csio->ccb_h.timeout = timeout;
@@ -1295,6 +1361,19 @@
 	smpio->smp_response_len = smp_response_len;
 }
 
+static __inline void
+cam_set_ccbstatus(union ccb *ccb, cam_status status)
+{
+	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
+	ccb->ccb_h.status |= status;
+}
+
+static __inline cam_status
+cam_ccb_status(union ccb *ccb)
+{
+	return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK));
+}
+
 void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
 
 __END_DECLS

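The new inline accessors keep callers from open-coding the CAM_STATUS_MASK
arithmetic; a sketch of a completion handler using them:

    if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
        /*
         * Replaces only the status code; flag bits such as
         * CAM_DEV_QFRZN in ccb_h.status are preserved.
         */
        cam_set_ccbstatus(done_ccb, CAM_REQ_CMP_ERR);
    }
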
Added: trunk/sys/cam/cam_compat.c
===================================================================
--- trunk/sys/cam/cam_compat.c	                        (rev 0)
+++ trunk/sys/cam/cam_compat.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -0,0 +1,205 @@
+/* $MidnightBSD$ */
+/*-
+ * CAM ioctl compatibility shims
+ *
+ * Copyright (c) 2013 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/cam/cam_compat.c 306750 2016-10-06 03:20:47Z mav $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/kthread.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_xpt.h>
+#include <cam/cam_compat.h>
+
+#include <cam/scsi/scsi_pass.h>
+
+#include "opt_cam.h"
+
+static int cam_compat_handle_0x17(struct cdev *dev, u_long cmd, caddr_t addr,
+    int flag, struct thread *td, d_ioctl_t *cbfnp);
+
+int
+cam_compat_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+    struct thread *td, d_ioctl_t *cbfnp)
+{
+	int error;
+
+	switch (cmd) {
+	case CAMIOCOMMAND_0x16:
+	{
+		struct ccb_hdr_0x17 *hdr17;
+
+		hdr17 = (struct ccb_hdr_0x17 *)addr;
+		if (hdr17->flags & CAM_SG_LIST_PHYS_0x16) {
+			hdr17->flags &= ~CAM_SG_LIST_PHYS_0x16;
+			hdr17->flags |= CAM_DATA_SG_PADDR;
+		}
+		if (hdr17->flags & CAM_DATA_PHYS_0x16) {
+			hdr17->flags &= ~CAM_DATA_PHYS_0x16;
+			hdr17->flags |= CAM_DATA_PADDR;
+		}
+		if (hdr17->flags & CAM_SCATTER_VALID_0x16) {
+			hdr17->flags &= ~CAM_SCATTER_VALID_0x16;
+			hdr17->flags |= CAM_DATA_SG;
+		}
+		cmd = CAMIOCOMMAND;
+		error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp);
+		break;
+	}
+	case CAMGETPASSTHRU_0x16:
+		cmd = CAMGETPASSTHRU;
+		error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp);
+		break;
+	case CAMIOCOMMAND_0x17:
+		cmd = CAMIOCOMMAND;
+		error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp);
+		break;
+	case CAMGETPASSTHRU_0x17:
+		cmd = CAMGETPASSTHRU;
+		error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp);
+		break;
+	default:
+		error = ENOTTY;
+	}
+
+	return (error);
+}
+
+static int
+cam_compat_handle_0x17(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+    struct thread *td, d_ioctl_t *cbfnp)
+{
+	union ccb		*ccb;
+	struct ccb_hdr		*hdr;
+	struct ccb_hdr_0x17	*hdr17;
+	uint8_t			*ccbb, *ccbb17;
+	u_int			error;
+
+	hdr17 = (struct ccb_hdr_0x17 *)addr;
+	ccb = xpt_alloc_ccb();
+	hdr = &ccb->ccb_h;
+
+	hdr->pinfo = hdr17->pinfo;
+	hdr->xpt_links = hdr17->xpt_links;
+	hdr->sim_links = hdr17->sim_links;
+	hdr->periph_links = hdr17->periph_links;
+	hdr->retry_count = hdr17->retry_count;
+	hdr->cbfcnp = hdr17->cbfcnp;
+	hdr->func_code = hdr17->func_code;
+	hdr->status = hdr17->status;
+	hdr->path = hdr17->path;
+	hdr->path_id = hdr17->path_id;
+	hdr->target_id = hdr17->target_id;
+	hdr->target_lun = hdr17->target_lun;
+	hdr->ext_lun.lun64 = 0;
+	hdr->flags = hdr17->flags;
+	hdr->xflags = 0;
+	hdr->periph_priv = hdr17->periph_priv;
+	hdr->sim_priv = hdr17->sim_priv;
+	hdr->timeout = hdr17->timeout;
+	hdr->softtimeout.tv_sec = 0;
+	hdr->softtimeout.tv_usec = 0;
+
+	ccbb = (uint8_t *)&hdr[1];
+	ccbb17 = (uint8_t *)&hdr17[1];
+	bcopy(ccbb17, ccbb, CAM_0X17_DATA_LEN);
+
+	error = (cbfnp)(dev, cmd, (caddr_t)ccb, flag, td);
+
+	hdr17->pinfo = hdr->pinfo;
+	hdr17->xpt_links = hdr->xpt_links;
+	hdr17->sim_links = hdr->sim_links;
+	hdr17->periph_links = hdr->periph_links;
+	hdr17->retry_count = hdr->retry_count;
+	hdr17->cbfcnp = hdr->cbfcnp;
+	hdr17->func_code = hdr->func_code;
+	hdr17->status = hdr->status;
+	hdr17->path = hdr->path;
+	hdr17->path_id = hdr->path_id;
+	hdr17->target_id = hdr->target_id;
+	hdr17->target_lun = hdr->target_lun;
+	hdr17->flags = hdr->flags;
+	hdr17->periph_priv = hdr->periph_priv;
+	hdr17->sim_priv = hdr->sim_priv;
+	hdr17->timeout = hdr->timeout;
+
+	/* The PATH_INQ only needs special handling on the way out */
+	if (ccb->ccb_h.func_code != XPT_PATH_INQ) {
+		bcopy(ccbb, ccbb17, CAM_0X17_DATA_LEN);
+	} else {
+		struct ccb_pathinq	*cpi;
+		struct ccb_pathinq_0x17 *cpi17;
+
+		cpi = &ccb->cpi;
+		cpi17 = (struct ccb_pathinq_0x17 *)hdr17;
+		cpi17->version_num = cpi->version_num;
+		cpi17->hba_inquiry = cpi->hba_inquiry;
+		cpi17->target_sprt = (u_int8_t)cpi->target_sprt;
+		cpi17->hba_misc = (u_int8_t)cpi->hba_misc;
+		cpi17->hba_eng_cnt = cpi->hba_eng_cnt;
+		bcopy(&cpi->vuhba_flags[0], &cpi17->vuhba_flags[0], VUHBALEN);
+		cpi17->max_target = cpi->max_target;
+		cpi17->max_lun = cpi->max_lun;
+		cpi17->async_flags = cpi->async_flags;
+		cpi17->hpath_id = cpi->hpath_id;
+		cpi17->initiator_id = cpi->initiator_id;
+		bcopy(&cpi->sim_vid[0], &cpi17->sim_vid[0], SIM_IDLEN);
+		bcopy(&cpi->hba_vid[0], &cpi17->hba_vid[0], HBA_IDLEN);
+		bcopy(&cpi->dev_name[0], &cpi17->dev_name[0], DEV_IDLEN);
+		cpi17->unit_number = cpi->unit_number;
+		cpi17->bus_id = cpi->bus_id;
+		cpi17->base_transfer_speed = cpi->base_transfer_speed;
+		cpi17->protocol = cpi->protocol;
+		cpi17->protocol_version = cpi->protocol_version;
+		cpi17->transport = cpi->transport;
+		cpi17->transport_version = cpi->transport_version;
+		bcopy(&cpi->xport_specific, &cpi17->xport_specific,
+		    PATHINQ_SETTINGS_SIZE);
+		cpi17->maxio = cpi->maxio;
+		cpi17->hba_vendor = cpi->hba_vendor;
+		cpi17->hba_device = cpi->hba_device;
+		cpi17->hba_subvendor = cpi->hba_subvendor;
+		cpi17->hba_subdevice = cpi->hba_subdevice;
+	}
+
+	xpt_free_ccb(ccb);
+
+	return (error);
+}


Property changes on: trunk/sys/cam/cam_compat.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
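
The 0x16 flag rewriting above folds three independent flag bits into the
encoded CAM_DATA_* data-type field defined in cam_ccb.h:

    CAM_SCATTER_VALID_0x16 (0x00000010) -> CAM_DATA_SG       (0x00040000)
    CAM_SG_LIST_PHYS_0x16  (0x00040000) -> CAM_DATA_SG_PADDR (0x00040010)
    CAM_DATA_PHYS_0x16     (0x00200000) -> CAM_DATA_PADDR    (0x00000010)

Because the old and new bit positions overlap, each branch clears the old
bit before setting the new encoding.
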
Added: trunk/sys/cam/cam_compat.h
===================================================================
--- trunk/sys/cam/cam_compat.h	                        (rev 0)
+++ trunk/sys/cam/cam_compat.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -0,0 +1,121 @@
+/* $MidnightBSD$ */
+/*-
+ * CAM ioctl compatibility shims
+ *
+ * Copyright (c) 2013 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/cam/cam_compat.h 306750 2016-10-06 03:20:47Z mav $
+ */
+
+#ifndef _CAM_CAM_COMPAT_H
+#define _CAM_CAM_COMPAT_H
+
+/* No user-serviceable parts in here. */
+#ifdef _KERNEL
+
+int cam_compat_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+    struct thread *td, int(*cbfnp)(struct cdev *, u_long, caddr_t, int,
+    struct thread *));
+
+
+/* Version 0x16 compatibility */
+#define CAM_VERSION_0x16	0x16
+
+/* The size of the union ccb didn't change when going to 0x17 */
+#define	CAMIOCOMMAND_0x16	_IOC(IOC_INOUT, CAM_VERSION_0x16, 2, CAM_0X17_LEN)
+#define	CAMGETPASSTHRU_0x16	_IOC(IOC_INOUT, CAM_VERSION_0x16, 3, CAM_0X17_LEN)
+
+#define CAM_SCATTER_VALID_0x16	0x00000010
+#define CAM_SG_LIST_PHYS_0x16	0x00040000
+#define CAM_DATA_PHYS_0x16	0x00200000
+
+/* Version 0x17 compatibility */
+#define CAM_VERSION_0x17	0x17
+
+struct ccb_hdr_0x17 {
+	cam_pinfo	pinfo;		/* Info for priority scheduling */
+	camq_entry	xpt_links;	/* For chaining in the XPT layer */	
+	camq_entry	sim_links;	/* For chaining in the SIM layer */	
+	camq_entry	periph_links;	/* For chaining in the type driver */
+	u_int32_t	retry_count;
+	void		(*cbfcnp)(struct cam_periph *, union ccb *);
+	xpt_opcode	func_code;	/* XPT function code */
+	u_int32_t	status;		/* Status returned by CAM subsystem */
+	struct		cam_path *path;	/* Compiled path for this ccb */
+	path_id_t	path_id;	/* Path ID for the request */
+	target_id_t	target_id;	/* Target device ID */
+	lun_id_t	target_lun;	/* Target LUN number */
+	u_int32_t	flags;		/* ccb_flags */
+	ccb_ppriv_area	periph_priv;
+	ccb_spriv_area	sim_priv;
+	u_int32_t	timeout;	/* Hard timeout value in seconds */
+	struct callout_handle timeout_ch;
+};
+
+struct ccb_pathinq_0x17 {
+	struct ccb_hdr_0x17 ccb_h;
+	u_int8_t    version_num;	/* Version number for the SIM/HBA */
+	u_int8_t    hba_inquiry;	/* Mimic of INQ byte 7 for the HBA */
+	u_int8_t    target_sprt;	/* Flags for target mode support */
+	u_int8_t    hba_misc;		/* Misc HBA features */
+	u_int16_t   hba_eng_cnt;	/* HBA engine count */
+					/* Vendor Unique capabilities */
+	u_int8_t    vuhba_flags[VUHBALEN];
+	u_int32_t   max_target;		/* Maximum supported Target */
+	u_int32_t   max_lun;		/* Maximum supported Lun */
+	u_int32_t   async_flags;	/* Installed Async handlers */
+	path_id_t   hpath_id;		/* Highest Path ID in the subsystem */
+	target_id_t initiator_id;	/* ID of the HBA on the SCSI bus */
+	char	    sim_vid[SIM_IDLEN];	/* Vendor ID of the SIM */
+	char	    hba_vid[HBA_IDLEN];	/* Vendor ID of the HBA */
+	char 	    dev_name[DEV_IDLEN];/* Device name for SIM */
+	u_int32_t   unit_number;	/* Unit number for SIM */
+	u_int32_t   bus_id;		/* Bus ID for SIM */
+	u_int32_t   base_transfer_speed;/* Base bus speed in KB/sec */
+	cam_proto   protocol;
+	u_int	    protocol_version;
+	cam_xport   transport;
+	u_int	    transport_version;
+	union {
+		struct ccb_pathinq_settings_spi spi;
+		struct ccb_pathinq_settings_fc fc;
+		struct ccb_pathinq_settings_sas sas;
+		char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE];
+	} xport_specific;
+	u_int		maxio;		/* Max supported I/O size, in bytes. */
+	u_int16_t	hba_vendor;	/* HBA vendor ID */
+	u_int16_t	hba_device;	/* HBA device ID */
+	u_int16_t	hba_subvendor;	/* HBA subvendor ID */
+	u_int16_t	hba_subdevice;	/* HBA subdevice ID */
+};
+
+#define	CAM_0X17_DATA_LEN	(sizeof(union ccb) - sizeof(struct ccb_hdr))
+#define	CAM_0X17_LEN		(sizeof(struct ccb_hdr_0x17) + CAM_0X17_DATA_LEN)
+
+#define	CAMIOCOMMAND_0x17	_IOC(IOC_INOUT, CAM_VERSION_0x17, 2, CAM_0X17_LEN)
+#define CAMGETPASSTHRU_0x17	_IOC(IOC_INOUT, CAM_VERSION_0x17, 3, CAM_0X17_LEN)
+
+#endif
+#endif


Property changes on: trunk/sys/cam/cam_compat.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
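
A sketch of how a driver's d_ioctl handler can chain to the shim for
commands it does not recognize (xxdoioctl here is an illustrative name for
that driver's own handler, not a symbol from this commit):

    error = xxdoioctl(dev, cmd, addr, flag, td);
    if (error == ENOTTY)
        error = cam_compat_ioctl(dev, cmd, addr, flag, td, xxdoioctl);

cam_compat_ioctl() translates the old CCB layout in place and calls back
into the supplied handler with the current-version command code.
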
Modified: trunk/sys/cam/cam_debug.h
===================================================================
--- trunk/sys/cam/cam_debug.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_debug.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Macros for tracing/logging information in the CAM layer
  *
@@ -25,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam_debug.h 265632 2014-05-08 06:55:48Z mav $
  */
 #ifndef	_CAM_CAM_DEBUG_H
 #define _CAM_CAM_DEBUG_H 1
@@ -61,13 +62,13 @@
 #endif
 
 #ifndef CAM_DEBUG_BUS
-#define CAM_DEBUG_BUS		(-1)
+#define CAM_DEBUG_BUS		CAM_BUS_WILDCARD
 #endif
 #ifndef CAM_DEBUG_TARGET
-#define CAM_DEBUG_TARGET	(-1)
+#define CAM_DEBUG_TARGET	CAM_TARGET_WILDCARD
 #endif
 #ifndef CAM_DEBUG_LUN
-#define CAM_DEBUG_LUN		(-1)
+#define CAM_DEBUG_LUN		CAM_LUN_WILDCARD
 #endif
 
 #ifndef CAM_DEBUG_DELAY
@@ -99,6 +100,17 @@
 			DELAY(cam_debug_delay);		\
 	}
 
+#define	CAM_DEBUG_DEV(dev, flag, printfargs)		\
+	if (((flag) & (CAM_DEBUG_COMPILE) & cam_dflags)	\
+	 && (cam_dpath != NULL)				\
+	 && (xpt_path_comp_dev(cam_dpath, dev) >= 0)	\
+	 && (xpt_path_comp_dev(cam_dpath, dev) < 2)) {	\
+		xpt_print_device(dev);			\
+		printf printfargs;			\
+		if (cam_debug_delay != 0)		\
+			DELAY(cam_debug_delay);		\
+	}
+
 #define	CAM_DEBUG_PRINT(flag, printfargs)		\
 	if (((flag) & (CAM_DEBUG_COMPILE) & cam_dflags)) {	\
 		printf("cam_debug: ");			\

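A minimal sketch of the new device-granular macro; the device pointer and
the message are assumptions for illustration:

    CAM_DEBUG_DEV(device, CAM_DEBUG_PROBE,
        ("device still unconfigured\n"));

It matches the debug path against a struct cam_ed directly, so it can fire
before any cam_path has been compiled for the device.
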
Modified: trunk/sys/cam/cam_periph.c
===================================================================
--- trunk/sys/cam/cam_periph.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_periph.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Common functions for CAM "type" (peripheral) drivers.
  *
@@ -28,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/cam_periph.c 320601 2017-07-03 15:34:19Z ken $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -75,7 +76,7 @@
 						 int *openings,
 						 u_int32_t *relsim_flags,
 						 u_int32_t *timeout,
-						 int *print,
+						 u_int32_t  *action,
 						 const char **action_string);
 static	int		camperiphscsisenseerror(union ccb *ccb,
 					        union ccb **orig_ccb,
@@ -84,8 +85,9 @@
 					        int *openings,
 					        u_int32_t *relsim_flags,
 					        u_int32_t *timeout,
-					        int *print,
+					        u_int32_t *action,
 					        const char **action_string);
+static void		cam_periph_devctl_notify(union ccb *ccb);
 
 static int nperiph_drivers;
 static int initialized = 0;
@@ -108,9 +110,19 @@
 	struct periph_driver **newdrivers, **old;
 	int ndrivers;
 
+again:
 	ndrivers = nperiph_drivers + 2;
 	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
 			    M_WAITOK);
+	xpt_lock_buses();
+	if (ndrivers != nperiph_drivers + 2) {
+		/*
+		 * Lost race against itself; go around.
+		 */
+		xpt_unlock_buses();
+		free(newdrivers, M_CAMPERIPH);
+		goto again;
+	}
 	if (periph_drivers)
 		bcopy(periph_drivers, newdrivers,
 		      sizeof(*newdrivers) * nperiph_drivers);
@@ -118,9 +130,10 @@
 	newdrivers[nperiph_drivers + 1] = NULL;
 	old = periph_drivers;
 	periph_drivers = newdrivers;
+	nperiph_drivers++;
+	xpt_unlock_buses();
 	if (old)
 		free(old, M_CAMPERIPH);
-	nperiph_drivers++;
 	/* If driver marked as early or it is late now, initialize it. */
 	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
 	    initialized > 1)
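
periphdriver_register() now performs the sleeping M_WAITOK allocation before
taking the bus lock and re-checks nperiph_drivers afterwards, retrying if a
concurrent registration slipped in while it slept.  The same pattern reduced
to its core (identifiers abbreviated from the code above):

    for (;;) {
        n = nperiph_drivers + 2;                 /* unlocked snapshot */
        p = malloc(n * sizeof(*p), M_CAMPERIPH, M_WAITOK); /* may sleep */
        xpt_lock_buses();
        if (n == nperiph_drivers + 2)            /* snapshot still valid */
            break;
        xpt_unlock_buses();                      /* lost the race; retry */
        free(p, M_CAMPERIPH);
    }
    /* ... copy the old array, publish p, nperiph_drivers++, unlock ... */
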
@@ -196,14 +209,14 @@
 	path_id = xpt_path_path_id(path);
 	target_id = xpt_path_target_id(path);
 	lun_id = xpt_path_lun_id(path);
-	cam_init_pinfo(&periph->pinfo);
 	periph->periph_start = periph_start;
 	periph->periph_dtor = periph_dtor;
 	periph->periph_oninval = periph_oninvalidate;
 	periph->type = type;
 	periph->periph_name = name;
+	periph->scheduled_priority = CAM_PRIORITY_NONE;
 	periph->immediate_priority = CAM_PRIORITY_NONE;
-	periph->refcount = 0;
+	periph->refcount = 1;		/* Dropped by invalidation. */
 	periph->sim = sim;
 	SLIST_INIT(&periph->ccb_list);
 	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
@@ -218,9 +231,9 @@
 	}
 	if (*p_drv == NULL) {
 		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
+		xpt_unlock_buses();
 		xpt_free_path(periph->path);
 		free(periph, M_CAMPERIPH);
-		xpt_unlock_buses();
 		return (CAM_REQ_INVALID);
 	}
 	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
@@ -258,7 +271,7 @@
 		break;
 	case 3:
 		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
-		xpt_remove_periph(periph, /*topology_lock_held*/ 0);
+		xpt_remove_periph(periph);
 		/* FALLTHROUGH */
 	case 2:
 		xpt_lock_buses();
@@ -298,7 +311,7 @@
 		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
 			if (xpt_path_comp(periph->path, path) == 0) {
 				xpt_unlock_buses();
-				mtx_assert(periph->sim->mtx, MA_OWNED);
+				cam_periph_assert(periph, MA_OWNED);
 				return(periph);
 			}
 		}
@@ -376,15 +389,24 @@
 }
 
 void
+cam_periph_doacquire(struct cam_periph *periph)
+{
+
+	xpt_lock_buses();
+	KASSERT(periph->refcount >= 1,
+	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
+	periph->refcount++;
+	xpt_unlock_buses();
+}
+
+void
 cam_periph_release_locked_buses(struct cam_periph *periph)
 {
 
-	mtx_assert(periph->sim->mtx, MA_OWNED);
+	cam_periph_assert(periph, MA_OWNED);
 	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
-	if (--periph->refcount == 0
-	    && (periph->flags & CAM_PERIPH_INVALID)) {
+	if (--periph->refcount == 0)
 		camperiphfree(periph);
-	}
 }
 
 void
@@ -402,16 +424,16 @@
 void
 cam_periph_release(struct cam_periph *periph)
 {
-	struct cam_sim *sim;
+	struct mtx *mtx;
 
 	if (periph == NULL)
 		return;
 	
-	sim = periph->sim;
-	mtx_assert(sim->mtx, MA_NOTOWNED);
-	mtx_lock(sim->mtx);
+	cam_periph_assert(periph, MA_NOTOWNED);
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
 	cam_periph_release_locked(periph);
-	mtx_unlock(sim->mtx);
+	mtx_unlock(mtx);
 }
 
 int
@@ -429,10 +451,10 @@
 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 		return (ENXIO);
 
-	mtx_assert(periph->sim->mtx, MA_OWNED);
+	cam_periph_assert(periph, MA_OWNED);
 	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
 		periph->flags |= CAM_PERIPH_LOCK_WANTED;
-		if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
+		if ((error = cam_periph_sleep(periph, periph, priority,
 		    "caplck", 0)) != 0) {
 			cam_periph_release_locked(periph);
 			return (error);
@@ -451,7 +473,7 @@
 cam_periph_unhold(struct cam_periph *periph)
 {
 
-	mtx_assert(periph->sim->mtx, MA_OWNED);
+	cam_periph_assert(periph, MA_OWNED);
 
 	periph->flags &= ~CAM_PERIPH_LOCKED;
 	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
@@ -579,23 +601,22 @@
 cam_periph_invalidate(struct cam_periph *periph)
 {
 
-	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
-	mtx_assert(periph->sim->mtx, MA_OWNED);
+	cam_periph_assert(periph, MA_OWNED);
 	/*
 	 * We only call this routine the first time a peripheral is
 	 * invalidated.
 	 */
-	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
-	 && (periph->periph_oninval != NULL))
-		periph->periph_oninval(periph);
+	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
+		return;
 
+	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
+	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
+		xpt_denounce_periph(periph);
 	periph->flags |= CAM_PERIPH_INVALID;
 	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
-
-	xpt_lock_buses();
-	if (periph->refcount == 0)
-		camperiphfree(periph);
-	xpt_unlock_buses();
+	if (periph->periph_oninval != NULL)
+		periph->periph_oninval(periph);
+	cam_periph_release_locked(periph);
 }
 
 static void
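
With the constructor reference taken in cam_periph_alloc() (refcount = 1,
dropped by invalidation), the lifetime rules reduce to:

    cam_periph_alloc()                refcount = 1
    cam_periph_acquire()/
    cam_periph_doacquire()            refcount++
    cam_periph_release_locked()       if (--refcount == 0) camperiphfree()
    cam_periph_invalidate()           sets CAM_PERIPH_INVALID, then drops
                                      the allocation reference

so camperiphfree() below runs exactly once, when the final reference is
released after invalidation.
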
@@ -602,8 +623,11 @@
 camperiphfree(struct cam_periph *periph)
 {
 	struct periph_driver **p_drv;
+	struct periph_driver *drv;
 
-	mtx_assert(periph->sim->mtx, MA_OWNED);
+	cam_periph_assert(periph, MA_OWNED);
+	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
+	    periph->periph_name, periph->unit_number));
 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
 		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
 			break;
@@ -612,6 +636,15 @@
 		printf("camperiphfree: attempt to free non-existant periph\n");
 		return;
 	}
+	/*
+	 * Cache a pointer to the periph_driver structure.  If a
+	 * periph_driver is added or removed from the array (see
+	 * periphdriver_register()) while we drop the toplogy lock
+	 * below, p_drv may change.  This doesn't protect against this
+	 * particular periph_driver going away.  That will require full
+	 * reference counting in the periph_driver infrastructure.
+	 */
+	drv = *p_drv;
 
 	/*
 	 * We need to set this flag before dropping the topology lock, to
@@ -647,13 +680,16 @@
 	 */
 	xpt_lock_buses();
 
-	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
-	(*p_drv)->generation++;
+	TAILQ_REMOVE(&drv->units, periph, unit_links);
+	drv->generation++;
 
-	xpt_remove_periph(periph, /*topology_lock_held*/ 1);
+	xpt_remove_periph(periph);
 
 	xpt_unlock_buses();
-	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
+	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
+		xpt_print(periph->path, "Periph destroyed\n");
+	else
+		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
 
 	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
 		union ccb ccb;
@@ -686,13 +722,14 @@
 
 /*
  * Map user virtual pointers into kernel virtual address space, so we can
- * access the memory.  This won't work on physical pointers, for now it's
- * up to the caller to check for that.  (XXX KDM -- should we do that here
- * instead?)  This also only works for up to MAXPHYS memory.  Since we use
+ * access the memory.  This is now a generic function that centralizes most
+ * of the sanity checks on the data flags, if any.
+ * This also only works for up to MAXPHYS memory.  Since we use
  * buffers to map stuff in and out, we're limited to the buffer size.
  */
 int
-cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
+cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
+    u_int maxmap)
 {
 	int numbufs, i, j;
 	int flags[CAM_PERIPH_MAXMAPS];
@@ -699,9 +736,11 @@
 	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
 	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
 	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
-	/* Some controllers may not be able to handle more data. */
-	size_t maxmap = DFLTPHYS;
 
+	if (maxmap == 0)
+		maxmap = DFLTPHYS;	/* traditional default */
+	else if (maxmap > MAXPHYS)
+		maxmap = MAXPHYS;	/* for safety */
 	switch(ccb->ccb_h.func_code) {
 	case XPT_DEV_MATCH:
 		if (ccb->cdm.match_buf_len == 0) {
@@ -733,7 +772,8 @@
 	case XPT_CONT_TARGET_IO:
 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
 			return(0);
-
+		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
+			return (EINVAL);
 		data_ptrs[0] = &ccb->csio.data_ptr;
 		lengths[0] = ccb->csio.dxfer_len;
 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
@@ -742,7 +782,8 @@
 	case XPT_ATA_IO:
 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
 			return(0);
-
+		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
+			return (EINVAL);
 		data_ptrs[0] = &ccb->ataio.data_ptr;
 		lengths[0] = ccb->ataio.dxfer_len;
 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
@@ -813,8 +854,12 @@
 
 	}
 
-	/* this keeps the current process from getting swapped */
 	/*
+	 * This keeps the kernel stack of the current thread from getting
+	 * swapped.  In low-memory situations where the kernel stack might
+	 * otherwise get swapped out, this holds it and allows the thread
+	 * to make progress and release the kernel mapped pages sooner.
+	 *
 	 * XXX KDM should I use P_NOSWAP instead?
 	 */
 	PHOLD(curproc);
@@ -845,7 +890,7 @@
 		 * into a larger area of VM, or if userland races against
 		 * vmapbuf() after the useracc() check.
 		 */
-		if (vmapbuf(mapinfo->bp[i]) < 0) {
+		if (vmapbuf(mapinfo->bp[i], 1) < 0) {
 			for (j = 0; j < i; ++j) {
 				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
 				vunmapbuf(mapinfo->bp[j]);
@@ -886,8 +931,7 @@
 	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
 
 	if (mapinfo->num_bufs_used <= 0) {
-		/* allow ourselves to be swapped once again */
-		PRELE(curproc);
+		/* nothing to free and the process wasn't held. */
 		return;
 	}
 
@@ -942,40 +986,14 @@
 	PRELE(curproc);
 }
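
cam_periph_mapmem() now takes an explicit maxmap so each caller can cap a mapping at its controller's DMA limit; 0 keeps the traditional DFLTPHYS default and anything above MAXPHYS is clamped. The XPT_DEV_MATCH handling in xptioctl() later in this patch shows the canonical pairing; condensed to a sketch (error handling trimmed, the MAXPHYS cap assumed appropriate for the caller):

	struct cam_periph_map_info mapinfo;
	int error;

	bzero(&mapinfo, sizeof(mapinfo));
	error = cam_periph_mapmem(ccb, &mapinfo, MAXPHYS);
	if (error != 0)
		return (error);		/* bad flags, bad address, too long */
	xpt_action(ccb);		/* immediate CCB: runs synchronously */
	cam_periph_unmapmem(ccb, &mapinfo);	/* copies out, drops PHOLD */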
 
-union ccb *
-cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
-{
-	struct ccb_hdr *ccb_h;
-
-	mtx_assert(periph->sim->mtx, MA_OWNED);
-	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));
-
-	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
-		if (periph->immediate_priority > priority)
-			periph->immediate_priority = priority;
-		xpt_schedule(periph, priority);
-		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
-		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
-			break;
-		mtx_assert(periph->sim->mtx, MA_OWNED);
-		mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
-		    0);
-	}
-
-	ccb_h = SLIST_FIRST(&periph->ccb_list);
-	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
-	return ((union ccb *)ccb_h);
-}
-
 void
 cam_periph_ccbwait(union ccb *ccb)
 {
-	struct cam_sim *sim;
 
-	sim = xpt_path_sim(ccb->ccb_h.path);
 	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
 	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
-		mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
+		xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, PRIBIO,
+		    "cbwait", 0);
 }
 
 int
@@ -1040,6 +1058,14 @@
 	return(error);
 }
 
+static void
+cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
+{
+
+	/* Caller will release the CCB */
+	wakeup(&done_ccb->ccb_h.cbfcnp);
+}
+
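cam_periph_runccb() below installs cam_periph_done() as the completion callback, so a finished CCB simply wakeup()s on &ccb->ccb_h.cbfcnp while the issuing thread parks in cam_periph_ccbwait() above until the CCB leaves CAM_REQ_INPROG. Reduced to a sketch (path lock held, as the assertion in runccb requires; the loop condition condenses the elided body of the do/while below):

	ccb->ccb_h.cbfcnp = cam_periph_done;	/* wakeup channel */
	xpt_action(ccb);			/* queue the request */
	do {
		cam_periph_ccbwait(ccb);	/* sleep on &ccb_h.cbfcnp */
	} while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG);
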
 int
 cam_periph_runccb(union ccb *ccb,
 		  int (*error_routine)(union ccb *ccb,
@@ -1048,12 +1074,12 @@
 		  cam_flags camflags, u_int32_t sense_flags,
 		  struct devstat *ds)
 {
-	struct cam_sim *sim;
+	struct bintime *starttime;
+	struct bintime ltime;
 	int error;
  
-	error = 0;
-	sim = xpt_path_sim(ccb->ccb_h.path);
-	mtx_assert(sim->mtx, MA_OWNED);
+	starttime = NULL;
+	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
 
 	/*
 	 * If the user has supplied a stats structure, and if we understand
@@ -1060,9 +1086,13 @@
 	 * this particular type of ccb, record the transaction start.
 	 */
 	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
-	    ccb->ccb_h.func_code == XPT_ATA_IO))
-		devstat_start_transaction(ds, NULL);
+	    ccb->ccb_h.func_code == XPT_ATA_IO)) {
+		starttime = &ltime;
+		binuptime(starttime);
+		devstat_start_transaction(ds, starttime);
+	}
 
+	ccb->ccb_h.cbfcnp = cam_periph_done;
 	xpt_action(ccb);
  
 	do {
@@ -1088,22 +1118,22 @@
 	if (ds != NULL) {
 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
 			devstat_end_transaction(ds,
-					ccb->csio.dxfer_len,
+					ccb->csio.dxfer_len - ccb->csio.resid,
 					ccb->csio.tag_action & 0x3,
 					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
 					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA : 
 					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
 					DEVSTAT_WRITE : 
-					DEVSTAT_READ, NULL, NULL);
+					DEVSTAT_READ, NULL, starttime);
 		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
 			devstat_end_transaction(ds,
-					ccb->ataio.dxfer_len,
+					ccb->ataio.dxfer_len - ccb->ataio.resid,
 					ccb->ataio.tag_action & 0x3,
 					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
 					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA : 
 					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
 					DEVSTAT_WRITE : 
-					DEVSTAT_READ, NULL, NULL);
+					DEVSTAT_READ, NULL, starttime);
 		}
 	}
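
With the residual subtracted and the recorded start time passed through, devstat now sees the bytes actually moved and real per-command latency instead of NULL timestamps. For example, a 64 KiB read that returns with resid 16384 is accounted as dxfer_len - resid = 65536 - 16384 = 49152 bytes.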
 
@@ -1113,23 +1143,15 @@
 void
 cam_freeze_devq(struct cam_path *path)
 {
+	struct ccb_hdr ccb_h;
 
-	cam_freeze_devq_arg(path, 0, 0);
+	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
+	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
+	ccb_h.func_code = XPT_NOOP;
+	ccb_h.flags = CAM_DEV_QFREEZE;
+	xpt_action((union ccb *)&ccb_h);
 }
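
cam_freeze_devq() is now just an XPT_NOOP CCB flagged CAM_DEV_QFREEZE, i.e. a bare freeze-count increment; every call must eventually be balanced by cam_release_devq(). Sketch of the pairing (argument values shown are the plain unfreeze case):

	cam_freeze_devq(path);		/* qfrozen_cnt++ */
	/* ... recover / reconfigure the device ... */
	cam_release_devq(path, /*relsim_flags*/0, /*openings*/0,
	    /*arg*/0, /*getcount_only*/FALSE);	/* qfrozen_cnt-- */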
 
-void
-cam_freeze_devq_arg(struct cam_path *path, uint32_t flags, uint32_t arg)
-{
-	struct ccb_relsim crs;
-
-	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NONE);
-	crs.ccb_h.func_code = XPT_FREEZE_QUEUE;
-	crs.release_flags = flags;
-	crs.openings = arg;
-	crs.release_timeout = arg;
-	xpt_action((union ccb *)&crs);
-}
-
 u_int32_t
 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
 		 u_int32_t openings, u_int32_t arg,
@@ -1137,6 +1159,8 @@
 {
 	struct ccb_relsim crs;
 
+	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
+	    relsim_flags, openings, arg, getcount_only));
 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 	crs.ccb_h.func_code = XPT_REL_SIMQ;
 	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
@@ -1283,7 +1307,7 @@
 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
     cam_flags camflags, u_int32_t sense_flags,
     int *openings, u_int32_t *relsim_flags,
-    u_int32_t *timeout, int *print, const char **action_string)
+    u_int32_t *timeout, u_int32_t *action, const char **action_string)
 {
 	int error;
 
@@ -1302,7 +1326,7 @@
 					        openings,
 					        relsim_flags,
 					        timeout,
-					        print,
+					        action,
 					        action_string);
 		break;
 	case SCSI_STATUS_QUEUE_FULL:
@@ -1357,7 +1381,7 @@
 			}
 			*timeout = 0;
 			error = ERESTART;
-			*print = 0;
+			*action &= ~SSQ_PRINT_SENSE;
 			break;
 		}
 		/* FALLTHROUGH */
@@ -1367,8 +1391,8 @@
 		 * Restart the queue after either another
 		 * command completes or a 1 second timeout.
 		 */
-	 	if (ccb->ccb_h.retry_count > 0) {
-	 		ccb->ccb_h.retry_count--;
+		if ((sense_flags & SF_RETRY_BUSY) != 0 ||
+		    (ccb->ccb_h.retry_count--) > 0) {
 			error = ERESTART;
 			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
 				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
@@ -1389,7 +1413,7 @@
 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
     cam_flags camflags, u_int32_t sense_flags,
     int *openings, u_int32_t *relsim_flags,
-    u_int32_t *timeout, int *print, const char **action_string)
+    u_int32_t *timeout, u_int32_t *action, const char **action_string)
 {
 	struct cam_periph *periph;
 	union ccb *orig_ccb = ccb;
@@ -1412,7 +1436,7 @@
 		 * imperative that we don't violate this assumption.
 		 */
 		error = ERESTART;
-		*print = 0;
+		*action &= ~SSQ_PRINT_SENSE;
 	} else {
 		scsi_sense_action err_action;
 		struct ccb_getdev cgd;
@@ -1584,7 +1608,7 @@
 		}
 
 sense_error_done:
-		*print = ((err_action & SSQ_PRINT_SENSE) != 0);
+		*action = err_action;
 	}
 	return (error);
 }
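
The status/sense helpers above now hand back the whole scsi_sense_action bitmask through *action instead of collapsing it to an int print flag, letting cam_periph_error() act on several SSQ_* bits from one table lookup. Condensed sketch of the consumer side (mirrors the code below):

	if (action & SSQ_PRINT_SENSE)
		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	if (action & SSQ_LOST)
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
	if (action & SSQ_UA)
		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
	if (action & SSQ_RESCAN)
		xpt_rescan(scan_ccb);	/* target-wide XPT_SCAN_TGT */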
@@ -1591,7 +1615,7 @@
 
 /*
  * Generic error handler.  Peripheral drivers usually filter
- * out the errors that they handle in a unique mannor, then
+ * out the errors that they handle in a unique manner, then
  * call this function.
  */
 int
@@ -1598,32 +1622,49 @@
 cam_periph_error(union ccb *ccb, cam_flags camflags,
 		 u_int32_t sense_flags, union ccb *save_ccb)
 {
-	union ccb  *orig_ccb;
+	struct cam_path *newpath;
+	union ccb  *orig_ccb, *scan_ccb;
 	struct cam_periph *periph;
 	const char *action_string;
 	cam_status  status;
-	int	    frozen, error, openings, print, lost_device;
-	int	    error_code, sense_key, asc, ascq;
-	u_int32_t   relsim_flags, timeout;
+	int	    frozen, error, openings, devctl_err;
+	u_int32_t   action, relsim_flags, timeout;
 
-	print = 1;
+	action = SSQ_PRINT_SENSE;
 	periph = xpt_path_periph(ccb->ccb_h.path);
 	action_string = NULL;
 	status = ccb->ccb_h.status;
 	frozen = (status & CAM_DEV_QFRZN) != 0;
 	status &= CAM_STATUS_MASK;
-	openings = relsim_flags = timeout = lost_device = 0;
+	devctl_err = openings = relsim_flags = timeout = 0;
 	orig_ccb = ccb;
 
+	/* Filter the errors that should be reported via devctl */
+	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
+	case CAM_CMD_TIMEOUT:
+	case CAM_REQ_ABORTED:
+	case CAM_REQ_CMP_ERR:
+	case CAM_REQ_TERMIO:
+	case CAM_UNREC_HBA_ERROR:
+	case CAM_DATA_RUN_ERR:
+	case CAM_SCSI_STATUS_ERROR:
+	case CAM_ATA_STATUS_ERROR:
+	case CAM_SMP_STATUS_ERROR:
+		devctl_err++;
+		break;
+	default:
+		break;
+	}
+
 	switch (status) {
 	case CAM_REQ_CMP:
 		error = 0;
-		print = 0;
+		action &= ~SSQ_PRINT_SENSE;
 		break;
 	case CAM_SCSI_STATUS_ERROR:
 		error = camperiphscsistatuserror(ccb, &orig_ccb,
 		    camflags, sense_flags, &openings, &relsim_flags,
-		    &timeout, &print, &action_string);
+		    &timeout, &action, &action_string);
 		break;
 	case CAM_AUTOSENSE_FAIL:
 		error = EIO;	/* we have to kill the command */
@@ -1654,8 +1695,7 @@
 		/* FALLTHROUGH */
 	case CAM_DEV_NOT_THERE:
 		error = ENXIO;
-		print = 0;
-		lost_device = 1;
+		action = SSQ_LOST;
 		break;
 	case CAM_REQ_INVALID:
 	case CAM_PATH_INVALID:
@@ -1664,6 +1704,7 @@
 	case CAM_REQ_TOO_BIG:
 	case CAM_LUN_INVALID:
 	case CAM_TID_INVALID:
+	case CAM_FUNC_NOTAVAIL:
 		error = EINVAL;
 		break;
 	case CAM_SCSI_BUS_RESET:
@@ -1686,7 +1727,7 @@
 			action_string = "Retry was blocked";
 		} else {
 			error = ERESTART;
-			print = 0;
+			action &= ~SSQ_PRINT_SENSE;
 		}
 		break;
 	case CAM_RESRC_UNAVAIL:
@@ -1725,12 +1766,12 @@
 
 	if ((sense_flags & SF_PRINT_ALWAYS) ||
 	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
-		print = 1;
+		action |= SSQ_PRINT_SENSE;
 	else if (sense_flags & SF_NO_PRINT)
-		print = 0;
-	if (print)
+		action &= ~SSQ_PRINT_SENSE;
+	if ((action & SSQ_PRINT_SENSE) != 0)
 		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
-	if (error != 0 && print) {
+	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
 		if (error != ERESTART) {
 			if (action_string == NULL)
 				action_string = "Unretryable error";
@@ -1742,8 +1783,10 @@
 			xpt_print(ccb->ccb_h.path, "Retrying command\n");
 	}
 
-	if (lost_device) {
-		struct cam_path *newpath;
+	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
+		cam_periph_devctl_notify(orig_ccb);
+
+	if ((action & SSQ_LOST) != 0) {
 		lun_id_t lun_id;
 
 		/*
@@ -1752,10 +1795,10 @@
 		 * then we only get rid of the device(s) specified by the
 		 * path in the original CCB.
 		 */
-		if (status == CAM_DEV_NOT_THERE)
+		if (status == CAM_SEL_TIMEOUT)
+			lun_id = CAM_LUN_WILDCARD;
+		else
 			lun_id = xpt_path_lun_id(ccb->ccb_h.path);
-		else
-			lun_id = CAM_LUN_WILDCARD;
 
 		/* Should we do more if we can't create the path?? */
 		if (xpt_create_path(&newpath, periph,
@@ -1770,12 +1813,31 @@
 			xpt_async(AC_LOST_DEVICE, newpath, NULL);
 			xpt_free_path(newpath);
 		}
+	}
 
 	/* Broadcast UNIT ATTENTIONs to all periphs. */
-	} else if (scsi_extract_sense_ccb(ccb,
-	    &error_code, &sense_key, &asc, &ascq) &&
-	    sense_key == SSD_KEY_UNIT_ATTENTION) {
+	if ((action & SSQ_UA) != 0)
 		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
+
+	/* Rescan target on "Reported LUNs data has changed" */
+	if ((action & SSQ_RESCAN) != 0) {
+		if (xpt_create_path(&newpath, NULL,
+				    xpt_path_path_id(ccb->ccb_h.path),
+				    xpt_path_target_id(ccb->ccb_h.path),
+				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
+
+			scan_ccb = xpt_alloc_ccb_nowait();
+			if (scan_ccb != NULL) {
+				scan_ccb->ccb_h.path = newpath;
+				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
+				scan_ccb->crcn.flags = 0;
+				xpt_rescan(scan_ccb);
+			} else {
+				xpt_print(newpath,
+				    "Can't allocate CCB to rescan target\n");
+				xpt_free_path(newpath);
+			}
+		}
 	}
 
 	/* Attempt a retry */
@@ -1794,3 +1856,81 @@
 
 	return (error);
 }
+
+#define CAM_PERIPH_DEVD_MSG_SIZE	256
+
+static void
+cam_periph_devctl_notify(union ccb *ccb)
+{
+	struct cam_periph *periph;
+	struct ccb_getdev *cgd;
+	struct sbuf sb;
+	int serr, sk, asc, ascq;
+	char *sbmsg, *type;
+
+	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
+	if (sbmsg == NULL)
+		return;
+
+	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
+
+	periph = xpt_path_periph(ccb->ccb_h.path);
+	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
+	    periph->unit_number);
+
+	sbuf_printf(&sb, "serial=\"");
+	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
+		xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
+		    CAM_PRIORITY_NORMAL);
+		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
+		xpt_action((union ccb *)cgd);
+
+		if (cgd->ccb_h.status == CAM_REQ_CMP)
+			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
+		xpt_free_ccb((union ccb *)cgd);
+	}
+	sbuf_printf(&sb, "\" ");
+	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
+
+	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
+	case CAM_CMD_TIMEOUT:
+		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
+		type = "timeout";
+		break;
+	case CAM_SCSI_STATUS_ERROR:
+		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
+		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
+			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
+			    serr, sk, asc, ascq);
+		type = "error";
+		break;
+	case CAM_ATA_STATUS_ERROR:
+	{
+		char res_str[(11 * 3) + 1];
+
+		sbuf_printf(&sb, "RES=\"%s\" ", ata_res_string(&ccb->ataio.res,
+		    res_str, sizeof(res_str)));
+		type = "error";
+		break;
+	}
+	default:
+		type = "error";
+		break;
+	}
+
+	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
+		sbuf_printf(&sb, "CDB=\"");
+		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
+		sbuf_printf(&sb, "\" ");
+	} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
+		sbuf_printf(&sb, "ACB=\"");
+		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
+		sbuf_printf(&sb, "\" ");
+	}
+
+	if (sbuf_finish(&sb) == 0)
+		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
+	sbuf_delete(&sb);
+	free(sbmsg, M_CAMPERIPH);
+}
+
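For reference, the notification built above is a flat key=value string; a timed-out da(4) command would produce something shaped like (all values illustrative):

	device=da0 serial="ABC123" cam_status="0x4b" timeout=30000 CDB="28 00 ..."

delivered as a devctl(4) event with system CAM, subsystem periph and type timeout.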

Modified: trunk/sys/cam/cam_periph.h
===================================================================
--- trunk/sys/cam/cam_periph.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_periph.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Data structures and definitions for CAM peripheral ("type") drivers.
  *
@@ -25,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam_periph.h 288817 2015-10-05 11:45:28Z mav $
  */
 
 #ifndef _CAM_CAM_PERIPH_H
@@ -35,6 +36,7 @@
 #include <cam/cam_sim.h>
 
 #ifdef _KERNEL
+#include <sys/taskqueue.h>
 
 #include <cam/cam_xpt.h>
 
@@ -90,7 +92,7 @@
 	CAM_PERIPH_BIO
 } cam_periph_type;
 
-/* Generically usefull offsets into the peripheral private area */
+/* Generically useful offsets into the peripheral private area */
 #define ppriv_ptr0 periph_priv.entries[0].ptr
 #define ppriv_ptr1 periph_priv.entries[1].ptr
 #define ppriv_field0 periph_priv.entries[0].field
@@ -103,7 +105,6 @@
 typedef void		periph_oninv_t (struct cam_periph *periph);
 typedef void		periph_dtor_t (struct cam_periph *periph);
 struct cam_periph {
-	cam_pinfo		 pinfo;
 	periph_start_t		*periph_start;
 	periph_oninv_t		*periph_oninval;
 	periph_dtor_t		*periph_dtor;
@@ -120,8 +121,13 @@
 #define CAM_PERIPH_INVALID		0x08
 #define CAM_PERIPH_NEW_DEV_FOUND	0x10
 #define CAM_PERIPH_RECOVERY_INPROG	0x20
+#define CAM_PERIPH_RUN_TASK		0x40
 #define CAM_PERIPH_FREE			0x80
-	u_int32_t		 immediate_priority;
+#define CAM_PERIPH_ANNOUNCED		0x100
+	uint32_t		 scheduled_priority;
+	uint32_t		 immediate_priority;
+	int			 periph_allocating;
+	int			 periph_allocated;
 	u_int32_t		 refcount;
 	SLIST_HEAD(, ccb_hdr)	 ccb_list;	/* For "immediate" requests */
 	SLIST_ENTRY(cam_periph)  periph_links;
@@ -128,6 +134,7 @@
 	TAILQ_ENTRY(cam_periph)  unit_links;
 	ac_callback_t		*deferred_callback; 
 	ac_code			 deferred_ac;
+	struct task		 periph_run_task;
 };
 
 #define CAM_PERIPH_MAXMAPS	2
@@ -146,6 +153,7 @@
 struct cam_periph *cam_periph_find(struct cam_path *path, char *name);
 int		cam_periph_list(struct cam_path *, struct sbuf *);
 cam_status	cam_periph_acquire(struct cam_periph *periph);
+void		cam_periph_doacquire(struct cam_periph *periph);
 void		cam_periph_release(struct cam_periph *periph);
 void		cam_periph_release_locked(struct cam_periph *periph);
 void		cam_periph_release_locked_buses(struct cam_periph *periph);
@@ -153,7 +161,8 @@
 void		cam_periph_unhold(struct cam_periph *periph);
 void		cam_periph_invalidate(struct cam_periph *periph);
 int		cam_periph_mapmem(union ccb *ccb,
-				  struct cam_periph_map_info *mapinfo);
+				  struct cam_periph_map_info *mapinfo,
+				  u_int maxmap);
 void		cam_periph_unmapmem(union ccb *ccb,
 				    struct cam_periph_map_info *mapinfo);
 union ccb	*cam_periph_getccb(struct cam_periph *periph,
@@ -171,8 +180,6 @@
 						      cam_flags camflags,
 						      u_int32_t sense_flags));
 void		cam_freeze_devq(struct cam_path *path);
-void		cam_freeze_devq_arg(struct cam_path *path, u_int32_t flags,
-		    uint32_t arg);
 u_int32_t	cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
 				 u_int32_t opening_reduction, u_int32_t arg,
 				 int getcount_only);
@@ -186,31 +193,27 @@
 int		cam_periph_error(union ccb *ccb, cam_flags camflags,
 				 u_int32_t sense_flags, union ccb *save_ccb);
 
-static __inline void
-cam_periph_lock(struct cam_periph *periph)
+static __inline struct mtx *
+cam_periph_mtx(struct cam_periph *periph)
 {
-	mtx_lock(periph->sim->mtx);
+	return (xpt_path_mtx(periph->path));
 }
 
-static __inline void
-cam_periph_unlock(struct cam_periph *periph)
-{
-	mtx_unlock(periph->sim->mtx);
-}
+#define cam_periph_owned(periph)					\
+	mtx_owned(xpt_path_mtx((periph)->path))
 
-static __inline int
-cam_periph_owned(struct cam_periph *periph)
-{
-	return (mtx_owned(periph->sim->mtx));
-}
+#define cam_periph_lock(periph)						\
+	mtx_lock(xpt_path_mtx((periph)->path))
 
-static __inline int
-cam_periph_sleep(struct cam_periph *periph, void *chan, int priority,
-		 const char *wmesg, int timo)
-{
-	return (msleep(chan, periph->sim->mtx, priority, wmesg, timo));
-}
+#define cam_periph_unlock(periph)					\
+	mtx_unlock(xpt_path_mtx((periph)->path))
 
+#define cam_periph_assert(periph, what)					\
+	mtx_assert(xpt_path_mtx((periph)->path), (what))
+
+#define cam_periph_sleep(periph, chan, priority, wmesg, timo)		\
+	xpt_path_sleep((periph)->path, (chan), (priority), (wmesg), (timo))
+
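The inline lock helpers are now thin wrappers over the path mutex, so periph drivers keep the same call shape while the per-SIM lock disappears. Typical use (softc and its busy flag are hypothetical):

	cam_periph_lock(periph);	/* mtx_lock(xpt_path_mtx(path)) */
	cam_periph_assert(periph, MA_OWNED);
	while (softc->busy)
		cam_periph_sleep(periph, softc, PRIBIO, "phwait", 0);
	cam_periph_unlock(periph);
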
 static inline struct cam_periph *
 cam_periph_acquire_first(struct periph_driver *driver)
 {
@@ -231,7 +234,7 @@
 {
 	struct cam_periph *periph = pperiph;
 
-	mtx_assert(pperiph->sim->mtx, MA_NOTOWNED);
+	cam_periph_assert(pperiph, MA_NOTOWNED);
 	xpt_lock_buses();
 	do {
 		periph = TAILQ_NEXT(periph, unit_links);

Modified: trunk/sys/cam/cam_queue.c
===================================================================
--- trunk/sys/cam/cam_queue.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_queue.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * CAM request queue management functions.
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/cam_queue.c 299677 2016-05-13 15:09:38Z pfg $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -122,7 +123,7 @@
 	cam_pinfo **new_array;
 
 	KASSERT(new_size >= queue->entries, ("camq_resize: "
-	    "New queue size can't accomodate queued entries (%d < %d).",
+	    "New queue size can't accommodate queued entries (%d < %d).",
 	    new_size, queue->entries));
 	new_array = (cam_pinfo **)malloc(new_size * sizeof(cam_pinfo *),
 					 M_CAMQ, M_NOWAIT);
@@ -220,9 +221,8 @@
 	}
 	if (cam_devq_init(devq, devices, openings) != 0) {
 		free(devq, M_CAMDEVQ);
-		return (NULL);		
+		return (NULL);
 	}
-	
 	return (devq);
 }
 
@@ -229,26 +229,22 @@
 int
 cam_devq_init(struct cam_devq *devq, int devices, int openings)
 {
+
 	bzero(devq, sizeof(*devq));
-	if (camq_init(&devq->alloc_queue, devices) != 0) {
+	mtx_init(&devq->send_mtx, "CAM queue lock", NULL, MTX_DEF);
+	if (camq_init(&devq->send_queue, devices) != 0)
 		return (1);
-	}
-	if (camq_init(&devq->send_queue, devices) != 0) {
-		camq_fini(&devq->alloc_queue);
-		return (1);
-	}
-	devq->alloc_openings = openings;
-	devq->alloc_active = 0;
 	devq->send_openings = openings;
-	devq->send_active = 0;	
-	return (0);	
+	devq->send_active = 0;
+	return (0);
 }
 
 void
 cam_devq_free(struct cam_devq *devq)
 {
-	camq_fini(&devq->alloc_queue);
+
 	camq_fini(&devq->send_queue);
+	mtx_destroy(&devq->send_mtx);
 	free(devq, M_CAMDEVQ);
 }
 
@@ -257,11 +253,7 @@
 {
 	u_int32_t retval;
 
-	retval = camq_resize(&camq->alloc_queue, devices);
-
-	if (retval == CAM_REQ_CMP)
-		retval = camq_resize(&camq->send_queue, devices);
-
+	retval = camq_resize(&camq->send_queue, devices);
 	return (retval);
 }
 
@@ -296,32 +288,16 @@
 cam_ccbq_resize(struct cam_ccbq *ccbq, int new_size)
 {
 	int delta;
-	int space_left;
 
 	delta = new_size - (ccbq->dev_active + ccbq->dev_openings);
-	space_left = new_size
-	    - ccbq->queue.entries
-	    - ccbq->held
-	    - ccbq->dev_active;
+	ccbq->total_openings += delta;
+	ccbq->dev_openings += delta;
 
-	/*
-	 * Only attempt to change the underlying queue size if we are
-	 * shrinking it and there is space for all outstanding entries
-	 * in the new array or we have been requested to grow the array.
-	 * We don't fail in the case where we can't reduce the array size,
-	 * but clients that care that the queue be "garbage collected"
-	 * should detect this condition and call us again with the
-	 * same size once the outstanding entries have been processed.
-	 */
-	if (space_left < 0
-	 || camq_resize(&ccbq->queue, new_size + (CAM_RL_VALUES - 1)) ==
-	    CAM_REQ_CMP) {
-		ccbq->devq_openings += delta;
-		ccbq->dev_openings += delta;
+	new_size = imax(64, 1 << fls(new_size + new_size / 2));
+	if (new_size > ccbq->queue.array_size)
+		return (camq_resize(&ccbq->queue, new_size));
+	else
 		return (CAM_REQ_CMP);
-	} else {
-		return (CAM_RESRC_UNAVAIL);
-	}
 }
 
 int
@@ -328,11 +304,11 @@
 cam_ccbq_init(struct cam_ccbq *ccbq, int openings)
 {
 	bzero(ccbq, sizeof(*ccbq));
-	if (camq_init(&ccbq->queue, openings + (CAM_RL_VALUES - 1)) != 0) {
+	if (camq_init(&ccbq->queue,
+	    imax(64, 1 << fls(openings + openings / 2))) != 0)
 		return (1);
-	}
-	ccbq->devq_openings = openings;
-	ccbq->dev_openings = openings;	
+	ccbq->total_openings = openings;
+	ccbq->dev_openings = openings;
 	return (0);
 }
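
The heap is now sized to a power of two with roughly 50% headroom: imax(64, 1 << fls(openings + openings / 2)). For openings = 100 that is fls(150) = 8, so 256 slots; anything up to openings = 42 floors at the 64-entry minimum.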
 

Modified: trunk/sys/cam/cam_queue.h
===================================================================
--- trunk/sys/cam/cam_queue.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_queue.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * CAM request queue management definitions.
  *
@@ -25,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam_queue.h 273078 2014-10-14 12:13:01Z mav $
  */
 
 #ifndef _CAM_CAM_QUEUE_H
@@ -33,6 +34,8 @@
 
 #ifdef _KERNEL
 
+#include <sys/lock.h>
+#include <sys/mutex.h>
 #include <sys/queue.h>
 #include <cam/cam.h>
 
@@ -48,7 +51,7 @@
 	int	   array_size;
 	int	   entries;
 	u_int32_t  generation;
-	u_int32_t  qfrozen_cnt[CAM_RL_VALUES];
+	u_int32_t  qfrozen_cnt;
 };
 
 TAILQ_HEAD(ccb_hdr_tailq, ccb_hdr);
@@ -57,22 +60,21 @@
 
 struct cam_ccbq {
 	struct	camq queue;
-	int	devq_openings;
-	int	dev_openings;	
+	struct ccb_hdr_tailq	queue_extra_head;
+	int	queue_extra_entries;
+	int	total_openings;
+	int	allocated;
+	int	dev_openings;
 	int	dev_active;
-	int	held;
 };
 
 struct cam_ed;
 
 struct cam_devq {
-	struct	camq alloc_queue;
-	struct	camq send_queue;
-	struct	cam_ed *active_dev;
-	int	alloc_openings;
-	int	alloc_active;
-	int	send_openings;
-	int	send_active;
+	struct mtx	 send_mtx;
+	struct camq	 send_queue;
+	int		 send_openings;
+	int		 send_active;
 };
 
 
@@ -158,10 +160,10 @@
 static __inline void
 cam_ccbq_take_opening(struct cam_ccbq *ccbq);
 
-static __inline int
+static __inline void
 cam_ccbq_insert_ccb(struct cam_ccbq *ccbq, union ccb *new_ccb);
 
-static __inline int
+static __inline void
 cam_ccbq_remove_ccb(struct cam_ccbq *ccbq, union ccb *ccb);
 
 static __inline union ccb *
@@ -180,41 +182,72 @@
 static __inline int
 cam_ccbq_pending_ccb_count(struct cam_ccbq *ccbq)
 {
-	return (ccbq->queue.entries);
+	return (ccbq->queue.entries + ccbq->queue_extra_entries);
 }
 
 static __inline void
 cam_ccbq_take_opening(struct cam_ccbq *ccbq)
 {
-	ccbq->devq_openings--;
-	ccbq->held++;
+
+	ccbq->allocated++;
 }
 
-static __inline int
+static __inline void
 cam_ccbq_insert_ccb(struct cam_ccbq *ccbq, union ccb *new_ccb)
 {
-	ccbq->held--;
-	camq_insert(&ccbq->queue, &new_ccb->ccb_h.pinfo);
-	if (ccbq->queue.qfrozen_cnt[CAM_PRIORITY_TO_RL(
-	    new_ccb->ccb_h.pinfo.priority)] > 0) {
-		ccbq->devq_openings++;
-		ccbq->held++;
-		return (1);
-	} else
-		return (0);
+	struct ccb_hdr *old_ccb;
+	struct camq *queue = &ccbq->queue;
+
+	/*
+	 * If queue is already full, try to resize.
+	 * If resize fail, push CCB with lowest priority out to the TAILQ.
+	 */
+	if (queue->entries == queue->array_size &&
+	    camq_resize(&ccbq->queue, queue->array_size * 2) != CAM_REQ_CMP) {
+		old_ccb = (struct ccb_hdr *)camq_remove(queue, queue->entries);
+		TAILQ_INSERT_HEAD(&ccbq->queue_extra_head, old_ccb,
+		    xpt_links.tqe);
+		old_ccb->pinfo.index = CAM_EXTRAQ_INDEX;
+		ccbq->queue_extra_entries++;
+	}
+
+	camq_insert(queue, &new_ccb->ccb_h.pinfo);
 }
 
-static __inline int
+static __inline void
 cam_ccbq_remove_ccb(struct cam_ccbq *ccbq, union ccb *ccb)
 {
-	camq_remove(&ccbq->queue, ccb->ccb_h.pinfo.index);
-	if (ccbq->queue.qfrozen_cnt[CAM_PRIORITY_TO_RL(
-	    ccb->ccb_h.pinfo.priority)] > 0) {
-		ccbq->devq_openings--;
-		ccbq->held--;
-		return (1);
-	} else
-		return (0);
+	struct ccb_hdr *cccb, *bccb;
+	struct camq *queue = &ccbq->queue;
+
+	/* If the CCB is on the TAILQ, remove it from there. */
+	if (ccb->ccb_h.pinfo.index == CAM_EXTRAQ_INDEX) {
+		TAILQ_REMOVE(&ccbq->queue_extra_head, &ccb->ccb_h,
+		    xpt_links.tqe);
+		ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
+		ccbq->queue_extra_entries--;
+		return;
+	}
+
+	camq_remove(queue, ccb->ccb_h.pinfo.index);
+
+	/*
+	 * If there are some CCBs on TAILQ, find the best one and move it
+	 * to the emptied space in the queue.
+	 */
+	bccb = TAILQ_FIRST(&ccbq->queue_extra_head);
+	if (bccb == NULL)
+		return;
+	TAILQ_FOREACH(cccb, &ccbq->queue_extra_head, xpt_links.tqe) {
+		if (bccb->pinfo.priority > cccb->pinfo.priority ||
+		    (bccb->pinfo.priority == cccb->pinfo.priority &&
+		     GENERATIONCMP(bccb->pinfo.generation, >,
+		      cccb->pinfo.generation)))
+		        bccb = cccb;
+	}
+	TAILQ_REMOVE(&ccbq->queue_extra_head, bccb, xpt_links.tqe);
+	ccbq->queue_extra_entries--;
+	camq_insert(queue, &bccb->pinfo);
 }
 
 static __inline union ccb *
@@ -229,7 +262,7 @@
 
 	send_ccb->ccb_h.pinfo.index = CAM_ACTIVE_INDEX;
 	ccbq->dev_active++;
-	ccbq->dev_openings--;		
+	ccbq->dev_openings--;
 }
 
 static __inline void
@@ -237,92 +270,15 @@
 {
 
 	ccbq->dev_active--;
-	ccbq->dev_openings++;	
-	ccbq->held++;
+	ccbq->dev_openings++;
 }
 
 static __inline void
 cam_ccbq_release_opening(struct cam_ccbq *ccbq)
 {
-	ccbq->held--;
-	ccbq->devq_openings++;
-}
 
-static __inline int
-cam_ccbq_freeze(struct cam_ccbq *ccbq, cam_rl rl, u_int32_t cnt)
-{
-	int i, frozen = 0;
-	cam_rl p, n;
-
-	/* Find pevious run level. */
-	for (p = 0; p < CAM_RL_VALUES && ccbq->queue.qfrozen_cnt[p] == 0; p++);
-	/* Find new run level. */
-	n = min(rl, p);
-	/* Apply new run level. */
-	for (i = rl; i < CAM_RL_VALUES; i++)
-		ccbq->queue.qfrozen_cnt[i] += cnt;
-	/* Update ccbq statistics. */
-	if (n == p)
-		return (0);
-	for (i = CAMQ_HEAD; i <= ccbq->queue.entries; i++) {
-		cam_rl rrl =
-		    CAM_PRIORITY_TO_RL(ccbq->queue.queue_array[i]->priority);
-		if (rrl < n)
-			continue;
-		if (rrl >= p)
-			break;
-		ccbq->devq_openings++;
-		ccbq->held++;
-		frozen++;
-	}
-	return (frozen);
+	ccbq->allocated--;
 }
 
-static __inline int
-cam_ccbq_release(struct cam_ccbq *ccbq, cam_rl rl, u_int32_t cnt)
-{
-	int i, released = 0;
-	cam_rl p, n;
-
-	/* Apply new run level. */
-	for (i = rl; i < CAM_RL_VALUES; i++)
-		ccbq->queue.qfrozen_cnt[i] -= cnt;
-	/* Find new run level. */
-	for (n = 0; n < CAM_RL_VALUES && ccbq->queue.qfrozen_cnt[n] == 0; n++);
-	/* Find previous run level. */
-	p = min(rl, n);
-	/* Update ccbq statistics. */
-	if (n == p)
-		return (0);
-	for (i = CAMQ_HEAD; i <= ccbq->queue.entries; i++) {
-		cam_rl rrl =
-		    CAM_PRIORITY_TO_RL(ccbq->queue.queue_array[i]->priority);
-		if (rrl < p)
-			continue;
-		if (rrl >= n)
-			break;
-		ccbq->devq_openings--;
-		ccbq->held--;
-		released++;
-	}
-	return (released);
-}
-
-static __inline u_int32_t
-cam_ccbq_frozen(struct cam_ccbq *ccbq, cam_rl rl)
-{
-	
-	return (ccbq->queue.qfrozen_cnt[rl]);
-}
-
-static __inline u_int32_t
-cam_ccbq_frozen_top(struct cam_ccbq *ccbq)
-{
-	cam_rl rl;
-	
-	rl = CAM_PRIORITY_TO_RL(CAMQ_GET_PRIO(&ccbq->queue));
-	return (ccbq->queue.qfrozen_cnt[rl]);
-}
-
 #endif /* _KERNEL */
 #endif  /* _CAM_CAM_QUEUE_H */

Modified: trunk/sys/cam/cam_sim.c
===================================================================
--- trunk/sys/cam/cam_sim.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_sim.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Common functions for SCSI Interface Modules (SIMs).
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/cam_sim.c 316499 2017-04-04 18:01:35Z mav $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -46,6 +47,9 @@
 
 static MALLOC_DEFINE(M_CAMSIM, "CAM SIM", "CAM SIM buffers");
 
+static struct mtx cam_sim_free_mtx;
+MTX_SYSINIT(cam_sim_free_init, &cam_sim_free_mtx, "CAM SIM free lock", MTX_DEF);
+
 struct cam_devq *
 cam_simq_alloc(u_int32_t max_sim_transactions)
 {
@@ -66,9 +70,6 @@
 {
 	struct cam_sim *sim;
 
-	if (mtx == NULL)
-		return (NULL);
-
 	sim = (struct cam_sim *)malloc(sizeof(struct cam_sim),
 	    M_CAMSIM, M_ZERO | M_NOWAIT);
 
@@ -87,7 +88,6 @@
 	sim->flags = 0;
 	sim->refcount = 1;
 	sim->devq = queue;
-	sim->max_ccbs = 8;	/* Reserve for management purposes. */
 	sim->mtx = mtx;
 	if (mtx == &Giant) {
 		sim->flags |= 0;
@@ -96,10 +96,6 @@
 		sim->flags |= CAM_SIM_MPSAFE;
 		callout_init(&sim->callout, 1);
 	}
-
-	SLIST_INIT(&sim->ccb_freeq);
-	TAILQ_INIT(&sim->sim_doneq);
-
 	return (sim);
 }
 
@@ -106,22 +102,24 @@
 void
 cam_sim_free(struct cam_sim *sim, int free_devq)
 {
-	union ccb *ccb;
+	struct mtx *mtx = sim->mtx;
 	int error;
 
-	mtx_assert(sim->mtx, MA_OWNED);
+	if (mtx) {
+		mtx_assert(mtx, MA_OWNED);
+	} else {
+		mtx = &cam_sim_free_mtx;
+		mtx_lock(mtx);
+	}
 	sim->refcount--;
 	if (sim->refcount > 0) {
-		error = msleep(sim, sim->mtx, PRIBIO, "simfree", 0);
+		error = msleep(sim, mtx, PRIBIO, "simfree", 0);
 		KASSERT(error == 0, ("invalid error value for msleep(9)"));
 	}
-
 	KASSERT(sim->refcount == 0, ("sim->refcount == 0"));
+	if (sim->mtx == NULL)
+		mtx_unlock(mtx);
 
-	while ((ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) != NULL) {
-		SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
-		xpt_free_ccb(ccb);
-	}
 	if (free_devq)
 		cam_simq_free(sim->devq);
 	free(sim, M_CAMSIM);
@@ -130,21 +128,43 @@
 void
 cam_sim_release(struct cam_sim *sim)
 {
+	struct mtx *mtx = sim->mtx;
+
+	if (mtx) {
+		if (!mtx_owned(mtx))
+			mtx_lock(mtx);
+		else
+			mtx = NULL;
+	} else {
+		mtx = &cam_sim_free_mtx;
+		mtx_lock(mtx);
+	}
 	KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
-	mtx_assert(sim->mtx, MA_OWNED);
-
 	sim->refcount--;
 	if (sim->refcount == 0)
 		wakeup(sim);
+	if (mtx)
+		mtx_unlock(mtx);
 }
 
 void
 cam_sim_hold(struct cam_sim *sim)
 {
+	struct mtx *mtx = sim->mtx;
+
+	if (mtx) {
+		if (!mtx_owned(mtx))
+			mtx_lock(mtx);
+		else
+			mtx = NULL;
+	} else {
+		mtx = &cam_sim_free_mtx;
+		mtx_lock(mtx);
+	}
 	KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
-	mtx_assert(sim->mtx, MA_OWNED);
-
 	sim->refcount++;
+	if (mtx)
+		mtx_unlock(mtx);
 }
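
cam_sim_hold()/cam_sim_release() now cope with SIMs created without a private lock (mtx == NULL) by serializing on the global cam_sim_free_mtx, and they avoid recursing on a lock the caller already owns. Both reduce to one pattern (sketch; the ++/-- and the wakeup are the only variation):

	struct mtx *m = sim->mtx;

	if (m != NULL) {
		if (!mtx_owned(m))
			mtx_lock(m);
		else
			m = NULL;	/* caller already holds it */
	} else {
		m = &cam_sim_free_mtx;	/* lockless SIM */
		mtx_lock(m);
	}
	sim->refcount++;		/* or --, waking sleepers at zero */
	if (m != NULL)
		mtx_unlock(m);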
 
 void

Modified: trunk/sys/cam/cam_sim.h
===================================================================
--- trunk/sys/cam/cam_sim.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_sim.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Data structures and definitions for SCSI Interface Modules (SIMs).
  *
@@ -25,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam_sim.h 260387 2014-01-07 01:51:48Z scottl $
  */
 
 #ifndef _CAM_CAM_SIM_H
@@ -104,23 +105,9 @@
 	u_int32_t		flags;
 #define	CAM_SIM_REL_TIMEOUT_PENDING	0x01
 #define	CAM_SIM_MPSAFE			0x02
-#define	CAM_SIM_ON_DONEQ		0x04
-#define	CAM_SIM_POLLED			0x08
-#define	CAM_SIM_BATCH			0x10
 	struct callout		callout;
 	struct cam_devq 	*devq;	/* Device Queue to use for this SIM */
 	int			refcount; /* References to the SIM. */
-
-	/* "Pool" of inactive ccbs managed by xpt_get_ccb and xpt_release_ccb */
-	SLIST_HEAD(,ccb_hdr)	ccb_freeq;
-	/*
-	 * Maximum size of ccb pool.  Modified as devices are added/removed
-	 * or have their * opening counts changed.
-	 */
-	u_int			max_ccbs;
-	/* Current count of allocated ccbs */
-	u_int			ccb_count;
-
 };
 
 #define CAM_SIM_LOCK(sim)	mtx_lock((sim)->mtx)

Modified: trunk/sys/cam/cam_xpt.c
===================================================================
--- trunk/sys/cam/cam_xpt.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_xpt.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Implementation of the Common Access Method Transport (XPT) layer.
  *
@@ -28,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/cam_xpt.c 316499 2017-04-04 18:01:35Z mav $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -40,7 +41,9 @@
 #include <sys/conf.h>
 #include <sys/fcntl.h>
 #include <sys/interrupt.h>
+#include <sys/proc.h>
 #include <sys/sbuf.h>
+#include <sys/smp.h>
 #include <sys/taskqueue.h>
 
 #include <sys/lock.h>
@@ -58,6 +61,7 @@
 #include <cam/cam_xpt_periph.h>
 #include <cam/cam_xpt_internal.h>
 #include <cam/cam_debug.h>
+#include <cam/cam_compat.h>
 
 #include <cam/scsi/scsi_all.h>
 #include <cam/scsi/scsi_message.h>
@@ -89,16 +93,12 @@
 	uintptr_t	data2;
 };
 
-typedef enum {
-	XPT_FLAG_OPEN		= 0x01
-} xpt_flags;
-
 struct xpt_softc {
-	xpt_flags		flags;
-	u_int32_t		xpt_generation;
+	uint32_t		xpt_generation;
 
 	/* number of high powered commands that can go through right now */
-	STAILQ_HEAD(highpowerlist, ccb_hdr)	highpowerq;
+	struct mtx		xpt_highpower_lock;
+	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
 	int			num_highpower;
 
 	/* queue for handling async rescan requests. */
@@ -117,6 +117,7 @@
 
 	struct mtx		xpt_topo_lock;
 	struct mtx		xpt_lock;
+	struct taskqueue	*xpt_taskq;
 };
 
 typedef enum {
@@ -151,19 +152,28 @@
 /* Transport layer configuration information */
 static struct xpt_softc xsoftc;
 
+MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);
+
 TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
            &xsoftc.boot_delay, 0, "Bus registration wait time");
+SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
+	    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
 
-/* Queues for our software interrupt handler */
-typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
-typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
-static cam_simq_t cam_simq;
-static struct mtx cam_simq_lock;
+struct cam_doneq {
+	struct mtx_padalign	cam_doneq_mtx;
+	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
+	int			cam_doneq_sleep;
+};
 
-/* Pointers to software interrupt handlers */
-static void *cambio_ih;
+static struct cam_doneq cam_doneqs[MAXCPU];
+static int cam_num_doneqs;
+static struct proc *cam_proc;
 
+TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
+SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
+           &cam_num_doneqs, 0, "Number of completion queues/threads");
+
 struct cam_periph *xpt_periph;
 
 static periph_init_t xpt_periph_init;
@@ -180,6 +190,7 @@
 static d_open_t xptopen;
 static d_close_t xptclose;
 static d_ioctl_t xptioctl;
+static d_ioctl_t xptdoioctl;
 
 static struct cdevsw xpt_cdevsw = {
 	.d_version =	D_VERSION,
@@ -222,16 +233,21 @@
 					void *async_arg);
 static path_id_t xptnextfreepathid(void);
 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
-static union ccb *xpt_get_ccb(struct cam_ed *device);
-static void	 xpt_run_dev_allocq(struct cam_eb *bus);
-static void	 xpt_run_dev_sendq(struct cam_eb *bus);
+static union ccb *xpt_get_ccb(struct cam_periph *periph);
+static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
+static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
+static void	 xpt_run_allocq_task(void *context, int pending);
+static void	 xpt_run_devq(struct cam_devq *devq);
 static timeout_t xpt_release_devq_timeout;
 static void	 xpt_release_simq_timeout(void *arg) __unused;
+static void	 xpt_acquire_bus(struct cam_eb *bus);
 static void	 xpt_release_bus(struct cam_eb *bus);
-static void	 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl,
-		    u_int count, int run_queue);
+static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
+static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
+		    int run_queue);
 static struct cam_et*
 		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
+static void	 xpt_acquire_target(struct cam_et *target);
 static void	 xpt_release_target(struct cam_et *target);
 static struct cam_eb*
 		 xpt_find_bus(path_id_t path_id);
@@ -240,11 +256,14 @@
 static struct cam_ed*
 		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
 static void	 xpt_config(void *arg);
+static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
+				 u_int32_t new_priority);
 static xpt_devicefunc_t xptpassannouncefunc;
 static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
 static void	 xptpoll(struct cam_sim *sim);
-static void	 camisr(void *);
-static void	 camisr_runqueue(void *);
+static void	 camisr_runqueue(void);
+static void	 xpt_done_process(struct ccb_hdr *ccb_h);
+static void	 xpt_done_td(void *);
 static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
 				    u_int num_patterns, struct cam_eb *bus);
 static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
@@ -295,42 +314,17 @@
 static xpt_busfunc_t	xptsetasyncbusfunc;
 static cam_status	xptregister(struct cam_periph *periph,
 				    void *arg);
-static __inline int periph_is_queued(struct cam_periph *periph);
-static __inline int device_is_alloc_queued(struct cam_ed *device);
-static __inline int device_is_send_queued(struct cam_ed *device);
+static __inline int device_is_queued(struct cam_ed *device);
 
 static __inline int
-xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
+xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
 {
-	int retval;
-
-	if ((dev->drvq.entries > 0) &&
-	    (dev->ccbq.devq_openings > 0) &&
-	    (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
-		CAMQ_GET_PRIO(&dev->drvq))) == 0)) {
-		/*
-		 * The priority of a device waiting for CCB resources
-		 * is that of the highest priority peripheral driver
-		 * enqueued.
-		 */
-		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
-					  &dev->alloc_ccb_entry.pinfo,
-					  CAMQ_GET_PRIO(&dev->drvq));
-	} else {
-		retval = 0;
-	}
-
-	return (retval);
-}
-
-static __inline int
-xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
-{
 	int	retval;
 
+	mtx_assert(&devq->send_mtx, MA_OWNED);
 	if ((dev->ccbq.queue.entries > 0) &&
 	    (dev->ccbq.dev_openings > 0) &&
-	    (cam_ccbq_frozen_top(&dev->ccbq) == 0)) {
+	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
 		/*
 		 * The priority of a device waiting for controller
 		 * resources is that of the highest priority CCB
@@ -337,8 +331,8 @@
 		 * enqueued.
 		 */
 		retval =
-		    xpt_schedule_dev(&bus->sim->devq->send_queue,
-				     &dev->send_ccb_entry.pinfo,
+		    xpt_schedule_dev(&devq->send_queue,
+				     &dev->devq_entry,
 				     CAMQ_GET_PRIO(&dev->ccbq.queue));
 	} else {
 		retval = 0;
@@ -347,23 +341,11 @@
 }
 
 static __inline int
-periph_is_queued(struct cam_periph *periph)
+device_is_queued(struct cam_ed *device)
 {
-	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
+	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
 }
 
-static __inline int
-device_is_alloc_queued(struct cam_ed *device)
-{
-	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
-}
-
-static __inline int
-device_is_send_queued(struct cam_ed *device)
-{
-	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
-}
-
 static void
 xpt_periph_init()
 {
@@ -370,13 +352,6 @@
 	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
 }
 
-static void
-xptdone(struct cam_periph *periph, union ccb *done_ccb)
-{
-	/* Caller will release the CCB */
-	wakeup(&done_ccb->ccb_h.cbfcnp);
-}
-
 static int
 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
 {
@@ -395,11 +370,6 @@
 		return(ENODEV);
 	}
 
-	/* Mark ourselves open */
-	mtx_lock(&xsoftc.xpt_lock);
-	xsoftc.flags |= XPT_FLAG_OPEN;
-	mtx_unlock(&xsoftc.xpt_lock);
-
 	return(0);
 }
 
@@ -407,11 +377,6 @@
 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 {
 
-	/* Mark ourselves closed */
-	mtx_lock(&xsoftc.xpt_lock);
-	xsoftc.flags &= ~XPT_FLAG_OPEN;
-	mtx_unlock(&xsoftc.xpt_lock);
-
 	return(0);
 }
 
@@ -426,6 +391,17 @@
 {
 	int error;
 
+	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
+		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
+	}
+	return (error);
+}
+	
+static int
+xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
+{
+	int error;
+
 	error = 0;
 
 	switch(cmd) {
@@ -476,19 +452,16 @@
 
 			ccb = xpt_alloc_ccb();
 
-			CAM_SIM_LOCK(bus->sim);
-
 			/*
 			 * Create a path using the bus, target, and lun the
 			 * user passed in.
 			 */
-			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
+			if (xpt_create_path(&ccb->ccb_h.path, NULL,
 					    inccb->ccb_h.path_id,
 					    inccb->ccb_h.target_id,
 					    inccb->ccb_h.target_lun) !=
 					    CAM_REQ_CMP){
 				error = EINVAL;
-				CAM_SIM_UNLOCK(bus->sim);
 				xpt_free_ccb(ccb);
 				break;
 			}
@@ -496,12 +469,12 @@
 			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
 				      inccb->ccb_h.pinfo.priority);
 			xpt_merge_ccb(ccb, inccb);
-			ccb->ccb_h.cbfcnp = xptdone;
+			xpt_path_lock(ccb->ccb_h.path);
 			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
+			xpt_path_unlock(ccb->ccb_h.path);
 			bcopy(ccb, inccb, sizeof(union ccb));
 			xpt_free_path(ccb->ccb_h.path);
 			xpt_free_ccb(ccb);
-			CAM_SIM_UNLOCK(bus->sim);
 			break;
 
 		case XPT_DEBUG: {
@@ -512,19 +485,16 @@
 			 * allocate it on the stack.
 			 */
 
-			CAM_SIM_LOCK(bus->sim);
-
 			/*
 			 * Create a path using the bus, target, and lun the
 			 * user passed in.
 			 */
-			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
+			if (xpt_create_path(&ccb.ccb_h.path, NULL,
 					    inccb->ccb_h.path_id,
 					    inccb->ccb_h.target_id,
 					    inccb->ccb_h.target_lun) !=
 					    CAM_REQ_CMP){
 				error = EINVAL;
-				CAM_SIM_UNLOCK(bus->sim);
 				break;
 			}
 			/* Ensure all of our fields are correct */
@@ -531,11 +501,9 @@
 			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
 				      inccb->ccb_h.pinfo.priority);
 			xpt_merge_ccb(&ccb, inccb);
-			ccb.ccb_h.cbfcnp = xptdone;
 			xpt_action(&ccb);
 			bcopy(&ccb, inccb, sizeof(union ccb));
 			xpt_free_path(ccb.ccb_h.path);
-			CAM_SIM_UNLOCK(bus->sim);
 			break;
 
 		}
@@ -547,7 +515,8 @@
 			 * We can't deal with physical addresses for this
 			 * type of transaction.
 			 */
-			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
+			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
+			    CAM_DATA_VADDR) {
 				error = EINVAL;
 				break;
 			}
@@ -572,7 +541,7 @@
 			 * Map the pattern and match buffers into kernel
 			 * virtual address space.
 			 */
-			error = cam_periph_mapmem(inccb, &mapinfo);
+			error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);
 
 			if (error) {
 				inccb->ccb_h.path = old_path;
@@ -582,9 +551,7 @@
 			/*
 			 * This is an immediate CCB, we can send it on directly.
 			 */
-			CAM_SIM_LOCK(xpt_path_sim(xpt_periph->path));
 			xpt_action(inccb);
-			CAM_SIM_UNLOCK(xpt_path_sim(xpt_periph->path));
 
 			/*
 			 * Map the buffers back into user space.
@@ -623,24 +590,11 @@
 		struct periph_driver **p_drv;
 		char   *name;
 		u_int unit;
-		u_int cur_generation;
 		int base_periph_found;
-		int splbreaknum;
 
 		ccb = (union ccb *)addr;
 		unit = ccb->cgdl.unit_number;
 		name = ccb->cgdl.periph_name;
-		/*
-		 * Every 100 devices, we want to drop our lock protection to
-		 * give the software interrupt handler a chance to run.
-		 * Most systems won't run into this check, but this should
-		 * avoid starvation in the software interrupt handler in
-		 * large systems.
-		 */
-		splbreaknum = 100;
-
-		ccb = (union ccb *)addr;
-
 		base_periph_found = 0;
 
 		/*
@@ -654,8 +608,6 @@
 
 		/* Keep the list from changing while we traverse it */
 		xpt_lock_buses();
-ptstartover:
-		cur_generation = xsoftc.xpt_generation;
 
 		/* first find our driver in the list of drivers */
 		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
@@ -682,15 +634,8 @@
 		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
 		     periph = TAILQ_NEXT(periph, unit_links)) {
 
-			if (periph->unit_number == unit) {
+			if (periph->unit_number == unit)
 				break;
-			} else if (--splbreaknum == 0) {
-				xpt_unlock_buses();
-				xpt_lock_buses();
-				splbreaknum = 100;
-				if (cur_generation != xsoftc.xpt_generation)
-				       goto ptstartover;
-			}
 		}
 		/*
 		 * If we found the peripheral driver that the user passed
@@ -824,21 +769,27 @@
 xpt_scanner_thread(void *dummy)
 {
 	union ccb	*ccb;
-	struct cam_sim	*sim;
+	struct cam_path	 path;
 
 	xpt_lock_buses();
 	for (;;) {
 		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
 			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
-			       "ccb_scanq", 0);
+			       "-", 0);
 		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
 			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
 			xpt_unlock_buses();
 
-			sim = ccb->ccb_h.path->bus->sim;
-			CAM_SIM_LOCK(sim);
+			/*
+			 * Since lock can be dropped inside and path freed
+			 * by completion callback even before return here,
+			 * take our own path copy for reference.
+			 */
+			xpt_copy_path(&path, ccb->ccb_h.path);
+			xpt_path_lock(&path);
 			xpt_action(ccb);
-			CAM_SIM_UNLOCK(sim);
+			xpt_path_unlock(&path);
+			xpt_release_path(&path);
 
 			xpt_lock_buses();
 		}
@@ -897,16 +848,17 @@
 	struct cam_path *path;
 	struct cam_devq *devq;
 	cam_status status;
+	int error, i;
 
 	TAILQ_INIT(&xsoftc.xpt_busses);
-	TAILQ_INIT(&cam_simq);
 	TAILQ_INIT(&xsoftc.ccb_scanq);
 	STAILQ_INIT(&xsoftc.highpowerq);
 	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
 
-	mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
 	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
-	mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
+	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
+	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
+	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
 
 #ifdef CAM_BOOT_DELAY
 	/*
@@ -916,7 +868,7 @@
 	xsoftc.boot_delay = CAM_BOOT_DELAY;
 #endif
 	/*
-	 * The xpt layer is, itself, the equivelent of a SIM.
+	 * The xpt layer is, itself, the equivalent of a SIM.
 	 * Allow 16 ccbs in the ccb pool for it.  This should
 	 * give decent parallelism when we probe busses and
 	 * perform other XPT functions.
@@ -941,27 +893,46 @@
 		       " failing attach\n", status);
 		return (EINVAL);
 	}
+	mtx_unlock(&xsoftc.xpt_lock);
 
 	/*
 	 * Looking at the XPT from the SIM layer, the XPT is
-	 * the equivelent of a peripheral driver.  Allocate
+	 * the equivalent of a peripheral driver.  Allocate
 	 * a peripheral driver entry for us.
 	 */
 	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
 				      CAM_TARGET_WILDCARD,
 				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
-		mtx_unlock(&xsoftc.xpt_lock);
 		printf("xpt_init: xpt_create_path failed with status %#x,"
 		       " failing attach\n", status);
 		return (EINVAL);
 	}
-
+	xpt_path_lock(path);
 	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
 			 path, NULL, 0, xpt_sim);
+	xpt_path_unlock(path);
 	xpt_free_path(path);
-	mtx_unlock(&xsoftc.xpt_lock);
-	/* Install our software interrupt handlers */
-	swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
+
+	if (cam_num_doneqs < 1)
+		cam_num_doneqs = 1 + mp_ncpus / 6;
+	else if (cam_num_doneqs > MAXCPU)
+		cam_num_doneqs = MAXCPU;
+	for (i = 0; i < cam_num_doneqs; i++) {
+		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
+		    MTX_DEF);
+		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
+		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
+		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
+		if (error != 0) {
+			cam_num_doneqs = i;
+			break;
+		}
+	}
+	if (cam_num_doneqs < 1) {
+		printf("xpt_init: Cannot init completion queues "
+		       "- failing attach\n");
+		return (ENOMEM);
+	}
 	/*
 	 * Register a callback for when interrupts are enabled.
 	 */
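
The camisr software interrupt is gone: completions are drained by dedicated doneq kthreads. Unless kern.cam.num_doneqs overrides it, the thread count is 1 + mp_ncpus / 6, clamped to at most MAXCPU; e.g. 4 CPUs get 1 queue, 8 CPUs get 2, and 32 CPUs get 6.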
@@ -1006,66 +977,34 @@
 {
 	struct cam_ed *device;
 	int32_t	 status;
-	struct periph_list *periph_head;
 
-	mtx_assert(periph->sim->mtx, MA_OWNED);
-
+	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
 	device = periph->path->device;
-
-	periph_head = &device->periphs;
-
 	status = CAM_REQ_CMP;
-
 	if (device != NULL) {
-		/*
-		 * Make room for this peripheral
-		 * so it will fit in the queue
-		 * when it's scheduled to run
-		 */
-		status = camq_resize(&device->drvq,
-				     device->drvq.array_size + 1);
-
+		mtx_lock(&device->target->bus->eb_mtx);
 		device->generation++;
-
-		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
+		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
+		mtx_unlock(&device->target->bus->eb_mtx);
+		atomic_add_32(&xsoftc.xpt_generation, 1);
 	}
 
-	xpt_lock_buses();
-	xsoftc.xpt_generation++;
-	xpt_unlock_buses();
-
 	return (status);
 }
 
 void
-xpt_remove_periph(struct cam_periph *periph, int topology_lock_held)
+xpt_remove_periph(struct cam_periph *periph)
 {
 	struct cam_ed *device;
 
-	mtx_assert(periph->sim->mtx, MA_OWNED);
-
 	device = periph->path->device;
-
 	if (device != NULL) {
-		struct periph_list *periph_head;
-
-		periph_head = &device->periphs;
-
-		/* Release the slot for this peripheral */
-		camq_resize(&device->drvq, device->drvq.array_size - 1);
-
+		mtx_lock(&device->target->bus->eb_mtx);
 		device->generation++;
-
-		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
+		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
+		mtx_unlock(&device->target->bus->eb_mtx);
+		atomic_add_32(&xsoftc.xpt_generation, 1);
 	}
-
-	if (topology_lock_held == 0)
-		xpt_lock_buses();
-
-	xsoftc.xpt_generation++;
-
-	if (topology_lock_held == 0)
-		xpt_unlock_buses();
 }
 
 
@@ -1074,9 +1013,10 @@
 {
 	struct	cam_path *path = periph->path;
 
-	mtx_assert(periph->sim->mtx, MA_OWNED);
+	cam_periph_assert(periph, MA_OWNED);
+	periph->flags |= CAM_PERIPH_ANNOUNCED;
 
-	printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
+	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
 	       periph->periph_name, periph->unit_number,
 	       path->bus->sim->sim_name,
 	       path->bus->sim->unit_number,
@@ -1083,7 +1023,7 @@
 	       path->bus->sim->bus_id,
 	       path->bus->path_id,
 	       path->target->target_id,
-	       path->device->lun_id);
+	       (uintmax_t)path->device->lun_id);
 	printf("%s%d: ", periph->periph_name, periph->unit_number);
 	if (path->device->protocol == PROTO_SCSI)
 		scsi_print_inquiry(&path->device->inq_data);
@@ -1095,7 +1035,7 @@
 		    (struct sep_identify_data *)&path->device->ident_data);
 	else
 		printf("Unknown protocol device\n");
-	if (bootverbose && path->device->serial_num_len > 0) {
+	if (path->device->serial_num_len > 0) {
 		/* Don't wrap the screen  - print only the first 60 chars */
 		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 		       periph->unit_number, path->device->serial_num);
@@ -1123,17 +1063,50 @@
 	}
 }
 
+void
+xpt_denounce_periph(struct cam_periph *periph)
+{
+	struct	cam_path *path = periph->path;
+
+	cam_periph_assert(periph, MA_OWNED);
+	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
+	       periph->periph_name, periph->unit_number,
+	       path->bus->sim->sim_name,
+	       path->bus->sim->unit_number,
+	       path->bus->sim->bus_id,
+	       path->bus->path_id,
+	       path->target->target_id,
+	       (uintmax_t)path->device->lun_id);
+	printf("%s%d: ", periph->periph_name, periph->unit_number);
+	if (path->device->protocol == PROTO_SCSI)
+		scsi_print_inquiry_short(&path->device->inq_data);
+	else if (path->device->protocol == PROTO_ATA ||
+	    path->device->protocol == PROTO_SATAPM)
+		ata_print_ident_short(&path->device->ident_data);
+	else if (path->device->protocol == PROTO_SEMB)
+		semb_print_ident_short(
+		    (struct sep_identify_data *)&path->device->ident_data);
+	else
+		printf("Unknown protocol device");
+	if (path->device->serial_num_len > 0)
+		printf(" s/n %.60s", path->device->serial_num);
+	printf(" detached\n");
+}
+
+
 int
 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
 {
-	int ret = -1;
+	int ret = -1, l, o;
 	struct ccb_dev_advinfo cdai;
+	struct scsi_vpd_id_descriptor *idd;
 
-	mtx_assert(path->bus->sim->mtx, MA_OWNED);
+	xpt_path_assert(path, MA_OWNED);
 
 	memset(&cdai, 0, sizeof(cdai));
 	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
+	cdai.flags = CDAI_FLAG_NONE;
 	cdai.bufsiz = len;
 
 	if (!strcmp(attr, "GEOM::ident"))
@@ -1140,7 +1113,11 @@
 		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
 	else if (!strcmp(attr, "GEOM::physpath"))
 		cdai.buftype = CDAI_TYPE_PHYS_PATH;
-	else
+	else if (strcmp(attr, "GEOM::lunid") == 0 ||
+		 strcmp(attr, "GEOM::lunname") == 0) {
+		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
+		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
+	} else
 		goto out;
 
 	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
@@ -1153,9 +1130,69 @@
 		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
 	if (cdai.provsiz == 0)
 		goto out;
-	ret = 0;
-	if (strlcpy(buf, cdai.buf, len) >= len)
-		ret = EFAULT;
+	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
+		if (strcmp(attr, "GEOM::lunid") == 0) {
+			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
+			    cdai.provsiz, scsi_devid_is_lun_naa);
+			if (idd == NULL)
+				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
+				    cdai.provsiz, scsi_devid_is_lun_eui64);
+			if (idd == NULL)
+				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
+				    cdai.provsiz, scsi_devid_is_lun_uuid);
+			if (idd == NULL)
+				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
+				    cdai.provsiz, scsi_devid_is_lun_md5);
+		} else
+			idd = NULL;
+		if (idd == NULL)
+			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
+			    cdai.provsiz, scsi_devid_is_lun_t10);
+		if (idd == NULL)
+			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
+			    cdai.provsiz, scsi_devid_is_lun_name);
+		if (idd == NULL)
+			goto out;
+		ret = 0;
+		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
+			if (idd->length < len) {
+				for (l = 0; l < idd->length; l++)
+					buf[l] = idd->identifier[l] ?
+					    idd->identifier[l] : ' ';
+				buf[l] = 0;
+			} else
+				ret = EFAULT;
+		} else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
+			l = strnlen(idd->identifier, idd->length);
+			if (l < len) {
+				bcopy(idd->identifier, buf, l);
+				buf[l] = 0;
+			} else
+				ret = EFAULT;
+		} else if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID
+		    && idd->identifier[0] == 0x10) {
+			if ((idd->length - 2) * 2 + 4 < len) {
+				for (l = 2, o = 0; l < idd->length; l++) {
+					if (l == 6 || l == 8 || l == 10 || l == 12)
+					    o += sprintf(buf + o, "-");
+					o += sprintf(buf + o, "%02x",
+					    idd->identifier[l]);
+				}
+			} else
+				ret = EFAULT;
+		} else {
+			if (idd->length * 2 < len) {
+				for (l = 0; l < idd->length; l++)
+					sprintf(buf + l * 2, "%02x",
+					    idd->identifier[l]);
+			} else
+				ret = EFAULT;
+		}
+	} else {
+		ret = 0;
+		if (strlcpy(buf, cdai.buf, len) >= len)
+			ret = EFAULT;
+	}
 
 out:
 	if (cdai.buf != NULL)
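
The CDAI_TYPE_SCSI_DEVID branch above walks the VPD page in decreasing order of designator stability (NAA, then EUI-64, UUID and MD5, before falling back to the T10 and name designators), and renders binary designators as hex. As a standalone sketch of the UUID case only (hypothetical helper name, not part of this patch), the dash tests at descriptor offsets 6, 8, 10 and 12 are what produce the familiar 8-4-4-4-12 grouping:

#include <stdio.h>

/*
 * Standalone sketch (not part of this patch): format an SPC "locally
 * assigned UUID" designator the way the loop above does.  identifier[]
 * stands in for idd->identifier: byte 0 is the 0x10 tag, byte 1 is
 * reserved, and bytes 2..17 carry the UUID proper.
 */
static void
uuid_desc_to_str(const unsigned char *identifier, int length,
    char *buf, size_t len)
{
	int l, o;

	if ((size_t)((length - 2) * 2 + 4) >= len)
		return;			/* would not fit: the EFAULT case */
	for (l = 2, o = 0; l < length; l++) {
		if (l == 6 || l == 8 || l == 10 || l == 12)
			o += sprintf(buf + o, "-");
		o += sprintf(buf + o, "%02x", identifier[l]);
	}
}

int
main(void)
{
	unsigned char id[18] = { 0x10, 0x00,	/* tag + reserved byte */
	    0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
	    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
	char buf[64] = "";

	uuid_desc_to_str(id, sizeof(id), buf, sizeof(buf));
	printf("%s\n", buf);	/* 12345678-9abc-def0-0123-456789abcdef */
	return (0);
}
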
@@ -1168,7 +1205,7 @@
 	    struct cam_eb *bus)
 {
 	dev_match_ret retval;
-	int i;
+	u_int i;
 
 	retval = DM_RET_NONE;
 
@@ -1280,7 +1317,7 @@
 	       struct cam_ed *device)
 {
 	dev_match_ret retval;
-	int i;
+	u_int i;
 
 	retval = DM_RET_NONE;
 
@@ -1403,7 +1440,7 @@
 	       struct cam_periph *periph)
 {
 	dev_match_ret retval;
-	int i;
+	u_int i;
 
 	/*
 	 * If we aren't given something to match against, that's an error.
@@ -1510,6 +1547,7 @@
 xptedtbusfunc(struct cam_eb *bus, void *arg)
 {
 	struct ccb_dev_match *cdm;
+	struct cam_et *target;
 	dev_match_ret retval;
 
 	cdm = (struct ccb_dev_match *)arg;
@@ -1581,25 +1619,24 @@
 	 * If there is a target generation recorded, check it to
 	 * make sure the target list hasn't changed.
 	 */
+	mtx_lock(&bus->eb_mtx);
 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
-	 && (bus == cdm->pos.cookie.bus)
+	 && (cdm->pos.cookie.bus == bus)
 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
-	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
-	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
-	     bus->generation)) {
-		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
-		return(0);
-	}
+	 && (cdm->pos.cookie.target != NULL)) {
+		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
+		    bus->generation)) {
+			mtx_unlock(&bus->eb_mtx);
+			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+			return (0);
+		}
+		target = (struct cam_et *)cdm->pos.cookie.target;
+		target->refcount++;
+	} else
+		target = NULL;
+	mtx_unlock(&bus->eb_mtx);
 
-	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
-	 && (cdm->pos.cookie.bus == bus)
-	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
-	 && (cdm->pos.cookie.target != NULL))
-		return(xpttargettraverse(bus,
-					(struct cam_et *)cdm->pos.cookie.target,
-					 xptedttargetfunc, arg));
-	else
-		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
+	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
 }
 
 static int
@@ -1606,46 +1643,48 @@
 xptedttargetfunc(struct cam_et *target, void *arg)
 {
 	struct ccb_dev_match *cdm;
+	struct cam_eb *bus;
+	struct cam_ed *device;
 
 	cdm = (struct ccb_dev_match *)arg;
+	bus = target->bus;
 
 	/*
 	 * If there is a device list generation recorded, check it to
 	 * make sure the device list hasn't changed.
 	 */
+	mtx_lock(&bus->eb_mtx);
 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
-	 && (cdm->pos.cookie.bus == target->bus)
+	 && (cdm->pos.cookie.bus == bus)
 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 	 && (cdm->pos.cookie.target == target)
 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
-	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
-	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
-	     target->generation)) {
-		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
-		return(0);
-	}
+	 && (cdm->pos.cookie.device != NULL)) {
+		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
+		    target->generation) {
+			mtx_unlock(&bus->eb_mtx);
+			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+			return(0);
+		}
+		device = (struct cam_ed *)cdm->pos.cookie.device;
+		device->refcount++;
+	} else
+		device = NULL;
+	mtx_unlock(&bus->eb_mtx);
 
-	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
-	 && (cdm->pos.cookie.bus == target->bus)
-	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
-	 && (cdm->pos.cookie.target == target)
-	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
-	 && (cdm->pos.cookie.device != NULL))
-		return(xptdevicetraverse(target,
-					(struct cam_ed *)cdm->pos.cookie.device,
-					 xptedtdevicefunc, arg));
-	else
-		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
+	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
 }
 
 static int
 xptedtdevicefunc(struct cam_ed *device, void *arg)
 {
-
+	struct cam_eb *bus;
+	struct cam_periph *periph;
 	struct ccb_dev_match *cdm;
 	dev_match_ret retval;
 
 	cdm = (struct ccb_dev_match *)arg;
+	bus = device->target->bus;
 
 	/*
 	 * If our position is for something deeper in the tree, that means
@@ -1735,33 +1774,31 @@
 	 * If there is a peripheral list generation recorded, make sure
 	 * it hasn't changed.
 	 */
+	xpt_lock_buses();
+	mtx_lock(&bus->eb_mtx);
 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
-	 && (device->target->bus == cdm->pos.cookie.bus)
+	 && (cdm->pos.cookie.bus == bus)
 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
-	 && (device->target == cdm->pos.cookie.target)
-	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
-	 && (device == cdm->pos.cookie.device)
-	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
-	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
-	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
-	     device->generation)){
-		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
-		return(0);
-	}
-
-	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
-	 && (cdm->pos.cookie.bus == device->target->bus)
-	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 	 && (cdm->pos.cookie.target == device->target)
 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 	 && (cdm->pos.cookie.device == device)
 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
-	 && (cdm->pos.cookie.periph != NULL))
-		return(xptperiphtraverse(device,
-				(struct cam_periph *)cdm->pos.cookie.periph,
-				xptedtperiphfunc, arg));
-	else
-		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
+	 && (cdm->pos.cookie.periph != NULL)) {
+		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
+		    device->generation) {
+			mtx_unlock(&bus->eb_mtx);
+			xpt_unlock_buses();
+			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+			return(0);
+		}
+		periph = (struct cam_periph *)cdm->pos.cookie.periph;
+		periph->refcount++;
+	} else
+		periph = NULL;
+	mtx_unlock(&bus->eb_mtx);
+	xpt_unlock_buses();
+
+	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
 }
 
 static int
@@ -1837,6 +1874,7 @@
 static int
 xptedtmatch(struct ccb_dev_match *cdm)
 {
+	struct cam_eb *bus;
 	int ret;
 
 	cdm->num_matches = 0;
@@ -1845,19 +1883,22 @@
 	 * Check the bus list generation.  If it has changed, the user
 	 * needs to reset everything and start over.
 	 */
+	xpt_lock_buses();
 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
-	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
-	 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
-		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
-		return(0);
-	}
+	 && (cdm->pos.cookie.bus != NULL)) {
+		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
+		    xsoftc.bus_generation) {
+			xpt_unlock_buses();
+			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+			return(0);
+		}
+		bus = (struct cam_eb *)cdm->pos.cookie.bus;
+		bus->refcount++;
+	} else
+		bus = NULL;
+	xpt_unlock_buses();
 
-	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
-	 && (cdm->pos.cookie.bus != NULL))
-		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
-				     xptedtbusfunc, cdm);
-	else
-		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
+	ret = xptbustraverse(bus, xptedtbusfunc, cdm);
 
 	/*
 	 * If we get back 0, that means that we had to stop before fully
@@ -1874,29 +1915,29 @@
 static int
 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 {
+	struct cam_periph *periph;
 	struct ccb_dev_match *cdm;
 
 	cdm = (struct ccb_dev_match *)arg;
 
+	xpt_lock_buses();
 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 	 && (cdm->pos.cookie.pdrv == pdrv)
 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
-	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
-	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
-	     (*pdrv)->generation)) {
-		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
-		return(0);
-	}
+	 && (cdm->pos.cookie.periph != NULL)) {
+		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
+		    (*pdrv)->generation) {
+			xpt_unlock_buses();
+			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+			return(0);
+		}
+		periph = (struct cam_periph *)cdm->pos.cookie.periph;
+		periph->refcount++;
+	} else
+		periph = NULL;
+	xpt_unlock_buses();
 
-	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
-	 && (cdm->pos.cookie.pdrv == pdrv)
-	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
-	 && (cdm->pos.cookie.periph != NULL))
-		return(xptpdperiphtraverse(pdrv,
-				(struct cam_periph *)cdm->pos.cookie.periph,
-				xptplistperiphfunc, arg));
-	else
-		return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
+	return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
 }
 
 static int
@@ -1983,13 +2024,15 @@
 			cdm->matches[j].result.periph_result.target_id =
 				periph->path->target->target_id;
 		else
-			cdm->matches[j].result.periph_result.target_id = -1;
+			cdm->matches[j].result.periph_result.target_id =
+				CAM_TARGET_WILDCARD;
 
 		if (periph->path->device)
 			cdm->matches[j].result.periph_result.target_lun =
 				periph->path->device->lun_id;
 		else
-			cdm->matches[j].result.periph_result.target_lun = -1;
+			cdm->matches[j].result.periph_result.target_lun =
+				CAM_LUN_WILDCARD;
 
 		cdm->matches[j].result.periph_result.unit_number =
 			periph->unit_number;
@@ -2045,71 +2088,34 @@
 	int retval;
 
 	retval = 1;
-
-	xpt_lock_buses();
-	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
-	     bus != NULL;
-	     bus = next_bus) {
-
+	if (start_bus)
+		bus = start_bus;
+	else {
+		xpt_lock_buses();
+		bus = TAILQ_FIRST(&xsoftc.xpt_busses);
+		if (bus == NULL) {
+			xpt_unlock_buses();
+			return (retval);
+		}
 		bus->refcount++;
-
-		/*
-		 * XXX The locking here is obviously very complex.  We
-		 * should work to simplify it.
-		 */
 		xpt_unlock_buses();
-		CAM_SIM_LOCK(bus->sim);
+	}
+	for (; bus != NULL; bus = next_bus) {
 		retval = tr_func(bus, arg);
-		CAM_SIM_UNLOCK(bus->sim);
-
+		if (retval == 0) {
+			xpt_release_bus(bus);
+			break;
+		}
 		xpt_lock_buses();
 		next_bus = TAILQ_NEXT(bus, links);
+		if (next_bus)
+			next_bus->refcount++;
 		xpt_unlock_buses();
-
 		xpt_release_bus(bus);
-
-		if (retval == 0)
-			return(retval);
-		xpt_lock_buses();
 	}
-	xpt_unlock_buses();
-
 	return(retval);
 }
 
-int
-xpt_sim_opened(struct cam_sim *sim)
-{
-	struct cam_eb *bus;
-	struct cam_et *target;
-	struct cam_ed *device;
-	struct cam_periph *periph;
-
-	KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
-	mtx_assert(sim->mtx, MA_OWNED);
-
-	xpt_lock_buses();
-	TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
-		if (bus->sim != sim)
-			continue;
-
-		TAILQ_FOREACH(target, &bus->et_entries, links) {
-			TAILQ_FOREACH(device, &target->ed_entries, links) {
-				SLIST_FOREACH(periph, &device->periphs,
-				    periph_links) {
-					if (periph->refcount > 0) {
-						xpt_unlock_buses();
-						return (1);
-					}
-				}
-			}
-		}
-	}
-
-	xpt_unlock_buses();
-	return (0);
-}
-
 static int
 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 		  xpt_targetfunc_t *tr_func, void *arg)
@@ -2117,24 +2123,32 @@
 	struct cam_et *target, *next_target;
 	int retval;
 
-	mtx_assert(bus->sim->mtx, MA_OWNED);
 	retval = 1;
-	for (target = (start_target ? start_target :
-		       TAILQ_FIRST(&bus->et_entries));
-	     target != NULL; target = next_target) {
-
+	if (start_target)
+		target = start_target;
+	else {
+		mtx_lock(&bus->eb_mtx);
+		target = TAILQ_FIRST(&bus->et_entries);
+		if (target == NULL) {
+			mtx_unlock(&bus->eb_mtx);
+			return (retval);
+		}
 		target->refcount++;
-
+		mtx_unlock(&bus->eb_mtx);
+	}
+	for (; target != NULL; target = next_target) {
 		retval = tr_func(target, arg);
-
+		if (retval == 0) {
+			xpt_release_target(target);
+			break;
+		}
+		mtx_lock(&bus->eb_mtx);
 		next_target = TAILQ_NEXT(target, links);
-
+		if (next_target)
+			next_target->refcount++;
+		mtx_unlock(&bus->eb_mtx);
 		xpt_release_target(target);
-
-		if (retval == 0)
-			return(retval);
 	}
-
 	return(retval);
 }
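
The rewritten traversals above all follow one pattern: pin the current node with a reference, run the callback without the list lock held, then re-take the lock just long enough to fetch and reference the next node before dropping the current one. A userland analog with pthreads (illustrative names only; cam_eb/cam_et refcounting works the same way under eb_mtx):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node	*next;
	int		 refcount;
};

static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static void
node_release(struct node *n)
{
	pthread_mutex_lock(&list_mtx);
	if (--n->refcount == 0)
		free(n);		/* assumes it was already unlinked */
	pthread_mutex_unlock(&list_mtx);
}

static int
traverse(int (*func)(struct node *, void *), void *arg)
{
	struct node *n, *next;
	int retval = 1;

	pthread_mutex_lock(&list_mtx);
	if ((n = list_head) == NULL) {
		pthread_mutex_unlock(&list_mtx);
		return (retval);
	}
	n->refcount++;
	pthread_mutex_unlock(&list_mtx);

	for (; n != NULL; n = next) {
		retval = func(n, arg);	/* runs without the list lock */
		if (retval == 0) {
			node_release(n);
			break;
		}
		pthread_mutex_lock(&list_mtx);
		next = n->next;		/* safe: our reference pins n */
		if (next != NULL)
			next->refcount++;
		pthread_mutex_unlock(&list_mtx);
		node_release(n);
	}
	return (retval);
}

static int
visit(struct node *n, void *arg)
{
	(void)arg;
	printf("visiting %p\n", (void *)n);
	return (1);			/* nonzero: keep walking */
}

int
main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (n == NULL)
		return (1);
	n->refcount = 1;		/* the list's own reference */
	list_head = n;
	return (traverse(visit, NULL) == 1 ? 0 : 1);
}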
 
@@ -2142,36 +2156,39 @@
 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 		  xpt_devicefunc_t *tr_func, void *arg)
 {
+	struct cam_eb *bus;
 	struct cam_ed *device, *next_device;
 	int retval;
 
-	mtx_assert(target->bus->sim->mtx, MA_OWNED);
 	retval = 1;
-	for (device = (start_device ? start_device :
-		       TAILQ_FIRST(&target->ed_entries));
-	     device != NULL;
-	     device = next_device) {
-
-		/*
-		 * Hold a reference so the current device does not go away
-		 * on us.
-		 */
+	bus = target->bus;
+	if (start_device)
+		device = start_device;
+	else {
+		mtx_lock(&bus->eb_mtx);
+		device = TAILQ_FIRST(&target->ed_entries);
+		if (device == NULL) {
+			mtx_unlock(&bus->eb_mtx);
+			return (retval);
+		}
 		device->refcount++;
-
+		mtx_unlock(&bus->eb_mtx);
+	}
+	for (; device != NULL; device = next_device) {
+		mtx_lock(&device->device_mtx);
 		retval = tr_func(device, arg);
-
-		/*
-		 * Grab our next pointer before we release the current
-		 * device.
-		 */
+		mtx_unlock(&device->device_mtx);
+		if (retval == 0) {
+			xpt_release_device(device);
+			break;
+		}
+		mtx_lock(&bus->eb_mtx);
 		next_device = TAILQ_NEXT(device, links);
-
+		if (next_device)
+			next_device->refcount++;
+		mtx_unlock(&bus->eb_mtx);
 		xpt_release_device(device);
-
-		if (retval == 0)
-			return(retval);
 	}
-
 	return(retval);
 }
 
@@ -2179,56 +2196,48 @@
 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 		  xpt_periphfunc_t *tr_func, void *arg)
 {
+	struct cam_eb *bus;
 	struct cam_periph *periph, *next_periph;
 	int retval;
 
 	retval = 1;
 
-	mtx_assert(device->sim->mtx, MA_OWNED);
-	xpt_lock_buses();
-	for (periph = (start_periph ? start_periph :
-		       SLIST_FIRST(&device->periphs));
-	     periph != NULL;
-	     periph = next_periph) {
-
-
-		/*
-		 * In this case, we want to show peripherals that have been
-		 * invalidated, but not peripherals that are scheduled to
-		 * be freed.  So instead of calling cam_periph_acquire(),
-		 * which will fail if the periph has been invalidated, we
-		 * just check for the free flag here.  If it is in the
-		 * process of being freed, we skip to the next periph.
-		 */
-		if (periph->flags & CAM_PERIPH_FREE) {
-			next_periph = SLIST_NEXT(periph, periph_links);
-			continue;
+	bus = device->target->bus;
+	if (start_periph)
+		periph = start_periph;
+	else {
+		xpt_lock_buses();
+		mtx_lock(&bus->eb_mtx);
+		periph = SLIST_FIRST(&device->periphs);
+		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
+			periph = SLIST_NEXT(periph, periph_links);
+		if (periph == NULL) {
+			mtx_unlock(&bus->eb_mtx);
+			xpt_unlock_buses();
+			return (retval);
 		}
-
-		/*
-		 * Acquire a reference to this periph while we call the
-		 * traversal function, so it can't go away.
-		 */
 		periph->refcount++;
-
+		mtx_unlock(&bus->eb_mtx);
+		xpt_unlock_buses();
+	}
+	for (; periph != NULL; periph = next_periph) {
 		retval = tr_func(periph, arg);
-
-		/*
-		 * Grab the next peripheral before we release this one, so
-		 * our next pointer is still valid.
-		 */
+		if (retval == 0) {
+			cam_periph_release_locked(periph);
+			break;
+		}
+		xpt_lock_buses();
+		mtx_lock(&bus->eb_mtx);
 		next_periph = SLIST_NEXT(periph, periph_links);
-
-		cam_periph_release_locked_buses(periph);
-
-		if (retval == 0)
-			goto bailout_done;
+		while (next_periph != NULL &&
+		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
+			next_periph = SLIST_NEXT(next_periph, periph_links);
+		if (next_periph)
+			next_periph->refcount++;
+		mtx_unlock(&bus->eb_mtx);
+		xpt_unlock_buses();
+		cam_periph_release_locked(periph);
 	}
-
-bailout_done:
-
-	xpt_unlock_buses();
-
 	return(retval);
 }
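
xptperiphtraverse() adds one wrinkle to that pattern: entries already scheduled to be freed (CAM_PERIPH_FREE) are stepped over when picking the first or next periph, so they never reach the callback, while merely invalidated periphs still do; the reference is taken under the same lock as the flag test, making the two atomic. A reduced model of just that skip (invented names, built on sys/queue.h):

#include <sys/queue.h>
#include <stdio.h>

#define PERIPH_FREE	0x01

struct periph {
	SLIST_ENTRY(periph)	 links;
	int			 flags;
	int			 refcount;
	const char		*name;
};

SLIST_HEAD(periph_list, periph);

static struct periph *
first_live(struct periph_list *head)
{
	struct periph *p;

	p = SLIST_FIRST(head);
	while (p != NULL && (p->flags & PERIPH_FREE) != 0)
		p = SLIST_NEXT(p, links);
	if (p != NULL)
		p->refcount++;		/* pin before any lock is dropped */
	return (p);
}

int
main(void)
{
	struct periph_list head = SLIST_HEAD_INITIALIZER(head);
	struct periph a = { .flags = PERIPH_FREE, .name = "dying" };
	struct periph b = { .flags = 0, .name = "live" };

	SLIST_INSERT_HEAD(&head, &b, links);
	SLIST_INSERT_HEAD(&head, &a, links);	/* "dying" comes first */
	printf("%s\n", first_live(&head)->name);	/* prints "live" */
	return (0);
}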
 
@@ -2266,57 +2275,42 @@
 		    xpt_periphfunc_t *tr_func, void *arg)
 {
 	struct cam_periph *periph, *next_periph;
-	struct cam_sim *sim;
 	int retval;
 
 	retval = 1;
 
-	xpt_lock_buses();
-	for (periph = (start_periph ? start_periph :
-	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
-	     periph = next_periph) {
-
-
-		/*
-		 * In this case, we want to show peripherals that have been
-		 * invalidated, but not peripherals that are scheduled to
-		 * be freed.  So instead of calling cam_periph_acquire(),
-		 * which will fail if the periph has been invalidated, we
-		 * just check for the free flag here.  If it is free, we
-		 * skip to the next periph.
-		 */
-		if (periph->flags & CAM_PERIPH_FREE) {
-			next_periph = TAILQ_NEXT(periph, unit_links);
-			continue;
+	if (start_periph)
+		periph = start_periph;
+	else {
+		xpt_lock_buses();
+		periph = TAILQ_FIRST(&(*pdrv)->units);
+		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
+			periph = TAILQ_NEXT(periph, unit_links);
+		if (periph == NULL) {
+			xpt_unlock_buses();
+			return (retval);
 		}
-
-		/*
-		 * Acquire a reference to this periph while we call the
-		 * traversal function, so it can't go away.
-		 */
 		periph->refcount++;
-		sim = periph->sim;
 		xpt_unlock_buses();
-		CAM_SIM_LOCK(sim);
+	}
+	for (; periph != NULL; periph = next_periph) {
+		cam_periph_lock(periph);
+		retval = tr_func(periph, arg);
+		cam_periph_unlock(periph);
+		if (retval == 0) {
+			cam_periph_release(periph);
+			break;
+		}
 		xpt_lock_buses();
-		retval = tr_func(periph, arg);
-
-		/*
-		 * Grab the next peripheral before we release this one, so
-		 * our next pointer is still valid.
-		 */
 		next_periph = TAILQ_NEXT(periph, unit_links);
-
-		cam_periph_release_locked_buses(periph);
-		CAM_SIM_UNLOCK(sim);
-
-		if (retval == 0)
-			goto bailout_done;
+		while (next_periph != NULL &&
+		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
+			next_periph = TAILQ_NEXT(next_periph, unit_links);
+		if (next_periph)
+			next_periph->refcount++;
+		xpt_unlock_buses();
+		cam_periph_release(periph);
 	}
-bailout_done:
-
-	xpt_unlock_buses();
-
 	return(retval);
 }
 
@@ -2459,9 +2453,10 @@
 	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 
 	xpt_compile_path(&path, /*periph*/NULL,
-			 bus->sim->path_id,
+			 bus->path_id,
 			 CAM_TARGET_WILDCARD,
 			 CAM_LUN_WILDCARD);
+	xpt_path_lock(&path);
 	xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
 	cpi.ccb_h.func_code = XPT_PATH_INQ;
 	xpt_action((union ccb *)&cpi);
@@ -2468,6 +2463,7 @@
 	csa->callback(csa->callback_arg,
 			    AC_PATH_REGISTERED,
 			    &path, &cpi);
+	xpt_path_unlock(&path);
 	xpt_release_path(&path);
 
 	return(1);
@@ -2487,6 +2483,8 @@
 xpt_action_default(union ccb *start_ccb)
 {
 	struct cam_path *path;
+	struct cam_sim *sim;
+	struct mtx *mtx;
 
 	path = start_ccb->ccb_h.path;
 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
@@ -2536,20 +2534,17 @@
 	case XPT_ENG_EXEC:
 	case XPT_SMP_IO:
 	{
-		int frozen;
+		struct cam_devq *devq;
 
-		frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
-		path->device->sim->devq->alloc_openings += frozen;
-		if (frozen > 0)
-			xpt_run_dev_allocq(path->bus);
-		if (xpt_schedule_dev_sendq(path->bus, path->device))
-			xpt_run_dev_sendq(path->bus);
+		devq = path->bus->sim->devq;
+		mtx_lock(&devq->send_mtx);
+		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
+		if (xpt_schedule_devq(devq, path->device) != 0)
+			xpt_run_devq(devq);
+		mtx_unlock(&devq->send_mtx);
 		break;
 	}
 	case XPT_CALC_GEOMETRY:
-	{
-		struct cam_sim *sim;
-
 		/* Filter out garbage */
 		if (start_ccb->ccg.block_size == 0
 		 || start_ccb->ccg.volume_size == 0) {
@@ -2577,10 +2572,7 @@
 			break;
 		}
 #endif
-		sim = path->bus->sim;
-		(*(sim->sim_action))(sim, start_ccb);
-		break;
-	}
+		goto call_sim;
 	case XPT_ABORT:
 	{
 		union ccb* abort_ccb;
@@ -2594,8 +2586,7 @@
 
 				device = abort_ccb->ccb_h.path->device;
 				ccbq = &device->ccbq;
-				device->sim->devq->alloc_openings -= 
-				    cam_ccbq_remove_ccb(ccbq, abort_ccb);
+				cam_ccbq_remove_ccb(ccbq, abort_ccb);
 				abort_ccb->ccb_h.status =
 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
@@ -2642,21 +2633,20 @@
 	case XPT_NOTIFY_ACKNOWLEDGE:
 	case XPT_GET_SIM_KNOB:
 	case XPT_SET_SIM_KNOB:
-	{
-		struct cam_sim *sim;
-
-		sim = path->bus->sim;
-		(*(sim->sim_action))(sim, start_ccb);
-		break;
-	}
+	case XPT_GET_TRAN_SETTINGS:
+	case XPT_SET_TRAN_SETTINGS:
 	case XPT_PATH_INQ:
-	{
-		struct cam_sim *sim;
-
+call_sim:
 		sim = path->bus->sim;
+		mtx = sim->mtx;
+		if (mtx && !mtx_owned(mtx))
+			mtx_lock(mtx);
+		else
+			mtx = NULL;
 		(*(sim->sim_action))(sim, start_ccb);
+		if (mtx)
+			mtx_unlock(mtx);
 		break;
-	}
 	case XPT_PATH_STATS:
 		start_ccb->cpis.last_reset = path->bus->last_reset;
 		start_ccb->ccb_h.status = CAM_REQ_CMP;
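
The call_sim path above is what lets locked and MPSAFE SIMs coexist: a SIM that still has its own mutex gets it taken around sim_action, and the mtx pointer itself doubles as the record of whether we did so. The idiom in isolation, as a pthread sketch (mtx_owned() has no pthread equivalent, so the ownership fact is passed in by the caller here):

#include <pthread.h>

/*
 * Take the callee's lock only if it has one and we do not already own
 * it, and reuse the pointer to remember the decision (NULL means
 * nothing for us to unlock afterwards).
 */
static void
call_with_optional_lock(pthread_mutex_t *mtx, int already_owned,
    void (*action)(void *), void *arg)
{
	if (mtx != NULL && !already_owned)
		pthread_mutex_lock(mtx);
	else
		mtx = NULL;			/* we took nothing */
	action(arg);
	if (mtx != NULL)
		pthread_mutex_unlock(mtx);
}

A fully MPSAFE SIM registers without a mutex, so both branches collapse into straight calls with no locking at all.
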
@@ -2687,31 +2677,25 @@
 	}
 	case XPT_GDEV_STATS:
 	{
-		struct cam_ed *dev;
+		struct ccb_getdevstats *cgds = &start_ccb->cgds;
+		struct cam_ed *dev = path->device;
+		struct cam_eb *bus = path->bus;
+		struct cam_et *tar = path->target;
+		struct cam_devq *devq = bus->sim->devq;
 
-		dev = path->device;
-		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
-			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
-		} else {
-			struct ccb_getdevstats *cgds;
-			struct cam_eb *bus;
-			struct cam_et *tar;
-
-			cgds = &start_ccb->cgds;
-			bus = path->bus;
-			tar = path->target;
-			cgds->dev_openings = dev->ccbq.dev_openings;
-			cgds->dev_active = dev->ccbq.dev_active;
-			cgds->devq_openings = dev->ccbq.devq_openings;
-			cgds->devq_queued = dev->ccbq.queue.entries;
-			cgds->held = dev->ccbq.held;
-			cgds->last_reset = tar->last_reset;
-			cgds->maxtags = dev->maxtags;
-			cgds->mintags = dev->mintags;
-			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
-				cgds->last_reset = bus->last_reset;
-			cgds->ccb_h.status = CAM_REQ_CMP;
-		}
+		mtx_lock(&devq->send_mtx);
+		cgds->dev_openings = dev->ccbq.dev_openings;
+		cgds->dev_active = dev->ccbq.dev_active;
+		cgds->allocated = dev->ccbq.allocated;
+		cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
+		cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
+		cgds->last_reset = tar->last_reset;
+		cgds->maxtags = dev->maxtags;
+		cgds->mintags = dev->mintags;
+		if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
+			cgds->last_reset = bus->last_reset;
+		mtx_unlock(&devq->send_mtx);
+		cgds->ccb_h.status = CAM_REQ_CMP;
 		break;
 	}
 	case XPT_GDEVLIST:
@@ -2818,11 +2802,6 @@
 				position_type = CAM_DEV_POS_PDRV;
 		}
 
-		/*
-		 * Note that we drop the SIM lock here, because the EDT
-		 * traversal code needs to do its own locking.
-		 */
-		CAM_SIM_UNLOCK(xpt_path_sim(cdm->ccb_h.path));
 		switch(position_type & CAM_DEV_POS_TYPEMASK) {
 		case CAM_DEV_POS_EDT:
 			xptedtmatch(cdm);
@@ -2834,7 +2813,6 @@
 			cdm->status = CAM_DEV_MATCH_ERROR;
 			break;
 		}
-		CAM_SIM_LOCK(xpt_path_sim(cdm->ccb_h.path));
 
 		if (cdm->status == CAM_DEV_MATCH_ERROR)
 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
@@ -2889,6 +2867,8 @@
 				break;
 			}
 			cur_entry->event_enable = csa->event_enable;
+			cur_entry->event_lock = (path->bus->sim->mtx &&
+			    mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
 			cur_entry->callback_arg = csa->callback_arg;
 			cur_entry->callback = csa->callback;
 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
@@ -2923,6 +2903,7 @@
 			}
 		}
 
+		mtx_lock(&dev->sim->devq->send_mtx);
 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 
 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
@@ -2939,9 +2920,9 @@
 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 			}
 
-			callout_reset(&dev->callout,
-			    (crs->release_timeout * hz) / 1000,
-			    xpt_release_devq_timeout, dev);
+			callout_reset_sbt(&dev->callout,
+			    SBT_1MS * crs->release_timeout, 0,
+			    xpt_release_devq_timeout, dev, 0);
 
 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
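
The callout_reset_sbt() conversion just above replaces the ticks arithmetic (release_timeout * hz / 1000) with a fixed-point sbintime_t, where one second is 1 << 32, so scaling milliseconds is a single multiply with no dependence on kern.hz. Host-side arithmetic only, with the SBT_* constants spelled the way sys/time.h defines them:

#include <stdio.h>
#include <stdint.h>

#define SBT_1S	((int64_t)1 << 32)	/* sbintime_t is 32.32 fixed point */
#define SBT_1MS	(SBT_1S / 1000)

int
main(void)
{
	int hz = 1000;			/* a typical kern.hz */
	int release_timeout = 2500;	/* milliseconds, as in ccb_relsim */

	int ticks = (release_timeout * hz) / 1000;	/* old form */
	int64_t sbt = SBT_1MS * release_timeout;	/* new form */

	printf("ticks=%d sbt=%jd (%.6f s)\n", ticks, (intmax_t)sbt,
	    (double)sbt / SBT_1S);
	return (0);
}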
 
@@ -2975,20 +2956,16 @@
 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 			}
 		}
+		mtx_unlock(&dev->sim->devq->send_mtx);
 
-		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
-			xpt_release_devq_rl(path, /*runlevel*/
-			    (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
-				crs->release_timeout : 0,
-			    /*count*/1, /*run_queue*/TRUE);
-		}
-		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0];
+		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
+			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
+		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
 		start_ccb->ccb_h.status = CAM_REQ_CMP;
 		break;
 	}
 	case XPT_DEBUG: {
 		struct cam_path *oldpath;
-		struct cam_sim *oldsim;
 
 		/* Check that all request bits are supported. */
 		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
@@ -2998,18 +2975,12 @@
 
 		cam_dflags = CAM_DEBUG_NONE;
 		if (cam_dpath != NULL) {
-			/* To release the old path we must hold proper lock. */
 			oldpath = cam_dpath;
 			cam_dpath = NULL;
-			oldsim = xpt_path_sim(oldpath);
-			CAM_SIM_UNLOCK(xpt_path_sim(start_ccb->ccb_h.path));
-			CAM_SIM_LOCK(oldsim);
 			xpt_free_path(oldpath);
-			CAM_SIM_UNLOCK(oldsim);
-			CAM_SIM_LOCK(xpt_path_sim(start_ccb->ccb_h.path));
 		}
 		if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
-			if (xpt_create_path(&cam_dpath, xpt_periph,
+			if (xpt_create_path(&cam_dpath, NULL,
 					    start_ccb->ccb_h.path_id,
 					    start_ccb->ccb_h.target_id,
 					    start_ccb->ccb_h.target_lun) !=
@@ -3025,21 +2996,16 @@
 			start_ccb->ccb_h.status = CAM_REQ_CMP;
 		break;
 	}
-	case XPT_FREEZE_QUEUE:
-	{
-		struct ccb_relsim *crs = &start_ccb->crs;
-
-		xpt_freeze_devq_rl(path, /*runlevel*/
-		    (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
-		    crs->release_timeout : 0, /*count*/1);
-		start_ccb->ccb_h.status = CAM_REQ_CMP;
-		break;
-	}
 	case XPT_NOOP:
 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 			xpt_freeze_devq(path, 1);
 		start_ccb->ccb_h.status = CAM_REQ_CMP;
 		break;
+	case XPT_REPROBE_LUN:
+		xpt_async(AC_INQ_CHANGED, path, NULL);
+		start_ccb->ccb_h.status = CAM_REQ_CMP;
+		xpt_done(start_ccb);
+		break;
 	default:
 	case XPT_SDEV_TYPE:
 	case XPT_TERM_IO:
@@ -3062,40 +3028,46 @@
 	struct	  cam_sim *sim;
 	struct	  cam_devq *devq;
 	struct	  cam_ed *dev;
+	struct mtx *mtx;
 
-
 	timeout = start_ccb->ccb_h.timeout * 10;
 	sim = start_ccb->ccb_h.path->bus->sim;
 	devq = sim->devq;
+	mtx = sim->mtx;
 	dev = start_ccb->ccb_h.path->device;
 
-	mtx_assert(sim->mtx, MA_OWNED);
+	mtx_unlock(&dev->device_mtx);
 
-	/* Don't use ISR for this SIM while polling. */
-	sim->flags |= CAM_SIM_POLLED;
-
 	/*
 	 * Steal an opening so that no other queued requests
 	 * can get it before us while we simulate interrupts.
 	 */
-	dev->ccbq.devq_openings--;
+	mtx_lock(&devq->send_mtx);
 	dev->ccbq.dev_openings--;
-
-	while(((devq != NULL && devq->send_openings <= 0) ||
-	   dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
+	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
+	    (--timeout > 0)) {
+		mtx_unlock(&devq->send_mtx);
 		DELAY(100);
+		if (mtx)
+			mtx_lock(mtx);
 		(*(sim->sim_poll))(sim);
-		camisr_runqueue(&sim->sim_doneq);
+		if (mtx)
+			mtx_unlock(mtx);
+		camisr_runqueue();
+		mtx_lock(&devq->send_mtx);
 	}
-
-	dev->ccbq.devq_openings++;
 	dev->ccbq.dev_openings++;
+	mtx_unlock(&devq->send_mtx);
 
 	if (timeout != 0) {
 		xpt_action(start_ccb);
 		while(--timeout > 0) {
+			if (mtx)
+				mtx_lock(mtx);
 			(*(sim->sim_poll))(sim);
-			camisr_runqueue(&sim->sim_doneq);
+			if (mtx)
+				mtx_unlock(mtx);
+			camisr_runqueue();
 			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 			    != CAM_REQ_INPROG)
 				break;
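
The polled path keeps the shape it had: steal a device opening, then alternate the SIM's poll routine with DELAY(100) until the CCB leaves CAM_REQ_INPROG or the timeout (expressed as timeout * 10 spins of 100us each) runs out; only the locking around each poll changed. The loop skeleton as a userland sketch, with poll_fn and done_fn standing in for sim_poll and the status check:

#include <unistd.h>

static int
poll_until_done(int (*poll_fn)(void), int (*done_fn)(void),
    unsigned timeout_ms)
{
	unsigned spins = timeout_ms * 10;	/* 100us per spin */

	while (spins-- > 0) {
		poll_fn();			/* kick the controller */
		if (done_fn())
			return (0);		/* CCB completed */
		usleep(100);			/* DELAY(100) analog */
	}
	return (-1);				/* timed out */
}

static int polled;
static int fake_poll(void) { return (++polled); }
static int fake_done(void) { return (polled > 3); }

int
main(void)
{
	return (poll_until_done(fake_poll, fake_done, 1) == 0 ? 0 : 1);
}
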
@@ -3114,48 +3086,23 @@
 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 	}
 
-	/* We will use CAM ISR for this SIM again. */
-	sim->flags &= ~CAM_SIM_POLLED;
+	mtx_lock(&dev->device_mtx);
 }
 
 /*
- * Schedule a peripheral driver to receive a ccb when it's
+ * Schedule a peripheral driver to receive a ccb when its
  * target device has space for more transactions.
  */
 void
-xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
+xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
 {
-	struct cam_ed *device;
-	int runq = 0;
 
-	mtx_assert(perph->sim->mtx, MA_OWNED);
-
-	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
-	device = perph->path->device;
-	if (periph_is_queued(perph)) {
-		/* Simply reorder based on new priority */
-		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
-			  ("   change priority to %d\n", new_priority));
-		if (new_priority < perph->pinfo.priority) {
-			camq_change_priority(&device->drvq,
-					     perph->pinfo.index,
-					     new_priority);
-			runq = xpt_schedule_dev_allocq(perph->path->bus, device);
-		}
-	} else {
-		/* New entry on the queue */
-		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
-			  ("   added periph to queue\n"));
-		perph->pinfo.priority = new_priority;
-		perph->pinfo.generation = ++device->drvq.generation;
-		camq_insert(&device->drvq, &perph->pinfo);
-		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
+	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
+	cam_periph_assert(periph, MA_OWNED);
+	if (new_priority < periph->scheduled_priority) {
+		periph->scheduled_priority = new_priority;
+		xpt_run_allocq(periph, 0);
 	}
-	if (runq != 0) {
-		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
-			  ("   calling xpt_run_devq\n"));
-		xpt_run_dev_allocq(perph->path->bus);
-	}
 }
 
 
@@ -3167,7 +3114,7 @@
  * started the queue, return 0 so the caller doesn't attempt
  * to run the queue.
  */
-int
+static int
 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 		 u_int32_t new_priority)
 {
@@ -3207,90 +3154,90 @@
 }
 
 static void
-xpt_run_dev_allocq(struct cam_eb *bus)
+xpt_run_allocq_task(void *context, int pending)
 {
-	struct	cam_devq *devq;
+	struct cam_periph *periph = context;
 
-	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
-	devq = bus->sim->devq;
+	cam_periph_lock(periph);
+	periph->flags &= ~CAM_PERIPH_RUN_TASK;
+	xpt_run_allocq(periph, 1);
+	cam_periph_unlock(periph);
+	cam_periph_release(periph);
+}
 
-	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
-			("   qfrozen_cnt == 0x%x, entries == %d, "
-			 "openings == %d, active == %d\n",
-			 devq->alloc_queue.qfrozen_cnt[0],
-			 devq->alloc_queue.entries,
-			 devq->alloc_openings,
-			 devq->alloc_active));
+static void
+xpt_run_allocq(struct cam_periph *periph, int sleep)
+{
+	struct cam_ed	*device;
+	union ccb	*ccb;
+	uint32_t	 prio;
 
-	devq->alloc_queue.qfrozen_cnt[0]++;
-	while ((devq->alloc_queue.entries > 0)
-	    && (devq->alloc_openings > 0)
-	    && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) {
-		struct	cam_ed_qinfo *qinfo;
-		struct	cam_ed *device;
-		union	ccb *work_ccb;
-		struct	cam_periph *drv;
-		struct	camq *drvq;
+	cam_periph_assert(periph, MA_OWNED);
+	if (periph->periph_allocating)
+		return;
+	periph->periph_allocating = 1;
+	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
+	device = periph->path->device;
+	ccb = NULL;
+restart:
+	while ((prio = min(periph->scheduled_priority,
+	    periph->immediate_priority)) != CAM_PRIORITY_NONE &&
+	    (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
+	     device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
 
-		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
-							   CAMQ_HEAD);
-		device = qinfo->device;
-		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
-				("running device %p\n", device));
-
-		drvq = &device->drvq;
-		KASSERT(drvq->entries > 0, ("xpt_run_dev_allocq: "
-		    "Device on queue without any work to do"));
-		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
-			devq->alloc_openings--;
-			devq->alloc_active++;
-			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
-			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
-				      drv->pinfo.priority);
+		if (ccb == NULL &&
+		    (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
+			if (sleep) {
+				ccb = xpt_get_ccb(periph);
+				goto restart;
+			}
+			if (periph->flags & CAM_PERIPH_RUN_TASK)
+				break;
+			cam_periph_doacquire(periph);
+			periph->flags |= CAM_PERIPH_RUN_TASK;
+			taskqueue_enqueue(xsoftc.xpt_taskq,
+			    &periph->periph_run_task);
+			break;
+		}
+		xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
+		if (prio == periph->immediate_priority) {
+			periph->immediate_priority = CAM_PRIORITY_NONE;
 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
-					("calling periph start\n"));
-			drv->periph_start(drv, work_ccb);
+					("waking cam_periph_getccb()\n"));
+			SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
+					  periph_links.sle);
+			wakeup(&periph->ccb_list);
 		} else {
-			/*
-			 * Malloc failure in alloc_ccb
-			 */
-			/*
-			 * XXX add us to a list to be run from free_ccb
-			 * if we don't have any ccbs active on this
-			 * device queue otherwise we may never get run
-			 * again.
-			 */
-			break;
+			periph->scheduled_priority = CAM_PRIORITY_NONE;
+			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
+					("calling periph_start()\n"));
+			periph->periph_start(periph, ccb);
 		}
-
-		/* We may have more work. Attempt to reschedule. */
-		xpt_schedule_dev_allocq(bus, device);
+		ccb = NULL;
 	}
-	devq->alloc_queue.qfrozen_cnt[0]--;
+	if (ccb != NULL)
+		xpt_release_ccb(ccb);
+	periph->periph_allocating = 0;
 }
 
 static void
-xpt_run_dev_sendq(struct cam_eb *bus)
+xpt_run_devq(struct cam_devq *devq)
 {
-	struct	cam_devq *devq;
 	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
+	struct mtx *mtx;
 
-	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
+	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
 
-	devq = bus->sim->devq;
-
-	devq->send_queue.qfrozen_cnt[0]++;
+	devq->send_queue.qfrozen_cnt++;
 	while ((devq->send_queue.entries > 0)
 	    && (devq->send_openings > 0)
-	    && (devq->send_queue.qfrozen_cnt[0] <= 1)) {
-		struct	cam_ed_qinfo *qinfo;
+	    && (devq->send_queue.qfrozen_cnt <= 1)) {
 		struct	cam_ed *device;
 		union ccb *work_ccb;
 		struct	cam_sim *sim;
 
-		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
+		device = (struct cam_ed *)camq_remove(&devq->send_queue,
 							   CAMQ_HEAD);
-		device = qinfo->device;
 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 				("running device %p\n", device));
 
@@ -3302,7 +3249,7 @@
 
 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 
-			mtx_lock(&xsoftc.xpt_lock);
+			mtx_lock(&xsoftc.xpt_highpower_lock);
 		 	if (xsoftc.num_highpower <= 0) {
 				/*
 				 * We got a high power command, but we
@@ -3310,12 +3257,11 @@
 				 * the device queue until we have a slot
 				 * available.
 				 */
-				xpt_freeze_devq(work_ccb->ccb_h.path, 1);
-				STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
-						   &work_ccb->ccb_h,
-						   xpt_links.stqe);
+				xpt_freeze_devq_device(device, 1);
+				STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
+						   highpowerq_entry);
 
-				mtx_unlock(&xsoftc.xpt_lock);
+				mtx_unlock(&xsoftc.xpt_highpower_lock);
 				continue;
 			} else {
 				/*
@@ -3324,17 +3270,16 @@
 				 */
 				xsoftc.num_highpower--;
 			}
-			mtx_unlock(&xsoftc.xpt_lock);
+			mtx_unlock(&xsoftc.xpt_highpower_lock);
 		}
 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
-
 		devq->send_openings--;
 		devq->send_active++;
+		xpt_schedule_devq(devq, device);
+		mtx_unlock(&devq->send_mtx);
 
-		xpt_schedule_dev_sendq(bus, device);
-
-		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
+		if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
 			/*
 			 * The client wants to freeze the queue
 			 * after this CCB is sent.
@@ -3376,15 +3321,22 @@
 		}
 
 		/*
-		 * Device queues can be shared among multiple sim instances
-		 * that reside on different busses.  Use the SIM in the queue
-		 * CCB's path, rather than the one in the bus that was passed
-		 * into this function.
+		 * Device queues can be shared among multiple SIM instances
+		 * that reside on different busses.  Use the SIM from the
+		 * queued device, rather than the one from the calling bus.
 		 */
-		sim = work_ccb->ccb_h.path->bus->sim;
+		sim = device->sim;
+		mtx = sim->mtx;
+		if (mtx && !mtx_owned(mtx))
+			mtx_lock(mtx);
+		else
+			mtx = NULL;
 		(*(sim->sim_action))(sim, work_ccb);
+		if (mtx)
+			mtx_unlock(mtx);
+		mtx_lock(&devq->send_mtx);
 	}
-	devq->send_queue.qfrozen_cnt[0]--;
+	devq->send_queue.qfrozen_cnt--;
 }
 
 /*
@@ -3408,7 +3360,8 @@
 }
 
 void
-xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
+xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
+		    u_int32_t priority, u_int32_t flags)
 {
 
 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
@@ -3426,9 +3379,16 @@
 		ccb_h->target_lun = CAM_TARGET_WILDCARD;
 	}
 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
-	ccb_h->flags = 0;
+	ccb_h->flags = flags;
+	ccb_h->xflags = 0;
 }
 
+void
+xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
+{
+	xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
+}
+
 /* Path manipulation functions */
 cam_status
 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
@@ -3457,26 +3417,9 @@
 			 struct cam_periph *periph, path_id_t path_id,
 			 target_id_t target_id, lun_id_t lun_id)
 {
-	struct	   cam_path *path;
-	struct	   cam_eb *bus = NULL;
-	cam_status status;
 
-	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_WAITOK);
-
-	bus = xpt_find_bus(path_id);
-	if (bus != NULL)
-		CAM_SIM_LOCK(bus->sim);
-	status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
-	if (bus != NULL) {
-		CAM_SIM_UNLOCK(bus->sim);
-		xpt_release_bus(bus);
-	}
-	if (status != CAM_REQ_CMP) {
-		free(path, M_CAMPATH);
-		path = NULL;
-	}
-	*new_path_ptr = path;
-	return (status);
+	return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
+	    lun_id));
 }
 
 cam_status
@@ -3500,6 +3443,8 @@
 	if (bus == NULL) {
 		status = CAM_PATH_INVALID;
 	} else {
+		xpt_lock_buses();
+		mtx_lock(&bus->eb_mtx);
 		target = xpt_find_target(bus, target_id);
 		if (target == NULL) {
 			/* Create one */
@@ -3512,6 +3457,7 @@
 				target = new_target;
 			}
 		}
+		xpt_unlock_buses();
 		if (target != NULL) {
 			device = xpt_find_device(target, lun_id);
 			if (device == NULL) {
@@ -3529,6 +3475,7 @@
 				}
 			}
 		}
+		mtx_unlock(&bus->eb_mtx);
 	}
 
 	/*
@@ -3551,7 +3498,33 @@
 	return (status);
 }
 
+cam_status
+xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
+{
+	struct	   cam_path *new_path;
+
+	new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
+	if (new_path == NULL)
+		return(CAM_RESRC_UNAVAIL);
+	xpt_copy_path(new_path, path);
+	*new_path_ptr = new_path;
+	return (CAM_REQ_CMP);
+}
+
 void
+xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
+{
+
+	*new_path = *path;
+	if (path->bus != NULL)
+		xpt_acquire_bus(path->bus);
+	if (path->target != NULL)
+		xpt_acquire_target(path->target);
+	if (path->device != NULL)
+		xpt_acquire_device(path->device);
+}
+
+void
 xpt_release_path(struct cam_path *path)
 {
 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
@@ -3649,6 +3622,40 @@
 	return (retval);
 }
 
+int
+xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
+{
+	int retval = 0;
+
+	if (path->bus != dev->target->bus) {
+		if (path->bus->path_id == CAM_BUS_WILDCARD)
+			retval = 1;
+		else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
+			retval = 2;
+		else
+			return (-1);
+	}
+	if (path->target != dev->target) {
+		if (path->target->target_id == CAM_TARGET_WILDCARD) {
+			if (retval == 0)
+				retval = 1;
+		} else if (dev->target->target_id == CAM_TARGET_WILDCARD)
+			retval = 2;
+		else
+			return (-1);
+	}
+	if (path->device != dev) {
+		if (path->device->lun_id == CAM_LUN_WILDCARD) {
+			if (retval == 0)
+				retval = 1;
+		} else if (dev->lun_id == CAM_LUN_WILDCARD)
+			retval = 2;
+		else
+			return (-1);
+	}
+	return (retval);
+}
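
xpt_path_comp_dev() follows the xpt_path_comp() convention: -1 for no match, 0 for an exact match, 1 when the path's wildcards cover the device, 2 when the device's wildcards cover the path. A reduced model over plain integer triples, with WILD standing in for the CAM_*_WILDCARD values:

#include <stdio.h>

#define WILD	(-1)

static int
comp_one(int pathv, int devv, int retval)
{
	if (pathv == devv)
		return (retval);
	if (pathv == WILD)
		return (retval == 0 ? 1 : retval);
	if (devv == WILD)
		return (2);
	return (-1);
}

static int
path_comp(const int path[3], const int dev[3])
{
	int i, retval = 0;

	for (i = 0; i < 3 && retval >= 0; i++)
		retval = comp_one(path[i], dev[i], retval);
	return (retval);
}

int
main(void)
{
	int path[3] = { 0, WILD, WILD };	/* scbus0, any target/lun */
	int dev[3]  = { 0, 1, 0 };		/* device at 0:1:0 */

	printf("%d\n", path_comp(path, dev));	/* 1: path covers device */
	return (0);
}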
+
 void
 xpt_print_path(struct cam_path *path)
 {
@@ -3675,7 +3682,7 @@
 			printf("X:");
 
 		if (path->device != NULL)
-			printf("%d): ", path->device->lun_id);
+			printf("%jx): ", (uintmax_t)path->device->lun_id);
 		else
 			printf("X): ");
 	}
@@ -3682,6 +3689,21 @@
 }
 
 void
+xpt_print_device(struct cam_ed *device)
+{
+
+	if (device == NULL)
+		printf("(nopath): ");
+	else {
+		printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
+		       device->sim->unit_number,
+		       device->sim->bus_id,
+		       device->target->target_id,
+		       (uintmax_t)device->lun_id);
+	}
+}
+
+void
 xpt_print(struct cam_path *path, const char *fmt, ...)
 {
 	va_list ap;
@@ -3696,11 +3718,6 @@
 {
 	struct sbuf sb;
 
-#ifdef INVARIANTS
-	if (path != NULL && path->bus != NULL)
-		mtx_assert(path->bus->sim->mtx, MA_OWNED);
-#endif
-
 	sbuf_new(&sb, str, str_len, 0);
 
 	if (path == NULL)
@@ -3725,7 +3742,8 @@
 			sbuf_printf(&sb, "X:");
 
 		if (path->device != NULL)
-			sbuf_printf(&sb, "%d): ", path->device->lun_id);
+			sbuf_printf(&sb, "%jx): ",
+			    (uintmax_t)path->device->lun_id);
 		else
 			sbuf_printf(&sb, "X): ");
 	}
@@ -3768,7 +3786,6 @@
 struct cam_periph*
 xpt_path_periph(struct cam_path *path)
 {
-	mtx_assert(path->bus->sim->mtx, MA_OWNED);
 
 	return (path->periph);
 }
@@ -3821,40 +3838,18 @@
 void
 xpt_release_ccb(union ccb *free_ccb)
 {
-	struct	 cam_path *path;
 	struct	 cam_ed *device;
-	struct	 cam_eb *bus;
-	struct   cam_sim *sim;
+	struct	 cam_periph *periph;
 
 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
-	path = free_ccb->ccb_h.path;
-	device = path->device;
-	bus = path->bus;
-	sim = bus->sim;
+	xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
+	device = free_ccb->ccb_h.path->device;
+	periph = free_ccb->ccb_h.path->periph;
 
-	mtx_assert(sim->mtx, MA_OWNED);
-
+	xpt_free_ccb(free_ccb);
+	periph->periph_allocated--;
 	cam_ccbq_release_opening(&device->ccbq);
-	if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
-		device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
-		cam_ccbq_resize(&device->ccbq,
-		    device->ccbq.dev_openings + device->ccbq.dev_active);
-	}
-	if (sim->ccb_count > sim->max_ccbs) {
-		xpt_free_ccb(free_ccb);
-		sim->ccb_count--;
-	} else {
-		SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
-		    xpt_links.sle);
-	}
-	if (sim->devq == NULL) {
-		return;
-	}
-	sim->devq->alloc_openings++;
-	sim->devq->alloc_active--;
-	if (device_is_alloc_queued(device) == 0)
-		xpt_schedule_dev_allocq(bus, device);
-	xpt_run_dev_allocq(bus);
+	xpt_run_allocq(periph, 0);
 }
 
 /* Functions accessed by SIM drivers */
@@ -3883,22 +3878,16 @@
 	struct cam_path *path;
 	cam_status status;
 
-	mtx_assert(sim->mtx, MA_OWNED);
-
 	sim->bus_id = bus;
 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
-					  M_CAMXPT, M_NOWAIT);
+					  M_CAMXPT, M_NOWAIT|M_ZERO);
 	if (new_bus == NULL) {
 		/* Couldn't satisfy request */
 		return (CAM_RESRC_UNAVAIL);
 	}
-	if (strcmp(sim->sim_name, "xpt") != 0) {
-		sim->path_id =
-		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
-	}
 
+	mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
 	TAILQ_INIT(&new_bus->et_entries);
-	new_bus->path_id = sim->path_id;
 	cam_sim_hold(sim);
 	new_bus->sim = sim;
 	timevalclear(&new_bus->last_reset);
@@ -3907,6 +3896,8 @@
 	new_bus->generation = 0;
 
 	xpt_lock_buses();
+	sim->path_id = new_bus->path_id =
+	    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 	while (old_bus != NULL
 	    && old_bus->path_id < new_bus->path_id)
@@ -3944,6 +3935,7 @@
 		case XPORT_FC:
 		case XPORT_USB:
 		case XPORT_ISCSI:
+		case XPORT_SRP:
 		case XPORT_PPB:
 			new_bus->xport = scsi_get_xport();
 			break;
@@ -3959,15 +3951,25 @@
 
 	/* Notify interested parties */
 	if (sim->path_id != CAM_XPT_PATH_ID) {
-		union	ccb *scan_ccb;
 
 		xpt_async(AC_PATH_REGISTERED, path, &cpi);
-		/* Initiate bus rescan. */
-		scan_ccb = xpt_alloc_ccb_nowait();
-		scan_ccb->ccb_h.path = path;
-		scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
-		scan_ccb->crcn.flags = 0;
-		xpt_rescan(scan_ccb);
+		if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
+			union	ccb *scan_ccb;
+
+			/* Initiate bus rescan. */
+			scan_ccb = xpt_alloc_ccb_nowait();
+			if (scan_ccb != NULL) {
+				scan_ccb->ccb_h.path = path;
+				scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
+				scan_ccb->crcn.flags = 0;
+				xpt_rescan(scan_ccb);
+			} else {
+				xpt_print(path,
+					  "Can't allocate CCB to scan bus\n");
+				xpt_free_path(path);
+			}
+		} else
+			xpt_free_path(path);
 	} else
 		xpt_free_path(path);
 	return (CAM_SUCCESS);
@@ -4001,8 +4003,8 @@
 	path_id_t pathid;
 	const char *strval;
 
+	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
 	pathid = 0;
-	xpt_lock_buses();
 	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 retry:
 	/* Find an unoccupied pathid */
@@ -4011,7 +4013,6 @@
 			pathid++;
 		bus = TAILQ_NEXT(bus, links);
 	}
-	xpt_unlock_buses();
 
 	/*
 	 * Ensure that this pathid is not reserved for
@@ -4020,7 +4021,6 @@
 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 		++pathid;
 		/* Start the search over */
-		xpt_lock_buses();
 		goto retry;
 	}
 	return (pathid);
@@ -4036,6 +4036,8 @@
 
 	pathid = CAM_XPT_PATH_ID;
 	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
+	if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
+		return (pathid);
 	i = 0;
 	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 		if (strcmp(dname, "scbus")) {
@@ -4092,80 +4094,138 @@
 	return ("AC_UNKNOWN");
 }
 
-void
-xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
+static int
+xpt_async_size(u_int32_t async_code)
 {
-	struct cam_eb *bus;
-	struct cam_et *target, *next_target;
-	struct cam_ed *device, *next_device;
 
-	mtx_assert(path->bus->sim->mtx, MA_OWNED);
-	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
-	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
+	switch (async_code) {
+	case AC_BUS_RESET: return (0);
+	case AC_UNSOL_RESEL: return (0);
+	case AC_SCSI_AEN: return (0);
+	case AC_SENT_BDR: return (0);
+	case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
+	case AC_PATH_DEREGISTERED: return (0);
+	case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
+	case AC_LOST_DEVICE: return (0);
+	case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
+	case AC_INQ_CHANGED: return (0);
+	case AC_GETDEV_CHANGED: return (0);
+	case AC_CONTRACT: return (sizeof(struct ac_contract));
+	case AC_ADVINFO_CHANGED: return (-1);
+	case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
+	}
+	return (0);
+}
 
+static int
+xpt_async_process_dev(struct cam_ed *device, void *arg)
+{
+	union ccb *ccb = arg;
+	struct cam_path *path = ccb->ccb_h.path;
+	void *async_arg = ccb->casync.async_arg_ptr;
+	u_int32_t async_code = ccb->casync.async_code;
+	int relock;
+
+	if (path->device != device
+	 && path->device->lun_id != CAM_LUN_WILDCARD
+	 && device->lun_id != CAM_LUN_WILDCARD)
+		return (1);
+
 	/*
-	 * Most async events come from a CAM interrupt context.  In
-	 * a few cases, the error recovery code at the peripheral layer,
-	 * which may run from our SWI or a process context, may signal
-	 * deferred events with a call to xpt_async.
+	 * The async callback could free the device.
+	 * If it is a broadcast async, it doesn't hold
+	 * a device reference, so take our own reference.
 	 */
+	xpt_acquire_device(device);
 
-	bus = path->bus;
+	/*
+	 * If async for specific device is to be delivered to
+	 * the wildcard client, take the specific device lock.
+	 * XXX: We may need a way for the client to specify it.
+	 */
+	if ((device->lun_id == CAM_LUN_WILDCARD &&
+	     path->device->lun_id != CAM_LUN_WILDCARD) ||
+	    (device->target->target_id == CAM_TARGET_WILDCARD &&
+	     path->target->target_id != CAM_TARGET_WILDCARD) ||
+	    (device->target->bus->path_id == CAM_BUS_WILDCARD &&
+	     path->target->bus->path_id != CAM_BUS_WILDCARD)) {
+		mtx_unlock(&device->device_mtx);
+		xpt_path_lock(path);
+		relock = 1;
+	} else
+		relock = 0;
 
-	if (async_code == AC_BUS_RESET) {
-		/* Update our notion of when the last reset occurred */
-		microtime(&bus->last_reset);
+	(*(device->target->bus->xport->async))(async_code,
+	    device->target->bus, device->target, device, async_arg);
+	xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
+
+	if (relock) {
+		xpt_path_unlock(path);
+		mtx_lock(&device->device_mtx);
 	}
+	xpt_release_device(device);
+	return (1);
+}
 
-	for (target = TAILQ_FIRST(&bus->et_entries);
-	     target != NULL;
-	     target = next_target) {
+static int
+xpt_async_process_tgt(struct cam_et *target, void *arg)
+{
+	union ccb *ccb = arg;
+	struct cam_path *path = ccb->ccb_h.path;
 
-		next_target = TAILQ_NEXT(target, links);
+	if (path->target != target
+	 && path->target->target_id != CAM_TARGET_WILDCARD
+	 && target->target_id != CAM_TARGET_WILDCARD)
+		return (1);
 
-		if (path->target != target
-		 && path->target->target_id != CAM_TARGET_WILDCARD
-		 && target->target_id != CAM_TARGET_WILDCARD)
-			continue;
+	if (ccb->casync.async_code == AC_SENT_BDR) {
+		/* Update our notion of when the last reset occurred */
+		microtime(&target->last_reset);
+	}
 
-		if (async_code == AC_SENT_BDR) {
-			/* Update our notion of when the last reset occurred */
-			microtime(&path->target->last_reset);
-		}
+	return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
+}
 
-		for (device = TAILQ_FIRST(&target->ed_entries);
-		     device != NULL;
-		     device = next_device) {
+static void
+xpt_async_process(struct cam_periph *periph, union ccb *ccb)
+{
+	struct cam_eb *bus;
+	struct cam_path *path;
+	void *async_arg;
+	u_int32_t async_code;
 
-			next_device = TAILQ_NEXT(device, links);
+	path = ccb->ccb_h.path;
+	async_code = ccb->casync.async_code;
+	async_arg = ccb->casync.async_arg_ptr;
+	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
+	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
+	bus = path->bus;
 
-			if (path->device != device
-			 && path->device->lun_id != CAM_LUN_WILDCARD
-			 && device->lun_id != CAM_LUN_WILDCARD)
-				continue;
-			/*
-			 * The async callback could free the device.
-			 * If it is a broadcast async, it doesn't hold
-			 * device reference, so take our own reference.
-			 */
-			xpt_acquire_device(device);
-			(*(bus->xport->async))(async_code, bus,
-					       target, device,
-					       async_arg);
-
-			xpt_async_bcast(&device->asyncs, async_code,
-					path, async_arg);
-			xpt_release_device(device);
-		}
+	if (async_code == AC_BUS_RESET) {
+		/* Update our notion of when the last reset occurred */
+		microtime(&bus->last_reset);
 	}
 
+	xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
+
 	/*
 	 * If this wasn't a fully wildcarded async, tell all
 	 * clients that want all async events.
 	 */
-	if (bus != xpt_periph->path->bus)
-		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
-				path, async_arg);
+	if (bus != xpt_periph->path->bus) {
+		xpt_path_lock(xpt_periph->path);
+		xpt_async_process_dev(xpt_periph->path->device, ccb);
+		xpt_path_unlock(xpt_periph->path);
+	}
+
+	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
+		xpt_release_devq(path, 1, TRUE);
+	else
+		xpt_release_simq(path->bus->sim, TRUE);
+	if (ccb->casync.async_arg_size > 0)
+		free(async_arg, M_CAMXPT);
+	xpt_free_path(path);
+	xpt_free_ccb(ccb);
 }
 
 static void
@@ -4174,6 +4234,7 @@
 		struct cam_path *path, void *async_arg)
 {
 	struct async_node *cur_entry;
+	struct mtx *mtx;
 
 	cur_entry = SLIST_FIRST(async_head);
 	while (cur_entry != NULL) {
@@ -4184,107 +4245,178 @@
 		 * can delete its async callback entry.
 		 */
 		next_entry = SLIST_NEXT(cur_entry, links);
-		if ((cur_entry->event_enable & async_code) != 0)
+		if ((cur_entry->event_enable & async_code) != 0) {
+			mtx = cur_entry->event_lock ?
+			    path->device->sim->mtx : NULL;
+			if (mtx)
+				mtx_lock(mtx);
 			cur_entry->callback(cur_entry->callback_arg,
 					    async_code, path,
 					    async_arg);
+			if (mtx)
+				mtx_unlock(mtx);
+		}
 		cur_entry = next_entry;
 	}
 }
 
+void
+xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
+{
+	union ccb *ccb;
+	int size;
+
+	ccb = xpt_alloc_ccb_nowait();
+	if (ccb == NULL) {
+		xpt_print(path, "Can't allocate CCB to send %s\n",
+		    xpt_async_string(async_code));
+		return;
+	}
+
+	if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
+		xpt_print(path, "Can't allocate path to send %s\n",
+		    xpt_async_string(async_code));
+		xpt_free_ccb(ccb);
+		return;
+	}
+	ccb->ccb_h.path->periph = NULL;
+	ccb->ccb_h.func_code = XPT_ASYNC;
+	ccb->ccb_h.cbfcnp = xpt_async_process;
+	ccb->ccb_h.flags |= CAM_UNLOCKED;
+	ccb->casync.async_code = async_code;
+	ccb->casync.async_arg_size = 0;
+	size = xpt_async_size(async_code);
+	if (size > 0 && async_arg != NULL) {
+		ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
+		if (ccb->casync.async_arg_ptr == NULL) {
+			xpt_print(path, "Can't allocate argument to send %s\n",
+			    xpt_async_string(async_code));
+			xpt_free_path(ccb->ccb_h.path);
+			xpt_free_ccb(ccb);
+			return;
+		}
+		memcpy(ccb->casync.async_arg_ptr, async_arg, size);
+		ccb->casync.async_arg_size = size;
+	} else if (size < 0) {
+		ccb->casync.async_arg_ptr = async_arg;
+		ccb->casync.async_arg_size = size;
+	}
+	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
+		xpt_freeze_devq(path, 1);
+	else
+		xpt_freeze_simq(path->bus->sim, 1);
+	xpt_done(ccb);
+}
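
xpt_async() now defers delivery instead of walking the EDT from the caller's context: the event is packaged into an XPT_ASYNC CCB, the argument is copied when xpt_async_size() reports a positive size (or passed through by pointer when it returns -1), the device or SIM queue is frozen to preserve ordering, and xpt_async_process() above runs the callbacks and frees whatever the producer allocated. The ownership hand-off in miniature (invented names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event {
	int	 code;
	void	*arg;
	int	 arg_size;
};

/* Producer side: copy the payload exactly once, like xpt_async(). */
static struct event *
event_create(int code, const void *arg, int size)
{
	struct event *ev;

	if ((ev = calloc(1, sizeof(*ev))) == NULL)
		return (NULL);
	ev->code = code;
	if (size > 0 && arg != NULL) {
		if ((ev->arg = malloc(size)) == NULL) {
			free(ev);
			return (NULL);	/* the "Can't allocate" path */
		}
		memcpy(ev->arg, arg, size);
		ev->arg_size = size;
	}
	return (ev);
}

/* Consumer side: deliver, then free, like xpt_async_process(). */
static void
event_process(struct event *ev, void (*cb)(int, void *))
{
	cb(ev->code, ev->arg);
	if (ev->arg_size > 0)
		free(ev->arg);
	free(ev);
}

static void
print_cb(int code, void *arg)
{
	printf("async %d, arg %d\n", code, *(int *)arg);
}

int
main(void)
{
	int payload = 42;
	struct event *ev = event_create(7, &payload, sizeof(payload));

	if (ev != NULL)
		event_process(ev, print_cb);
	return (0);
}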
+
 static void
 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 		      struct cam_et *target, struct cam_ed *device,
 		      void *async_arg)
 {
+
+	/*
+	 * We only need to handle events for real devices.
+	 */
+	if (target->target_id == CAM_TARGET_WILDCARD
+	 || device->lun_id == CAM_LUN_WILDCARD)
+		return;
+
 	printf("%s called\n", __func__);
 }
 
-u_int32_t
-xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count)
+static uint32_t
+xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
 {
-	struct cam_ed *dev = path->device;
+	struct cam_devq	*devq;
+	uint32_t freeze;
 
-	mtx_assert(path->bus->sim->mtx, MA_OWNED);
-	dev->sim->devq->alloc_openings +=
-	    cam_ccbq_freeze(&dev->ccbq, rl, count);
-	/* Remove frozen device from allocq. */
-	if (device_is_alloc_queued(dev) &&
-	    cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
-	     CAMQ_GET_PRIO(&dev->drvq)))) {
-		camq_remove(&dev->sim->devq->alloc_queue,
-		    dev->alloc_ccb_entry.pinfo.index);
-	}
+	devq = dev->sim->devq;
+	mtx_assert(&devq->send_mtx, MA_OWNED);
+	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
+	    ("xpt_freeze_devq_device(%d) %u->%u\n", count,
+	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
+	freeze = (dev->ccbq.queue.qfrozen_cnt += count);
 	/* Remove frozen device from sendq. */
-	if (device_is_send_queued(dev) &&
-	    cam_ccbq_frozen_top(&dev->ccbq)) {
-		camq_remove(&dev->sim->devq->send_queue,
-		    dev->send_ccb_entry.pinfo.index);
-	}
-	return (dev->ccbq.queue.qfrozen_cnt[rl]);
+	if (device_is_queued(dev))
+		camq_remove(&devq->send_queue, dev->devq_entry.index);
+	return (freeze);
 }
 
 u_int32_t
 xpt_freeze_devq(struct cam_path *path, u_int count)
 {
+	struct cam_ed	*dev = path->device;
+	struct cam_devq	*devq;
+	uint32_t	 freeze;
 
-	return (xpt_freeze_devq_rl(path, 0, count));
+	devq = dev->sim->devq;
+	mtx_lock(&devq->send_mtx);
+	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
+	freeze = xpt_freeze_devq_device(dev, count);
+	mtx_unlock(&devq->send_mtx);
+	return (freeze);
 }
 
 u_int32_t
 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 {
+	struct cam_devq	*devq;
+	uint32_t	 freeze;
 
-	mtx_assert(sim->mtx, MA_OWNED);
-	sim->devq->send_queue.qfrozen_cnt[0] += count;
-	return (sim->devq->send_queue.qfrozen_cnt[0]);
+	devq = sim->devq;
+	mtx_lock(&devq->send_mtx);
+	freeze = (devq->send_queue.qfrozen_cnt += count);
+	mtx_unlock(&devq->send_mtx);
+	return (freeze);
 }
 
 static void
 xpt_release_devq_timeout(void *arg)
 {
-	struct cam_ed *device;
+	struct cam_ed *dev;
+	struct cam_devq *devq;
 
-	device = (struct cam_ed *)arg;
-
-	xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE);
+	dev = (struct cam_ed *)arg;
+	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
+	devq = dev->sim->devq;
+	mtx_assert(&devq->send_mtx, MA_OWNED);
+	if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
+		xpt_run_devq(devq);
 }
 
 void
 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 {
-	mtx_assert(path->bus->sim->mtx, MA_OWNED);
+	struct cam_ed *dev;
+	struct cam_devq *devq;
 
-	xpt_release_devq_device(path->device, /*rl*/0, count, run_queue);
+	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
+	    count, run_queue));
+	dev = path->device;
+	devq = dev->sim->devq;
+	mtx_lock(&devq->send_mtx);
+	if (xpt_release_devq_device(dev, count, run_queue))
+		xpt_run_devq(dev->sim->devq);
+	mtx_unlock(&devq->send_mtx);
 }
 
-void
-xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue)
+static int
+xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 {
-	mtx_assert(path->bus->sim->mtx, MA_OWNED);
 
-	xpt_release_devq_device(path->device, rl, count, run_queue);
-}
-
-static void
-xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue)
-{
-
-	if (count > dev->ccbq.queue.qfrozen_cnt[rl]) {
+	mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
+	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
+	    ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
+	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
+	if (count > dev->ccbq.queue.qfrozen_cnt) {
 #ifdef INVARIANTS
-		printf("xpt_release_devq(%d): requested %u > present %u\n",
-		    rl, count, dev->ccbq.queue.qfrozen_cnt[rl]);
+		printf("xpt_release_devq(): requested %u > present %u\n",
+		    count, dev->ccbq.queue.qfrozen_cnt);
 #endif
-		count = dev->ccbq.queue.qfrozen_cnt[rl];
+		count = dev->ccbq.queue.qfrozen_cnt;
 	}
-	dev->sim->devq->alloc_openings -=
-	    cam_ccbq_release(&dev->ccbq, rl, count);
-	if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
-	    CAMQ_GET_PRIO(&dev->drvq))) == 0) {
-		if (xpt_schedule_dev_allocq(dev->target->bus, dev))
-			xpt_run_dev_allocq(dev->target->bus);
-	}
-	if (cam_ccbq_frozen_top(&dev->ccbq) == 0) {
+	dev->ccbq.queue.qfrozen_cnt -= count;
+	if (dev->ccbq.queue.qfrozen_cnt == 0) {
 		/*
 		 * No longer need to wait for a successful
 		 * command completion.
@@ -4298,33 +4430,32 @@
 			callout_stop(&dev->callout);
 			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 		}
-		if (run_queue == 0)
-			return;
 		/*
 		 * Now that we are unfrozen schedule the
 		 * device so any pending transactions are
 		 * run.
 		 */
-		if (xpt_schedule_dev_sendq(dev->target->bus, dev))
-			xpt_run_dev_sendq(dev->target->bus);
-	}
+		xpt_schedule_devq(dev->sim->devq, dev);
+	} else
+		run_queue = 0;
+	return (run_queue);
 }
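xpt_freeze_devq_device() and xpt_release_devq_device() form a counted pair: every freeze bumps qfrozen_cnt and pulls the device off the send queue, and only the release that brings the count back to zero reschedules the device, returning nonzero so the caller knows to run the queue. A standalone sketch of the counted-freeze idea (not the CAM API itself):

	#include <stdio.h>

	struct dev {
		unsigned qfrozen_cnt;	/* outstanding freezes */
		int	 queued;	/* on the send queue? */
	};

	static unsigned
	dev_freeze(struct dev *d, unsigned count)
	{
		d->qfrozen_cnt += count;
		d->queued = 0;			/* camq_remove() analogue */
		return (d->qfrozen_cnt);
	}

	static int
	dev_release(struct dev *d, unsigned count)
	{
		if (count > d->qfrozen_cnt)
			count = d->qfrozen_cnt;	/* clamp, as the driver does */
		d->qfrozen_cnt -= count;
		if (d->qfrozen_cnt != 0)
			return (0);
		d->queued = 1;			/* xpt_schedule_devq() analogue */
		return (1);			/* tell the caller to run the queue */
	}

	int
	main(void)
	{
		struct dev d = { 0, 1 };

		dev_freeze(&d, 2);		/* two independent freezes */
		printf("run after first release: %d\n", dev_release(&d, 1));
		printf("run after second release: %d\n", dev_release(&d, 1));
		return (0);
	}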
 
 void
 xpt_release_simq(struct cam_sim *sim, int run_queue)
 {
-	struct	camq *sendq;
+	struct cam_devq	*devq;
 
-	mtx_assert(sim->mtx, MA_OWNED);
-	sendq = &(sim->devq->send_queue);
-	if (sendq->qfrozen_cnt[0] <= 0) {
+	devq = sim->devq;
+	mtx_lock(&devq->send_mtx);
+	if (devq->send_queue.qfrozen_cnt <= 0) {
 #ifdef INVARIANTS
 		printf("xpt_release_simq: requested 1 > present %u\n",
-		    sendq->qfrozen_cnt[0]);
+		    devq->send_queue.qfrozen_cnt);
 #endif
 	} else
-		sendq->qfrozen_cnt[0]--;
-	if (sendq->qfrozen_cnt[0] == 0) {
+		devq->send_queue.qfrozen_cnt--;
+	if (devq->send_queue.qfrozen_cnt == 0) {
 		/*
 		 * If there is a timeout scheduled to release this
 		 * sim queue, remove it.  The queue frozen count is
@@ -4335,16 +4466,13 @@
 			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 		}
 		if (run_queue) {
-			struct cam_eb *bus;
-
 			/*
 			 * Now that we are unfrozen run the send queue.
 			 */
-			bus = xpt_find_bus(sim->path_id);
-			xpt_run_dev_sendq(bus);
-			xpt_release_bus(bus);
+			xpt_run_devq(sim->devq);
 		}
 	}
+	mtx_unlock(&devq->send_mtx);
 }
 
 /*
@@ -4362,49 +4490,34 @@
 void
 xpt_done(union ccb *done_ccb)
 {
-	struct cam_sim *sim;
-	int	first;
+	struct cam_doneq *queue;
+	int	run, hash;
 
 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
-	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
-		/*
-		 * Queue up the request for handling by our SWI handler
-		 * any of the "non-immediate" type of ccbs.
-		 */
-		sim = done_ccb->ccb_h.path->bus->sim;
-		TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
-		    sim_links.tqe);
-		done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
-		if ((sim->flags & (CAM_SIM_ON_DONEQ | CAM_SIM_POLLED |
-		    CAM_SIM_BATCH)) == 0) {
-			mtx_lock(&cam_simq_lock);
-			first = TAILQ_EMPTY(&cam_simq);
-			TAILQ_INSERT_TAIL(&cam_simq, sim, links);
-			mtx_unlock(&cam_simq_lock);
-			sim->flags |= CAM_SIM_ON_DONEQ;
-			if (first)
-				swi_sched(cambio_ih, 0);
-		}
-	}
+	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
+		return;
+
+	hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
+	    done_ccb->ccb_h.target_lun) % cam_num_doneqs;
+	queue = &cam_doneqs[hash];
+	mtx_lock(&queue->cam_doneq_mtx);
+	run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
+	STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
+	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
+	mtx_unlock(&queue->cam_doneq_mtx);
+	if (run)
+		wakeup(&queue->cam_doneq);
 }
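Note that xpt_done() no longer funnels everything through a single SWI: it picks one of cam_num_doneqs completion queues by hashing the bus/target/LUN nexus, so completions for a given device always land on the same worker (preserving per-device ordering) while different devices spread across threads. A minimal sketch of the selection (the queue count here is illustrative):

	#include <stdio.h>

	#define NUM_DONEQS	4	/* cam_num_doneqs is sized from the CPU count */

	/* Pick a completion queue the way xpt_done() does: hash the nexus. */
	static int
	doneq_hash(int path_id, int target_id, long lun)
	{
		return ((int)((path_id + target_id + lun) % NUM_DONEQS));
	}

	int
	main(void)
	{
		/* The same device always maps to the same queue... */
		printf("bus0/tgt1/lun0 -> q%d\n", doneq_hash(0, 1, 0));
		printf("bus0/tgt1/lun0 -> q%d\n", doneq_hash(0, 1, 0));
		/* ...while distinct devices spread across the workers. */
		printf("bus0/tgt2/lun0 -> q%d\n", doneq_hash(0, 2, 0));
		printf("bus1/tgt3/lun5 -> q%d\n", doneq_hash(1, 3, 5));
		return (0);
	}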
 
 void
-xpt_batch_start(struct cam_sim *sim)
+xpt_done_direct(union ccb *done_ccb)
 {
 
-	KASSERT((sim->flags & CAM_SIM_BATCH) == 0, ("Batch flag already set"));
-	sim->flags |= CAM_SIM_BATCH;
-}
+	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n"));
+	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
+		return;
 
-void
-xpt_batch_done(struct cam_sim *sim)
-{
-
-	KASSERT((sim->flags & CAM_SIM_BATCH) != 0, ("Batch flag was not set"));
-	sim->flags &= ~CAM_SIM_BATCH;
-	if (!TAILQ_EMPTY(&sim->sim_doneq) &&
-	    (sim->flags & CAM_SIM_ON_DONEQ) == 0)
-		camisr_runqueue(&sim->sim_doneq);
+	xpt_done_process(&done_ccb->ccb_h);
 }
 
 union ccb *
@@ -4437,35 +4550,65 @@
 
 /*
  * Get a CAM control block for the caller. Charge the structure to the device
- * referenced by the path.  If the this device has no 'credits' then the
- * device already has the maximum number of outstanding operations under way
- * and we return NULL. If we don't have sufficient resources to allocate more
- * ccbs, we also return NULL.
+ * referenced by the path.  If we don't have sufficient resources to allocate
+ * more ccbs, we return NULL.
  */
 static union ccb *
-xpt_get_ccb(struct cam_ed *device)
+xpt_get_ccb_nowait(struct cam_periph *periph)
 {
 	union ccb *new_ccb;
-	struct cam_sim *sim;
 
-	sim = device->sim;
-	if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
-		new_ccb = xpt_alloc_ccb_nowait();
-                if (new_ccb == NULL) {
-			return (NULL);
-		}
-		if ((sim->flags & CAM_SIM_MPSAFE) == 0)
-			callout_handle_init(&new_ccb->ccb_h.timeout_ch);
-		SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
-				  xpt_links.sle);
-		sim->ccb_count++;
-	}
-	cam_ccbq_take_opening(&device->ccbq);
-	SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
+	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
+	if (new_ccb == NULL)
+		return (NULL);
+	periph->periph_allocated++;
+	cam_ccbq_take_opening(&periph->path->device->ccbq);
 	return (new_ccb);
 }
 
+static union ccb *
+xpt_get_ccb(struct cam_periph *periph)
+{
+	union ccb *new_ccb;
+
+	cam_periph_unlock(periph);
+	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
+	cam_periph_lock(periph);
+	periph->periph_allocated++;
+	cam_ccbq_take_opening(&periph->path->device->ccbq);
+	return (new_ccb);
+}
+
+union ccb *
+cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
+{
+	struct ccb_hdr *ccb_h;
+
+	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
+	cam_periph_assert(periph, MA_OWNED);
+	while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
+	    ccb_h->pinfo.priority != priority) {
+		if (priority < periph->immediate_priority) {
+			periph->immediate_priority = priority;
+			xpt_run_allocq(periph, 0);
+		} else
+			cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
+			    "cgticb", 0);
+	}
+	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
+	return ((union ccb *)ccb_h);
+}
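cam_periph_getccb() blocks until a CCB of exactly the requested priority reaches the head of the periph's list, lowering immediate_priority to prod the allocator and sleeping otherwise. The wait loop is the classic "sleep until the condition holds" pattern; a pthreads model of it (the kernel uses cam_periph_sleep() and wakeup(), not pthreads):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
	static int head_priority = -1;		/* -1: list empty */

	static void *
	allocator(void *arg)
	{
		pthread_mutex_lock(&mtx);
		head_priority = 1;		/* a priority-1 CCB arrives */
		pthread_cond_broadcast(&cv);	/* wakeup(&periph->ccb_list) */
		pthread_mutex_unlock(&mtx);
		return (NULL);
	}

	static void
	getccb(int priority)
	{
		pthread_mutex_lock(&mtx);
		/* Loop until the head item matches our priority. */
		while (head_priority != priority)
			pthread_cond_wait(&cv, &mtx);	/* cam_periph_sleep() */
		head_priority = -1;		/* SLIST_REMOVE_HEAD() analogue */
		pthread_mutex_unlock(&mtx);
		printf("got a CCB at priority %d\n", priority);
	}

	int
	main(void)
	{
		pthread_t td;

		pthread_create(&td, NULL, allocator, NULL);
		getccb(1);
		pthread_join(td, NULL);
		return (0);
	}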
+
 static void
+xpt_acquire_bus(struct cam_eb *bus)
+{
+
+	xpt_lock_buses();
+	bus->refcount++;
+	xpt_unlock_buses();
+}
+
+static void
 xpt_release_bus(struct cam_eb *bus)
 {
 
@@ -4475,12 +4618,13 @@
 		xpt_unlock_buses();
 		return;
 	}
-	KASSERT(TAILQ_EMPTY(&bus->et_entries),
-	    ("refcount is zero, but target list is not empty"));
 	TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 	xsoftc.bus_generation++;
 	xpt_unlock_buses();
+	KASSERT(TAILQ_EMPTY(&bus->et_entries),
+	    ("destroying bus, but target list is not empty"));
 	cam_sim_release(bus->sim);
+	mtx_destroy(&bus->eb_mtx);
 	free(bus, M_CAMXPT);
 }
 
@@ -4489,7 +4633,8 @@
 {
 	struct cam_et *cur_target, *target;
 
-	mtx_assert(bus->sim->mtx, MA_OWNED);
+	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
+	mtx_assert(&bus->eb_mtx, MA_OWNED);
 	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
 					 M_NOWAIT|M_ZERO);
 	if (target == NULL)
@@ -4501,14 +4646,13 @@
 	target->refcount = 1;
 	target->generation = 0;
 	target->luns = NULL;
+	mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
 	timevalclear(&target->last_reset);
 	/*
 	 * Hold a reference to our parent bus so it
 	 * will not go away before we do.
 	 */
-	xpt_lock_buses();
 	bus->refcount++;
-	xpt_unlock_buses();
 
 	/* Insertion sort into our bus's target list */
 	cur_target = TAILQ_FIRST(&bus->et_entries);
@@ -4524,17 +4668,32 @@
 }
 
 static void
+xpt_acquire_target(struct cam_et *target)
+{
+	struct cam_eb *bus = target->bus;
+
+	mtx_lock(&bus->eb_mtx);
+	target->refcount++;
+	mtx_unlock(&bus->eb_mtx);
+}
+
+static void
 xpt_release_target(struct cam_et *target)
 {
+	struct cam_eb *bus = target->bus;
 
-	mtx_assert(target->bus->sim->mtx, MA_OWNED);
-	if (--target->refcount > 0)
+	mtx_lock(&bus->eb_mtx);
+	if (--target->refcount > 0) {
+		mtx_unlock(&bus->eb_mtx);
 		return;
+	}
+	TAILQ_REMOVE(&bus->et_entries, target, links);
+	bus->generation++;
+	mtx_unlock(&bus->eb_mtx);
 	KASSERT(TAILQ_EMPTY(&target->ed_entries),
-	    ("refcount is zero, but device list is not empty"));
-	TAILQ_REMOVE(&target->bus->et_entries, target, links);
-	target->bus->generation++;
-	xpt_release_bus(target->bus);
+	    ("destroying target, but device list is not empty"));
+	xpt_release_bus(bus);
+	mtx_destroy(&target->luns_mtx);
 	if (target->luns)
 		free(target->luns, M_CAMXPT);
 	free(target, M_CAMXPT);
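The acquire/release pair above shows the locking discipline used throughout this rework: the refcount is only touched under the owning bus's eb_mtx, and the thread that drops the last reference unlinks the object while still holding the lock, then tears it down after dropping it. A minimal userland rendering of that pattern (names are illustrative):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		pthread_mutex_t	*parent_mtx;	/* bus->eb_mtx analogue */
		unsigned	 refcount;
	};

	static void
	obj_acquire(struct obj *o)
	{
		pthread_mutex_lock(o->parent_mtx);
		o->refcount++;
		pthread_mutex_unlock(o->parent_mtx);
	}

	static void
	obj_release(struct obj *o)
	{
		pthread_mutex_lock(o->parent_mtx);
		if (--o->refcount > 0) {
			pthread_mutex_unlock(o->parent_mtx);
			return;
		}
		/* Last reference: unlink under the lock, free outside it. */
		pthread_mutex_unlock(o->parent_mtx);
		printf("destroying object\n");
		free(o);
	}

	int
	main(void)
	{
		static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
		struct obj *o = malloc(sizeof(*o));

		o->parent_mtx = &mtx;
		o->refcount = 1;
		obj_acquire(o);
		obj_release(o);		/* one reference still held */
		obj_release(o);		/* drops to zero and frees */
		return (0);
	}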
@@ -4552,10 +4711,19 @@
 
 	device->mintags = 1;
 	device->maxtags = 1;
-	bus->sim->max_ccbs += device->ccbq.devq_openings;
 	return (device);
 }
 
+static void
+xpt_destroy_device(void *context, int pending)
+{
+	struct cam_ed	*device = context;
+
+	mtx_lock(&device->device_mtx);
+	mtx_destroy(&device->device_mtx);
+	free(device, M_CAMDEV);
+}
+
 struct cam_ed *
 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 {
@@ -4563,10 +4731,12 @@
 	struct cam_devq	*devq;
 	cam_status status;
 
-	mtx_assert(target->bus->sim->mtx, MA_OWNED);
+	mtx_assert(&bus->eb_mtx, MA_OWNED);
 	/* Make space for us in the device queue on our bus */
 	devq = bus->sim->devq;
-	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
+	mtx_lock(&devq->send_mtx);
+	status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
+	mtx_unlock(&devq->send_mtx);
 	if (status != CAM_REQ_CMP)
 		return (NULL);
 
@@ -4575,21 +4745,12 @@
 	if (device == NULL)
 		return (NULL);
 
-	cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
-	device->alloc_ccb_entry.device = device;
-	cam_init_pinfo(&device->send_ccb_entry.pinfo);
-	device->send_ccb_entry.device = device;
+	cam_init_pinfo(&device->devq_entry);
 	device->target = target;
 	device->lun_id = lun_id;
 	device->sim = bus->sim;
-	/* Initialize our queues */
-	if (camq_init(&device->drvq, 0) != 0) {
-		free(device, M_CAMDEV);
-		return (NULL);
-	}
 	if (cam_ccbq_init(&device->ccbq,
 			  bus->sim->max_dev_openings) != 0) {
-		camq_fini(&device->drvq);
 		free(device, M_CAMDEV);
 		return (NULL);
 	}
@@ -4596,12 +4757,18 @@
 	SLIST_INIT(&device->asyncs);
 	SLIST_INIT(&device->periphs);
 	device->generation = 0;
-	device->owner = NULL;
 	device->flags = CAM_DEV_UNCONFIGURED;
 	device->tag_delay_count = 0;
 	device->tag_saved_openings = 0;
 	device->refcount = 1;
-	callout_init_mtx(&device->callout, bus->sim->mtx, 0);
+	mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
+	callout_init_mtx(&device->callout, &devq->send_mtx, 0);
+	TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
+	/*
+	 * Hold a reference to our parent target so it
+	 * will not go away before we do.
+	 */
+	target->refcount++;
 
 	cur_device = TAILQ_FIRST(&target->ed_entries);
 	while (cur_device != NULL && cur_device->lun_id < lun_id)
@@ -4610,7 +4777,6 @@
 		TAILQ_INSERT_BEFORE(cur_device, device, links);
 	else
 		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
-	target->refcount++;
 	target->generation++;
 	return (device);
 }
@@ -4618,36 +4784,45 @@
 void
 xpt_acquire_device(struct cam_ed *device)
 {
+	struct cam_eb *bus = device->target->bus;
 
-	mtx_assert(device->sim->mtx, MA_OWNED);
+	mtx_lock(&bus->eb_mtx);
 	device->refcount++;
+	mtx_unlock(&bus->eb_mtx);
 }
 
 void
 xpt_release_device(struct cam_ed *device)
 {
+	struct cam_eb *bus = device->target->bus;
 	struct cam_devq *devq;
 
-	mtx_assert(device->sim->mtx, MA_OWNED);
-	if (--device->refcount > 0)
+	mtx_lock(&bus->eb_mtx);
+	if (--device->refcount > 0) {
+		mtx_unlock(&bus->eb_mtx);
 		return;
+	}
 
+	TAILQ_REMOVE(&device->target->ed_entries, device, links);
+	device->target->generation++;
+	mtx_unlock(&bus->eb_mtx);
+
+	/* Release our slot in the devq */
+	devq = bus->sim->devq;
+	mtx_lock(&devq->send_mtx);
+	cam_devq_resize(devq, devq->send_queue.array_size - 1);
+	mtx_unlock(&devq->send_mtx);
+
 	KASSERT(SLIST_EMPTY(&device->periphs),
-	    ("refcount is zero, but periphs list is not empty"));
-	if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
-	 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
-		panic("Removing device while still queued for ccbs");
+	    ("destroying device, but periphs list is not empty"));
+	KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
+	    ("destroying device while still queued for ccbs"));
 
 	if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 		callout_stop(&device->callout);
 
-	TAILQ_REMOVE(&device->target->ed_entries, device,links);
-	device->target->generation++;
-	device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
-	/* Release our slot in the devq */
-	devq = device->target->bus->sim->devq;
-	cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
-	camq_fini(&device->drvq);
+	xpt_release_target(device->target);
+
 	cam_ccbq_fini(&device->ccbq);
 	/*
 	 * Free allocated memory.  free(9) does nothing if the
@@ -4656,33 +4831,26 @@
 	 */
 	free(device->supported_vpds, M_CAMXPT);
 	free(device->device_id, M_CAMXPT);
+	free(device->ext_inq, M_CAMXPT);
 	free(device->physpath, M_CAMXPT);
 	free(device->rcap_buf, M_CAMXPT);
 	free(device->serial_num, M_CAMXPT);
-
-	xpt_release_target(device->target);
-	free(device, M_CAMDEV);
+	taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
 }
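Because the callout or a peripheral may still hold device_mtx when the last reference disappears, the final free is punted to a taskqueue: the destroy task takes the device mutex, which drains any lingering holder, before destroying it. A pthreads sketch of deferred destruction (pthreads, unlike mtx_destroy(9), requires the mutex to be unlocked before pthread_mutex_destroy()):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct dev {
		pthread_mutex_t mtx;
	};

	/* The taskqueue handler: runs after the releasing context returns. */
	static void *
	destroy_task(void *arg)
	{
		struct dev *d = arg;

		/* Acquiring the lock drains any thread still inside it... */
		pthread_mutex_lock(&d->mtx);
		pthread_mutex_unlock(&d->mtx);
		/* ...after which the mutex and the object can go away. */
		pthread_mutex_destroy(&d->mtx);
		free(d);
		printf("device destroyed\n");
		return (NULL);
	}

	int
	main(void)
	{
		struct dev *d = malloc(sizeof(*d));
		pthread_t worker;

		pthread_mutex_init(&d->mtx, NULL);
		/* Last reference dropped: hand destruction to the worker. */
		pthread_create(&worker, NULL, destroy_task, d);
		pthread_join(worker, NULL);
		return (0);
	}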
 
 u_int32_t
 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 {
-	int	diff;
 	int	result;
 	struct	cam_ed *dev;
 
 	dev = path->device;
-
-	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
+	mtx_lock(&dev->sim->devq->send_mtx);
 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
-	if (result == CAM_REQ_CMP && (diff < 0)) {
-		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
-	}
+	mtx_unlock(&dev->sim->devq->send_mtx);
 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 	 || (dev->inq_flags & SID_CmdQue) != 0)
 		dev->tag_saved_openings = newopenings;
-	/* Adjust the global limit */
-	dev->sim->max_ccbs += diff;
 	return (result);
 }
 
@@ -4709,7 +4877,7 @@
 {
 	struct cam_et *target;
 
-	mtx_assert(bus->sim->mtx, MA_OWNED);
+	mtx_assert(&bus->eb_mtx, MA_OWNED);
 	for (target = TAILQ_FIRST(&bus->et_entries);
 	     target != NULL;
 	     target = TAILQ_NEXT(target, links)) {
@@ -4726,7 +4894,7 @@
 {
 	struct cam_ed *device;
 
-	mtx_assert(target->bus->sim->mtx, MA_OWNED);
+	mtx_assert(&target->bus->eb_mtx, MA_OWNED);
 	for (device = TAILQ_FIRST(&target->ed_entries);
 	     device != NULL;
 	     device = TAILQ_NEXT(device, links)) {
@@ -4806,10 +4974,12 @@
 	/*
 	 * Now that interrupts are enabled, go find our devices
 	 */
+	if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
+		printf("xpt_config: failed to create taskqueue thread.\n");
 
 	/* Setup debugging path */
 	if (cam_dflags != CAM_DEBUG_NONE) {
-		if (xpt_create_path_unlocked(&cam_dpath, xpt_periph,
+		if (xpt_create_path(&cam_dpath, NULL,
 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 			printf("xpt_config: xpt_create_path() failed for debug"
@@ -4823,10 +4993,11 @@
 	periphdriver_init(1);
 	xpt_hold_boot();
 	callout_init(&xsoftc.boot_callout, 1);
-	callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
-	    xpt_boot_delay, NULL);
+	callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
+	    xpt_boot_delay, NULL, 0);
 	/* Fire up rescan thread. */
-	if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
+	if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
+	    "cam", "scanner")) {
 		printf("xpt_config: failed to create rescan thread.\n");
 	}
 }
@@ -4911,13 +5082,11 @@
 	int xptpath = 0;
 
 	if (path == NULL) {
-		mtx_lock(&xsoftc.xpt_lock);
 		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
-		if (status != CAM_REQ_CMP) {
-			mtx_unlock(&xsoftc.xpt_lock);
+		if (status != CAM_REQ_CMP)
 			return (status);
-		}
+		xpt_path_lock(path);
 		xptpath = 1;
 	}
 
@@ -4930,8 +5099,8 @@
 	status = csa.ccb_h.status;
 
 	if (xptpath) {
+		xpt_path_unlock(path);
 		xpt_free_path(path);
-		mtx_unlock(&xsoftc.xpt_lock);
 	}
 
 	if ((status == CAM_REQ_CMP) &&
@@ -4974,9 +5143,9 @@
 		cpi->max_target = 0;
 		cpi->max_lun = 0;
 		cpi->initiator_id = 0;
-		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
-		strncpy(cpi->hba_vid, "", HBA_IDLEN);
-		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
+		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+		strlcpy(cpi->hba_vid, "", HBA_IDLEN);
+		strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 		cpi->unit_number = sim->unit_number;
 		cpi->bus_id = sim->bus_id;
 		cpi->base_transfer_speed = 0;
@@ -5016,125 +5185,167 @@
 	mtx_unlock(&xsoftc.xpt_topo_lock);
 }
 
-static void
-camisr(void *dummy)
+struct mtx *
+xpt_path_mtx(struct cam_path *path)
 {
-	cam_simq_t queue;
-	struct cam_sim *sim;
 
-	mtx_lock(&cam_simq_lock);
-	TAILQ_INIT(&queue);
-	while (!TAILQ_EMPTY(&cam_simq)) {
-		TAILQ_CONCAT(&queue, &cam_simq, links);
-		mtx_unlock(&cam_simq_lock);
-
-		while ((sim = TAILQ_FIRST(&queue)) != NULL) {
-			TAILQ_REMOVE(&queue, sim, links);
-			CAM_SIM_LOCK(sim);
-			camisr_runqueue(&sim->sim_doneq);
-			sim->flags &= ~CAM_SIM_ON_DONEQ;
-			CAM_SIM_UNLOCK(sim);
-		}
-		mtx_lock(&cam_simq_lock);
-	}
-	mtx_unlock(&cam_simq_lock);
+	return (&path->device->device_mtx);
 }
 
 static void
-camisr_runqueue(void *V_queue)
+xpt_done_process(struct ccb_hdr *ccb_h)
 {
-	cam_isrq_t *queue = V_queue;
-	struct	ccb_hdr *ccb_h;
+	struct cam_sim *sim;
+	struct cam_devq *devq;
+	struct mtx *mtx = NULL;
 
-	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
-		int	runq;
+	if (ccb_h->flags & CAM_HIGH_POWER) {
+		struct highpowerlist	*hphead;
+		struct cam_ed		*device;
 
-		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
-		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
+		mtx_lock(&xsoftc.xpt_highpower_lock);
+		hphead = &xsoftc.highpowerq;
 
-		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
-			  ("camisr\n"));
+		device = STAILQ_FIRST(hphead);
 
-		runq = FALSE;
+		/*
+		 * Increment the count since this command is done.
+		 */
+		xsoftc.num_highpower++;
 
-		if (ccb_h->flags & CAM_HIGH_POWER) {
-			struct highpowerlist	*hphead;
-			union ccb		*send_ccb;
+		/*
+		 * Any high powered commands queued up?
+		 */
+		if (device != NULL) {
 
-			mtx_lock(&xsoftc.xpt_lock);
-			hphead = &xsoftc.highpowerq;
+			STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
+			mtx_unlock(&xsoftc.xpt_highpower_lock);
 
-			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
+			mtx_lock(&device->sim->devq->send_mtx);
+			xpt_release_devq_device(device,
+					 /*count*/1, /*runqueue*/TRUE);
+			mtx_unlock(&device->sim->devq->send_mtx);
+		} else
+			mtx_unlock(&xsoftc.xpt_highpower_lock);
+	}
 
-			/*
-			 * Increment the count since this command is done.
-			 */
-			xsoftc.num_highpower++;
+	sim = ccb_h->path->bus->sim;
 
-			/*
-			 * Any high powered commands queued up?
-			 */
-			if (send_ccb != NULL) {
+	if (ccb_h->status & CAM_RELEASE_SIMQ) {
+		xpt_release_simq(sim, /*run_queue*/FALSE);
+		ccb_h->status &= ~CAM_RELEASE_SIMQ;
+	}
 
-				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
-				mtx_unlock(&xsoftc.xpt_lock);
+	if ((ccb_h->flags & CAM_DEV_QFRZDIS)
+	 && (ccb_h->status & CAM_DEV_QFRZN)) {
+		xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
+		ccb_h->status &= ~CAM_DEV_QFRZN;
+	}
 
-				xpt_release_devq(send_ccb->ccb_h.path,
-						 /*count*/1, /*runqueue*/TRUE);
-			} else
-				mtx_unlock(&xsoftc.xpt_lock);
-		}
+	devq = sim->devq;
+	if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
+		struct cam_ed *dev = ccb_h->path->device;
 
-		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
-			struct cam_ed *dev;
+		mtx_lock(&devq->send_mtx);
+		devq->send_active--;
+		devq->send_openings++;
+		cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 
-			dev = ccb_h->path->device;
+		if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
+		  && (dev->ccbq.dev_active == 0))) {
+			dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
+			xpt_release_devq_device(dev, /*count*/1,
+					 /*run_queue*/FALSE);
+		}
 
-			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
-			ccb_h->path->bus->sim->devq->send_active--;
-			ccb_h->path->bus->sim->devq->send_openings++;
-			runq = TRUE;
+		if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
+		  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
+			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
+			xpt_release_devq_device(dev, /*count*/1,
+					 /*run_queue*/FALSE);
+		}
 
-			if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
-			  && (dev->ccbq.dev_active == 0))) {
-				dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
-				xpt_release_devq(ccb_h->path, /*count*/1,
-						 /*run_queue*/FALSE);
-			}
+		if (!device_is_queued(dev))
+			(void)xpt_schedule_devq(devq, dev);
+		xpt_run_devq(devq);
+		mtx_unlock(&devq->send_mtx);
 
-			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
-			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
-				dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
-				xpt_release_devq(ccb_h->path, /*count*/1,
-						 /*run_queue*/FALSE);
-			}
+		if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
+			mtx = xpt_path_mtx(ccb_h->path);
+			mtx_lock(mtx);
 
 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 			 && (--dev->tag_delay_count == 0))
 				xpt_start_tags(ccb_h->path);
-			if (!device_is_send_queued(dev)) {
-				(void)xpt_schedule_dev_sendq(ccb_h->path->bus, 
-							     dev);
-			}
 		}
+	}
 
-		if (ccb_h->status & CAM_RELEASE_SIMQ) {
-			xpt_release_simq(ccb_h->path->bus->sim,
-					 /*run_queue*/TRUE);
-			ccb_h->status &= ~CAM_RELEASE_SIMQ;
-			runq = FALSE;
+	if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
+		if (mtx == NULL) {
+			mtx = xpt_path_mtx(ccb_h->path);
+			mtx_lock(mtx);
 		}
+	} else {
+		if (mtx != NULL) {
+			mtx_unlock(mtx);
+			mtx = NULL;
+		}
+	}
 
-		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
-		 && (ccb_h->status & CAM_DEV_QFRZN)) {
-			xpt_release_devq(ccb_h->path, /*count*/1,
-					 /*run_queue*/TRUE);
-			ccb_h->status &= ~CAM_DEV_QFRZN;
-		} else if (runq) {
-			xpt_run_dev_sendq(ccb_h->path->bus);
+	/* Call the peripheral driver's callback */
+	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
+	(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
+	if (mtx != NULL)
+		mtx_unlock(mtx);
+}
+
+void
+xpt_done_td(void *arg)
+{
+	struct cam_doneq *queue = arg;
+	struct ccb_hdr *ccb_h;
+	STAILQ_HEAD(, ccb_hdr)	doneq;
+
+	STAILQ_INIT(&doneq);
+	mtx_lock(&queue->cam_doneq_mtx);
+	while (1) {
+		while (STAILQ_EMPTY(&queue->cam_doneq)) {
+			queue->cam_doneq_sleep = 1;
+			msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
+			    PRIBIO, "-", 0);
+			queue->cam_doneq_sleep = 0;
 		}
+		STAILQ_CONCAT(&doneq, &queue->cam_doneq);
+		mtx_unlock(&queue->cam_doneq_mtx);
 
-		/* Call the peripheral driver's callback */
-		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
+		THREAD_NO_SLEEPING();
+		while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
+			STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
+			xpt_done_process(ccb_h);
+		}
+		THREAD_SLEEPING_OK();
+
+		mtx_lock(&queue->cam_doneq_mtx);
 	}
 }
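xpt_done_td() is the per-queue completion worker: it sleeps while its queue is empty, splices the entire shared queue onto a private list with one STAILQ_CONCAT, and drains the batch with the mutex dropped, so the lock is taken once per batch instead of once per CCB. A userland sketch of that batching pattern built on FreeBSD's <sys/queue.h> and pthreads (illustrative; note that glibc's <sys/queue.h> may lack STAILQ_CONCAT):

	#include <pthread.h>
	#include <stdio.h>
	#include <sys/queue.h>

	struct item {
		STAILQ_ENTRY(item) link;
		int id;
	};
	STAILQ_HEAD(itemq, item);

	static pthread_mutex_t qmtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t qcv = PTHREAD_COND_INITIALIZER;
	static struct itemq queue = STAILQ_HEAD_INITIALIZER(queue);
	static int done;

	static void *
	worker(void *arg)
	{
		struct itemq local = STAILQ_HEAD_INITIALIZER(local);
		struct item *it;

		pthread_mutex_lock(&qmtx);
		for (;;) {
			while (STAILQ_EMPTY(&queue) && !done)
				pthread_cond_wait(&qcv, &qmtx);	/* msleep() */
			if (STAILQ_EMPTY(&queue))
				break;			/* shut down once drained */
			STAILQ_CONCAT(&local, &queue);	/* one splice per batch */
			pthread_mutex_unlock(&qmtx);
			while ((it = STAILQ_FIRST(&local)) != NULL) {
				STAILQ_REMOVE_HEAD(&local, link);
				printf("completed item %d\n", it->id);
			}
			pthread_mutex_lock(&qmtx);
		}
		pthread_mutex_unlock(&qmtx);
		return (NULL);
	}

	int
	main(void)
	{
		struct item items[3] = {{ .id = 0 }, { .id = 1 }, { .id = 2 }};
		pthread_t td;
		int i;

		pthread_create(&td, NULL, worker, NULL);
		pthread_mutex_lock(&qmtx);
		for (i = 0; i < 3; i++)
			STAILQ_INSERT_TAIL(&queue, &items[i], link);
		done = 1;
		pthread_cond_signal(&qcv);	/* wakeup(&queue->cam_doneq) */
		pthread_mutex_unlock(&qmtx);
		pthread_join(td, NULL);
		return (0);
	}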
+
+static void
+camisr_runqueue(void)
+{
+	struct	ccb_hdr *ccb_h;
+	struct cam_doneq *queue;
+	int i;
+
+	/* Process global queues. */
+	for (i = 0; i < cam_num_doneqs; i++) {
+		queue = &cam_doneqs[i];
+		mtx_lock(&queue->cam_doneq_mtx);
+		while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
+			STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
+			mtx_unlock(&queue->cam_doneq_mtx);
+			xpt_done_process(ccb_h);
+			mtx_lock(&queue->cam_doneq_mtx);
+		}
+		mtx_unlock(&queue->cam_doneq_mtx);
+	}
+}

Modified: trunk/sys/cam/cam_xpt.h
===================================================================
--- trunk/sys/cam/cam_xpt.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_xpt.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Data structures and definitions for dealing with the 
  * Common Access Method Transport (xpt) layer.
@@ -26,7 +27,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam_xpt.h 292348 2015-12-16 19:01:14Z ken $
  */
 
 #ifndef _CAM_CAM_XPT_H
@@ -35,6 +36,7 @@
 /* Forward Declarations */
 union ccb;
 struct cam_periph;
+struct cam_ed;
 struct cam_sim;
 
 /*
@@ -55,6 +57,7 @@
 struct async_node {
 	SLIST_ENTRY(async_node)	links;
 	u_int32_t	event_enable;	/* Async Event enables */
+	u_int32_t	event_lock;	/* Take SIM lock for handlers. */
 	void		(*callback)(void *arg, u_int32_t code,
 				    struct cam_path *path, void *args);
 	void		*callback_arg;
@@ -68,6 +71,10 @@
 union ccb		*xpt_alloc_ccb(void);
 union ccb		*xpt_alloc_ccb_nowait(void);
 void			xpt_free_ccb(union ccb *free_ccb);
+void			xpt_setup_ccb_flags(struct ccb_hdr *ccb_h,
+					    struct cam_path *path,
+					    u_int32_t priority,
+					    u_int32_t flags);
 void			xpt_setup_ccb(struct ccb_hdr *ccb_h,
 				      struct cam_path *path,
 				      u_int32_t priority);
@@ -89,7 +96,10 @@
 					uint32_t *device_ref);
 int			xpt_path_comp(struct cam_path *path1,
 				      struct cam_path *path2);
+int			xpt_path_comp_dev(struct cam_path *path,
+					  struct cam_ed *dev);
 void			xpt_print_path(struct cam_path *path);
+void			xpt_print_device(struct cam_ed *device);
 void			xpt_print(struct cam_path *path, const char *fmt, ...);
 int			xpt_path_string(struct cam_path *path, char *str,
 					size_t str_len);
@@ -106,6 +116,13 @@
 void			xpt_release_boot(void);
 void			xpt_lock_buses(void);
 void			xpt_unlock_buses(void);
+struct mtx *		xpt_path_mtx(struct cam_path *path);
+#define xpt_path_lock(path)	mtx_lock(xpt_path_mtx(path))
+#define xpt_path_unlock(path)	mtx_unlock(xpt_path_mtx(path))
+#define xpt_path_assert(path, what)	mtx_assert(xpt_path_mtx(path), (what))
+#define xpt_path_owned(path)	mtx_owned(xpt_path_mtx(path))
+#define xpt_path_sleep(path, chan, priority, wmesg, timo)		\
+    msleep((chan), xpt_path_mtx(path), (priority), (wmesg), (timo))
 cam_status		xpt_register_async(int event, ac_callback_t *cbfunc,
 					   void *cbarg, struct cam_path *path);
 cam_status		xpt_compile_path(struct cam_path *new_path,
@@ -113,6 +130,10 @@
 					 path_id_t path_id,
 					 target_id_t target_id,
 					 lun_id_t lun_id);
+cam_status		xpt_clone_path(struct cam_path **new_path,
+				      struct cam_path *path);
+void			xpt_copy_path(struct cam_path *new_path,
+				      struct cam_path *path);
 
 void			xpt_release_path(struct cam_path *path);
 

Modified: trunk/sys/cam/cam_xpt_internal.h
===================================================================
--- trunk/sys/cam/cam_xpt_internal.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_xpt_internal.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright 2009 Scott Long
  * All rights reserved.
@@ -23,12 +24,14 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam_xpt_internal.h 278974 2015-02-18 23:08:13Z ken $
  */
 
 #ifndef _CAM_CAM_XPT_INTERNAL_H
 #define _CAM_CAM_XPT_INTERNAL_H 1
 
+#include <sys/taskqueue.h>
+
 /* Forward Declarations */
 struct cam_eb;
 struct cam_et;
@@ -55,36 +58,20 @@
 };
 
 /*
- * Structure for queueing a device in a run queue.
- * There is one run queue for allocating new ccbs,
- * and another for sending ccbs to the controller.
- */
-struct cam_ed_qinfo {
-	cam_pinfo pinfo;
-	struct	  cam_ed *device;
-};
-
-/*
  * The CAM EDT (Existing Device Table) contains the device information for
  * all devices for all busses in the system.  The table contains a
  * cam_ed structure for each device on the bus.
  */
 struct cam_ed {
+	cam_pinfo	 devq_entry;
 	TAILQ_ENTRY(cam_ed) links;
-	struct	cam_ed_qinfo alloc_ccb_entry;
-	struct	cam_ed_qinfo send_ccb_entry;
 	struct	cam_et	 *target;
 	struct	cam_sim  *sim;
 	lun_id_t	 lun_id;
-	struct	camq drvq;		/*
-					 * Queue of type drivers wanting to do
-					 * work on this device.
-					 */
 	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
 	struct	async_list asyncs;	/* Async callback info for this B/T/L */
 	struct	periph_list periphs;	/* All attached devices */
 	u_int	generation;		/* Generation number */
-	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
 	void		 *quirk;	/* Oddities about this device */
 	u_int		 maxtags;
 	u_int		 mintags;
@@ -97,6 +84,8 @@
 	uint8_t		 supported_vpds_len;
 	uint32_t	 device_id_len;
 	uint8_t		 *device_id;
+	uint32_t	 ext_inq_len;
+	uint8_t		 *ext_inq;
 	uint8_t		 physpath_len;
 	uint8_t		 *physpath;	/* physical path string form */
 	uint32_t	 rcap_len;
@@ -116,7 +105,6 @@
 #define CAM_DEV_REL_TIMEOUT_PENDING	0x02
 #define CAM_DEV_REL_ON_COMPLETE		0x04
 #define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
-#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
 #define CAM_DEV_TAG_AFTER_COUNT		0x20
 #define CAM_DEV_INQUIRY_DATA_VALID	0x40
 #define	CAM_DEV_IN_DV			0x80
@@ -127,6 +115,9 @@
 	u_int32_t	 tag_saved_openings;
 	u_int32_t	 refcount;
 	struct callout	 callout;
+	STAILQ_ENTRY(cam_ed) highpowerq_entry;
+	struct mtx	 device_mtx;
+	struct task	 device_destroy_task;
 };
 
 /*
@@ -145,6 +136,7 @@
 	struct		timeval last_reset;
 	u_int		rpl_size;
 	struct scsi_report_luns_data *luns;
+	struct mtx	luns_mtx;	/* Protection for luns field. */
 };
 
 /*
@@ -164,6 +156,7 @@
 	u_int		     generation;
 	device_t	     parent_dev;
 	struct xpt_xport     *xport;
+	struct mtx	     eb_mtx;	/* Bus topology mutex. */
 };
 
 struct cam_path {
@@ -181,8 +174,6 @@
 					 lun_id_t lun_id);
 void			xpt_acquire_device(struct cam_ed *device);
 void			xpt_release_device(struct cam_ed *device);
-int			xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
-					 u_int32_t new_priority);
 u_int32_t		xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
 void			xpt_start_tags(struct cam_path *path);
 void			xpt_stop_tags(struct cam_path *path);

Modified: trunk/sys/cam/cam_xpt_periph.h
===================================================================
--- trunk/sys/cam/cam_xpt_periph.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_xpt_periph.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Data structures and definitions for dealing with the 
  * Common Access Method Transport (xpt) layer from peripheral
@@ -27,7 +28,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam_xpt_periph.h 257049 2013-10-24 10:33:31Z mav $
  */
 
 #ifndef _CAM_CAM_XPT_PERIPH_H
@@ -42,12 +43,12 @@
 void		xpt_release_ccb(union ccb *released_ccb);
 void		xpt_schedule(struct cam_periph *perph, u_int32_t new_priority);
 int32_t		xpt_add_periph(struct cam_periph *periph);
-void		xpt_remove_periph(struct cam_periph *periph,
-				  int topology_lock_held);
+void		xpt_remove_periph(struct cam_periph *periph);
 void		xpt_announce_periph(struct cam_periph *periph,
 				    char *announce_string);
 void		xpt_announce_quirks(struct cam_periph *periph,
 				    int quirks, char *bit_string);
+void		xpt_denounce_periph(struct cam_periph *periph);
 #endif
 
 #endif /* _CAM_CAM_XPT_PERIPH_H */

Modified: trunk/sys/cam/cam_xpt_sim.h
===================================================================
--- trunk/sys/cam/cam_xpt_sim.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/cam_xpt_sim.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Data structures and definitions for dealing with the 
  * Common Access Method Transport (xpt) layer.
@@ -26,7 +27,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/cam_xpt_sim.h 260387 2014-01-07 01:51:48Z scottl $
  */
 
 #ifndef _CAM_CAM_XPT_SIM_H
@@ -43,16 +44,10 @@
 u_int32_t	xpt_freeze_simq(struct cam_sim *sim, u_int count);
 void		xpt_release_simq(struct cam_sim *sim, int run_queue);
 u_int32_t	xpt_freeze_devq(struct cam_path *path, u_int count);
-u_int32_t	xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl,
-		    u_int count);
 void		xpt_release_devq(struct cam_path *path,
 		    u_int count, int run_queue);
-void		xpt_release_devq_rl(struct cam_path *path, cam_rl rl,
-		    u_int count, int run_queue);
-int		xpt_sim_opened(struct cam_sim *sim);
 void		xpt_done(union ccb *done_ccb);
-void		xpt_batch_start(struct cam_sim *sim);
-void		xpt_batch_done(struct cam_sim *sim);
+void		xpt_done_direct(union ccb *done_ccb);
 #endif
 
 #endif /* _CAM_CAM_XPT_SIM_H */

Modified: trunk/sys/cam/ctl/README.ctl.txt
===================================================================
--- trunk/sys/cam/ctl/README.ctl.txt	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/README.ctl.txt	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,4 +1,4 @@
-/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/cam/ctl/README.ctl.txt 288810 2015-10-05 11:30:18Z mav $ */
 
 CTL - CAM Target Layer Description
 
@@ -19,9 +19,9 @@
 Introduction:
 ============
 
-CTL is a disk and processor device emulation subsystem originally written
-for Copan Systems under Linux starting in 2003.  It has been shipping in
-Copan (now SGI) products since 2005.
+CTL is a disk, processor and cdrom device emulation subsystem originally
+written for Copan Systems under Linux starting in 2003.  It has been
+shipping in Copan (now SGI) products since 2005.
 
 It was ported to FreeBSD in 2008, and thanks to an agreement between SGI
 (who acquired Copan's assets in 2010) and Spectra Logic in 2010, CTL is
@@ -31,7 +31,7 @@
 Features:
 ========
 
- - Disk and processor device emulation.
+ - Disk, processor and cdrom device emulation.
  - Tagged queueing
  - SCSI task attribute support (ordered, head of queue, simple tags)
  - SCSI implicit command ordering support.  (e.g. if a read follows a mode
@@ -40,28 +40,24 @@
  - Support for multiple ports
  - Support for multiple simultaneous initiators
  - Support for multiple simultaneous backing stores
+ - Support for VMware VAAI: COMPARE AND WRITE, XCOPY, WRITE SAME and
+   UNMAP commands
+ - Support for Microsoft ODX: POPULATE TOKEN/WRITE USING TOKEN, WRITE SAME
+   and UNMAP commands
  - Persistent reservation support
  - Mode sense/select support
  - Error injection support
- - High Availability support (1)
+ - High Availability clustering support with ALUA
  - All I/O handled in-kernel, no userland context switch overhead.
 
-(1) HA Support is just an API stub, and needs much more to be fully
-    functional.  See the to-do list below.
-
 Configuring and Running CTL:
 ===========================
 
- - After applying the CTL patchset to your tree, build world and install it
-   on your target system.
+ - Add 'device ctl' to your kernel configuration file or load the module.
 
- - Add 'device ctl' to your kernel configuration file.
-
 - If you're running with an 8Gb or 4Gb Qlogic FC board, add
-   'options ISP_TARGET_MODE' to your kernel config file.  Keep in mind that
-   the isp(4) driver can run in target or initiator mode, but not both on
-   the same machine.  'device ispfw' or loading the ispfw module is also
-   recommended.
+   'options ISP_TARGET_MODE' to your kernel config file. 'device ispfw' or
+   loading the ispfw module is also recommended.
 
  - Rebuild and install a new kernel.
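
   As a quick example, a minimal file-backed LUN can be brought up like
   this (paths and sizes are illustrative; see ctladm(8) for the full
   syntax):

	kldload ctl
	truncate -s 1G /var/ctl/lun0.img
	ctladm create -b block -o file=/var/ctl/lun0.img
	ctladm port -o on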
 
@@ -245,27 +241,6 @@
    another data structure in the stack, more memory allocations, etc.  This
    will also require changes to the CAM CCB structure to support CTL.
 
- - Full-featured High Availability support.  The HA API that is in ctl_ha.h
-   is essentially a renamed version of Copan's HA API.  There is no
-   substance to it, but it remains in CTL to show what needs to be done to
-   implement active/active HA from a CTL standpoint.  The things that would
-   need to be done include:
-	- A kernel level software API for message passing as well as DMA
-	  between at least two nodes.
-	- Hardware support and drivers for inter-node communication.  This
-	  could be as simples as ethernet hardware and drivers.
-	- A "supervisor", or startup framework to control and coordinate
-	  HA startup, failover (going from active/active to single mode),
-	  and failback (going from single mode to active/active).
-	- HA support in other components of the stack.  The goal behind HA
-	  is that one node can fail and another node can seamlessly take
-	  over handling I/O requests.  This requires support from pretty
-	  much every component in the storage stack, from top to bottom.
-	  CTL is one piece of it, but you also need support in the RAID
-	  stack/filesystem/backing store.  You also need full configuration
-	  mirroring, and all peer nodes need to be able to talk to the
-	  underlying storage hardware.
-
 Code Roadmap:
 ============
 
@@ -318,7 +293,6 @@
 explain the API.
 
 ctl_backend_block.c
-ctl_backend_block.h:
 -------------------
 
 The block and file backend.  This allows for using a disk or a file as the
@@ -366,21 +340,11 @@
 frontend allows for using CTL without any target-capable hardware.  So any
 LUNs you create in CTL are visible via this port.
 
-
-ctl_frontend_internal.c
-ctl_frontend_internal.h:
------------------------
-
-This is a frontend port written for Copan to do some system-specific tasks
-that required sending commands into CTL from inside the kernel.  This isn't
-entirely relevant to FreeBSD in general, but can perhaps be repurposed or
-removed later.
-
+ctl_ha.c:
 ctl_ha.h:
 --------
 
-This is a stubbed-out High Availability API.  See the comments in the
-header and the description of what is needed as far as HA support above.
+This is a High Availability API and TCP-based interlink implementation.
 
 ctl_io.h:
 --------
@@ -394,14 +358,6 @@
 This defines all ioctls available through the CTL character device, and
 the data structures needed for those ioctls.
 
-ctl_mem_pool.c
-ctl_mem_pool.h:
---------------
-
-Generic memory pool implementation.  This is currently only used by the
-internal frontend.  The internal frontend can probably be rewritten to use
-UMA zones and this can be removed.
-
 ctl_private.h:
 -------------
 


Property changes on: trunk/sys/cam/ctl/README.ctl.txt
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Modified: trunk/sys/cam/ctl/ctl.c
===================================================================
--- trunk/sys/cam/ctl/ctl.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,6 +1,8 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003-2009 Silicon Graphics International Corp.
  * Copyright (c) 2012 The FreeBSD Foundation
+ * Copyright (c) 2014-2017 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Portions of this software were developed by Edward Tomasz Napierala
@@ -31,7 +33,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id$
  */
 /*
  * CAM Target Layer, a SCSI device emulation subsystem.
@@ -42,10 +44,11 @@
 #define _CTL_C
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 317320 2017-04-23 07:35:51Z mav $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/ctype.h>
 #include <sys/kernel.h>
 #include <sys/types.h>
 #include <sys/kthread.h>
@@ -60,16 +63,18 @@
 #include <sys/ioccom.h>
 #include <sys/queue.h>
 #include <sys/sbuf.h>
+#include <sys/smp.h>
 #include <sys/endian.h>
 #include <sys/sysctl.h>
+#include <vm/uma.h>
 
 #include <cam/cam.h>
 #include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_cd.h>
 #include <cam/scsi/scsi_da.h>
 #include <cam/ctl/ctl_io.h>
 #include <cam/ctl/ctl.h>
 #include <cam/ctl/ctl_frontend.h>
-#include <cam/ctl/ctl_frontend_internal.h>
 #include <cam/ctl/ctl_util.h>
 #include <cam/ctl/ctl_backend.h>
 #include <cam/ctl/ctl_ioctl.h>
@@ -82,39 +87,6 @@
 struct ctl_softc *control_softc = NULL;
 
 /*
- * The default is to run with CTL_DONE_THREAD turned on.  Completed
- * transactions are queued for processing by the CTL work thread.  When
- * CTL_DONE_THREAD is not defined, completed transactions are processed in
- * the caller's context.
- */
-#define CTL_DONE_THREAD
-
-/*
- * Use the serial number and device ID provided by the backend, rather than
- * making up our own.
- */
-#define CTL_USE_BACKEND_SN
-
-/*
- * Size and alignment macros needed for Copan-specific HA hardware.  These
- * can go away when the HA code is re-written, and uses busdma for any
- * hardware.
- */
-#define	CTL_ALIGN_8B(target, source, type)				\
-	if (((uint32_t)source & 0x7) != 0)				\
-		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
-	else								\
-		target = (type)source;
-
-#define	CTL_SIZE_8B(target, size)					\
-	if ((size & 0x7) != 0)						\
-		target = size + (0x8 - (size & 0x7));			\
-	else								\
-		target = size;
-
-#define CTL_ALIGN_8B_MARGIN	16
-
-/*
  * Template mode pages.
  */
 
@@ -122,76 +94,35 @@
  * Note that these are default values only.  The actual values will be
  * filled in when the user does a mode sense.
  */
-static struct copan_power_subpage power_page_default = {
-	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
-	/*subpage*/ PWR_SUBPAGE_CODE,
-	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
-			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
-	/*page_version*/ PWR_VERSION,
-	/* total_luns */ 26,
-	/* max_active_luns*/ PWR_DFLT_MAX_LUNS,
-	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
-		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		      0, 0, 0, 0, 0, 0}
+const static struct scsi_da_rw_recovery_page rw_er_page_default = {
+	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
+	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
+	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
+	/*read_retry_count*/0,
+	/*correction_span*/0,
+	/*head_offset_count*/0,
+	/*data_strobe_offset_cnt*/0,
+	/*byte8*/SMS_RWER_LBPERE,
+	/*write_retry_count*/0,
+	/*reserved2*/0,
+	/*recovery_time_limit*/{0, 0},
 };
 
-static struct copan_power_subpage power_page_changeable = {
-	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
-	/*subpage*/ PWR_SUBPAGE_CODE,
-	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
-			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
-	/*page_version*/ 0,
-	/* total_luns */ 0,
-	/* max_active_luns*/ 0,
-	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
-		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		      0, 0, 0, 0, 0, 0}
+const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
+	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
+	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
+	/*byte3*/SMS_RWER_PER,
+	/*read_retry_count*/0,
+	/*correction_span*/0,
+	/*head_offset_count*/0,
+	/*data_strobe_offset_cnt*/0,
+	/*byte8*/SMS_RWER_LBPERE,
+	/*write_retry_count*/0,
+	/*reserved2*/0,
+	/*recovery_time_limit*/{0, 0},
 };
 
-static struct copan_aps_subpage aps_page_default = {
-	APS_PAGE_CODE | SMPH_SPF, //page_code
-	APS_SUBPAGE_CODE, //subpage
-	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
-	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
-	APS_VERSION, //page_version
-	0, //lock_active
-	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0} //reserved
-};
-
-static struct copan_aps_subpage aps_page_changeable = {
-	APS_PAGE_CODE | SMPH_SPF, //page_code
-	APS_SUBPAGE_CODE, //subpage
-	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
-	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
-	0, //page_version
-	0, //lock_active
-	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0} //reserved
-};
-
-static struct copan_debugconf_subpage debugconf_page_default = {
-	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
-	DBGCNF_SUBPAGE_CODE,		/* subpage */
-	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
-	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
-	DBGCNF_VERSION,			/* page_version */
-	{CTL_TIME_IO_DEFAULT_SECS>>8,
-	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
-};
-
-static struct copan_debugconf_subpage debugconf_page_changeable = {
-	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
-	DBGCNF_SUBPAGE_CODE,		/* subpage */
-	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
-	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
-	0,				/* page_version */
-	{0xff,0xff},			/* ctl_time_io_secs */
-};
-
-static struct scsi_format_page format_page_default = {
+const static struct scsi_format_page format_page_default = {
 	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
 	/*page_length*/sizeof(struct scsi_format_page) - 2,
 	/*tracks_per_zone*/ {0, 0},
@@ -208,7 +139,7 @@
 	/*reserved*/ {0, 0, 0}
 };
 
-static struct scsi_format_page format_page_changeable = {
+const static struct scsi_format_page format_page_changeable = {
 	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
 	/*page_length*/sizeof(struct scsi_format_page) - 2,
 	/*tracks_per_zone*/ {0, 0},
@@ -224,7 +155,7 @@
 	/*reserved*/ {0, 0, 0}
 };
 
-static struct scsi_rigid_disk_page rigid_disk_page_default = {
+const static struct scsi_rigid_disk_page rigid_disk_page_default = {
 	/*page_code*/SMS_RIGID_DISK_PAGE,
 	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
 	/*cylinders*/ {0, 0, 0},
@@ -241,7 +172,7 @@
 	/*reserved2*/ {0, 0}
 };
 
-static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
+const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
 	/*page_code*/SMS_RIGID_DISK_PAGE,
 	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
 	/*cylinders*/ {0, 0, 0},
@@ -257,7 +188,25 @@
 	/*reserved2*/ {0, 0}
 };
 
-static struct scsi_caching_page caching_page_default = {
+const static struct scsi_da_verify_recovery_page verify_er_page_default = {
+	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
+	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
+	/*byte3*/0,
+	/*read_retry_count*/0,
+	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
+	/*recovery_time_limit*/{0, 0},
+};
+
+const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
+	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
+	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
+	/*byte3*/SMS_VER_PER,
+	/*read_retry_count*/0,
+	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
+	/*recovery_time_limit*/{0, 0},
+};
+
+const static struct scsi_caching_page caching_page_default = {
 	/*page_code*/SMS_CACHING_PAGE,
 	/*page_length*/sizeof(struct scsi_caching_page) - 2,
 	/*flags1*/ SCP_DISC | SCP_WCE,
@@ -273,10 +222,10 @@
 	/*non_cache_seg_size*/ {0, 0, 0}
 };
 
-static struct scsi_caching_page caching_page_changeable = {
+const static struct scsi_caching_page caching_page_changeable = {
 	/*page_code*/SMS_CACHING_PAGE,
 	/*page_length*/sizeof(struct scsi_caching_page) - 2,
-	/*flags1*/ 0,
+	/*flags1*/ SCP_WCE | SCP_RCD,
 	/*ret_priority*/ 0,
 	/*disable_pf_transfer_len*/ {0, 0},
 	/*min_prefetch*/ {0, 0},
@@ -289,89 +238,208 @@
 	/*non_cache_seg_size*/ {0, 0, 0}
 };
 
-static struct scsi_control_page control_page_default = {
+const static struct scsi_control_page control_page_default = {
 	/*page_code*/SMS_CONTROL_MODE_PAGE,
 	/*page_length*/sizeof(struct scsi_control_page) - 2,
 	/*rlec*/0,
-	/*queue_flags*/0,
+	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
 	/*eca_and_aen*/0,
-	/*reserved*/0,
-	/*aen_holdoff_period*/{0, 0}
+	/*flags4*/SCP_TAS,
+	/*aen_holdoff_period*/{0, 0},
+	/*busy_timeout_period*/{0, 0},
+	/*extended_selftest_completion_time*/{0, 0}
 };
 
-static struct scsi_control_page control_page_changeable = {
+const static struct scsi_control_page control_page_changeable = {
 	/*page_code*/SMS_CONTROL_MODE_PAGE,
 	/*page_length*/sizeof(struct scsi_control_page) - 2,
 	/*rlec*/SCP_DSENSE,
-	/*queue_flags*/0,
-	/*eca_and_aen*/0,
+	/*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
+	/*eca_and_aen*/SCP_SWP,
+	/*flags4*/0,
+	/*aen_holdoff_period*/{0, 0},
+	/*busy_timeout_period*/{0, 0},
+	/*extended_selftest_completion_time*/{0, 0}
+};
+
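The default/changeable template pairs follow the usual SCSI mode page convention: the changeable copy is a bit mask advertising which fields MODE SELECT may alter, and the handler merges initiator-supplied data into the current page through that mask. A standalone sketch of the merge (an illustration, not CTL's actual ctl_do_mode_select()):

	#include <stdio.h>

	/*
	 * MODE SELECT merge: only bits set in the "changeable" template may
	 * be modified; all other bits keep their current value.
	 */
	static void
	mode_select_merge(unsigned char *cur, const unsigned char *user,
	    const unsigned char *chg, int len)
	{
		int i;

		for (i = 0; i < len; i++)
			cur[i] = (cur[i] & ~chg[i]) | (user[i] & chg[i]);
	}

	int
	main(void)
	{
		unsigned char cur = 0x04;	/* current page byte */
		unsigned char user = 0xff;	/* initiator sets every bit */
		unsigned char chg = 0x01;	/* only bit 0 is changeable */

		mode_select_merge(&cur, &user, &chg, 1);
		printf("result: 0x%02x\n", cur);	/* prints 0x05 */
		return (0);
	}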
+#define CTL_CEM_LEN	(sizeof(struct scsi_control_ext_page) - 4)
+
+const static struct scsi_control_ext_page control_ext_page_default = {
+	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
+	/*subpage_code*/0x01,
+	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
+	/*flags*/0,
+	/*prio*/0,
+	/*max_sense*/0
+};
+
+const static struct scsi_control_ext_page control_ext_page_changeable = {
+	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
+	/*subpage_code*/0x01,
+	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
+	/*flags*/0,
+	/*prio*/0,
+	/*max_sense*/0xff
+};
+
+const static struct scsi_info_exceptions_page ie_page_default = {
+	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
+	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
+	/*info_flags*/SIEP_FLAGS_EWASC,
+	/*mrie*/SIEP_MRIE_NO,
+	/*interval_timer*/{0, 0, 0, 0},
+	/*report_count*/{0, 0, 0, 1}
+};
+
+const static struct scsi_info_exceptions_page ie_page_changeable = {
+	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
+	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
+	/*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
+	    SIEP_FLAGS_LOGERR,
+	/*mrie*/0x0f,
+	/*interval_timer*/{0xff, 0xff, 0xff, 0xff},
+	/*report_count*/{0xff, 0xff, 0xff, 0xff}
+};
+
+#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)
+
+const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
+	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
+	/*subpage_code*/0x02,
+	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
+	/*flags*/0,
+	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+	/*descr*/{}},
+	{{/*flags*/0,
+	  /*resource*/0x01,
+	  /*reserved*/{0, 0},
+	  /*count*/{0, 0, 0, 0}},
+	 {/*flags*/0,
+	  /*resource*/0x02,
+	  /*reserved*/{0, 0},
+	  /*count*/{0, 0, 0, 0}},
+	 {/*flags*/0,
+	  /*resource*/0xf1,
+	  /*reserved*/{0, 0},
+	  /*count*/{0, 0, 0, 0}},
+	 {/*flags*/0,
+	  /*resource*/0xf2,
+	  /*reserved*/{0, 0},
+	  /*count*/{0, 0, 0, 0}}
+	}
+};
+
+const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
+	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
+	/*subpage_code*/0x02,
+	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
+	/*flags*/SLBPP_SITUA,
+	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+	/*descr*/{}},
+	{{/*flags*/0,
+	  /*resource*/0,
+	  /*reserved*/{0, 0},
+	  /*count*/{0, 0, 0, 0}},
+	 {/*flags*/0,
+	  /*resource*/0,
+	  /*reserved*/{0, 0},
+	  /*count*/{0, 0, 0, 0}},
+	 {/*flags*/0,
+	  /*resource*/0,
+	  /*reserved*/{0, 0},
+	  /*count*/{0, 0, 0, 0}},
+	 {/*flags*/0,
+	  /*resource*/0,
+	  /*reserved*/{0, 0},
+	  /*count*/{0, 0, 0, 0}}
+	}
+};
+
+const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
+	/*page_code*/SMS_CDDVD_CAPS_PAGE,
+	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
+	/*caps1*/0x3f,
+	/*caps2*/0x00,
+	/*caps3*/0xf0,
+	/*caps4*/0x00,
+	/*caps5*/0x29,
+	/*caps6*/0x00,
+	/*obsolete*/{0, 0},
+	/*nvol_levels*/{0, 0},
+	/*buffer_size*/{8, 0},
+	/*obsolete2*/{0, 0},
 	/*reserved*/0,
-	/*aen_holdoff_period*/{0, 0}
+	/*digital*/0,
+	/*obsolete3*/0,
+	/*copy_management*/0,
+	/*reserved2*/0,
+	/*rotation_control*/0,
+	/*cur_write_speed*/0,
+	/*num_speed_descr*/0,
 };
 
+const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
+	/*page_code*/SMS_CDDVD_CAPS_PAGE,
+	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
+	/*caps1*/0,
+	/*caps2*/0,
+	/*caps3*/0,
+	/*caps4*/0,
+	/*caps5*/0,
+	/*caps6*/0,
+	/*obsolete*/{0, 0},
+	/*nvol_levels*/{0, 0},
+	/*buffer_size*/{0, 0},
+	/*obsolete2*/{0, 0},
+	/*reserved*/0,
+	/*digital*/0,
+	/*obsolete3*/0,
+	/*copy_management*/0,
+	/*reserved2*/0,
+	/*rotation_control*/0,
+	/*cur_write_speed*/0,
+	/*num_speed_descr*/0,
+};
 
-/*
- * XXX KDM move these into the softc.
- */
-static int rcv_sync_msg;
-static int persis_offset;
-static uint8_t ctl_pause_rtr;
-static int     ctl_is_single;
-static int     index_to_aps_page;
-int	   ctl_disable = 0;
-
 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
-SYSCTL_INT(_kern_cam_ctl, OID_AUTO, disable, CTLFLAG_RDTUN, &ctl_disable, 0,
-	   "Disable CTL");
-TUNABLE_INT("kern.cam.ctl.disable", &ctl_disable);
+static int worker_threads = -1;
+TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
+SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
+    &worker_threads, 1, "Number of worker threads");
+static int ctl_debug = CTL_DEBUG_NONE;
+TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug);
+SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
+    &ctl_debug, 0, "Enabled debug flags");
+static int ctl_lun_map_size = 1024;
+SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
+    &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
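
Of these knobs, worker_threads is a boot-time tunable (CTLFLAG_RDTUN), while debug and lun_map_size are also writable at runtime (CTLFLAG_RWTUN). For example:

	# /boot/loader.conf -- read before the module initializes
	kern.cam.ctl.worker_threads=4

	# at runtime, via sysctl(8)
	sysctl kern.cam.ctl.debug=1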
 
 /*
- * Serial number (0x80), device id (0x83), and supported pages (0x00)
+ * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
+ * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
+ * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
+ * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
  */
-#define SCSI_EVPD_NUM_SUPPORTED_PAGES	3
+#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10
 
 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
 				  int param);
 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
+static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
 static int ctl_init(void);
-void ctl_shutdown(void);
+static int ctl_shutdown(void);
 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
-static void ctl_ioctl_online(void *arg);
-static void ctl_ioctl_offline(void *arg);
-static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id);
-static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id);
-static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
-static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
-static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
-static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock);
-static int ctl_ioctl_submit_wait(union ctl_io *io);
-static void ctl_ioctl_datamove(union ctl_io *io);
-static void ctl_ioctl_done(union ctl_io *io);
-static void ctl_ioctl_hard_startstop_callback(void *arg,
-					      struct cfi_metatask *metatask);
-static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
-static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
-			      struct ctl_ooa *ooa_hdr);
+static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
+static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
+			      struct ctl_ooa *ooa_hdr,
+			      struct ctl_ooa_entry *kern_entries);
 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
 		     struct thread *td);
-uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
-uint32_t ctl_port_idx(int port_num);
-#ifdef unused
-static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
-				   uint32_t targ_target, uint32_t targ_lun,
-				   int can_wait);
-static void ctl_kfree_io(union ctl_io *io);
-#endif /* unused */
-static void ctl_free_io_internal(union ctl_io *io, int have_lock);
 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
-			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
+			 struct ctl_be_lun *be_lun);
 static int ctl_free_lun(struct ctl_lun *lun);
 static void ctl_create_lun(struct ctl_be_lun *be_lun);
-/**
-static void ctl_failover_change_pages(struct ctl_softc *softc,
-				      struct ctl_scsiio *ctsio, int master);
-**/
 
 static int ctl_do_mode_select(union ctl_io *io);
 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
@@ -382,36 +450,49 @@
 			   struct scsi_per_res_out_parms* param);
 static void ctl_pro_preempt_other(struct ctl_lun *lun,
 				  union ctl_ha_msg *msg);
-static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
+static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
+static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
+static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
+static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
+					 int alloc_len);
+static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
+					 int alloc_len);
+static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
+static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
 static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
-static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len);
-static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
-static ctl_action ctl_check_for_blockage(union ctl_io *pending_io,
-					 union ctl_io *ooa_io);
+static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
+static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
+    bool seq);
+static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
+static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
+    union ctl_io *pending_io, union ctl_io *ooa_io);
 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
 				union ctl_io *starting_io);
 static int ctl_check_blocked(struct ctl_lun *lun);
-static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
-				struct ctl_lun *lun,
-				struct ctl_cmd_entry *entry,
+static int ctl_scsiio_lun_check(struct ctl_lun *lun,
+				const struct ctl_cmd_entry *entry,
 				struct ctl_scsiio *ctsio);
-//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
-static void ctl_failover(void);
+static void ctl_failover_lun(union ctl_io *io);
 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
 			       struct ctl_scsiio *ctsio);
 static int ctl_scsiio(struct ctl_scsiio *ctsio);
 
-static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
-static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
-			    ctl_ua_type ua_type);
-static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
+static int ctl_target_reset(union ctl_io *io);
+static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx,
 			 ctl_ua_type ua_type);
+static int ctl_lun_reset(union ctl_io *io);
 static int ctl_abort_task(union ctl_io *io);
-static void ctl_run_task_queue(struct ctl_softc *ctl_softc);
+static int ctl_abort_task_set(union ctl_io *io);
+static int ctl_query_task(union ctl_io *io, int task_set);
+static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx,
+			      ctl_ua_type ua_type);
+static int ctl_i_t_nexus_reset(union ctl_io *io);
+static int ctl_query_async_event(union ctl_io *io);
+static void ctl_run_task(union ctl_io *io);
 #ifdef CTL_IO_DELAY
 static void ctl_datamove_timer_wakeup(void *arg);
 static void ctl_done_timer_wakeup(void *arg);
@@ -428,9 +509,28 @@
 				    ctl_ha_dt_cb callback);
 static void ctl_datamove_remote_read(union ctl_io *io);
 static void ctl_datamove_remote(union ctl_io *io);
-static int ctl_process_done(union ctl_io *io, int have_lock);
+static void ctl_process_done(union ctl_io *io);
+static void ctl_lun_thread(void *arg);
+static void ctl_thresh_thread(void *arg);
 static void ctl_work_thread(void *arg);
+static void ctl_enqueue_incoming(union ctl_io *io);
+static void ctl_enqueue_rtr(union ctl_io *io);
+static void ctl_enqueue_done(union ctl_io *io);
+static void ctl_enqueue_isc(union ctl_io *io);
+static const struct ctl_cmd_entry *
+    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
+static const struct ctl_cmd_entry *
+    ctl_validate_command(struct ctl_scsiio *ctsio);
+static int ctl_cmd_applicable(uint8_t lun_type,
+    const struct ctl_cmd_entry *entry);
+static int ctl_ha_init(void);
+static int ctl_ha_shutdown(void);
 
+static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
+static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
+static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
+static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);
+
 /*
  * Load the serialization table.  This isn't very pretty, but is probably
  * the easiest way to do it.
@@ -463,7 +563,197 @@
 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
 MODULE_VERSION(ctl, 1);
 
+static struct ctl_frontend ha_frontend =
+{
+	.name = "ha",
+	.init = ctl_ha_init,
+	.shutdown = ctl_ha_shutdown,
+};
+
+static int
+ctl_ha_init(void)
+{
+	struct ctl_softc *softc = control_softc;
+
+	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
+	                    &softc->othersc_pool) != 0)
+		return (ENOMEM);
+	if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
+		ctl_pool_free(softc->othersc_pool);
+		return (EIO);
+	}
+	if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
+	    != CTL_HA_STATUS_SUCCESS) {
+		ctl_ha_msg_destroy(softc);
+		ctl_pool_free(softc->othersc_pool);
+		return (EIO);
+	}
+	return (0);
+};
+
+static int
+ctl_ha_shutdown(void)
+{
+	struct ctl_softc *softc = control_softc;
+	struct ctl_port *port;
+
+	ctl_ha_msg_shutdown(softc);
+	if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS)
+		return (EIO);
+	if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
+		return (EIO);
+	ctl_pool_free(softc->othersc_pool);
+	while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) {
+		ctl_port_deregister(port);
+		free(port->port_name, M_CTL);
+		free(port, M_CTL);
+	}
+	return (0);
+};
+
 static void
+ctl_ha_datamove(union ctl_io *io)
+{
+	struct ctl_lun *lun = CTL_LUN(io);
+	struct ctl_sg_entry *sgl;
+	union ctl_ha_msg msg;
+	uint32_t sg_entries_sent;
+	int do_sg_copy, i, j;
+
+	memset(&msg.dt, 0, sizeof(msg.dt));
+	msg.hdr.msg_type = CTL_MSG_DATAMOVE;
+	msg.hdr.original_sc = io->io_hdr.original_sc;
+	msg.hdr.serializing_sc = io;
+	msg.hdr.nexus = io->io_hdr.nexus;
+	msg.hdr.status = io->io_hdr.status;
+	msg.dt.flags = io->io_hdr.flags;
+
+	/*
+	 * We convert everything into an S/G list here.  We can't
+	 * pass by reference, only by value between controllers.
+	 * So we can't pass a pointer to the S/G list, only as many
+	 * S/G entries as we can fit in here.  If it's possible for
+	 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
+	 * then we need to break this up into multiple transfers.
+	 */
+	if (io->scsiio.kern_sg_entries == 0) {
+		msg.dt.kern_sg_entries = 1;
+#if 0
+		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
+			msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
+		} else {
+			/* XXX KDM use busdma here! */
+			msg.dt.sg_list[0].addr =
+			    (void *)vtophys(io->scsiio.kern_data_ptr);
+		}
+#else
+		KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
+		    ("HA does not support BUS_ADDR"));
+		msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
+#endif
+		msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
+		do_sg_copy = 0;
+	} else {
+		msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
+		do_sg_copy = 1;
+	}
+
+	msg.dt.kern_data_len = io->scsiio.kern_data_len;
+	msg.dt.kern_total_len = io->scsiio.kern_total_len;
+	msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
+	msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
+	msg.dt.sg_sequence = 0;
+
+	/*
+	 * Loop until we've sent all of the S/G entries.  On the
+	 * other end, we'll recompose these S/G entries into one
+	 * contiguous list before processing.
+	 */
+	for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
+	    msg.dt.sg_sequence++) {
+		msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
+		    sizeof(msg.dt.sg_list[0])),
+		    msg.dt.kern_sg_entries - sg_entries_sent);
+		if (do_sg_copy != 0) {
+			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+			for (i = sg_entries_sent, j = 0;
+			     i < msg.dt.cur_sg_entries; i++, j++) {
+#if 0
+				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
+					msg.dt.sg_list[j].addr = sgl[i].addr;
+				} else {
+					/* XXX KDM use busdma here! */
+					msg.dt.sg_list[j].addr =
+					    (void *)vtophys(sgl[i].addr);
+				}
+#else
+				KASSERT((io->io_hdr.flags &
+				    CTL_FLAG_BUS_ADDR) == 0,
+				    ("HA does not support BUS_ADDR"));
+				msg.dt.sg_list[j].addr = sgl[i].addr;
+#endif
+				msg.dt.sg_list[j].len = sgl[i].len;
+			}
+		}
+
+		sg_entries_sent += msg.dt.cur_sg_entries;
+		msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
+		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+		    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
+		    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
+		    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
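+			/*
+			 * Apparently arbitrary nonzero port_status; any
+			 * nonzero value tells be_move_done() that the HA
+			 * send failed.
+			 */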
+			io->io_hdr.port_status = 31341;
+			io->scsiio.be_move_done(io);
+			return;
+		}
+		msg.dt.sent_sg_entries = sg_entries_sent;
+	}
+
+	/*
+	 * Officially hand over the request from us to the peer.
+	 * If failover has just happened, then we must return an error.
+	 * If failover happens just after this, it is not our problem.
+	 */
+	if (lun)
+		mtx_lock(&lun->lun_lock);
+	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
+		if (lun)
+			mtx_unlock(&lun->lun_lock);
+		io->io_hdr.port_status = 31342;
+		io->scsiio.be_move_done(io);
+		return;
+	}
+	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+	io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
+	if (lun)
+		mtx_unlock(&lun->lun_lock);
+}
+
+static void
+ctl_ha_done(union ctl_io *io)
+{
+	union ctl_ha_msg msg;
+
+	if (io->io_hdr.io_type == CTL_IO_SCSI) {
+		memset(&msg, 0, sizeof(msg));
+		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
+		msg.hdr.original_sc = io->io_hdr.original_sc;
+		msg.hdr.nexus = io->io_hdr.nexus;
+		msg.hdr.status = io->io_hdr.status;
+		msg.scsi.scsi_status = io->scsiio.scsi_status;
+		msg.scsi.tag_num = io->scsiio.tag_num;
+		msg.scsi.tag_type = io->scsiio.tag_type;
+		msg.scsi.sense_len = io->scsiio.sense_len;
+		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
+		    io->scsiio.sense_len);
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
+		    msg.scsi.sense_len, M_WAITOK);
+	}
+	ctl_free_io(io);
+}
+
+static void
 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
 			    union ctl_ha_msg *msg_info)
 {
@@ -481,14 +771,9 @@
 	ctsio->io_hdr.status = msg_info->hdr.status;
 	ctsio->scsi_status = msg_info->scsi.scsi_status;
 	ctsio->sense_len = msg_info->scsi.sense_len;
-	ctsio->sense_residual = msg_info->scsi.sense_residual;
-	ctsio->residual = msg_info->scsi.residual;
 	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
-	       sizeof(ctsio->sense_data));
-	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
-	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
-	STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
-	ctl_wakeup_thread();
+	       msg_info->scsi.sense_len);
+	ctl_enqueue_isc((union ctl_io *)ctsio);
 }
 
 static void
@@ -504,40 +789,589 @@
 	}
 
 	ctsio = &msg_info->hdr.serializing_sc->scsiio;
-#if 0
-	/*
-	 * Attempt to catch the situation where an I/O has
-	 * been freed, and we're using it again.
-	 */
-	if (ctsio->io_hdr.io_type == 0xff) {
-		union ctl_io *tmp_io;
-		tmp_io = (union ctl_io *)ctsio;
-		printf("%s: %p use after free!\n", __func__,
-		       ctsio);
-		printf("%s: type %d msg %d cdb %x iptl: "
-		       "%d:%d:%d:%d tag 0x%04x "
-		       "flag %#x status %x\n",
-			__func__,
-			tmp_io->io_hdr.io_type,
-			tmp_io->io_hdr.msg_type,
-			tmp_io->scsiio.cdb[0],
-			tmp_io->io_hdr.nexus.initid.id,
-			tmp_io->io_hdr.nexus.targ_port,
-			tmp_io->io_hdr.nexus.targ_target.id,
-			tmp_io->io_hdr.nexus.targ_lun,
-			(tmp_io->io_hdr.io_type ==
-			CTL_IO_TASK) ?
-			tmp_io->taskio.tag_num :
-			tmp_io->scsiio.tag_num,
-		        tmp_io->io_hdr.flags,
-			tmp_io->io_hdr.status);
-	}
-#endif
 	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
-	STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
-	ctl_wakeup_thread();
+	ctl_enqueue_isc((union ctl_io *)ctsio);
 }
 
+void
+ctl_isc_announce_lun(struct ctl_lun *lun)
+{
+	struct ctl_softc *softc = lun->ctl_softc;
+	union ctl_ha_msg *msg;
+	struct ctl_ha_msg_lun_pr_key pr_key;
+	int i, k;
+
+	if (softc->ha_link != CTL_HA_LINK_ONLINE)
+		return;
+	mtx_lock(&lun->lun_lock);
+	i = sizeof(msg->lun);
+	if (lun->lun_devid)
+		i += lun->lun_devid->len;
+	i += sizeof(pr_key) * lun->pr_key_count;
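+	/*
+	 * malloc(M_WAITOK) may sleep, so drop the LUN lock around the
+	 * allocation, then re-check the required size; if more PR keys
+	 * appeared while unlocked, retry with the larger size.
+	 */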
+alloc:
+	mtx_unlock(&lun->lun_lock);
+	msg = malloc(i, M_CTL, M_WAITOK);
+	mtx_lock(&lun->lun_lock);
+	k = sizeof(msg->lun);
+	if (lun->lun_devid)
+		k += lun->lun_devid->len;
+	k += sizeof(pr_key) * lun->pr_key_count;
+	if (i < k) {
+		free(msg, M_CTL);
+		i = k;
+		goto alloc;
+	}
+	bzero(&msg->lun, sizeof(msg->lun));
+	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
+	msg->hdr.nexus.targ_lun = lun->lun;
+	msg->hdr.nexus.targ_mapped_lun = lun->lun;
+	msg->lun.flags = lun->flags;
+	msg->lun.pr_generation = lun->pr_generation;
+	msg->lun.pr_res_idx = lun->pr_res_idx;
+	msg->lun.pr_res_type = lun->pr_res_type;
+	msg->lun.pr_key_count = lun->pr_key_count;
+	i = 0;
+	if (lun->lun_devid) {
+		msg->lun.lun_devid_len = lun->lun_devid->len;
+		memcpy(&msg->lun.data[i], lun->lun_devid->data,
+		    msg->lun.lun_devid_len);
+		i += msg->lun.lun_devid_len;
+	}
+	for (k = 0; k < CTL_MAX_INITIATORS; k++) {
+		if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
+			continue;
+		pr_key.pr_iid = k;
+		memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
+		i += sizeof(pr_key);
+	}
+	mtx_unlock(&lun->lun_lock);
+	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
+	    M_WAITOK);
+	free(msg, M_CTL);
+
+	if (lun->flags & CTL_LUN_PRIMARY_SC) {
+		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+			ctl_isc_announce_mode(lun, -1,
+			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
+			    lun->mode_pages.index[i].subpage);
+		}
+	}
+}
+
+void
+ctl_isc_announce_port(struct ctl_port *port)
+{
+	struct ctl_softc *softc = port->ctl_softc;
+	union ctl_ha_msg *msg;
+	int i;
+
+	if (port->targ_port < softc->port_min ||
+	    port->targ_port >= softc->port_max ||
+	    softc->ha_link != CTL_HA_LINK_ONLINE)
+		return;
+	i = sizeof(msg->port) + strlen(port->port_name) + 1;
+	if (port->lun_map)
+		i += port->lun_map_size * sizeof(uint32_t);
+	if (port->port_devid)
+		i += port->port_devid->len;
+	if (port->target_devid)
+		i += port->target_devid->len;
+	if (port->init_devid)
+		i += port->init_devid->len;
+	msg = malloc(i, M_CTL, M_WAITOK);
+	bzero(&msg->port, sizeof(msg->port));
+	msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
+	msg->hdr.nexus.targ_port = port->targ_port;
+	msg->port.port_type = port->port_type;
+	msg->port.physical_port = port->physical_port;
+	msg->port.virtual_port = port->virtual_port;
+	msg->port.status = port->status;
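+	/*
+	 * Variable-length fields (name, LUN map, device IDs) are packed
+	 * back to back into msg->port.data[]; i tracks the offset.
+	 */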
+	i = 0;
+	msg->port.name_len = sprintf(&msg->port.data[i],
+	    "%d:%s", softc->ha_id, port->port_name) + 1;
+	i += msg->port.name_len;
+	if (port->lun_map) {
+		msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
+		memcpy(&msg->port.data[i], port->lun_map,
+		    msg->port.lun_map_len);
+		i += msg->port.lun_map_len;
+	}
+	if (port->port_devid) {
+		msg->port.port_devid_len = port->port_devid->len;
+		memcpy(&msg->port.data[i], port->port_devid->data,
+		    msg->port.port_devid_len);
+		i += msg->port.port_devid_len;
+	}
+	if (port->target_devid) {
+		msg->port.target_devid_len = port->target_devid->len;
+		memcpy(&msg->port.data[i], port->target_devid->data,
+		    msg->port.target_devid_len);
+		i += msg->port.target_devid_len;
+	}
+	if (port->init_devid) {
+		msg->port.init_devid_len = port->init_devid->len;
+		memcpy(&msg->port.data[i], port->init_devid->data,
+		    msg->port.init_devid_len);
+		i += msg->port.init_devid_len;
+	}
+	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
+	    M_WAITOK);
+	free(msg, M_CTL);
+}
+
+void
+ctl_isc_announce_iid(struct ctl_port *port, int iid)
+{
+	struct ctl_softc *softc = port->ctl_softc;
+	union ctl_ha_msg *msg;
+	int i, l;
+
+	if (port->targ_port < softc->port_min ||
+	    port->targ_port >= softc->port_max ||
+	    softc->ha_link != CTL_HA_LINK_ONLINE)
+		return;
+	mtx_lock(&softc->ctl_lock);
+	i = sizeof(msg->iid);
+	l = 0;
+	if (port->wwpn_iid[iid].name)
+		l = strlen(port->wwpn_iid[iid].name) + 1;
+	i += l;
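+	/* ctl_lock is held, so this allocation must not sleep. */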
+	msg = malloc(i, M_CTL, M_NOWAIT);
+	if (msg == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		return;
+	}
+	bzero(&msg->iid, sizeof(msg->iid));
+	msg->hdr.msg_type = CTL_MSG_IID_SYNC;
+	msg->hdr.nexus.targ_port = port->targ_port;
+	msg->hdr.nexus.initid = iid;
+	msg->iid.in_use = port->wwpn_iid[iid].in_use;
+	msg->iid.name_len = l;
+	msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
+	if (port->wwpn_iid[iid].name)
+		strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
+	mtx_unlock(&softc->ctl_lock);
+	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
+	free(msg, M_CTL);
+}
+
+void
+ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
+    uint8_t page, uint8_t subpage)
+{
+	struct ctl_softc *softc = lun->ctl_softc;
+	union ctl_ha_msg msg;
+	u_int i;
+
+	if (softc->ha_link != CTL_HA_LINK_ONLINE)
+		return;
+	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
+		    page && lun->mode_pages.index[i].subpage == subpage)
+			break;
+	}
+	if (i == CTL_NUM_MODE_PAGES)
+		return;
+
+	/* Don't try to replicate pages not present on this device. */
+	if (lun->mode_pages.index[i].page_data == NULL)
+		return;
+
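+	/* Encode initidx into the nexus as port + initiator-within-port. */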
+	bzero(&msg.mode, sizeof(msg.mode));
+	msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
+	msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
+	msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
+	msg.hdr.nexus.targ_lun = lun->lun;
+	msg.hdr.nexus.targ_mapped_lun = lun->lun;
+	msg.mode.page_code = page;
+	msg.mode.subpage = subpage;
+	msg.mode.page_len = lun->mode_pages.index[i].page_len;
+	memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
+	    msg.mode.page_len);
+	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
+	    M_WAITOK);
+}
+
+static void
+ctl_isc_ha_link_up(struct ctl_softc *softc)
+{
+	struct ctl_port *port;
+	struct ctl_lun *lun;
+	union ctl_ha_msg msg;
+	int i;
+
+	/* Announce this node's parameters to the peer for validation. */
+	msg.login.msg_type = CTL_MSG_LOGIN;
+	msg.login.version = CTL_HA_VERSION;
+	msg.login.ha_mode = softc->ha_mode;
+	msg.login.ha_id = softc->ha_id;
+	msg.login.max_luns = CTL_MAX_LUNS;
+	msg.login.max_ports = CTL_MAX_PORTS;
+	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
+	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
+	    M_WAITOK);
+
+	STAILQ_FOREACH(port, &softc->port_list, links) {
+		ctl_isc_announce_port(port);
+		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+			if (port->wwpn_iid[i].in_use)
+				ctl_isc_announce_iid(port, i);
+		}
+	}
+	STAILQ_FOREACH(lun, &softc->lun_list, links)
+		ctl_isc_announce_lun(lun);
+}
+
+static void
+ctl_isc_ha_link_down(struct ctl_softc *softc)
+{
+	struct ctl_port *port;
+	struct ctl_lun *lun;
+	union ctl_io *io;
+	int i;
+
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(lun, &softc->lun_list, links) {
+		mtx_lock(&lun->lun_lock);
+		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
+			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
+			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
+		}
+		mtx_unlock(&lun->lun_lock);
+
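+		/* ctl_alloc_io() may sleep; drop ctl_lock around it. */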
+		mtx_unlock(&softc->ctl_lock);
+		io = ctl_alloc_io(softc->othersc_pool);
+		mtx_lock(&softc->ctl_lock);
+		ctl_zero_io(io);
+		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
+		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
+		ctl_enqueue_isc(io);
+	}
+
+	STAILQ_FOREACH(port, &softc->port_list, links) {
+		if (port->targ_port >= softc->port_min &&
+		    port->targ_port < softc->port_max)
+			continue;
+		port->status &= ~CTL_PORT_STATUS_ONLINE;
+		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+			port->wwpn_iid[i].in_use = 0;
+			free(port->wwpn_iid[i].name, M_CTL);
+			port->wwpn_iid[i].name = NULL;
+		}
+	}
+	mtx_unlock(&softc->ctl_lock);
+}
+
+static void
+ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+	struct ctl_lun *lun;
+	uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
+
+	mtx_lock(&softc->ctl_lock);
+	if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		return;
+	}
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
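+	/*
+	 * Hand-over-hand locking: the LUN lock is taken before ctl_lock
+	 * is dropped, so the LUN cannot go away underneath us.
+	 */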
+	if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
+		memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
+	if (msg->ua.ua_all) {
+		if (msg->ua.ua_set)
+			ctl_est_ua_all(lun, iid, msg->ua.ua_type);
+		else
+			ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
+	} else {
+		if (msg->ua.ua_set)
+			ctl_est_ua(lun, iid, msg->ua.ua_type);
+		else
+			ctl_clr_ua(lun, iid, msg->ua.ua_type);
+	}
+	mtx_unlock(&lun->lun_lock);
+}
+
+static void
+ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+	struct ctl_lun *lun;
+	struct ctl_ha_msg_lun_pr_key pr_key;
+	int i, k;
+	ctl_lun_flags oflags;
+	uint32_t targ_lun;
+
+	targ_lun = msg->hdr.nexus.targ_mapped_lun;
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		return;
+	}
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
+	if (lun->flags & CTL_LUN_DISABLED) {
+		mtx_unlock(&lun->lun_lock);
+		return;
+	}
+	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
+	if (msg->lun.lun_devid_len != i || (i > 0 &&
+	    memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
+		mtx_unlock(&lun->lun_lock);
+		printf("%s: Received conflicting HA LUN %d\n",
+		    __func__, targ_lun);
+		return;
+	} else {
+		/* Record whether peer is primary. */
+		oflags = lun->flags;
+		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
+		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
+			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
+		else
+			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
+		if (oflags != lun->flags)
+			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
+
+		/* If the peer is primary and we are not, use its data. */
+		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
+			lun->pr_generation = msg->lun.pr_generation;
+			lun->pr_res_idx = msg->lun.pr_res_idx;
+			lun->pr_res_type = msg->lun.pr_res_type;
+			lun->pr_key_count = msg->lun.pr_key_count;
+			for (k = 0; k < CTL_MAX_INITIATORS; k++)
+				ctl_clr_prkey(lun, k);
+			for (k = 0; k < msg->lun.pr_key_count; k++) {
+				memcpy(&pr_key, &msg->lun.data[i],
+				    sizeof(pr_key));
+				ctl_alloc_prkey(lun, pr_key.pr_iid);
+				ctl_set_prkey(lun, pr_key.pr_iid,
+				    pr_key.pr_key);
+				i += sizeof(pr_key);
+			}
+		}
+
+		mtx_unlock(&lun->lun_lock);
+		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
+		    __func__, targ_lun,
+		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
+		    "primary" : "secondary"));
+
+		/* If we are primary but the peer does not know it, notify. */
+		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
+		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
+			ctl_isc_announce_lun(lun);
+	}
+}
+
+static void
+ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+	struct ctl_port *port;
+	struct ctl_lun *lun;
+	int i, new;
+
+	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
+	if (port == NULL) {
+		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
+		    msg->hdr.nexus.targ_port));
+		new = 1;
+		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
+		port->frontend = &ha_frontend;
+		port->targ_port = msg->hdr.nexus.targ_port;
+		port->fe_datamove = ctl_ha_datamove;
+		port->fe_done = ctl_ha_done;
+	} else if (port->frontend == &ha_frontend) {
+		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
+		    msg->hdr.nexus.targ_port));
+		new = 0;
+	} else {
+		printf("%s: Received conflicting HA port %d\n",
+		    __func__, msg->hdr.nexus.targ_port);
+		return;
+	}
+	port->port_type = msg->port.port_type;
+	port->physical_port = msg->port.physical_port;
+	port->virtual_port = msg->port.virtual_port;
+	port->status = msg->port.status;
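+	/*
+	 * Unpack the variable-length fields in the same order that
+	 * ctl_isc_announce_port() packed them.
+	 */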
+	i = 0;
+	free(port->port_name, M_CTL);
+	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
+	    M_CTL);
+	i += msg->port.name_len;
+	if (msg->port.lun_map_len != 0) {
+		if (port->lun_map == NULL ||
+		    port->lun_map_size * sizeof(uint32_t) <
+		    msg->port.lun_map_len) {
+			port->lun_map_size = 0;
+			free(port->lun_map, M_CTL);
+			port->lun_map = malloc(msg->port.lun_map_len,
+			    M_CTL, M_WAITOK);
+		}
+		memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
+		port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
+		i += msg->port.lun_map_len;
+	} else {
+		port->lun_map_size = 0;
+		free(port->lun_map, M_CTL);
+		port->lun_map = NULL;
+	}
+	if (msg->port.port_devid_len != 0) {
+		if (port->port_devid == NULL ||
+		    port->port_devid->len < msg->port.port_devid_len) {
+			free(port->port_devid, M_CTL);
+			port->port_devid = malloc(sizeof(struct ctl_devid) +
+			    msg->port.port_devid_len, M_CTL, M_WAITOK);
+		}
+		memcpy(port->port_devid->data, &msg->port.data[i],
+		    msg->port.port_devid_len);
+		port->port_devid->len = msg->port.port_devid_len;
+		i += msg->port.port_devid_len;
+	} else {
+		free(port->port_devid, M_CTL);
+		port->port_devid = NULL;
+	}
+	if (msg->port.target_devid_len != 0) {
+		if (port->target_devid == NULL ||
+		    port->target_devid->len < msg->port.target_devid_len) {
+			free(port->target_devid, M_CTL);
+			port->target_devid = malloc(sizeof(struct ctl_devid) +
+			    msg->port.target_devid_len, M_CTL, M_WAITOK);
+		}
+		memcpy(port->target_devid->data, &msg->port.data[i],
+		    msg->port.target_devid_len);
+		port->target_devid->len = msg->port.target_devid_len;
+		i += msg->port.target_devid_len;
+	} else {
+		free(port->target_devid, M_CTL);
+		port->target_devid = NULL;
+	}
+	if (msg->port.init_devid_len != 0) {
+		if (port->init_devid == NULL ||
+		    port->init_devid->len < msg->port.init_devid_len) {
+			free(port->init_devid, M_CTL);
+			port->init_devid = malloc(sizeof(struct ctl_devid) +
+			    msg->port.init_devid_len, M_CTL, M_WAITOK);
+		}
+		memcpy(port->init_devid->data, &msg->port.data[i],
+		    msg->port.init_devid_len);
+		port->init_devid->len = msg->port.init_devid_len;
+		i += msg->port.init_devid_len;
+	} else {
+		free(port->init_devid, M_CTL);
+		port->init_devid = NULL;
+	}
+	if (new) {
+		if (ctl_port_register(port) != 0) {
+			printf("%s: ctl_port_register() failed with error\n",
+			    __func__);
+		}
+	}
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(lun, &softc->lun_list, links) {
+		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+			continue;
+		mtx_lock(&lun->lun_lock);
+		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
+		mtx_unlock(&lun->lun_lock);
+	}
+	mtx_unlock(&softc->ctl_lock);
+}
+
+static void
+ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+	struct ctl_port *port;
+	int iid;
+
+	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
+	if (port == NULL) {
+		printf("%s: Received IID for unknown port %d\n",
+		    __func__, msg->hdr.nexus.targ_port);
+		return;
+	}
+	iid = msg->hdr.nexus.initid;
+	if (port->wwpn_iid[iid].in_use != 0 &&
+	    msg->iid.in_use == 0)
+		ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
+	port->wwpn_iid[iid].in_use = msg->iid.in_use;
+	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
+	free(port->wwpn_iid[iid].name, M_CTL);
+	if (msg->iid.name_len) {
+		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
+		    msg->iid.name_len, M_CTL);
+	} else
+		port->wwpn_iid[iid].name = NULL;
+}
+
+static void
+ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+
+	if (msg->login.version != CTL_HA_VERSION) {
+		printf("CTL HA peers have different versions %d != %d\n",
+		    msg->login.version, CTL_HA_VERSION);
+		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
+		return;
+	}
+	if (msg->login.ha_mode != softc->ha_mode) {
+		printf("CTL HA peers have different ha_mode %d != %d\n",
+		    msg->login.ha_mode, softc->ha_mode);
+		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
+		return;
+	}
+	if (msg->login.ha_id == softc->ha_id) {
+		printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
+		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
+		return;
+	}
+	if (msg->login.max_luns != CTL_MAX_LUNS ||
+	    msg->login.max_ports != CTL_MAX_PORTS ||
+	    msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
+		printf("CTL HA peers have different limits\n");
+		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
+		return;
+	}
+}
+
+static void
+ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+	struct ctl_lun *lun;
+	u_int i;
+	uint32_t initidx, targ_lun;
+
+	targ_lun = msg->hdr.nexus.targ_mapped_lun;
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		return;
+	}
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
+	if (lun->flags & CTL_LUN_DISABLED) {
+		mtx_unlock(&lun->lun_lock);
+		return;
+	}
+	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
+		    msg->mode.page_code &&
+		    lun->mode_pages.index[i].subpage == msg->mode.subpage)
+			break;
+	}
+	if (i == CTL_NUM_MODE_PAGES) {
+		mtx_unlock(&lun->lun_lock);
+		return;
+	}
+	memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
+	    lun->mode_pages.index[i].page_len);
+	initidx = ctl_get_initindex(&msg->hdr.nexus);
+	if (initidx != -1)
+		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
+	mtx_unlock(&lun->lun_lock);
+}
+
 /*
  * ISC (Inter Shelf Communication) event handler.  Events from the HA
  * subsystem come in here.
@@ -545,62 +1379,38 @@
 static void
 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
 {
-	struct ctl_softc *ctl_softc;
+	struct ctl_softc *softc = control_softc;
 	union ctl_io *io;
 	struct ctl_prio *presio;
 	ctl_ha_status isc_status;
 
-	ctl_softc = control_softc;
-	io = NULL;
-
-
-#if 0
-	printf("CTL: Isc Msg event %d\n", event);
-#endif
+	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
 	if (event == CTL_HA_EVT_MSG_RECV) {
-		union ctl_ha_msg msg_info;
+		union ctl_ha_msg *msg, msgbuf;
 
-		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
-					     sizeof(msg_info), /*wait*/ 0);
-#if 0
-		printf("CTL: msg_type %d\n", msg_info.msg_type);
-#endif
-		if (isc_status != 0) {
-			printf("Error receiving message, status = %d\n",
-			       isc_status);
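+		/*
+		 * For MSG_RECV events param carries the message length;
+		 * use the on-stack msgbuf when the message fits, else
+		 * fall back to a heap buffer.
+		 */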
+		if (param > sizeof(msgbuf))
+			msg = malloc(param, M_CTL, M_WAITOK);
+		else
+			msg = &msgbuf;
+		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
+		    M_WAITOK);
+		if (isc_status != CTL_HA_STATUS_SUCCESS) {
+			printf("%s: Error receiving message: %d\n",
+			    __func__, isc_status);
+			if (msg != &msgbuf)
+				free(msg, M_CTL);
 			return;
 		}
-		mtx_lock(&ctl_softc->ctl_lock);
 
-		switch (msg_info.hdr.msg_type) {
+		CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
+		switch (msg->hdr.msg_type) {
 		case CTL_MSG_SERIALIZE:
-#if 0
-			printf("Serialize\n");
-#endif
-			io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
-			if (io == NULL) {
-				printf("ctl_isc_event_handler: can't allocate "
-				       "ctl_io!\n");
-				/* Bad Juju */
-				/* Need to set busy and send msg back */
-				mtx_unlock(&ctl_softc->ctl_lock);
-				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-				msg_info.hdr.status = CTL_SCSI_ERROR;
-				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
-				msg_info.scsi.sense_len = 0;
-			        if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
-				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
-				}
-				goto bailout;
-			}
+			io = ctl_alloc_io(softc->othersc_pool);
 			ctl_zero_io(io);
-			// populate ctsio from msg_info
+			// populate ctsio from msg
 			io->io_hdr.io_type = CTL_IO_SCSI;
 			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
-			io->io_hdr.original_sc = msg_info.hdr.original_sc;
-#if 0
-			printf("pOrig %x\n", (int)msg_info.original_sc);
-#endif
+			io->io_hdr.original_sc = msg->hdr.original_sc;
 			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
 					    CTL_FLAG_IO_ACTIVE;
 			/*
@@ -610,33 +1420,33 @@
 			 *
 			 * XXX KDM add another flag that is more specific.
 			 */
-			if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
+			if (softc->ha_mode != CTL_HA_MODE_XFER)
 				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
-			io->io_hdr.nexus = msg_info.hdr.nexus;
+			io->io_hdr.nexus = msg->hdr.nexus;
 #if 0
-			printf("targ %d, port %d, iid %d, lun %d\n",
-			       io->io_hdr.nexus.targ_target.id,
+			printf("port %u, iid %u, lun %u\n",
 			       io->io_hdr.nexus.targ_port,
-			       io->io_hdr.nexus.initid.id,
+			       io->io_hdr.nexus.initid,
 			       io->io_hdr.nexus.targ_lun);
 #endif
-			io->scsiio.tag_num = msg_info.scsi.tag_num;
-			io->scsiio.tag_type = msg_info.scsi.tag_type;
-			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
+			io->scsiio.tag_num = msg->scsi.tag_num;
+			io->scsiio.tag_type = msg->scsi.tag_type;
+#ifdef CTL_TIME_IO
+			io->io_hdr.start_time = time_uptime;
+			getbinuptime(&io->io_hdr.start_bt);
+#endif /* CTL_TIME_IO */
+			io->scsiio.cdb_len = msg->scsi.cdb_len;
+			memcpy(io->scsiio.cdb, msg->scsi.cdb,
 			       CTL_MAX_CDBLEN);
-			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
-				struct ctl_cmd_entry *entry;
-				uint8_t opcode;
+			if (softc->ha_mode == CTL_HA_MODE_XFER) {
+				const struct ctl_cmd_entry *entry;
 
-				opcode = io->scsiio.cdb[0];
-				entry = &ctl_cmd_table[opcode];
+				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
 				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
 				io->io_hdr.flags |=
 					entry->flags & CTL_FLAG_DATA_MASK;
 			}
-			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &io->io_hdr, links);
-			ctl_wakeup_thread();
+			ctl_enqueue_isc(io);
 			break;
 
 		/* Performed on the Originating SC, XFER mode only */
@@ -644,7 +1454,7 @@
 			struct ctl_sg_entry *sgl;
 			int i, j;
 
-			io = msg_info.hdr.original_sc;
+			io = msg->hdr.original_sc;
 			if (io == NULL) {
 				printf("%s: original_sc == NULL!\n", __func__);
 				/* XXX KDM do something here */
@@ -656,84 +1466,55 @@
 			 * Keep track of this, we need to send it back over
 			 * when the datamove is complete.
 			 */
-			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
+			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
+			if (msg->hdr.status == CTL_SUCCESS)
+				io->io_hdr.status = msg->hdr.status;
 
-			if (msg_info.dt.sg_sequence == 0) {
-				/*
-				 * XXX KDM we use the preallocated S/G list
-				 * here, but we'll need to change this to
-				 * dynamic allocation if we need larger S/G
-				 * lists.
-				 */
-				if (msg_info.dt.kern_sg_entries >
-				    sizeof(io->io_hdr.remote_sglist) /
-				    sizeof(io->io_hdr.remote_sglist[0])) {
-					printf("%s: number of S/G entries "
-					    "needed %u > allocated num %zd\n",
-					    __func__,
-					    msg_info.dt.kern_sg_entries,
-					    sizeof(io->io_hdr.remote_sglist)/
-					    sizeof(io->io_hdr.remote_sglist[0]));
-				
-					/*
-					 * XXX KDM send a message back to
-					 * the other side to shut down the
-					 * DMA.  The error will come back
-					 * through via the normal channel.
-					 */
-					break;
-				}
-				sgl = io->io_hdr.remote_sglist;
-				memset(sgl, 0,
-				       sizeof(io->io_hdr.remote_sglist));
+			if (msg->dt.sg_sequence == 0) {
+#ifdef CTL_TIME_IO
+				getbinuptime(&io->io_hdr.dma_start_bt);
+#endif
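+				/*
+				 * One allocation holds both S/G lists:
+				 * remote entries first, then space for
+				 * the local segments.
+				 */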
+				i = msg->dt.kern_sg_entries +
+				    msg->dt.kern_data_len /
+				    CTL_HA_DATAMOVE_SEGMENT + 1;
+				sgl = malloc(sizeof(*sgl) * i, M_CTL,
+				    M_WAITOK | M_ZERO);
+				io->io_hdr.remote_sglist = sgl;
+				io->io_hdr.local_sglist =
+				    &sgl[msg->dt.kern_sg_entries];
 
 				io->scsiio.kern_data_ptr = (uint8_t *)sgl;
 
 				io->scsiio.kern_sg_entries =
-					msg_info.dt.kern_sg_entries;
+					msg->dt.kern_sg_entries;
 				io->scsiio.rem_sg_entries =
-					msg_info.dt.kern_sg_entries;
+					msg->dt.kern_sg_entries;
 				io->scsiio.kern_data_len =
-					msg_info.dt.kern_data_len;
+					msg->dt.kern_data_len;
 				io->scsiio.kern_total_len =
-					msg_info.dt.kern_total_len;
+					msg->dt.kern_total_len;
 				io->scsiio.kern_data_resid =
-					msg_info.dt.kern_data_resid;
+					msg->dt.kern_data_resid;
 				io->scsiio.kern_rel_offset =
-					msg_info.dt.kern_rel_offset;
-				/*
-				 * Clear out per-DMA flags.
-				 */
-				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
-				/*
-				 * Add per-DMA flags that are set for this
-				 * particular DMA request.
-				 */
-				io->io_hdr.flags |= msg_info.dt.flags &
-						    CTL_FLAG_RDMA_MASK;
+					msg->dt.kern_rel_offset;
+				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
+				io->io_hdr.flags |= msg->dt.flags &
+				    CTL_FLAG_BUS_ADDR;
 			} else
 				sgl = (struct ctl_sg_entry *)
 					io->scsiio.kern_data_ptr;
 
-			for (i = msg_info.dt.sent_sg_entries, j = 0;
-			     i < (msg_info.dt.sent_sg_entries +
-			     msg_info.dt.cur_sg_entries); i++, j++) {
-				sgl[i].addr = msg_info.dt.sg_list[j].addr;
-				sgl[i].len = msg_info.dt.sg_list[j].len;
+			for (i = msg->dt.sent_sg_entries, j = 0;
+			     i < (msg->dt.sent_sg_entries +
+			     msg->dt.cur_sg_entries); i++, j++) {
+				sgl[i].addr = msg->dt.sg_list[j].addr;
+				sgl[i].len = msg->dt.sg_list[j].len;
 
 #if 0
-				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
-				       __func__,
-				       msg_info.dt.sg_list[j].addr,
-				       msg_info.dt.sg_list[j].len,
-				       sgl[i].addr, sgl[i].len, j, i);
+				printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n",
+				    __func__, sgl[i].addr, sgl[i].len, j, i);
 #endif
 			}
-#if 0
-			memcpy(&sgl[msg_info.dt.sent_sg_entries],
-			       msg_info.dt.sg_list,
-			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
-#endif
 
 			/*
 			 * If this is the last piece of the I/O, we've got
@@ -740,16 +1521,13 @@
 			 * the full S/G list.  Queue processing in the thread.
 			 * Otherwise wait for the next piece.
 			 */
-			if (msg_info.dt.sg_last != 0) {
-				STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-						   &io->io_hdr, links);
-				ctl_wakeup_thread();
-			}
+			if (msg->dt.sg_last != 0)
+				ctl_enqueue_isc(io);
 			break;
 		}
 		/* Performed on the Serializing (primary) SC, XFER mode only */
 		case CTL_MSG_DATAMOVE_DONE: {
-			if (msg_info.hdr.serializing_sc == NULL) {
+			if (msg->hdr.serializing_sc == NULL) {
 				printf("%s: serializing_sc == NULL!\n",
 				       __func__);
 				/* XXX KDM now what? */
@@ -760,40 +1538,38 @@
 			 * there was a failure, so we can return status
 			 * back to the initiator.
 			 */
-			io = msg_info.hdr.serializing_sc;
+			io = msg->hdr.serializing_sc;
 			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
-			io->io_hdr.status = msg_info.hdr.status;
-			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
-			io->scsiio.sense_len = msg_info.scsi.sense_len;
-			io->scsiio.sense_residual =msg_info.scsi.sense_residual;
-			io->io_hdr.port_status = msg_info.scsi.fetd_status;
-			io->scsiio.residual = msg_info.scsi.residual;
-			memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
-			       sizeof(io->scsiio.sense_data));
-
-			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &io->io_hdr, links);
-			ctl_wakeup_thread();
+			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
+			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
+			io->io_hdr.port_status = msg->scsi.port_status;
+			io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
+			if (msg->hdr.status != CTL_STATUS_NONE) {
+				io->io_hdr.status = msg->hdr.status;
+				io->scsiio.scsi_status = msg->scsi.scsi_status;
+				io->scsiio.sense_len = msg->scsi.sense_len;
+				memcpy(&io->scsiio.sense_data,
+				    &msg->scsi.sense_data,
+				    msg->scsi.sense_len);
+				if (msg->hdr.status == CTL_SUCCESS)
+					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
+			}
+			ctl_enqueue_isc(io);
 			break;
 		}
 
 		/* Performed on the Originating SC, SER_ONLY mode */
 		case CTL_MSG_R2R:
-			io = msg_info.hdr.original_sc;
+			io = msg->hdr.original_sc;
 			if (io == NULL) {
-				printf("%s: Major Bummer\n", __func__);
-				mtx_unlock(&ctl_softc->ctl_lock);
-				return;
-			} else {
-#if 0
-				printf("pOrig %x\n",(int) ctsio);
-#endif
+				printf("%s: original_sc == NULL!\n",
+				    __func__);
+				break;
 			}
+			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
 			io->io_hdr.msg_type = CTL_MSG_R2R;
-			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
-			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &io->io_hdr, links);
-			ctl_wakeup_thread();
+			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
+			ctl_enqueue_isc(io);
 			break;
 
 		/*
@@ -803,23 +1579,21 @@
 		 * mode
 		 */
 		case CTL_MSG_FINISH_IO:
-			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
-				ctl_isc_handler_finish_xfer(ctl_softc,
-							    &msg_info);
+			if (softc->ha_mode == CTL_HA_MODE_XFER)
+				ctl_isc_handler_finish_xfer(softc, msg);
 			else
-				ctl_isc_handler_finish_ser_only(ctl_softc,
-								&msg_info);
+				ctl_isc_handler_finish_ser_only(softc, msg);
 			break;
 
 		/* Performed on the Originating SC */
 		case CTL_MSG_BAD_JUJU:
-			io = msg_info.hdr.original_sc;
+			io = msg->hdr.original_sc;
 			if (io == NULL) {
 				printf("%s: Bad JUJU!, original_sc is NULL!\n",
 				       __func__);
 				break;
 			}
-			ctl_copy_sense_data(&msg_info, io);
+			ctl_copy_sense_data(msg, io);
 			/*
 			 * IO should have already been cleaned up on other
 			 * SC so clear this flag so we won't send a message
@@ -828,11 +1602,9 @@
 			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
 			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
 
-			/* io = msg_info.hdr.serializing_sc; */
+			/* io = msg->hdr.serializing_sc; */
 			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
-		        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &io->io_hdr, links);
-			ctl_wakeup_thread();
+			ctl_enqueue_isc(io);
 			break;
 
 		/* Handle resets sent from the other side */
@@ -839,147 +1611,246 @@
 		case CTL_MSG_MANAGE_TASKS: {
 			struct ctl_taskio *taskio;
 			taskio = (struct ctl_taskio *)ctl_alloc_io(
-				(void *)ctl_softc->othersc_pool);
-			if (taskio == NULL) {
-				printf("ctl_isc_event_handler: can't allocate "
-				       "ctl_io!\n");
-				/* Bad Juju */
-				/* should I just call the proper reset func
-				   here??? */
-				mtx_unlock(&ctl_softc->ctl_lock);
-				goto bailout;
-			}
+			    softc->othersc_pool);
 			ctl_zero_io((union ctl_io *)taskio);
 			taskio->io_hdr.io_type = CTL_IO_TASK;
 			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
-			taskio->io_hdr.nexus = msg_info.hdr.nexus;
-			taskio->task_action = msg_info.task.task_action;
-			taskio->tag_num = msg_info.task.tag_num;
-			taskio->tag_type = msg_info.task.tag_type;
+			taskio->io_hdr.nexus = msg->hdr.nexus;
+			taskio->task_action = msg->task.task_action;
+			taskio->tag_num = msg->task.tag_num;
+			taskio->tag_type = msg->task.tag_type;
 #ifdef CTL_TIME_IO
 			taskio->io_hdr.start_time = time_uptime;
-			getbintime(&taskio->io_hdr.start_bt);
-#if 0
-			cs_prof_gettime(&taskio->io_hdr.start_ticks);
-#endif
+			getbinuptime(&taskio->io_hdr.start_bt);
 #endif /* CTL_TIME_IO */
-		        STAILQ_INSERT_TAIL(&ctl_softc->task_queue,
-					   &taskio->io_hdr, links);
-			ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
-			ctl_wakeup_thread();
+			ctl_run_task((union ctl_io *)taskio);
 			break;
 		}
 		/* Persistent Reserve action which needs attention */
 		case CTL_MSG_PERS_ACTION:
 			presio = (struct ctl_prio *)ctl_alloc_io(
-				(void *)ctl_softc->othersc_pool);
-			if (presio == NULL) {
-				printf("ctl_isc_event_handler: can't allocate "
-				       "ctl_io!\n");
-				/* Bad Juju */
-				/* Need to set busy and send msg back */
-				mtx_unlock(&ctl_softc->ctl_lock);
-				goto bailout;
-			}
+			    softc->othersc_pool);
 			ctl_zero_io((union ctl_io *)presio);
 			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
-			presio->pr_msg = msg_info.pr;
-		        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &presio->io_hdr, links);
-			ctl_wakeup_thread();
+			presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
+			presio->io_hdr.nexus = msg->hdr.nexus;
+			presio->pr_msg = msg->pr;
+			ctl_enqueue_isc((union ctl_io *)presio);
 			break;
-		case CTL_MSG_SYNC_FE:
-			rcv_sync_msg = 1;
+		case CTL_MSG_UA:
+			ctl_isc_ua(softc, msg, param);
 			break;
-		case CTL_MSG_APS_LOCK: {
-			// It's quicker to execute this then to
-			// queue it.
-			struct ctl_lun *lun;
-			struct ctl_page_index *page_index;
-			struct copan_aps_subpage *current_sp;
-
-			lun = ctl_softc->ctl_luns[msg_info.hdr.nexus.targ_lun];
-			page_index = &lun->mode_pages.index[index_to_aps_page];
-			current_sp = (struct copan_aps_subpage *)
-				     (page_index->page_data +
-				     (page_index->page_len * CTL_PAGE_CURRENT));
-
-			current_sp->lock_active = msg_info.aps.lock_flag;
-		        break;
-		}
+		case CTL_MSG_PORT_SYNC:
+			ctl_isc_port_sync(softc, msg, param);
+			break;
+		case CTL_MSG_LUN_SYNC:
+			ctl_isc_lun_sync(softc, msg, param);
+			break;
+		case CTL_MSG_IID_SYNC:
+			ctl_isc_iid_sync(softc, msg, param);
+			break;
+		case CTL_MSG_LOGIN:
+			ctl_isc_login(softc, msg, param);
+			break;
+		case CTL_MSG_MODE_SYNC:
+			ctl_isc_mode_sync(softc, msg, param);
+			break;
 		default:
-		        printf("How did I get here?\n");
+			printf("Received HA message of unknown type %d\n",
+			    msg->hdr.msg_type);
+			ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
+			break;
 		}
-		mtx_unlock(&ctl_softc->ctl_lock);
-	} else if (event == CTL_HA_EVT_MSG_SENT) {
-		if (param != CTL_HA_STATUS_SUCCESS) {
-			printf("Bad status from ctl_ha_msg_send status %d\n",
-			       param);
+		if (msg != &msgbuf)
+			free(msg, M_CTL);
+	} else if (event == CTL_HA_EVT_LINK_CHANGE) {
+		printf("CTL: HA link status changed from %d to %d\n",
+		    softc->ha_link, param);
+		if (param == softc->ha_link)
+			return;
+		if (softc->ha_link == CTL_HA_LINK_ONLINE) {
+			softc->ha_link = param;
+			ctl_isc_ha_link_down(softc);
+		} else {
+			softc->ha_link = param;
+			if (softc->ha_link == CTL_HA_LINK_ONLINE)
+				ctl_isc_ha_link_up(softc);
 		}
 		return;
-	} else if (event == CTL_HA_EVT_DISCONNECT) {
-		printf("CTL: Got a disconnect from Isc\n");
-		return;
 	} else {
 		printf("ctl_isc_event_handler: Unknown event %d\n", event);
 		return;
 	}
-
-bailout:
-	return;
 }
 
 static void
 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
 {
-	struct scsi_sense_data *sense;
 
-	sense = &dest->scsiio.sense_data;
-	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
+	memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
+	    src->scsi.sense_len);
 	dest->scsiio.scsi_status = src->scsi.scsi_status;
 	dest->scsiio.sense_len = src->scsi.sense_len;
 	dest->io_hdr.status = src->hdr.status;
 }
 
+static void
+ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
+{
+
+	memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
+	    src->scsiio.sense_len);
+	dest->scsi.scsi_status = src->scsiio.scsi_status;
+	dest->scsi.sense_len = src->scsiio.sense_len;
+	dest->hdr.status = src->io_hdr.status;
+}
+
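+/*
+ * Pending unit attentions are tracked per (port, initiator) in the
+ * two-level lun->pending_ua array: the outer index is the port
+ * (initidx / CTL_MAX_INIT_PER_PORT), the inner the initiator within
+ * that port.
+ */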
+void
+ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
+{
+	struct ctl_softc *softc = lun->ctl_softc;
+	ctl_ua_type *pu;
+
+	if (initidx < softc->init_min || initidx >= softc->init_max)
+		return;
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
+	if (pu == NULL)
+		return;
+	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
+}
+
+void
+ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
+{
+	int i;
+
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+	if (lun->pending_ua[port] == NULL)
+		return;
+	for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+		if (port * CTL_MAX_INIT_PER_PORT + i == except)
+			continue;
+		lun->pending_ua[port][i] |= ua;
+	}
+}
+
+void
+ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
+{
+	struct ctl_softc *softc = lun->ctl_softc;
+	int i;
+
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+	for (i = softc->port_min; i < softc->port_max; i++)
+		ctl_est_ua_port(lun, i, except, ua);
+}
+
+void
+ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
+{
+	struct ctl_softc *softc = lun->ctl_softc;
+	ctl_ua_type *pu;
+
+	if (initidx < softc->init_min || initidx >= softc->init_max)
+		return;
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
+	if (pu == NULL)
+		return;
+	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
+}
+
+void
+ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
+{
+	struct ctl_softc *softc = lun->ctl_softc;
+	int i, j;
+
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+	for (i = softc->port_min; i < softc->port_max; i++) {
+		if (lun->pending_ua[i] == NULL)
+			continue;
+		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
+			if (i * CTL_MAX_INIT_PER_PORT + j == except)
+				continue;
+			lun->pending_ua[i][j] &= ~ua;
+		}
+	}
+}
+
+void
+ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
+    ctl_ua_type ua_type)
+{
+	struct ctl_lun *lun;
+
+	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
+	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
+		mtx_lock(&lun->lun_lock);
+		ctl_clr_ua(lun, initidx, ua_type);
+		mtx_unlock(&lun->lun_lock);
+	}
+}
+
 static int
-ctl_init(void)
+ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
 {
-	struct ctl_softc *softc;
-	struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
-	struct ctl_frontend *fe;
+	struct ctl_softc *softc = (struct ctl_softc *)arg1;
 	struct ctl_lun *lun;
-        uint8_t sc_id =0;
-#if 0
-	int i;
-#endif
-	int error, retval;
-	//int isc_retval;
+	struct ctl_lun_req ireq;
+	int error, value;
 
-	retval = 0;
-	ctl_pause_rtr = 0;
-        rcv_sync_msg = 0;
+	value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
+	error = sysctl_handle_int(oidp, &value, 0, req);
+	if ((error != 0) || (req->newptr == NULL))
+		return (error);
 
-	/* If we're disabled, don't initialize. */
-	if (ctl_disable != 0)
-		return (0);
+	mtx_lock(&softc->ctl_lock);
+	if (value == 0)
+		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
+	else
+		softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
+	STAILQ_FOREACH(lun, &softc->lun_list, links) {
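+		/* The backend ioctl may sleep; drop ctl_lock across it. */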
+		mtx_unlock(&softc->ctl_lock);
+		bzero(&ireq, sizeof(ireq));
+		ireq.reqtype = CTL_LUNREQ_MODIFY;
+		ireq.reqdata.modify.lun_id = lun->lun;
+		lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
+		    curthread);
+		if (ireq.status != CTL_LUN_OK) {
+			printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
+			    __func__, ireq.status, ireq.error_str);
+		}
+		mtx_lock(&softc->ctl_lock);
+	}
+	mtx_unlock(&softc->ctl_lock);
+	return (0);
+}
 
-	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
+static int
+ctl_init(void)
+{
+	struct make_dev_args args;
+	struct ctl_softc *softc;
+	int i, error;
+
+	softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
 			       M_WAITOK | M_ZERO);
-	softc = control_softc;
 
-	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
-			      "cam/ctl");
+	make_dev_args_init(&args);
+	args.mda_devsw = &ctl_cdevsw;
+	args.mda_uid = UID_ROOT;
+	args.mda_gid = GID_OPERATOR;
+	args.mda_mode = 0600;
+	args.mda_si_drv1 = softc;
+	error = make_dev_s(&args, &softc->dev, "cam/ctl");
+	if (error != 0) {
+		free(softc, M_DEVBUF);
+		control_softc = NULL;
+		return (error);
+	}
 
-	softc->dev->si_drv1 = softc;
-
-	/*
-	 * By default, return a "bad LUN" peripheral qualifier for unknown
-	 * LUNs.  The user can override this default using the tunable or
-	 * sysctl.  See the comment in ctl_inquiry_std() for more details.
-	 */
-	softc->inquiry_pq_no_lun = 1;
-	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
-			  &softc->inquiry_pq_no_lun);
 	sysctl_ctx_init(&softc->sysctl_ctx);
 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
 		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
@@ -988,216 +1859,142 @@
 	if (softc->sysctl_tree == NULL) {
 		printf("%s: unable to allocate sysctl tree\n", __func__);
 		destroy_dev(softc->dev);
-		free(control_softc, M_DEVBUF);
+		free(softc, M_DEVBUF);
 		control_softc = NULL;
 		return (ENOMEM);
 	}
 
-	SYSCTL_ADD_INT(&softc->sysctl_ctx,
-		       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
-		       "inquiry_pq_no_lun", CTLFLAG_RW,
-		       &softc->inquiry_pq_no_lun, 0,
-		       "Report no lun possible for invalid LUNs");
-
 	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
-	softc->open_count = 0;
+	softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+	softc->flags = 0;
 
-	/*
-	 * Default to actually sending a SYNCHRONIZE CACHE command down to
-	 * the drive.
-	 */
-	softc->flags = CTL_FLAG_REAL_SYNC;
+	TUNABLE_INT_FETCH("kern.cam.ctl.ha_mode", (int *)&softc->ha_mode);
+	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
+	    "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");
 
 	/*
 	 * In Copan's HA scheme, the "master" and "slave" roles are
 	 * figured out through the slot the controller is in.  Although it
 	 * is an active/active system, someone has to be in charge.
- 	 */
-#ifdef NEEDTOPORT
-        scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
-#endif
+	 */
+	TUNABLE_INT_FETCH("kern.cam.ctl.ha_id", &softc->ha_id);
+	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
+	    "HA head ID (0 - no HA)");
+	if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) {
+		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
+		softc->is_single = 1;
+		softc->port_cnt = CTL_MAX_PORTS;
+		softc->port_min = 0;
+	} else {
+		softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES;
+		softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
+	}
+	softc->port_max = softc->port_min + softc->port_cnt;
+	softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
+	softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;
 
-        if (sc_id == 0) {
-		softc->flags |= CTL_FLAG_MASTER_SHELF;
-		persis_offset = 0;
-	} else
-		persis_offset = CTL_MAX_INITIATORS;
+	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
+	    "HA link state (0 - offline, 1 - unknown, 2 - online)");
 
-	/*
-	 * XXX KDM need to figure out where we want to get our target ID
-	 * and WWID.  Is it different on each port?
-	 */
-	softc->target.id = 0;
-	softc->target.wwid[0] = 0x12345678;
-	softc->target.wwid[1] = 0x87654321;
 	STAILQ_INIT(&softc->lun_list);
 	STAILQ_INIT(&softc->pending_lun_queue);
-	STAILQ_INIT(&softc->task_queue);
-	STAILQ_INIT(&softc->incoming_queue);
-	STAILQ_INIT(&softc->rtr_queue);
-	STAILQ_INIT(&softc->done_queue);
-	STAILQ_INIT(&softc->isc_queue);
 	STAILQ_INIT(&softc->fe_list);
+	STAILQ_INIT(&softc->port_list);
 	STAILQ_INIT(&softc->be_list);
-	STAILQ_INIT(&softc->io_pools);
+	ctl_tpc_init(softc);
 
-	lun = &softc->lun;
+	if (worker_threads <= 0)
+		worker_threads = max(1, mp_ncpus / 4);
+	if (worker_threads > CTL_MAX_THREADS)
+		worker_threads = CTL_MAX_THREADS;
 
-	/*
-	 * We don't bother calling these with ctl_lock held here, because,
-	 * in theory, no one else can try to do anything while we're in our
-	 * module init routine.
-	 */
-	if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
-			    &internal_pool)!= 0){
-		printf("ctl: can't allocate %d entry internal pool, "
-		       "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
-		return (ENOMEM);
-	}
+	for (i = 0; i < worker_threads; i++) {
+		struct ctl_thread *thr = &softc->threads[i];
 
-	if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
-			    CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
-		printf("ctl: can't allocate %d entry emergency pool, "
-		       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
-		ctl_pool_free(softc, internal_pool);
-		return (ENOMEM);
-	}
+		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
+		thr->ctl_softc = softc;
+		STAILQ_INIT(&thr->incoming_queue);
+		STAILQ_INIT(&thr->rtr_queue);
+		STAILQ_INIT(&thr->done_queue);
+		STAILQ_INIT(&thr->isc_queue);
 
-	if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
-	                    &other_pool) != 0)
-	{
-		printf("ctl: can't allocate %d entry other SC pool, "
-		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
-		ctl_pool_free(softc, internal_pool);
-		ctl_pool_free(softc, emergency_pool);
-		return (ENOMEM);
+		error = kproc_kthread_add(ctl_work_thread, thr,
+		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
+		if (error != 0) {
+			printf("error creating CTL work thread!\n");
+			return (error);
+		}
 	}
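
The pool above is sized at one worker thread per four CPUs, clamped to
[1, CTL_MAX_THREADS].  A sketch of the rule, with 16 standing in for
CTL_MAX_THREADS (an assumption; the real constant is defined in the
CTL headers):

    static int
    ctl_nworkers(int ncpus)
    {
        int n = ncpus / 4;

        if (n < 1)
            n = 1;
        if (n > 16)             /* CTL_MAX_THREADS stand-in */
            n = 16;
        return (n);
    }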
-
-	softc->internal_pool = internal_pool;
-	softc->emergency_pool = emergency_pool;
-	softc->othersc_pool = other_pool;
-
-	ctl_pool_acquire(internal_pool);
-	ctl_pool_acquire(emergency_pool);
-	ctl_pool_acquire(other_pool);
-
-	/*
-	 * We used to allocate a processor LUN here.  The new scheme is to
-	 * just let the user allocate LUNs as he sees fit.
-	 */
-#if 0
-	mtx_lock(&softc->ctl_lock);
-	ctl_alloc_lun(softc, lun, /*be_lun*/NULL, /*target*/softc->target);
-	mtx_unlock(&softc->ctl_lock);
-#endif
-
-	error = kproc_create(ctl_work_thread, softc, &softc->work_thread, 0, 0,
-			 "ctl_thrd");
+	error = kproc_kthread_add(ctl_lun_thread, softc,
+	    &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun");
 	if (error != 0) {
-		printf("error creating CTL work thread!\n");
-		ctl_free_lun(lun);
-		ctl_pool_free(softc, internal_pool);
-		ctl_pool_free(softc, emergency_pool);
-		ctl_pool_free(softc, other_pool);
+		printf("error creating CTL lun thread!\n");
 		return (error);
 	}
-	printf("ctl: CAM Target Layer loaded\n");
+	error = kproc_kthread_add(ctl_thresh_thread, softc,
+	    &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh");
+	if (error != 0) {
+		printf("error creating CTL threshold thread!\n");
+		return (error);
+	}
 
-	/*
-	 * Initialize the initiator and portname mappings
-	 */
-	memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid));
+	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
+	    softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");
 
-	/*
-	 * Initialize the ioctl front end.
-	 */
-	fe = &softc->ioctl_info.fe;
-	sprintf(softc->ioctl_info.port_name, "CTL ioctl");
-	fe->port_type = CTL_PORT_IOCTL;
-	fe->num_requested_ctl_io = 100;
-	fe->port_name = softc->ioctl_info.port_name;
-	fe->port_online = ctl_ioctl_online;
-	fe->port_offline = ctl_ioctl_offline;
-	fe->onoff_arg = &softc->ioctl_info;
-	fe->targ_enable = ctl_ioctl_targ_enable;
-	fe->targ_disable = ctl_ioctl_targ_disable;
-	fe->lun_enable = ctl_ioctl_lun_enable;
-	fe->lun_disable = ctl_ioctl_lun_disable;
-	fe->targ_lun_arg = &softc->ioctl_info;
-	fe->fe_datamove = ctl_ioctl_datamove;
-	fe->fe_done = ctl_ioctl_done;
-	fe->max_targets = 15;
-	fe->max_target_id = 15;
-
-	if (ctl_frontend_register(&softc->ioctl_info.fe,
-	                  (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
-		printf("ctl: ioctl front end registration failed, will "
-		       "continue anyway\n");
+	if (softc->is_single == 0) {
+		if (ctl_frontend_register(&ha_frontend) != 0)
+			softc->is_single = 1;
 	}
-
-#ifdef CTL_IO_DELAY
-	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
-		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
-		       sizeof(struct callout), CTL_TIMER_BYTES);
-		return (EINVAL);
-	}
-#endif /* CTL_IO_DELAY */
-
 	return (0);
 }
 
-void
+static int
 ctl_shutdown(void)
 {
-	struct ctl_softc *softc;
-	struct ctl_lun *lun, *next_lun;
-	struct ctl_io_pool *pool, *next_pool;
+	struct ctl_softc *softc = control_softc;
+	int i;
 
-	softc = (struct ctl_softc *)control_softc;
+	if (softc->is_single == 0)
+		ctl_frontend_deregister(&ha_frontend);
 
-	if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0)
-		printf("ctl: ioctl front end deregistration failed\n");
+	destroy_dev(softc->dev);
 
-	mtx_lock(&softc->ctl_lock);
-
-	/*
-	 * Free up each LUN.
-	 */
-	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
-		next_lun = STAILQ_NEXT(lun, links);
-		ctl_free_lun(lun);
+	/* Shutdown CTL threads. */
+	softc->shutdown = 1;
+	for (i = 0; i < worker_threads; i++) {
+		struct ctl_thread *thr = &softc->threads[i];
+		while (thr->thread != NULL) {
+			wakeup(thr);
+			if (thr->thread != NULL)
+				pause("CTL thr shutdown", 1);
+		}
+		mtx_destroy(&thr->queue_lock);
 	}
-
-	/*
-	 * This will rip the rug out from under any FETDs or anyone else
-	 * that has a pool allocated.  Since we increment our module
-	 * refcount any time someone outside the main CTL module allocates
-	 * a pool, we shouldn't have any problems here.  The user won't be
-	 * able to unload the CTL module until client modules have
-	 * successfully unloaded.
-	 */
-	for (pool = STAILQ_FIRST(&softc->io_pools); pool != NULL;
-	     pool = next_pool) {
-		next_pool = STAILQ_NEXT(pool, links);
-		ctl_pool_free(softc, pool);
+	while (softc->lun_thread != NULL) {
+		wakeup(&softc->pending_lun_queue);
+		if (softc->lun_thread != NULL)
+			pause("CTL thr shutdown", 1);
 	}
+	while (softc->thresh_thread != NULL) {
+		wakeup(softc->thresh_thread);
+		if (softc->thresh_thread != NULL)
+			pause("CTL thr shutdown", 1);
+	}
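
The unload path polls each thread pointer with wakeup()/pause() until
the thread publishes NULL.  A hypothetical sketch of the worker-side
half of that handshake, assuming each thread checks softc->shutdown at
the top of its loop:

    static void
    ctl_worker_exit_check(struct ctl_softc *softc, struct ctl_thread *thr)
    {
        if (softc->shutdown == 0)
            return;
        thr->thread = NULL;     /* observed by the poll loop above */
        kthread_exit();         /* never returns */
    }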
 
-	mtx_unlock(&softc->ctl_lock);
-
-#if 0
-	ctl_shutdown_thread(softc->work_thread);
-#endif
-
+	ctl_tpc_shutdown(softc);
+	uma_zdestroy(softc->io_zone);
 	mtx_destroy(&softc->ctl_lock);
 
-	destroy_dev(softc->dev);
-
 	sysctl_ctx_free(&softc->sysctl_ctx);
 
-	free(control_softc, M_DEVBUF);
+	free(softc, M_DEVBUF);
 	control_softc = NULL;
-
-	printf("ctl: CAM Target Layer unloaded\n");
+	return (0);
 }
 
 static int
@@ -1208,7 +2005,7 @@
 	case MOD_LOAD:
 		return (ctl_init());
 	case MOD_UNLOAD:
-		return (EBUSY);
+		return (ctl_shutdown());
 	default:
 		return (EOPNOTSUPP);
 	}
@@ -1230,180 +2027,31 @@
 	return (0);
 }
 
-int
-ctl_port_enable(ctl_port_type port_type)
-{
-	struct ctl_softc *softc;
-	struct ctl_frontend *fe;
-
-	if (ctl_is_single == 0) {
-		union ctl_ha_msg msg_info;
-		int isc_retval;
-
-#if 0
-		printf("%s: HA mode, synchronizing frontend enable\n",
-		        __func__);
-#endif
-		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
-	        if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
-		        sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
-			printf("Sync msg send error retval %d\n", isc_retval);
-		}
-		if (!rcv_sync_msg) {
-			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
-			        sizeof(msg_info), 1);
-		}
-#if 0
-        	printf("CTL:Frontend Enable\n");
-	} else {
-		printf("%s: single mode, skipping frontend synchronization\n",
-		        __func__);
-#endif
-	}
-
-	softc = control_softc;
-
-	STAILQ_FOREACH(fe, &softc->fe_list, links) {
-		if (port_type & fe->port_type)
-		{
-#if 0
-			printf("port %d\n", fe->targ_port);
-#endif
-			ctl_frontend_online(fe);
-		}
-	}
-
-	return (0);
-}
-
-int
-ctl_port_disable(ctl_port_type port_type)
-{
-	struct ctl_softc *softc;
-	struct ctl_frontend *fe;
-
-	softc = control_softc;
-
-	STAILQ_FOREACH(fe, &softc->fe_list, links) {
-		if (port_type & fe->port_type)
-			ctl_frontend_offline(fe);
-	}
-
-	return (0);
-}
-
 /*
- * Returns 0 for success, 1 for failure.
- * Currently the only failure mode is if there aren't enough entries
- * allocated.  So, in case of a failure, look at num_entries_dropped,
- * reallocate and try again.
- */
-int
-ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
-	      int *num_entries_filled, int *num_entries_dropped,
-	      ctl_port_type port_type, int no_virtual)
-{
-	struct ctl_softc *softc;
-	struct ctl_frontend *fe;
-	int entries_dropped, entries_filled;
-	int retval;
-	int i;
-
-	softc = control_softc;
-
-	retval = 0;
-	entries_filled = 0;
-	entries_dropped = 0;
-
-	i = 0;
-	mtx_lock(&softc->ctl_lock);
-	STAILQ_FOREACH(fe, &softc->fe_list, links) {
-		struct ctl_port_entry *entry;
-
-		if ((fe->port_type & port_type) == 0)
-			continue;
-
-		if ((no_virtual != 0)
-		 && (fe->virtual_port != 0))
-			continue;
-
-		if (entries_filled >= num_entries_alloced) {
-			entries_dropped++;
-			continue;
-		}
-		entry = &entries[i];
-
-		entry->port_type = fe->port_type;
-		strlcpy(entry->port_name, fe->port_name,
-			sizeof(entry->port_name));
-		entry->physical_port = fe->physical_port;
-		entry->virtual_port = fe->virtual_port;
-		entry->wwnn = fe->wwnn;
-		entry->wwpn = fe->wwpn;
-
-		i++;
-		entries_filled++;
-	}
-
-	mtx_unlock(&softc->ctl_lock);
-
-	if (entries_dropped > 0)
-		retval = 1;
-
-	*num_entries_dropped = entries_dropped;
-	*num_entries_filled = entries_filled;
-
-	return (retval);
-}
-
-static void
-ctl_ioctl_online(void *arg)
-{
-	struct ctl_ioctl_info *ioctl_info;
-
-	ioctl_info = (struct ctl_ioctl_info *)arg;
-
-	ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
-}
-
-static void
-ctl_ioctl_offline(void *arg)
-{
-	struct ctl_ioctl_info *ioctl_info;
-
-	ioctl_info = (struct ctl_ioctl_info *)arg;
-
-	ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
-}
-
-/*
  * Remove an initiator by port number and initiator ID.
- * Returns 0 for success, 1 for failure.
- * Assumes the caller does NOT hold the CTL lock.
+ * Returns 0 for success, -1 for failure.
  */
 int
-ctl_remove_initiator(int32_t targ_port, uint32_t iid)
+ctl_remove_initiator(struct ctl_port *port, int iid)
 {
-	struct ctl_softc *softc;
+	struct ctl_softc *softc = port->ctl_softc;
+	int last;
 
-	softc = control_softc;
+	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
 
-	if ((targ_port < 0)
-	 || (targ_port > CTL_MAX_PORTS)) {
-		printf("%s: invalid port number %d\n", __func__, targ_port);
-		return (1);
-	}
 	if (iid > CTL_MAX_INIT_PER_PORT) {
 		printf("%s: initiator ID %u > maximun %u!\n",
 		       __func__, iid, CTL_MAX_INIT_PER_PORT);
-		return (1);
+		return (-1);
 	}
 
 	mtx_lock(&softc->ctl_lock);
-
-	softc->wwpn_iid[targ_port][iid].in_use = 0;
-
+	last = (--port->wwpn_iid[iid].in_use == 0);
+	port->wwpn_iid[iid].last_use = time_uptime;
 	mtx_unlock(&softc->ctl_lock);
+	if (last)
+		ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
+	ctl_isc_announce_iid(port, iid);
 
 	return (0);
 }
@@ -1410,41 +2058,92 @@
 
 /*
  * Add an initiator to the initiator map.
- * Returns 0 for success, 1 for failure.
- * Assumes the caller does NOT hold the CTL lock.
+ * Returns iid for success, < 0 for failure.
  */
 int
-ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
+ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
 {
-	struct ctl_softc *softc;
-	int retval;
+	struct ctl_softc *softc = port->ctl_softc;
+	time_t best_time;
+	int i, best;
 
-	softc = control_softc;
+	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
 
-	retval = 0;
-
-	if ((targ_port < 0)
-	 || (targ_port > CTL_MAX_PORTS)) {
-		printf("%s: invalid port number %d\n", __func__, targ_port);
-		return (1);
-	}
-	if (iid > CTL_MAX_INIT_PER_PORT) {
-		printf("%s: WWPN %#jx initiator ID %u > maximun %u!\n",
+	if (iid >= CTL_MAX_INIT_PER_PORT) {
+		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
 		       __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
-		return (1);
+		free(name, M_CTL);
+		return (-1);
 	}
 
 	mtx_lock(&softc->ctl_lock);
 
-	if (softc->wwpn_iid[targ_port][iid].in_use != 0) {
+	if (iid < 0 && (wwpn != 0 || name != NULL)) {
+		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+			if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
+				iid = i;
+				break;
+			}
+			if (name != NULL && port->wwpn_iid[i].name != NULL &&
+			    strcmp(name, port->wwpn_iid[i].name) == 0) {
+				iid = i;
+				break;
+			}
+		}
+	}
+
+	if (iid < 0) {
+		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+			if (port->wwpn_iid[i].in_use == 0 &&
+			    port->wwpn_iid[i].wwpn == 0 &&
+			    port->wwpn_iid[i].name == NULL) {
+				iid = i;
+				break;
+			}
+		}
+	}
+
+	if (iid < 0) {
+		best = -1;
+		best_time = INT32_MAX;
+		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+			if (port->wwpn_iid[i].in_use == 0) {
+				if (port->wwpn_iid[i].last_use < best_time) {
+					best = i;
+					best_time = port->wwpn_iid[i].last_use;
+				}
+			}
+		}
+		iid = best;
+	}
+
+	if (iid < 0) {
+		mtx_unlock(&softc->ctl_lock);
+		free(name, M_CTL);
+		return (-2);
+	}
+
+	if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
 		/*
-		 * We don't treat this as an error.
+		 * This is not an error yet.
 		 */
-		if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) {
-			printf("%s: port %d iid %u WWPN %#jx arrived again?\n",
-			       __func__, targ_port, iid, (uintmax_t)wwpn);
-			goto bailout;
+		if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
+#if 0
+			printf("%s: port %d iid %u WWPN %#jx arrived"
+			    " again\n", __func__, port->targ_port,
+			    iid, (uintmax_t)wwpn);
+#endif
+			goto take;
 		}
+		if (name != NULL && port->wwpn_iid[iid].name != NULL &&
+		    strcmp(name, port->wwpn_iid[iid].name) == 0) {
+#if 0
+			printf("%s: port %d iid %u name '%s' arrived"
+			    " again\n", __func__, port->targ_port,
+			    iid, name);
+#endif
+			goto take;
+		}
 
 		/*
 		 * This is an error, but what do we do about it?  The
@@ -1451,217 +2150,76 @@
 		 * driver is telling us we have a new WWPN for this
 		 * initiator ID, so we pretty much need to use it.
 		 */
-		printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is "
-		       "still at that address\n", __func__, targ_port, iid,
-		       (uintmax_t)wwpn,
-		       (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn);
-
-		/*
-		 * XXX KDM clear have_ca and ua_pending on each LUN for
-		 * this initiator.
-		 */
+		printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
+		    " but WWPN %#jx '%s' is still at that address\n",
+		    __func__, port->targ_port, iid, wwpn, name,
+		    (uintmax_t)port->wwpn_iid[iid].wwpn,
+		    port->wwpn_iid[iid].name);
 	}
-	softc->wwpn_iid[targ_port][iid].in_use = 1;
-	softc->wwpn_iid[targ_port][iid].iid = iid;
-	softc->wwpn_iid[targ_port][iid].wwpn = wwpn;
-	softc->wwpn_iid[targ_port][iid].port = targ_port;
-
-bailout:
-
+take:
+	free(port->wwpn_iid[iid].name, M_CTL);
+	port->wwpn_iid[iid].name = name;
+	port->wwpn_iid[iid].wwpn = wwpn;
+	port->wwpn_iid[iid].in_use++;
 	mtx_unlock(&softc->ctl_lock);
+	ctl_isc_announce_iid(port, iid);
 
-	return (retval);
+	return (iid);
 }
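
IID selection above is a three-pass search: rebind to a slot that
already holds the same WWPN or name, else take a never-used slot, else
recycle the least-recently-used free slot.  Condensed into a single
hypothetical helper (locking and the in_use reference count are
omitted):

    static int
    ctl_pick_iid(struct ctl_port *port, uint64_t wwpn, const char *name)
    {
        time_t best_time = INT32_MAX;
        int i, best = -1;

        /* Pass 1: rebind to a slot already holding this WWPN or name. */
        for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
            if ((wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) ||
                (name != NULL && port->wwpn_iid[i].name != NULL &&
                 strcmp(name, port->wwpn_iid[i].name) == 0))
                return (i);
        }
        /* Pass 2: take a slot that has never been used. */
        for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
            if (port->wwpn_iid[i].in_use == 0 &&
                port->wwpn_iid[i].wwpn == 0 &&
                port->wwpn_iid[i].name == NULL)
                return (i);
        }
        /* Pass 3: recycle the least-recently-used free slot. */
        for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
            if (port->wwpn_iid[i].in_use == 0 &&
                port->wwpn_iid[i].last_use < best_time) {
                best = i;
                best_time = port->wwpn_iid[i].last_use;
            }
        }
        return (best);          /* -1: every slot still referenced */
    }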
 
-/*
- * XXX KDM should we pretend to do something in the target/lun
- * enable/disable functions?
- */
 static int
-ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id)
+ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
 {
-	return (0);
-}
+	int len;
 
-static int
-ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id)
-{
-	return (0);
-}
-
-static int
-ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
-{
-	return (0);
-}
-
-static int
-ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
-{
-	return (0);
-}
-
-/*
- * Data movement routine for the CTL ioctl frontend port.
- */
-static int
-ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
-{
-	struct ctl_sg_entry *ext_sglist, *kern_sglist;
-	struct ctl_sg_entry ext_entry, kern_entry;
-	int ext_sglen, ext_sg_entries, kern_sg_entries;
-	int ext_sg_start, ext_offset;
-	int len_to_copy, len_copied;
-	int kern_watermark, ext_watermark;
-	int ext_sglist_malloced;
-	int i, j;
-
-	ext_sglist_malloced = 0;
-	ext_sg_start = 0;
-	ext_offset = 0;
-
-	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
-
-	/*
-	 * If this flag is set, fake the data transfer.
-	 */
-	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
-		ctsio->ext_data_filled = ctsio->ext_data_len;
-		goto bailout;
+	switch (port->port_type) {
+	case CTL_PORT_FC:
+	{
+		struct scsi_transportid_fcp *id =
+		    (struct scsi_transportid_fcp *)buf;
+		if (port->wwpn_iid[iid].wwpn == 0)
+			return (0);
+		memset(id, 0, sizeof(*id));
+		id->format_protocol = SCSI_PROTO_FC;
+		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
+		return (sizeof(*id));
 	}
-
-	/*
-	 * To simplify things here, if we have a single buffer, stick it in
-	 * a S/G entry and just make it a single entry S/G list.
-	 */
-	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
-		int len_seen;
-
-		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
-
-		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
-							   M_WAITOK);
-		ext_sglist_malloced = 1;
-		if (copyin(ctsio->ext_data_ptr, ext_sglist,
-				   ext_sglen) != 0) {
-			ctl_set_internal_failure(ctsio,
-						 /*sks_valid*/ 0,
-						 /*retry_count*/ 0);
-			goto bailout;
-		}
-		ext_sg_entries = ctsio->ext_sg_entries;
-		len_seen = 0;
-		for (i = 0; i < ext_sg_entries; i++) {
-			if ((len_seen + ext_sglist[i].len) >=
-			     ctsio->ext_data_filled) {
-				ext_sg_start = i;
-				ext_offset = ctsio->ext_data_filled - len_seen;
-				break;
-			}
-			len_seen += ext_sglist[i].len;
-		}
-	} else {
-		ext_sglist = &ext_entry;
-		ext_sglist->addr = ctsio->ext_data_ptr;
-		ext_sglist->len = ctsio->ext_data_len;
-		ext_sg_entries = 1;
-		ext_sg_start = 0;
-		ext_offset = ctsio->ext_data_filled;
+	case CTL_PORT_ISCSI:
+	{
+		struct scsi_transportid_iscsi_port *id =
+		    (struct scsi_transportid_iscsi_port *)buf;
+		if (port->wwpn_iid[iid].name == NULL)
+			return (0);
+		memset(id, 0, 256);
+		id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
+		    SCSI_PROTO_ISCSI;
+		len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
+		len = roundup2(min(len, 252), 4);
+		scsi_ulto2b(len, id->additional_length);
+		return (sizeof(*id) + len);
 	}
-
-	if (ctsio->kern_sg_entries > 0) {
-		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
-		kern_sg_entries = ctsio->kern_sg_entries;
-	} else {
-		kern_sglist = &kern_entry;
-		kern_sglist->addr = ctsio->kern_data_ptr;
-		kern_sglist->len = ctsio->kern_data_len;
-		kern_sg_entries = 1;
+	case CTL_PORT_SAS:
+	{
+		struct scsi_transportid_sas *id =
+		    (struct scsi_transportid_sas *)buf;
+		if (port->wwpn_iid[iid].wwpn == 0)
+			return (0);
+		memset(id, 0, sizeof(*id));
+		id->format_protocol = SCSI_PROTO_SAS;
+		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
+		return (sizeof(*id));
 	}
-
-
-	kern_watermark = 0;
-	ext_watermark = ext_offset;
-	len_copied = 0;
-	for (i = ext_sg_start, j = 0;
-	     i < ext_sg_entries && j < kern_sg_entries;) {
-		uint8_t *ext_ptr, *kern_ptr;
-
-		len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
-				      kern_sglist[j].len - kern_watermark);
-
-		ext_ptr = (uint8_t *)ext_sglist[i].addr;
-		ext_ptr = ext_ptr + ext_watermark;
-		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
-			/*
-			 * XXX KDM fix this!
-			 */
-			panic("need to implement bus address support");
-#if 0
-			kern_ptr = bus_to_virt(kern_sglist[j].addr);
-#endif
-		} else
-			kern_ptr = (uint8_t *)kern_sglist[j].addr;
-		kern_ptr = kern_ptr + kern_watermark;
-
-		kern_watermark += len_to_copy;
-		ext_watermark += len_to_copy;
-
-		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
-		     CTL_FLAG_DATA_IN) {
-			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
-					 "bytes to user\n", len_to_copy));
-			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
-					 "to %p\n", kern_ptr, ext_ptr));
-			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
-				ctl_set_internal_failure(ctsio,
-							 /*sks_valid*/ 0,
-							 /*retry_count*/ 0);
-				goto bailout;
-			}
-		} else {
-			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
-					 "bytes from user\n", len_to_copy));
-			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
-					 "to %p\n", ext_ptr, kern_ptr));
-			if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){
-				ctl_set_internal_failure(ctsio,
-							 /*sks_valid*/ 0,
-							 /*retry_count*/0);
-				goto bailout;
-			}
-		}
-
-		len_copied += len_to_copy;
-
-		if (ext_sglist[i].len == ext_watermark) {
-			i++;
-			ext_watermark = 0;
-		}
-
-		if (kern_sglist[j].len == kern_watermark) {
-			j++;
-			kern_watermark = 0;
-		}
+	default:
+	{
+		struct scsi_transportid_spi *id =
+		    (struct scsi_transportid_spi *)buf;
+		memset(id, 0, sizeof(*id));
+		id->format_protocol = SCSI_PROTO_SPI;
+		scsi_ulto2b(iid, id->scsi_addr);
+		scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
+		return (sizeof(*id));
 	}
-
-	ctsio->ext_data_filled += len_copied;
-
-	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
-			 "kern_sg_entries: %d\n", ext_sg_entries,
-			 kern_sg_entries));
-	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
-			 "kern_data_len = %d\n", ctsio->ext_data_len,
-			 ctsio->kern_data_len));
-
-
-	/* XXX KDM set residual?? */
-bailout:
-
-	if (ext_sglist_malloced != 0)
-		free(ext_sglist, M_CTL);
-
-	return (CTL_RETVAL_COMPLETE);
+	}
 }
 
 /*
@@ -1672,54 +2230,72 @@
  * command on this side (XFER mode) or tell the other side to execute it
  * (SER_ONLY mode).
  */
-static int
-ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
+static void
+ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
 {
-	struct ctl_softc *ctl_softc;
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_port *port = CTL_PORT(ctsio);
 	union ctl_ha_msg msg_info;
 	struct ctl_lun *lun;
-	int retval = 0;
+	const struct ctl_cmd_entry *entry;
+	uint32_t targ_lun;
 
-	ctl_softc = control_softc;
-	if (have_lock == 0)
-		mtx_lock(&ctl_softc->ctl_lock);
+	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
 
-	lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
-	if (lun==NULL)
-	{
+	/* Make sure that we know about this port. */
+	if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
+		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
+					 /*retry_count*/ 1);
+		goto badjuju;
+	}
+
+	/* Make sure that we know about this LUN. */
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+
 		/*
-		 * Why isn't LUN defined? The other side wouldn't
-		 * send a cmd if the LUN is undefined.
+		 * The other node would not send this request to us unless
+		 * it received an announcement that we are the primary node
+		 * for this LUN.  If this LUN does not exist now, it is
+		 * probably the result of a race, so respond to the
+		 * initiator in the most opaque way.
 		 */
-		printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);
+		ctl_set_busy(ctsio);
+		goto badjuju;
+	}
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
 
-		/* "Logical unit not supported" */
-		ctl_set_sense_data(&msg_info.scsi.sense_data,
-				   lun,
-				   /*sense_format*/SSD_TYPE_NONE,
-				   /*current_error*/ 1,
-				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
-				   /*asc*/ 0x25,
-				   /*ascq*/ 0x00,
-				   SSD_ELEM_NONE);
+	/*
+	 * If the LUN is invalid, pretend that it doesn't exist.
+	 * It will go away as soon as all pending I/Os have completed.
+	 */
+	if (lun->flags & CTL_LUN_DISABLED) {
+		mtx_unlock(&lun->lun_lock);
+		ctl_set_busy(ctsio);
+		goto badjuju;
+	}
 
-		msg_info.scsi.sense_len = SSD_FULL_SIZE;
-		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
-		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
-		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
-		msg_info.hdr.serializing_sc = NULL;
-		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-	        if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
-				sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
-		}
-		if (have_lock == 0)
-			mtx_unlock(&ctl_softc->ctl_lock);
-		return(1);
-
+	entry = ctl_get_cmd_entry(ctsio, NULL);
+	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
+		mtx_unlock(&lun->lun_lock);
+		goto badjuju;
 	}
 
-    	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+	CTL_LUN(ctsio) = lun;
+	CTL_BACKEND_LUN(ctsio) = lun->be_lun;
 
+	/*
+	 * Every I/O goes into the OOA queue for a
+	 * particular LUN, and stays there until completion.
+	 */
+#ifdef CTL_TIME_IO
+	if (TAILQ_EMPTY(&lun->ooa_queue))
+		lun->idle_time += getsbinuptime() - lun->last_busy;
+#endif
+	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+
 	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
 		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
 		 ooa_links))) {
@@ -1727,272 +2303,69 @@
 		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
 		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
 				  blocked_links);
+		mtx_unlock(&lun->lun_lock);
 		break;
 	case CTL_ACTION_PASS:
 	case CTL_ACTION_SKIP:
-		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+		if (softc->ha_mode == CTL_HA_MODE_XFER) {
 			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
-			STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
-					   &ctsio->io_hdr, links);
+			ctl_enqueue_rtr((union ctl_io *)ctsio);
+			mtx_unlock(&lun->lun_lock);
 		} else {
+			ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+			mtx_unlock(&lun->lun_lock);
 
 			/* send msg back to other side */
 			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
 			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
 			msg_info.hdr.msg_type = CTL_MSG_R2R;
-#if 0
-			printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
-#endif
-		        if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
-			    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
-			}
+			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+			    sizeof(msg_info.hdr), M_WAITOK);
 		}
 		break;
 	case CTL_ACTION_OVERLAP:
-		/* OVERLAPPED COMMANDS ATTEMPTED */
-		ctl_set_sense_data(&msg_info.scsi.sense_data,
-				   lun,
-				   /*sense_format*/SSD_TYPE_NONE,
-				   /*current_error*/ 1,
-				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
-				   /*asc*/ 0x4E,
-				   /*ascq*/ 0x00,
-				   SSD_ELEM_NONE);
-
-		msg_info.scsi.sense_len = SSD_FULL_SIZE;
-		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
-		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
-		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
-		msg_info.hdr.serializing_sc = NULL;
-		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-#if 0
-		printf("BAD JUJU:Major Bummer Overlap\n");
-#endif
 		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
-		retval = 1;
-		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
-		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
-		}
-		break;
+		mtx_unlock(&lun->lun_lock);
+		ctl_set_overlapped_cmd(ctsio);
+		goto badjuju;
 	case CTL_ACTION_OVERLAP_TAG:
-		/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
-		ctl_set_sense_data(&msg_info.scsi.sense_data,
-				   lun,
-				   /*sense_format*/SSD_TYPE_NONE,
-				   /*current_error*/ 1,
-				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
-				   /*asc*/ 0x4D,
-				   /*ascq*/ ctsio->tag_num & 0xff,
-				   SSD_ELEM_NONE);
-
-		msg_info.scsi.sense_len = SSD_FULL_SIZE;
-		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
-		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
-		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
-		msg_info.hdr.serializing_sc = NULL;
-		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-#if 0
-		printf("BAD JUJU:Major Bummer Overlap Tag\n");
-#endif
 		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
-		retval = 1;
-		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
-		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
-		}
-		break;
+		mtx_unlock(&lun->lun_lock);
+		ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
+		goto badjuju;
 	case CTL_ACTION_ERROR:
 	default:
-		/* "Internal target failure" */
-		ctl_set_sense_data(&msg_info.scsi.sense_data,
-				   lun,
-				   /*sense_format*/SSD_TYPE_NONE,
-				   /*current_error*/ 1,
-				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
-				   /*asc*/ 0x44,
-				   /*ascq*/ 0x00,
-				   SSD_ELEM_NONE);
+		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+		mtx_unlock(&lun->lun_lock);
 
-		msg_info.scsi.sense_len = SSD_FULL_SIZE;
-		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
-		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
+					 /*retry_count*/ 0);
+badjuju:
+		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
 		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
 		msg_info.hdr.serializing_sc = NULL;
 		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-#if 0
-		printf("BAD JUJU:Major Bummer HW Error\n");
-#endif
-		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
-		retval = 1;
-		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
-		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
-		}
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+		    sizeof(msg_info.scsi), M_WAITOK);
+		ctl_free_io((union ctl_io *)ctsio);
 		break;
 	}
-	if (have_lock == 0)
-		mtx_unlock(&ctl_softc->ctl_lock);
-	return (retval);
 }
 
-static int
-ctl_ioctl_submit_wait(union ctl_io *io)
-{
-	struct ctl_fe_ioctl_params params;
-	ctl_fe_ioctl_state last_state;
-	int done, retval;
-
-	retval = 0;
-
-	bzero(&params, sizeof(params));
-
-	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
-	cv_init(&params.sem, "ctlioccv");
-	params.state = CTL_IOCTL_INPROG;
-	last_state = params.state;
-
-	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;
-
-	CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));
-
-	/* This shouldn't happen */
-	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
-		return (retval);
-
-	done = 0;
-
-	do {
-		mtx_lock(&params.ioctl_mtx);
-		/*
-		 * Check the state here, and don't sleep if the state has
-		 * already changed (i.e. wakeup has already occured, but we
-		 * weren't waiting yet).
-		 */
-		if (params.state == last_state) {
-			/* XXX KDM cv_wait_sig instead? */
-			cv_wait(&params.sem, &params.ioctl_mtx);
-		}
-		last_state = params.state;
-
-		switch (params.state) {
-		case CTL_IOCTL_INPROG:
-			/* Why did we wake up? */
-			/* XXX KDM error here? */
-			mtx_unlock(&params.ioctl_mtx);
-			break;
-		case CTL_IOCTL_DATAMOVE:
-			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));
-
-			/*
-			 * change last_state back to INPROG to avoid
-			 * deadlock on subsequent data moves.
-			 */
-			params.state = last_state = CTL_IOCTL_INPROG;
-
-			mtx_unlock(&params.ioctl_mtx);
-			ctl_ioctl_do_datamove(&io->scsiio);
-			/*
-			 * Note that in some cases, most notably writes,
-			 * this will queue the I/O and call us back later.
-			 * In other cases, generally reads, this routine
-			 * will immediately call back and wake us up,
-			 * probably using our own context.
-			 */
-			io->scsiio.be_move_done(io);
-			break;
-		case CTL_IOCTL_DONE:
-			mtx_unlock(&params.ioctl_mtx);
-			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
-			done = 1;
-			break;
-		default:
-			mtx_unlock(&params.ioctl_mtx);
-			/* XXX KDM error here? */
-			break;
-		}
-	} while (done == 0);
-
-	mtx_destroy(&params.ioctl_mtx);
-	cv_destroy(&params.sem);
-
-	return (CTL_RETVAL_COMPLETE);
-}
-
-static void
-ctl_ioctl_datamove(union ctl_io *io)
-{
-	struct ctl_fe_ioctl_params *params;
-
-	params = (struct ctl_fe_ioctl_params *)
-		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
-
-	mtx_lock(&params->ioctl_mtx);
-	params->state = CTL_IOCTL_DATAMOVE;
-	cv_broadcast(&params->sem);
-	mtx_unlock(&params->ioctl_mtx);
-}
-
-static void
-ctl_ioctl_done(union ctl_io *io)
-{
-	struct ctl_fe_ioctl_params *params;
-
-	params = (struct ctl_fe_ioctl_params *)
-		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
-
-	mtx_lock(&params->ioctl_mtx);
-	params->state = CTL_IOCTL_DONE;
-	cv_broadcast(&params->sem);
-	mtx_unlock(&params->ioctl_mtx);
-}
-
-static void
-ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
-{
-	struct ctl_fe_ioctl_startstop_info *sd_info;
-
-	sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;
-
-	sd_info->hs_info.status = metatask->status;
-	sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
-	sd_info->hs_info.luns_complete =
-		metatask->taskinfo.startstop.luns_complete;
-	sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;
-
-	cv_broadcast(&sd_info->sem);
-}
-
-static void
-ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
-{
-	struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;
-
-	fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;
-
-	mtx_lock(fe_bbr_info->lock);
-	fe_bbr_info->bbr_info->status = metatask->status;
-	fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
-	fe_bbr_info->wakeup_done = 1;
-	mtx_unlock(fe_bbr_info->lock);
-
-	cv_broadcast(&fe_bbr_info->sem);
-}
-
 /*
- * Must be called with the ctl_lock held.
- * Returns 0 for success, errno for failure.
+ * Fill kern_entries with the contents of one LUN's OOA queue.
  */
-static int
+static void
 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
-		   struct ctl_ooa *ooa_hdr)
+		   struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
 {
 	union ctl_io *io;
-	int retval;
 
-	retval = 0;
-
+	mtx_lock(&lun->lun_lock);
 	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
 	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
 	     ooa_links)) {
-		struct ctl_ooa_entry *cur_entry, entry;
+		struct ctl_ooa_entry *entry;
 
 		/*
 		 * If we've got more than we can fit, just count the
@@ -2001,39 +2374,31 @@
 		if (*cur_fill_num >= ooa_hdr->alloc_num)
 			continue;
 
-		cur_entry = &ooa_hdr->entries[*cur_fill_num];
+		entry = &kern_entries[*cur_fill_num];
 
-		bzero(&entry, sizeof(entry));
-
-		entry.tag_num = io->scsiio.tag_num;
-		entry.lun_num = lun->lun;
+		entry->tag_num = io->scsiio.tag_num;
+		entry->lun_num = lun->lun;
 #ifdef CTL_TIME_IO
-		entry.start_bt = io->io_hdr.start_bt;
+		entry->start_bt = io->io_hdr.start_bt;
 #endif
-		bcopy(io->scsiio.cdb, entry.cdb, io->scsiio.cdb_len);
-		entry.cdb_len = io->scsiio.cdb_len;
+		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
+		entry->cdb_len = io->scsiio.cdb_len;
 		if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
-			entry.cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
+			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
 
 		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
-			entry.cmd_flags |= CTL_OOACMD_FLAG_DMA;
+			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;
 
 		if (io->io_hdr.flags & CTL_FLAG_ABORT)
-			entry.cmd_flags |= CTL_OOACMD_FLAG_ABORT;
+			entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;
 
 		if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
-			entry.cmd_flags |= CTL_OOACMD_FLAG_RTR;
+			entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;
 
 		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
-			entry.cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
-
-		retval = copyout(&entry, cur_entry, sizeof(entry));
-
-		if (retval != 0)
-			break;
+			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
 	}
-
-	return (retval);
+	mtx_unlock(&lun->lun_lock);
 }
 
 static void *
@@ -2056,40 +2421,40 @@
 }
 
 static void
-ctl_free_args(int num_be_args, struct ctl_be_arg *be_args)
+ctl_free_args(int num_args, struct ctl_be_arg *args)
 {
 	int i;
 
-	if (be_args == NULL)
+	if (args == NULL)
 		return;
 
-	for (i = 0; i < num_be_args; i++) {
-		free(be_args[i].kname, M_CTL);
-		free(be_args[i].kvalue, M_CTL);
+	for (i = 0; i < num_args; i++) {
+		free(args[i].kname, M_CTL);
+		free(args[i].kvalue, M_CTL);
 	}
 
-	free(be_args, M_CTL);
+	free(args, M_CTL);
 }
 
 static struct ctl_be_arg *
-ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args,
+ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
 		char *error_str, size_t error_str_len)
 {
 	struct ctl_be_arg *args;
 	int i;
 
-	args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args),
+	args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
 				error_str, error_str_len);
 
 	if (args == NULL)
 		goto bailout;
 
-	for (i = 0; i < num_be_args; i++) {
+	for (i = 0; i < num_args; i++) {
 		args[i].kname = NULL;
 		args[i].kvalue = NULL;
 	}
 
-	for (i = 0; i < num_be_args; i++) {
+	for (i = 0; i < num_args; i++) {
 		uint8_t *tmpptr;
 
 		args[i].kname = ctl_copyin_alloc(args[i].name,
@@ -2103,20 +2468,22 @@
 			goto bailout;
 		}
 
-		args[i].kvalue = NULL;
-
-		tmpptr = ctl_copyin_alloc(args[i].value,
-			args[i].vallen, error_str, error_str_len);
-		if (tmpptr == NULL)
-			goto bailout;
-
-		args[i].kvalue = tmpptr;
-
-		if ((args[i].flags & CTL_BEARG_ASCII)
-		 && (tmpptr[args[i].vallen - 1] != '\0')) {
-			snprintf(error_str, error_str_len, "Argument %d "
-				 "value is not NUL-terminated", i);
-			goto bailout;
+		if (args[i].flags & CTL_BEARG_RD) {
+			tmpptr = ctl_copyin_alloc(args[i].value,
+				args[i].vallen, error_str, error_str_len);
+			if (tmpptr == NULL)
+				goto bailout;
+			if ((args[i].flags & CTL_BEARG_ASCII)
+			 && (tmpptr[args[i].vallen - 1] != '\0')) {
+				snprintf(error_str, error_str_len, "Argument "
+				    "%d value is not NUL-terminated", i);
+				free(tmpptr, M_CTL);
+				goto bailout;
+			}
+			args[i].kvalue = tmpptr;
+		} else {
+			args[i].kvalue = malloc(args[i].vallen,
+			    M_CTL, M_WAITOK | M_ZERO);
 		}
 	}
 
@@ -2123,22 +2490,34 @@
 	return (args);
 bailout:
 
-	ctl_free_args(num_be_args, args);
+	ctl_free_args(num_args, args);
 
 	return (NULL);
 }
 
+static void
+ctl_copyout_args(int num_args, struct ctl_be_arg *args)
+{
+	int i;
+
+	for (i = 0; i < num_args; i++) {
+		if (args[i].flags & CTL_BEARG_WR)
+			copyout(args[i].kvalue, args[i].value, args[i].vallen);
+	}
+}
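
Backend arguments are now copied in only when CTL_BEARG_RD is set and
copied back out only when CTL_BEARG_WR is set.  A userland-side sketch
of one RD argument (the option name and value are illustrative; field
names follow struct ctl_be_arg):

    static char aname[] = "file";
    static char avalue[] = "/dev/zvol/tank/lun0";

    struct ctl_be_arg arg = {
        .name    = aname,
        .namelen = sizeof(aname),   /* length includes the NUL */
        .value   = avalue,
        .vallen  = sizeof(avalue),  /* NUL-terminated, per the ASCII check */
        .flags   = CTL_BEARG_RD | CTL_BEARG_ASCII,
    };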
+
 /*
  * Escape characters that are illegal or not recommended in XML.
  */
 int
-ctl_sbuf_printf_esc(struct sbuf *sb, char *str)
+ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
 {
+	char *end = str + size;
 	int retval;
 
 	retval = 0;
 
-	for (; *str; str++) {
+	for (; *str && str < end; str++) {
 		switch (*str) {
 		case '&':
 			retval = sbuf_printf(sb, "&amp;");
@@ -2162,92 +2541,78 @@
 	return (retval);
 }
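
A usage sketch, assuming the cases elided from this hunk map '<' and
'>' to "&lt;" and "&gt;" in the same way:

    static void
    esc_example(void)
    {
        char val[] = "tank&co <raw>";
        struct sbuf *sb = sbuf_new_auto();

        ctl_sbuf_printf_esc(sb, val, strlen(val));
        sbuf_finish(sb);
        /* sbuf_data(sb) now reads: tank&amp;co &lt;raw&gt; */
        sbuf_delete(sb);
    }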
 
+static void
+ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
+{
+	struct scsi_vpd_id_descriptor *desc;
+	int i;
+
+	if (id == NULL || id->len < 4)
+		return;
+	desc = (struct scsi_vpd_id_descriptor *)id->data;
+	switch (desc->id_type & SVPD_ID_TYPE_MASK) {
+	case SVPD_ID_TYPE_T10:
+		sbuf_printf(sb, "t10.");
+		break;
+	case SVPD_ID_TYPE_EUI64:
+		sbuf_printf(sb, "eui.");
+		break;
+	case SVPD_ID_TYPE_NAA:
+		sbuf_printf(sb, "naa.");
+		break;
+	case SVPD_ID_TYPE_SCSI_NAME:
+		break;
+	}
+	switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
+	case SVPD_ID_CODESET_BINARY:
+		for (i = 0; i < desc->length; i++)
+			sbuf_printf(sb, "%02x", desc->identifier[i]);
+		break;
+	case SVPD_ID_CODESET_ASCII:
+		sbuf_printf(sb, "%.*s", (int)desc->length,
+		    (char *)desc->identifier);
+		break;
+	case SVPD_ID_CODESET_UTF8:
+		sbuf_printf(sb, "%s", (char *)desc->identifier);
+		break;
+	}
+}
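
A binary-codeset NAA designator, for instance, renders as "naa."
followed by hex digits.  A minimal stand-in for that branch:

    #include <stdint.h>
    #include <stdio.h>

    static void
    naa_print(const uint8_t *id, int len)
    {
        int i;

        printf("naa.");
        for (i = 0; i < len; i++)
            printf("%02x", id[i]);
        printf("\n");   /* e.g. naa.600144f000000001 */
    }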
+
 static int
 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
 	  struct thread *td)
 {
-	struct ctl_softc *softc;
+	struct ctl_softc *softc = dev->si_drv1;
+	struct ctl_port *port;
+	struct ctl_lun *lun;
 	int retval;
 
-	softc = control_softc;
-
 	retval = 0;
 
 	switch (cmd) {
-	case CTL_IO: {
-		union ctl_io *io;
-		void *pool_tmp;
-
-		/*
-		 * If we haven't been "enabled", don't allow any SCSI I/O
-		 * to this FETD.
-		 */
-		if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
-			retval = -EPERM;
-			break;
-		}
-
-		io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref);
-		if (io == NULL) {
-			printf("ctl_ioctl: can't allocate ctl_io!\n");
-			retval = -ENOSPC;
-			break;
-		}
-
-		/*
-		 * Need to save the pool reference so it doesn't get
-		 * spammed by the user's ctl_io.
-		 */
-		pool_tmp = io->io_hdr.pool;
-
-		memcpy(io, (void *)addr, sizeof(*io));
-
-		io->io_hdr.pool = pool_tmp;
-		/*
-		 * No status yet, so make sure the status is set properly.
-		 */
-		io->io_hdr.status = CTL_STATUS_NONE;
-
-		/*
-		 * The user sets the initiator ID, target and LUN IDs.
-		 */
-		io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port;
-		io->io_hdr.flags |= CTL_FLAG_USER_REQ;
-		if ((io->io_hdr.io_type == CTL_IO_SCSI)
-		 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
-			io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++;
-
-		retval = ctl_ioctl_submit_wait(io);
-
-		if (retval != 0) {
-			ctl_free_io(io);
-			break;
-		}
-
-		memcpy((void *)addr, io, sizeof(*io));
-
-		/* return this to our pool */
-		ctl_free_io(io);
-
+	case CTL_IO:
+		retval = ctl_ioctl_io(dev, cmd, addr, flag, td);
 		break;
-	}
 	case CTL_ENABLE_PORT:
 	case CTL_DISABLE_PORT:
 	case CTL_SET_PORT_WWNS: {
-		struct ctl_frontend *fe;
+		struct ctl_port *port;
 		struct ctl_port_entry *entry;
 
 		entry = (struct ctl_port_entry *)addr;
 		
 		mtx_lock(&softc->ctl_lock);
-		STAILQ_FOREACH(fe, &softc->fe_list, links) {
+		STAILQ_FOREACH(port, &softc->port_list, links) {
 			int action, done;
 
+			if (port->targ_port < softc->port_min ||
+			    port->targ_port >= softc->port_max)
+				continue;
+
 			action = 0;
 			done = 0;
-
 			if ((entry->port_type == CTL_PORT_NONE)
-			 && (entry->targ_port == fe->targ_port)) {
+			 && (entry->targ_port == port->targ_port)) {
 				/*
 				 * If the user only wants to enable or
 				 * disable or set WWNs on a specific port,
@@ -2255,7 +2620,7 @@
 				 */
 				action = 1;
 				done = 1;
-			} else if (entry->port_type & fe->port_type) {
+			} else if (entry->port_type & port->port_type) {
 				/*
 				 * Compare the user's type mask with the
 				 * particular frontend type to see if we
@@ -2275,49 +2640,29 @@
 					break;
 				}
 			}
-			if (action != 0) {
-				/*
-				 * XXX KDM we have to drop the lock here,
-				 * because the online/offline operations
-				 * can potentially block.  We need to
-				 * reference count the frontends so they
-				 * can't go away,
-				 */
+			if (action == 0)
+				continue;
+
+			/*
+			 * XXX KDM we have to drop the lock here, because
+			 * the online/offline operations can potentially
+			 * block.  We need to reference count the frontends
+			 * so they can't go away.
+			 */
+			if (cmd == CTL_ENABLE_PORT) {
 				mtx_unlock(&softc->ctl_lock);
-
-				if (cmd == CTL_ENABLE_PORT) {
-					struct ctl_lun *lun;
-
-					STAILQ_FOREACH(lun, &softc->lun_list,
-						       links) {
-						fe->lun_enable(fe->targ_lun_arg,
-						    lun->target,
-						    lun->lun);
-					}
-
-					ctl_frontend_online(fe);
-				} else if (cmd == CTL_DISABLE_PORT) {
-					struct ctl_lun *lun;
-
-					ctl_frontend_offline(fe);
-
-					STAILQ_FOREACH(lun, &softc->lun_list,
-						       links) {
-						fe->lun_disable(
-						    fe->targ_lun_arg,
-						    lun->target,
-						    lun->lun);
-					}
-				}
-
+				ctl_port_online(port);
 				mtx_lock(&softc->ctl_lock);
-
-				if (cmd == CTL_SET_PORT_WWNS)
-					ctl_frontend_set_wwns(fe,
-					    (entry->flags & CTL_PORT_WWNN_VALID) ?
-					    1 : 0, entry->wwnn,
-					    (entry->flags & CTL_PORT_WWPN_VALID) ?
-					    1 : 0, entry->wwpn);
+			} else if (cmd == CTL_DISABLE_PORT) {
+				mtx_unlock(&softc->ctl_lock);
+				ctl_port_offline(port);
+				mtx_lock(&softc->ctl_lock);
+			} else if (cmd == CTL_SET_PORT_WWNS) {
+				ctl_port_set_wwns(port,
+				    (entry->flags & CTL_PORT_WWNN_VALID) ?
+				    1 : 0, entry->wwnn,
+				    (entry->flags & CTL_PORT_WWPN_VALID) ?
+				    1 : 0, entry->wwpn);
 			}
 			if (done != 0)
 				break;
@@ -2325,114 +2670,9 @@
 		mtx_unlock(&softc->ctl_lock);
 		break;
 	}
-	case CTL_GET_PORT_LIST: {
-		struct ctl_frontend *fe;
-		struct ctl_port_list *list;
-		int i;
-
-		list = (struct ctl_port_list *)addr;
-
-		if (list->alloc_len != (list->alloc_num *
-		    sizeof(struct ctl_port_entry))) {
-			printf("%s: CTL_GET_PORT_LIST: alloc_len %u != "
-			       "alloc_num %u * sizeof(struct ctl_port_entry) "
-			       "%zu\n", __func__, list->alloc_len,
-			       list->alloc_num, sizeof(struct ctl_port_entry));
-			retval = EINVAL;
-			break;
-		}
-		list->fill_len = 0;
-		list->fill_num = 0;
-		list->dropped_num = 0;
-		i = 0;
-		mtx_lock(&softc->ctl_lock);
-		STAILQ_FOREACH(fe, &softc->fe_list, links) {
-			struct ctl_port_entry entry, *list_entry;
-
-			if (list->fill_num >= list->alloc_num) {
-				list->dropped_num++;
-				continue;
-			}
-
-			entry.port_type = fe->port_type;
-			strlcpy(entry.port_name, fe->port_name,
-				sizeof(entry.port_name));
-			entry.targ_port = fe->targ_port;
-			entry.physical_port = fe->physical_port;
-			entry.virtual_port = fe->virtual_port;
-			entry.wwnn = fe->wwnn;
-			entry.wwpn = fe->wwpn;
-			if (fe->status & CTL_PORT_STATUS_ONLINE)
-				entry.online = 1;
-			else
-				entry.online = 0;
-
-			list_entry = &list->entries[i];
-
-			retval = copyout(&entry, list_entry, sizeof(entry));
-			if (retval != 0) {
-				printf("%s: CTL_GET_PORT_LIST: copyout "
-				       "returned %d\n", __func__, retval);
-				break;
-			}
-			i++;
-			list->fill_num++;
-			list->fill_len += sizeof(entry);
-		}
-		mtx_unlock(&softc->ctl_lock);
-
-		/*
-		 * If this is non-zero, we had a copyout fault, so there's
-		 * probably no point in attempting to set the status inside
-		 * the structure.
-		 */
-		if (retval != 0)
-			break;
-
-		if (list->dropped_num > 0)
-			list->status = CTL_PORT_LIST_NEED_MORE_SPACE;
-		else
-			list->status = CTL_PORT_LIST_OK;
-		break;
-	}
-	case CTL_DUMP_OOA: {
-		struct ctl_lun *lun;
-		union ctl_io *io;
-		char printbuf[128];
-		struct sbuf sb;
-
-		mtx_lock(&softc->ctl_lock);
-		printf("Dumping OOA queues:\n");
-		STAILQ_FOREACH(lun, &softc->lun_list, links) {
-			for (io = (union ctl_io *)TAILQ_FIRST(
-			     &lun->ooa_queue); io != NULL;
-			     io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
-			     ooa_links)) {
-				sbuf_new(&sb, printbuf, sizeof(printbuf),
-					 SBUF_FIXEDLEN);
-				sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ",
-					    (intmax_t)lun->lun,
-					    io->scsiio.tag_num,
-					    (io->io_hdr.flags &
-					    CTL_FLAG_BLOCKED) ? "" : " BLOCKED",
-					    (io->io_hdr.flags &
-					    CTL_FLAG_DMA_INPROG) ? " DMA" : "",
-					    (io->io_hdr.flags &
-					    CTL_FLAG_ABORT) ? " ABORT" : "",
-			                    (io->io_hdr.flags &
-		                        CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : "");
-				ctl_scsi_command_string(&io->scsiio, NULL, &sb);
-				sbuf_finish(&sb);
-				printf("%s\n", sbuf_data(&sb));
-			}
-		}
-		printf("OOA queues dump done\n");
-		mtx_unlock(&softc->ctl_lock);
-		break;
-	}
 	case CTL_GET_OOA: {
-		struct ctl_lun *lun;
 		struct ctl_ooa *ooa_hdr;
+		struct ctl_ooa_entry *entries;
 		uint32_t cur_fill_num;
 
 		ooa_hdr = (struct ctl_ooa *)addr;
@@ -2456,11 +2696,20 @@
 			break;
 		}
 
+		entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
+		if (entries == NULL) {
+			printf("%s: could not allocate %d bytes for OOA "
+			       "dump\n", __func__, ooa_hdr->alloc_len);
+			retval = ENOMEM;
+			break;
+		}
+
 		mtx_lock(&softc->ctl_lock);
-		if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
-		 && ((ooa_hdr->lun_num > CTL_MAX_LUNS)
-		  || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
+		if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 &&
+		    (ooa_hdr->lun_num >= CTL_MAX_LUNS ||
+		     softc->ctl_luns[ooa_hdr->lun_num] == NULL)) {
 			mtx_unlock(&softc->ctl_lock);
+			free(entries, M_CTL);
 			printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
 			       __func__, (uintmax_t)ooa_hdr->lun_num);
 			retval = EINVAL;
@@ -2471,19 +2720,13 @@
 
 		if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
 			STAILQ_FOREACH(lun, &softc->lun_list, links) {
-				retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,
-					ooa_hdr);
-				if (retval != 0)
-					break;
+				ctl_ioctl_fill_ooa(lun, &cur_fill_num,
+				    ooa_hdr, entries);
 			}
-			if (retval != 0) {
-				mtx_unlock(&softc->ctl_lock);
-				break;
-			}
 		} else {
 			lun = softc->ctl_luns[ooa_hdr->lun_num];
-
-			retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr);
+			ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr,
+			    entries);
 		}
 		mtx_unlock(&softc->ctl_lock);
 
@@ -2490,8 +2733,13 @@
 		ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
 		ooa_hdr->fill_len = ooa_hdr->fill_num *
 			sizeof(struct ctl_ooa_entry);
+		retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
+		if (retval != 0) {
+			printf("%s: error copying out %d bytes for OOA dump\n", 
+			       __func__, ooa_hdr->fill_len);
+		}
 
-		getbintime(&ooa_hdr->cur_bt);
+		getbinuptime(&ooa_hdr->cur_bt);
 
 		if (cur_fill_num > ooa_hdr->alloc_num) {
 			ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num;
@@ -2500,295 +2748,88 @@
 			ooa_hdr->dropped_num = 0;
 			ooa_hdr->status = CTL_OOA_OK;
 		}
-		break;
-	}
-	case CTL_CHECK_OOA: {
-		union ctl_io *io;
-		struct ctl_lun *lun;
-		struct ctl_ooa_info *ooa_info;
 
-
-		ooa_info = (struct ctl_ooa_info *)addr;
-
-		if (ooa_info->lun_id >= CTL_MAX_LUNS) {
-			ooa_info->status = CTL_OOA_INVALID_LUN;
-			break;
-		}
-		mtx_lock(&softc->ctl_lock);
-		lun = softc->ctl_luns[ooa_info->lun_id];
-		if (lun == NULL) {
-			mtx_unlock(&softc->ctl_lock);
-			ooa_info->status = CTL_OOA_INVALID_LUN;
-			break;
-		}
-
-		ooa_info->num_entries = 0;
-		for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
-		     io != NULL; io = (union ctl_io *)TAILQ_NEXT(
-		     &io->io_hdr, ooa_links)) {
-			ooa_info->num_entries++;
-		}
-
-		mtx_unlock(&softc->ctl_lock);
-		ooa_info->status = CTL_OOA_SUCCESS;
-
+		free(entries, M_CTL);
 		break;
 	}
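
CTL_GET_OOA now snapshots the queues into a kernel buffer while the
locks are held and performs a single copyout() afterwards, instead of
the per-entry copyout the old code issued under ctl_lock.  The
pattern, with a hypothetical fill callback:

    static int
    snapshot_to_user(void *uaddr, size_t len, void (*fill)(void *buf))
    {
        void *buf;
        int error;

        buf = malloc(len, M_CTL, M_WAITOK | M_ZERO);
        fill(buf);                          /* takes and drops locks inside */
        error = copyout(buf, uaddr, len);   /* no locks held here */
        free(buf, M_CTL);
        return (error);
    }
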
-	case CTL_HARD_START:
-	case CTL_HARD_STOP: {
-		struct ctl_fe_ioctl_startstop_info ss_info;
-		struct cfi_metatask *metatask;
-		struct mtx hs_mtx;
-
-		mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF);
-
-		cv_init(&ss_info.sem, "hard start/stop cv" );
-
-		metatask = cfi_alloc_metatask(/*can_wait*/ 1);
-		if (metatask == NULL) {
-			retval = ENOMEM;
-			mtx_destroy(&hs_mtx);
-			break;
-		}
-
-		if (cmd == CTL_HARD_START)
-			metatask->tasktype = CFI_TASK_STARTUP;
-		else
-			metatask->tasktype = CFI_TASK_SHUTDOWN;
-
-		metatask->callback = ctl_ioctl_hard_startstop_callback;
-		metatask->callback_arg = &ss_info;
-
-		cfi_action(metatask);
-
-		/* Wait for the callback */
-		mtx_lock(&hs_mtx);
-		cv_wait_sig(&ss_info.sem, &hs_mtx);
-		mtx_unlock(&hs_mtx);
-
-		/*
-		 * All information has been copied from the metatask by the
-		 * time cv_broadcast() is called, so we free the metatask here.
-		 */
-		cfi_free_metatask(metatask);
-
-		memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info));
-
-		mtx_destroy(&hs_mtx);
-		break;
-	}
-	case CTL_BBRREAD: {
-		struct ctl_bbrread_info *bbr_info;
-		struct ctl_fe_ioctl_bbrread_info fe_bbr_info;
-		struct mtx bbr_mtx;
-		struct cfi_metatask *metatask;
-
-		bbr_info = (struct ctl_bbrread_info *)addr;
-
-		bzero(&fe_bbr_info, sizeof(fe_bbr_info));
-
-		bzero(&bbr_mtx, sizeof(bbr_mtx));
-		mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF);
-
-		fe_bbr_info.bbr_info = bbr_info;
-		fe_bbr_info.lock = &bbr_mtx;
-
-		cv_init(&fe_bbr_info.sem, "BBR read cv");
-		metatask = cfi_alloc_metatask(/*can_wait*/ 1);
-
-		if (metatask == NULL) {
-			mtx_destroy(&bbr_mtx);
-			cv_destroy(&fe_bbr_info.sem);
-			retval = ENOMEM;
-			break;
-		}
-		metatask->tasktype = CFI_TASK_BBRREAD;
-		metatask->callback = ctl_ioctl_bbrread_callback;
-		metatask->callback_arg = &fe_bbr_info;
-		metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num;
-		metatask->taskinfo.bbrread.lba = bbr_info->lba;
-		metatask->taskinfo.bbrread.len = bbr_info->len;
-
-		cfi_action(metatask);
-
-		mtx_lock(&bbr_mtx);
-		while (fe_bbr_info.wakeup_done == 0)
-			cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx);
-		mtx_unlock(&bbr_mtx);
-
-		bbr_info->status = metatask->status;
-		bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
-		bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status;
-		memcpy(&bbr_info->sense_data,
-		       &metatask->taskinfo.bbrread.sense_data,
-		       ctl_min(sizeof(bbr_info->sense_data),
-			       sizeof(metatask->taskinfo.bbrread.sense_data)));
-
-		cfi_free_metatask(metatask);
-
-		mtx_destroy(&bbr_mtx);
-		cv_destroy(&fe_bbr_info.sem);
-
-		break;
-	}
 	case CTL_DELAY_IO: {
 		struct ctl_io_delay_info *delay_info;
-#ifdef CTL_IO_DELAY
-		struct ctl_lun *lun;
-#endif /* CTL_IO_DELAY */
 
 		delay_info = (struct ctl_io_delay_info *)addr;
 
 #ifdef CTL_IO_DELAY
 		mtx_lock(&softc->ctl_lock);
-
-		if ((delay_info->lun_id > CTL_MAX_LUNS)
-		 || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
+		if (delay_info->lun_id >= CTL_MAX_LUNS ||
+		    (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) {
+			mtx_unlock(&softc->ctl_lock);
 			delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
-		} else {
-			lun = softc->ctl_luns[delay_info->lun_id];
-
-			delay_info->status = CTL_DELAY_STATUS_OK;
-
-			switch (delay_info->delay_type) {
-			case CTL_DELAY_TYPE_CONT:
-				break;
-			case CTL_DELAY_TYPE_ONESHOT:
-				break;
-			default:
-				delay_info->status =
-					CTL_DELAY_STATUS_INVALID_TYPE;
-				break;
-			}
-
-			switch (delay_info->delay_loc) {
-			case CTL_DELAY_LOC_DATAMOVE:
-				lun->delay_info.datamove_type =
-					delay_info->delay_type;
-				lun->delay_info.datamove_delay =
-					delay_info->delay_secs;
-				break;
-			case CTL_DELAY_LOC_DONE:
-				lun->delay_info.done_type =
-					delay_info->delay_type;
-				lun->delay_info.done_delay =
-					delay_info->delay_secs;
-				break;
-			default:
-				delay_info->status =
-					CTL_DELAY_STATUS_INVALID_LOC;
-				break;
-			}
+			break;
 		}
-
+		mtx_lock(&lun->lun_lock);
 		mtx_unlock(&softc->ctl_lock);
-#else
-		delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
-#endif /* CTL_IO_DELAY */
-		break;
-	}
-	case CTL_REALSYNC_SET: {
-		int *syncstate;
-
-		syncstate = (int *)addr;
-
-		mtx_lock(&softc->ctl_lock);
-		switch (*syncstate) {
-		case 0:
-			softc->flags &= ~CTL_FLAG_REAL_SYNC;
+		delay_info->status = CTL_DELAY_STATUS_OK;
+		switch (delay_info->delay_type) {
+		case CTL_DELAY_TYPE_CONT:
+		case CTL_DELAY_TYPE_ONESHOT:
 			break;
-		case 1:
-			softc->flags |= CTL_FLAG_REAL_SYNC;
+		default:
+			delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE;
 			break;
+		}
+		switch (delay_info->delay_loc) {
+		case CTL_DELAY_LOC_DATAMOVE:
+			lun->delay_info.datamove_type = delay_info->delay_type;
+			lun->delay_info.datamove_delay = delay_info->delay_secs;
+			break;
+		case CTL_DELAY_LOC_DONE:
+			lun->delay_info.done_type = delay_info->delay_type;
+			lun->delay_info.done_delay = delay_info->delay_secs;
+			break;
 		default:
-			retval = -EINVAL;
+			delay_info->status = CTL_DELAY_STATUS_INVALID_LOC;
 			break;
 		}
-		mtx_unlock(&softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
+#else
+		delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
+#endif /* CTL_IO_DELAY */
 		break;
 	}
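
This case, like CTL_ERROR_INJECT below, now uses hand-over-hand
locking: take ctl_lock to look the LUN up, take lun_lock, then drop
ctl_lock.  Factored into a hypothetical helper:

    static struct ctl_lun *
    ctl_lookup_lun(struct ctl_softc *softc, uint32_t lun_id)
    {
        struct ctl_lun *lun;

        mtx_lock(&softc->ctl_lock);
        if (lun_id >= CTL_MAX_LUNS ||
            (lun = softc->ctl_luns[lun_id]) == NULL) {
            mtx_unlock(&softc->ctl_lock);
            return (NULL);
        }
        mtx_lock(&lun->lun_lock);       /* pin the LUN ... */
        mtx_unlock(&softc->ctl_lock);   /* ... then release the table */
        return (lun);                   /* caller drops lun_lock */
    }
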
-	case CTL_REALSYNC_GET: {
-		int *syncstate;
-
-		syncstate = (int*)addr;
-
-		mtx_lock(&softc->ctl_lock);
-		if (softc->flags & CTL_FLAG_REAL_SYNC)
-			*syncstate = 1;
-		else
-			*syncstate = 0;
-		mtx_unlock(&softc->ctl_lock);
-
-		break;
-	}
-	case CTL_SETSYNC:
-	case CTL_GETSYNC: {
-		struct ctl_sync_info *sync_info;
-		struct ctl_lun *lun;
-
-		sync_info = (struct ctl_sync_info *)addr;
-
-		mtx_lock(&softc->ctl_lock);
-		lun = softc->ctl_luns[sync_info->lun_id];
-		if (lun == NULL) {
-			mtx_unlock(&softc->ctl_lock);
-			sync_info->status = CTL_GS_SYNC_NO_LUN;
-		}
-		/*
-		 * Get or set the sync interval.  We're not bounds checking
-		 * in the set case, hopefully the user won't do something
-		 * silly.
-		 */
-		if (cmd == CTL_GETSYNC)
-			sync_info->sync_interval = lun->sync_interval;
-		else
-			lun->sync_interval = sync_info->sync_interval;
-
-		mtx_unlock(&softc->ctl_lock);
-
-		sync_info->status = CTL_GS_SYNC_OK;
-
-		break;
-	}
+#ifdef CTL_LEGACY_STATS
 	case CTL_GETSTATS: {
-		struct ctl_stats *stats;
-		struct ctl_lun *lun;
+		struct ctl_stats *stats = (struct ctl_stats *)addr;
 		int i;
 
-		stats = (struct ctl_stats *)addr;
-
-		if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
-		     stats->alloc_len) {
-			stats->status = CTL_SS_NEED_MORE_SPACE;
-			stats->num_luns = softc->num_luns;
-			break;
-		}
 		/*
 		 * XXX KDM no locking here.  If the LUN list changes,
 		 * things can blow up.
 		 */
-		for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL;
-		     i++, lun = STAILQ_NEXT(lun, links)) {
-			retval = copyout(&lun->stats, &stats->lun_stats[i],
-					 sizeof(lun->stats));
+		i = 0;
+		stats->status = CTL_SS_OK;
+		stats->fill_len = 0;
+		STAILQ_FOREACH(lun, &softc->lun_list, links) {
+			if (stats->fill_len + sizeof(lun->legacy_stats) >
+			    stats->alloc_len) {
+				stats->status = CTL_SS_NEED_MORE_SPACE;
+				break;
+			}
+			retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++],
+					 sizeof(lun->legacy_stats));
 			if (retval != 0)
 				break;
+			stats->fill_len += sizeof(lun->legacy_stats);
 		}
 		stats->num_luns = softc->num_luns;
-		stats->fill_len = sizeof(struct ctl_lun_io_stats) *
-				 softc->num_luns;
-		stats->status = CTL_SS_OK;
+		stats->flags = CTL_STATS_FLAG_NONE;
 #ifdef CTL_TIME_IO
-		stats->flags = CTL_STATS_FLAG_TIME_VALID;
-#else
-		stats->flags = CTL_STATS_FLAG_NONE;
+		stats->flags |= CTL_STATS_FLAG_TIME_VALID;
 #endif
 		getnanouptime(&stats->timestamp);
 		break;
 	}
+#endif /* CTL_LEGACY_STATS */
 	case CTL_ERROR_INJECT: {
 		struct ctl_error_desc *err_desc, *new_err_desc;
-		struct ctl_lun *lun;
 
 		err_desc = (struct ctl_error_desc *)addr;
 
@@ -2797,14 +2838,17 @@
 		bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
 
 		mtx_lock(&softc->ctl_lock);
-		lun = softc->ctl_luns[err_desc->lun_id];
-		if (lun == NULL) {
+		if (err_desc->lun_id >= CTL_MAX_LUNS ||
+		    (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) {
 			mtx_unlock(&softc->ctl_lock);
+			free(new_err_desc, M_CTL);
 			printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
 			       __func__, (uintmax_t)err_desc->lun_id);
 			retval = EINVAL;
 			break;
 		}
+		mtx_lock(&lun->lun_lock);
+		mtx_unlock(&softc->ctl_lock);
 
 		/*
 		 * We could do some checking here to verify the validity
@@ -2827,12 +2871,11 @@
 		err_desc->serial = lun->error_serial;
 		lun->error_serial++;
 
-		mtx_unlock(&softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		break;
 	}
 	case CTL_ERROR_INJECT_DELETE: {
 		struct ctl_error_desc *delete_desc, *desc, *desc2;
-		struct ctl_lun *lun;
 		int delete_done;
 
 		delete_desc = (struct ctl_error_desc *)addr;
@@ -2839,8 +2882,8 @@
 		delete_done = 0;
 
 		mtx_lock(&softc->ctl_lock);
-		lun = softc->ctl_luns[delete_desc->lun_id];
-		if (lun == NULL) {
+		if (delete_desc->lun_id >= CTL_MAX_LUNS ||
+		    (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) {
 			mtx_unlock(&softc->ctl_lock);
 			printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
 			       __func__, (uintmax_t)delete_desc->lun_id);
@@ -2847,6 +2890,8 @@
 			retval = EINVAL;
 			break;
 		}
+		mtx_lock(&lun->lun_lock);
+		mtx_unlock(&softc->ctl_lock);
 		STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
 			if (desc->serial != delete_desc->serial)
 				continue;
@@ -2856,7 +2901,7 @@
 			free(desc, M_CTL);
 			delete_done = 1;
 		}
-		mtx_unlock(&softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		if (delete_done == 0) {
 			printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
 			       "error serial %ju on LUN %u\n", __func__, 
@@ -2867,64 +2912,64 @@
 		break;
 	}
 	case CTL_DUMP_STRUCTS: {
-		int i, j, k;
+		int j, k;
+		struct ctl_port *port;
 		struct ctl_frontend *fe;
 
-		printf("CTL IID to WWPN map start:\n");
-		for (i = 0; i < CTL_MAX_PORTS; i++) {
-			for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
-				if (softc->wwpn_iid[i][j].in_use == 0)
-					continue;
-
-				printf("port %d iid %u WWPN %#jx\n",
-				       softc->wwpn_iid[i][j].port,
-				       softc->wwpn_iid[i][j].iid, 
-				       (uintmax_t)softc->wwpn_iid[i][j].wwpn);
-			}
-		}
-		printf("CTL IID to WWPN map end\n");
+		mtx_lock(&softc->ctl_lock);
 		printf("CTL Persistent Reservation information start:\n");
-		for (i = 0; i < CTL_MAX_LUNS; i++) {
-			struct ctl_lun *lun;
-
-			lun = softc->ctl_luns[i];
-
-			if ((lun == NULL)
-			 || ((lun->flags & CTL_LUN_DISABLED) != 0))
+		STAILQ_FOREACH(lun, &softc->lun_list, links) {
+			mtx_lock(&lun->lun_lock);
+			if ((lun->flags & CTL_LUN_DISABLED) != 0) {
+				mtx_unlock(&lun->lun_lock);
 				continue;
+			}
 
-			for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
+			for (j = 0; j < CTL_MAX_PORTS; j++) {
+				if (lun->pr_keys[j] == NULL)
+					continue;
 				for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
-					if (lun->per_res[j+k].registered == 0)
+					if (lun->pr_keys[j][k] == 0)
 						continue;
-					printf("LUN %d port %d iid %d key "
-					       "%#jx\n", i, j, k,
-					       (uintmax_t)scsi_8btou64(
-					       lun->per_res[j+k].res_key.key));
+					printf("  LUN %ju port %d iid %d key "
+					       "%#jx\n", lun->lun, j, k,
+					       (uintmax_t)lun->pr_keys[j][k]);
 				}
 			}
+			mtx_unlock(&lun->lun_lock);
 		}
 		printf("CTL Persistent Reservation information end\n");
-		printf("CTL Frontends:\n");
+		printf("CTL Ports:\n");
+		STAILQ_FOREACH(port, &softc->port_list, links) {
+			printf("  Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
+			       "%#jx WWPN %#jx\n", port->targ_port, port->port_name,
+			       port->frontend->name, port->port_type,
+			       port->physical_port, port->virtual_port,
+			       (uintmax_t)port->wwnn, (uintmax_t)port->wwpn);
+			for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
+				if (port->wwpn_iid[j].in_use == 0 &&
+				    port->wwpn_iid[j].wwpn == 0 &&
+				    port->wwpn_iid[j].name == NULL)
+					continue;
+
+				printf("    iid %u use %d WWPN %#jx '%s'\n",
+				    j, port->wwpn_iid[j].in_use,
+				    (uintmax_t)port->wwpn_iid[j].wwpn,
+				    port->wwpn_iid[j].name);
+			}
+		}
+		printf("CTL Port information end\n");
+		mtx_unlock(&softc->ctl_lock);
 		/*
 		 * XXX KDM calling this without a lock.  We'd likely want
 		 * to drop the lock before calling the frontend's dump
 		 * routine anyway.
 		 */
+		printf("CTL Frontends:\n");
 		STAILQ_FOREACH(fe, &softc->fe_list, links) {
-			printf("Frontend %s Type %u pport %d vport %d WWNN "
-			       "%#jx WWPN %#jx\n", fe->port_name, fe->port_type,
-			       fe->physical_port, fe->virtual_port,
-			       (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn);
-
-			/*
-			 * Frontends are not required to support the dump
-			 * routine.
-			 */
-			if (fe->fe_dump == NULL)
-				continue;
-
-			fe->fe_dump();
+			printf("  Frontend '%s'\n", fe->name);
+			if (fe->fe_dump != NULL)
+				fe->fe_dump();
 		}
 		printf("CTL Frontend information end\n");
 		break;
@@ -2959,6 +3004,8 @@
 		retval = backend->ioctl(dev, cmd, addr, flag, td);
 
 		if (lun_req->num_be_args > 0) {
+			ctl_copyout_args(lun_req->num_be_args,
+				      lun_req->kern_be_args);
 			ctl_free_args(lun_req->num_be_args,
 				      lun_req->kern_be_args);
 		}
@@ -2966,8 +3013,8 @@
 	}
 	case CTL_LUN_LIST: {
 		struct sbuf *sb;
-		struct ctl_lun *lun;
 		struct ctl_lun_list *list;
+		struct ctl_option *opt;
 
 		list = (struct ctl_lun_list *)addr;
 
@@ -3008,8 +3055,8 @@
 		sbuf_printf(sb, "<ctllunlist>\n");
 
 		mtx_lock(&softc->ctl_lock);
-
 		STAILQ_FOREACH(lun, &softc->lun_list, links) {
+			mtx_lock(&lun->lun_lock);
 			retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
 					     (uintmax_t)lun->lun);
 
@@ -3020,7 +3067,7 @@
 			if (retval != 0)
 				break;
 
-			retval = sbuf_printf(sb, "<backend_type>%s"
+			retval = sbuf_printf(sb, "\t<backend_type>%s"
 					     "</backend_type>\n",
 					     (lun->backend == NULL) ?  "none" :
 					     lun->backend->name);
@@ -3028,7 +3075,7 @@
 			if (retval != 0)
 				break;
 
-			retval = sbuf_printf(sb, "<lun_type>%d</lun_type>\n",
+			retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
 					     lun->be_lun->lun_type);
 
 			if (retval != 0)
@@ -3041,7 +3088,7 @@
 				continue;
 			}
 
-			retval = sbuf_printf(sb, "<size>%ju</size>\n",
+			retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
 					     (lun->be_lun->maxlba > 0) ?
 					     lun->be_lun->maxlba + 1 : 0);
 
@@ -3048,19 +3095,20 @@
 			if (retval != 0)
 				break;
 
-			retval = sbuf_printf(sb, "<blocksize>%u</blocksize>\n",
+			retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
 					     lun->be_lun->blocksize);
 
 			if (retval != 0)
 				break;
 
-			retval = sbuf_printf(sb, "<serial_number>");
+			retval = sbuf_printf(sb, "\t<serial_number>");
 
 			if (retval != 0)
 				break;
 
 			retval = ctl_sbuf_printf_esc(sb,
-						     lun->be_lun->serial_num);
+			    lun->be_lun->serial_num,
+			    sizeof(lun->be_lun->serial_num));
 
 			if (retval != 0)
 				break;
@@ -3070,12 +3118,14 @@
 			if (retval != 0)
 				break;
 
-			retval = sbuf_printf(sb, "<device_id>");
+			retval = sbuf_printf(sb, "\t<device_id>");
 
 			if (retval != 0)
 				break;
 
-			retval = ctl_sbuf_printf_esc(sb,lun->be_lun->device_id);
+			retval = ctl_sbuf_printf_esc(sb,
+			    lun->be_lun->device_id,
+			    sizeof(lun->be_lun->device_id));
 
 			if (retval != 0)
 				break;
@@ -3085,27 +3135,226 @@
 			if (retval != 0)
 				break;
 
-			if (lun->backend->lun_info == NULL) {
-				retval = sbuf_printf(sb, "</lun>\n");
+			if (lun->backend->lun_info != NULL) {
+				retval = lun->backend->lun_info(lun->be_lun->be_lun, sb);
 				if (retval != 0)
 					break;
-				continue;
 			}
+			STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
+				retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
+				    opt->name, opt->value, opt->name);
+				if (retval != 0)
+					break;
+			}
 
-			retval =lun->backend->lun_info(lun->be_lun->be_lun, sb);
+			retval = sbuf_printf(sb, "</lun>\n");
 
 			if (retval != 0)
 				break;
+			mtx_unlock(&lun->lun_lock);
+		}
+		if (lun != NULL)
+			mtx_unlock(&lun->lun_lock);
+		mtx_unlock(&softc->ctl_lock);
 
-			retval = sbuf_printf(sb, "</lun>\n");
+		if ((retval != 0)
+		 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
+			retval = 0;
+			sbuf_delete(sb);
+			list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
+			snprintf(list->error_str, sizeof(list->error_str),
+				 "Out of space, %d bytes is too small",
+				 list->alloc_len);
+			break;
+		}
 
+		sbuf_finish(sb);
+
+		retval = copyout(sbuf_data(sb), list->lun_xml,
+				 sbuf_len(sb) + 1);
+
+		list->fill_len = sbuf_len(sb) + 1;
+		list->status = CTL_LUN_LIST_OK;
+		sbuf_delete(sb);
+		break;
+	}
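[Editor's note on the CTL_LUN_LIST case above: it renders the LUN list as XML into a fixed-size sbuf(9) and reports CTL_LUN_LIST_NEED_MORE_SPACE when the caller's buffer is too small, returning fill_len so the caller can retry with a larger alloc_len. A minimal userland sketch of the same fixed-buffer pattern, using FreeBSD's libsbuf (compile with -lsbuf; the 256-byte size and the XML payload are placeholders):]

#include <sys/sbuf.h>
#include <stdio.h>

int
main(void)
{
	struct sbuf *sb = sbuf_new(NULL, NULL, 256, SBUF_FIXEDLEN);

	sbuf_printf(sb, "<ctllunlist>\n");
	sbuf_printf(sb, "<lun id=\"%u\">\n</lun>\n", 0);
	sbuf_printf(sb, "</ctllunlist>\n");
	if (sbuf_finish(sb) != 0)
		printf("buffer overflowed; retry with a larger one\n");
	else
		printf("%s", sbuf_data(sb));
	sbuf_delete(sb);
	return (0);
}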
+	case CTL_ISCSI: {
+		struct ctl_iscsi *ci;
+		struct ctl_frontend *fe;
+
+		ci = (struct ctl_iscsi *)addr;
+
+		fe = ctl_frontend_find("iscsi");
+		if (fe == NULL) {
+			ci->status = CTL_ISCSI_ERROR;
+			snprintf(ci->error_str, sizeof(ci->error_str),
+			    "Frontend \"iscsi\" not found.");
+			break;
+		}
+
+		retval = fe->ioctl(dev, cmd, addr, flag, td);
+		break;
+	}
+	case CTL_PORT_REQ: {
+		struct ctl_req *req;
+		struct ctl_frontend *fe;
+
+		req = (struct ctl_req *)addr;
+
+		fe = ctl_frontend_find(req->driver);
+		if (fe == NULL) {
+			req->status = CTL_LUN_ERROR;
+			snprintf(req->error_str, sizeof(req->error_str),
+			    "Frontend \"%s\" not found.", req->driver);
+			break;
+		}
+		if (req->num_args > 0) {
+			req->kern_args = ctl_copyin_args(req->num_args,
+			    req->args, req->error_str, sizeof(req->error_str));
+			if (req->kern_args == NULL) {
+				req->status = CTL_LUN_ERROR;
+				break;
+			}
+		}
+
+		if (fe->ioctl)
+			retval = fe->ioctl(dev, cmd, addr, flag, td);
+		else
+			retval = ENODEV;
+
+		if (req->num_args > 0) {
+			ctl_copyout_args(req->num_args, req->kern_args);
+			ctl_free_args(req->num_args, req->kern_args);
+		}
+		break;
+	}
+	case CTL_PORT_LIST: {
+		struct sbuf *sb;
+		struct ctl_port *port;
+		struct ctl_lun_list *list;
+		struct ctl_option *opt;
+		int j;
+		uint32_t plun;
+
+		list = (struct ctl_lun_list *)addr;
+
+		sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
+		if (sb == NULL) {
+			list->status = CTL_LUN_LIST_ERROR;
+			snprintf(list->error_str, sizeof(list->error_str),
+				 "Unable to allocate %d bytes for LUN list",
+				 list->alloc_len);
+			break;
+		}
+
+		sbuf_printf(sb, "<ctlportlist>\n");
+
+		mtx_lock(&softc->ctl_lock);
+		STAILQ_FOREACH(port, &softc->port_list, links) {
+			retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
+					     (uintmax_t)port->targ_port);
+
+			/*
+			 * Bail out as soon as we see that we've overfilled
+			 * the buffer.
+			 */
 			if (retval != 0)
 				break;
+
+			retval = sbuf_printf(sb, "\t<frontend_type>%s"
+			    "</frontend_type>\n", port->frontend->name);
+			if (retval != 0)
+				break;
+
+			retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
+					     port->port_type);
+			if (retval != 0)
+				break;
+
+			retval = sbuf_printf(sb, "\t<online>%s</online>\n",
+			    (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
+			if (retval != 0)
+				break;
+
+			retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
+			    port->port_name);
+			if (retval != 0)
+				break;
+
+			retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
+			    port->physical_port);
+			if (retval != 0)
+				break;
+
+			retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
+			    port->virtual_port);
+			if (retval != 0)
+				break;
+
+			if (port->target_devid != NULL) {
+				sbuf_printf(sb, "\t<target>");
+				ctl_id_sbuf(port->target_devid, sb);
+				sbuf_printf(sb, "</target>\n");
+			}
+
+			if (port->port_devid != NULL) {
+				sbuf_printf(sb, "\t<port>");
+				ctl_id_sbuf(port->port_devid, sb);
+				sbuf_printf(sb, "</port>\n");
+			}
+
+			if (port->port_info != NULL) {
+				retval = port->port_info(port->onoff_arg, sb);
+				if (retval != 0)
+					break;
+			}
+			STAILQ_FOREACH(opt, &port->options, links) {
+				retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
+				    opt->name, opt->value, opt->name);
+				if (retval != 0)
+					break;
+			}
+
+			if (port->lun_map != NULL) {
+				sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
+				for (j = 0; j < port->lun_map_size; j++) {
+					plun = ctl_lun_map_from_port(port, j);
+					if (plun == UINT32_MAX)
+						continue;
+					sbuf_printf(sb,
+					    "\t<lun id=\"%u\">%u</lun>\n",
+					    j, plun);
+				}
+			}
+
+			for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
+				if (port->wwpn_iid[j].in_use == 0 ||
+				    (port->wwpn_iid[j].wwpn == 0 &&
+				     port->wwpn_iid[j].name == NULL))
+					continue;
+
+				if (port->wwpn_iid[j].name != NULL)
+					retval = sbuf_printf(sb,
+					    "\t<initiator id=\"%u\">%s</initiator>\n",
+					    j, port->wwpn_iid[j].name);
+				else
+					retval = sbuf_printf(sb,
+					    "\t<initiator id=\"%u\">naa.%08jx</initiator>\n",
+					    j, port->wwpn_iid[j].wwpn);
+				if (retval != 0)
+					break;
+			}
+			if (retval != 0)
+				break;
+
+			retval = sbuf_printf(sb, "</targ_port>\n");
+			if (retval != 0)
+				break;
 		}
 		mtx_unlock(&softc->ctl_lock);
 
 		if ((retval != 0)
-		 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
+		 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
 			retval = 0;
 			sbuf_delete(sb);
 			list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
@@ -3125,6 +3374,113 @@
 		sbuf_delete(sb);
 		break;
 	}
+	case CTL_LUN_MAP: {
+		struct ctl_lun_map *lm  = (struct ctl_lun_map *)addr;
+		struct ctl_port *port;
+
+		mtx_lock(&softc->ctl_lock);
+		if (lm->port < softc->port_min ||
+		    lm->port >= softc->port_max ||
+		    (port = softc->ctl_ports[lm->port]) == NULL) {
+			mtx_unlock(&softc->ctl_lock);
+			return (ENXIO);
+		}
+		if (port->status & CTL_PORT_STATUS_ONLINE) {
+			STAILQ_FOREACH(lun, &softc->lun_list, links) {
+				if (ctl_lun_map_to_port(port, lun->lun) ==
+				    UINT32_MAX)
+					continue;
+				mtx_lock(&lun->lun_lock);
+				ctl_est_ua_port(lun, lm->port, -1,
+				    CTL_UA_LUN_CHANGE);
+				mtx_unlock(&lun->lun_lock);
+			}
+		}
+		mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps
+		if (lm->plun != UINT32_MAX) {
+			if (lm->lun == UINT32_MAX)
+				retval = ctl_lun_map_unset(port, lm->plun);
+			else if (lm->lun < CTL_MAX_LUNS &&
+			    softc->ctl_luns[lm->lun] != NULL)
+				retval = ctl_lun_map_set(port, lm->plun, lm->lun);
+			else
+				return (ENXIO);
+		} else {
+			if (lm->lun == UINT32_MAX)
+				retval = ctl_lun_map_deinit(port);
+			else
+				retval = ctl_lun_map_init(port);
+		}
+		if (port->status & CTL_PORT_STATUS_ONLINE)
+			ctl_isc_announce_port(port);
+		break;
+	}
+	case CTL_GET_LUN_STATS: {
+		struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr;
+		int i;
+
+		/*
+		 * XXX KDM no locking here.  If the LUN list changes,
+		 * things can blow up.
+		 */
+		i = 0;
+		stats->status = CTL_SS_OK;
+		stats->fill_len = 0;
+		STAILQ_FOREACH(lun, &softc->lun_list, links) {
+			if (lun->lun < stats->first_item)
+				continue;
+			if (stats->fill_len + sizeof(lun->stats) >
+			    stats->alloc_len) {
+				stats->status = CTL_SS_NEED_MORE_SPACE;
+				break;
+			}
+			retval = copyout(&lun->stats, &stats->stats[i++],
+					 sizeof(lun->stats));
+			if (retval != 0)
+				break;
+			stats->fill_len += sizeof(lun->stats);
+		}
+		stats->num_items = softc->num_luns;
+		stats->flags = CTL_STATS_FLAG_NONE;
+#ifdef CTL_TIME_IO
+		stats->flags |= CTL_STATS_FLAG_TIME_VALID;
+#endif
+		getnanouptime(&stats->timestamp);
+		break;
+	}
+	case CTL_GET_PORT_STATS: {
+		struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr;
+		int i;
+
+		/*
+		 * XXX KDM no locking here.  If the port list changes,
+		 * things can blow up.
+		 */
+		i = 0;
+		stats->status = CTL_SS_OK;
+		stats->fill_len = 0;
+		STAILQ_FOREACH(port, &softc->port_list, links) {
+			if (port->targ_port < stats->first_item)
+				continue;
+			if (stats->fill_len + sizeof(port->stats) >
+			    stats->alloc_len) {
+				stats->status = CTL_SS_NEED_MORE_SPACE;
+				break;
+			}
+			retval = copyout(&port->stats, &stats->stats[i++],
+					 sizeof(port->stats));
+			if (retval != 0)
+				break;
+			stats->fill_len += sizeof(port->stats);
+		}
+		stats->num_items = softc->num_ports;
+		stats->flags = CTL_STATS_FLAG_NONE;
+#ifdef CTL_TIME_IO
+		stats->flags |= CTL_STATS_FLAG_TIME_VALID;
+#endif
+		getnanouptime(&stats->timestamp);
+		break;
+	}
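[Editor's note: both stats handlers above follow the same bounded-fill protocol: copy whole items while they still fit within alloc_len, account for each in fill_len, and flag CTL_SS_NEED_MORE_SPACE on overflow so the caller can allocate a bigger buffer and retry. A self-contained model of that protocol (names and the item type are illustrative):]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct item { uint64_t ops; };

static int
fill(struct item *dst, size_t alloc_len, const struct item *src,
    size_t nitems, size_t *fill_len)
{
	size_t i;

	*fill_len = 0;
	for (i = 0; i < nitems; i++) {
		if (*fill_len + sizeof(src[i]) > alloc_len)
			return (1);		/* CTL_SS_NEED_MORE_SPACE */
		memcpy(&dst[i], &src[i], sizeof(src[i]));	/* copyout() */
		*fill_len += sizeof(src[i]);
	}
	return (0);				/* CTL_SS_OK */
}

int
main(void)
{
	struct item src[3] = {{1}, {2}, {3}}, dst[2];
	size_t fl;

	printf("%d %zu\n", fill(dst, sizeof(dst), src, 3, &fl), fl); /* 1 16 */
	return (0);
}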
 	default: {
 		/* XXX KDM should we fix this? */
 #if 0
@@ -3150,7 +3506,7 @@
 		if (found == 0) {
 			printf("ctl: unknown ioctl command %#lx or backend "
 			       "%d\n", cmd, type);
-			retval = -EINVAL;
+			retval = EINVAL;
 			break;
 		}
 		retval = backend->ioctl(dev, cmd, addr, flag, td);
@@ -3165,52 +3521,198 @@
 uint32_t
 ctl_get_initindex(struct ctl_nexus *nexus)
 {
-	if (nexus->targ_port < CTL_MAX_PORTS)
-		return (nexus->initid.id +
-			(nexus->targ_port * CTL_MAX_INIT_PER_PORT));
-	else
-		return (nexus->initid.id +
-		       ((nexus->targ_port - CTL_MAX_PORTS) *
-			CTL_MAX_INIT_PER_PORT));
+	return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
 }
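[Editor's note: the reworked ctl_get_initindex() always flattens a nexus into one port-major index space, dropping the old special case for ports above CTL_MAX_PORTS. A quick standalone check of the arithmetic (CTL_MAX_INIT_PER_PORT is assumed here to be 2048, its value in ctl.h):]

#include <stdint.h>
#include <stdio.h>

#define MAX_INIT_PER_PORT 2048	/* assumed value of CTL_MAX_INIT_PER_PORT */

int
main(void)
{
	uint32_t targ_port = 2, initid = 5;

	/* port 2, initiator 5 -> 2 * 2048 + 5 == 4101 */
	printf("%u\n", initid + targ_port * MAX_INIT_PER_PORT);
	return (0);
}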
 
+int
+ctl_lun_map_init(struct ctl_port *port)
+{
+	struct ctl_softc *softc = port->ctl_softc;
+	struct ctl_lun *lun;
+	int size = ctl_lun_map_size;
+	uint32_t i;
+
+	if (port->lun_map == NULL || port->lun_map_size < size) {
+		port->lun_map_size = 0;
+		free(port->lun_map, M_CTL);
+		port->lun_map = malloc(size * sizeof(uint32_t),
+		    M_CTL, M_NOWAIT);
+	}
+	if (port->lun_map == NULL)
+		return (ENOMEM);
+	for (i = 0; i < size; i++)
+		port->lun_map[i] = UINT32_MAX;
+	port->lun_map_size = size;
+	if (port->status & CTL_PORT_STATUS_ONLINE) {
+		if (port->lun_disable != NULL) {
+			STAILQ_FOREACH(lun, &softc->lun_list, links)
+				port->lun_disable(port->targ_lun_arg, lun->lun);
+		}
+		ctl_isc_announce_port(port);
+	}
+	return (0);
+}
+
+int
+ctl_lun_map_deinit(struct ctl_port *port)
+{
+	struct ctl_softc *softc = port->ctl_softc;
+	struct ctl_lun *lun;
+
+	if (port->lun_map == NULL)
+		return (0);
+	port->lun_map_size = 0;
+	free(port->lun_map, M_CTL);
+	port->lun_map = NULL;
+	if (port->status & CTL_PORT_STATUS_ONLINE) {
+		if (port->lun_enable != NULL) {
+			STAILQ_FOREACH(lun, &softc->lun_list, links)
+				port->lun_enable(port->targ_lun_arg, lun->lun);
+		}
+		ctl_isc_announce_port(port);
+	}
+	return (0);
+}
+
+int
+ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
+{
+	int status;
+	uint32_t old;
+
+	if (port->lun_map == NULL) {
+		status = ctl_lun_map_init(port);
+		if (status != 0)
+			return (status);
+	}
+	if (plun >= port->lun_map_size)
+		return (EINVAL);
+	old = port->lun_map[plun];
+	port->lun_map[plun] = glun;
+	if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) {
+		if (port->lun_enable != NULL)
+			port->lun_enable(port->targ_lun_arg, plun);
+		ctl_isc_announce_port(port);
+	}
+	return (0);
+}
+
+int
+ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
+{
+	uint32_t old;
+
+	if (port->lun_map == NULL || plun >= port->lun_map_size)
+		return (0);
+	old = port->lun_map[plun];
+	port->lun_map[plun] = UINT32_MAX;
+	if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) {
+		if (port->lun_disable != NULL)
+			port->lun_disable(port->targ_lun_arg, plun);
+		ctl_isc_announce_port(port);
+	}
+	return (0);
+}
+
 uint32_t
-ctl_get_resindex(struct ctl_nexus *nexus)
+ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
 {
-	return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
+
+	if (port == NULL)
+		return (UINT32_MAX);
+	if (port->lun_map == NULL)
+		return (lun_id);
+	if (lun_id >= port->lun_map_size)
+		return (UINT32_MAX);
+	return (port->lun_map[lun_id]);
 }
 
 uint32_t
-ctl_port_idx(int port_num)
+ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
 {
-	if (port_num < CTL_MAX_PORTS)
-		return(port_num);
-	else
-		return(port_num - CTL_MAX_PORTS);
+	uint32_t i;
+
+	if (port == NULL)
+		return (UINT32_MAX);
+	if (port->lun_map == NULL)
+		return (lun_id);
+	for (i = 0; i < port->lun_map_size; i++) {
+		if (port->lun_map[i] == lun_id)
+			return (i);
+	}
+	return (UINT32_MAX);
 }
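[Editor's note: taken together, the map functions implement a per-port LUN translation table: a NULL map means identity, UINT32_MAX marks an unmapped slot, and ctl_lun_map_to_port() is the linear inverse of the array lookup. A simplified userland model of the forward lookup (the map contents are made up; the bound check uses >=, the conservatively safe form):]

#include <stdint.h>
#include <stdio.h>

#define MAP_SIZE 4

static uint32_t map[MAP_SIZE] = {
	UINT32_MAX, 7, UINT32_MAX, 0	/* port LUN 1 -> global 7, 3 -> 0 */
};

static uint32_t
from_port(uint32_t lun_id)		/* mirrors ctl_lun_map_from_port() */
{
	if (lun_id >= MAP_SIZE)
		return (UINT32_MAX);
	return (map[lun_id]);
}

int
main(void)
{
	printf("%u %u\n", from_port(1), from_port(2));	/* 7 4294967295 */
	return (0);
}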
 
-/*
- * Note:  This only works for bitmask sizes that are at least 32 bits, and
- * that are a power of 2.
- */
-int
-ctl_ffz(uint32_t *mask, uint32_t size)
+uint32_t
+ctl_decode_lun(uint64_t encoded)
 {
-	uint32_t num_chunks, num_pieces;
-	int i, j;
+	uint8_t lun[8];
+	uint32_t result = 0xffffffff;
 
-	num_chunks = (size >> 5);
-	if (num_chunks == 0)
-		num_chunks++;
-	num_pieces = ctl_min((sizeof(uint32_t) * 8), size);
-
-	for (i = 0; i < num_chunks; i++) {
-		for (j = 0; j < num_pieces; j++) {
-			if ((mask[i] & (1 << j)) == 0)
-				return ((i << 5) + j);
+	be64enc(lun, encoded);
+	switch (lun[0] & RPL_LUNDATA_ATYP_MASK) {
+	case RPL_LUNDATA_ATYP_PERIPH:
+		if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 &&
+		    lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0)
+			result = lun[1];
+		break;
+	case RPL_LUNDATA_ATYP_FLAT:
+		if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 &&
+		    lun[6] == 0 && lun[7] == 0)
+			result = ((lun[0] & 0x3f) << 8) + lun[1];
+		break;
+	case RPL_LUNDATA_ATYP_EXTLUN:
+		switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) {
+		case 0x02:
+			switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) {
+			case 0x00:
+				result = lun[1];
+				break;
+			case 0x10:
+				result = (lun[1] << 16) + (lun[2] << 8) +
+				    lun[3];
+				break;
+			case 0x20:
+				if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0)
+					result = (lun[2] << 24) +
+					    (lun[3] << 16) + (lun[4] << 8) +
+					    lun[5];
+				break;
+			}
+			break;
+		case RPL_LUNDATA_EXT_EAM_NOT_SPEC:
+			result = 0xffffffff;
+			break;
 		}
+		break;
 	}
+	return (result);
+}
 
+uint64_t
+ctl_encode_lun(uint32_t decoded)
+{
+	uint64_t l = decoded;
+
+	if (l <= 0xff)
+		return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48));
+	if (l <= 0x3fff)
+		return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48));
+	if (l <= 0xffffff)
+		return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) |
+		    (l << 32));
+	return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16));
+}
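[Editor's note: ctl_encode_lun() picks the smallest SAM-5 address method that can represent the id: peripheral for ids up to 0xff, flat up to 0x3fff, extended beyond that; ctl_decode_lun() inverts the mapping and yields 0xffffffff for anything it cannot represent. A standalone check of the two small cases (the 0x00 and 0x40 address-type bytes mirror RPL_LUNDATA_ATYP_PERIPH and RPL_LUNDATA_ATYP_FLAT from scsi_all.h):]

#include <stdint.h>
#include <stdio.h>

#define ATYP_PERIPH	0x00	/* RPL_LUNDATA_ATYP_PERIPH */
#define ATYP_FLAT	0x40	/* RPL_LUNDATA_ATYP_FLAT */

static uint64_t
encode_lun(uint32_t l)		/* the first two cases of ctl_encode_lun() */
{
	if (l <= 0xff)
		return (((uint64_t)ATYP_PERIPH << 56) | ((uint64_t)l << 48));
	return (((uint64_t)ATYP_FLAT << 56) | ((uint64_t)l << 48));
}

int
main(void)
{
	printf("%#jx\n", (uintmax_t)encode_lun(5));	/* 0x5000000000000 */
	printf("%#jx\n", (uintmax_t)encode_lun(0x1234));/* 0x5234000000000000 */
	return (0);
}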
+
+int
+ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
+{
+	int i;
+
+	for (i = first; i < last; i++) {
+		if ((mask[i / 32] & (1 << (i % 32))) == 0)
+			return (i);
+	}
 	return (-1);
 }
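[Editor's note: the new ctl_ffz() is a plain find-first-zero scan over an array of uint32_t masks, taking an explicit [first, last) range instead of the old power-of-two size restriction. A standalone check:]

#include <stdint.h>
#include <stdio.h>

static int
ffz(uint32_t *mask, uint32_t first, uint32_t last)	/* mirrors ctl_ffz() */
{
	uint32_t i;

	for (i = first; i < last; i++) {
		if ((mask[i / 32] & (1u << (i % 32))) == 0)
			return ((int)i);
	}
	return (-1);
}

int
main(void)
{
	uint32_t mask[2] = { 0xffffffff, 0x00000007 };

	printf("%d\n", ffz(mask, 0, 64));	/* 35: bits 0-34 are set */
	return (0);
}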
 
@@ -3260,600 +3762,202 @@
 		return (1);
 }
 
-#ifdef unused
-/*
- * The bus, target and lun are optional, they can be filled in later.
- * can_wait is used to determine whether we can wait on the malloc or not.
- */
-union ctl_io*
-ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target,
-	      uint32_t targ_lun, int can_wait)
+static uint64_t
+ctl_get_prkey(struct ctl_lun *lun, uint32_t residx)
 {
-	union ctl_io *io;
+	uint64_t *t;
 
-	if (can_wait)
-		io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK);
-	else
-		io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
+	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
+	if (t == NULL)
+		return (0);
+	return (t[residx % CTL_MAX_INIT_PER_PORT]);
+}
 
-	if (io != NULL) {
-		io->io_hdr.io_type = io_type;
-		io->io_hdr.targ_port = targ_port;
-		/*
-		 * XXX KDM this needs to change/go away.  We need to move
-		 * to a preallocated pool of ctl_scsiio structures.
-		 */
-		io->io_hdr.nexus.targ_target.id = targ_target;
-		io->io_hdr.nexus.targ_lun = targ_lun;
-	}
+static void
+ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx)
+{
+	uint64_t *t;
 
-	return (io);
+	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
+	if (t == NULL)
+		return;
+	t[residx % CTL_MAX_INIT_PER_PORT] = 0;
 }
 
-void
-ctl_kfree_io(union ctl_io *io)
+static void
+ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
 {
-	free(io, M_CTL);
+	uint64_t *p;
+	u_int i;
+
+	i = residx/CTL_MAX_INIT_PER_PORT;
+	if (lun->pr_keys[i] != NULL)
+		return;
+	mtx_unlock(&lun->lun_lock);
+	p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
+	    M_WAITOK | M_ZERO);
+	mtx_lock(&lun->lun_lock);
+	if (lun->pr_keys[i] == NULL)
+		lun->pr_keys[i] = p;
+	else
+		free(p, M_CTL);
 }
-#endif /* unused */
 
+static void
+ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
+{
+	uint64_t *t;
+
+	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
+	KASSERT(t != NULL, ("prkey %d is not allocated", residx));
+	t[residx % CTL_MAX_INIT_PER_PORT] = key;
+}
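[Editor's note: ctl_alloc_prkey() above is a textbook drop-lock/allocate/recheck sequence: the M_WAITOK allocation may sleep, so the LUN lock is released around it, and if another thread installed the table in the meantime the fresh allocation is simply freed. The same pattern sketched with pthreads in place of mtx(9), where 'slot' stands in for lun->pr_keys[i]:]

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;

static void
alloc_slot(size_t size)
{
	void *p;

	pthread_mutex_lock(&lock);
	if (slot != NULL) {			/* already allocated */
		pthread_mutex_unlock(&lock);
		return;
	}
	pthread_mutex_unlock(&lock);		/* must not sleep locked */
	p = calloc(1, size);			/* may block, like M_WAITOK */
	pthread_mutex_lock(&lock);
	if (slot == NULL)
		slot = p;			/* we won the race */
	else
		free(p);			/* lost the race; discard */
	pthread_mutex_unlock(&lock);
}

int
main(void)
{
	alloc_slot(64);
	return (slot == NULL);
}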
+
 /*
- * ctl_softc, pool_type, total_ctl_io are passed in.
+ * ctl_softc, pool_name, total_ctl_io are passed in.
  * npool is passed out.
  */
 int
-ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
-		uint32_t total_ctl_io, struct ctl_io_pool **npool)
+ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
+		uint32_t total_ctl_io, void **npool)
 {
-	uint32_t i;
-	union ctl_io *cur_io, *next_io;
 	struct ctl_io_pool *pool;
-	int retval;
 
-	retval = 0;
-
 	pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
 					    M_NOWAIT | M_ZERO);
-	if (pool == NULL) {
-		retval = -ENOMEM;
-		goto bailout;
-	}
+	if (pool == NULL)
+		return (ENOMEM);
 
-	pool->type = pool_type;
+	snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
 	pool->ctl_softc = ctl_softc;
-
-	mtx_lock(&ctl_softc->ctl_lock);
-	pool->id = ctl_softc->cur_pool_id++;
-	mtx_unlock(&ctl_softc->ctl_lock);
-
-	pool->flags = CTL_POOL_FLAG_NONE;
-	STAILQ_INIT(&pool->free_queue);
-
-	/*
-	 * XXX KDM other options here:
-	 * - allocate a page at a time
-	 * - allocate one big chunk of memory.
-	 * Page allocation might work well, but would take a little more
-	 * tracking.
-	 */
-	for (i = 0; i < total_ctl_io; i++) {
-		cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTL,
-						M_NOWAIT);
-		if (cur_io == NULL) {
-			retval = ENOMEM;
-			break;
-		}
-		cur_io->io_hdr.pool = pool;
-		STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links);
-		pool->total_ctl_io++;
-		pool->free_ctl_io++;
-	}
-
-	if (retval != 0) {
-		for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
-		     cur_io != NULL; cur_io = next_io) {
-			next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
-							      links);
-			STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
-				      ctl_io_hdr, links);
-			free(cur_io, M_CTL);
-		}
-
-		free(pool, M_CTL);
-		goto bailout;
-	}
-	mtx_lock(&ctl_softc->ctl_lock);
-	ctl_softc->num_pools++;
-	STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
-	/*
-	 * Increment our usage count if this is an external consumer, so we
-	 * can't get unloaded until the external consumer (most likely a
-	 * FETD) unloads and frees his pool.
-	 *
-	 * XXX KDM will this increment the caller's module use count, or
-	 * mine?
-	 */
-#if 0
-	if ((pool_type != CTL_POOL_EMERGENCY)
-	 && (pool_type != CTL_POOL_INTERNAL)
-	 && (pool_type != CTL_POOL_IOCTL)
-	 && (pool_type != CTL_POOL_4OTHERSC))
-		MOD_INC_USE_COUNT;
+#ifdef IO_POOLS
+	pool->zone = uma_zsecond_create(pool->name, NULL,
+	    NULL, NULL, NULL, ctl_softc->io_zone);
+	/* uma_prealloc(pool->zone, total_ctl_io); */
+#else
+	pool->zone = ctl_softc->io_zone;
 #endif
 
-	mtx_unlock(&ctl_softc->ctl_lock);
-
 	*npool = pool;
-
-bailout:
-
-	return (retval);
-}
-
-/*
- * Caller must hold ctl_softc->ctl_lock.
- */
-int
-ctl_pool_acquire(struct ctl_io_pool *pool)
-{
-	if (pool == NULL)
-		return (-EINVAL);
-
-	if (pool->flags & CTL_POOL_FLAG_INVALID)
-		return (-EINVAL);
-
-	pool->refcount++;
-
 	return (0);
 }
 
-/*
- * Caller must hold ctl_softc->ctl_lock.
- */
-int
-ctl_pool_invalidate(struct ctl_io_pool *pool)
+void
+ctl_pool_free(struct ctl_io_pool *pool)
 {
-	if (pool == NULL)
-		return (-EINVAL);
 
-	pool->flags |= CTL_POOL_FLAG_INVALID;
-
-	return (0);
-}
-
-/*
- * Caller must hold ctl_softc->ctl_lock.
- */
-int
-ctl_pool_release(struct ctl_io_pool *pool)
-{
 	if (pool == NULL)
-		return (-EINVAL);
+		return;
 
-	if ((--pool->refcount == 0)
-	 && (pool->flags & CTL_POOL_FLAG_INVALID)) {
-		ctl_pool_free(pool->ctl_softc, pool);
-	}
-
-	return (0);
-}
-
-/*
- * Must be called with ctl_softc->ctl_lock held.
- */
-void
-ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool)
-{
-	union ctl_io *cur_io, *next_io;
-
-	for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
-	     cur_io != NULL; cur_io = next_io) {
-		next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
-						      links);
-		STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, ctl_io_hdr,
-			      links);
-		free(cur_io, M_CTL);
-	}
-
-	STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
-	ctl_softc->num_pools--;
-
-	/*
-	 * XXX KDM will this decrement the caller's usage count or mine?
-	 */
-#if 0
-	if ((pool->type != CTL_POOL_EMERGENCY)
-	 && (pool->type != CTL_POOL_INTERNAL)
-	 && (pool->type != CTL_POOL_IOCTL))
-		MOD_DEC_USE_COUNT;
+#ifdef IO_POOLS
+	uma_zdestroy(pool->zone);
 #endif
-
 	free(pool, M_CTL);
 }
 
-/*
- * This routine does not block (except for spinlocks of course).
- * It tries to allocate a ctl_io union from the caller's pool as quickly as
- * possible.
- */
 union ctl_io *
 ctl_alloc_io(void *pool_ref)
 {
+	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
 	union ctl_io *io;
-	struct ctl_softc *ctl_softc;
-	struct ctl_io_pool *pool, *npool;
-	struct ctl_io_pool *emergency_pool;
 
-	pool = (struct ctl_io_pool *)pool_ref;
-
-	if (pool == NULL) {
-		printf("%s: pool is NULL\n", __func__);
-		return (NULL);
-	}
-
-	emergency_pool = NULL;
-
-	ctl_softc = pool->ctl_softc;
-
-	mtx_lock(&ctl_softc->ctl_lock);
-	/*
-	 * First, try to get the io structure from the user's pool.
-	 */
-	if (ctl_pool_acquire(pool) == 0) {
-		io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
-		if (io != NULL) {
-			STAILQ_REMOVE_HEAD(&pool->free_queue, links);
-			pool->total_allocated++;
-			pool->free_ctl_io--;
-			mtx_unlock(&ctl_softc->ctl_lock);
-			return (io);
-		} else
-			ctl_pool_release(pool);
-	}
-	/*
-	 * If he doesn't have any io structures left, search for an
-	 * emergency pool and grab one from there.
-	 */
-	STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) {
-		if (npool->type != CTL_POOL_EMERGENCY)
-			continue;
-
-		if (ctl_pool_acquire(npool) != 0)
-			continue;
-
-		emergency_pool = npool;
-
-		io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue);
-		if (io != NULL) {
-			STAILQ_REMOVE_HEAD(&npool->free_queue, links);
-			npool->total_allocated++;
-			npool->free_ctl_io--;
-			mtx_unlock(&ctl_softc->ctl_lock);
-			return (io);
-		} else
-			ctl_pool_release(npool);
-	}
-
-	/* Drop the spinlock before we malloc */
-	mtx_unlock(&ctl_softc->ctl_lock);
-
-	/*
-	 * The emergency pool (if it exists) didn't have one, so try an
-	 * atomic (i.e. nonblocking) malloc and see if we get lucky.
-	 */
-	io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
+	io = uma_zalloc(pool->zone, M_WAITOK);
 	if (io != NULL) {
-		/*
-		 * If the emergency pool exists but is empty, add this
-		 * ctl_io to its list when it gets freed.
-		 */
-		if (emergency_pool != NULL) {
-			mtx_lock(&ctl_softc->ctl_lock);
-			if (ctl_pool_acquire(emergency_pool) == 0) {
-				io->io_hdr.pool = emergency_pool;
-				emergency_pool->total_ctl_io++;
-				/*
-				 * Need to bump this, otherwise
-				 * total_allocated and total_freed won't
-				 * match when we no longer have anything
-				 * outstanding.
-				 */
-				emergency_pool->total_allocated++;
-			}
-			mtx_unlock(&ctl_softc->ctl_lock);
-		} else
-			io->io_hdr.pool = NULL;
+		io->io_hdr.pool = pool_ref;
+		CTL_SOFTC(io) = pool->ctl_softc;
 	}
-
 	return (io);
 }
 
-static void
-ctl_free_io_internal(union ctl_io *io, int have_lock)
+union ctl_io *
+ctl_alloc_io_nowait(void *pool_ref)
 {
-	if (io == NULL)
-		return;
+	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
+	union ctl_io *io;
 
-	/*
-	 * If this ctl_io has a pool, return it to that pool.
-	 */
-	if (io->io_hdr.pool != NULL) {
-		struct ctl_io_pool *pool;
-#if 0
-		struct ctl_softc *ctl_softc;
-		union ctl_io *tmp_io;
-		unsigned long xflags;
-		int i;
-
-		ctl_softc = control_softc;
-#endif
-
-		pool = (struct ctl_io_pool *)io->io_hdr.pool;
-
-		if (have_lock == 0)
-			mtx_lock(&pool->ctl_softc->ctl_lock);
-#if 0
-		save_flags(xflags);
-
-		for (i = 0, tmp_io = (union ctl_io *)STAILQ_FIRST(
-		     &ctl_softc->task_queue); tmp_io != NULL; i++,
-		     tmp_io = (union ctl_io *)STAILQ_NEXT(&tmp_io->io_hdr,
-		     links)) {
-			if (tmp_io == io) {
-				printf("%s: %p is still on the task queue!\n",
-				       __func__, tmp_io);
-				printf("%s: (%d): type %d "
-				       "msg %d cdb %x iptl: "
-				       "%d:%d:%d:%d tag 0x%04x "
-				       "flg %#lx\n",
-					__func__, i,
-					tmp_io->io_hdr.io_type,
-					tmp_io->io_hdr.msg_type,
-					tmp_io->scsiio.cdb[0],
-					tmp_io->io_hdr.nexus.initid.id,
-					tmp_io->io_hdr.nexus.targ_port,
-					tmp_io->io_hdr.nexus.targ_target.id,
-					tmp_io->io_hdr.nexus.targ_lun,
-					(tmp_io->io_hdr.io_type ==
-					CTL_IO_TASK) ?
-					tmp_io->taskio.tag_num :
-					tmp_io->scsiio.tag_num,
-					xflags);
-				panic("I/O still on the task queue!");
-			}
-		}
-#endif
-		io->io_hdr.io_type = 0xff;
-		STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
-		pool->total_freed++;
-		pool->free_ctl_io++;
-		ctl_pool_release(pool);
-		if (have_lock == 0)
-			mtx_unlock(&pool->ctl_softc->ctl_lock);
-	} else {
-		/*
-		 * Otherwise, just free it.  We probably malloced it and
-		 * the emergency pool wasn't available.
-		 */
-		free(io, M_CTL);
+	io = uma_zalloc(pool->zone, M_NOWAIT);
+	if (io != NULL) {
+		io->io_hdr.pool = pool_ref;
+		CTL_SOFTC(io) = pool->ctl_softc;
 	}
-
+	return (io);
 }
 
 void
 ctl_free_io(union ctl_io *io)
 {
-	ctl_free_io_internal(io, /*have_lock*/ 0);
-}
+	struct ctl_io_pool *pool;
 
-void
-ctl_zero_io(union ctl_io *io)
-{
-	void *pool_ref;
-
 	if (io == NULL)
 		return;
 
-	/*
-	 * May need to preserve linked list pointers at some point too.
-	 */
-	pool_ref = io->io_hdr.pool;
-
-	memset(io, 0, sizeof(*io));
-
-	io->io_hdr.pool = pool_ref;
+	pool = (struct ctl_io_pool *)io->io_hdr.pool;
+	uma_zfree(pool->zone, io);
 }
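[Editor's note: the pool rework replaces the hand-rolled free queue, refcounting, and emergency-pool fallback with UMA zones: a pool is now little more than a named handle on a zone, and each io records its pool so ctl_free_io() knows where to return it. A userland model of that lifecycle, with malloc/free standing in for uma_zalloc/uma_zfree (the struct names are illustrative):]

#include <stdio.h>
#include <stdlib.h>

struct io_pool { const char *name; };
struct io { struct io_pool *pool; /* ... payload ... */ };

static struct io *
alloc_io(struct io_pool *pool)		/* like ctl_alloc_io() */
{
	struct io *io = malloc(sizeof(*io));

	if (io != NULL)
		io->pool = pool;	/* io->io_hdr.pool = pool_ref */
	return (io);
}

static void
free_io(struct io *io)			/* like ctl_free_io() */
{
	free(io);			/* uma_zfree(pool->zone, io) */
}

int
main(void)
{
	struct io_pool pool = { "CTL IO demo" };
	struct io *io = alloc_io(&pool);

	if (io != NULL)
		printf("%s\n", io->pool->name);
	free_io(io);
	return (0);
}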
 
-/*
- * This routine is currently used for internal copies of ctl_ios that need
- * to persist for some reason after we've already returned status to the
- * FETD.  (Thus the flag set.)
- *
- * XXX XXX
- * Note that this makes a blind copy of all fields in the ctl_io, except
- * for the pool reference.  This includes any memory that has been
- * allocated!  That memory will no longer be valid after done has been
- * called, so this would be VERY DANGEROUS for command that actually does
- * any reads or writes.  Right now (11/7/2005), this is only used for immediate
- * start and stop commands, which don't transfer any data, so this is not a
- * problem.  If it is used for anything else, the caller would also need to
- * allocate data buffer space and this routine would need to be modified to
- * copy the data buffer(s) as well.
- */
 void
-ctl_copy_io(union ctl_io *src, union ctl_io *dest)
+ctl_zero_io(union ctl_io *io)
 {
-	void *pool_ref;
+	struct ctl_io_pool *pool;
 
-	if ((src == NULL)
-	 || (dest == NULL))
+	if (io == NULL)
 		return;
 
 	/*
 	 * May need to preserve linked list pointers at some point too.
 	 */
-	pool_ref = dest->io_hdr.pool;
-
-	memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest)));
-
-	dest->io_hdr.pool = pool_ref;
-	/*
-	 * We need to know that this is an internal copy, and doesn't need
-	 * to get passed back to the FETD that allocated it.
-	 */
-	dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
+	pool = io->io_hdr.pool;
+	memset(io, 0, sizeof(*io));
+	io->io_hdr.pool = pool;
+	CTL_SOFTC(io) = pool->ctl_softc;
 }
 
-#ifdef NEEDTOPORT
-static void
-ctl_update_power_subpage(struct copan_power_subpage *page)
+int
+ctl_expand_number(const char *buf, uint64_t *num)
 {
-	int num_luns, num_partitions, config_type;
-	struct ctl_softc *softc;
-	cs_BOOL_t aor_present, shelf_50pct_power;
-	cs_raidset_personality_t rs_type;
-	int max_active_luns;
+	char *endptr;
+	uint64_t number;
+	unsigned shift;
 
-	softc = control_softc;
+	number = strtoq(buf, &endptr, 0);
 
-	/* subtract out the processor LUN */
-	num_luns = softc->num_luns - 1;
-	/*
-	 * Default to 7 LUNs active, which was the only number we allowed
-	 * in the past.
-	 */
-	max_active_luns = 7;
-
-	num_partitions = config_GetRsPartitionInfo();
-	config_type = config_GetConfigType();
-	shelf_50pct_power = config_GetShelfPowerMode();
-	aor_present = config_IsAorRsPresent();
-
-	rs_type = ddb_GetRsRaidType(1);
-	if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5)
-	 && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) {
-		EPRINT(0, "Unsupported RS type %d!", rs_type);
-	}
-
-
-	page->total_luns = num_luns;
-
-	switch (config_type) {
-	case 40:
-		/*
-		 * In a 40 drive configuration, it doesn't matter what DC
-		 * cards we have, whether we have AOR enabled or not,
-		 * partitioning or not, or what type of RAIDset we have.
-		 * In that scenario, we can power up every LUN we present
-		 * to the user.
-		 */
-		max_active_luns = num_luns;
-
+	switch (tolower((unsigned char)*endptr)) {
+	case 'e':
+		shift = 60;
 		break;
-	case 64:
-		if (shelf_50pct_power == CS_FALSE) {
-			/* 25% power */
-			if (aor_present == CS_TRUE) {
-				if (rs_type ==
-				     CS_RAIDSET_PERSONALITY_RAID5) {
-					max_active_luns = 7;
-				} else if (rs_type ==
-					 CS_RAIDSET_PERSONALITY_RAID1){
-					max_active_luns = 14;
-				} else {
-					/* XXX KDM now what?? */
-				}
-			} else {
-				if (rs_type ==
-				     CS_RAIDSET_PERSONALITY_RAID5) {
-					max_active_luns = 8;
-				} else if (rs_type ==
-					 CS_RAIDSET_PERSONALITY_RAID1){
-					max_active_luns = 16;
-				} else {
-					/* XXX KDM now what?? */
-				}
-			}
-		} else {
-			/* 50% power */
-			/*
-			 * With 50% power in a 64 drive configuration, we
-			 * can power all LUNs we present.
-			 */
-			max_active_luns = num_luns;
-		}
+	case 'p':
+		shift = 50;
 		break;
-	case 112:
-		if (shelf_50pct_power == CS_FALSE) {
-			/* 25% power */
-			if (aor_present == CS_TRUE) {
-				if (rs_type ==
-				     CS_RAIDSET_PERSONALITY_RAID5) {
-					max_active_luns = 7;
-				} else if (rs_type ==
-					 CS_RAIDSET_PERSONALITY_RAID1){
-					max_active_luns = 14;
-				} else {
-					/* XXX KDM now what?? */
-				}
-			} else {
-				if (rs_type ==
-				     CS_RAIDSET_PERSONALITY_RAID5) {
-					max_active_luns = 8;
-				} else if (rs_type ==
-					 CS_RAIDSET_PERSONALITY_RAID1){
-					max_active_luns = 16;
-				} else {
-					/* XXX KDM now what?? */
-				}
-			}
-		} else {
-			/* 50% power */
-			if (aor_present == CS_TRUE) {
-				if (rs_type ==
-				     CS_RAIDSET_PERSONALITY_RAID5) {
-					max_active_luns = 14;
-				} else if (rs_type ==
-					 CS_RAIDSET_PERSONALITY_RAID1){
-					/*
-					 * We're assuming here that disk
-					 * caching is enabled, and so we're
-					 * able to power up half of each
-					 * LUN, and cache all writes.
-					 */
-					max_active_luns = num_luns;
-				} else {
-					/* XXX KDM now what?? */
-				}
-			} else {
-				if (rs_type ==
-				     CS_RAIDSET_PERSONALITY_RAID5) {
-					max_active_luns = 15;
-				} else if (rs_type ==
-					 CS_RAIDSET_PERSONALITY_RAID1){
-					max_active_luns = 30;
-				} else {
-					/* XXX KDM now what?? */
-				}
-			}
-		}
+	case 't':
+		shift = 40;
 		break;
+	case 'g':
+		shift = 30;
+		break;
+	case 'm':
+		shift = 20;
+		break;
+	case 'k':
+		shift = 10;
+		break;
+	case 'b':
+	case '\0': /* No unit. */
+		*num = number;
+		return (0);
 	default:
-		/*
-		 * In this case, we have an unknown configuration, so we
-		 * just use the default from above.
-		 */
-		break;
+		/* Unrecognized unit. */
+		return (-1);
 	}
 
-	page->max_active_luns = max_active_luns;
-#if 0
-	printk("%s: total_luns = %d, max_active_luns = %d\n", __func__,
-	       page->total_luns, page->max_active_luns);
-#endif
+	if ((number << shift) >> shift != number) {
+		/* Overflow */
+		return (-1);
+	}
+	*num = number << shift;
+	return (0);
 }
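[Editor's note: ctl_expand_number() parses a number with an optional b/k/m/g/t/p/e suffix and rejects overflow by checking that the shift round-trips, so "4k" expands to 4096 and "1g" to 1073741824. A trimmed standalone version of the same logic (strtoull stands in for the kernel's strtoq; only the k/m/g suffixes are kept):]

#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

static int
expand_number(const char *buf, uint64_t *num)
{
	char *ep;
	uint64_t n = strtoull(buf, &ep, 0);
	unsigned shift;

	switch (tolower((unsigned char)*ep)) {
	case 'g': shift = 30; break;
	case 'm': shift = 20; break;
	case 'k': shift = 10; break;
	case 'b': case '\0': *num = n; return (0);
	default: return (-1);			/* unrecognized unit */
	}
	if ((n << shift) >> shift != n)
		return (-1);			/* overflow */
	*num = n << shift;
	return (0);
}

int
main(void)
{
	uint64_t v;

	if (expand_number("4k", &v) == 0)
		printf("%" PRIu64 "\n", v);	/* 4096 */
	return (0);
}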
-#endif /* NEEDTOPORT */
 
+
 /*
  * This routine could be used in the future to load default and/or saved
  * mode page parameters for a particular lun.
@@ -3861,34 +3965,55 @@
 static int
 ctl_init_page_index(struct ctl_lun *lun)
 {
-	int i;
+	int i, page_code;
 	struct ctl_page_index *page_index;
-	struct ctl_softc *softc;
+	const char *value;
+	uint64_t ival;
 
 	memcpy(&lun->mode_pages.index, page_index_template,
 	       sizeof(page_index_template));
 
-	softc = lun->ctl_softc;
-
 	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
 
 		page_index = &lun->mode_pages.index[i];
-		/*
-		 * If this is a disk-only mode page, there's no point in
-		 * setting it up.  For some pages, we have to have some
-		 * basic information about the disk in order to calculate the
-		 * mode page data.
-		 */
-		if ((lun->be_lun->lun_type != T_DIRECT)
-		 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
+		if (lun->be_lun->lun_type == T_DIRECT &&
+		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
 			continue;
+		if (lun->be_lun->lun_type == T_PROCESSOR &&
+		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
+			continue;
+		if (lun->be_lun->lun_type == T_CDROM &&
+		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
+			continue;
 
-		switch (page_index->page_code & SMPH_PC_MASK) {
+		page_code = page_index->page_code & SMPH_PC_MASK;
+		switch (page_code) {
+		case SMS_RW_ERROR_RECOVERY_PAGE: {
+			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
+			    ("subpage %#x for page %#x is incorrect!",
+			    page_index->subpage, page_code));
+			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
+			       &rw_er_page_default,
+			       sizeof(rw_er_page_default));
+			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
+			       &rw_er_page_changeable,
+			       sizeof(rw_er_page_changeable));
+			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
+			       &rw_er_page_default,
+			       sizeof(rw_er_page_default));
+			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
+			       &rw_er_page_default,
+			       sizeof(rw_er_page_default));
+			page_index->page_data =
+				(uint8_t *)lun->mode_pages.rw_er_page;
+			break;
+		}
 		case SMS_FORMAT_DEVICE_PAGE: {
 			struct scsi_format_page *format_page;
 
-			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
-				panic("subpage is incorrect!");
+			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
+			    ("subpage %#x for page %#x is incorrect!",
+			    page_index->subpage, page_code));
 
 			/*
 			 * Sectors per track are set above.  Bytes per
@@ -3934,9 +4059,9 @@
 			int shift;
 #endif /* !__XSCALE__ */
 
-			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
-				panic("invalid subpage value %d",
-				      page_index->subpage);
+			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
+			    ("subpage %#x for page %#x is incorrect!",
+			    page_index->subpage, page_code));
 
 			/*
 			 * Rotation rate and sectors per track are set
@@ -3950,17 +4075,11 @@
 			 * works out a fake geometry based on the capacity.
 			 */
 			memcpy(&lun->mode_pages.rigid_disk_page[
-			       CTL_PAGE_CURRENT], &rigid_disk_page_default,
+			       CTL_PAGE_DEFAULT], &rigid_disk_page_default,
 			       sizeof(rigid_disk_page_default));
 			memcpy(&lun->mode_pages.rigid_disk_page[
 			       CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
 			       sizeof(rigid_disk_page_changeable));
-			memcpy(&lun->mode_pages.rigid_disk_page[
-			       CTL_PAGE_DEFAULT], &rigid_disk_page_default,
-			       sizeof(rigid_disk_page_default));
-			memcpy(&lun->mode_pages.rigid_disk_page[
-			       CTL_PAGE_SAVED], &rigid_disk_page_default,
-			       sizeof(rigid_disk_page_default));
 
 			sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
 				CTL_DEFAULT_HEADS;
@@ -3997,196 +4116,346 @@
 				cylinders = 0xffffff;
 
 			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
-				CTL_PAGE_CURRENT];
-			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
-
-			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
 				CTL_PAGE_DEFAULT];
 			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
 
-			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
-				CTL_PAGE_SAVED];
-			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
+			if ((value = ctl_get_opt(&lun->be_lun->options,
+			    "rpm")) != NULL) {
+				scsi_ulto2b(strtol(value, NULL, 0),
+				     rigid_disk_page->rotation_rate);
+			}
 
+			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
+			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
+			       sizeof(rigid_disk_page_default));
+			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
+			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
+			       sizeof(rigid_disk_page_default));
+
 			page_index->page_data =
 				(uint8_t *)lun->mode_pages.rigid_disk_page;
 			break;
 		}
+		case SMS_VERIFY_ERROR_RECOVERY_PAGE: {
+			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
+			    ("subpage %#x for page %#x is incorrect!",
+			    page_index->subpage, page_code));
+			memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT],
+			       &verify_er_page_default,
+			       sizeof(verify_er_page_default));
+			memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE],
+			       &verify_er_page_changeable,
+			       sizeof(verify_er_page_changeable));
+			memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT],
+			       &verify_er_page_default,
+			       sizeof(verify_er_page_default));
+			memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED],
+			       &verify_er_page_default,
+			       sizeof(verify_er_page_default));
+			page_index->page_data =
+				(uint8_t *)lun->mode_pages.verify_er_page;
+			break;
+		}
 		case SMS_CACHING_PAGE: {
+			struct scsi_caching_page *caching_page;
 
-			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
-				panic("invalid subpage value %d",
-				      page_index->subpage);
-			/*
-			 * Defaults should be okay here, no calculations
-			 * needed.
-			 */
-			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
+			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
+			    ("subpage %#x for page %#x is incorrect!",
+			    page_index->subpage, page_code));
+			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
 			       &caching_page_default,
 			       sizeof(caching_page_default));
 			memcpy(&lun->mode_pages.caching_page[
 			       CTL_PAGE_CHANGEABLE], &caching_page_changeable,
 			       sizeof(caching_page_changeable));
-			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
-			       &caching_page_default,
-			       sizeof(caching_page_default));
 			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
 			       &caching_page_default,
 			       sizeof(caching_page_default));
+			caching_page = &lun->mode_pages.caching_page[
+			    CTL_PAGE_SAVED];
+			value = ctl_get_opt(&lun->be_lun->options, "writecache");
+			if (value != NULL && strcmp(value, "off") == 0)
+				caching_page->flags1 &= ~SCP_WCE;
+			value = ctl_get_opt(&lun->be_lun->options, "readcache");
+			if (value != NULL && strcmp(value, "off") == 0)
+				caching_page->flags1 |= SCP_RCD;
+			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
+			       &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
+			       sizeof(caching_page_default));
 			page_index->page_data =
 				(uint8_t *)lun->mode_pages.caching_page;
 			break;
 		}
 		case SMS_CONTROL_MODE_PAGE: {
-
-			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
-				panic("invalid subpage value %d",
-				      page_index->subpage);
-
-			/*
-			 * Defaults should be okay here, no calculations
-			 * needed.
-			 */
-			memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
-			       &control_page_default,
-			       sizeof(control_page_default));
-			memcpy(&lun->mode_pages.control_page[
-			       CTL_PAGE_CHANGEABLE], &control_page_changeable,
-			       sizeof(control_page_changeable));
-			memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
-			       &control_page_default,
-			       sizeof(control_page_default));
-			memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
-			       &control_page_default,
-			       sizeof(control_page_default));
-			page_index->page_data =
-				(uint8_t *)lun->mode_pages.control_page;
-			break;
-
-		}
-		case SMS_VENDOR_SPECIFIC_PAGE:{
 			switch (page_index->subpage) {
-			case PWR_SUBPAGE_CODE: {
-				struct copan_power_subpage *current_page,
-							   *saved_page;
+			case SMS_SUBPAGE_PAGE_0: {
+				struct scsi_control_page *control_page;
 
-				memcpy(&lun->mode_pages.power_subpage[
-				       CTL_PAGE_CURRENT],
-				       &power_page_default,
-				       sizeof(power_page_default));
-				memcpy(&lun->mode_pages.power_subpage[
-				       CTL_PAGE_CHANGEABLE],
-				       &power_page_changeable,
-				       sizeof(power_page_changeable));
-				memcpy(&lun->mode_pages.power_subpage[
-				       CTL_PAGE_DEFAULT],
-				       &power_page_default,
-				       sizeof(power_page_default));
-				memcpy(&lun->mode_pages.power_subpage[
-				       CTL_PAGE_SAVED],
-				       &power_page_default,
-				       sizeof(power_page_default));
+				memcpy(&lun->mode_pages.control_page[
+				    CTL_PAGE_DEFAULT],
+				       &control_page_default,
+				       sizeof(control_page_default));
+				memcpy(&lun->mode_pages.control_page[
+				    CTL_PAGE_CHANGEABLE],
+				       &control_page_changeable,
+				       sizeof(control_page_changeable));
+				memcpy(&lun->mode_pages.control_page[
+				    CTL_PAGE_SAVED],
+				       &control_page_default,
+				       sizeof(control_page_default));
+				control_page = &lun->mode_pages.control_page[
+				    CTL_PAGE_SAVED];
+				value = ctl_get_opt(&lun->be_lun->options,
+				    "reordering");
+				if (value != NULL &&
+				    strcmp(value, "unrestricted") == 0) {
+					control_page->queue_flags &=
+					    ~SCP_QUEUE_ALG_MASK;
+					control_page->queue_flags |=
+					    SCP_QUEUE_ALG_UNRESTRICTED;
+				}
+				memcpy(&lun->mode_pages.control_page[
+				    CTL_PAGE_CURRENT],
+				       &lun->mode_pages.control_page[
+				    CTL_PAGE_SAVED],
+				       sizeof(control_page_default));
 				page_index->page_data =
-				    (uint8_t *)lun->mode_pages.power_subpage;
-
-				current_page = (struct copan_power_subpage *)
-					(page_index->page_data +
-					 (page_index->page_len *
-					  CTL_PAGE_CURRENT));
-			        saved_page = (struct copan_power_subpage *)
-				        (page_index->page_data +
-					 (page_index->page_len *
-					  CTL_PAGE_SAVED));
+				    (uint8_t *)lun->mode_pages.control_page;
 				break;
 			}
-			case APS_SUBPAGE_CODE: {
-				struct copan_aps_subpage *current_page,
-							 *saved_page;
-
-				// This gets set multiple times but
-				// it should always be the same. It's
-				// only done during init so who cares.
-				index_to_aps_page = i;
-
-				memcpy(&lun->mode_pages.aps_subpage[
-				       CTL_PAGE_CURRENT],
-				       &aps_page_default,
-				       sizeof(aps_page_default));
-				memcpy(&lun->mode_pages.aps_subpage[
-				       CTL_PAGE_CHANGEABLE],
-				       &aps_page_changeable,
-				       sizeof(aps_page_changeable));
-				memcpy(&lun->mode_pages.aps_subpage[
-				       CTL_PAGE_DEFAULT],
-				       &aps_page_default,
-				       sizeof(aps_page_default));
-				memcpy(&lun->mode_pages.aps_subpage[
-				       CTL_PAGE_SAVED],
-				       &aps_page_default,
-				       sizeof(aps_page_default));
+			case 0x01:
+				memcpy(&lun->mode_pages.control_ext_page[
+				    CTL_PAGE_DEFAULT],
+				       &control_ext_page_default,
+				       sizeof(control_ext_page_default));
+				memcpy(&lun->mode_pages.control_ext_page[
+				    CTL_PAGE_CHANGEABLE],
+				       &control_ext_page_changeable,
+				       sizeof(control_ext_page_changeable));
+				memcpy(&lun->mode_pages.control_ext_page[
+				    CTL_PAGE_SAVED],
+				       &control_ext_page_default,
+				       sizeof(control_ext_page_default));
+				memcpy(&lun->mode_pages.control_ext_page[
+				    CTL_PAGE_CURRENT],
+				       &lun->mode_pages.control_ext_page[
+				    CTL_PAGE_SAVED],
+				       sizeof(control_ext_page_default));
 				page_index->page_data =
-					(uint8_t *)lun->mode_pages.aps_subpage;
-
-				current_page = (struct copan_aps_subpage *)
-					(page_index->page_data +
-					 (page_index->page_len *
-					  CTL_PAGE_CURRENT));
-				saved_page = (struct copan_aps_subpage *)
-					(page_index->page_data +
-					 (page_index->page_len *
-					  CTL_PAGE_SAVED));
+				    (uint8_t *)lun->mode_pages.control_ext_page;
 				break;
+			default:
+				panic("subpage %#x for page %#x is incorrect!",
+				      page_index->subpage, page_code);
 			}
-			case DBGCNF_SUBPAGE_CODE: {
-				struct copan_debugconf_subpage *current_page,
-							       *saved_page;
+			break;
+		}
+		case SMS_INFO_EXCEPTIONS_PAGE: {
+			switch (page_index->subpage) {
+			case SMS_SUBPAGE_PAGE_0:
+				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
+				       &ie_page_default,
+				       sizeof(ie_page_default));
+				memcpy(&lun->mode_pages.ie_page[
+				       CTL_PAGE_CHANGEABLE], &ie_page_changeable,
+				       sizeof(ie_page_changeable));
+				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
+				       &ie_page_default,
+				       sizeof(ie_page_default));
+				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
+				       &ie_page_default,
+				       sizeof(ie_page_default));
+				page_index->page_data =
+					(uint8_t *)lun->mode_pages.ie_page;
+				break;
+			case 0x02: {
+				struct ctl_logical_block_provisioning_page *page;
 
-				memcpy(&lun->mode_pages.debugconf_subpage[
-				       CTL_PAGE_CURRENT],
-				       &debugconf_page_default,
-				       sizeof(debugconf_page_default));
-				memcpy(&lun->mode_pages.debugconf_subpage[
-				       CTL_PAGE_CHANGEABLE],
-				       &debugconf_page_changeable,
-				       sizeof(debugconf_page_changeable));
-				memcpy(&lun->mode_pages.debugconf_subpage[
-				       CTL_PAGE_DEFAULT],
-				       &debugconf_page_default,
-				       sizeof(debugconf_page_default));
-				memcpy(&lun->mode_pages.debugconf_subpage[
-				       CTL_PAGE_SAVED],
-				       &debugconf_page_default,
-				       sizeof(debugconf_page_default));
+				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
+				       &lbp_page_default,
+				       sizeof(lbp_page_default));
+				memcpy(&lun->mode_pages.lbp_page[
+				       CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
+				       sizeof(lbp_page_changeable));
+				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
+				       &lbp_page_default,
+				       sizeof(lbp_page_default));
+				page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
+				value = ctl_get_opt(&lun->be_lun->options,
+				    "avail-threshold");
+				if (value != NULL &&
+				    ctl_expand_number(value, &ival) == 0) {
+					page->descr[0].flags |= SLBPPD_ENABLED |
+					    SLBPPD_ARMING_DEC;
+					if (lun->be_lun->blocksize)
+						ival /= lun->be_lun->blocksize;
+					else
+						ival /= 512;
+					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
+					    page->descr[0].count);
+				}
+				value = ctl_get_opt(&lun->be_lun->options,
+				    "used-threshold");
+				if (value != NULL &&
+				    ctl_expand_number(value, &ival) == 0) {
+					page->descr[1].flags |= SLBPPD_ENABLED |
+					    SLBPPD_ARMING_INC;
+					if (lun->be_lun->blocksize)
+						ival /= lun->be_lun->blocksize;
+					else
+						ival /= 512;
+					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
+					    page->descr[1].count);
+				}
+				value = ctl_get_opt(&lun->be_lun->options,
+				    "pool-avail-threshold");
+				if (value != NULL &&
+				    ctl_expand_number(value, &ival) == 0) {
+					page->descr[2].flags |= SLBPPD_ENABLED |
+					    SLBPPD_ARMING_DEC;
+					if (lun->be_lun->blocksize)
+						ival /= lun->be_lun->blocksize;
+					else
+						ival /= 512;
+					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
+					    page->descr[2].count);
+				}
+				value = ctl_get_opt(&lun->be_lun->options,
+				    "pool-used-threshold");
+				if (value != NULL &&
+				    ctl_expand_number(value, &ival) == 0) {
+					page->descr[3].flags |= SLBPPD_ENABLED |
+					    SLBPPD_ARMING_INC;
+					if (lun->be_lun->blocksize)
+						ival /= lun->be_lun->blocksize;
+					else
+						ival /= 512;
+					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
+					    page->descr[3].count);
+				}
+				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
+				       &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
+				       sizeof(lbp_page_default));
 				page_index->page_data =
-					(uint8_t *)lun->mode_pages.debugconf_subpage;
-
-				current_page = (struct copan_debugconf_subpage *)
-					(page_index->page_data +
-					 (page_index->page_len *
-					  CTL_PAGE_CURRENT));
-				saved_page = (struct copan_debugconf_subpage *)
-					(page_index->page_data +
-					 (page_index->page_len *
-					  CTL_PAGE_SAVED));
+					(uint8_t *)lun->mode_pages.lbp_page;
 				break;
 			}
 			default:
-				panic("invalid subpage value %d",
-				      page_index->subpage);
-				break;
+				panic("subpage %#x for page %#x is incorrect!",
+				      page_index->subpage, page_code);
 			}
-   			break;
+			break;
 		}
+		case SMS_CDDVD_CAPS_PAGE:{
+			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
+			    ("subpage %#x for page %#x is incorrect!",
+			    page_index->subpage, page_code));
+			memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT],
+			       &cddvd_page_default,
+			       sizeof(cddvd_page_default));
+			memcpy(&lun->mode_pages.cddvd_page[
+			       CTL_PAGE_CHANGEABLE], &cddvd_page_changeable,
+			       sizeof(cddvd_page_changeable));
+			memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
+			       &cddvd_page_default,
+			       sizeof(cddvd_page_default));
+			memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT],
+			       &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
+			       sizeof(cddvd_page_default));
+			page_index->page_data =
+				(uint8_t *)lun->mode_pages.cddvd_page;
+			break;
+		}
 		default:
-			panic("invalid page value %d",
-			      page_index->page_code & SMPH_PC_MASK);
-			break;
-    	}
+			panic("invalid page code value %#x", page_code);
+		}
 	}
 
 	return (CTL_RETVAL_COMPLETE);
 }
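
The four threshold options handled above ("avail-threshold", "used-threshold",
"pool-avail-threshold", "pool-used-threshold") are byte counts: each is
divided by the LUN block size (512 is assumed when the backend reports none)
and then shifted right by CTL_LBP_EXPONENT before being stored with
scsi_ulto4b().  A minimal userland sketch of that scaling, assuming
CTL_LBP_EXPONENT is 11, i.e. thresholds expressed in units of 2^11 logical
blocks:

#include <stdint.h>
#include <stdio.h>

#define CTL_LBP_EXPONENT	11	/* assumed value, for illustration */

static uint32_t
lbp_threshold(uint64_t bytes, uint32_t blocksize)
{
	/* Fall back to 512-byte blocks when the backend reports none. */
	uint64_t blocks = bytes / (blocksize ? blocksize : 512);

	return ((uint32_t)(blocks >> CTL_LBP_EXPONENT));
}

int
main(void)
{
	/* A 1 GiB "avail-threshold" on a 4 KiB-block LUN. */
	printf("%u\n", lbp_threshold(1ULL << 30, 4096));	/* 128 */
	return (0);
}
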
 
+static int
+ctl_init_log_page_index(struct ctl_lun *lun)
+{
+	struct ctl_page_index *page_index;
+	int i, j, k, prev;
+
+	memcpy(&lun->log_pages.index, log_page_index_template,
+	       sizeof(log_page_index_template));
+
+	prev = -1;
+	for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {
+
+		page_index = &lun->log_pages.index[i];
+		if (lun->be_lun->lun_type == T_DIRECT &&
+		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
+			continue;
+		if (lun->be_lun->lun_type == T_PROCESSOR &&
+		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
+			continue;
+		if (lun->be_lun->lun_type == T_CDROM &&
+		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
+			continue;
+
+		if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
+		    lun->backend->lun_attr == NULL)
+			continue;
+
+		if (page_index->page_code != prev) {
+			lun->log_pages.pages_page[j] = page_index->page_code;
+			prev = page_index->page_code;
+			j++;
+		}
+		lun->log_pages.subpages_page[k*2] = page_index->page_code;
+		lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
+		k++;
+	}
+	lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
+	lun->log_pages.index[0].page_len = j;
+	lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
+	lun->log_pages.index[1].page_len = k * 2;
+	lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
+	lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
+	lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
+	lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);
+	lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.ie_page;
+	lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page);
+
+	return (CTL_RETVAL_COMPLETE);
+}
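
ctl_init_log_page_index() makes a single pass over the template and builds
both SPC log lists at once: the supported-pages list receives each page code
exactly once (the 'prev' comparison collapses consecutive subpage rows),
while the supported-subpages list receives a two-byte (page code, subpage)
pair for every row that survives the LUN-type and backend filters.  A
standalone sketch of that deduplicating pass, over hypothetical index rows:

#include <stdint.h>
#include <stdio.h>

struct row {
	uint8_t page, subpage;
};

int
main(void)
{
	/* Hypothetical rows, sorted by page code as the template is. */
	struct row idx[] = {
		{ 0x00, 0x00 }, { 0x00, 0xff },
		{ 0x0c, 0x00 }, { 0x2f, 0x00 },
	};
	uint8_t pages[8], subpages[16];
	int i, j = 0, k = 0, prev = -1;

	for (i = 0; i < 4; i++) {
		if (idx[i].page != prev) {	/* each page code once */
			pages[j++] = idx[i].page;
			prev = idx[i].page;
		}
		subpages[k * 2] = idx[i].page;	/* (page, subpage) pair */
		subpages[k * 2 + 1] = idx[i].subpage;
		k++;
	}
	printf("%d page codes (first %#x), %d subpage bytes (first pair "
	    "%#x,%#x)\n", j, pages[0], k * 2, subpages[0], subpages[1]);
	return (0);
}
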
+
+static int
+hex2bin(const char *str, uint8_t *buf, int buf_size)
+{
+	int i;
+	u_char c;
+
+	memset(buf, 0, buf_size);
+	while (isspace(str[0]))
+		str++;
+	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
+		str += 2;
+	buf_size *= 2;
+	for (i = 0; str[i] != 0 && i < buf_size; i++) {
+		while (str[i] == '-')	/* Skip dashes in UUIDs. */
+			str++;
+		c = str[i];
+		if (isdigit(c))
+			c -= '0';
+		else if (isalpha(c))
+			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
+		else
+			break;
+		if (c >= 16)
+			break;
+		if ((i & 1) == 0)
+			buf[i / 2] |= (c << 4);
+		else
+			buf[i / 2] |= c;
+	}
+	return ((i + 1) / 2);
+}
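
hex2bin() above tolerates leading whitespace and an optional "0x"/"0X"
prefix, squeezes dashes out of the input so UUID strings can be passed
verbatim, stops at the first non-hex character or once the output buffer is
full, and returns the number of bytes produced.  A userland copy, for
illustration only:

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int
hex2bin(const char *str, uint8_t *buf, int buf_size)
{
	int i;
	unsigned char c;

	memset(buf, 0, buf_size);
	while (isspace((unsigned char)str[0]))
		str++;
	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		str += 2;
	buf_size *= 2;
	for (i = 0; str[i] != 0 && i < buf_size; i++) {
		while (str[i] == '-')	/* Skip dashes in UUIDs. */
			str++;
		c = str[i];
		if (isdigit(c))
			c -= '0';
		else if (isalpha(c))
			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
		else
			break;
		if (c >= 16)
			break;
		if ((i & 1) == 0)
			buf[i / 2] |= (c << 4);
		else
			buf[i / 2] |= c;
	}
	return ((i + 1) / 2);
}

int
main(void)
{
	uint8_t id[16];

	printf("%d\n", hex2bin("0xdeadbeef", id, sizeof(id)));	  /* 4 */
	printf("%d\n", hex2bin(" dead-beef-00", id, sizeof(id))); /* 5 */
	return (0);
}
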
+
 /*
  * LUN allocation.
  *
@@ -4200,11 +4469,14 @@
  */
 static int
 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
-	      struct ctl_be_lun *const be_lun, struct ctl_id target_id)
+	      struct ctl_be_lun *const be_lun)
 {
 	struct ctl_lun *nlun, *lun;
-	struct ctl_frontend *fe;
-	int lun_number, i;
+	struct scsi_vpd_id_descriptor *desc;
+	struct scsi_vpd_id_t10 *t10id;
+	const char *eui, *naa, *scsiname, *uuid, *vendor, *value;
+	int lun_number, lun_malloced;
+	int devidlen, idlen1, idlen2 = 0, len;
 
 	if (be_lun == NULL)
 		return (EINVAL);
@@ -4214,8 +4486,8 @@
 	 */
 	switch (be_lun->lun_type) {
 	case T_DIRECT:
-		break;
 	case T_PROCESSOR:
+	case T_CDROM:
 		break;
 	case T_SEQUENTIAL:
 	case T_CHANGER:
@@ -4226,12 +4498,96 @@
 	}
 	if (ctl_lun == NULL) {
 		lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
-		lun->flags = CTL_LUN_MALLOCED;
-	} else
+		lun_malloced = 1;
+	} else {
+		lun_malloced = 0;
 		lun = ctl_lun;
+	}
 
 	memset(lun, 0, sizeof(*lun));
+	if (lun_malloced)
+		lun->flags = CTL_LUN_MALLOCED;
 
+	/* Generate LUN ID. */
+	devidlen = max(CTL_DEVID_MIN_LEN,
+	    strnlen(be_lun->device_id, CTL_DEVID_LEN));
+	idlen1 = sizeof(*t10id) + devidlen;
+	len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
+	scsiname = ctl_get_opt(&be_lun->options, "scsiname");
+	if (scsiname != NULL) {
+		idlen2 = roundup2(strlen(scsiname) + 1, 4);
+		len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
+	}
+	eui = ctl_get_opt(&be_lun->options, "eui");
+	if (eui != NULL) {
+		len += sizeof(struct scsi_vpd_id_descriptor) + 16;
+	}
+	naa = ctl_get_opt(&be_lun->options, "naa");
+	if (naa != NULL) {
+		len += sizeof(struct scsi_vpd_id_descriptor) + 16;
+	}
+	uuid = ctl_get_opt(&be_lun->options, "uuid");
+	if (uuid != NULL) {
+		len += sizeof(struct scsi_vpd_id_descriptor) + 18;
+	}
+	lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
+	    M_CTL, M_WAITOK | M_ZERO);
+	desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
+	desc->proto_codeset = SVPD_ID_CODESET_ASCII;
+	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
+	desc->length = idlen1;
+	t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
+	memset(t10id->vendor, ' ', sizeof(t10id->vendor));
+	if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) {
+		strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
+	} else {
+		strncpy(t10id->vendor, vendor,
+		    min(sizeof(t10id->vendor), strlen(vendor)));
+	}
+	strncpy((char *)t10id->vendor_spec_id,
+	    (char *)be_lun->device_id, devidlen);
+	if (scsiname != NULL) {
+		desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+		    desc->length);
+		desc->proto_codeset = SVPD_ID_CODESET_UTF8;
+		desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+		    SVPD_ID_TYPE_SCSI_NAME;
+		desc->length = idlen2;
+		strlcpy(desc->identifier, scsiname, idlen2);
+	}
+	if (eui != NULL) {
+		desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+		    desc->length);
+		desc->proto_codeset = SVPD_ID_CODESET_BINARY;
+		desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+		    SVPD_ID_TYPE_EUI64;
+		desc->length = hex2bin(eui, desc->identifier, 16);
+		desc->length = desc->length > 12 ? 16 :
+		    (desc->length > 8 ? 12 : 8);
+		len -= 16 - desc->length;
+	}
+	if (naa != NULL) {
+		desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+		    desc->length);
+		desc->proto_codeset = SVPD_ID_CODESET_BINARY;
+		desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+		    SVPD_ID_TYPE_NAA;
+		desc->length = hex2bin(naa, desc->identifier, 16);
+		desc->length = desc->length > 8 ? 16 : 8;
+		len -= 16 - desc->length;
+	}
+	if (uuid != NULL) {
+		desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+		    desc->length);
+		desc->proto_codeset = SVPD_ID_CODESET_BINARY;
+		desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+		    SVPD_ID_TYPE_UUID;
+		desc->identifier[0] = 0x10;
+		hex2bin(uuid, &desc->identifier[2], 16);
+		desc->length = 18;
+	}
+	lun->lun_devid->len = len;
+
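
Each option appends one designation descriptor behind the mandatory T10
vendor ID descriptor, and len is trimmed whenever the decoded binary
identifier comes in under the 16 bytes initially reserved: EUI identifiers
round up to the nearest valid size of 8, 12 or 16 bytes, NAA to 8 or 16, and
a UUID descriptor is always 18 bytes (a two-byte header, then the 16-byte
UUID starting at identifier[2]).  A standalone rendering of the two rounding
rules:

#include <stdio.h>

static int
eui_len(int decoded)		/* valid EUI designator sizes: 8, 12, 16 */
{
	return (decoded > 12 ? 16 : (decoded > 8 ? 12 : 8));
}

static int
naa_len(int decoded)		/* valid NAA designator sizes: 8, 16 */
{
	return (decoded > 8 ? 16 : 8);
}

int
main(void)
{
	printf("EUI: 10 bytes decoded -> %d\n", eui_len(10));	/* 12 */
	printf("NAA:  5 bytes decoded -> %d\n", naa_len(5));	/* 8 */
	return (0);
}
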
 	mtx_lock(&ctl_softc->ctl_lock);
 	/*
 	 * See if the caller requested a particular LUN number.  If so, see
@@ -4253,6 +4609,8 @@
 				printf("ctl: requested LUN ID %d is already "
 				       "in use\n", be_lun->req_lun_id);
 			}
+fail:
+			free(lun->lun_devid, M_CTL);
 			if (lun->flags & CTL_LUN_MALLOCED)
 				free(lun, M_CTL);
 			be_lun->lun_config_status(be_lun->be_lun,
@@ -4261,21 +4619,17 @@
 		}
 		lun_number = be_lun->req_lun_id;
 	} else {
-		lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS);
+		lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS);
 		if (lun_number == -1) {
 			mtx_unlock(&ctl_softc->ctl_lock);
-			printf("ctl: can't allocate LUN on target %ju, out of "
-			       "LUNs\n", (uintmax_t)target_id.id);
-			if (lun->flags & CTL_LUN_MALLOCED)
-				free(lun, M_CTL);
-			be_lun->lun_config_status(be_lun->be_lun,
-						  CTL_LUN_CONFIG_FAILURE);
-			return (ENOSPC);
+			printf("ctl: can't allocate LUN, out of LUNs\n");
+			goto fail;
 		}
 	}
 	ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
+	mtx_unlock(&ctl_softc->ctl_lock);
 
-	lun->target = target_id;
+	mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
 	lun->lun = lun_number;
 	lun->be_lun = be_lun;
 	/*
@@ -4282,101 +4636,77 @@
 	 * The processor LUN is always enabled.  Disk LUNs come on line
 	 * disabled, and must be enabled by the backend.
 	 */
-	lun->flags = CTL_LUN_DISABLED;
+	lun->flags |= CTL_LUN_DISABLED;
 	lun->backend = be_lun->be;
 	be_lun->ctl_lun = lun;
 	be_lun->lun_id = lun_number;
 	atomic_add_int(&be_lun->be->num_luns, 1);
-	if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF)
+	if (be_lun->flags & CTL_LUN_FLAG_EJECTED)
+		lun->flags |= CTL_LUN_EJECTED;
+	if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA)
+		lun->flags |= CTL_LUN_NO_MEDIA;
+	if (be_lun->flags & CTL_LUN_FLAG_STOPPED)
 		lun->flags |= CTL_LUN_STOPPED;
 
-	if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE)
-		lun->flags |= CTL_LUN_INOPERABLE;
-
 	if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
 		lun->flags |= CTL_LUN_PRIMARY_SC;
 
+	value = ctl_get_opt(&be_lun->options, "removable");
+	if (value != NULL) {
+		if (strcmp(value, "on") == 0)
+			lun->flags |= CTL_LUN_REMOVABLE;
+	} else if (be_lun->lun_type == T_CDROM)
+		lun->flags |= CTL_LUN_REMOVABLE;
+
 	lun->ctl_softc = ctl_softc;
+#ifdef CTL_TIME_IO
+	lun->last_busy = getsbinuptime();
+#endif
 	TAILQ_INIT(&lun->ooa_queue);
 	TAILQ_INIT(&lun->blocked_queue);
 	STAILQ_INIT(&lun->error_list);
+	lun->ie_reported = 1;
+	callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0);
+	ctl_tpc_lun_init(lun);
+	if (lun->flags & CTL_LUN_REMOVABLE) {
+		lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4,
+		    M_CTL, M_WAITOK);
+	}
 
 	/*
-	 * Initialize the mode page index.
+	 * Initialize the mode and log page index.
 	 */
 	ctl_init_page_index(lun);
+	ctl_init_log_page_index(lun);
 
-	/*
-	 * Set the poweron UA for all initiators on this LUN only.
-	 */
-	for (i = 0; i < CTL_MAX_INITIATORS; i++)
-		lun->pending_sense[i].ua_pending = CTL_UA_POWERON;
+	/* Setup statistics gathering */
+#ifdef CTL_LEGACY_STATS
+	lun->legacy_stats.device_type = be_lun->lun_type;
+	lun->legacy_stats.lun_number = lun_number;
+	lun->legacy_stats.blocksize = be_lun->blocksize;
+	if (be_lun->blocksize == 0)
+		lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
+	for (len = 0; len < CTL_MAX_PORTS; len++)
+		lun->legacy_stats.ports[len].targ_port = len;
+#endif /* CTL_LEGACY_STATS */
+	lun->stats.item = lun_number;
 
 	/*
 	 * Now, before we insert this lun on the lun list, set the lun
 	 * inventory changed UA for all other luns.
 	 */
+	mtx_lock(&ctl_softc->ctl_lock);
 	STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
-		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
-			nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
-		}
+		mtx_lock(&nlun->lun_lock);
+		ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
+		mtx_unlock(&nlun->lun_lock);
 	}
-
 	STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
-
 	ctl_softc->ctl_luns[lun_number] = lun;
-
 	ctl_softc->num_luns++;
-
-	/* Setup statistics gathering */
-	lun->stats.device_type = be_lun->lun_type;
-	lun->stats.lun_number = lun_number;
-	if (lun->stats.device_type == T_DIRECT)
-		lun->stats.blocksize = be_lun->blocksize;
-	else
-		lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
-	for (i = 0;i < CTL_MAX_PORTS;i++)
-		lun->stats.ports[i].targ_port = i;
-
 	mtx_unlock(&ctl_softc->ctl_lock);
 
 	lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
-
-	/*
-	 * Run through each registered FETD and bring it online if it isn't
-	 * already.  Enable the target ID if it hasn't been enabled, and
-	 * enable this particular LUN.
-	 */
-	STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
-		int retval;
-
-		/*
-		 * XXX KDM this only works for ONE TARGET ID.  We'll need
-		 * to do things differently if we go to a multiple target
-		 * ID scheme.
-		 */
-		if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) {
-
-			retval = fe->targ_enable(fe->targ_lun_arg, target_id);
-			if (retval != 0) {
-				printf("ctl_alloc_lun: FETD %s port %d "
-				       "returned error %d for targ_enable on "
-				       "target %ju\n", fe->port_name,
-				       fe->targ_port, retval,
-				       (uintmax_t)target_id.id);
-			} else
-				fe->status |= CTL_PORT_STATUS_TARG_ONLINE;
-		}
-
-		retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number);
-		if (retval != 0) {
-			printf("ctl_alloc_lun: FETD %s port %d returned error "
-			       "%d for lun_enable on target %ju lun %d\n",
-			       fe->port_name, fe->targ_port, retval,
-			       (uintmax_t)target_id.id, lun_number);
-		} else
-			fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
-	}
 	return (0);
 }
 
@@ -4383,7 +4713,6 @@
 /*
  * Delete a LUN.
  * Assumptions:
- * - caller holds ctl_softc->ctl_lock.
  * - LUN has already been marked invalid and any pending I/O has been taken
  *   care of.
  */
@@ -4390,15 +4719,11 @@
 static int
 ctl_free_lun(struct ctl_lun *lun)
 {
-	struct ctl_softc *softc;
-#if 0
-	struct ctl_frontend *fe;
-#endif
+	struct ctl_softc *softc = lun->ctl_softc;
 	struct ctl_lun *nlun;
-	union ctl_io *io, *next_io;
 	int i;
 
-	softc = lun->ctl_softc;
+	mtx_assert(&softc->ctl_lock, MA_OWNED);
 
 	STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
 
@@ -4406,112 +4731,36 @@
 
 	softc->ctl_luns[lun->lun] = NULL;
 
-	if (TAILQ_FIRST(&lun->ooa_queue) != NULL) {
-		printf("ctl_free_lun: aieee!! freeing a LUN with "
-		       "outstanding I/O!!\n");
-	}
+	if (!TAILQ_EMPTY(&lun->ooa_queue))
+		panic("Freeing a LUN %p with outstanding I/O!!\n", lun);
 
-	/*
-	 * If we have anything pending on the RtR queue, remove it.
-	 */
-	for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL;
-	     io = next_io) {
-		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
-		if ((io->io_hdr.nexus.targ_target.id == lun->target.id)
-		 && (io->io_hdr.nexus.targ_lun == lun->lun))
-			STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
-				      ctl_io_hdr, links);
-	}
-
-	/*
-	 * Then remove everything from the blocked queue.
-	 */
-	for (io = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); io != NULL;
-	     io = next_io) {
-		next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,blocked_links);
-		TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr, blocked_links);
-		io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
-	}
-
-	/*
-	 * Now clear out the OOA queue, and free all the I/O.
-	 * XXX KDM should we notify the FETD here?  We probably need to
-	 * quiesce the LUN before deleting it.
-	 */
-	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL;
-	     io = next_io) {
-		next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links);
-		TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
-		ctl_free_io_internal(io, /*have_lock*/ 1);
-	}
-
 	softc->num_luns--;
 
 	/*
-	 * XXX KDM this scheme only works for a single target/multiple LUN
-	 * setup.  It needs to be revamped for a multiple target scheme.
-	 *
-	 * XXX KDM this results in fe->lun_disable() getting called twice,
-	 * once when ctl_disable_lun() is called, and a second time here.
-	 * We really need to re-think the LUN disable semantics.  There
-	 * should probably be several steps/levels to LUN removal:
-	 *  - disable
-	 *  - invalidate
-	 *  - free
- 	 *
-	 * Right now we only have a disable method when communicating to
-	 * the front end ports, at least for individual LUNs.
-	 */
-#if 0
-	STAILQ_FOREACH(fe, &softc->fe_list, links) {
-		int retval;
-
-		retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
-					 lun->lun);
-		if (retval != 0) {
-			printf("ctl_free_lun: FETD %s port %d returned error "
-			       "%d for lun_disable on target %ju lun %jd\n",
-			       fe->port_name, fe->targ_port, retval,
-			       (uintmax_t)lun->target.id, (intmax_t)lun->lun);
-		}
-
-		if (STAILQ_FIRST(&softc->lun_list) == NULL) {
-			fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE;
-
-			retval = fe->targ_disable(fe->targ_lun_arg,lun->target);
-			if (retval != 0) {
-				printf("ctl_free_lun: FETD %s port %d "
-				       "returned error %d for targ_disable on "
-				       "target %ju\n", fe->port_name,
-				       fe->targ_port, retval,
-				       (uintmax_t)lun->target.id);
-			} else
-				fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE;
-
-			if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
-				continue;
-
-#if 0
-			fe->port_offline(fe->onoff_arg);
-			fe->status &= ~CTL_PORT_STATUS_ONLINE;
-#endif
-		}
-	}
-#endif
-
-	/*
 	 * Tell the backend to free resources, if this LUN has a backend.
 	 */
 	atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
 	lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
 
+	lun->ie_reportcnt = UINT32_MAX;
+	callout_drain(&lun->ie_callout);
+
+	ctl_tpc_lun_shutdown(lun);
+	mtx_destroy(&lun->lun_lock);
+	free(lun->lun_devid, M_CTL);
+	for (i = 0; i < CTL_MAX_PORTS; i++)
+		free(lun->pending_ua[i], M_CTL);
+	for (i = 0; i < CTL_MAX_PORTS; i++)
+		free(lun->pr_keys[i], M_CTL);
+	free(lun->write_buffer, M_CTL);
+	free(lun->prevent, M_CTL);
 	if (lun->flags & CTL_LUN_MALLOCED)
 		free(lun, M_CTL);
 
 	STAILQ_FOREACH(nlun, &softc->lun_list, links) {
-		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
-			nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
-		}
+		mtx_lock(&nlun->lun_lock);
+		ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
+		mtx_unlock(&nlun->lun_lock);
 	}
 
 	return (0);
@@ -4520,29 +4769,23 @@
 static void
 ctl_create_lun(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
 
-	ctl_softc = control_softc;
-
 	/*
 	 * ctl_alloc_lun() should handle all potential failure cases.
 	 */
-	ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target);
+	ctl_alloc_lun(control_softc, NULL, be_lun);
 }
 
 int
 ctl_add_lun(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
+	struct ctl_softc *softc = control_softc;
 
-	ctl_softc = control_softc;
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links);
+	mtx_unlock(&softc->ctl_lock);
+	wakeup(&softc->pending_lun_queue);
 
-	mtx_lock(&ctl_softc->ctl_lock);
-	STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links);
-	mtx_unlock(&ctl_softc->ctl_lock);
-
-	ctl_wakeup_thread();
-
 	return (0);
 }
 
@@ -4549,28 +4792,32 @@
 int
 ctl_enable_lun(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
-	struct ctl_frontend *fe, *nfe;
+	struct ctl_softc *softc;
+	struct ctl_port *port, *nport;
 	struct ctl_lun *lun;
 	int retval;
 
-	ctl_softc = control_softc;
-
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
+	softc = lun->ctl_softc;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
 		/*
 		 * eh?  Why did we get called if the LUN is already
 		 * enabled?
 		 */
-		mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
+		mtx_unlock(&softc->ctl_lock);
 		return (0);
 	}
 	lun->flags &= ~CTL_LUN_DISABLED;
+	mtx_unlock(&lun->lun_lock);
 
-	for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) {
-		nfe = STAILQ_NEXT(fe, links);
+	STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) {
+		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
+		    port->lun_map != NULL || port->lun_enable == NULL)
+			continue;
 
 		/*
 		 * Drop the lock while we call the FETD's enable routine.
@@ -4577,24 +4824,19 @@
 		 * This can lead to a callback into CTL (at least in the
 		 * case of the internal initiator frontend).
 		 */
-		mtx_unlock(&ctl_softc->ctl_lock);
-		retval = fe->lun_enable(fe->targ_lun_arg, lun->target,lun->lun);
-		mtx_lock(&ctl_softc->ctl_lock);
+		mtx_unlock(&softc->ctl_lock);
+		retval = port->lun_enable(port->targ_lun_arg, lun->lun);
+		mtx_lock(&softc->ctl_lock);
 		if (retval != 0) {
 			printf("%s: FETD %s port %d returned error "
-			       "%d for lun_enable on target %ju lun %jd\n",
-			       __func__, fe->port_name, fe->targ_port, retval,
-			       (uintmax_t)lun->target.id, (intmax_t)lun->lun);
+			       "%d for lun_enable on lun %jd\n",
+			       __func__, port->port_name, port->targ_port,
+			       retval, (intmax_t)lun->lun);
 		}
-#if 0
-		 else {
-            /* NOTE:  TODO:  why does lun enable affect port status? */
-			fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
-		}
-#endif
 	}
 
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&softc->ctl_lock);
+	ctl_isc_announce_lun(lun);
 
 	return (0);
 }
@@ -4602,25 +4844,29 @@
 int
 ctl_disable_lun(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
-	struct ctl_frontend *fe;
+	struct ctl_softc *softc;
+	struct ctl_port *port;
 	struct ctl_lun *lun;
 	int retval;
 
-	ctl_softc = control_softc;
-
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
+	softc = lun->ctl_softc;
 
-	mtx_lock(&ctl_softc->ctl_lock);
-
+	mtx_lock(&softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	if (lun->flags & CTL_LUN_DISABLED) {
-		mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
+		mtx_unlock(&softc->ctl_lock);
 		return (0);
 	}
 	lun->flags |= CTL_LUN_DISABLED;
+	mtx_unlock(&lun->lun_lock);
 
-	STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
-		mtx_unlock(&ctl_softc->ctl_lock);
+	STAILQ_FOREACH(port, &softc->port_list, links) {
+		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
+		    port->lun_map != NULL || port->lun_disable == NULL)
+			continue;
+
 		/*
 		 * Drop the lock before we call the frontend's disable
 		 * routine, to avoid lock order reversals.
@@ -4628,18 +4874,19 @@
 		 * XXX KDM what happens if the frontend list changes while
 		 * we're traversing it?  It's unlikely, but should be handled.
 		 */
-		retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
-					 lun->lun);
-		mtx_lock(&ctl_softc->ctl_lock);
+		mtx_unlock(&softc->ctl_lock);
+		retval = port->lun_disable(port->targ_lun_arg, lun->lun);
+		mtx_lock(&softc->ctl_lock);
 		if (retval != 0) {
-			printf("ctl_alloc_lun: FETD %s port %d returned error "
-			       "%d for lun_disable on target %ju lun %jd\n",
-			       fe->port_name, fe->targ_port, retval,
-			       (uintmax_t)lun->target.id, (intmax_t)lun->lun);
+			printf("%s: FETD %s port %d returned error "
+			       "%d for lun_disable on lun %jd\n",
+			       __func__, port->port_name, port->targ_port,
+			       retval, (intmax_t)lun->lun);
 		}
 	}
 
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&softc->ctl_lock);
+	ctl_isc_announce_lun(lun);
 
 	return (0);
 }
@@ -4647,17 +4894,11 @@
 int
 ctl_start_lun(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
+	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	ctl_softc = control_softc;
-
-	lun = (struct ctl_lun *)be_lun->ctl_lun;
-
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	lun->flags &= ~CTL_LUN_STOPPED;
-	mtx_unlock(&ctl_softc->ctl_lock);
-
+	mtx_unlock(&lun->lun_lock);
 	return (0);
 }
 
@@ -4664,51 +4905,87 @@
 int
 ctl_stop_lun(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
+	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	ctl_softc = control_softc;
+	mtx_lock(&lun->lun_lock);
+	lun->flags |= CTL_LUN_STOPPED;
+	mtx_unlock(&lun->lun_lock);
+	return (0);
+}
 
-	lun = (struct ctl_lun *)be_lun->ctl_lun;
+int
+ctl_lun_no_media(struct ctl_be_lun *be_lun)
+{
+	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
-	lun->flags |= CTL_LUN_STOPPED;
-	mtx_unlock(&ctl_softc->ctl_lock);
-
+	mtx_lock(&lun->lun_lock);
+	lun->flags |= CTL_LUN_NO_MEDIA;
+	mtx_unlock(&lun->lun_lock);
 	return (0);
 }
 
 int
-ctl_lun_offline(struct ctl_be_lun *be_lun)
+ctl_lun_has_media(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
+	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
+	union ctl_ha_msg msg;
 
-	ctl_softc = control_softc;
+	mtx_lock(&lun->lun_lock);
+	lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED);
+	if (lun->flags & CTL_LUN_REMOVABLE)
+		ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE);
+	mtx_unlock(&lun->lun_lock);
+	if ((lun->flags & CTL_LUN_REMOVABLE) &&
+	    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+		bzero(&msg.ua, sizeof(msg.ua));
+		msg.hdr.msg_type = CTL_MSG_UA;
+		msg.hdr.nexus.initid = -1;
+		msg.hdr.nexus.targ_port = -1;
+		msg.hdr.nexus.targ_lun = lun->lun;
+		msg.hdr.nexus.targ_mapped_lun = lun->lun;
+		msg.ua.ua_all = 1;
+		msg.ua.ua_set = 1;
+		msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE;
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
+		    M_WAITOK);
+	}
+	return (0);
+}
 
-	lun = (struct ctl_lun *)be_lun->ctl_lun;
+int
+ctl_lun_ejected(struct ctl_be_lun *be_lun)
+{
+	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
-	lun->flags |= CTL_LUN_OFFLINE;
-	mtx_unlock(&ctl_softc->ctl_lock);
-
+	mtx_lock(&lun->lun_lock);
+	lun->flags |= CTL_LUN_EJECTED;
+	mtx_unlock(&lun->lun_lock);
 	return (0);
 }
 
 int
-ctl_lun_online(struct ctl_be_lun *be_lun)
+ctl_lun_primary(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
+	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	ctl_softc = control_softc;
+	mtx_lock(&lun->lun_lock);
+	lun->flags |= CTL_LUN_PRIMARY_SC;
+	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
+	mtx_unlock(&lun->lun_lock);
+	ctl_isc_announce_lun(lun);
+	return (0);
+}
 
-	lun = (struct ctl_lun *)be_lun->ctl_lun;
+int
+ctl_lun_secondary(struct ctl_be_lun *be_lun)
+{
+	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
-	lun->flags &= ~CTL_LUN_OFFLINE;
-	mtx_unlock(&ctl_softc->ctl_lock);
-
+	mtx_lock(&lun->lun_lock);
+	lun->flags &= ~CTL_LUN_PRIMARY_SC;
+	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
+	mtx_unlock(&lun->lun_lock);
+	ctl_isc_announce_lun(lun);
 	return (0);
 }
 
@@ -4715,20 +4992,19 @@
 int
 ctl_invalidate_lun(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
+	struct ctl_softc *softc;
 	struct ctl_lun *lun;
 
-	ctl_softc = control_softc;
-
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
+	softc = lun->ctl_softc;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 
 	/*
 	 * The LUN needs to be disabled before it can be marked invalid.
 	 */
 	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
-		mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		return (-1);
 	}
 	/*
@@ -4741,126 +5017,14 @@
 	 * If we have something in the OOA queue, we'll free it when the
 	 * last I/O completes.
 	 */
-	if (TAILQ_FIRST(&lun->ooa_queue) == NULL)
+	if (TAILQ_EMPTY(&lun->ooa_queue)) {
+		mtx_unlock(&lun->lun_lock);
+		mtx_lock(&softc->ctl_lock);
 		ctl_free_lun(lun);
-	mtx_unlock(&ctl_softc->ctl_lock);
-
-	return (0);
-}
-
-int
-ctl_lun_inoperable(struct ctl_be_lun *be_lun)
-{
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
-
-	ctl_softc = control_softc;
-	lun = (struct ctl_lun *)be_lun->ctl_lun;
-
-	mtx_lock(&ctl_softc->ctl_lock);
-	lun->flags |= CTL_LUN_INOPERABLE;
-	mtx_unlock(&ctl_softc->ctl_lock);
-
-	return (0);
-}
-
-int
-ctl_lun_operable(struct ctl_be_lun *be_lun)
-{
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
-
-	ctl_softc = control_softc;
-	lun = (struct ctl_lun *)be_lun->ctl_lun;
-
-	mtx_lock(&ctl_softc->ctl_lock);
-	lun->flags &= ~CTL_LUN_INOPERABLE;
-	mtx_unlock(&ctl_softc->ctl_lock);
-
-	return (0);
-}
-
-int
-ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
-		   int lock)
-{
-	struct ctl_softc *softc;
-	struct ctl_lun *lun;
-	struct copan_aps_subpage *current_sp;
-	struct ctl_page_index *page_index;
-	int i;
-
-	softc = control_softc;
-
-	mtx_lock(&softc->ctl_lock);
-
-	lun = (struct ctl_lun *)be_lun->ctl_lun;
-
-	page_index = NULL;
-	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
-		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
-		     APS_PAGE_CODE)
-			continue;
-
-		if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE)
-			continue;
-		page_index = &lun->mode_pages.index[i];
-	}
-
-	if (page_index == NULL) {
 		mtx_unlock(&softc->ctl_lock);
-		printf("%s: APS subpage not found for lun %ju!\n", __func__,
-		       (uintmax_t)lun->lun);
-		return (1);
-	}
-#if 0
-	if ((softc->aps_locked_lun != 0)
-	 && (softc->aps_locked_lun != lun->lun)) {
-		printf("%s: attempt to lock LUN %llu when %llu is already "
-		       "locked\n");
-		mtx_unlock(&softc->ctl_lock);
-		return (1);
-	}
-#endif
+	} else
+		mtx_unlock(&lun->lun_lock);
 
-	current_sp = (struct copan_aps_subpage *)(page_index->page_data +
-		(page_index->page_len * CTL_PAGE_CURRENT));
-
-	if (lock != 0) {
-		current_sp->lock_active = APS_LOCK_ACTIVE;
-		softc->aps_locked_lun = lun->lun;
-	} else {
-		current_sp->lock_active = 0;
-		softc->aps_locked_lun = 0;
-	}
-
-
-	/*
-	 * If we're in HA mode, try to send the lock message to the other
-	 * side.
-	 */
-	if (ctl_is_single == 0) {
-		int isc_retval;
-		union ctl_ha_msg lock_msg;
-
-		lock_msg.hdr.nexus = *nexus;
-		lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK;
-		if (lock != 0)
-			lock_msg.aps.lock_flag = 1;
-		else
-			lock_msg.aps.lock_flag = 0;
-		isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg,
-					 sizeof(lock_msg), 0);
-		if (isc_retval > CTL_HA_STATUS_SUCCESS) {
-			printf("%s: APS (lock=%d) error returned from "
-			       "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
-			mtx_unlock(&softc->ctl_lock);
-			return (1);
-		}
-	}
-
-	mtx_unlock(&softc->ctl_lock);
-
 	return (0);
 }
 
@@ -4867,20 +5031,26 @@
 void
 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
 {
-	struct ctl_lun *lun;
-	struct ctl_softc *softc;
-	int i;
+	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
+	union ctl_ha_msg msg;
 
-	softc = control_softc;
-
-	mtx_lock(&softc->ctl_lock);
-
-	lun = (struct ctl_lun *)be_lun->ctl_lun;
-
-	for (i = 0; i < CTL_MAX_INITIATORS; i++) 
-		lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED;
-
-	mtx_unlock(&softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
+	ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE);
+	mtx_unlock(&lun->lun_lock);
+	if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+		/* Send msg to other side. */
+		bzero(&msg.ua, sizeof(msg.ua));
+		msg.hdr.msg_type = CTL_MSG_UA;
+		msg.hdr.nexus.initid = -1;
+		msg.hdr.nexus.targ_port = -1;
+		msg.hdr.nexus.targ_lun = lun->lun;
+		msg.hdr.nexus.targ_mapped_lun = lun->lun;
+		msg.ua.ua_all = 1;
+		msg.ua.ua_set = 1;
+		msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE;
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
+		    M_WAITOK);
+	}
 }
 
 /*
@@ -4892,51 +5062,37 @@
 {
 	int retval;
 
-	retval = CTL_RETVAL_COMPLETE;
-
-
 	CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
-	/*
-	 * XXX KDM this shouldn't happen, but what if it does?
-	 */
-	if (io->io_hdr.io_type != CTL_IO_SCSI)
-		panic("I/O type isn't CTL_IO_SCSI!");
+	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
+	    ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type));
 
-	if ((io->io_hdr.port_status == 0)
-	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
-	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
-		io->io_hdr.status = CTL_SUCCESS;
-	else if ((io->io_hdr.port_status != 0)
-	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
-	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){
-		/*
-		 * For hardware error sense keys, the sense key
-		 * specific value is defined to be a retry count,
-		 * but we use it to pass back an internal FETD
-		 * error code.  XXX KDM  Hopefully the FETD is only
-		 * using 16 bits for an error code, since that's
-		 * all the space we have in the sks field.
-		 */
-		ctl_set_internal_failure(&io->scsiio,
-					 /*sks_valid*/ 1,
-					 /*retry_count*/
-					 io->io_hdr.port_status);
-		free(io->scsiio.kern_data_ptr, M_CTL);
-		ctl_done(io);
-		goto bailout;
+	if ((io->io_hdr.port_status != 0) &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+		    /*retry_count*/ io->io_hdr.port_status);
+	} else if (io->scsiio.kern_data_resid != 0 &&
+	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_invalid_field_ciu(&io->scsiio);
 	}
 
-	if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
-	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
-	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
+	if (ctl_debug & CTL_DEBUG_CDB_DATA)
+		ctl_data_print(io);
+	if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
+	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
+	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
+	    ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
 		/*
 		 * XXX KDM just assuming a single pointer here, and not a
 		 * S/G list.  If we start using S/G lists for config data,
 		 * we'll need to know how to clean them up here as well.
 		 */
-		free(io->scsiio.kern_data_ptr, M_CTL);
-		/* Hopefully the user has already set the status... */
+		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
+			free(io->scsiio.kern_data_ptr, M_CTL);
 		ctl_done(io);
+		retval = CTL_RETVAL_COMPLETE;
 	} else {
 		/*
 		 * XXX KDM now we need to continue data movement.  Some
@@ -4957,17 +5113,43 @@
 		 */
 		retval = ctl_scsiio(&io->scsiio);
 	}
-bailout:
 	return (retval);
 }
 
 /*
  * This gets called by a backend driver when it is done with a
+ * data_submit method.
+ */
+void
+ctl_data_submit_done(union ctl_io *io)
+{
+	/*
+	 * If the IO_CONT flag is set, we need to call the supplied
+	 * function to continue processing the I/O, instead of completing
+	 * the I/O just yet.
+	 *
+	 * If there is an error, though, we don't want to keep processing.
+	 * Instead, just send status back to the initiator.
+	 */
+	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
+	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		io->scsiio.io_cont(io);
+		return;
+	}
+	ctl_done(io);
+}
+
+/*
+ * This gets called by a backend driver when it is done with a
  * configuration write.
  */
 void
 ctl_config_write_done(union ctl_io *io)
 {
+	uint8_t *buf;
+
 	/*
 	 * If the IO_CONT flag is set, we need to call the supplied
 	 * function to continue processing the I/O, instead of completing
@@ -4976,9 +5158,10 @@
 	 * If there is an error, though, we don't want to keep processing.
 	 * Instead, just send status back to the initiator.
 	 */
-	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT)
-	 && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)
-	  || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) {
+	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
+	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
 		io->scsiio.io_cont(io);
 		return;
 	}
@@ -4987,11 +5170,49 @@
 	 * have data allocated, like write buffer, and commands that have
 	 * no data, like start/stop unit, we need to check here.
 	 */
-	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
-		free(io->scsiio.kern_data_ptr, M_CTL);
+	if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
+		buf = io->scsiio.kern_data_ptr;
+	else
+		buf = NULL;
 	ctl_done(io);
+	if (buf)
+		free(buf, M_CTL);
 }
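
ctl_data_submit_done() and ctl_config_write_done() repeat the same
predicate: continue only when the I/O was not aborted and its masked status
is still unset or already success.  Note also that ctl_config_write_done()
reads kern_data_ptr before calling ctl_done() and frees it only afterwards,
so the buffer is still valid during any final processing of the I/O.  A
standalone model of the predicate, with stand-in flag and status values (the
real CTL_* constants are not reproduced here):

#include <stdint.h>
#include <stdio.h>

#define FLAG_ABORT	0x01	/* stands in for CTL_FLAG_ABORT */
#define STATUS_MASK	0xff	/* stands in for CTL_STATUS_MASK */
#define STATUS_NONE	0x00	/* stands in for CTL_STATUS_NONE */
#define STATUS_SUCCESS	0x01	/* stands in for CTL_SUCCESS */

static int
io_still_good(uint32_t flags, uint32_t status)
{
	uint32_t st = status & STATUS_MASK;

	return ((flags & FLAG_ABORT) == 0 &&
	    (st == STATUS_NONE || st == STATUS_SUCCESS));
}

int
main(void)
{
	printf("%d\n", io_still_good(0, STATUS_NONE));		/* 1 */
	printf("%d\n", io_still_good(FLAG_ABORT, STATUS_NONE));	/* 0 */
	return (0);
}
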
 
+void
+ctl_config_read_done(union ctl_io *io)
+{
+	uint8_t *buf;
+
+	/*
+	 * If there was an error, we are done; skip the data transfer.
+	 */
+	if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
+	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
+	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
+		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
+			buf = io->scsiio.kern_data_ptr;
+		else
+			buf = NULL;
+		ctl_done(io);
+		if (buf)
+			free(buf, M_CTL);
+		return;
+	}
+
+	/*
+	 * If the IO_CONT flag is set, we need to call the supplied
+	 * function to continue processing the I/O, instead of completing
+	 * the I/O just yet.
+	 */
+	if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
+		io->scsiio.io_cont(io);
+		return;
+	}
+
+	ctl_datamove(io);
+}
+
 /*
  * SCSI release command.
  */
@@ -4998,75 +5219,13 @@
 int
 ctl_scsi_release(struct ctl_scsiio *ctsio)
 {
-	int length, longid, thirdparty_id, resv_id;
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	uint32_t residx;
 
-	length = 0;
-	resv_id = 0;
-
 	CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-	ctl_softc = control_softc;
+	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
 
-	switch (ctsio->cdb[0]) {
-	case RELEASE: {
-		struct scsi_release *cdb;
-
-		cdb = (struct scsi_release *)ctsio->cdb;
-		if ((cdb->byte2 & 0x1f) != 0) {
-			ctl_set_invalid_field(ctsio,
-					      /*sks_valid*/ 1,
-					      /*command*/ 1,
-					      /*field*/ 1,
-					      /*bit_valid*/ 0,
-					      /*bit*/ 0);
-			ctl_done((union ctl_io *)ctsio);
-			return (CTL_RETVAL_COMPLETE);
-		}
-		break;
-	}
-	case RELEASE_10: {
-		struct scsi_release_10 *cdb;
-
-		cdb = (struct scsi_release_10 *)ctsio->cdb;
-
-		if ((cdb->byte2 & SR10_EXTENT) != 0) {
-			ctl_set_invalid_field(ctsio,
-					      /*sks_valid*/ 1,
-					      /*command*/ 1,
-					      /*field*/ 1,
-					      /*bit_valid*/ 1,
-					      /*bit*/ 0);
-			ctl_done((union ctl_io *)ctsio);
-			return (CTL_RETVAL_COMPLETE);
-
-		}
-
-		if ((cdb->byte2 & SR10_3RDPTY) != 0) {
-			ctl_set_invalid_field(ctsio,
-					      /*sks_valid*/ 1,
-					      /*command*/ 1,
-					      /*field*/ 1,
-					      /*bit_valid*/ 1,
-					      /*bit*/ 4);
-			ctl_done((union ctl_io *)ctsio);
-			return (CTL_RETVAL_COMPLETE);
-		}
-
-		if (cdb->byte2 & SR10_LONGID)
-			longid = 1;
-		else
-			thirdparty_id = cdb->thirdparty_id;
-
-		resv_id = cdb->resv_id;
-		length = scsi_2btoul(cdb->length);
-		break;
-	}
-	}
-
-
 	/*
 	 * XXX KDM right now, we only support LUN reservation.  We don't
 	 * support 3rd party reservations, or extent reservations, which
@@ -5074,28 +5233,9 @@
 	 * far, we've got a LUN reservation.  Anything else got kicked out
 	 * above.  So, according to SPC, ignore the length.
 	 */
-	length = 0;
 
-	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
-	 && (length > 0)) {
-		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
-		ctsio->kern_data_len = length;
-		ctsio->kern_total_len = length;
-		ctsio->kern_data_resid = 0;
-		ctsio->kern_rel_offset = 0;
-		ctsio->kern_sg_entries = 0;
-		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
-		ctsio->be_move_done = ctl_config_move_done;
-		ctl_datamove((union ctl_io *)ctsio);
+	mtx_lock(&lun->lun_lock);
 
-		return (CTL_RETVAL_COMPLETE);
-	}
-
-	if (length > 0)
-		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
-
-	mtx_lock(&ctl_softc->ctl_lock);
-
 	/*
 	 * According to SPC, it is not an error for an initiator to attempt
 	 * to release a reservation on a LUN that isn't reserved, or that
@@ -5103,25 +5243,12 @@
 	 * released, though, by the initiator who made it or by one of
 	 * several reset type events.
 	 */
-	if (lun->flags & CTL_LUN_RESERVED) {
-		if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id)
-		 && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port)
-		 && (ctsio->io_hdr.nexus.targ_target.id ==
-		     lun->rsv_nexus.targ_target.id)) {
+	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
 			lun->flags &= ~CTL_LUN_RESERVED;
-		}
-	}
 
-	ctsio->scsi_status = SCSI_STATUS_OK;
-	ctsio->io_hdr.status = CTL_SUCCESS;
+	mtx_unlock(&lun->lun_lock);
 
-	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
-		free(ctsio->kern_data_ptr, M_CTL);
-		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
-	}
-
-	mtx_unlock(&ctl_softc->ctl_lock);
-
+	ctl_set_success(ctsio);
 	ctl_done((union ctl_io *)ctsio);
 	return (CTL_RETVAL_COMPLETE);
 }
@@ -5129,79 +5256,13 @@
 int
 ctl_scsi_reserve(struct ctl_scsiio *ctsio)
 {
-	int extent, thirdparty, longid;
-	int resv_id, length;
-	uint64_t thirdparty_id;
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	uint32_t residx;
 
-	extent = 0;
-	thirdparty = 0;
-	longid = 0;
-	resv_id = 0;
-	length = 0;
-	thirdparty_id = 0;
-
 	CTL_DEBUG_PRINT(("ctl_reserve\n"));
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-	ctl_softc = control_softc;
+	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
 
-	switch (ctsio->cdb[0]) {
-	case RESERVE: {
-		struct scsi_reserve *cdb;
-
-		cdb = (struct scsi_reserve *)ctsio->cdb;
-		if ((cdb->byte2 & 0x1f) != 0) {
-			ctl_set_invalid_field(ctsio,
-					      /*sks_valid*/ 1,
-					      /*command*/ 1,
-					      /*field*/ 1,
-					      /*bit_valid*/ 0,
-					      /*bit*/ 0);
-			ctl_done((union ctl_io *)ctsio);
-			return (CTL_RETVAL_COMPLETE);
-		}
-		resv_id = cdb->resv_id;
-		length = scsi_2btoul(cdb->length);
-		break;
-	}
-	case RESERVE_10: {
-		struct scsi_reserve_10 *cdb;
-
-		cdb = (struct scsi_reserve_10 *)ctsio->cdb;
-
-		if ((cdb->byte2 & SR10_EXTENT) != 0) {
-			ctl_set_invalid_field(ctsio,
-					      /*sks_valid*/ 1,
-					      /*command*/ 1,
-					      /*field*/ 1,
-					      /*bit_valid*/ 1,
-					      /*bit*/ 0);
-			ctl_done((union ctl_io *)ctsio);
-			return (CTL_RETVAL_COMPLETE);
-		}
-		if ((cdb->byte2 & SR10_3RDPTY) != 0) {
-			ctl_set_invalid_field(ctsio,
-					      /*sks_valid*/ 1,
-					      /*command*/ 1,
-					      /*field*/ 1,
-					      /*bit_valid*/ 1,
-					      /*bit*/ 4);
-			ctl_done((union ctl_io *)ctsio);
-			return (CTL_RETVAL_COMPLETE);
-		}
-		if (cdb->byte2 & SR10_LONGID)
-			longid = 1;
-		else
-			thirdparty_id = cdb->thirdparty_id;
-
-		resv_id = cdb->resv_id;
-		length = scsi_2btoul(cdb->length);
-		break;
-	}
-	}
-
 	/*
 	 * XXX KDM right now, we only support LUN reservation.  We don't
 	 * support 3rd party reservations, or extent reservations, which
@@ -5209,52 +5270,25 @@
 	 * far, we've got a LUN reservation.  Anything else got kicked out
 	 * above.  So, according to SPC, ignore the length.
 	 */
-	length = 0;
 
-	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
-	 && (length > 0)) {
-		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
-		ctsio->kern_data_len = length;
-		ctsio->kern_total_len = length;
-		ctsio->kern_data_resid = 0;
-		ctsio->kern_rel_offset = 0;
-		ctsio->kern_sg_entries = 0;
-		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
-		ctsio->be_move_done = ctl_config_move_done;
-		ctl_datamove((union ctl_io *)ctsio);
-
-		return (CTL_RETVAL_COMPLETE);
+	mtx_lock(&lun->lun_lock);
+	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
+		ctl_set_reservation_conflict(ctsio);
+		goto bailout;
 	}
 
-	if (length > 0)
-		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
-
-	mtx_lock(&ctl_softc->ctl_lock);
-	if (lun->flags & CTL_LUN_RESERVED) {
-		if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
-		 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
-		 || (ctsio->io_hdr.nexus.targ_target.id !=
-		     lun->rsv_nexus.targ_target.id)) {
-			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
-			ctsio->io_hdr.status = CTL_SCSI_ERROR;
-			goto bailout;
-		}
+	/* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */
+	if (lun->flags & CTL_LUN_PR_RESERVED) {
+		ctl_set_success(ctsio);
+		goto bailout;
 	}
 
 	lun->flags |= CTL_LUN_RESERVED;
-	lun->rsv_nexus = ctsio->io_hdr.nexus;
+	lun->res_idx = residx;
+	ctl_set_success(ctsio);
 
-	ctsio->scsi_status = SCSI_STATUS_OK;
-	ctsio->io_hdr.status = CTL_SUCCESS;
-
 bailout:
-	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
-		free(ctsio->kern_data_ptr, M_CTL);
-		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
-	}
-
-	mtx_unlock(&ctl_softc->ctl_lock);
-
+	mtx_unlock(&lun->lun_lock);
 	ctl_done((union ctl_io *)ctsio);
 	return (CTL_RETVAL_COMPLETE);
 }
@@ -5262,157 +5296,88 @@
 int
 ctl_start_stop(struct ctl_scsiio *ctsio)
 {
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_start_stop_unit *cdb;
-	struct ctl_lun *lun;
-	struct ctl_softc *ctl_softc;
 	int retval;
 
 	CTL_DEBUG_PRINT(("ctl_start_stop\n"));
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-	ctl_softc = control_softc;
-	retval = 0;
-
 	cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
 
-	/*
-	 * XXX KDM
-	 * We don't support the immediate bit on a stop unit.  In order to
-	 * do that, we would need to code up a way to know that a stop is
-	 * pending, and hold off any new commands until it completes, one
-	 * way or another.  Then we could accept or reject those commands
-	 * depending on its status.  We would almost need to do the reverse
-	 * of what we do below for an immediate start -- return the copy of
-	 * the ctl_io to the FETD with status to send to the host (and to
-	 * free the copy!) and then free the original I/O once the stop
-	 * actually completes.  That way, the OOA queue mechanism can work
-	 * to block commands that shouldn't proceed.  Another alternative
-	 * would be to put the copy in the queue in place of the original,
-	 * and return the original back to the caller.  That could be
-	 * slightly safer..
-	 */
-	if ((cdb->byte2 & SSS_IMMED)
-	 && ((cdb->how & SSS_START) == 0)) {
-		ctl_set_invalid_field(ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 0);
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-	}
+	if ((cdb->how & SSS_PC_MASK) == 0) {
+		if ((lun->flags & CTL_LUN_PR_RESERVED) &&
+		    (cdb->how & SSS_START) == 0) {
+			uint32_t residx;
 
-	/*
-	 * We don't support the power conditions field.  We need to check
-	 * this prior to checking the load/eject and start/stop bits.
-	 */
-	if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) {
-		ctl_set_invalid_field(ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 4,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 4);
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-	}
+			residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+			if (ctl_get_prkey(lun, residx) == 0 ||
+			    (lun->pr_res_idx != residx && lun->pr_res_type < 4)) {
 
-	/*
-	 * Media isn't removable, so we can't load or eject it.
-	 */
-	if ((cdb->how & SSS_LOEJ) != 0) {
-		ctl_set_invalid_field(ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 4,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 1);
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-	}
+				ctl_set_reservation_conflict(ctsio);
+				ctl_done((union ctl_io *)ctsio);
+				return (CTL_RETVAL_COMPLETE);
+			}
+		}
 
-	if ((lun->flags & CTL_LUN_PR_RESERVED)
-	 && ((cdb->how & SSS_START)==0)) {
-		uint32_t residx;
+		if ((cdb->how & SSS_LOEJ) &&
+		    (lun->flags & CTL_LUN_REMOVABLE) == 0) {
+			ctl_set_invalid_field(ctsio,
+					      /*sks_valid*/ 1,
+					      /*command*/ 1,
+					      /*field*/ 4,
+					      /*bit_valid*/ 1,
+					      /*bit*/ 1);
+			ctl_done((union ctl_io *)ctsio);
+			return (CTL_RETVAL_COMPLETE);
+		}
 
-		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
-		if (!lun->per_res[residx].registered
-		 || (lun->pr_res_idx!=residx && lun->res_type < 4)) {
-
-			ctl_set_reservation_conflict(ctsio);
+		if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) &&
+		    lun->prevent_count > 0) {
+			/* "Medium removal prevented" */
+			ctl_set_sense(ctsio, /*current_error*/ 1,
+			    /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ?
+			     SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST,
+			    /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE);
 			ctl_done((union ctl_io *)ctsio);
 			return (CTL_RETVAL_COMPLETE);
 		}
 	}
 
-	/*
-	 * If there is no backend on this device, we can't start or stop
-	 * it.  In theory we shouldn't get any start/stop commands in the
-	 * first place at this level if the LUN doesn't have a backend.
-	 * That should get stopped by the command decode code.
-	 */
-	if (lun->backend == NULL) {
+	retval = lun->backend->config_write((union ctl_io *)ctsio);
+	return (retval);
+}
+
+int
+ctl_prevent_allow(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_prevent *cdb;
+	int retval;
+	uint32_t initidx;
+
+	CTL_DEBUG_PRINT(("ctl_prevent_allow\n"));
+
+	cdb = (struct scsi_prevent *)ctsio->cdb;
+
+	if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) {
 		ctl_set_invalid_opcode(ctsio);
 		ctl_done((union ctl_io *)ctsio);
 		return (CTL_RETVAL_COMPLETE);
 	}
 
-	/*
-	 * XXX KDM Copan-specific offline behavior.
-	 * Figure out a reasonable way to port this?
-	 */
-#ifdef NEEDTOPORT
-	mtx_lock(&ctl_softc->ctl_lock);
-
-	if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
-	 && (lun->flags & CTL_LUN_OFFLINE)) {
-		/*
-		 * If the LUN is offline, and the on/offline bit isn't set,
-		 * reject the start or stop.  Otherwise, let it through.
-		 */
-		mtx_unlock(&ctl_softc->ctl_lock);
-		ctl_set_lun_not_ready(ctsio);
-		ctl_done((union ctl_io *)ctsio);
-	} else {
-		mtx_unlock(&ctl_softc->ctl_lock);
-#endif /* NEEDTOPORT */
-		/*
-		 * This could be a start or a stop when we're online,
-		 * or a stop/offline or start/online.  A start or stop when
-		 * we're offline is covered in the case above.
-		 */
-		/*
-		 * In the non-immediate case, we send the request to
-		 * the backend and return status to the user when
-		 * it is done.
-		 *
-		 * In the immediate case, we allocate a new ctl_io
-		 * to hold a copy of the request, and send that to
-		 * the backend.  We then set good status on the
-		 * user's request and return it immediately.
-		 */
-		if (cdb->byte2 & SSS_IMMED) {
-			union ctl_io *new_io;
-
-			new_io = ctl_alloc_io(ctsio->io_hdr.pool);
-			if (new_io == NULL) {
-				ctl_set_busy(ctsio);
-				ctl_done((union ctl_io *)ctsio);
-			} else {
-				ctl_copy_io((union ctl_io *)ctsio,
-					    new_io);
-				retval = lun->backend->config_write(new_io);
-				ctl_set_success(ctsio);
-				ctl_done((union ctl_io *)ctsio);
-			}
-		} else {
-			retval = lun->backend->config_write(
-				(union ctl_io *)ctsio);
-		}
-#ifdef NEEDTOPORT
+	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+	mtx_lock(&lun->lun_lock);
+	if ((cdb->how & PR_PREVENT) &&
+	    ctl_is_set(lun->prevent, initidx) == 0) {
+		ctl_set_mask(lun->prevent, initidx);
+		lun->prevent_count++;
+	} else if ((cdb->how & PR_PREVENT) == 0 &&
+	    ctl_is_set(lun->prevent, initidx)) {
+		ctl_clear_mask(lun->prevent, initidx);
+		lun->prevent_count--;
 	}
-#endif
+	mtx_unlock(&lun->lun_lock);
+	retval = lun->backend->config_write((union ctl_io *)ctsio);
 	return (retval);
 }
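
ctl_prevent_allow() keeps one bit per initiator in lun->prevent, the bitmask
sized as (CTL_MAX_INITIATORS + 31) / 32 words in ctl_alloc_lun() above, plus
a prevent_count of initiators currently preventing removal; testing the bit
before setting or clearing it keeps repeated PREVENT or ALLOW commands from
the same initiator from skewing the count.  A minimal userland model, with
the initiator limit and mask helpers assumed rather than taken from the
patch:

#include <stdint.h>
#include <stdio.h>

#define MAX_INITIATORS	256	/* assumed; CTL sizes this per build */

static uint32_t prevent[(MAX_INITIATORS + 31) / 32];
static int prevent_count;

static int
is_set(int idx)
{
	return ((prevent[idx / 32] >> (idx % 32)) & 1);
}

static void
prevent_allow(int initidx, int want_prevent)
{
	if (want_prevent && !is_set(initidx)) {
		prevent[initidx / 32] |= 1u << (initidx % 32);
		prevent_count++;
	} else if (!want_prevent && is_set(initidx)) {
		prevent[initidx / 32] &= ~(1u << (initidx % 32));
		prevent_count--;
	}
}

int
main(void)
{
	prevent_allow(5, 1);	/* initiator 5 prevents removal */
	prevent_allow(5, 1);	/* repeated PREVENT: no double count */
	printf("count=%d\n", prevent_count);	/* 1 */
	prevent_allow(5, 0);	/* ALLOW clears it */
	printf("count=%d\n", prevent_count);	/* 0 */
	return (0);
}
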
 
@@ -5425,20 +5390,16 @@
 int
 ctl_sync_cache(struct ctl_scsiio *ctsio)
 {
-	struct ctl_lun *lun;
-	struct ctl_softc *ctl_softc;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct ctl_lba_len_flags *lbalen;
 	uint64_t starting_lba;
 	uint32_t block_count;
-	int reladr, immed;
 	int retval;
+	uint8_t byte2;
 
 	CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-	ctl_softc = control_softc;
 	retval = 0;
-	reladr = 0;
-	immed = 0;
 
 	switch (ctsio->cdb[0]) {
 	case SYNCHRONIZE_CACHE: {
@@ -5445,14 +5406,9 @@
 		struct scsi_sync_cache *cdb;
 		cdb = (struct scsi_sync_cache *)ctsio->cdb;
 
-		if (cdb->byte2 & SSC_RELADR)
-			reladr = 1;
-
-		if (cdb->byte2 & SSC_IMMED)
-			immed = 1;
-
 		starting_lba = scsi_4btoul(cdb->begin_lba);
 		block_count = scsi_2btoul(cdb->lb_count);
+		byte2 = cdb->byte2;
 		break;
 	}
 	case SYNCHRONIZE_CACHE_16: {
@@ -5459,14 +5415,9 @@
 		struct scsi_sync_cache_16 *cdb;
 		cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
 
-		if (cdb->byte2 & SSC_RELADR)
-			reladr = 1;
-
-		if (cdb->byte2 & SSC_IMMED)
-			immed = 1;
-
 		starting_lba = scsi_8btou64(cdb->begin_lba);
 		block_count = scsi_4btoul(cdb->lb_count);
+		byte2 = cdb->byte2;
 		break;
 	}
 	default:
@@ -5476,41 +5427,6 @@
 		break; /* NOTREACHED */
 	}
 
-	if (immed) {
-		/*
-		 * We don't support the immediate bit.  Since it's in the
-		 * same place for the 10 and 16 byte SYNCHRONIZE CACHE
-		 * commands, we can just return the same error in either
-		 * case.
-		 */
-		ctl_set_invalid_field(ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 1);
-		ctl_done((union ctl_io *)ctsio);
-		goto bailout;
-	}
-
-	if (reladr) {
-		/*
-		 * We don't support the reladr bit either.  It can only be
-		 * used with linked commands, and we don't support linked
-		 * commands.  Since the bit is in the same place for the
-		 * 10 and 16 byte SYNCHRONIZE CACHE * commands, we can
-		 * just return the same error in either case.
-		 */
-		ctl_set_invalid_field(ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 0);
-		ctl_done((union ctl_io *)ctsio);
-		goto bailout;
-	}
-
 	/*
 	 * We check the LBA and length, but don't do anything with them.
 	 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
@@ -5518,38 +5434,19 @@
 	 * to see an error for an out of range LBA.
 	 */
 	if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
-		ctl_set_lba_out_of_range(ctsio);
+		ctl_set_lba_out_of_range(ctsio,
+		    MAX(starting_lba, lun->be_lun->maxlba + 1));
 		ctl_done((union ctl_io *)ctsio);
 		goto bailout;
 	}
 
-	/*
-	 * If this LUN has no backend, we can't flush the cache anyway.
-	 */
-	if (lun->backend == NULL) {
-		ctl_set_invalid_opcode(ctsio);
-		ctl_done((union ctl_io *)ctsio);
-		goto bailout;
-	}
+	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+	lbalen->lba = starting_lba;
+	lbalen->len = block_count;
+	lbalen->flags = byte2;
+	retval = lun->backend->config_write((union ctl_io *)ctsio);
 
-	/*
-	 * Check to see whether we're configured to send the SYNCHRONIZE
-	 * CACHE command directly to the back end.
-	 */
-	mtx_lock(&ctl_softc->ctl_lock);
-	if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC)
-	 && (++(lun->sync_count) >= lun->sync_interval)) {
-		lun->sync_count = 0;
-		mtx_unlock(&ctl_softc->ctl_lock);
-		retval = lun->backend->config_write((union ctl_io *)ctsio);
-	} else {
-		mtx_unlock(&ctl_softc->ctl_lock);
-		ctl_set_success(ctsio);
-		ctl_done((union ctl_io *)ctsio);
-	}
-
 bailout:
-
 	return (retval);
 }
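
The rewritten ctl_sync_cache() keeps a single validation, that the range
fits the LUN, and forwards lba/len/byte2 to the backend through the
CTL_PRIV_LBA_LEN slot of the I/O's private area; the IMMED and RELADR
rejections are gone.  On failure the first out-of-range LBA is reported as
MAX(starting_lba, maxlba + 1).  A standalone rendering of that check, with
an example LUN of 2000 blocks:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t maxlba = 1999;		/* example LUN: blocks 0..1999 */
	uint64_t starting_lba = 1990;
	uint32_t block_count = 20;

	if (starting_lba + block_count > maxlba + 1) {
		/* Reported bad LBA: MAX(starting_lba, maxlba + 1). */
		uint64_t bad = starting_lba > maxlba + 1 ?
		    starting_lba : maxlba + 1;
		printf("LBA out of range at %ju\n", (uintmax_t)bad);
	}
	return (0);
}
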
 
@@ -5557,15 +5454,10 @@
 ctl_format(struct ctl_scsiio *ctsio)
 {
 	struct scsi_format *cdb;
-	struct ctl_lun *lun;
-	struct ctl_softc *ctl_softc;
 	int length, defect_list_len;
 
 	CTL_DEBUG_PRINT(("ctl_format\n"));
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-	ctl_softc = control_softc;
-
 	cdb = (struct scsi_format *)ctsio->cdb;
 
 	length = 0;
@@ -5581,7 +5473,6 @@
 		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
 		ctsio->kern_data_len = length;
 		ctsio->kern_total_len = length;
-		ctsio->kern_data_resid = 0;
 		ctsio->kern_rel_offset = 0;
 		ctsio->kern_sg_entries = 0;
 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5629,19 +5520,7 @@
 		}
 	}
 
-	/*
-	 * The format command will clear out the "Medium format corrupted"
-	 * status if set by the configuration code.  That status is really
-	 * just a way to notify the host that we have lost the media, and
-	 * get them to issue a command that will basically make them think
-	 * they're blowing away the media.
-	 */
-	mtx_lock(&ctl_softc->ctl_lock);
-	lun->flags &= ~CTL_LUN_INOPERABLE;
-	mtx_unlock(&ctl_softc->ctl_lock);
-
-	ctsio->scsi_status = SCSI_STATUS_OK;
-	ctsio->io_hdr.status = CTL_SUCCESS;
+	ctl_set_success(ctsio);
 bailout:
 
 	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
@@ -5654,40 +5533,48 @@
 }
 
 int
-ctl_write_buffer(struct ctl_scsiio *ctsio)
+ctl_read_buffer(struct ctl_scsiio *ctsio)
 {
-	struct scsi_write_buffer *cdb;
-	struct copan_page_header *header;
-	struct ctl_lun *lun;
-	struct ctl_softc *ctl_softc;
-	int buffer_offset, len;
-	int retval;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	uint64_t buffer_offset;
+	uint32_t len;
+	uint8_t byte2;
+	static uint8_t descr[4];
+	static uint8_t echo_descr[4] = { 0 };
 
-	header = NULL;
+	CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
 
-	retval = CTL_RETVAL_COMPLETE;
+	switch (ctsio->cdb[0]) {
+	case READ_BUFFER: {
+		struct scsi_read_buffer *cdb;
 
-	CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
+		cdb = (struct scsi_read_buffer *)ctsio->cdb;
+		buffer_offset = scsi_3btoul(cdb->offset);
+		len = scsi_3btoul(cdb->length);
+		byte2 = cdb->byte2;
+		break;
+	}
+	case READ_BUFFER_16: {
+		struct scsi_read_buffer_16 *cdb;
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-	ctl_softc = control_softc;
-	cdb = (struct scsi_write_buffer *)ctsio->cdb;
-
-	if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
-		ctl_set_invalid_field(ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 4);
+		cdb = (struct scsi_read_buffer_16 *)ctsio->cdb;
+		buffer_offset = scsi_8btou64(cdb->offset);
+		len = scsi_4btoul(cdb->length);
+		byte2 = cdb->byte2;
+		break;
+	}
+	default: /* This shouldn't happen. */
+		ctl_set_invalid_opcode(ctsio);
 		ctl_done((union ctl_io *)ctsio);
 		return (CTL_RETVAL_COMPLETE);
 	}
-	if (cdb->buffer_id != 0) {
+
+	if (buffer_offset > CTL_WRITE_BUFFER_SIZE ||
+	    buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
 		ctl_set_invalid_field(ctsio,
 				      /*sks_valid*/ 1,
 				      /*command*/ 1,
-				      /*field*/ 2,
+				      /*field*/ 6,
 				      /*bit_valid*/ 0,
 				      /*bit*/ 0);
 		ctl_done((union ctl_io *)ctsio);
@@ -5694,10 +5581,46 @@
 		return (CTL_RETVAL_COMPLETE);
 	}
 
+	if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) {
+		descr[0] = 0;
+		scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]);
+		ctsio->kern_data_ptr = descr;
+		len = min(len, sizeof(descr));
+	} else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
+		ctsio->kern_data_ptr = echo_descr;
+		len = min(len, sizeof(echo_descr));
+	} else {
+		if (lun->write_buffer == NULL) {
+			lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
+			    M_CTL, M_WAITOK);
+		}
+		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
+	}
+	ctsio->kern_data_len = len;
+	ctsio->kern_total_len = len;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_sg_entries = 0;
+	ctl_set_success(ctsio);
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
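
In descriptor mode, ctl_read_buffer() answers with a 4-byte descriptor: an offset-boundary byte followed by the buffer capacity packed big-endian by scsi_ulto3b(). A self-contained sketch of that packing (ulto3b and WRITE_BUFFER_SIZE are stand-ins, assuming the same wire format):

#include <stdint.h>
#include <stdio.h>

#define WRITE_BUFFER_SIZE 262144	/* stand-in for CTL_WRITE_BUFFER_SIZE */

static void
ulto3b(uint32_t v, uint8_t *b)		/* same packing as scsi_ulto3b() */
{

	b[0] = (v >> 16) & 0xff;
	b[1] = (v >> 8) & 0xff;
	b[2] = v & 0xff;
}

int
main(void)
{
	uint8_t descr[4];

	/* RWB_MODE_DESCR reply: offset boundary 0, then 3-byte capacity. */
	descr[0] = 0;
	ulto3b(WRITE_BUFFER_SIZE, &descr[1]);
	printf("%02x %02x %02x %02x\n", descr[0], descr[1], descr[2], descr[3]);
	return (0);
}
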
+
+int
+ctl_write_buffer(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_write_buffer *cdb;
+	int buffer_offset, len;
+
+	CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
+
+	cdb = (struct scsi_write_buffer *)ctsio->cdb;
+
 	len = scsi_3btoul(cdb->length);
 	buffer_offset = scsi_3btoul(cdb->offset);
 
-	if (len > sizeof(lun->write_buffer)) {
+	if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
 		ctl_set_invalid_field(ctsio,
 				      /*sks_valid*/ 1,
 				      /*command*/ 1,
@@ -5708,26 +5631,18 @@
 		return (CTL_RETVAL_COMPLETE);
 	}
 
-	if (buffer_offset != 0) {
-		ctl_set_invalid_field(ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 3,
-				      /*bit_valid*/ 0,
-				      /*bit*/ 0);
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-	}
-
 	/*
 	 * If we've got a kernel request that hasn't been malloced yet,
 	 * malloc it and tell the caller the data buffer is here.
 	 */
 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
-		ctsio->kern_data_ptr = lun->write_buffer;
+		if (lun->write_buffer == NULL) {
+			lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
+			    M_CTL, M_WAITOK);
+		}
+		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
 		ctsio->kern_data_len = len;
 		ctsio->kern_total_len = len;
-		ctsio->kern_data_resid = 0;
 		ctsio->kern_rel_offset = 0;
 		ctsio->kern_sg_entries = 0;
 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5737,343 +5652,335 @@
 		return (CTL_RETVAL_COMPLETE);
 	}
 
+	ctl_set_success(ctsio);
 	ctl_done((union ctl_io *)ctsio);
-
 	return (CTL_RETVAL_COMPLETE);
 }
 
-/*
- * Note that this function currently doesn't actually do anything inside
- * CTL to enforce things if the DQue bit is turned on.
- *
- * Also note that this function can't be used in the default case, because
- * the DQue bit isn't set in the changeable mask for the control mode page
- * anyway.  This is just here as an example for how to implement a page
- * handler, and a placeholder in case we want to allow the user to turn
- * tagged queueing on and off.
- *
- * The D_SENSE bit handling is functional, however, and will turn
- * descriptor sense on and off for a given LUN.
- */
 int
-ctl_control_page_handler(struct ctl_scsiio *ctsio,
-			 struct ctl_page_index *page_index, uint8_t *page_ptr)
+ctl_write_same(struct ctl_scsiio *ctsio)
 {
-	struct scsi_control_page *current_cp, *saved_cp, *user_cp;
-	struct ctl_lun *lun;
-	struct ctl_softc *softc;
-	int set_ua;
-	uint32_t initidx;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct ctl_lba_len_flags *lbalen;
+	uint64_t lba;
+	uint32_t num_blocks;
+	int len, retval;
+	uint8_t byte2;
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
-	set_ua = 0;
+	CTL_DEBUG_PRINT(("ctl_write_same\n"));
 
-	user_cp = (struct scsi_control_page *)page_ptr;
-	current_cp = (struct scsi_control_page *)
-		(page_index->page_data + (page_index->page_len *
-		CTL_PAGE_CURRENT));
-	saved_cp = (struct scsi_control_page *)
-		(page_index->page_data + (page_index->page_len *
-		CTL_PAGE_SAVED));
+	switch (ctsio->cdb[0]) {
+	case WRITE_SAME_10: {
+		struct scsi_write_same_10 *cdb;
 
-	softc = control_softc;
+		cdb = (struct scsi_write_same_10 *)ctsio->cdb;
 
-	mtx_lock(&softc->ctl_lock);
-	if (((current_cp->rlec & SCP_DSENSE) == 0)
-	 && ((user_cp->rlec & SCP_DSENSE) != 0)) {
-		/*
-		 * Descriptor sense is currently turned off and the user
-		 * wants to turn it on.
-		 */
-		current_cp->rlec |= SCP_DSENSE;
-		saved_cp->rlec |= SCP_DSENSE;
-		lun->flags |= CTL_LUN_SENSE_DESC;
-		set_ua = 1;
-	} else if (((current_cp->rlec & SCP_DSENSE) != 0)
-		&& ((user_cp->rlec & SCP_DSENSE) == 0)) {
-		/*
-		 * Descriptor sense is currently turned on, and the user
-		 * wants to turn it off.
-		 */
-		current_cp->rlec &= ~SCP_DSENSE;
-		saved_cp->rlec &= ~SCP_DSENSE;
-		lun->flags &= ~CTL_LUN_SENSE_DESC;
-		set_ua = 1;
+		lba = scsi_4btoul(cdb->addr);
+		num_blocks = scsi_2btoul(cdb->length);
+		byte2 = cdb->byte2;
+		break;
 	}
-	if (current_cp->queue_flags & SCP_QUEUE_DQUE) {
-		if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
-#ifdef NEEDTOPORT
-			csevent_log(CSC_CTL | CSC_SHELF_SW |
-				    CTL_UNTAG_TO_UNTAG,
-				    csevent_LogType_Trace,
-				    csevent_Severity_Information,
-				    csevent_AlertLevel_Green,
-				    csevent_FRU_Firmware,
-				    csevent_FRU_Unknown,
-				    "Received untagged to untagged transition");
-#endif /* NEEDTOPORT */
-		} else {
-#ifdef NEEDTOPORT
-			csevent_log(CSC_CTL | CSC_SHELF_SW |
-				    CTL_UNTAG_TO_TAG,
-				    csevent_LogType_ConfigChange,
-				    csevent_Severity_Information,
-				    csevent_AlertLevel_Green,
-				    csevent_FRU_Firmware,
-				    csevent_FRU_Unknown,
-				    "Received untagged to tagged "
-				    "queueing transition");
-#endif /* NEEDTOPORT */
+	case WRITE_SAME_16: {
+		struct scsi_write_same_16 *cdb;
 
-			current_cp->queue_flags &= ~SCP_QUEUE_DQUE;
-			saved_cp->queue_flags &= ~SCP_QUEUE_DQUE;
-			set_ua = 1;
-		}
-	} else {
-		if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
-#ifdef NEEDTOPORT
-			csevent_log(CSC_CTL | CSC_SHELF_SW |
-				    CTL_TAG_TO_UNTAG,
-				    csevent_LogType_ConfigChange,
-				    csevent_Severity_Warning,
-				    csevent_AlertLevel_Yellow,
-				    csevent_FRU_Firmware,
-				    csevent_FRU_Unknown,
-				    "Received tagged queueing to untagged "
-				    "transition");
-#endif /* NEEDTOPORT */
+		cdb = (struct scsi_write_same_16 *)ctsio->cdb;
 
-			current_cp->queue_flags |= SCP_QUEUE_DQUE;
-			saved_cp->queue_flags |= SCP_QUEUE_DQUE;
-			set_ua = 1;
-		} else {
-#ifdef NEEDTOPORT
-			csevent_log(CSC_CTL | CSC_SHELF_SW |
-				    CTL_TAG_TO_TAG,
-				    csevent_LogType_Trace,
-				    csevent_Severity_Information,
-				    csevent_AlertLevel_Green,
-				    csevent_FRU_Firmware,
-				    csevent_FRU_Unknown,
-				    "Received tagged queueing to tagged "
-				    "queueing transition");
-#endif /* NEEDTOPORT */
-		}
+		lba = scsi_8btou64(cdb->addr);
+		num_blocks = scsi_4btoul(cdb->length);
+		byte2 = cdb->byte2;
+		break;
 	}
-	if (set_ua != 0) {
-		int i;
+	default:
 		/*
-		 * Let other initiators know that the mode
-		 * parameters for this LUN have changed.
+		 * We got a command we don't support.  This shouldn't
 		 * happen; commands should be filtered out above us.
 		 */
-		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
-			if (i == initidx)
-				continue;
+		ctl_set_invalid_opcode(ctsio);
+		ctl_done((union ctl_io *)ctsio);
 
-			lun->pending_sense[i].ua_pending |=
-				CTL_UA_MODE_CHANGE;
+		return (CTL_RETVAL_COMPLETE);
+		break; /* NOTREACHED */
+	}
+
+	/* The ANCHOR flag can only be used together with UNMAP. */
+	if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+		    /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	/*
+	 * The first check is to make sure we're in bounds, the second
+	 * check is to catch wrap-around problems.  If the lba + num blocks
+	 * is less than the lba, then we've wrapped around and the block
+	 * range is invalid anyway.
+	 */
+	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
+	 || ((lba + num_blocks) < lba)) {
+		ctl_set_lba_out_of_range(ctsio,
+		    MAX(lba, lun->be_lun->maxlba + 1));
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	/* Zero number of blocks means "to the last logical block" */
+	if (num_blocks == 0) {
+		if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) {
+			ctl_set_invalid_field(ctsio,
+					      /*sks_valid*/ 0,
+					      /*command*/ 1,
+					      /*field*/ 0,
+					      /*bit_valid*/ 0,
+					      /*bit*/ 0);
+			ctl_done((union ctl_io *)ctsio);
+			return (CTL_RETVAL_COMPLETE);
 		}
+		num_blocks = (lun->be_lun->maxlba + 1) - lba;
 	}
-	mtx_unlock(&softc->ctl_lock);
 
-	return (0);
-}
+	len = lun->be_lun->blocksize;
 
-int
-ctl_power_sp_handler(struct ctl_scsiio *ctsio,
-		     struct ctl_page_index *page_index, uint8_t *page_ptr)
-{
-	return (0);
-}
+	/*
+	 * If we've got a kernel request that hasn't been malloced yet,
+	 * malloc it and tell the caller the data buffer is here.
+	 */
+	if ((byte2 & SWS_NDOB) == 0 &&
+	    (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+		ctsio->kern_data_len = len;
+		ctsio->kern_total_len = len;
+		ctsio->kern_rel_offset = 0;
+		ctsio->kern_sg_entries = 0;
+		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+		ctsio->be_move_done = ctl_config_move_done;
+		ctl_datamove((union ctl_io *)ctsio);
 
-int
-ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
-			   struct ctl_page_index *page_index, int pc)
-{
-	struct copan_power_subpage *page;
+		return (CTL_RETVAL_COMPLETE);
+	}
 
-	page = (struct copan_power_subpage *)page_index->page_data +
-		(page_index->page_len * pc);
+	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+	lbalen->lba = lba;
+	lbalen->len = num_blocks;
+	lbalen->flags = byte2;
+	retval = lun->backend->config_write((union ctl_io *)ctsio);
 
-	switch (pc) {
-	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
-		/*
-		 * We don't update the changeable bits for this page.
-		 */
-		break;
-	case SMS_PAGE_CTRL_CURRENT >> 6:
-	case SMS_PAGE_CTRL_DEFAULT >> 6:
-	case SMS_PAGE_CTRL_SAVED >> 6:
-#ifdef NEEDTOPORT
-		ctl_update_power_subpage(page);
-#endif
-		break;
-	default:
-#ifdef NEEDTOPORT
-		EPRINT(0, "Invalid PC %d!!", pc);
-#endif
-		break;
-	}
-	return (0);
+	return (retval);
 }
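
The bounds logic in ctl_write_same() is worth pinning down: num_blocks == 0 means "through the last logical block" (allowed only if the resulting count fits in 32 bits), and lba + num_blocks < lba detects 64-bit wrap-around. A runnable sketch under those assumptions:

#include <stdint.h>
#include <stdio.h>

/* Returns the effective block count, or 0 on error (sketch only). */
static uint64_t
write_same_range(uint64_t lba, uint32_t num_blocks, uint64_t maxlba)
{
	uint64_t nb = num_blocks;

	/* Zero means "through the last logical block", if it fits. */
	if (nb == 0) {
		if ((maxlba + 1) - lba > UINT32_MAX)
			return (0);
		nb = (maxlba + 1) - lba;
	}
	/* Bounds check plus wrap-around check: lba + nb < lba means
	 * the 64-bit sum overflowed, so the range is invalid anyway. */
	if (lba + nb > maxlba + 1 || lba + nb < lba)
		return (0);
	return (nb);
}

int
main(void)
{

	printf("%llu\n", (unsigned long long)write_same_range(10, 0, 1023));
	printf("%llu\n", (unsigned long long)write_same_range(UINT64_MAX, 2, 1023));
	return (0);
}
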
 
-
 int
-ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
-		   struct ctl_page_index *page_index, uint8_t *page_ptr)
+ctl_unmap(struct ctl_scsiio *ctsio)
 {
-	struct copan_aps_subpage *user_sp;
-	struct copan_aps_subpage *current_sp;
-	union ctl_modepage_info *modepage_info;
-	struct ctl_softc *softc;
-	struct ctl_lun *lun;
-	int retval;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_unmap *cdb;
+	struct ctl_ptr_len_flags *ptrlen;
+	struct scsi_unmap_header *hdr;
+	struct scsi_unmap_desc *buf, *end, *endnz, *range;
+	uint64_t lba;
+	uint32_t num_blocks;
+	int len, retval;
+	uint8_t byte2;
 
-	retval = CTL_RETVAL_COMPLETE;
-	current_sp = (struct copan_aps_subpage *)(page_index->page_data +
-		     (page_index->page_len * CTL_PAGE_CURRENT));
-	softc = control_softc;
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+	CTL_DEBUG_PRINT(("ctl_unmap\n"));
 
-	user_sp = (struct copan_aps_subpage *)page_ptr;
+	cdb = (struct scsi_unmap *)ctsio->cdb;
+	len = scsi_2btoul(cdb->length);
+	byte2 = cdb->byte2;
 
-	modepage_info = (union ctl_modepage_info *)
-		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
+	/*
+	 * If we've got a kernel request that hasn't been malloced yet,
+	 * malloc it and tell the caller the data buffer is here.
+	 */
+	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+		ctsio->kern_data_len = len;
+		ctsio->kern_total_len = len;
+		ctsio->kern_rel_offset = 0;
+		ctsio->kern_sg_entries = 0;
+		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+		ctsio->be_move_done = ctl_config_move_done;
+		ctl_datamove((union ctl_io *)ctsio);
 
-	modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK;
-	modepage_info->header.subpage = page_index->subpage;
-	modepage_info->aps.lock_active = user_sp->lock_active;
+		return (CTL_RETVAL_COMPLETE);
+	}
 
-	mtx_lock(&softc->ctl_lock);
+	len = ctsio->kern_total_len - ctsio->kern_data_resid;
+	hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
+	if (len < sizeof (*hdr) ||
+	    len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
+	    len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
+	    scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
+		ctl_set_invalid_field(ctsio,
+				      /*sks_valid*/ 0,
+				      /*command*/ 0,
+				      /*field*/ 0,
+				      /*bit_valid*/ 0,
+				      /*bit*/ 0);
+		goto done;
+	}
+	len = scsi_2btoul(hdr->desc_length);
+	buf = (struct scsi_unmap_desc *)(hdr + 1);
+	end = buf + len / sizeof(*buf);
 
-	/*
-	 * If there is a request to lock the LUN and another LUN is locked,
-	 * this is an error.  If the requested LUN is already locked, ignore
-	 * the request.  If no LUN is locked, attempt to lock it.
-	 * If there is a request to unlock the LUN and the LUN is currently
-	 * locked, attempt to unlock it.  Otherwise (i.e. if another LUN is
-	 * locked or no LUN is locked), ignore the request.
-	 */
-	if (user_sp->lock_active & APS_LOCK_ACTIVE) {
-		if (softc->aps_locked_lun == lun->lun) {
-			/*
-			 * This LUN is already locked, so we're done.
-			 */
-			retval = CTL_RETVAL_COMPLETE;
-		} else if (softc->aps_locked_lun == 0) {
-			/*
-			 * No one has the lock, pass the request to the
-			 * backend.
-			 */
-			retval = lun->backend->config_write(
-				(union ctl_io *)ctsio);
-		} else {
-			/*
-			 * Someone else has the lock, throw out the request.
-			 */
-			ctl_set_already_locked(ctsio);
-			free(ctsio->kern_data_ptr, M_CTL);
+	endnz = buf;
+	for (range = buf; range < end; range++) {
+		lba = scsi_8btou64(range->lba);
+		num_blocks = scsi_4btoul(range->length);
+		if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
+		 || ((lba + num_blocks) < lba)) {
+			ctl_set_lba_out_of_range(ctsio,
+			    MAX(lba, lun->be_lun->maxlba + 1));
 			ctl_done((union ctl_io *)ctsio);
-
-			/*
-			 * Set the return value so that ctl_do_mode_select()
-			 * won't try to complete the command.  We already
-			 * completed it here.
-			 */
-			retval = CTL_RETVAL_ERROR;
+			return (CTL_RETVAL_COMPLETE);
 		}
-	} else if (softc->aps_locked_lun == lun->lun) {
-		/*
-		 * This LUN is locked, so pass the unlock request to the
-		 * backend.
-		 */
-		retval = lun->backend->config_write((union ctl_io *)ctsio);
+		if (num_blocks != 0)
+			endnz = range + 1;
 	}
-	mtx_unlock(&softc->ctl_lock);
 
+	/*
+	 * The block backend cannot handle a zero-length last range.
+	 * Filter it out and return if nothing is left.
+	 */
+	len = (uint8_t *)endnz - (uint8_t *)buf;
+	if (len == 0) {
+		ctl_set_success(ctsio);
+		goto done;
+	}
+
+	mtx_lock(&lun->lun_lock);
+	ptrlen = (struct ctl_ptr_len_flags *)
+	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+	ptrlen->ptr = (void *)buf;
+	ptrlen->len = len;
+	ptrlen->flags = byte2;
+	ctl_check_blocked(lun);
+	mtx_unlock(&lun->lun_lock);
+
+	retval = lun->backend->config_write((union ctl_io *)ctsio);
 	return (retval);
+
+done:
+	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
+		free(ctsio->kern_data_ptr, M_CTL);
+		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
+	}
+	ctl_done((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
 }
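
ctl_unmap() trusts nothing about the parameter list: the received length must cover the header, both embedded lengths, and a whole number of 16-byte descriptors. A minimal userspace version of those checks (struct layouts assumed to mirror scsi_unmap_header/scsi_unmap_desc):

#include <stdint.h>
#include <stdio.h>

struct unmap_header {
	uint8_t length[2];	/* bytes following this field */
	uint8_t desc_length[2];	/* bytes of descriptors */
	uint8_t reserved[4];
};
struct unmap_desc {
	uint8_t lba[8];
	uint8_t length[4];
	uint8_t reserved[4];
};

static uint32_t
get2b(const uint8_t *b) { return (((uint32_t)b[0] << 8) | b[1]); }

/* The checks ctl_unmap() applies before walking descriptors. */
static int
unmap_list_ok(const uint8_t *buf, uint32_t len)
{
	const struct unmap_header *hdr = (const void *)buf;

	if (len < sizeof(*hdr))
		return (0);
	if (len < get2b(hdr->length) + sizeof(hdr->length))
		return (0);
	if (len < get2b(hdr->desc_length) + sizeof(*hdr))
		return (0);
	if (get2b(hdr->desc_length) % sizeof(struct unmap_desc) != 0)
		return (0);
	return (1);
}

int
main(void)
{
	uint8_t buf[8 + 16] = { 0 };

	buf[1] = 22; buf[3] = 16;	/* one 16-byte descriptor */
	printf("%d\n", unmap_list_ok(buf, sizeof(buf)));	/* 1 */
	return (0);
}
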
 
 int
-ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
-				struct ctl_page_index *page_index,
-				uint8_t *page_ptr)
+ctl_default_page_handler(struct ctl_scsiio *ctsio,
+			 struct ctl_page_index *page_index, uint8_t *page_ptr)
 {
-	uint8_t *c;
-	int i;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	uint8_t *current_cp;
+	int set_ua;
+	uint32_t initidx;
 
-	c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
-	ctl_time_io_secs =
-		(c[0] << 8) |
-		(c[1] << 0) |
-		0;
-	CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
-	printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
-	printf("page data:");
-	for (i=0; i<8; i++)
-		printf(" %.2x",page_ptr[i]);
-	printf("\n");
-	return (0);
+	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+	set_ua = 0;
+
+	current_cp = (page_index->page_data + (page_index->page_len *
+	    CTL_PAGE_CURRENT));
+
+	mtx_lock(&lun->lun_lock);
+	if (memcmp(current_cp, page_ptr, page_index->page_len)) {
+		memcpy(current_cp, page_ptr, page_index->page_len);
+		set_ua = 1;
+	}
+	if (set_ua != 0)
+		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
+	mtx_unlock(&lun->lun_lock);
+	if (set_ua) {
+		ctl_isc_announce_mode(lun,
+		    ctl_get_initindex(&ctsio->io_hdr.nexus),
+		    page_index->page_code, page_index->subpage);
+	}
+	return (CTL_RETVAL_COMPLETE);
 }
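
ctl_default_page_handler() reduces mode-page handling to one idiom: copy the user's bytes over the CURRENT copy only if they differ, and raise a MODE CHANGE unit attention when they do. Sketched standalone (hypothetical helper; the UA call is only indicated in a comment):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int
apply_page(uint8_t *current, const uint8_t *user, size_t len)
{

	if (memcmp(current, user, len) == 0)
		return (0);	/* nothing changed, no UA */
	memcpy(current, user, len);
	return (1);		/* -> ctl_est_ua_all(..., CTL_UA_MODE_CHANGE) */
}

int
main(void)
{
	uint8_t cur[4] = { 1, 2, 3, 4 }, usr[4] = { 1, 2, 3, 5 };

	printf("%d\n", apply_page(cur, usr, sizeof(cur)));	/* 1 */
	printf("%d\n", apply_page(cur, usr, sizeof(cur)));	/* 0 */
	return (0);
}
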
 
+static void
+ctl_ie_timer(void *arg)
+{
+	struct ctl_lun *lun = arg;
+	uint64_t t;
+
+	if (lun->ie_asc == 0)
+		return;
+
+	if (lun->MODE_IE.mrie == SIEP_MRIE_UA)
+		ctl_est_ua_all(lun, -1, CTL_UA_IE);
+	else
+		lun->ie_reported = 0;
+
+	if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) {
+		lun->ie_reportcnt++;
+		t = scsi_4btoul(lun->MODE_IE.interval_timer);
+		if (t == 0 || t == UINT32_MAX)
+			t = 3000;  /* 5 min */
+		callout_schedule(&lun->ie_callout, t * hz / 10);
+	}
+}
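
The informational-exceptions interval timer stores its period in 100 ms units, so t * hz / 10 converts it to callout ticks, with 3000 (that is, 300 seconds, 5 minutes) as the fallback for 0 or UINT32_MAX. A sketch of just that conversion:

#include <stdint.h>
#include <stdio.h>

static int
ie_interval_ticks(uint32_t t, int hz)
{

	if (t == 0 || t == UINT32_MAX)
		t = 3000;		/* 3000 * 100 ms = 5 min */
	return (t * hz / 10);		/* 100 ms units -> ticks */
}

int
main(void)
{

	printf("%d\n", ie_interval_ticks(0, 1000));	/* 300000 ticks */
	printf("%d\n", ie_interval_ticks(10, 1000));	/* 1 s -> 1000 */
	return (0);
}
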
+
 int
-ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
-			       struct ctl_page_index *page_index,
-			       int pc)
+ctl_ie_page_handler(struct ctl_scsiio *ctsio,
+			 struct ctl_page_index *page_index, uint8_t *page_ptr)
 {
-	struct copan_debugconf_subpage *page;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_info_exceptions_page *pg;
+	uint64_t t;
 
-	page = (struct copan_debugconf_subpage *)page_index->page_data +
-		(page_index->page_len * pc);
+	(void)ctl_default_page_handler(ctsio, page_index, page_ptr);
 
-	switch (pc) {
-	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
-	case SMS_PAGE_CTRL_DEFAULT >> 6:
-	case SMS_PAGE_CTRL_SAVED >> 6:
-		/*
-		 * We don't update the changeable or default bits for this page.
-		 */
-		break;
-	case SMS_PAGE_CTRL_CURRENT >> 6:
-		page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
-		page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
-		break;
-	default:
-#ifdef NEEDTOPORT
-		EPRINT(0, "Invalid PC %d!!", pc);
-#endif /* NEEDTOPORT */
-		break;
+	pg = (struct scsi_info_exceptions_page *)page_ptr;
+	mtx_lock(&lun->lun_lock);
+	if (pg->info_flags & SIEP_FLAGS_TEST) {
+		lun->ie_asc = 0x5d;
+		lun->ie_ascq = 0xff;
+		if (pg->mrie == SIEP_MRIE_UA) {
+			ctl_est_ua_all(lun, -1, CTL_UA_IE);
+			lun->ie_reported = 1;
+		} else {
+			ctl_clr_ua_all(lun, -1, CTL_UA_IE);
+			lun->ie_reported = -1;
+		}
+		lun->ie_reportcnt = 1;
+		if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) {
+			lun->ie_reportcnt++;
+			t = scsi_4btoul(pg->interval_timer);
+			if (t == 0 || t == UINT32_MAX)
+				t = 3000;  /* 5 min */
+			callout_reset(&lun->ie_callout, t * hz / 10,
+			    ctl_ie_timer, lun);
+		}
+	} else {
+		lun->ie_asc = 0;
+		lun->ie_ascq = 0;
+		lun->ie_reported = 1;
+		ctl_clr_ua_all(lun, -1, CTL_UA_IE);
+		lun->ie_reportcnt = UINT32_MAX;
+		callout_stop(&lun->ie_callout);
 	}
-	return (0);
+	mtx_unlock(&lun->lun_lock);
+	return (CTL_RETVAL_COMPLETE);
 }
 
-
 static int
 ctl_do_mode_select(union ctl_io *io)
 {
+	struct ctl_lun *lun = CTL_LUN(io);
 	struct scsi_mode_page_header *page_header;
 	struct ctl_page_index *page_index;
 	struct ctl_scsiio *ctsio;
-	int control_dev, page_len;
-	int page_len_offset, page_len_size;
+	int page_len, page_len_offset, page_len_size;
 	union ctl_modepage_info *modepage_info;
-	struct ctl_lun *lun;
-	int *len_left, *len_used;
+	uint16_t *len_left, *len_used;
 	int retval, i;
 
 	ctsio = &io->scsiio;
 	page_index = NULL;
 	page_len = 0;
-	retval = CTL_RETVAL_COMPLETE;
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
-	if (lun->be_lun->lun_type != T_DIRECT)
-		control_dev = 1;
-	else
-		control_dev = 0;
-
 	modepage_info = (union ctl_modepage_info *)
 		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
 	len_left = &modepage_info->header.len_left;
@@ -6110,13 +6017,18 @@
 	 * XXX KDM should we do something with the block descriptor?
 	 */
 	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
-
-		if ((control_dev != 0)
-		 && (lun->mode_pages.index[i].page_flags &
-		     CTL_PAGE_FLAG_DISK_ONLY))
+		page_index = &lun->mode_pages.index[i];
+		if (lun->be_lun->lun_type == T_DIRECT &&
+		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
 			continue;
+		if (lun->be_lun->lun_type == T_PROCESSOR &&
+		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
+			continue;
+		if (lun->be_lun->lun_type == T_CDROM &&
+		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
+			continue;
 
-		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
+		if ((page_index->page_code & SMPH_PC_MASK) !=
 		    (page_header->page_code & SMPH_PC_MASK))
 			continue;
 
@@ -6124,9 +6036,8 @@
 		 * If neither page has a subpage code, then we've got a
 		 * match.
 		 */
-		if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0)
+		if (((page_index->page_code & SMPH_SPF) == 0)
 		 && ((page_header->page_code & SMPH_SPF) == 0)) {
-			page_index = &lun->mode_pages.index[i];
 			page_len = page_header->page_length;
 			break;
 		}
@@ -6135,15 +6046,12 @@
 		 * If both pages have subpages, then the subpage numbers
 		 * have to match.
 		 */
-		if ((lun->mode_pages.index[i].page_code & SMPH_SPF)
+		if ((page_index->page_code & SMPH_SPF)
 		  && (page_header->page_code & SMPH_SPF)) {
 			struct scsi_mode_page_header_sp *sph;
 
 			sph = (struct scsi_mode_page_header_sp *)page_header;
-
-			if (lun->mode_pages.index[i].subpage ==
-			    sph->subpage) {
-				page_index = &lun->mode_pages.index[i];
+			if (page_index->subpage == sph->subpage) {
 				page_len = scsi_2btoul(sph->page_length);
 				break;
 			}
@@ -6154,7 +6062,7 @@
 	 * If we couldn't find the page, or if we don't have a mode select
 	 * handler for it, send back an error to the user.
 	 */
-	if ((page_index == NULL)
+	if ((i >= CTL_NUM_MODE_PAGES)
 	 || (page_index->select_handler == NULL)) {
 		ctl_set_invalid_field(ctsio,
 				      /*sks_valid*/ 1,
@@ -6180,11 +6088,7 @@
 	 * the mode page header, or if they didn't specify enough data in
 	 * the CDB to avoid truncating this page, kick out the request.
 	 */
-	if ((page_len != (page_index->page_len - page_len_offset -
-			  page_len_size))
-	 || (*len_left < page_index->page_len)) {
-
-
+	if (page_len != page_index->page_len - page_len_offset - page_len_size) {
 		ctl_set_invalid_field(ctsio,
 				      /*sks_valid*/ 1,
 				      /*command*/ 0,
@@ -6195,6 +6099,12 @@
 		ctl_done((union ctl_io *)ctsio);
 		return (CTL_RETVAL_COMPLETE);
 	}
+	if (*len_left < page_index->page_len) {
+		free(ctsio->kern_data_ptr, M_CTL);
+		ctl_set_param_len_error(ctsio);
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
 
 	/*
 	 * Run through the mode page, checking to make sure that the bits
@@ -6282,31 +6192,12 @@
 int
 ctl_mode_select(struct ctl_scsiio *ctsio)
 {
-	int param_len, pf, sp;
-	int header_size, bd_len;
-	int len_left, len_used;
-	struct ctl_page_index *page_index;
-	struct ctl_lun *lun;
-	int control_dev, page_len;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	union ctl_modepage_info *modepage_info;
-	int retval;
+	int bd_len, i, header_size, param_len, pf, rtd, sp;
+	uint32_t initidx;
 
-	pf = 0;
-	sp = 0;
-	page_len = 0;
-	len_used = 0;
-	len_left = 0;
-	retval = 0;
-	bd_len = 0;
-	page_index = NULL;
-
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
-	if (lun->be_lun->lun_type != T_DIRECT)
-		control_dev = 1;
-	else
-		control_dev = 0;
-
+	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
 	switch (ctsio->cdb[0]) {
 	case MODE_SELECT_6: {
 		struct scsi_mode_select_6 *cdb;
@@ -6314,8 +6205,8 @@
 		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
 
 		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
+		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
 		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
-
 		param_len = cdb->length;
 		header_size = sizeof(struct scsi_mode_header_6);
 		break;
@@ -6326,8 +6217,8 @@
 		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
 
 		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
+		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
 		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
-
 		param_len = scsi_2btoul(cdb->length);
 		header_size = sizeof(struct scsi_mode_header_10);
 		break;
@@ -6336,9 +6227,32 @@
 		ctl_set_invalid_opcode(ctsio);
 		ctl_done((union ctl_io *)ctsio);
 		return (CTL_RETVAL_COMPLETE);
-		break; /* NOTREACHED */
 	}
 
+	if (rtd) {
+		if (param_len != 0) {
+			ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
+			    /*command*/ 1, /*field*/ 0,
+			    /*bit_valid*/ 0, /*bit*/ 0);
+			ctl_done((union ctl_io *)ctsio);
+			return (CTL_RETVAL_COMPLETE);
+		}
+
+		/* Revert to defaults. */
+		ctl_init_page_index(lun);
+		mtx_lock(&lun->lun_lock);
+		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
+		mtx_unlock(&lun->lun_lock);
+		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+			ctl_isc_announce_mode(lun, -1,
+			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
+			    lun->mode_pages.index[i].subpage);
+		}
+		ctl_set_success(ctsio);
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
 	/*
 	 * From SPC-3:
 	 * "A parameter list length of zero indicates that the Data-Out Buffer
@@ -6370,7 +6284,6 @@
 		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
 		ctsio->kern_data_len = param_len;
 		ctsio->kern_total_len = param_len;
-		ctsio->kern_data_resid = 0;
 		ctsio->kern_rel_offset = 0;
 		ctsio->kern_sg_entries = 0;
 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -6396,8 +6309,7 @@
 		break;
 	}
 	default:
-		panic("Invalid CDB type %#x", ctsio->cdb[0]);
-		break;
+		panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
 	}
 
 	if (param_len < (header_size + bd_len)) {
@@ -6418,15 +6330,10 @@
 
 	modepage_info = (union ctl_modepage_info *)
 		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
-
 	memset(modepage_info, 0, sizeof(*modepage_info));
+	modepage_info->header.len_left = param_len - header_size - bd_len;
+	modepage_info->header.len_used = header_size + bd_len;
 
-	len_left = param_len - header_size - bd_len;
-	len_used = header_size + bd_len;
-
-	modepage_info->header.len_left = len_left;
-	modepage_info->header.len_used = len_used;
-
 	return (ctl_do_mode_select((union ctl_io *)ctsio));
 }
 
@@ -6433,27 +6340,18 @@
 int
 ctl_mode_sense(struct ctl_scsiio *ctsio)
 {
-	struct ctl_lun *lun;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	int pc, page_code, dbd, llba, subpage;
 	int alloc_len, page_len, header_len, total_len;
 	struct scsi_mode_block_descr *block_desc;
 	struct ctl_page_index *page_index;
-	int control_dev;
 
 	dbd = 0;
 	llba = 0;
 	block_desc = NULL;
-	page_index = NULL;
 
 	CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
-	if (lun->be_lun->lun_type != T_DIRECT)
-		control_dev = 1;
-	else
-		control_dev = 0;
-
 	switch (ctsio->cdb[0]) {
 	case MODE_SENSE_6: {
 		struct scsi_mode_sense_6 *cdb;
@@ -6505,7 +6403,7 @@
 	 */
 	switch (page_code) {
 	case SMS_ALL_PAGES_PAGE: {
-		int i;
+		u_int i;
 
 		page_len = 0;
 
@@ -6526,59 +6424,71 @@
 		}
 
 		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
-			if ((control_dev != 0)
-			 && (lun->mode_pages.index[i].page_flags &
-			     CTL_PAGE_FLAG_DISK_ONLY))
+			page_index = &lun->mode_pages.index[i];
+
+			/* Make sure the page is supported for this dev type */
+			if (lun->be_lun->lun_type == T_DIRECT &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
 				continue;
+			if (lun->be_lun->lun_type == T_PROCESSOR &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
+				continue;
+			if (lun->be_lun->lun_type == T_CDROM &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
+				continue;
 
 			/*
 			 * We don't use this subpage if the user didn't
 			 * request all subpages.
 			 */
-			if ((lun->mode_pages.index[i].subpage != 0)
+			if ((page_index->subpage != 0)
 			 && (subpage == SMS_SUBPAGE_PAGE_0))
 				continue;
 
 #if 0
 			printf("found page %#x len %d\n",
-			       lun->mode_pages.index[i].page_code &
-			       SMPH_PC_MASK,
-			       lun->mode_pages.index[i].page_len);
+			       page_index->page_code & SMPH_PC_MASK,
+			       page_index->page_len);
 #endif
-			page_len += lun->mode_pages.index[i].page_len;
+			page_len += page_index->page_len;
 		}
 		break;
 	}
 	default: {
-		int i;
+		u_int i;
 
 		page_len = 0;
 
 		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+			page_index = &lun->mode_pages.index[i];
+
+			/* Make sure the page is supported for this dev type */
+			if (lun->be_lun->lun_type == T_DIRECT &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
+				continue;
+			if (lun->be_lun->lun_type == T_PROCESSOR &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
+				continue;
+			if (lun->be_lun->lun_type == T_CDROM &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
+				continue;
+
 			/* Look for the right page code */
-			if ((lun->mode_pages.index[i].page_code &
-			     SMPH_PC_MASK) != page_code)
+			if ((page_index->page_code & SMPH_PC_MASK) != page_code)
 				continue;
 
 			/* Look for the right subpage or the subpage wildcard */
-			if ((lun->mode_pages.index[i].subpage != subpage)
+			if ((page_index->subpage != subpage)
 			 && (subpage != SMS_SUBPAGE_ALL))
 				continue;
 
-			/* Make sure the page is supported for this dev type */
-			if ((control_dev != 0)
-			 && (lun->mode_pages.index[i].page_flags &
-			     CTL_PAGE_FLAG_DISK_ONLY))
-				continue;
-
 #if 0
 			printf("found page %#x len %d\n",
-			       lun->mode_pages.index[i].page_code &
-			       SMPH_PC_MASK,
-			       lun->mode_pages.index[i].page_len);
+			       page_index->page_code & SMPH_PC_MASK,
+			       page_index->page_len);
 #endif
 
-			page_len += lun->mode_pages.index[i].page_len;
+			page_len += page_index->page_len;
 		}
 
 		if (page_len == 0) {
@@ -6603,17 +6513,9 @@
 
 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
 	ctsio->kern_sg_entries = 0;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
-	if (total_len < alloc_len) {
-		ctsio->residual = alloc_len - total_len;
-		ctsio->kern_data_len = total_len;
-		ctsio->kern_total_len = total_len;
-	} else {
-		ctsio->residual = 0;
-		ctsio->kern_data_len = alloc_len;
-		ctsio->kern_total_len = alloc_len;
-	}
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
 
 	switch (ctsio->cdb[0]) {
 	case MODE_SENSE_6: {
@@ -6621,8 +6523,13 @@
 
 		header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
 
-		header->datalen = ctl_min(total_len - 1, 254);
-
+		header->datalen = MIN(total_len - 1, 254);
+		if (lun->be_lun->lun_type == T_DIRECT) {
+			header->dev_specific = 0x10; /* DPOFUA */
+			if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
+			    (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0)
+				header->dev_specific |= 0x80; /* WP */
+		}
 		if (dbd)
 			header->block_descr_len = 0;
 		else
@@ -6637,8 +6544,14 @@
 
 		header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
 
-		datalen = ctl_min(total_len - 2, 65533);
+		datalen = MIN(total_len - 2, 65533);
 		scsi_ulto2b(datalen, header->datalen);
+		if (lun->be_lun->lun_type == T_DIRECT) {
+			header->dev_specific = 0x10; /* DPOFUA */
+			if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
+			    (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0)
+				header->dev_specific |= 0x80; /* WP */
+		}
 		if (dbd)
 			scsi_ulto2b(0, header->block_descr_len);
 		else
@@ -6648,8 +6561,7 @@
 		break;
 	}
 	default:
-		panic("invalid CDB type %#x", ctsio->cdb[0]);
-		break; /* NOTREACHED */
+		panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
 	}
 
 	/*
@@ -6657,7 +6569,7 @@
 	 * descriptor.  Otherwise, just set it to 0.
 	 */
 	if (dbd == 0) {
-		if (control_dev != 0)
+		if (lun->be_lun->lun_type == T_DIRECT)
 			scsi_ulto3b(lun->be_lun->blocksize,
 				    block_desc->block_len);
 		else
@@ -6673,11 +6585,15 @@
 			struct ctl_page_index *page_index;
 
 			page_index = &lun->mode_pages.index[i];
-
-			if ((control_dev != 0)
-			 && (page_index->page_flags &
-			    CTL_PAGE_FLAG_DISK_ONLY))
+			if (lun->be_lun->lun_type == T_DIRECT &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
 				continue;
+			if (lun->be_lun->lun_type == T_PROCESSOR &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
+				continue;
+			if (lun->be_lun->lun_type == T_CDROM &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
+				continue;
 
 			/*
 			 * We don't use this subpage if the user didn't
@@ -6724,10 +6640,15 @@
 				continue;
 
 			/* Make sure the page is supported for this dev type */
-			if ((control_dev != 0)
-			 && (page_index->page_flags &
-			     CTL_PAGE_FLAG_DISK_ONLY))
+			if (lun->be_lun->lun_type == T_DIRECT &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
 				continue;
+			if (lun->be_lun->lun_type == T_PROCESSOR &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
+				continue;
+			if (lun->be_lun->lun_type == T_CDROM &&
+			    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
+				continue;
 
 			/*
 			 * Call the handler, if it exists, to update the
@@ -6746,11 +6667,224 @@
 	}
 	}
 
-	ctsio->scsi_status = SCSI_STATUS_OK;
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
 
+int
+ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
+			       struct ctl_page_index *page_index,
+			       int pc)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_log_param_header *phdr;
+	uint8_t *data;
+	uint64_t val;
+
+	data = page_index->page_data;
+
+	if (lun->backend->lun_attr != NULL &&
+	    (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail"))
+	     != UINT64_MAX) {
+		phdr = (struct scsi_log_param_header *)data;
+		scsi_ulto2b(0x0001, phdr->param_code);
+		phdr->param_control = SLP_LBIN | SLP_LP;
+		phdr->param_len = 8;
+		data = (uint8_t *)(phdr + 1);
+		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
+		data[4] = 0x02; /* per-pool */
+		data += phdr->param_len;
+	}
+
+	if (lun->backend->lun_attr != NULL &&
+	    (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused"))
+	     != UINT64_MAX) {
+		phdr = (struct scsi_log_param_header *)data;
+		scsi_ulto2b(0x0002, phdr->param_code);
+		phdr->param_control = SLP_LBIN | SLP_LP;
+		phdr->param_len = 8;
+		data = (uint8_t *)(phdr + 1);
+		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
+		data[4] = 0x01; /* per-LUN */
+		data += phdr->param_len;
+	}
+
+	if (lun->backend->lun_attr != NULL &&
+	    (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail"))
+	     != UINT64_MAX) {
+		phdr = (struct scsi_log_param_header *)data;
+		scsi_ulto2b(0x00f1, phdr->param_code);
+		phdr->param_control = SLP_LBIN | SLP_LP;
+		phdr->param_len = 8;
+		data = (uint8_t *)(phdr + 1);
+		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
+		data[4] = 0x02; /* per-pool */
+		data += phdr->param_len;
+	}
+
+	if (lun->backend->lun_attr != NULL &&
+	    (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused"))
+	     != UINT64_MAX) {
+		phdr = (struct scsi_log_param_header *)data;
+		scsi_ulto2b(0x00f2, phdr->param_code);
+		phdr->param_control = SLP_LBIN | SLP_LP;
+		phdr->param_len = 8;
+		data = (uint8_t *)(phdr + 1);
+		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
+		data[4] = 0x02; /* per-pool */
+		data += phdr->param_len;
+	}
+
+	page_index->page_len = data - page_index->page_data;
+	return (0);
+}
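
Each logical-block-provisioning log parameter above is a 4-byte header plus an 8-byte payload: a 4-byte count of 2^CTL_LBP_EXPONENT-block units and a scope byte. A standalone sketch of one such parameter (LBP_EXPONENT and the SLP_* bit values are assumptions here):

#include <stdint.h>
#include <stdio.h>

#define LBP_EXPONENT	11	/* assumption: stand-in for CTL_LBP_EXPONENT */

struct log_param_header {	/* mirrors struct scsi_log_param_header */
	uint8_t param_code[2];
	uint8_t param_control;
	uint8_t param_len;
};

/* Emit one provisioning parameter: header, 4-byte unit count, scope. */
static uint8_t *
lbp_param(uint8_t *data, uint16_t code, uint64_t blocks, uint8_t scope)
{
	struct log_param_header *phdr = (void *)data;
	uint32_t units = (uint32_t)(blocks >> LBP_EXPONENT);

	phdr->param_code[0] = code >> 8;
	phdr->param_code[1] = code & 0xff;
	phdr->param_control = 0x02 | 0x01;	/* SLP_LBIN | SLP_LP (assumed) */
	phdr->param_len = 8;
	data += sizeof(*phdr);
	data[0] = units >> 24; data[1] = units >> 16;
	data[2] = units >> 8;  data[3] = units;
	data[4] = scope;			/* 0x01 per-LUN, 0x02 per-pool */
	return (data + phdr->param_len);
}

int
main(void)
{
	uint8_t page[64] = { 0 };
	uint8_t *end = lbp_param(page, 0x0001, 1ULL << 20, 0x02);

	printf("parameter length: %td bytes\n", end - page);	/* 12 */
	return (0);
}
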
+
+int
+ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
+			       struct ctl_page_index *page_index,
+			       int pc)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct stat_page *data;
+	struct bintime *t;
+
+	data = (struct stat_page *)page_index->page_data;
+
+	scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
+	data->sap.hdr.param_control = SLP_LBIN;
+	data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
+	    sizeof(struct scsi_log_param_header);
+	scsi_u64to8b(lun->stats.operations[CTL_STATS_READ],
+	    data->sap.read_num);
+	scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE],
+	    data->sap.write_num);
+	if (lun->be_lun->blocksize > 0) {
+		scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] /
+		    lun->be_lun->blocksize, data->sap.recvieved_lba);
+		scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] /
+		    lun->be_lun->blocksize, data->sap.transmitted_lba);
+	}
+	t = &lun->stats.time[CTL_STATS_READ];
+	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
+	    data->sap.read_int);
+	t = &lun->stats.time[CTL_STATS_WRITE];
+	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
+	    data->sap.write_int);
+	scsi_u64to8b(0, data->sap.weighted_num);
+	scsi_u64to8b(0, data->sap.weighted_int);
+	scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
+	data->it.hdr.param_control = SLP_LBIN;
+	data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
+	    sizeof(struct scsi_log_param_header);
+#ifdef CTL_TIME_IO
+	scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
+#endif
+	scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
+	data->ti.hdr.param_control = SLP_LBIN;
+	data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
+	    sizeof(struct scsi_log_param_header);
+	scsi_ulto4b(3, data->ti.exponent);
+	scsi_ulto4b(1, data->ti.integer);
+	return (0);
+}
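
The interval fields convert a struct bintime to milliseconds as sec * 1000 + frac / (UINT64_MAX / 1000), which maps the 2^64-scaled fraction onto 0..1000 without 128-bit math. A quick check of that identity:

#include <stdint.h>
#include <stdio.h>

struct bintime {	/* same shape as sys/time.h's bintime; time_t
			 * widened to int64_t for this sketch */
	int64_t  sec;
	uint64_t frac;	/* fraction of a second in 2^64 units */
};

static uint64_t
bintime_ms(const struct bintime *t)
{

	return ((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000));
}

int
main(void)
{
	struct bintime t = { 2, UINT64_MAX / 2 };	/* 2.5 s */

	printf("%llu ms\n", (unsigned long long)bintime_ms(&t));  /* 2500 */
	return (0);
}
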
+
+int
+ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
+			       struct ctl_page_index *page_index,
+			       int pc)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_log_informational_exceptions *data;
+
+	data = (struct scsi_log_informational_exceptions *)page_index->page_data;
+
+	scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
+	data->hdr.param_control = SLP_LBIN;
+	data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) -
+	    sizeof(struct scsi_log_param_header);
+	data->ie_asc = lun->ie_asc;
+	data->ie_ascq = lun->ie_ascq;
+	data->temperature = 0xff;
+	return (0);
+}
+
+int
+ctl_log_sense(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	int i, pc, page_code, subpage;
+	int alloc_len, total_len;
+	struct ctl_page_index *page_index;
+	struct scsi_log_sense *cdb;
+	struct scsi_log_header *header;
+
+	CTL_DEBUG_PRINT(("ctl_log_sense\n"));
+
+	cdb = (struct scsi_log_sense *)ctsio->cdb;
+	pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
+	page_code = cdb->page & SLS_PAGE_CODE;
+	subpage = cdb->subpage;
+	alloc_len = scsi_2btoul(cdb->length);
+
+	page_index = NULL;
+	for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
+		page_index = &lun->log_pages.index[i];
+
+		/* Look for the right page code */
+		if ((page_index->page_code & SL_PAGE_CODE) != page_code)
+			continue;
+
+		/* Look for the right subpage or the subpage wildcard */
+		if (page_index->subpage != subpage)
+			continue;
+
+		break;
+	}
+	if (i >= CTL_NUM_LOG_PAGES) {
+		ctl_set_invalid_field(ctsio,
+				      /*sks_valid*/ 1,
+				      /*command*/ 1,
+				      /*field*/ 2,
+				      /*bit_valid*/ 0,
+				      /*bit*/ 0);
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	total_len = sizeof(struct scsi_log_header) + page_index->page_len;
+
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	header = (struct scsi_log_header *)ctsio->kern_data_ptr;
+	header->page = page_index->page_code;
+	if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING)
+		header->page |= SL_DS;
+	if (page_index->subpage) {
+		header->page |= SL_SPF;
+		header->subpage = page_index->subpage;
+	}
+	scsi_ulto2b(page_index->page_len, header->datalen);
+
+	/*
+	 * Call the handler, if it exists, to update the
+	 * page to the latest values.
+	 */
+	if (page_index->sense_handler != NULL)
+		page_index->sense_handler(ctsio, page_index, pc);
+
+	memcpy(header + 1, page_index->page_data, page_index->page_len);
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
 	ctl_datamove((union ctl_io *)ctsio);
-
 	return (CTL_RETVAL_COMPLETE);
 }
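
Note the recurring pattern in these data-in handlers: kern_data_len = min(total_len, alloc_len) clamps the transfer to the CDB's ALLOCATION LENGTH, replacing the old explicit residual bookkeeping. Distilled:

#include <stdint.h>
#include <stdio.h>

static uint32_t
xfer_len(uint32_t total_len, uint32_t alloc_len)
{

	/* Move at most what the initiator asked for; no residual
	 * field to maintain by hand anymore. */
	return (total_len < alloc_len ? total_len : alloc_len);
}

int
main(void)
{

	printf("%u\n", xfer_len(128, 64));	/* truncated to 64 */
	printf("%u\n", xfer_len(128, 255));	/* all 128 move */
	return (0);
}
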
 
@@ -6757,9 +6891,9 @@
 int
 ctl_read_capacity(struct ctl_scsiio *ctsio)
 {
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_read_capacity *cdb;
 	struct scsi_read_capacity_data *data;
-	struct ctl_lun *lun;
 	uint32_t lba;
 
 	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
@@ -6779,14 +6913,10 @@
 		return (CTL_RETVAL_COMPLETE);
 	}
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
 	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
-	ctsio->residual = 0;
 	ctsio->kern_data_len = sizeof(*data);
 	ctsio->kern_total_len = sizeof(*data);
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -6805,20 +6935,19 @@
 	 */
 	scsi_ulto4b(lun->be_lun->blocksize, data->length);
 
-	ctsio->scsi_status = SCSI_STATUS_OK;
-
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
 	ctl_datamove((union ctl_io *)ctsio);
-
 	return (CTL_RETVAL_COMPLETE);
 }
 
-static int
+int
 ctl_read_capacity_16(struct ctl_scsiio *ctsio)
 {
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_read_capacity_16 *cdb;
 	struct scsi_read_capacity_data_long *data;
-	struct ctl_lun *lun;
 	uint64_t lba;
 	uint32_t alloc_len;
 
@@ -6841,237 +6970,567 @@
 		return (CTL_RETVAL_COMPLETE);
 	}
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
 	data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
-
-	if (sizeof(*data) < alloc_len) {
-		ctsio->residual = alloc_len - sizeof(*data);
-		ctsio->kern_data_len = sizeof(*data);
-		ctsio->kern_total_len = sizeof(*data);
-	} else {
-		ctsio->residual = 0;
-		ctsio->kern_data_len = alloc_len;
-		ctsio->kern_total_len = alloc_len;
-	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(sizeof(*data), alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
 
 	scsi_u64to8b(lun->be_lun->maxlba, data->addr);
 	/* XXX KDM this may not be 512 bytes... */
 	scsi_ulto4b(lun->be_lun->blocksize, data->length);
+	data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
+	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp);
+	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
+		data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
 
-	ctsio->scsi_status = SCSI_STATUS_OK;
-
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
 	ctl_datamove((union ctl_io *)ctsio);
-
 	return (CTL_RETVAL_COMPLETE);
 }
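
READ CAPACITY(16) now advertises provisioning: pblockexp goes into the low bits of prot_lbppbe, pblockoff into the 14-bit LALBA field, and UNMAP-capable LUNs get LBPME|LBPRZ in the top bits of byte 0. A sketch using the SBC-3 bit positions (mask values are assumptions mirroring the SRC16_* constants):

#include <stdint.h>
#include <stdio.h>

#define LBPPBE_MASK	0x0f	/* logical blocks per physical block exp. */
#define LALBA_MASK	0x3fff	/* lowest aligned LBA, 14 bits */
#define LBPME		0x80	/* logical block provisioning enabled */
#define LBPRZ		0x40	/* unmapped blocks read as zero */

static void
encode_lbp(uint8_t *prot_lbppbe, uint8_t lalba_lbp[2],
    uint8_t pblockexp, uint16_t pblockoff, int unmap)
{

	*prot_lbppbe = pblockexp & LBPPBE_MASK;
	lalba_lbp[0] = (pblockoff & LALBA_MASK) >> 8;
	lalba_lbp[1] = pblockoff & 0xff;
	if (unmap)
		lalba_lbp[0] |= LBPME | LBPRZ;
}

int
main(void)
{
	uint8_t pl, ll[2];

	encode_lbp(&pl, ll, 3, 0, 1);	/* 2^3 LBs per physical block */
	printf("%02x %02x %02x\n", pl, ll[0], ll[1]);	/* 03 c0 00 */
	return (0);
}
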
 
 int
-ctl_service_action_in(struct ctl_scsiio *ctsio)
+ctl_get_lba_status(struct ctl_scsiio *ctsio)
 {
-	struct scsi_service_action_in *cdb;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_get_lba_status *cdb;
+	struct scsi_get_lba_status_data *data;
+	struct ctl_lba_len_flags *lbalen;
+	uint64_t lba;
+	uint32_t alloc_len, total_len;
 	int retval;
 
-	CTL_DEBUG_PRINT(("ctl_service_action_in\n"));
+	CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));
 
-	cdb = (struct scsi_service_action_in *)ctsio->cdb;
+	cdb = (struct scsi_get_lba_status *)ctsio->cdb;
+	lba = scsi_8btou64(cdb->addr);
+	alloc_len = scsi_4btoul(cdb->alloc_len);
 
-	retval = CTL_RETVAL_COMPLETE;
-
-	switch (cdb->service_action) {
-	case SRC16_SERVICE_ACTION:
-		retval = ctl_read_capacity_16(ctsio);
-		break;
-	default:
-		ctl_set_invalid_field(/*ctsio*/ ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 4);
+	if (lba > lun->be_lun->maxlba) {
+		ctl_set_lba_out_of_range(ctsio, lba);
 		ctl_done((union ctl_io *)ctsio);
-		break;
+		return (CTL_RETVAL_COMPLETE);
 	}
 
+	total_len = sizeof(*data) + sizeof(data->descr[0]);
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	/* Fill in dummy data in case the backend cannot tell us anything. */
+	scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
+	scsi_u64to8b(lba, data->descr[0].addr);
+	scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba),
+	    data->descr[0].length);
+	data->descr[0].status = 0; /* Mapped or unknown. */
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+
+	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+	lbalen->lba = lba;
+	lbalen->len = total_len;
+	lbalen->flags = 0;
+	retval = lun->backend->config_read((union ctl_io *)ctsio);
 	return (retval);
 }
 
 int
-ctl_maintenance_in(struct ctl_scsiio *ctsio)
+ctl_read_defect(struct ctl_scsiio *ctsio)
 {
+	struct scsi_read_defect_data_10 *ccb10;
+	struct scsi_read_defect_data_12 *ccb12;
+	struct scsi_read_defect_data_hdr_10 *data10;
+	struct scsi_read_defect_data_hdr_12 *data12;
+	uint32_t alloc_len, data_len;
+	uint8_t format;
+
+	CTL_DEBUG_PRINT(("ctl_read_defect\n"));
+
+	if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
+		ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb;
+		format = ccb10->format;
+		alloc_len = scsi_2btoul(ccb10->alloc_length);
+		data_len = sizeof(*data10);
+	} else {
+		ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb;
+		format = ccb12->format;
+		alloc_len = scsi_4btoul(ccb12->alloc_length);
+		data_len = sizeof(*data12);
+	}
+	if (alloc_len == 0) {
+		ctl_set_success(ctsio);
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
+		data10 = (struct scsi_read_defect_data_hdr_10 *)
+		    ctsio->kern_data_ptr;
+		data10->format = format;
+		scsi_ulto2b(0, data10->length);
+	} else {
+		data12 = (struct scsi_read_defect_data_hdr_12 *)
+		    ctsio->kern_data_ptr;
+		data12->format = format;
+		scsi_ulto2b(0, data12->generation);
+		scsi_ulto4b(0, data12->length);
+	}
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
+{
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_maintenance_in *cdb;
 	int retval;
-	int alloc_len, total_len = 0;
-	int num_target_port_groups;
-	struct ctl_lun *lun;
-	struct ctl_softc *softc;
+	int alloc_len, ext, total_len = 0, g, pc, pg, ts, os;
+	int num_ha_groups, num_target_ports, shared_group;
+	struct ctl_port *port;
 	struct scsi_target_group_data *rtg_ptr;
-	struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2;
-	struct scsi_target_port_descriptor  *tp_desc_ptr1_1, *tp_desc_ptr1_2,
-	                                    *tp_desc_ptr2_1, *tp_desc_ptr2_2;
+	struct scsi_target_group_data_extended *rtg_ext_ptr;
+	struct scsi_target_port_group_descriptor *tpg_desc;
 
-	CTL_DEBUG_PRINT(("ctl_maintenance_in\n"));
+	CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
 
 	cdb = (struct scsi_maintenance_in *)ctsio->cdb;
-	softc = control_softc;
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 	retval = CTL_RETVAL_COMPLETE;
-	mtx_lock(&softc->ctl_lock);
 
-	if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) {
+	switch (cdb->byte2 & STG_PDF_MASK) {
+	case STG_PDF_LENGTH:
+		ext = 0;
+		break;
+	case STG_PDF_EXTENDED:
+		ext = 1;
+		break;
+	default:
 		ctl_set_invalid_field(/*ctsio*/ ctsio,
 				      /*sks_valid*/ 1,
 				      /*command*/ 1,
-				      /*field*/ 1,
+				      /*field*/ 2,
 				      /*bit_valid*/ 1,
-				      /*bit*/ 4);
+				      /*bit*/ 5);
 		ctl_done((union ctl_io *)ctsio);
 		return(retval);
 	}
 
-	if (ctl_is_single)
-        	num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1;
+	num_target_ports = 0;
+	shared_group = (softc->is_single != 0);
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(port, &softc->port_list, links) {
+		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+			continue;
+		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+			continue;
+		num_target_ports++;
+		if (port->status & CTL_PORT_STATUS_HA_SHARED)
+			shared_group = 1;
+	}
+	mtx_unlock(&softc->ctl_lock);
+	num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES;
+
+	if (ext)
+		total_len = sizeof(struct scsi_target_group_data_extended);
 	else
-        	num_target_port_groups = NUM_TARGET_PORT_GROUPS;
+		total_len = sizeof(struct scsi_target_group_data);
+	total_len += sizeof(struct scsi_target_port_group_descriptor) *
+		(shared_group + num_ha_groups) +
+	    sizeof(struct scsi_target_port_descriptor) * num_target_ports;
 
-	total_len = sizeof(struct scsi_target_group_data) +
-		sizeof(struct scsi_target_port_group_descriptor) *
-		num_target_port_groups +
-		sizeof(struct scsi_target_port_descriptor) *
-		NUM_PORTS_PER_GRP * num_target_port_groups;
-
 	alloc_len = scsi_4btoul(cdb->length);
 
 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
 	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
 
-	if (total_len < alloc_len) {
-		ctsio->residual = alloc_len - total_len;
-		ctsio->kern_data_len = total_len;
-		ctsio->kern_total_len = total_len;
+	if (ext) {
+		rtg_ext_ptr = (struct scsi_target_group_data_extended *)
+		    ctsio->kern_data_ptr;
+		scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
+		rtg_ext_ptr->format_type = 0x10;
+		rtg_ext_ptr->implicit_transition_time = 0;
+		tpg_desc = &rtg_ext_ptr->groups[0];
 	} else {
-		ctsio->residual = 0;
-		ctsio->kern_data_len = alloc_len;
-		ctsio->kern_total_len = alloc_len;
+		rtg_ptr = (struct scsi_target_group_data *)
+		    ctsio->kern_data_ptr;
+		scsi_ulto4b(total_len - 4, rtg_ptr->length);
+		tpg_desc = &rtg_ptr->groups[0];
 	}
-	ctsio->kern_data_resid = 0;
-	ctsio->kern_rel_offset = 0;
 
-	rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr;
+	mtx_lock(&softc->ctl_lock);
+	pg = softc->port_min / softc->port_cnt;
+	if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) {
+		/* Some shelf is known to be primary. */
+		if (softc->ha_link == CTL_HA_LINK_OFFLINE)
+			os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
+		else if (softc->ha_link == CTL_HA_LINK_UNKNOWN)
+			os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
+		else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY)
+			os = TPG_ASYMMETRIC_ACCESS_STANDBY;
+		else
+			os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+		if (lun->flags & CTL_LUN_PRIMARY_SC) {
+			ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
+		} else {
+			ts = os;
+			os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
+		}
+	} else {
+		/* No known primary shelf. */
+		if (softc->ha_link == CTL_HA_LINK_OFFLINE) {
+			ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
+			os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
+		} else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) {
+			ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
+			os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
+		} else {
+			ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
+		}
+	}
+	if (shared_group) {
+		tpg_desc->pref_state = ts;
+		tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
+		    TPG_U_SUP | TPG_T_SUP;
+		scsi_ulto2b(1, tpg_desc->target_port_group);
+		tpg_desc->status = TPG_IMPLICIT;
+		pc = 0;
+		STAILQ_FOREACH(port, &softc->port_list, links) {
+			if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+				continue;
+			if (!softc->is_single &&
+			    (port->status & CTL_PORT_STATUS_HA_SHARED) == 0)
+				continue;
+			if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+				continue;
+			scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
+			    relative_target_port_identifier);
+			pc++;
+		}
+		tpg_desc->target_port_count = pc;
+		tpg_desc = (struct scsi_target_port_group_descriptor *)
+		    &tpg_desc->descriptors[pc];
+	}
+	for (g = 0; g < num_ha_groups; g++) {
+		tpg_desc->pref_state = (g == pg) ? ts : os;
+		tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
+		    TPG_U_SUP | TPG_T_SUP;
+		scsi_ulto2b(2 + g, tpg_desc->target_port_group);
+		tpg_desc->status = TPG_IMPLICIT;
+		pc = 0;
+		STAILQ_FOREACH(port, &softc->port_list, links) {
+			if (port->targ_port < g * softc->port_cnt ||
+			    port->targ_port >= (g + 1) * softc->port_cnt)
+				continue;
+			if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+				continue;
+			if (port->status & CTL_PORT_STATUS_HA_SHARED)
+				continue;
+			if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+				continue;
+			scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
+			    relative_target_port_identifier);
+			pc++;
+		}
+		tpg_desc->target_port_count = pc;
+		tpg_desc = (struct scsi_target_port_group_descriptor *)
+		    &tpg_desc->descriptors[pc];
+	}
+	mtx_unlock(&softc->ctl_lock);
 
-	tpg_desc_ptr1 = &rtg_ptr->groups[0];
-	tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0];
-	tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *)
-	        &tp_desc_ptr1_1->desc_list[0];
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return(retval);
+}
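
The REPORT TARGET PORT GROUPS builder walks a chain of variable-length descriptors, advancing with tpg_desc = (...)&tpg_desc->descriptors[pc] once a group's port list is filled. The pointer arithmetic, isolated (struct layouts assumed to mirror the scsi_target_port_group_descriptor wire format):

#include <stdint.h>
#include <stdio.h>

struct port_descr {
	uint8_t obsolete[2];
	uint8_t rel_tgt_port_id[2];
};
struct tpg_descr {
	uint8_t pref_state;
	uint8_t support;
	uint8_t target_port_group[2];
	uint8_t reserved;
	uint8_t status;
	uint8_t vendor_spec;
	uint8_t target_port_count;
	struct port_descr descriptors[];
};

int
main(void)
{
	uint8_t buf[256] = { 0 };
	struct tpg_descr *tpg = (struct tpg_descr *)buf;
	int pc = 2;			/* two ports in this group */

	tpg->target_port_count = pc;
	/* Advance past the filled port list to the next group. */
	tpg = (struct tpg_descr *)&tpg->descriptors[pc];
	printf("next descriptor at offset %td\n", (uint8_t *)tpg - buf);
	return (0);
}
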
 
+int
+ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_report_supported_opcodes *cdb;
+	const struct ctl_cmd_entry *entry, *sentry;
+	struct scsi_report_supported_opcodes_all *all;
+	struct scsi_report_supported_opcodes_descr *descr;
+	struct scsi_report_supported_opcodes_one *one;
+	int retval;
+	int alloc_len, total_len;
+	int opcode, service_action, i, j, num;
 
+	CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
 
-	if (ctl_is_single == 0) {
-		tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *)
-	                &tp_desc_ptr1_2->desc_list[0];
-		tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0];
-		tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *)
-	        	&tp_desc_ptr2_1->desc_list[0];
-        } else {
-		tpg_desc_ptr2 = NULL;
-		tp_desc_ptr2_1 = NULL;
-		tp_desc_ptr2_2 = NULL;
-	}
+	cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
+	retval = CTL_RETVAL_COMPLETE;
 
-	scsi_ulto4b(total_len - 4, rtg_ptr->length);
-	if (ctl_is_single == 0) {
-        	if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
-			if (lun->flags & CTL_LUN_PRIMARY_SC) {
-				tpg_desc_ptr1->pref_state = TPG_PRIMARY;
-				tpg_desc_ptr2->pref_state =
-					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+	opcode = cdb->requested_opcode;
+	service_action = scsi_2btoul(cdb->requested_service_action);
+	switch (cdb->options & RSO_OPTIONS_MASK) {
+	case RSO_OPTIONS_ALL:
+		num = 0;
+		for (i = 0; i < 256; i++) {
+			entry = &ctl_cmd_table[i];
+			if (entry->flags & CTL_CMD_FLAG_SA5) {
+				for (j = 0; j < 32; j++) {
+					sentry = &((const struct ctl_cmd_entry *)
+					    entry->execute)[j];
+					if (ctl_cmd_applicable(
+					    lun->be_lun->lun_type, sentry))
+						num++;
+				}
 			} else {
-				tpg_desc_ptr1->pref_state =
-					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
-				tpg_desc_ptr2->pref_state = TPG_PRIMARY;
+				if (ctl_cmd_applicable(lun->be_lun->lun_type,
+				    entry))
+					num++;
 			}
-		} else {
-			if (lun->flags & CTL_LUN_PRIMARY_SC) {
-				tpg_desc_ptr1->pref_state =
-					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
-				tpg_desc_ptr2->pref_state = TPG_PRIMARY;
-			} else {
-				tpg_desc_ptr1->pref_state = TPG_PRIMARY;
-				tpg_desc_ptr2->pref_state =
-					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
-			}
 		}
-	} else {
-		tpg_desc_ptr1->pref_state = TPG_PRIMARY;
+		total_len = sizeof(struct scsi_report_supported_opcodes_all) +
+		    num * sizeof(struct scsi_report_supported_opcodes_descr);
+		break;
+	case RSO_OPTIONS_OC:
+		if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
+			ctl_set_invalid_field(/*ctsio*/ ctsio,
+					      /*sks_valid*/ 1,
+					      /*command*/ 1,
+					      /*field*/ 2,
+					      /*bit_valid*/ 1,
+					      /*bit*/ 2);
+			ctl_done((union ctl_io *)ctsio);
+			return (CTL_RETVAL_COMPLETE);
+		}
+		total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
+		break;
+	case RSO_OPTIONS_OC_SA:
+		if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
+		    service_action >= 32) {
+			ctl_set_invalid_field(/*ctsio*/ ctsio,
+					      /*sks_valid*/ 1,
+					      /*command*/ 1,
+					      /*field*/ 2,
+					      /*bit_valid*/ 1,
+					      /*bit*/ 2);
+			ctl_done((union ctl_io *)ctsio);
+			return (CTL_RETVAL_COMPLETE);
+		}
+		/* FALLTHROUGH */
+	case RSO_OPTIONS_OC_ASA:
+		total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
+		break;
+	default:
+		ctl_set_invalid_field(/*ctsio*/ ctsio,
+				      /*sks_valid*/ 1,
+				      /*command*/ 1,
+				      /*field*/ 2,
+				      /*bit_valid*/ 1,
+				      /*bit*/ 2);
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
 	}
-	tpg_desc_ptr1->support = 0;
-	tpg_desc_ptr1->target_port_group[1] = 1;
-	tpg_desc_ptr1->status = TPG_IMPLICIT;
-	tpg_desc_ptr1->target_port_count= NUM_PORTS_PER_GRP;
 
-	if (ctl_is_single == 0) {
-		tpg_desc_ptr2->support = 0;
-		tpg_desc_ptr2->target_port_group[1] = 2;
-		tpg_desc_ptr2->status = TPG_IMPLICIT;
-		tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP;
+	alloc_len = scsi_4btoul(cdb->length);
 
-		tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
-		tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
 
-		tp_desc_ptr2_1->relative_target_port_identifier[1] = 9;
-		tp_desc_ptr2_2->relative_target_port_identifier[1] = 10;
-	} else {
-        	if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
-			tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
-			tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
-		} else {
-			tp_desc_ptr1_1->relative_target_port_identifier[1] = 9;
-			tp_desc_ptr1_2->relative_target_port_identifier[1] = 10;
+	switch (cdb->options & RSO_OPTIONS_MASK) {
+	case RSO_OPTIONS_ALL:
+		all = (struct scsi_report_supported_opcodes_all *)
+		    ctsio->kern_data_ptr;
+		num = 0;
+		for (i = 0; i < 256; i++) {
+			entry = &ctl_cmd_table[i];
+			if (entry->flags & CTL_CMD_FLAG_SA5) {
+				for (j = 0; j < 32; j++) {
+					sentry = &((const struct ctl_cmd_entry *)
+					    entry->execute)[j];
+					if (!ctl_cmd_applicable(
+					    lun->be_lun->lun_type, sentry))
+						continue;
+					descr = &all->descr[num++];
+					descr->opcode = i;
+					scsi_ulto2b(j, descr->service_action);
+					descr->flags = RSO_SERVACTV;
+					scsi_ulto2b(sentry->length,
+					    descr->cdb_length);
+				}
+			} else {
+				if (!ctl_cmd_applicable(lun->be_lun->lun_type,
+				    entry))
+					continue;
+				descr = &all->descr[num++];
+				descr->opcode = i;
+				scsi_ulto2b(0, descr->service_action);
+				descr->flags = 0;
+				scsi_ulto2b(entry->length, descr->cdb_length);
+			}
 		}
+		scsi_ulto4b(
+		    num * sizeof(struct scsi_report_supported_opcodes_descr),
+		    all->length);
+		break;
+	case RSO_OPTIONS_OC:
+		one = (struct scsi_report_supported_opcodes_one *)
+		    ctsio->kern_data_ptr;
+		entry = &ctl_cmd_table[opcode];
+		goto fill_one;
+	case RSO_OPTIONS_OC_SA:
+		one = (struct scsi_report_supported_opcodes_one *)
+		    ctsio->kern_data_ptr;
+		entry = &ctl_cmd_table[opcode];
+		entry = &((const struct ctl_cmd_entry *)
+		    entry->execute)[service_action];
+fill_one:
+		if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
+			one->support = 3;
+			scsi_ulto2b(entry->length, one->cdb_length);
+			one->cdb_usage[0] = opcode;
+			memcpy(&one->cdb_usage[1], entry->usage,
+			    entry->length - 1);
+		} else
+			one->support = 1;
+		break;
+	case RSO_OPTIONS_OC_ASA:
+		one = (struct scsi_report_supported_opcodes_one *)
+		    ctsio->kern_data_ptr;
+		entry = &ctl_cmd_table[opcode];
+		if (entry->flags & CTL_CMD_FLAG_SA5) {
+			entry = &((const struct ctl_cmd_entry *)
+			    entry->execute)[service_action];
+		} else if (service_action != 0) {
+			one->support = 1;
+			break;
+		}
+		goto fill_one;
 	}
 
-	mtx_unlock(&softc->ctl_lock);
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return(retval);
+}
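
ctl_report_supported_opcodes() walks a two-level table: ctl_cmd_table[] is indexed by opcode, and an entry flagged CTL_CMD_FLAG_SA5 reuses its execute pointer as a 32-entry sub-table indexed by service action. A compile-and-run sketch of that lookup shape; all names here are stand-ins, only the cast idiom matches the code above (the 0xa3/0x0a/0x0c values are the real SPC-4 MAINTENANCE IN assignments):

    #include <stdio.h>

    #define FLAG_SA5 0x01   /* opcode qualified by a service action */

    struct cmd_entry {
            int flags;
            const void *execute;    /* handler, or sub-table if SA5 */
    };

    /* MAINTENANCE IN (0xa3) service actions, per SPC-4. */
    static const struct cmd_entry sa_table[32] = {
            [0x0a] = { 0, "REPORT TARGET PORT GROUPS" },
            [0x0c] = { 0, "REPORT SUPPORTED OPERATION CODES" },
    };
    static const struct cmd_entry table[256] = {
            [0xa3] = { FLAG_SA5, sa_table },
    };

    int
    main(void)
    {
            const struct cmd_entry *e = &table[0xa3];

            if (e->flags & FLAG_SA5)        /* second-level lookup */
                    e = &((const struct cmd_entry *)e->execute)[0x0c];
            printf("%s\n", (const char *)e->execute);
            return (0);
    }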
 
+int
+ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
+{
+	struct scsi_report_supported_tmf *cdb;
+	struct scsi_report_supported_tmf_ext_data *data;
+	int retval;
+	int alloc_len, total_len;
+
+	CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
+
+	cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;
+
+	retval = CTL_RETVAL_COMPLETE;
+
+	if (cdb->options & RST_REPD)
+		total_len = sizeof(struct scsi_report_supported_tmf_ext_data);
+	else
+		total_len = sizeof(struct scsi_report_supported_tmf_data);
+	alloc_len = scsi_4btoul(cdb->length);
+
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr;
+	data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
+	    RST_TRS;
+	data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS;
+	data->length = total_len - 4;
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (retval);
+}
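
Like the other report handlers in this change, ctl_report_supported_tmf() builds the full response (the RST_REPD bit selects the extended layout) and then sets kern_data_len = min(total_len, alloc_len), replacing the old residual bookkeeping: the initiator receives at most its allocation length, and anything beyond total_len is simply not returned. The rule as a stand-alone sketch with hypothetical names:

    #include <stddef.h>
    #include <stdlib.h>

    /* Build a report of total_len bytes; hand back at most the
     * initiator's allocation length, no residual math needed. */
    static unsigned char *
    build_report(size_t total_len, size_t alloc_len, size_t *outlen)
    {
            unsigned char *buf;

            buf = calloc(1, total_len);     /* zeroed, like M_ZERO */
            if (buf == NULL)
                    return (NULL);
            /* ... fill descriptor fields here ... */
            *outlen = total_len < alloc_len ? total_len : alloc_len;
            return (buf);
    }

    int
    main(void)
    {
            size_t outlen;

            free(build_report(16, 252, &outlen));   /* outlen == 16 */
            return (0);
    }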
 
-	CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
-			 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
-			 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
-			 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
-			 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
+int
+ctl_report_timestamp(struct ctl_scsiio *ctsio)
+{
+	struct scsi_report_timestamp *cdb;
+	struct scsi_report_timestamp_data *data;
+	struct timeval tv;
+	int64_t timestamp;
+	int retval;
+	int alloc_len, total_len;
 
+	CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));
+
+	cdb = (struct scsi_report_timestamp *)ctsio->cdb;
+
+	retval = CTL_RETVAL_COMPLETE;
+
+	total_len = sizeof(struct scsi_report_timestamp_data);
+	alloc_len = scsi_4btoul(cdb->length);
+
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
+	scsi_ulto2b(sizeof(*data) - 2, data->length);
+	data->origin = RTS_ORIG_OUTSIDE;
+	getmicrotime(&tv);
+	timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
+	scsi_ulto4b(timestamp >> 16, data->timestamp);
+	scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
 	ctl_datamove((union ctl_io *)ctsio);
-	return(retval);
+	return (retval);
 }
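
The 6-byte SPC timestamp is milliseconds since the epoch, stored big-endian; since there is no 6-byte store helper, ctl_report_timestamp() splits it into a 4-byte high part (bits 47..16) and a 2-byte low part. A stand-alone illustration of the same split, where put_be() is a local stand-in for scsi_ulto4b()/scsi_ulto2b():

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/time.h>

    static void
    put_be(uint8_t *p, uint64_t v, int n)   /* big-endian store */
    {
            while (n--)
                    *p++ = v >> (8 * n);
    }

    int
    main(void)
    {
            struct timeval tv;
            uint8_t ts[6];
            int64_t ms;

            gettimeofday(&tv, NULL);
            ms = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
            put_be(&ts[0], (uint64_t)ms >> 16, 4);   /* scsi_ulto4b part */
            put_be(&ts[4], (uint64_t)ms & 0xffff, 2); /* scsi_ulto2b part */
            printf("%02x%02x%02x%02x%02x%02x\n",
                ts[0], ts[1], ts[2], ts[3], ts[4], ts[5]);
            return (0);
    }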
 
 int
 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
 {
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_per_res_in *cdb;
 	int alloc_len, total_len = 0;
 	/* struct scsi_per_res_in_rsrv in_data; */
-	struct ctl_lun *lun;
-	struct ctl_softc *softc;
+	uint64_t key;
 
 	CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
 
-	softc = control_softc;
-
 	cdb = (struct scsi_per_res_in *)ctsio->cdb;
 
 	alloc_len = scsi_2btoul(cdb->length);
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 retry:
-	mtx_lock(&softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	switch (cdb->action) {
 	case SPRI_RK: /* read keys */
 		total_len = sizeof(struct scsi_per_res_in_keys) +
@@ -7088,37 +7547,22 @@
 		total_len = sizeof(struct scsi_per_res_cap);
 		break;
 	case SPRI_RS: /* read full status */
+		total_len = sizeof(struct scsi_per_res_in_header) +
+		    (sizeof(struct scsi_per_res_in_full_desc) + 256) *
+		    lun->pr_key_count;
+		break;
 	default:
-		mtx_unlock(&softc->ctl_lock);
-		ctl_set_invalid_field(ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 0);
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-		break; /* NOTREACHED */
+		panic("%s: Invalid PR type %#x", __func__, cdb->action);
 	}
-	mtx_unlock(&softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 
 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
-	if (total_len < alloc_len) {
-		ctsio->residual = alloc_len - total_len;
-		ctsio->kern_data_len = total_len;
-		ctsio->kern_total_len = total_len;
-	} else {
-		ctsio->residual = 0;
-		ctsio->kern_data_len = alloc_len;
-		ctsio->kern_total_len = alloc_len;
-	}
-
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
 
-	mtx_lock(&softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	switch (cdb->action) {
 	case SPRI_RK: { // read keys
         struct scsi_per_res_in_keys *res_keys;
@@ -7136,7 +7580,7 @@
 		if (total_len != (sizeof(struct scsi_per_res_in_keys) +
 		    (lun->pr_key_count *
 		     sizeof(struct scsi_per_res_key)))){
-			mtx_unlock(&softc->ctl_lock);
+			mtx_unlock(&lun->lun_lock);
 			free(ctsio->kern_data_ptr, M_CTL);
 			printf("%s: reservation length changed, retrying\n",
 			       __func__);
@@ -7143,13 +7587,13 @@
 			goto retry;
 		}
 
-		scsi_ulto4b(lun->PRGeneration, res_keys->header.generation);
+		scsi_ulto4b(lun->pr_generation, res_keys->header.generation);
 
 		scsi_ulto4b(sizeof(struct scsi_per_res_key) *
 			     lun->pr_key_count, res_keys->header.length);
 
-		for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) {
-			if (!lun->per_res[i].registered)
+		for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) {
+			if ((key = ctl_get_prkey(lun, i)) == 0)
 				continue;
 
 			/*
@@ -7160,25 +7604,10 @@
 			 * sync), we've got a problem.
 			 */
 			if (key_count >= lun->pr_key_count) {
-#ifdef NEEDTOPORT
-				csevent_log(CSC_CTL | CSC_SHELF_SW |
-					    CTL_PR_ERROR,
-					    csevent_LogType_Fault,
-					    csevent_AlertLevel_Yellow,
-					    csevent_FRU_ShelfController,
-					    csevent_FRU_Firmware,
-				        csevent_FRU_Unknown,
-					    "registered keys %d >= key "
-					    "count %d", key_count,
-					    lun->pr_key_count);
-#endif
 				key_count++;
 				continue;
 			}
-			memcpy(res_keys->keys[key_count].key,
-			       lun->per_res[i].res_key.key,
-			       ctl_min(sizeof(res_keys->keys[key_count].key),
-			       sizeof(lun->per_res[i].res_key)));
+			scsi_u64to8b(key, res_keys->keys[key_count].key);
 			key_count++;
 		}
 		break;
@@ -7189,7 +7618,7 @@
 
 		res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
 
-		scsi_ulto4b(lun->PRGeneration, res->header.generation);
+		scsi_ulto4b(lun->pr_generation, res->header.generation);
 
 		if (lun->flags & CTL_LUN_PR_RESERVED)
 		{
@@ -7211,7 +7640,7 @@
 		 * command active right now.)
 		 */
 		if (tmp_len != total_len) {
-			mtx_unlock(&softc->ctl_lock);
+			mtx_unlock(&lun->lun_lock);
 			free(ctsio->kern_data_ptr, M_CTL);
 			printf("%s: reservation status changed, retrying\n",
 			       __func__);
@@ -7229,11 +7658,10 @@
 		 * is 0, since it doesn't really matter.
 		 */
 		if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
-			memcpy(res->data.reservation,
-			       &lun->per_res[lun->pr_res_idx].res_key,
-			       sizeof(struct scsi_per_res_key));
+			scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx),
+			    res->data.reservation);
 		}
-		res->data.scopetype = lun->res_type;
+		res->data.scopetype = lun->pr_res_type;
 		break;
 	}
 	case SPRI_RC:     //report capabilities
@@ -7243,7 +7671,8 @@
 
 		res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
 		scsi_ulto2b(sizeof(*res_cap), res_cap->length);
-		res_cap->flags2 |= SPRI_TMV;
+		res_cap->flags1 = SPRI_CRH;
+		res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5;
 		type_mask = SPRI_TM_WR_EX_AR |
 			    SPRI_TM_EX_AC_RO |
 			    SPRI_TM_WR_EX_RO |
@@ -7253,27 +7682,70 @@
 		scsi_ulto2b(type_mask, res_cap->type_mask);
 		break;
 	}
-	case SPRI_RS: //read full status
-	default:
+	case SPRI_RS: { // read full status
+		struct scsi_per_res_in_full *res_status;
+		struct scsi_per_res_in_full_desc *res_desc;
+		struct ctl_port *port;
+		int i, len;
+
+		res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;
+
 		/*
-		 * This is a bug, because we just checked for this above,
-		 * and should have returned an error.
+		 * We had to drop the lock to allocate our buffer, which
+		 * leaves time for someone to come in with another
+		 * persistent reservation.  (That is unlikely, though,
+		 * since this should be the only persistent reservation
+		 * command active right now.)
 		 */
-		panic("Invalid PR type %x", cdb->action);
-		break; /* NOTREACHED */
+		if (total_len < (sizeof(struct scsi_per_res_in_header) +
+		    (sizeof(struct scsi_per_res_in_full_desc) + 256) *
+		     lun->pr_key_count)){
+			mtx_unlock(&lun->lun_lock);
+			free(ctsio->kern_data_ptr, M_CTL);
+			printf("%s: reservation length changed, retrying\n",
+			       __func__);
+			goto retry;
+		}
+
+		scsi_ulto4b(lun->pr_generation, res_status->header.generation);
+
+		res_desc = &res_status->desc[0];
+		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+			if ((key = ctl_get_prkey(lun, i)) == 0)
+				continue;
+
+			scsi_u64to8b(key, res_desc->res_key.key);
+			if ((lun->flags & CTL_LUN_PR_RESERVED) &&
+			    (lun->pr_res_idx == i ||
+			     lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
+				res_desc->flags = SPRI_FULL_R_HOLDER;
+				res_desc->scopetype = lun->pr_res_type;
+			}
+			scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
+			    res_desc->rel_trgt_port_id);
+			len = 0;
+			port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
+			if (port != NULL)
+				len = ctl_create_iid(port,
+				    i % CTL_MAX_INIT_PER_PORT,
+				    res_desc->transport_id);
+			scsi_ulto4b(len, res_desc->additional_length);
+			res_desc = (struct scsi_per_res_in_full_desc *)
+			    &res_desc->transport_id[len];
+		}
+		scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
+		    res_status->header.length);
+		break;
 	}
-	mtx_unlock(&softc->ctl_lock);
+	default:
+		panic("%s: Invalid PR type %#x", __func__, cdb->action);
+	}
+	mtx_unlock(&lun->lun_lock);
 
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
-
-	CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
-			 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
-			 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
-			 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
-			 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
-
 	ctl_datamove((union ctl_io *)ctsio);
-
 	return (CTL_RETVAL_COMPLETE);
 }
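
ctl_persistent_reserve_in() now sizes the response under lun_lock, drops the lock for the M_WAITOK allocation, then re-takes it and re-checks the size, retrying if registrations changed while it slept. The same pattern in a user-space sketch, with a pthread mutex standing in for lun_lock and key_count for the registration state:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lun_lock = PTHREAD_MUTEX_INITIALIZER;
    static size_t key_count = 2;            /* registrations; may change */

    static void *
    build_pr_report(size_t *lenp)
    {
            size_t len;
            void *buf;

    retry:
            pthread_mutex_lock(&lun_lock);
            len = 8 + key_count * 8;        /* header + one slot per key */
            pthread_mutex_unlock(&lun_lock);

            buf = calloc(1, len);           /* may sleep; state can move */

            pthread_mutex_lock(&lun_lock);
            if (len != 8 + key_count * 8) { /* raced with a registration */
                    pthread_mutex_unlock(&lun_lock);
                    free(buf);
                    goto retry;
            }
            /* ... copy the keys into buf under the lock ... */
            pthread_mutex_unlock(&lun_lock);
            *lenp = len;
            return (buf);
    }

    int
    main(void)
    {
            size_t len;

            free(build_pr_report(&len));
            return (0);
    }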
 
@@ -7288,18 +7760,15 @@
 		struct scsi_per_res_out_parms* param)
 {
 	union ctl_ha_msg persis_io;
-	int retval, i;
-	int isc_retval;
+	int i;
 
-	retval = 0;
-
+	mtx_lock(&lun->lun_lock);
 	if (sa_res_key == 0) {
-		mtx_lock(&softc->ctl_lock);
 		if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
 			/* validate scope and type */
 			if ((cdb->scope_type & SPR_SCOPE_MASK) !=
 			     SPR_LU_SCOPE) {
-				mtx_unlock(&softc->ctl_lock);
+				mtx_unlock(&lun->lun_lock);
 				ctl_set_invalid_field(/*ctsio*/ ctsio,
 						      /*sks_valid*/ 1,
 						      /*command*/ 1,
@@ -7311,7 +7780,7 @@
 			}
 
 		        if (type>8 || type==2 || type==4 || type==0) {
-				mtx_unlock(&softc->ctl_lock);
+				mtx_unlock(&lun->lun_lock);
 				ctl_set_invalid_field(/*ctsio*/ ctsio,
        	           				      /*sks_valid*/ 1,
 						      /*command*/ 1,
@@ -7322,38 +7791,25 @@
 				return (1);
 		        }
 
-			/* temporarily unregister this nexus */
-			lun->per_res[residx].registered = 0;
-
 			/*
 			 * Unregister everybody else and build UA for
 			 * them
 			 */
-			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
-				if (lun->per_res[i].registered == 0)
+			for(i = 0; i < CTL_MAX_INITIATORS; i++) {
+				if (i == residx || ctl_get_prkey(lun, i) == 0)
 					continue;
 
-				if (!persis_offset
-				 && i <CTL_MAX_INITIATORS)
-					lun->pending_sense[i].ua_pending |=
-						CTL_UA_REG_PREEMPT;
-				else if (persis_offset
-				      && i >= persis_offset)
-					lun->pending_sense[i-persis_offset
-						].ua_pending |=
-						CTL_UA_REG_PREEMPT;
-				lun->per_res[i].registered = 0;
-				memset(&lun->per_res[i].res_key, 0,
-				       sizeof(struct scsi_per_res_key));
+				ctl_clr_prkey(lun, i);
+				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
 			}
-			lun->per_res[residx].registered = 1;
 			lun->pr_key_count = 1;
-			lun->res_type = type;
-			if (lun->res_type != SPR_TYPE_WR_EX_AR
-			 && lun->res_type != SPR_TYPE_EX_AC_AR)
+			lun->pr_res_type = type;
+			if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
+			    lun->pr_res_type != SPR_TYPE_EX_AC_AR)
 				lun->pr_res_idx = residx;
+			lun->pr_generation++;
+			mtx_unlock(&lun->lun_lock);
 
-			mtx_unlock(&softc->ctl_lock);
 			/* send msg to other side */
 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -7363,16 +7819,11 @@
 			memcpy(persis_io.pr.pr_info.sa_res_key,
 			       param->serv_act_res_key,
 			       sizeof(param->serv_act_res_key));
-			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-			     &persis_io, sizeof(persis_io), 0)) >
-			     CTL_HA_STATUS_SUCCESS) {
-				printf("CTL:Persis Out error returned "
-				       "from ctl_ha_msg_send %d\n",
-				       isc_retval);
-			}
+			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+			    sizeof(persis_io.pr), M_WAITOK);
 		} else {
 			/* not all registrants */
-			mtx_unlock(&softc->ctl_lock);
+			mtx_unlock(&lun->lun_lock);
 			free(ctsio->kern_data_ptr, M_CTL);
 			ctl_set_invalid_field(ctsio,
 					      /*sks_valid*/ 1,
@@ -7387,7 +7838,6 @@
 		|| !(lun->flags & CTL_LUN_PR_RESERVED)) {
 		int found = 0;
 
-		mtx_lock(&softc->ctl_lock);
 		if (res_key == sa_res_key) {
 			/* special case */
 			/*
@@ -7399,7 +7849,7 @@
 			 * zero I'll take that approach since this has
 			 * to do with the sa_res_key.
 			 */
-			mtx_unlock(&softc->ctl_lock);
+			mtx_unlock(&lun->lun_lock);
 			free(ctsio->kern_data_ptr, M_CTL);
 			ctl_set_invalid_field(ctsio,
 					      /*sks_valid*/ 1,
@@ -7411,35 +7861,25 @@
 			return (1);
 		}
 
-		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
-			if (lun->per_res[i].registered
-			 && memcmp(param->serv_act_res_key,
-			    lun->per_res[i].res_key.key,
-			    sizeof(struct scsi_per_res_key)) != 0)
+		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+			if (ctl_get_prkey(lun, i) != sa_res_key)
 				continue;
 
 			found = 1;
-			lun->per_res[i].registered = 0;
-			memset(&lun->per_res[i].res_key, 0,
-			       sizeof(struct scsi_per_res_key));
+			ctl_clr_prkey(lun, i);
 			lun->pr_key_count--;
-
-			if (!persis_offset
-			 && i < CTL_MAX_INITIATORS)
-				lun->pending_sense[i].ua_pending |=
-					CTL_UA_REG_PREEMPT;
-			else if (persis_offset
-			      && i >= persis_offset)
-				lun->pending_sense[i-persis_offset].ua_pending|=
-					CTL_UA_REG_PREEMPT;
+			ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
 		}
-		mtx_unlock(&softc->ctl_lock);
 		if (!found) {
+			mtx_unlock(&lun->lun_lock);
 			free(ctsio->kern_data_ptr, M_CTL);
 			ctl_set_reservation_conflict(ctsio);
 			ctl_done((union ctl_io *)ctsio);
 			return (CTL_RETVAL_COMPLETE);
 		}
+		lun->pr_generation++;
+		mtx_unlock(&lun->lun_lock);
+
 		/* send msg to other side */
 		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
 		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -7449,21 +7889,16 @@
 		memcpy(persis_io.pr.pr_info.sa_res_key,
 		       param->serv_act_res_key,
 		       sizeof(param->serv_act_res_key));
-		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-		     &persis_io, sizeof(persis_io), 0)) >
-		     CTL_HA_STATUS_SUCCESS) {
-			printf("CTL:Persis Out error returned from "
-			       "ctl_ha_msg_send %d\n", isc_retval);
-		}
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+		    sizeof(persis_io.pr), M_WAITOK);
 	} else {
 		/* Reserved but not all registrants */
 		/* sa_res_key is res holder */
-		if (memcmp(param->serv_act_res_key,
-                   lun->per_res[lun->pr_res_idx].res_key.key,
-                   sizeof(struct scsi_per_res_key)) == 0) {
+		if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) {
 			/* validate scope and type */
 			if ((cdb->scope_type & SPR_SCOPE_MASK) !=
 			     SPR_LU_SCOPE) {
+				mtx_unlock(&lun->lun_lock);
 				ctl_set_invalid_field(/*ctsio*/ ctsio,
 						      /*sks_valid*/ 1,
 						      /*command*/ 1,
@@ -7475,6 +7910,7 @@
 			}
 
 			if (type>8 || type==2 || type==4 || type==0) {
+				mtx_unlock(&lun->lun_lock);
 				ctl_set_invalid_field(/*ctsio*/ ctsio,
 						      /*sks_valid*/ 1,
 						      /*command*/ 1,
@@ -7499,58 +7935,28 @@
 			 * except don't unregister the res holder.
 			 */
 
-			/*
-			 * Temporarily unregister so it won't get
-			 * removed or UA generated
-			 */
-			lun->per_res[residx].registered = 0;
-			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
-				if (lun->per_res[i].registered == 0)
+			for(i = 0; i < CTL_MAX_INITIATORS; i++) {
+				if (i == residx || ctl_get_prkey(lun, i) == 0)
 					continue;
 
-				if (memcmp(param->serv_act_res_key,
-				    lun->per_res[i].res_key.key,
-				    sizeof(struct scsi_per_res_key)) == 0) {
-					lun->per_res[i].registered = 0;
-					memset(&lun->per_res[i].res_key,
-					       0,
-					       sizeof(struct scsi_per_res_key));
+				if (sa_res_key == ctl_get_prkey(lun, i)) {
+					ctl_clr_prkey(lun, i);
 					lun->pr_key_count--;
-
-					if (!persis_offset
-					 && i < CTL_MAX_INITIATORS)
-						lun->pending_sense[i
-							].ua_pending |=
-							CTL_UA_REG_PREEMPT;
-					else if (persis_offset
-					      && i >= persis_offset)
-						lun->pending_sense[
-						  i-persis_offset].ua_pending |=
-						  CTL_UA_REG_PREEMPT;
-				} else if (type != lun->res_type
-					&& (lun->res_type == SPR_TYPE_WR_EX_RO
-					 || lun->res_type ==SPR_TYPE_EX_AC_RO)){
-						if (!persis_offset
-						 && i < CTL_MAX_INITIATORS)
-							lun->pending_sense[i
-							].ua_pending |=
-							CTL_UA_RES_RELEASE;
-						else if (persis_offset
-						      && i >= persis_offset)
-							lun->pending_sense[
-							i-persis_offset
-							].ua_pending |=
-							CTL_UA_RES_RELEASE;
+					ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
+				} else if (type != lun->pr_res_type &&
+				    (lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
+				     lun->pr_res_type == SPR_TYPE_EX_AC_RO)) {
+					ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
 				}
 			}
-			lun->per_res[residx].registered = 1;
-			lun->res_type = type;
-			if (lun->res_type != SPR_TYPE_WR_EX_AR
-			 && lun->res_type != SPR_TYPE_EX_AC_AR)
+			lun->pr_res_type = type;
+			if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
+			    lun->pr_res_type != SPR_TYPE_EX_AC_AR)
 				lun->pr_res_idx = residx;
 			else
-				lun->pr_res_idx =
-					CTL_PR_ALL_REGISTRANTS;
+				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
+			lun->pr_generation++;
+			mtx_unlock(&lun->lun_lock);
 
 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -7560,13 +7966,8 @@
 			memcpy(persis_io.pr.pr_info.sa_res_key,
 			       param->serv_act_res_key,
 			       sizeof(param->serv_act_res_key));
-			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-			     &persis_io, sizeof(persis_io), 0)) >
-			     CTL_HA_STATUS_SUCCESS) {
-				printf("CTL:Persis Out error returned "
-				       "from ctl_ha_msg_send %d\n",
-				       isc_retval);
-			}
+			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+			    sizeof(persis_io.pr), M_WAITOK);
 		} else {
 			/*
 			 * sa_res_key is not the res holder just
@@ -7573,39 +7974,27 @@
 			 * remove registrants
 			 */
 			int found=0;
-			mtx_lock(&softc->ctl_lock);
 
-			for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
-				if (memcmp(param->serv_act_res_key,
-				    lun->per_res[i].res_key.key,
-				    sizeof(struct scsi_per_res_key)) != 0)
+			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+				if (sa_res_key != ctl_get_prkey(lun, i))
 					continue;
 
 				found = 1;
-				lun->per_res[i].registered = 0;
-				memset(&lun->per_res[i].res_key, 0,
-				       sizeof(struct scsi_per_res_key));
+				ctl_clr_prkey(lun, i);
 				lun->pr_key_count--;
-
-				if (!persis_offset
-				 && i < CTL_MAX_INITIATORS)
-					lun->pending_sense[i].ua_pending |=
-						CTL_UA_REG_PREEMPT;
-				else if (persis_offset
-				      && i >= persis_offset)
-					lun->pending_sense[
-						i-persis_offset].ua_pending |=
-						CTL_UA_REG_PREEMPT;
+				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
 			}
 
 			if (!found) {
-				mtx_unlock(&softc->ctl_lock);
+				mtx_unlock(&lun->lun_lock);
 				free(ctsio->kern_data_ptr, M_CTL);
 				ctl_set_reservation_conflict(ctsio);
 				ctl_done((union ctl_io *)ctsio);
 		        	return (1);
 			}
-			mtx_unlock(&softc->ctl_lock);
+			lun->pr_generation++;
+			mtx_unlock(&lun->lun_lock);
+
 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
 			persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
@@ -7614,138 +8003,77 @@
 			memcpy(persis_io.pr.pr_info.sa_res_key,
 			       param->serv_act_res_key,
 			       sizeof(param->serv_act_res_key));
-			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-			     &persis_io, sizeof(persis_io), 0)) >
-			     CTL_HA_STATUS_SUCCESS) {
-				printf("CTL:Persis Out error returned "
-				       "from ctl_ha_msg_send %d\n",
-				isc_retval);
-			}
+			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+			    sizeof(persis_io.pr), M_WAITOK);
 		}
 	}
-
-	lun->PRGeneration++;
-
-	return (retval);
+	return (0);
 }
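
Both PREEMPT branches reduce to the same core: clear the key of every registrant whose key matches sa_res_key, post a REGISTRATIONS PREEMPTED unit attention for each, and fail with a reservation conflict if nothing matched. A condensed sketch of that loop; the arrays and names stand in for ctl_get_prkey()/ctl_clr_prkey()/ctl_est_ua():

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_INITIATORS  64
    #define UA_REG_PREEMPT  0x01

    static uint64_t prkey[MAX_INITIATORS];  /* 0 = unregistered */
    static uint8_t  pending_ua[MAX_INITIATORS];
    static int      pr_key_count;

    /* Returns false (-> reservation conflict) if no key matched.
     * Callers guarantee sa_res_key != 0 in this branch. */
    static bool
    preempt_matching(uint64_t sa_res_key)
    {
            bool found = false;
            int i;

            for (i = 0; i < MAX_INITIATORS; i++) {
                    if (prkey[i] != sa_res_key)
                            continue;
                    found = true;
                    prkey[i] = 0;
                    pr_key_count--;
                    pending_ua[i] |= UA_REG_PREEMPT;  /* tell the loser */
            }
            return (found);
    }

    int
    main(void)
    {
            prkey[5] = 0x1234;
            pr_key_count = 1;
            return (preempt_matching(0x1234) ? 0 : 1);
    }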
 
 static void
 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
 {
+	uint64_t sa_res_key;
 	int i;
 
+	sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
+
 	if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
 	 || lun->pr_res_idx == CTL_PR_NO_RESERVATION
-	 || memcmp(&lun->per_res[lun->pr_res_idx].res_key,
-		   msg->pr.pr_info.sa_res_key,
-		   sizeof(struct scsi_per_res_key)) != 0) {
-		uint64_t sa_res_key;
-		sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
-
+	 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) {
 		if (sa_res_key == 0) {
-			/* temporarily unregister this nexus */
-			lun->per_res[msg->pr.pr_info.residx].registered = 0;
-
 			/*
 			 * Unregister everybody else and build UA for
 			 * them
 			 */
-			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
-				if (lun->per_res[i].registered == 0)
+			for(i = 0; i < CTL_MAX_INITIATORS; i++) {
+				if (i == msg->pr.pr_info.residx ||
+				    ctl_get_prkey(lun, i) == 0)
 					continue;
 
-				if (!persis_offset
-				 && i < CTL_MAX_INITIATORS)
-					lun->pending_sense[i].ua_pending |=
-						CTL_UA_REG_PREEMPT;
-				else if (persis_offset && i >= persis_offset)
-					lun->pending_sense[i -
-						persis_offset].ua_pending |=
-						CTL_UA_REG_PREEMPT;
-				lun->per_res[i].registered = 0;
-				memset(&lun->per_res[i].res_key, 0,
-				       sizeof(struct scsi_per_res_key));
+				ctl_clr_prkey(lun, i);
+				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
 			}
 
-			lun->per_res[msg->pr.pr_info.residx].registered = 1;
 			lun->pr_key_count = 1;
-			lun->res_type = msg->pr.pr_info.res_type;
-			if (lun->res_type != SPR_TYPE_WR_EX_AR
-			 && lun->res_type != SPR_TYPE_EX_AC_AR)
+			lun->pr_res_type = msg->pr.pr_info.res_type;
+			if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
+			    lun->pr_res_type != SPR_TYPE_EX_AC_AR)
 				lun->pr_res_idx = msg->pr.pr_info.residx;
 		} else {
-		        for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
-				if (memcmp(msg->pr.pr_info.sa_res_key,
-		                   lun->per_res[i].res_key.key,
-		                   sizeof(struct scsi_per_res_key)) != 0)
+		        for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+				if (sa_res_key != ctl_get_prkey(lun, i))
 					continue;
 
-				lun->per_res[i].registered = 0;
-				memset(&lun->per_res[i].res_key, 0,
-				       sizeof(struct scsi_per_res_key));
+				ctl_clr_prkey(lun, i);
 				lun->pr_key_count--;
-
-				if (!persis_offset
-				 && i < persis_offset)
-					lun->pending_sense[i].ua_pending |=
-						CTL_UA_REG_PREEMPT;
-				else if (persis_offset
-				      && i >= persis_offset)
-					lun->pending_sense[i -
-						persis_offset].ua_pending |=
-						CTL_UA_REG_PREEMPT;
+				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
 			}
 		}
 	} else {
-		/*
-		 * Temporarily unregister so it won't get removed
-		 * or UA generated
-		 */
-		lun->per_res[msg->pr.pr_info.residx].registered = 0;
-		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
-			if (lun->per_res[i].registered == 0)
+		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+			if (i == msg->pr.pr_info.residx ||
+			    ctl_get_prkey(lun, i) == 0)
 				continue;
 
-			if (memcmp(msg->pr.pr_info.sa_res_key,
-	                   lun->per_res[i].res_key.key,
-	                   sizeof(struct scsi_per_res_key)) == 0) {
-				lun->per_res[i].registered = 0;
-				memset(&lun->per_res[i].res_key, 0,
-				       sizeof(struct scsi_per_res_key));
+			if (sa_res_key == ctl_get_prkey(lun, i)) {
+				ctl_clr_prkey(lun, i);
 				lun->pr_key_count--;
-				if (!persis_offset
-				 && i < CTL_MAX_INITIATORS)
-					lun->pending_sense[i].ua_pending |=
-						CTL_UA_REG_PREEMPT;
-				else if (persis_offset
-				      && i >= persis_offset)
-					lun->pending_sense[i -
-						persis_offset].ua_pending |=
-						CTL_UA_REG_PREEMPT;
-			} else if (msg->pr.pr_info.res_type != lun->res_type
-				&& (lun->res_type == SPR_TYPE_WR_EX_RO
-				 || lun->res_type == SPR_TYPE_EX_AC_RO)) {
-					if (!persis_offset
-					 && i < persis_offset)
-						lun->pending_sense[i
-							].ua_pending |=
-							CTL_UA_RES_RELEASE;
-					else if (persis_offset
-					      && i >= persis_offset)
-					lun->pending_sense[i -
-						persis_offset].ua_pending |=
-						CTL_UA_RES_RELEASE;
+				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
+			} else if (msg->pr.pr_info.res_type != lun->pr_res_type
+			    && (lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
+			     lun->pr_res_type == SPR_TYPE_EX_AC_RO)) {
+				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
 			}
 		}
-		lun->per_res[msg->pr.pr_info.residx].registered = 1;
-		lun->res_type = msg->pr.pr_info.res_type;
-		if (lun->res_type != SPR_TYPE_WR_EX_AR
-		 && lun->res_type != SPR_TYPE_EX_AC_AR)
+		lun->pr_res_type = msg->pr.pr_info.res_type;
+		if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
+		    lun->pr_res_type != SPR_TYPE_EX_AC_AR)
 			lun->pr_res_idx = msg->pr.pr_info.residx;
 		else
 			lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
 	}
-	lun->PRGeneration++;
+	lun->pr_generation++;
 
 }
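
Each successful PR mutation above ends by shipping a CTL_MSG_PERS_ACTION message to the peer, which replays the same change in ctl_pro_preempt_other()/ctl_hndl_per_res_out_on_other_sc(); the blocking M_WAITOK send replaces the old log-and-continue error handling. The shape of the idea, with a simplified stand-in for union ctl_ha_msg and a stub transport (the action code value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct pr_action_msg {          /* simplified ctl_ha_msg pr part */
            uint8_t  action;        /* register, preempt, clear, ... */
            uint32_t residx;        /* initiator the action names */
            uint8_t  res_type;      /* SPR_TYPE_* for reserve/preempt */
            uint8_t  sa_res_key[8]; /* big-endian key */
    };

    /* Stub transport; in CTL this is ctl_ha_msg_send(CTL_HA_CHAN_CTL, ...). */
    static void
    ha_send(const struct pr_action_msg *m)
    {
            printf("mirror action %u for initiator %u\n",
                m->action, m->residx);
    }

    static void
    mirror_preempt(uint32_t residx, uint8_t type, const uint8_t key[8])
    {
            struct pr_action_msg m;

            memset(&m, 0, sizeof(m));
            m.action = 2;           /* hypothetical PREEMPT code */
            m.residx = residx;
            m.res_type = type;
            memcpy(m.sa_res_key, key, sizeof(m.sa_res_key));
            ha_send(&m);
    }

    int
    main(void)
    {
            const uint8_t key[8] = { 0, 0, 0, 0, 0, 0, 0, 1 };

            mirror_preempt(3, 1, key);
            return (0);
    }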
 
@@ -7753,15 +8081,14 @@
 int
 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
 {
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	int retval;
-	int isc_retval;
 	u_int32_t param_len;
 	struct scsi_per_res_out *cdb;
-	struct ctl_lun *lun;
 	struct scsi_per_res_out_parms* param;
-	struct ctl_softc *softc;
 	uint32_t residx;
-	uint64_t res_key, sa_res_key;
+	uint64_t res_key, sa_res_key, key;
 	uint8_t type;
 	union ctl_ha_msg persis_io;
 	int    i;
@@ -7768,13 +8095,9 @@
 
 	CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
 
+	cdb = (struct scsi_per_res_out *)ctsio->cdb;
 	retval = CTL_RETVAL_COMPLETE;
 
-	softc = control_softc;
-
-	cdb = (struct scsi_per_res_out *)ctsio->cdb;
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 	/*
 	 * We only support whole-LUN scope.  The scope & type are ignored for
 	 * register, register and ignore existing key and clear.
@@ -7807,28 +8130,6 @@
 		}
 	}
 
-	switch (cdb->action & SPRO_ACTION_MASK) {
-	case SPRO_REGISTER:
-	case SPRO_RESERVE:
-	case SPRO_RELEASE:
-	case SPRO_CLEAR:
-	case SPRO_PREEMPT:
-	case SPRO_REG_IGNO:
-		break;
-	case SPRO_REG_MOVE:
-	case SPRO_PRE_ABO:
-	default:
-		ctl_set_invalid_field(/*ctsio*/ ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 0);
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-		break; /* NOTREACHED */
-	}
-
 	param_len = scsi_4btoul(cdb->length);
 
 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
@@ -7835,7 +8136,6 @@
 		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
 		ctsio->kern_data_len = param_len;
 		ctsio->kern_total_len = param_len;
-		ctsio->kern_data_resid = 0;
 		ctsio->kern_rel_offset = 0;
 		ctsio->kern_sg_entries = 0;
 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -7847,7 +8147,7 @@
 
 	param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
 
-	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
 	res_key = scsi_8btou64(param->res_key.key);
 	sa_res_key = scsi_8btou64(param->serv_act_res_key);
 
@@ -7856,18 +8156,15 @@
 	 * This must be done for all other service actions
 	 */
 	if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
-		mtx_lock(&softc->ctl_lock);
-		if (lun->per_res[residx].registered) {
-		    if (memcmp(param->res_key.key,
-			       lun->per_res[residx].res_key.key,
-			       ctl_min(sizeof(param->res_key),
-			       sizeof(lun->per_res[residx].res_key))) != 0) {
+		mtx_lock(&lun->lun_lock);
+		if ((key = ctl_get_prkey(lun, residx)) != 0) {
+			if (res_key != key) {
 				/*
 				 * The current key passed in doesn't match
 				 * the one the initiator previously
 				 * registered.
 				 */
-				mtx_unlock(&softc->ctl_lock);
+				mtx_unlock(&lun->lun_lock);
 				free(ctsio->kern_data_ptr, M_CTL);
 				ctl_set_reservation_conflict(ctsio);
 				ctl_done((union ctl_io *)ctsio);
@@ -7877,7 +8174,7 @@
 			/*
 			 * We are not registered
 			 */
-			mtx_unlock(&softc->ctl_lock);
+			mtx_unlock(&lun->lun_lock);
 			free(ctsio->kern_data_ptr, M_CTL);
 			ctl_set_reservation_conflict(ctsio);
 			ctl_done((union ctl_io *)ctsio);
@@ -7887,13 +8184,13 @@
 			 * We are not registered and trying to register but
 			 * the register key isn't zero.
 			 */
-			mtx_unlock(&softc->ctl_lock);
+			mtx_unlock(&lun->lun_lock);
 			free(ctsio->kern_data_ptr, M_CTL);
 			ctl_set_reservation_conflict(ctsio);
 			ctl_done((union ctl_io *)ctsio);
 			return (CTL_RETVAL_COMPLETE);
 		}
-		mtx_unlock(&softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 	}
 
 	switch (cdb->action & SPRO_ACTION_MASK) {
@@ -7932,7 +8229,7 @@
 			return (CTL_RETVAL_COMPLETE);
 		}
 
-		mtx_lock(&softc->ctl_lock);
+		mtx_lock(&lun->lun_lock);
 
 		/*
 		 * The initiator wants to clear the
@@ -7942,14 +8239,12 @@
 			if ((res_key == 0
 			  && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
 			 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
-			  && !lun->per_res[residx].registered)) {
-				mtx_unlock(&softc->ctl_lock);
+			  && ctl_get_prkey(lun, residx) == 0)) {
+				mtx_unlock(&lun->lun_lock);
 				goto done;
 			}
 
-			lun->per_res[residx].registered = 0;
-			memset(&lun->per_res[residx].res_key,
-			       0, sizeof(lun->per_res[residx].res_key));
+			ctl_clr_prkey(lun, residx);
 			lun->pr_key_count--;
 
 			if (residx == lun->pr_res_idx) {
@@ -7956,9 +8251,9 @@
 				lun->flags &= ~CTL_LUN_PR_RESERVED;
 				lun->pr_res_idx = CTL_PR_NO_RESERVATION;
 
-				if ((lun->res_type == SPR_TYPE_WR_EX_RO
-				  || lun->res_type == SPR_TYPE_EX_AC_RO)
-				 && lun->pr_key_count) {
+				if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
+				     lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
+				    lun->pr_key_count) {
 					/*
 					 * If the reservation is a registrants
 					 * only type we need to generate a UA
@@ -7967,35 +8262,30 @@
 					 * RELEASED
 					 */
 
-					for (i = 0; i < CTL_MAX_INITIATORS;i++){
-						if (lun->per_res[
-						    i+persis_offset].registered
-						    == 0)
+					for (i = softc->init_min; i < softc->init_max; i++){
+						if (ctl_get_prkey(lun, i) == 0)
 							continue;
-						lun->pending_sense[i
-							].ua_pending |=
-							CTL_UA_RES_RELEASE;
+						ctl_est_ua(lun, i,
+						    CTL_UA_RES_RELEASE);
 					}
 				}
-				lun->res_type = 0;
+				lun->pr_res_type = 0;
 			} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
 				if (lun->pr_key_count==0) {
 					lun->flags &= ~CTL_LUN_PR_RESERVED;
-					lun->res_type = 0;
+					lun->pr_res_type = 0;
 					lun->pr_res_idx = CTL_PR_NO_RESERVATION;
 				}
 			}
+			lun->pr_generation++;
+			mtx_unlock(&lun->lun_lock);
+
 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
 			persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
 			persis_io.pr.pr_info.residx = residx;
-			if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-			     &persis_io, sizeof(persis_io), 0 )) >
-			     CTL_HA_STATUS_SUCCESS) {
-				printf("CTL:Persis Out error returned from "
-				       "ctl_ha_msg_send %d\n", isc_retval);
-			}
-			mtx_unlock(&softc->ctl_lock);
+			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+			    sizeof(persis_io.pr), M_WAITOK);
 		} else /* sa_res_key != 0 */ {
 
 			/*
@@ -8002,16 +8292,13 @@
 			 * If we aren't registered currently then increment
 			 * the key count and set the registered flag.
 			 */
-			if (!lun->per_res[residx].registered) {
+			ctl_alloc_prkey(lun, residx);
+			if (ctl_get_prkey(lun, residx) == 0)
 				lun->pr_key_count++;
-				lun->per_res[residx].registered = 1;
-			}
+			ctl_set_prkey(lun, residx, sa_res_key);
+			lun->pr_generation++;
+			mtx_unlock(&lun->lun_lock);
 
-			memcpy(&lun->per_res[residx].res_key,
-			       param->serv_act_res_key,
-			       ctl_min(sizeof(param->serv_act_res_key),
-			       sizeof(lun->per_res[residx].res_key)));
-
 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
 			persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
@@ -8019,15 +8306,9 @@
 			memcpy(persis_io.pr.pr_info.sa_res_key,
 			       param->serv_act_res_key,
 			       sizeof(param->serv_act_res_key));
-			mtx_unlock(&softc->ctl_lock);
-			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-			     &persis_io, sizeof(persis_io), 0)) >
-			     CTL_HA_STATUS_SUCCESS) {
-				printf("CTL:Persis Out error returned from "
-				       "ctl_ha_msg_send %d\n", isc_retval);
-			}
+			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+			    sizeof(persis_io.pr), M_WAITOK);
 		}
-		lun->PRGeneration++;
 
 		break;
 	}
@@ -8035,7 +8316,7 @@
 #if 0
                 printf("Reserve executed type %d\n", type);
 #endif
-		mtx_lock(&softc->ctl_lock);
+		mtx_lock(&lun->lun_lock);
 		if (lun->flags & CTL_LUN_PR_RESERVED) {
 			/*
 			 * if this isn't the reservation holder and it's
@@ -8044,13 +8325,14 @@
 			 */
 			if ((lun->pr_res_idx != residx
 			  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
-			 || lun->res_type != type) {
-				mtx_unlock(&softc->ctl_lock);
+			 || lun->pr_res_type != type) {
+				mtx_unlock(&lun->lun_lock);
 				free(ctsio->kern_data_ptr, M_CTL);
 				ctl_set_reservation_conflict(ctsio);
 				ctl_done((union ctl_io *)ctsio);
 				return (CTL_RETVAL_COMPLETE);
 			}
+			mtx_unlock(&lun->lun_lock);
 		} else /* create a reservation */ {
 			/*
 			 * If it's not an "all registrants" type record
@@ -8063,9 +8345,9 @@
 				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
 
 			lun->flags |= CTL_LUN_PR_RESERVED;
-			lun->res_type = type;
+			lun->pr_res_type = type;
 
-			mtx_unlock(&softc->ctl_lock);
+			mtx_unlock(&lun->lun_lock);
 
 			/* send msg to other side */
 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
@@ -8073,20 +8355,16 @@
 			persis_io.pr.pr_info.action = CTL_PR_RESERVE;
 			persis_io.pr.pr_info.residx = lun->pr_res_idx;
 			persis_io.pr.pr_info.res_type = type;
-			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-			     &persis_io, sizeof(persis_io), 0)) >
-			     CTL_HA_STATUS_SUCCESS) {
-				printf("CTL:Persis Out error returned from "
-				       "ctl_ha_msg_send %d\n", isc_retval);
-			}
+			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+			    sizeof(persis_io.pr), M_WAITOK);
 		}
 		break;
 
 	case SPRO_RELEASE:
-		mtx_lock(&softc->ctl_lock);
+		mtx_lock(&lun->lun_lock);
 		if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
 			/* No reservation exists return good status */
-			mtx_unlock(&softc->ctl_lock);
+			mtx_unlock(&lun->lun_lock);
 			goto done;
 		}
 		/*
@@ -8098,12 +8376,12 @@
 			 * not a res holder return good status but
 			 * do nothing
 			 */
-			mtx_unlock(&softc->ctl_lock);
+			mtx_unlock(&lun->lun_lock);
 			goto done;
 		}
 
-		if (lun->res_type != type) {
-			mtx_unlock(&softc->ctl_lock);
+		if (lun->pr_res_type != type) {
+			mtx_unlock(&lun->lun_lock);
 			free(ctsio->kern_data_ptr, M_CTL);
 			ctl_set_illegal_pr_release(ctsio);
 			ctl_done((union ctl_io *)ctsio);
@@ -8113,82 +8391,57 @@
 		/* okay to release */
 		lun->flags &= ~CTL_LUN_PR_RESERVED;
 		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
-		lun->res_type = 0;
+		lun->pr_res_type = 0;
 
 		/*
-		 * if this isn't an exclusive access
-		 * res generate UA for all other
-		 * registrants.
+		 * If this isn't an exclusive access reservation and NUAR
+		 * is not set, generate UA for all other registrants.
 		 */
-		if (type != SPR_TYPE_EX_AC
-		 && type != SPR_TYPE_WR_EX) {
-			/*
-			 * temporarily unregister so we don't generate UA
-			 */
-			lun->per_res[residx].registered = 0;
-
-			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
-				if (lun->per_res[i+persis_offset].registered
-				    == 0)
+		if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX &&
+		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
+			for (i = softc->init_min; i < softc->init_max; i++) {
+				if (i == residx || ctl_get_prkey(lun, i) == 0)
 					continue;
-				lun->pending_sense[i].ua_pending |=
-					CTL_UA_RES_RELEASE;
+				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
 			}
+		}
+		mtx_unlock(&lun->lun_lock);
 
-			lun->per_res[residx].registered = 1;
-		}
-		mtx_unlock(&softc->ctl_lock);
 		/* Send msg to other side */
 		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
 		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
 		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
-		if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io,
-		     sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
-			printf("CTL:Persis Out error returned from "
-			       "ctl_ha_msg_send %d\n", isc_retval);
-		}
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+		     sizeof(persis_io.pr), M_WAITOK);
 		break;
 
 	case SPRO_CLEAR:
 		/* send msg to other side */
 
-		mtx_lock(&softc->ctl_lock);
+		mtx_lock(&lun->lun_lock);
 		lun->flags &= ~CTL_LUN_PR_RESERVED;
-		lun->res_type = 0;
+		lun->pr_res_type = 0;
 		lun->pr_key_count = 0;
 		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
 
+		ctl_clr_prkey(lun, residx);
+		for (i = 0; i < CTL_MAX_INITIATORS; i++)
+			if (ctl_get_prkey(lun, i) != 0) {
+				ctl_clr_prkey(lun, i);
+				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
+			}
+		lun->pr_generation++;
+		mtx_unlock(&lun->lun_lock);
 
-		memset(&lun->per_res[residx].res_key,
-		       0, sizeof(lun->per_res[residx].res_key));
-		lun->per_res[residx].registered = 0;
-
-		for (i=0; i < 2*CTL_MAX_INITIATORS; i++)
-			if (lun->per_res[i].registered) {
-				if (!persis_offset && i < CTL_MAX_INITIATORS)
-					lun->pending_sense[i].ua_pending |=
-						CTL_UA_RES_PREEMPT;
-				else if (persis_offset && i >= persis_offset)
-					lun->pending_sense[i-persis_offset
-					    ].ua_pending |= CTL_UA_RES_PREEMPT;
-
-				memset(&lun->per_res[i].res_key,
-				       0, sizeof(struct scsi_per_res_key));
-				lun->per_res[i].registered = 0;
-			}
-		lun->PRGeneration++;
-		mtx_unlock(&softc->ctl_lock);
 		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
 		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
 		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
-		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
-		     sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
-			printf("CTL:Persis Out error returned from "
-			       "ctl_ha_msg_send %d\n", isc_retval);
-		}
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+		     sizeof(persis_io.pr), M_WAITOK);
 		break;
 
-	case SPRO_PREEMPT: {
+	case SPRO_PREEMPT:
+	case SPRO_PRE_ABO: {
 		int nretval;
 
 		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
@@ -8197,19 +8450,8 @@
 			return (CTL_RETVAL_COMPLETE);
 		break;
 	}
-	case SPRO_REG_MOVE:
-	case SPRO_PRE_ABO:
 	default:
-		free(ctsio->kern_data_ptr, M_CTL);
-		ctl_set_invalid_field(/*ctsio*/ ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 0);
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-		break; /* NOTREACHED */
+		panic("%s: Invalid PR type %#x", __func__, cdb->action);
 	}
 
 done:
@@ -8227,33 +8469,40 @@
  * in sync.
  */
 static void
-ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
+ctl_hndl_per_res_out_on_other_sc(union ctl_io *io)
 {
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg;
 	struct ctl_lun *lun;
-	struct ctl_softc *softc;
 	int i;
+	uint32_t residx, targ_lun;
 
-	softc = control_softc;
-
+	targ_lun = msg->hdr.nexus.targ_mapped_lun;
 	mtx_lock(&softc->ctl_lock);
-
-	lun = softc->ctl_luns[msg->hdr.nexus.targ_lun];
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		return;
+	}
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
+	if (lun->flags & CTL_LUN_DISABLED) {
+		mtx_unlock(&lun->lun_lock);
+		return;
+	}
+	residx = ctl_get_initindex(&msg->hdr.nexus);
 	switch(msg->pr.pr_info.action) {
 	case CTL_PR_REG_KEY:
-		if (!lun->per_res[msg->pr.pr_info.residx].registered) {
-			lun->per_res[msg->pr.pr_info.residx].registered = 1;
+		ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
+		if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0)
 			lun->pr_key_count++;
-		}
-		lun->PRGeneration++;
-		memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key,
-		       msg->pr.pr_info.sa_res_key,
-		       sizeof(struct scsi_per_res_key));
+		ctl_set_prkey(lun, msg->pr.pr_info.residx,
+		    scsi_8btou64(msg->pr.pr_info.sa_res_key));
+		lun->pr_generation++;
 		break;
 
 	case CTL_PR_UNREG_KEY:
-		lun->per_res[msg->pr.pr_info.residx].registered = 0;
-		memset(&lun->per_res[msg->pr.pr_info.residx].res_key,
-		       0, sizeof(struct scsi_per_res_key));
+		ctl_clr_prkey(lun, msg->pr.pr_info.residx);
 		lun->pr_key_count--;
 
 		/* XXX Need to see if the reservation has been released */
@@ -8262,9 +8511,9 @@
 			lun->flags &= ~CTL_LUN_PR_RESERVED;
 			lun->pr_res_idx = CTL_PR_NO_RESERVATION;
 
-			if ((lun->res_type == SPR_TYPE_WR_EX_RO
-			  || lun->res_type == SPR_TYPE_EX_AC_RO)
-			 && lun->pr_key_count) {
+			if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
+			     lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
+			    lun->pr_key_count) {
 				/*
 				 * If the reservation is a registrants
 				 * only type we need to generate a UA
@@ -8273,30 +8522,27 @@
 				 * RELEASED
 				 */
 
-				for (i = 0; i < CTL_MAX_INITIATORS; i++) {
-					if (lun->per_res[i+
-					    persis_offset].registered == 0)
+				for (i = softc->init_min; i < softc->init_max; i++) {
+					if (ctl_get_prkey(lun, i) == 0)
 						continue;
 
-					lun->pending_sense[i
-						].ua_pending |=
-						CTL_UA_RES_RELEASE;
+					ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
 				}
 			}
-			lun->res_type = 0;
+			lun->pr_res_type = 0;
 		} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
 			if (lun->pr_key_count==0) {
 				lun->flags &= ~CTL_LUN_PR_RESERVED;
-				lun->res_type = 0;
+				lun->pr_res_type = 0;
 				lun->pr_res_idx = CTL_PR_NO_RESERVATION;
 			}
 		}
-		lun->PRGeneration++;
+		lun->pr_generation++;
 		break;
 
 	case CTL_PR_RESERVE:
 		lun->flags |= CTL_LUN_PR_RESERVED;
-		lun->res_type = msg->pr.pr_info.res_type;
+		lun->pr_res_type = msg->pr.pr_info.res_type;
 		lun->pr_res_idx = msg->pr.pr_info.residx;
 
 		break;
@@ -8303,20 +8549,21 @@
 
 	case CTL_PR_RELEASE:
 		/*
-		 * if this isn't an exclusive access res generate UA for all
-		 * other registrants.
+		 * If this isn't an exclusive access reservation and NUAR
+		 * is not set, generate UA for all other registrants.
 		 */
-		if (lun->res_type != SPR_TYPE_EX_AC
-		 && lun->res_type != SPR_TYPE_WR_EX) {
-			for (i = 0; i < CTL_MAX_INITIATORS; i++)
-				if (lun->per_res[i+persis_offset].registered)
-					lun->pending_sense[i].ua_pending |=
-						CTL_UA_RES_RELEASE;
+		if (lun->pr_res_type != SPR_TYPE_EX_AC &&
+		    lun->pr_res_type != SPR_TYPE_WR_EX &&
+		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
+			for (i = softc->init_min; i < softc->init_max; i++) {
+				if (i == residx || ctl_get_prkey(lun, i) == 0)
+					continue;
+				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
+			}
 		}
 
 		lun->flags &= ~CTL_LUN_PR_RESERVED;
 		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
-		lun->res_type = 0;
+		lun->pr_res_type = 0;
 		break;
 
 	case CTL_PR_PREEMPT:
@@ -8324,74 +8571,38 @@
 		break;
 	case CTL_PR_CLEAR:
 		lun->flags &= ~CTL_LUN_PR_RESERVED;
-		lun->res_type = 0;
+		lun->pr_res_type = 0;
 		lun->pr_key_count = 0;
 		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
 
-		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
-			if (lun->per_res[i].registered == 0)
+		for (i=0; i < CTL_MAX_INITIATORS; i++) {
+			if (ctl_get_prkey(lun, i) == 0)
 				continue;
-			if (!persis_offset
-			 && i < CTL_MAX_INITIATORS)
-				lun->pending_sense[i].ua_pending |=
-					CTL_UA_RES_PREEMPT;
-			else if (persis_offset
-			      && i >= persis_offset)
-   				lun->pending_sense[i-persis_offset].ua_pending|=
-					CTL_UA_RES_PREEMPT;
-			memset(&lun->per_res[i].res_key, 0,
-			       sizeof(struct scsi_per_res_key));
-			lun->per_res[i].registered = 0;
+			ctl_clr_prkey(lun, i);
+			ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
 		}
-		lun->PRGeneration++;
+		lun->pr_generation++;
 		break;
 	}
 
-	mtx_unlock(&softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 }
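
The handler above also shows the new lock handoff: the LUN is validated under the global ctl_lock, lun_lock is taken before ctl_lock is released so the LUN cannot be torn down in between, and everything after runs under the per-LUN lock only. The same pattern in a user-space sketch with pthread mutexes:

    #include <pthread.h>
    #include <stddef.h>

    #define MAX_LUNS 16

    struct lun {
            pthread_mutex_t lock;
            int disabled;
            /* ... per-LUN state ... */
    };

    static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct lun *luns[MAX_LUNS];

    /* Returns the LUN with its own lock held, or NULL.  Taking
     * lun->lock before dropping global_lock is the handoff that keeps
     * the LUN alive between the two locks. */
    static struct lun *
    lookup_lun(unsigned idx)
    {
            struct lun *l;

            pthread_mutex_lock(&global_lock);
            if (idx >= MAX_LUNS || (l = luns[idx]) == NULL) {
                    pthread_mutex_unlock(&global_lock);
                    return (NULL);
            }
            pthread_mutex_lock(&l->lock);
            pthread_mutex_unlock(&global_lock);
            if (l->disabled) {
                    pthread_mutex_unlock(&l->lock);
                    return (NULL);
            }
            return (l);
    }

    int
    main(void)
    {
            return (lookup_lun(0) == NULL ? 0 : 1);  /* nothing configured */
    }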
 
 int
 ctl_read_write(struct ctl_scsiio *ctsio)
 {
-	struct ctl_lun *lun;
-	struct ctl_lba_len lbalen;
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct ctl_lba_len_flags *lbalen;
 	uint64_t lba;
 	uint32_t num_blocks;
-	int reladdr, fua, dpo, ebp;
-	int retval;
+	int flags, retval;
 	int isread;
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 	CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
 
-	reladdr = 0;
-	fua = 0;
-	dpo = 0;
-	ebp = 0;
-
-	retval = CTL_RETVAL_COMPLETE;
-
+	flags = 0;
 	isread = ctsio->cdb[0] == READ_6  || ctsio->cdb[0] == READ_10
 	      || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
-	if (lun->flags & CTL_LUN_PR_RESERVED && isread) {
-		uint32_t residx;
-
-		/*
-		 * XXX KDM need a lock here.
-		 */
-		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
-		if ((lun->res_type == SPR_TYPE_EX_AC
-		  && residx != lun->pr_res_idx)
-		 || ((lun->res_type == SPR_TYPE_EX_AC_RO
-		   || lun->res_type == SPR_TYPE_EX_AC_AR)
-		  && !lun->per_res[residx].registered)) {
-			ctl_set_reservation_conflict(ctsio);
-			ctl_done((union ctl_io *)ctsio);
-			return (CTL_RETVAL_COMPLETE);
-	        }
-	}
-
 	switch (ctsio->cdb[0]) {
 	case READ_6:
 	case WRITE_6: {
@@ -8415,18 +8626,10 @@
 		struct scsi_rw_10 *cdb;
 
 		cdb = (struct scsi_rw_10 *)ctsio->cdb;
-
-		if (cdb->byte2 & SRW10_RELADDR)
-			reladdr = 1;
 		if (cdb->byte2 & SRW10_FUA)
-			fua = 1;
+			flags |= CTL_LLF_FUA;
 		if (cdb->byte2 & SRW10_DPO)
-			dpo = 1;
-
-		if ((cdb->opcode == WRITE_10)
-		 && (cdb->byte2 & SRW10_EBP))
-			ebp = 1;
-
+			flags |= CTL_LLF_DPO;
 		lba = scsi_4btoul(cdb->addr);
 		num_blocks = scsi_2btoul(cdb->length);
 		break;
@@ -8435,17 +8638,9 @@
 		struct scsi_write_verify_10 *cdb;
 
 		cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
-
-		/*
-		 * XXX KDM we should do actual write verify support at some
-		 * point.  This is obviously fake, we're just translating
-		 * things to a write.  So we don't even bother checking the
-		 * BYTCHK field, since we don't do any verification.  If
-		 * the user asks for it, we'll just pretend we did it.
-		 */
+		flags |= CTL_LLF_FUA;
 		if (cdb->byte2 & SWV_DPO)
-			dpo = 1;
-
+			flags |= CTL_LLF_DPO;
 		lba = scsi_4btoul(cdb->addr);
 		num_blocks = scsi_2btoul(cdb->length);
 		break;
@@ -8455,13 +8650,10 @@
 		struct scsi_rw_12 *cdb;
 
 		cdb = (struct scsi_rw_12 *)ctsio->cdb;
-
-		if (cdb->byte2 & SRW12_RELADDR)
-			reladdr = 1;
 		if (cdb->byte2 & SRW12_FUA)
-			fua = 1;
+			flags |= CTL_LLF_FUA;
 		if (cdb->byte2 & SRW12_DPO)
-			dpo = 1;
+			flags |= CTL_LLF_DPO;
 		lba = scsi_4btoul(cdb->addr);
 		num_blocks = scsi_4btoul(cdb->length);
 		break;
@@ -8470,13 +8662,11 @@
 		struct scsi_write_verify_12 *cdb;
 
 		cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
-
+		flags |= CTL_LLF_FUA;
 		if (cdb->byte2 & SWV_DPO)
-			dpo = 1;
-		
+			flags |= CTL_LLF_DPO;
 		lba = scsi_4btoul(cdb->addr);
 		num_blocks = scsi_4btoul(cdb->length);
-
 		break;
 	}
 	case READ_16:
@@ -8484,26 +8674,46 @@
 		struct scsi_rw_16 *cdb;
 
 		cdb = (struct scsi_rw_16 *)ctsio->cdb;
-
-		if (cdb->byte2 & SRW12_RELADDR)
-			reladdr = 1;
 		if (cdb->byte2 & SRW12_FUA)
-			fua = 1;
+			flags |= CTL_LLF_FUA;
 		if (cdb->byte2 & SRW12_DPO)
-			dpo = 1;
-
+			flags |= CTL_LLF_DPO;
 		lba = scsi_8btou64(cdb->addr);
 		num_blocks = scsi_4btoul(cdb->length);
 		break;
 	}
+	case WRITE_ATOMIC_16: {
+		struct scsi_write_atomic_16 *cdb;
+
+		if (lun->be_lun->atomicblock == 0) {
+			ctl_set_invalid_opcode(ctsio);
+			ctl_done((union ctl_io *)ctsio);
+			return (CTL_RETVAL_COMPLETE);
+		}
+
+		cdb = (struct scsi_write_atomic_16 *)ctsio->cdb;
+		if (cdb->byte2 & SRW12_FUA)
+			flags |= CTL_LLF_FUA;
+		if (cdb->byte2 & SRW12_DPO)
+			flags |= CTL_LLF_DPO;
+		lba = scsi_8btou64(cdb->addr);
+		num_blocks = scsi_2btoul(cdb->length);
+		if (num_blocks > lun->be_lun->atomicblock) {
+			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+			    /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
+			    /*bit*/ 0);
+			ctl_done((union ctl_io *)ctsio);
+			return (CTL_RETVAL_COMPLETE);
+		}
+		break;
+	}
 	case WRITE_VERIFY_16: {
 		struct scsi_write_verify_16 *cdb;
 
 		cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
-
+		flags |= CTL_LLF_FUA;
 		if (cdb->byte2 & SWV_DPO)
-			dpo = 1;
-
+			flags |= CTL_LLF_DPO;
 		lba = scsi_8btou64(cdb->addr);
 		num_blocks = scsi_4btoul(cdb->length);
 		break;
@@ -8521,26 +8731,112 @@
 	}
 
 	/*
-	 * XXX KDM what do we do with the DPO and FUA bits?  FUA might be
-	 * interesting for us, but if RAIDCore is in write-back mode,
-	 * getting it to do write-through for a particular transaction may
-	 * not be possible.
+	 * The first check is to make sure we're in bounds, the second
+	 * check is to catch wrap-around problems.  If the lba + num blocks
+	 * is less than the lba, then we've wrapped around and the block
+	 * range is invalid anyway.
 	 */
+	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
+	 || ((lba + num_blocks) < lba)) {
+		ctl_set_lba_out_of_range(ctsio,
+		    MAX(lba, lun->be_lun->maxlba + 1));
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
 	/*
-	 * We don't support relative addressing.  That also requires
-	 * supporting linked commands, which we don't do.
+	 * According to SBC-3, a transfer length of 0 is not an error.
+	 * Note that this cannot happen with WRITE(6) or READ(6), since 0
+	 * translates to 256 blocks for those commands.
 	 */
-	if (reladdr != 0) {
-		ctl_set_invalid_field(ctsio,
-				      /*sks_valid*/ 1,
-				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 0);
+	if (num_blocks == 0) {
+		ctl_set_success(ctsio);
 		ctl_done((union ctl_io *)ctsio);
 		return (CTL_RETVAL_COMPLETE);
 	}
 
+	/* Set FUA and/or DPO if caches are disabled. */
+	if (isread) {
+		if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0)
+			flags |= CTL_LLF_FUA | CTL_LLF_DPO;
+	} else {
+		if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
+			flags |= CTL_LLF_FUA;
+	}
+
+	lbalen = (struct ctl_lba_len_flags *)
+	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+	lbalen->lba = lba;
+	lbalen->len = num_blocks;
+	lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;
+
+	ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
+	ctsio->kern_rel_offset = 0;
+
+	CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
+
+	retval = lun->backend->data_submit((union ctl_io *)ctsio);
+	return (retval);
+}
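
[A minimal standalone sketch follows (range_ok is an invented name, not a CTL function) of why ctl_read_write() above needs both comparisons in its bounds check.]

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of the two-part range check in ctl_read_write().  The second
     * comparison catches unsigned wrap-around: if lba + num_blocks
     * overflows past zero, the sum is smaller than lba, and the maxlba
     * comparison alone would wrongly accept the range.
     */
    static int
    range_ok(uint64_t lba, uint64_t num_blocks, uint64_t maxlba)
    {
            if (lba + num_blocks > maxlba + 1)      /* past end of device */
                    return (0);
            if (lba + num_blocks < lba)             /* 64-bit wrap-around */
                    return (0);
            return (1);
    }

    int
    main(void)
    {
            /* maxlba of 999 means LBAs 0 through 999 are valid. */
            printf("%d\n", range_ok(990, 10, 999));         /* 1: fits exactly */
            printf("%d\n", range_ok(990, 11, 999));         /* 0: one block too far */
            printf("%d\n", range_ok(UINT64_MAX, 2, 999));   /* 0: wrapped around */
            return (0);
    }
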
+
+static int
+ctl_cnw_cont(union ctl_io *io)
+{
+	struct ctl_lun *lun = CTL_LUN(io);
+	struct ctl_scsiio *ctsio;
+	struct ctl_lba_len_flags *lbalen;
+	int retval;
+
+	ctsio = &io->scsiio;
+	ctsio->io_hdr.status = CTL_STATUS_NONE;
+	ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
+	lbalen = (struct ctl_lba_len_flags *)
+	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+	lbalen->flags &= ~CTL_LLF_COMPARE;
+	lbalen->flags |= CTL_LLF_WRITE;
+
+	CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
+	retval = lun->backend->data_submit((union ctl_io *)ctsio);
+	return (retval);
+}
+
+int
+ctl_cnw(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct ctl_lba_len_flags *lbalen;
+	uint64_t lba;
+	uint32_t num_blocks;
+	int flags, retval;
+
+	CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
+
+	flags = 0;
+	switch (ctsio->cdb[0]) {
+	case COMPARE_AND_WRITE: {
+		struct scsi_compare_and_write *cdb;
+
+		cdb = (struct scsi_compare_and_write *)ctsio->cdb;
+		if (cdb->byte2 & SRW10_FUA)
+			flags |= CTL_LLF_FUA;
+		if (cdb->byte2 & SRW10_DPO)
+			flags |= CTL_LLF_DPO;
+		lba = scsi_8btou64(cdb->addr);
+		num_blocks = cdb->length;
+		break;
+	}
+	default:
+		/*
+		 * We got a command we don't support.  This shouldn't
+		 * happen; commands should be filtered out above us.
+		 */
+		ctl_set_invalid_opcode(ctsio);
+		ctl_done((union ctl_io *)ctsio);
+
+		return (CTL_RETVAL_COMPLETE);
+		break; /* NOTREACHED */
+	}
+
 	/*
 	 * The first check is to make sure we're in bounds, the second
 	 * check is to catch wrap-around problems.  If the lba + num blocks
@@ -8549,7 +8845,8 @@
 	 */
 	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
 	 || ((lba + num_blocks) < lba)) {
-		ctl_set_lba_out_of_range(ctsio);
+		ctl_set_lba_out_of_range(ctsio,
+		    MAX(lba, lun->be_lun->maxlba + 1));
 		ctl_done((union ctl_io *)ctsio);
 		return (CTL_RETVAL_COMPLETE);
 	}
@@ -8556,8 +8853,6 @@
 
 	/*
 	 * According to SBC-3, a transfer length of 0 is not an error.
-	 * Note that this cannot happen with WRITE(6) or READ(6), since 0
-	 * translates to 256 blocks for those commands.
 	 */
 	if (num_blocks == 0) {
 		ctl_set_success(ctsio);
@@ -8565,15 +8860,131 @@
 		return (CTL_RETVAL_COMPLETE);
 	}
 
-	lbalen.lba = lba;
-	lbalen.len = num_blocks;
-	memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen,
-	       sizeof(lbalen));
+	/* Set FUA if write cache is disabled. */
+	if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
+		flags |= CTL_LLF_FUA;
 
-	CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
+	ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
+	ctsio->kern_rel_offset = 0;
 
+	/*
+	 * Set the IO_CONT flag, so that if this I/O gets passed to
+	 * ctl_data_submit_done(), it'll get passed back to
+	 * ctl_cnw_cont() for further processing.
+	 */
+	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
+	ctsio->io_cont = ctl_cnw_cont;
+
+	lbalen = (struct ctl_lba_len_flags *)
+	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+	lbalen->lba = lba;
+	lbalen->len = num_blocks;
+	lbalen->flags = CTL_LLF_COMPARE | flags;
+
+	CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
 	retval = lun->backend->data_submit((union ctl_io *)ctsio);
+	return (retval);
+}
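
[Worth noting how COMPARE AND WRITE works above: kern_total_len is 2 * num_blocks * blocksize because the initiator supplies both the compare buffer and the write buffer, and the CTL_FLAG_IO_CONT / io_cont pair turns one CDB into two backend passes. Below is a hedged sketch of that continuation pattern; all names (fake_io, submit, cnw_cont) are invented for illustration.]

    #include <stdio.h>

    struct fake_io {
            int     flags;                          /* current phase */
            int     (*io_cont)(struct fake_io *);   /* continuation */
    };

    #define F_COMPARE       0x1
    #define F_WRITE         0x2

    static int
    submit(struct fake_io *io)
    {
            printf("submitting %s phase\n",
                (io->flags & F_COMPARE) ? "compare" : "write");
            return (0);
    }

    /* Mirrors ctl_cnw_cont() flipping CTL_LLF_COMPARE to CTL_LLF_WRITE. */
    static int
    cnw_cont(struct fake_io *io)
    {
            io->flags &= ~F_COMPARE;        /* compare succeeded... */
            io->flags |= F_WRITE;           /* ...now do the write */
            return (submit(io));
    }

    int
    main(void)
    {
            struct fake_io io = { F_COMPARE, cnw_cont };

            submit(&io);            /* phase 1: the compare */
            io.io_cont(&io);        /* phase 2: the write */
            return (0);
    }
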
 
+int
+ctl_verify(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct ctl_lba_len_flags *lbalen;
+	uint64_t lba;
+	uint32_t num_blocks;
+	int bytchk, flags;
+	int retval;
+
+	CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
+
+	bytchk = 0;
+	flags = CTL_LLF_FUA;
+	switch (ctsio->cdb[0]) {
+	case VERIFY_10: {
+		struct scsi_verify_10 *cdb;
+
+		cdb = (struct scsi_verify_10 *)ctsio->cdb;
+		if (cdb->byte2 & SVFY_BYTCHK)
+			bytchk = 1;
+		if (cdb->byte2 & SVFY_DPO)
+			flags |= CTL_LLF_DPO;
+		lba = scsi_4btoul(cdb->addr);
+		num_blocks = scsi_2btoul(cdb->length);
+		break;
+	}
+	case VERIFY_12: {
+		struct scsi_verify_12 *cdb;
+
+		cdb = (struct scsi_verify_12 *)ctsio->cdb;
+		if (cdb->byte2 & SVFY_BYTCHK)
+			bytchk = 1;
+		if (cdb->byte2 & SVFY_DPO)
+			flags |= CTL_LLF_DPO;
+		lba = scsi_4btoul(cdb->addr);
+		num_blocks = scsi_4btoul(cdb->length);
+		break;
+	}
+	case VERIFY_16: {
+		struct scsi_rw_16 *cdb;
+
+		cdb = (struct scsi_rw_16 *)ctsio->cdb;
+		if (cdb->byte2 & SVFY_BYTCHK)
+			bytchk = 1;
+		if (cdb->byte2 & SVFY_DPO)
+			flags |= CTL_LLF_DPO;
+		lba = scsi_8btou64(cdb->addr);
+		num_blocks = scsi_4btoul(cdb->length);
+		break;
+	}
+	default:
+		/*
+		 * We got a command we don't support.  This shouldn't
+		 * happen; commands should be filtered out above us.
+		 */
+		ctl_set_invalid_opcode(ctsio);
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	/*
+	 * The first check is to make sure we're in bounds, the second
+	 * check is to catch wrap-around problems.  If the lba + num blocks
+	 * is less than the lba, then we've wrapped around and the block
+	 * range is invalid anyway.
+	 */
+	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
+	 || ((lba + num_blocks) < lba)) {
+		ctl_set_lba_out_of_range(ctsio,
+		    MAX(lba, lun->be_lun->maxlba + 1));
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	/*
+	 * According to SBC-3, a transfer length of 0 is not an error.
+	 */
+	if (num_blocks == 0) {
+		ctl_set_success(ctsio);
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	lbalen = (struct ctl_lba_len_flags *)
+	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+	lbalen->lba = lba;
+	lbalen->len = num_blocks;
+	if (bytchk) {
+		lbalen->flags = CTL_LLF_COMPARE | flags;
+		ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
+	} else {
+		lbalen->flags = CTL_LLF_VERIFY | flags;
+		ctsio->kern_total_len = 0;
+	}
+	ctsio->kern_rel_offset = 0;
+
+	CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
+	retval = lun->backend->data_submit((union ctl_io *)ctsio);
 	return (retval);
 }
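
[The BYTCHK split above determines whether a data-out phase exists at all. A tiny standalone sketch of the resulting transfer length (plain C, no CTL types):]

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t num_blocks = 16, blocksize = 512;
            int bytchk = 1;

            /*
             * BYTCHK=1: the initiator supplies data to compare against the
             * medium (CTL_LLF_COMPARE), so a full transfer is expected.
             * BYTCHK=0: the target merely reads and checks the medium
             * (CTL_LLF_VERIFY), so no data phase is needed.
             */
            uint32_t total = bytchk ? num_blocks * blocksize : 0;

            printf("expected data-out length: %u bytes\n", (unsigned)total);
            return (0);
    }
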
 
@@ -8580,31 +8991,37 @@
 int
 ctl_report_luns(struct ctl_scsiio *ctsio)
 {
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_port *port = CTL_PORT(ctsio);
+	struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio);
 	struct scsi_report_luns *cdb;
 	struct scsi_report_luns_data *lun_data;
-	struct ctl_lun *lun, *request_lun;
-	int num_luns, retval;
+	int num_filled, num_luns, num_port_luns, retval;
 	uint32_t alloc_len, lun_datalen;
-	int num_filled, well_known;
-	uint32_t initidx;
+	uint32_t initidx, targ_lun_id, lun_id;
 
 	retval = CTL_RETVAL_COMPLETE;
-	well_known = 0;
-
 	cdb = (struct scsi_report_luns *)ctsio->cdb;
 
 	CTL_DEBUG_PRINT(("ctl_report_luns\n"));
 
-	mtx_lock(&control_softc->ctl_lock);
-	num_luns = control_softc->num_luns;
-	mtx_unlock(&control_softc->ctl_lock);
+	num_luns = 0;
+	num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS;
+	mtx_lock(&softc->ctl_lock);
+	for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) {
+		if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX)
+			num_luns++;
+	}
+	mtx_unlock(&softc->ctl_lock);
 
 	switch (cdb->select_report) {
 	case RPL_REPORT_DEFAULT:
 	case RPL_REPORT_ALL:
+	case RPL_REPORT_NONSUBSID:
 		break;
 	case RPL_REPORT_WELLKNOWN:
-		well_known = 1;
+	case RPL_REPORT_ADMIN:
+	case RPL_REPORT_CONGLOM:
 		num_luns = 0;
 		break;
 	default:
@@ -8637,9 +9054,6 @@
 		return (retval);
 	}
 
-	request_lun = (struct ctl_lun *)
-		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 	lun_datalen = sizeof(*lun_data) +
 		(num_luns * sizeof(struct scsi_report_luns_lundata));
 
@@ -8647,64 +9061,22 @@
 	lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
 	ctsio->kern_sg_entries = 0;
 
-	if (lun_datalen < alloc_len) {
-		ctsio->residual = alloc_len - lun_datalen;
-		ctsio->kern_data_len = lun_datalen;
-		ctsio->kern_total_len = lun_datalen;
-	} else {
-		ctsio->residual = 0;
-		ctsio->kern_data_len = alloc_len;
-		ctsio->kern_total_len = alloc_len;
-	}
-	ctsio->kern_data_resid = 0;
-	ctsio->kern_rel_offset = 0;
-	ctsio->kern_sg_entries = 0;
-
 	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
 
-	/*
-	 * We set this to the actual data length, regardless of how much
-	 * space we actually have to return results.  If the user looks at
-	 * this value, he'll know whether or not he allocated enough space
-	 * and reissue the command if necessary.  We don't support well
-	 * known logical units, so if the user asks for that, return none.
-	 */
-	scsi_ulto4b(lun_datalen - 8, lun_data->length);
+	mtx_lock(&softc->ctl_lock);
+	for (targ_lun_id = 0, num_filled = 0;
+	    targ_lun_id < num_port_luns && num_filled < num_luns;
+	    targ_lun_id++) {
+		lun_id = ctl_lun_map_from_port(port, targ_lun_id);
+		if (lun_id == UINT32_MAX)
+			continue;
+		lun = softc->ctl_luns[lun_id];
+		if (lun == NULL)
+			continue;
 
-	mtx_lock(&control_softc->ctl_lock);
-	for (num_filled = 0, lun = STAILQ_FIRST(&control_softc->lun_list);
-	     (lun != NULL) && (num_filled < num_luns);
-	     lun = STAILQ_NEXT(lun, links)) {
+		be64enc(lun_data->luns[num_filled++].lundata,
+		    ctl_encode_lun(targ_lun_id));
 
-		if (lun->lun <= 0xff) {
-			/*
-			 * Peripheral addressing method, bus number 0.
-			 */
-			lun_data->luns[num_filled].lundata[0] =
-				RPL_LUNDATA_ATYP_PERIPH;
-			lun_data->luns[num_filled].lundata[1] = lun->lun;
-			num_filled++;
-		} else if (lun->lun <= 0x3fff) {
-			/*
-			 * Flat addressing method.
-			 */
-			lun_data->luns[num_filled].lundata[0] =
-				RPL_LUNDATA_ATYP_FLAT |
-				(lun->lun & RPL_LUNDATA_FLAT_LUN_MASK);
-#ifdef OLDCTLHEADERS
-				(SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) |
-				(lun->lun & SRLD_BUS_LUN_MASK);
-#endif
-			lun_data->luns[num_filled].lundata[1] =
-#ifdef OLDCTLHEADERS
-				lun->lun >> SRLD_BUS_LUN_BITS;
-#endif
-				lun->lun >> RPL_LUNDATA_FLAT_LUN_BITS;
-			num_filled++;
-		} else {
-			printf("ctl_report_luns: bogus LUN number %jd, "
-			       "skipping\n", (intmax_t)lun->lun);
-		}
 		/*
 		 * According to SPC-3, rev 14 section 6.21:
 		 *
@@ -8721,21 +9093,42 @@
 		 * case, we shouldn't clear any pending lun change unit
 		 * attention.
 		 */
-		if (request_lun != NULL)
-			lun->pending_sense[initidx].ua_pending &=
-				~CTL_UA_LUN_CHANGE;
+		if (request_lun != NULL) {
+			mtx_lock(&lun->lun_lock);
+			ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE);
+			mtx_unlock(&lun->lun_lock);
+		}
 	}
-	mtx_unlock(&control_softc->ctl_lock);
+	mtx_unlock(&softc->ctl_lock);
 
 	/*
+	 * It's quite possible that we've returned fewer LUNs than we allocated
+	 * space for.  Trim it.
+	 */
+	lun_datalen = sizeof(*lun_data) +
+		(num_filled * sizeof(struct scsi_report_luns_lundata));
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(lun_datalen, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	/*
+	 * We set this to the actual data length, regardless of how much
+	 * space we actually have to return results.  If the user looks at
+	 * this value, he'll know whether or not he allocated enough space
+	 * and reissue the command if necessary.  We don't support well
+	 * known logical units, so if the user asks for that, return none.
+	 */
+	scsi_ulto4b(lun_datalen - 8, lun_data->length);
+
+	/*
 	 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
 	 * this request.
 	 */
-	ctsio->scsi_status = SCSI_STATUS_OK;
-
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
 	ctl_datamove((union ctl_io *)ctsio);
-
 	return (retval);
 }
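
[The rewritten loop above delegates address-method encoding to ctl_encode_lun() plus be64enc() instead of building the lundata bytes by hand. A hedged approximation of that encoding (encode_lun is an invented name; only the peripheral and flat methods from the removed code are shown):]

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t
    encode_lun(uint32_t lun)
    {
            if (lun < 256) {
                    /* Peripheral addressing: method 00b, bus 0, 8-bit LUN. */
                    return ((uint64_t)lun << 48);
            }
            /* Flat addressing: method 01b in the top bits, 14-bit LUN. */
            return (((uint64_t)(0x4000 | (lun & 0x3fff))) << 48);
    }

    int
    main(void)
    {
            printf("LUN 5:   %#018jx\n", (uintmax_t)encode_lun(5));
            printf("LUN 300: %#018jx\n", (uintmax_t)encode_lun(300));
            return (0);
    }
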
 
@@ -8742,17 +9135,19 @@
 int
 ctl_request_sense(struct ctl_scsiio *ctsio)
 {
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_request_sense *cdb;
-	struct scsi_sense_data *sense_ptr;
-	struct ctl_lun *lun;
+	struct scsi_sense_data *sense_ptr, *ps;
 	uint32_t initidx;
 	int have_error;
+	u_int sense_len = SSD_FULL_SIZE;
 	scsi_sense_data_type sense_format;
+	ctl_ua_type ua_type;
+	uint8_t asc = 0, ascq = 0;
 
 	cdb = (struct scsi_request_sense *)ctsio->cdb;
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 	CTL_DEBUG_PRINT(("ctl_request_sense\n"));
 
 	/*
@@ -8766,6 +9161,7 @@
 	ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
 	sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
 	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
 
 	/*
 	 * struct scsi_sense_data, which is currently set to 256 bytes, is
@@ -8772,19 +9168,24 @@
 	 * larger than the largest allowed value for the length field in the
 	 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
 	 */
-	ctsio->residual = 0;
 	ctsio->kern_data_len = cdb->length;
 	ctsio->kern_total_len = cdb->length;
 
-	ctsio->kern_data_resid = 0;
-	ctsio->kern_rel_offset = 0;
-	ctsio->kern_sg_entries = 0;
-
 	/*
 	 * If we don't have a LUN, we don't have any pending sense.
 	 */
-	if (lun == NULL)
-		goto no_sense;
+	if (lun == NULL ||
+	    ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+	     softc->ha_link < CTL_HA_LINK_UNKNOWN)) {
+		/* "Logical unit not supported" */
+		ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format,
+		    /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+		    /*asc*/ 0x25,
+		    /*ascq*/ 0x00,
+		    SSD_ELEM_NONE);
+		goto send;
+	}
 
 	have_error = 0;
 	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
@@ -8792,8 +9193,11 @@
 	 * Check for pending sense, and then for pending unit attentions.
 	 * Pending sense gets returned first, then pending unit attentions.
 	 */
-	mtx_lock(&lun->ctl_softc->ctl_lock);
-	if (ctl_is_set(lun->have_ca, initidx)) {
+	mtx_lock(&lun->lun_lock);
+	ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
+	if (ps != NULL)
+		ps += initidx % CTL_MAX_INIT_PER_PORT;
+	if (ps != NULL && ps->error_code != 0) {
 		scsi_sense_data_type stored_format;
 
 		/*
@@ -8800,8 +9204,7 @@
 		 * Check to see which sense format was used for the stored
 		 * sense data.
 		 */
-		stored_format = scsi_sense_type(
-		    &lun->pending_sense[initidx].sense);
+		stored_format = scsi_sense_type(ps);
 
 		/*
 		 * If the user requested a different sense format than the
@@ -8816,81 +9219,48 @@
 		if ((stored_format == SSD_TYPE_FIXED)
 		 && (sense_format == SSD_TYPE_DESC))
 			ctl_sense_to_desc((struct scsi_sense_data_fixed *)
-			    &lun->pending_sense[initidx].sense,
-			    (struct scsi_sense_data_desc *)sense_ptr);
+			    ps, (struct scsi_sense_data_desc *)sense_ptr);
 		else if ((stored_format == SSD_TYPE_DESC)
 		      && (sense_format == SSD_TYPE_FIXED))
 			ctl_sense_to_fixed((struct scsi_sense_data_desc *)
-			    &lun->pending_sense[initidx].sense,
-			    (struct scsi_sense_data_fixed *)sense_ptr);
+			    ps, (struct scsi_sense_data_fixed *)sense_ptr);
 		else
-			memcpy(sense_ptr, &lun->pending_sense[initidx].sense,
-			       ctl_min(sizeof(*sense_ptr),
-			       sizeof(lun->pending_sense[initidx].sense)));
+			memcpy(sense_ptr, ps, sizeof(*sense_ptr));
 
-		ctl_clear_mask(lun->have_ca, initidx);
+		ps->error_code = 0;
 		have_error = 1;
-	} else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) {
-		ctl_ua_type ua_type;
-
-		ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending,
-				       sense_ptr, sense_format);
-		if (ua_type != CTL_UA_NONE) {
+	} else {
+		ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len,
+		    sense_format);
+		if (ua_type != CTL_UA_NONE)
 			have_error = 1;
-			/* We're reporting this UA, so clear it */
-			lun->pending_sense[initidx].ua_pending &= ~ua_type;
-		}
 	}
-	mtx_unlock(&lun->ctl_softc->ctl_lock);
-
-	/*
-	 * We already have a pending error, return it.
-	 */
-	if (have_error != 0) {
+	if (have_error == 0) {
 		/*
-		 * We report the SCSI status as OK, since the status of the
-		 * request sense command itself is OK.
+		 * Report informational exception if have one and allowed.
 		 */
-		ctsio->scsi_status = SCSI_STATUS_OK;
-
-		/*
-		 * We report 0 for the sense length, because we aren't doing
-		 * autosense in this case.  We're reporting sense as
-		 * parameter data.
-		 */
-		ctsio->sense_len = 0;
-
-		ctsio->be_move_done = ctl_config_move_done;
-		ctl_datamove((union ctl_io *)ctsio);
-
-		return (CTL_RETVAL_COMPLETE);
+		if (lun->MODE_IE.mrie != SIEP_MRIE_NO) {
+			asc = lun->ie_asc;
+			ascq = lun->ie_ascq;
+		}
+		ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format,
+		    /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_NO_SENSE,
+		    /*asc*/ asc,
+		    /*ascq*/ ascq,
+		    SSD_ELEM_NONE);
 	}
+	mtx_unlock(&lun->lun_lock);
 
-no_sense:
-
+send:
 	/*
-	 * No sense information to report, so we report that everything is
-	 * okay.
+	 * We report the SCSI status as OK, since the status of the command
+	 * itself is OK.  We're reporting sense as parameter data.
 	 */
-	ctl_set_sense_data(sense_ptr,
-			   lun,
-			   sense_format,
-			   /*current_error*/ 1,
-			   /*sense_key*/ SSD_KEY_NO_SENSE,
-			   /*asc*/ 0x00,
-			   /*ascq*/ 0x00,
-			   SSD_ELEM_NONE);
-
-	ctsio->scsi_status = SCSI_STATUS_OK;
-
-	/*
-	 * We report 0 for the sense length, because we aren't doing
-	 * autosense in this case.  We're reporting sense as parameter data.
-	 */
-	ctsio->sense_len = 0;
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
 	ctl_datamove((union ctl_io *)ctsio);
-
 	return (CTL_RETVAL_COMPLETE);
 }
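
[On the format handling above: the stored sense is converted only when the initiator asked (via the DESC bit in the REQUEST SENSE CDB) for the other representation. A hedged decision-table sketch; the enum and function names are invented:]

    #include <stdio.h>

    enum fmt { FIXED, DESC };

    /* Mirrors the stored_format/sense_format branch in ctl_request_sense(). */
    static const char *
    pick_action(enum fmt stored, enum fmt wanted)
    {
            if (stored == FIXED && wanted == DESC)
                    return ("convert fixed -> descriptor");
            if (stored == DESC && wanted == FIXED)
                    return ("convert descriptor -> fixed");
            return ("copy as-is");
    }

    int
    main(void)
    {
            printf("%s\n", pick_action(FIXED, DESC));
            printf("%s\n", pick_action(DESC, FIXED));
            printf("%s\n", pick_action(FIXED, FIXED));
            return (0);
    }
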
 
@@ -8897,68 +9267,34 @@
 int
 ctl_tur(struct ctl_scsiio *ctsio)
 {
-	struct ctl_lun *lun;
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 	CTL_DEBUG_PRINT(("ctl_tur\n"));
 
-	if (lun == NULL)
-		return (-EINVAL);
-
-	ctsio->scsi_status = SCSI_STATUS_OK;
-	ctsio->io_hdr.status = CTL_SUCCESS;
-
+	ctl_set_success(ctsio);
 	ctl_done((union ctl_io *)ctsio);
 
 	return (CTL_RETVAL_COMPLETE);
 }
 
-#ifdef notyet
+/*
+ * SCSI VPD page 0x00, the Supported VPD Pages page.
+ */
 static int
-ctl_cmddt_inquiry(struct ctl_scsiio *ctsio)
-{
-
-}
-#endif
-
-static int
 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
 {
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_vpd_supported_pages *pages;
 	int sup_page_size;
-	struct ctl_lun *lun;
+	int p;
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
-	sup_page_size = sizeof(struct scsi_vpd_supported_pages) +
-		SCSI_EVPD_NUM_SUPPORTED_PAGES;
-	/*
-	 * XXX KDM GFP_???  We probably don't want to wait here,
-	 * unless we end up having a process/thread context.
-	 */
+	sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
+	    SCSI_EVPD_NUM_SUPPORTED_PAGES;
 	ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
-	if (ctsio->kern_data_ptr == NULL) {
-		ctsio->io_hdr.status = CTL_SCSI_ERROR;
-		ctsio->scsi_status = SCSI_STATUS_BUSY;
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-	}
 	pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
-	ctsio->kern_sg_entries = 0;
-
-	if (sup_page_size < alloc_len) {
-		ctsio->residual = alloc_len - sup_page_size;
-		ctsio->kern_data_len = sup_page_size;
-		ctsio->kern_total_len = sup_page_size;
-	} else {
-		ctsio->residual = 0;
-		ctsio->kern_data_len = alloc_len;
-		ctsio->kern_total_len = alloc_len;
-	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(sup_page_size, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
 
 	/*
 	 * The control device is always connected.  The disk device, on the
@@ -8971,56 +9307,55 @@
 	else
 		pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
 
-	pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES;
+	p = 0;
 	/* Supported VPD pages */
-	pages->page_list[0] = SVPD_SUPPORTED_PAGES;
+	pages->page_list[p++] = SVPD_SUPPORTED_PAGES;
 	/* Serial Number */
-	pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER;
+	pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER;
 	/* Device Identification */
-	pages->page_list[2] = SVPD_DEVICE_ID;
+	pages->page_list[p++] = SVPD_DEVICE_ID;
+	/* Extended INQUIRY Data */
+	pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA;
+	/* Mode Page Policy */
+	pages->page_list[p++] = SVPD_MODE_PAGE_POLICY;
+	/* SCSI Ports */
+	pages->page_list[p++] = SVPD_SCSI_PORTS;
+	/* Third-party Copy */
+	pages->page_list[p++] = SVPD_SCSI_TPC;
+	if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
+		/* Block limits */
+		pages->page_list[p++] = SVPD_BLOCK_LIMITS;
+		/* Block Device Characteristics */
+		pages->page_list[p++] = SVPD_BDC;
+		/* Logical Block Provisioning */
+		pages->page_list[p++] = SVPD_LBP;
+	}
+	pages->length = p;
 
-	ctsio->scsi_status = SCSI_STATUS_OK;
-
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
 	ctl_datamove((union ctl_io *)ctsio);
-
 	return (CTL_RETVAL_COMPLETE);
 }
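
[From the initiator's side, page 0x00 as built above is the index used to discover every other VPD page. A hedged sketch of that discovery loop; fetch_vpd is a placeholder for a real INQUIRY with EVPD=1, and the sample payload is hand-written:]

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder: a real implementation would issue INQUIRY, EVPD=1. */
    static int
    fetch_vpd(uint8_t page, uint8_t *buf, int len)
    {
            (void)buf;
            (void)len;
            printf("fetching VPD page %#04x\n", page);
            return (0);
    }

    int
    main(void)
    {
            /* Byte 0: device; byte 1: page code 0x00; byte 3: list length. */
            uint8_t page0[] = { 0x00, 0x00, 0x00, 3, 0x00, 0x80, 0x83 };
            uint8_t buf[255];
            int i;

            for (i = 0; i < page0[3]; i++)
                    fetch_vpd(page0[4 + i], buf, sizeof(buf));
            return (0);
    }
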
 
+/*
+ * SCSI VPD page 0x80, the Unit Serial Number page.
+ */
 static int
 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
 {
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_vpd_unit_serial_number *sn_ptr;
-	struct ctl_lun *lun;
-#ifndef CTL_USE_BACKEND_SN
-	char tmpstr[32];
-#endif
+	int data_len;
 
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
-	/* XXX KDM which malloc flags here?? */
-	ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK | M_ZERO);
-	if (ctsio->kern_data_ptr == NULL) {
-		ctsio->io_hdr.status = CTL_SCSI_ERROR;
-		ctsio->scsi_status = SCSI_STATUS_BUSY;
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-	}
+	data_len = 4 + CTL_SN_LEN;
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
 	sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
-	ctsio->kern_sg_entries = 0;
-
-	if (sizeof(*sn_ptr) < alloc_len) {
-		ctsio->residual = alloc_len - sizeof(*sn_ptr);
-		ctsio->kern_data_len = sizeof(*sn_ptr);
-		ctsio->kern_total_len = sizeof(*sn_ptr);
-	} else {
-		ctsio->residual = 0;
-		ctsio->kern_data_len = alloc_len;
-		ctsio->kern_total_len = alloc_len;
-	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
 
 	/*
 	 * The control device is always connected.  The disk device, on the
@@ -9034,95 +9369,165 @@
 		sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
 
 	sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
-	sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN);
-#ifdef CTL_USE_BACKEND_SN
+	sn_ptr->length = CTL_SN_LEN;
 	/*
 	 * If we don't have a LUN, we just leave the serial number as
 	 * all spaces.
 	 */
-	memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
 	if (lun != NULL) {
 		strncpy((char *)sn_ptr->serial_num,
 			(char *)lun->be_lun->serial_num, CTL_SN_LEN);
-	}
-#else
+	} else
+		memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN);
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
+
+
+/*
+ * SCSI VPD page 0x86, the Extended INQUIRY Data page.
+ */
+static int
+ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_vpd_extended_inquiry_data *eid_ptr;
+	int data_len;
+
+	data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+	eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
 	/*
-	 * Note that we're using a non-unique serial number here,
+	 * The control device is always connected.  The disk device, on the
+	 * other hand, may not be online all the time.
 	 */
-	snprintf(tmpstr, sizeof(tmpstr), "MYSERIALNUMIS000");
-	memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
-	strncpy(sn_ptr->serial_num, tmpstr, ctl_min(CTL_SN_LEN,
-		ctl_min(sizeof(tmpstr), sizeof(*sn_ptr) - 4)));
-#endif
-	ctsio->scsi_status = SCSI_STATUS_OK;
+	if (lun != NULL)
+		eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+				     lun->be_lun->lun_type;
+	else
+		eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+	eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA;
+	scsi_ulto2b(data_len - 4, eid_ptr->page_length);
+	/*
+	 * We support head of queue, ordered and simple tags.
+	 */
+	eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP;
+	/*
+	 * Volatile cache supported.
+	 */
+	eid_ptr->flags3 = SVPD_EID_V_SUP;
 
+	/*
+	 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit
+	 * attention for a particular IT nexus on all LUNs once we have
+	 * reported it to that nexus.  This bit is required as of SPC-4.
+	 */
+	eid_ptr->flags4 = SVPD_EID_LUICLR;
+
+	/*
+	 * We support revert to defaults (RTD) bit in MODE SELECT.
+	 */
+	eid_ptr->flags5 = SVPD_EID_RTD_SUP;
+
+	/*
+	 * XXX KDM in order to correctly answer this, we would need
+	 * information from the SIM to determine how much sense data it
+	 * can send.  So this would really be a path inquiry field, most
+	 * likely.  This can be set to a maximum of 252 according to SPC-4,
+	 * but the hardware may or may not be able to support that much.
+	 * 0 just means that the maximum sense data length is not reported.
+	 */
+	eid_ptr->max_sense_length = 0;
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
 	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
 
+static int
+ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_vpd_mode_page_policy *mpp_ptr;
+	int data_len;
+
+	data_len = sizeof(struct scsi_vpd_mode_page_policy) +
+	    sizeof(struct scsi_vpd_mode_page_policy_descr);
+
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+	mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	/*
+	 * The control device is always connected.  The disk device, on the
+	 * other hand, may not be online all the time.
+	 */
+	if (lun != NULL)
+		mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+				     lun->be_lun->lun_type;
+	else
+		mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+	mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY;
+	scsi_ulto2b(data_len - 4, mpp_ptr->page_length);
+	mpp_ptr->descr[0].page_code = 0x3f;
+	mpp_ptr->descr[0].subpage_code = 0xff;
+	mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
 	return (CTL_RETVAL_COMPLETE);
 }
 
-
+/*
+ * SCSI VPD page 0x83, the Device Identification page.
+ */
 static int
 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
 {
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_port *port = CTL_PORT(ctsio);
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_vpd_device_id *devid_ptr;
-	struct scsi_vpd_id_descriptor *desc, *desc1;
-	struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */
-	struct scsi_vpd_id_t10 *t10id;
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
-	struct ctl_frontend *fe;
-#ifndef CTL_USE_BACKEND_SN
-	char tmpstr[32];
-#endif /* CTL_USE_BACKEND_SN */
-	int devid_len;
+	struct scsi_vpd_id_descriptor *desc;
+	int data_len, g;
+	uint8_t proto;
 
-	ctl_softc = control_softc;
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
-	devid_len = sizeof(struct scsi_vpd_device_id) +
-		sizeof(struct scsi_vpd_id_descriptor) +
-		sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN +
-		sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN +
-		sizeof(struct scsi_vpd_id_descriptor) +
+	data_len = sizeof(struct scsi_vpd_device_id) +
+	    sizeof(struct scsi_vpd_id_descriptor) +
 		sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
-		sizeof(struct scsi_vpd_id_descriptor) +
+	    sizeof(struct scsi_vpd_id_descriptor) +
 		sizeof(struct scsi_vpd_id_trgt_port_grp_id);
+	if (lun && lun->lun_devid)
+		data_len += lun->lun_devid->len;
+	if (port && port->port_devid)
+		data_len += port->port_devid->len;
+	if (port && port->target_devid)
+		data_len += port->target_devid->len;
 
-	/* XXX KDM which malloc flags here ?? */
-	ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK | M_ZERO);
-	if (ctsio->kern_data_ptr == NULL) {
-		ctsio->io_hdr.status = CTL_SCSI_ERROR;
-		ctsio->scsi_status = SCSI_STATUS_BUSY;
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-	}
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
 	devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
 	ctsio->kern_sg_entries = 0;
-
-	if (devid_len < alloc_len) {
-		ctsio->residual = alloc_len - devid_len;
-		ctsio->kern_data_len = devid_len;
-		ctsio->kern_total_len = devid_len;
-	} else {
-		ctsio->residual = 0;
-		ctsio->kern_data_len = alloc_len;
-		ctsio->kern_total_len = alloc_len;
-	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
 
-	desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
-	t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
-	desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
-		sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN);
-	desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] +
-	          CTL_WWPN_LEN);
-	desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] +
-	         sizeof(struct scsi_vpd_id_rel_trgt_port_id));
-
 	/*
 	 * The control device is always connected.  The disk device, on the
 	 * other hand, may not be online all the time.
@@ -9132,133 +9537,344 @@
 				     lun->be_lun->lun_type;
 	else
 		devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
-
 	devid_ptr->page_code = SVPD_DEVICE_ID;
+	scsi_ulto2b(data_len - 4, devid_ptr->length);
 
-	scsi_ulto2b(devid_len - 4, devid_ptr->length);
+	if (port && port->port_type == CTL_PORT_FC)
+		proto = SCSI_PROTO_FC << 4;
+	else if (port && port->port_type == CTL_PORT_SAS)
+		proto = SCSI_PROTO_SAS << 4;
+	else if (port && port->port_type == CTL_PORT_ISCSI)
+		proto = SCSI_PROTO_ISCSI << 4;
+	else
+		proto = SCSI_PROTO_SPI << 4;
+	desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	/*
+	 * We're using a LUN association here.  i.e., this device ID is a
+	 * per-LUN identifier.
+	 */
+	if (lun && lun->lun_devid) {
+		memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
+		desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
+		    lun->lun_devid->len);
+	}
 
-	fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
+	/*
+	 * This is for the WWPN which is a port association.
+	 */
+	if (port && port->port_devid) {
+		memcpy(desc, port->port_devid->data, port->port_devid->len);
+		desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
+		    port->port_devid->len);
+	}
 
 	/*
-	 * For Fibre channel,
+	 * This is for the Relative Target Port (type 4h) identifier
 	 */
-	if (fe->port_type == CTL_PORT_FC)
-	{
-		desc->proto_codeset = (SCSI_PROTO_FC << 4) |
-				      SVPD_ID_CODESET_ASCII;
-        	desc1->proto_codeset = (SCSI_PROTO_FC << 4) |
-		              SVPD_ID_CODESET_BINARY;
-	}
+	desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+	    SVPD_ID_TYPE_RELTARG;
+	desc->length = 4;
+	scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
+	desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+	    sizeof(struct scsi_vpd_id_rel_trgt_port_id));
+
+	/*
+	 * This is for the Target Port Group (type 5h) identifier
+	 */
+	desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+	    SVPD_ID_TYPE_TPORTGRP;
+	desc->length = 4;
+	if (softc->is_single ||
+	    (port && port->status & CTL_PORT_STATUS_HA_SHARED))
+		g = 1;
 	else
-	{
-		desc->proto_codeset = (SCSI_PROTO_SPI << 4) |
-				      SVPD_ID_CODESET_ASCII;
-        	desc1->proto_codeset = (SCSI_PROTO_SPI << 4) |
-		              SVPD_ID_CODESET_BINARY;
-	}
-	desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset;
-	mtx_unlock(&ctl_softc->ctl_lock);
+		g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt;
+	scsi_ulto2b(g, &desc->identifier[2]);
+	desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+	    sizeof(struct scsi_vpd_id_trgt_port_grp_id));
 
 	/*
-	 * We're using a LUN association here.  i.e., this device ID is a
-	 * per-LUN identifier.
+	 * This is for the Target identifier
 	 */
-	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
-	desc->length = sizeof(*t10id) + CTL_DEVID_LEN;
-	strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
+	if (port && port->target_devid) {
+		memcpy(desc, port->target_devid->data, port->target_devid->len);
+	}
 
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
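
[Page 0x83 above is assembled by appending variable-length descriptors (LUN ID, port WWPN, relative target port, target port group, target ID). A hedged sketch of how a consumer walks that list; the struct is a local stand-in, not the scsi_vpd_id_descriptor from scsi_all.h:]

    #include <stdint.h>
    #include <stdio.h>

    struct id_hdr {
            uint8_t proto_codeset;  /* protocol (hi nibble), code set (lo) */
            uint8_t id_type;        /* PIV, association (bits 5:4), type */
            uint8_t reserved;
            uint8_t length;         /* identifier bytes that follow */
    };

    static void
    walk_descriptors(const uint8_t *list, int len)
    {
            const struct id_hdr *h;
            int off = 0;

            while (off + 4 <= len) {
                    h = (const struct id_hdr *)(list + off);
                    printf("assoc %d type %#x len %d\n",
                        (h->id_type >> 4) & 0x3, h->id_type & 0xf, h->length);
                    off += 4 + h->length;
            }
    }

    int
    main(void)
    {
            /* One relative-target-port descriptor (binary, port assoc). */
            uint8_t relport[] = { 0x61, 0x94, 0x00, 0x04, 0, 0, 0, 1 };

            walk_descriptors(relport, (int)sizeof(relport));
            return (0);
    }
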
+
+static int
+ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
+{
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_vpd_scsi_ports *sp;
+	struct scsi_vpd_port_designation *pd;
+	struct scsi_vpd_port_designation_cont *pdc;
+	struct ctl_port *port;
+	int data_len, num_target_ports, iid_len, id_len;
+
+	num_target_ports = 0;
+	iid_len = 0;
+	id_len = 0;
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(port, &softc->port_list, links) {
+		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+			continue;
+		if (lun != NULL &&
+		    ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+			continue;
+		num_target_ports++;
+		if (port->init_devid)
+			iid_len += port->init_devid->len;
+		if (port->port_devid)
+			id_len += port->port_devid->len;
+	}
+	mtx_unlock(&softc->ctl_lock);
+
+	data_len = sizeof(struct scsi_vpd_scsi_ports) +
+	    num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
+	     sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+	sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
 	/*
-	 * desc1 is for the WWPN which is a port asscociation.
+	 * The control device is always connected.  The disk device, on the
+	 * other hand, may not be online all the time.  Need to change this
+	 * to figure out whether the disk device is actually online or not.
 	 */
-	desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA;
-	desc1->length = CTL_WWPN_LEN;
-	/* XXX Call Reggie's get_WWNN func here then add port # to the end */
-	/* For testing just create the WWPN */
-#if 0
-	ddb_GetWWNN((char *)desc1->identifier);
-
-	/* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
-	/* This is so Copancontrol will return something sane */
-	if (ctsio->io_hdr.nexus.targ_port!=0 &&
-	    ctsio->io_hdr.nexus.targ_port!=8)
-		desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port-1;
+	if (lun != NULL)
+		sp->device = (SID_QUAL_LU_CONNECTED << 5) |
+				  lun->be_lun->lun_type;
 	else
-		desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port;
-#endif
+		sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
 
-	be64enc(desc1->identifier, fe->wwpn);
+	sp->page_code = SVPD_SCSI_PORTS;
+	scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
+	    sp->page_length);
+	pd = &sp->design[0];
 
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(port, &softc->port_list, links) {
+		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+			continue;
+		if (lun != NULL &&
+		    ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+			continue;
+		scsi_ulto2b(port->targ_port, pd->relative_port_id);
+		if (port->init_devid) {
+			iid_len = port->init_devid->len;
+			memcpy(pd->initiator_transportid,
+			    port->init_devid->data, port->init_devid->len);
+		} else
+			iid_len = 0;
+		scsi_ulto2b(iid_len, pd->initiator_transportid_length);
+		pdc = (struct scsi_vpd_port_designation_cont *)
+		    (&pd->initiator_transportid[iid_len]);
+		if (port->port_devid) {
+			id_len = port->port_devid->len;
+			memcpy(pdc->target_port_descriptors,
+			    port->port_devid->data, port->port_devid->len);
+		} else
+			id_len = 0;
+		scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
+		pd = (struct scsi_vpd_port_designation *)
+		    ((uint8_t *)pdc->target_port_descriptors + id_len);
+	}
+	mtx_unlock(&softc->ctl_lock);
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_vpd_block_limits *bl_ptr;
+	uint64_t ival;
+
+	ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
+	bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
 	/*
-	 * desc2 is for the Relative Target Port(type 4h) identifier
+	 * The control device is always connected.  The disk device, on the
+	 * other hand, may not be online all the time.  Need to change this
+	 * to figure out whether the disk device is actually online or not.
 	 */
-	desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
-	                 | SVPD_ID_TYPE_RELTARG;
-	desc2->length = 4;
-//#if 0
-	/* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
-	/* This is so Copancontrol will return something sane */
-	if (ctsio->io_hdr.nexus.targ_port!=0 &&
-	    ctsio->io_hdr.nexus.targ_port!=8)
-		desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1;
+	if (lun != NULL)
+		bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+				  lun->be_lun->lun_type;
 	else
-	        desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port;
-//#endif
+		bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
 
+	bl_ptr->page_code = SVPD_BLOCK_LIMITS;
+	scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length);
+	bl_ptr->max_cmp_write_len = 0xff;
+	scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
+	if (lun != NULL) {
+		scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len);
+		if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
+			ival = 0xffffffff;
+			ctl_get_opt_number(&lun->be_lun->options,
+			    "unmap_max_lba", &ival);
+			scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt);
+			ival = 0xffffffff;
+			ctl_get_opt_number(&lun->be_lun->options,
+			    "unmap_max_descr", &ival);
+			scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt);
+			if (lun->be_lun->ublockexp != 0) {
+				scsi_ulto4b((1 << lun->be_lun->ublockexp),
+				    bl_ptr->opt_unmap_grain);
+				scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff,
+				    bl_ptr->unmap_grain_align);
+			}
+		}
+		scsi_ulto4b(lun->be_lun->atomicblock,
+		    bl_ptr->max_atomic_transfer_length);
+		scsi_ulto4b(0, bl_ptr->atomic_alignment);
+		scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
+		scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary);
+		scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size);
+		ival = UINT64_MAX;
+		ctl_get_opt_number(&lun->be_lun->options, "write_same_max_lba", &ival);
+		scsi_u64to8b(ival, bl_ptr->max_write_same_length);
+	}
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
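
[On the UNMAP granularity fields above: ublockexp is a power-of-two exponent, and the alignment word sets bit 31 (UGAVALID) to mark the offset as valid. A quick arithmetic check, standalone, with invented sample values:]

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t ublockexp = 3, ublockoff = 2;
            uint32_t grain = 1u << ublockexp;               /* 8 blocks */
            uint32_t align = 0x80000000u | ublockoff;       /* UGAVALID | off */

            printf("opt_unmap_grain = %u, unmap_grain_align = %#x\n",
                (unsigned)grain, (unsigned)align);
            return (0);
    }
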
+
+static int
+ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_vpd_block_device_characteristics *bdc_ptr;
+	const char *value;
+	u_int i;
+
+	ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
+	bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
 	/*
-	 * desc3 is for the Target Port Group(type 5h) identifier
+	 * The control device is always connected.  The disk device, on the
+	 * other hand, may not be online all the time.  Need to change this
+	 * to figure out whether the disk device is actually online or not.
 	 */
-	desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
-	                 | SVPD_ID_TYPE_TPORTGRP;
-	desc3->length = 4;
-	if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single)
-		desc3->identifier[3] = 1;
+	if (lun != NULL)
+		bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+				  lun->be_lun->lun_type;
 	else
-		desc3->identifier[3] = 2;
+		bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+	bdc_ptr->page_code = SVPD_BDC;
+	scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
+	if (lun != NULL &&
+	    (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL)
+		i = strtol(value, NULL, 0);
+	else
+		i = CTL_DEFAULT_ROTATION_RATE;
+	scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
+	if (lun != NULL &&
+	    (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL)
+		i = strtol(value, NULL, 0);
+	else
+		i = 0;
+	bdc_ptr->wab_wac_ff = (i & 0x0f);
+	bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS;
 
-#ifdef CTL_USE_BACKEND_SN
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_vpd_logical_block_prov *lbp_ptr;
+	const char *value;
+
+	ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
+	lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
 	/*
-	 * If we've actually got a backend, copy the device id from the
-	 * per-LUN data.  Otherwise, set it to all spaces.
+	 * The control device is always connected.  The disk device, on the
+	 * other hand, may not be online all the time.  Need to change this
+	 * to figure out whether the disk device is actually online or not.
 	 */
-	if (lun != NULL) {
-		/*
-		 * Copy the backend's LUN ID.
-		 */
-		strncpy((char *)t10id->vendor_spec_id,
-			(char *)lun->be_lun->device_id, CTL_DEVID_LEN);
-	} else {
-		/*
-		 * No backend, set this to spaces.
-		 */
-		memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN);
+	if (lun != NULL)
+		lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+				  lun->be_lun->lun_type;
+	else
+		lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+
+	lbp_ptr->page_code = SVPD_LBP;
+	scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
+	lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
+	if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
+		lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
+		    SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
+		value = ctl_get_opt(&lun->be_lun->options, "provisioning_type");
+		if (value != NULL) {
+			if (strcmp(value, "resource") == 0)
+				lbp_ptr->prov_type = SVPD_LBP_RESOURCE;
+			else if (strcmp(value, "thin") == 0)
+				lbp_ptr->prov_type = SVPD_LBP_THIN;
+		} else
+			lbp_ptr->prov_type = SVPD_LBP_THIN;
 	}
-#else
-	snprintf(tmpstr, sizeof(tmpstr), "MYDEVICEIDIS%4d",
-		 (lun != NULL) ?  (int)lun->lun : 0);
-	strncpy(t10id->vendor_spec_id, tmpstr, ctl_min(CTL_DEVID_LEN,
-		sizeof(tmpstr)));
-#endif
 
-	ctsio->scsi_status = SCSI_STATUS_OK;
-
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 	ctsio->be_move_done = ctl_config_move_done;
 	ctl_datamove((union ctl_io *)ctsio);
-
 	return (CTL_RETVAL_COMPLETE);
 }
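
[The provisioning_type handling above defaults to thin when the option is absent and leaves prov_type at zero (the page is allocated M_ZERO) for unrecognized values. A hedged sketch of that mapping; the constants are illustrative stand-ins for SVPD_LBP_THIN/SVPD_LBP_RESOURCE:]

    #include <stdio.h>
    #include <string.h>

    #define LBP_RESOURCE    1       /* stand-in for SVPD_LBP_RESOURCE */
    #define LBP_THIN        2       /* stand-in for SVPD_LBP_THIN */

    static int
    prov_type(const char *value)
    {
            if (value == NULL || strcmp(value, "thin") == 0)
                    return (LBP_THIN);
            if (strcmp(value, "resource") == 0)
                    return (LBP_RESOURCE);
            return (0);             /* unknown: not reported */
    }

    int
    main(void)
    {
            printf("%d %d %d\n", prov_type(NULL), prov_type("resource"),
                prov_type("bogus"));
            return (0);
    }
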
 
+/*
+ * INQUIRY with the EVPD bit set.
+ */
 static int
 ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
 {
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_inquiry *cdb;
 	int alloc_len, retval;
 
 	cdb = (struct scsi_inquiry *)ctsio->cdb;
-
-	retval = CTL_RETVAL_COMPLETE;
-
 	alloc_len = scsi_2btoul(cdb->length);
 
 	switch (cdb->page_code) {
@@ -9271,7 +9887,35 @@
 	case SVPD_DEVICE_ID:
 		retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
 		break;
+	case SVPD_EXTENDED_INQUIRY_DATA:
+		retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
+		break;
+	case SVPD_MODE_PAGE_POLICY:
+		retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
+		break;
+	case SVPD_SCSI_PORTS:
+		retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
+		break;
+	case SVPD_SCSI_TPC:
+		retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
+		break;
+	case SVPD_BLOCK_LIMITS:
+		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
+			goto err;
+		retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
+		break;
+	case SVPD_BDC:
+		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
+			goto err;
+		retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
+		break;
+	case SVPD_LBP:
+		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
+			goto err;
+		retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
+		break;
 	default:
+err:
 		ctl_set_invalid_field(ctsio,
 				      /*sks_valid*/ 1,
 				      /*command*/ 1,
@@ -9286,32 +9930,25 @@
 	return (retval);
 }
 
+/*
+ * Standard INQUIRY data.
+ */
 static int
 ctl_inquiry_std(struct ctl_scsiio *ctsio)
 {
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_port *port = CTL_PORT(ctsio);
+	struct ctl_lun *lun = CTL_LUN(ctsio);
 	struct scsi_inquiry_data *inq_ptr;
 	struct scsi_inquiry *cdb;
-	struct ctl_softc *ctl_softc;
-	struct ctl_lun *lun;
-	uint32_t alloc_len;
-	int is_fc;
+	char *val;
+	uint32_t alloc_len, data_len;
+	ctl_port_type port_type;
 
-	ctl_softc = control_softc;
+	port_type = port->port_type;
+	if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
+		port_type = CTL_PORT_SCSI;
 
-	/*
-	 * Figure out whether we're talking to a Fibre Channel port or not.
-	 * We treat the ioctl front end, and any SCSI adapters, as packetized
-	 * SCSI front ends.
-	 */
-	mtx_lock(&ctl_softc->ctl_lock);
-	if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type !=
-	    CTL_PORT_FC)
-		is_fc = 0;
-	else
-		is_fc = 1;
-	mtx_unlock(&ctl_softc->ctl_lock);
-
-	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
 	cdb = (struct scsi_inquiry *)ctsio->cdb;
 	alloc_len = scsi_2btoul(cdb->length);
 
@@ -9320,82 +9957,30 @@
 	 * in.  If the user only asks for less, we'll give him
 	 * that much.
 	 */
-	/* XXX KDM what malloc flags should we use here?? */
-	ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK | M_ZERO);
-	if (ctsio->kern_data_ptr == NULL) {
-		ctsio->io_hdr.status = CTL_SCSI_ERROR;
-		ctsio->scsi_status = SCSI_STATUS_BUSY;
-		ctl_done((union ctl_io *)ctsio);
-		return (CTL_RETVAL_COMPLETE);
-	}
+	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
 	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
 	ctsio->kern_sg_entries = 0;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
 
-	if (sizeof(*inq_ptr) < alloc_len) {
-		ctsio->residual = alloc_len - sizeof(*inq_ptr);
-		ctsio->kern_data_len = sizeof(*inq_ptr);
-		ctsio->kern_total_len = sizeof(*inq_ptr);
-	} else {
-		ctsio->residual = 0;
-		ctsio->kern_data_len = alloc_len;
-		ctsio->kern_total_len = alloc_len;
-	}
-
-	/*
-	 * If we have a LUN configured, report it as connected.  Otherwise,
-	 * report that it is offline or no device is supported, depending 
-	 * on the value of inquiry_pq_no_lun.
-	 *
-	 * According to the spec (SPC-4 r34), the peripheral qualifier
-	 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
-	 *
-	 * "A peripheral device having the specified peripheral device type 
-	 * is not connected to this logical unit. However, the device
-	 * server is capable of supporting the specified peripheral device
-	 * type on this logical unit."
-	 *
-	 * According to the same spec, the peripheral qualifier
-	 * SID_QUAL_BAD_LU (011b) is used in this scenario:
-	 *
-	 * "The device server is not capable of supporting a peripheral
-	 * device on this logical unit. For this peripheral qualifier the
-	 * peripheral device type shall be set to 1Fh. All other peripheral
-	 * device type values are reserved for this peripheral qualifier."
-	 *
-	 * Given the text, it would seem that we probably want to report that
-	 * the LUN is offline here.  There is no LUN connected, but we can
-	 * support a LUN at the given LUN number.
-	 *
-	 * In the real world, though, it sounds like things are a little
-	 * different:
-	 *
-	 * - Linux, when presented with a LUN with the offline peripheral
-	 *   qualifier, will create an sg driver instance for it.  So when
-	 *   you attach it to CTL, you wind up with a ton of sg driver
-	 *   instances.  (One for every LUN that Linux bothered to probe.)
-	 *   Linux does this despite the fact that it issues a REPORT LUNs
-	 *   to LUN 0 to get the inventory of supported LUNs.
-	 *
-	 * - There is other anecdotal evidence (from Emulex folks) about
-	 *   arrays that use the offline peripheral qualifier for LUNs that
-	 *   are on the "passive" path in an active/passive array.
-	 *
-	 * So the solution is provide a hopefully reasonable default
-	 * (return bad/no LUN) and allow the user to change the behavior
-	 * with a tunable/sysctl variable.
-	 */
-	if (lun != NULL)
-		inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
-				  lun->be_lun->lun_type;
-	else if (ctl_softc->inquiry_pq_no_lun == 0)
-		inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
-	else
+	if (lun != NULL) {
+		if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
+		    softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
+			inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+			    lun->be_lun->lun_type;
+		} else {
+			inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
+			    lun->be_lun->lun_type;
+		}
+		if (lun->flags & CTL_LUN_REMOVABLE)
+			inq_ptr->dev_qual2 |= SID_RMB;
+	} else
 		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
 
 	/* RMB in byte 2 is 0 */
-	inq_ptr->version = SCSI_REV_SPC3;
+	inq_ptr->version = SCSI_REV_SPC5;
 
 	/*
 	 * According to SAM-3, even if a device only supports a single
@@ -9412,53 +9997,62 @@
 	 *
 	 * Therefore we set the HiSup bit here.
 	 *
-	 * The reponse format is 2, per SPC-3.
+	 * The response format is 2, per SPC-3.
 	 */
 	inq_ptr->response_format = SID_HiSup | 2;
 
-	inq_ptr->additional_length = sizeof(*inq_ptr) - 4;
+	inq_ptr->additional_length = data_len -
+	    (offsetof(struct scsi_inquiry_data, additional_length) + 1);
 	CTL_DEBUG_PRINT(("additional_length = %d\n",
 			 inq_ptr->additional_length));
 
-	inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT;
-	/* 16 bit addressing */
-	if (is_fc == 0)
+	inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
+	if (port_type == CTL_PORT_SCSI)
 		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
-	/* XXX set the SID_MultiP bit here if we're actually going to
-	   respond on multiple ports */
 	inq_ptr->spc2_flags |= SPC2_SID_MultiP;
+	inq_ptr->flags = SID_CmdQue;
+	if (port_type == CTL_PORT_SCSI)
+		inq_ptr->flags |= SID_WBus16 | SID_Sync;
 
-	/* 16 bit data bus, synchronous transfers */
-	/* XXX these flags don't apply for FC */
-	if (is_fc == 0)
-		inq_ptr->flags = SID_WBus16 | SID_Sync;
 	/*
-	 * XXX KDM do we want to support tagged queueing on the control
-	 * device at all?
-	 */
-	if ((lun == NULL)
-	 || (lun->be_lun->lun_type != T_PROCESSOR))
-		inq_ptr->flags |= SID_CmdQue;
-	/*
 	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
 	 * We have 8 bytes for the vendor name, and 16 bytes for the device
 	 * name and 4 bytes for the revision.
 	 */
-	strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
+	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
+	    "vendor")) == NULL) {
+		strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
+	} else {
+		memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
+		strncpy(inq_ptr->vendor, val,
+		    min(sizeof(inq_ptr->vendor), strlen(val)));
+	}
 	if (lun == NULL) {
-		strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
-	} else {
+		strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
+		    sizeof(inq_ptr->product));
+	} else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
 		switch (lun->be_lun->lun_type) {
 		case T_DIRECT:
-			strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
+			strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
+			    sizeof(inq_ptr->product));
 			break;
 		case T_PROCESSOR:
-			strcpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT);
+			strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
+			    sizeof(inq_ptr->product));
 			break;
+		case T_CDROM:
+			strncpy(inq_ptr->product, CTL_CDROM_PRODUCT,
+			    sizeof(inq_ptr->product));
+			break;
 		default:
-			strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT);
+			strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
+			    sizeof(inq_ptr->product));
 			break;
 		}
+	} else {
+		memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
+		strncpy(inq_ptr->product, val,
+		    min(sizeof(inq_ptr->product), strlen(val)));
 	}
 
 	/*
@@ -9465,7 +10059,14 @@
 	 * XXX make this a macro somewhere so it automatically gets
 	 * incremented when we make changes.
 	 */
-	strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
+	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
+	    "revision")) == NULL) {
+		strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
+	} else {
+		memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
+		strncpy(inq_ptr->revision, val,
+		    min(sizeof(inq_ptr->revision), strlen(val)));
+	}
 
 	/*
 	 * For parallel SCSI, we support double transition and single
@@ -9473,49 +10074,52 @@
 	 * and Selection) and Information Unit transfers on both the
 	 * control and array devices.
 	 */
-	if (is_fc == 0)
+	if (port_type == CTL_PORT_SCSI)
 		inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
 				    SID_SPI_IUS;
 
-	/* SAM-3 */
-	scsi_ulto2b(0x0060, inq_ptr->version1);
-	/* SPC-3 (no version claimed) XXX should we claim a version? */
-	scsi_ulto2b(0x0300, inq_ptr->version2);
-	if (is_fc) {
+	/* SAM-6 (no version claimed) */
+	scsi_ulto2b(0x00C0, inq_ptr->version1);
+	/* SPC-5 (no version claimed) */
+	scsi_ulto2b(0x05C0, inq_ptr->version2);
+	if (port_type == CTL_PORT_FC) {
 		/* FCP-2 ANSI INCITS.350:2003 */
 		scsi_ulto2b(0x0917, inq_ptr->version3);
-	} else {
+	} else if (port_type == CTL_PORT_SCSI) {
 		/* SPI-4 ANSI INCITS.362:200x */
 		scsi_ulto2b(0x0B56, inq_ptr->version3);
+	} else if (port_type == CTL_PORT_ISCSI) {
+		/* iSCSI (no version claimed) */
+		scsi_ulto2b(0x0960, inq_ptr->version3);
+	} else if (port_type == CTL_PORT_SAS) {
+		/* SAS (no version claimed) */
+		scsi_ulto2b(0x0BE0, inq_ptr->version3);
 	}
 
 	if (lun == NULL) {
-		/* SBC-2 (no version claimed) XXX should we claim a version? */
-		scsi_ulto2b(0x0320, inq_ptr->version4);
+		/* SBC-4 (no version claimed) */
+		scsi_ulto2b(0x0600, inq_ptr->version4);
 	} else {
 		switch (lun->be_lun->lun_type) {
 		case T_DIRECT:
-			/*
-			 * SBC-2 (no version claimed) XXX should we claim a
-			 * version?
-			 */
-			scsi_ulto2b(0x0320, inq_ptr->version4);
+			/* SBC-4 (no version claimed) */
+			scsi_ulto2b(0x0600, inq_ptr->version4);
 			break;
 		case T_PROCESSOR:
+			break;
+		case T_CDROM:
+			/* MMC-6 (no version claimed) */
+			scsi_ulto2b(0x04E0, inq_ptr->version4);
+			break;
 		default:
 			break;
 		}
 	}
 
-	ctsio->scsi_status = SCSI_STATUS_OK;
-	if (ctsio->kern_data_len > 0) {
-		ctsio->be_move_done = ctl_config_move_done;
-		ctl_datamove((union ctl_io *)ctsio);
-	} else {
-		ctsio->io_hdr.status = CTL_SUCCESS;
-		ctl_done((union ctl_io *)ctsio);
-	}
-
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
 	return (CTL_RETVAL_COMPLETE);
 }
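
The new ADDITIONAL LENGTH computation follows SPC: the field reports how many
bytes of inquiry data follow it, i.e. the full response length minus (the
offset of the field + 1).  A self-contained sketch of the arithmetic, using a
toy struct instead of the real scsi_inquiry_data:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct scsi_inquiry_data; sizes are illustrative. */
struct toy_inq {
	uint8_t device;
	uint8_t dev_qual2;
	uint8_t version;
	uint8_t response_format;
	uint8_t additional_length;	/* byte 4 */
	uint8_t rest[251];
};

int main(void)
{
	uint32_t data_len = sizeof(struct toy_inq);	/* 256 */
	uint8_t add_len = data_len -
	    (offsetof(struct toy_inq, additional_length) + 1);

	printf("additional_length = %u\n", (unsigned)add_len);	/* 251 */
	return (0);
}
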
 
@@ -9525,59 +10129,397 @@
 	struct scsi_inquiry *cdb;
 	int retval;
 
-	cdb = (struct scsi_inquiry *)ctsio->cdb;
-
-	retval = 0;
-
 	CTL_DEBUG_PRINT(("ctl_inquiry\n"));
 
-	/*
-	 * Right now, we don't support the CmdDt inquiry information.
-	 * This would be nice to support in the future.  When we do
-	 * support it, we should change this test so that it checks to make
-	 * sure SI_EVPD and SI_CMDDT aren't both set at the same time.
-	 */
-#ifdef notyet
-	if (((cdb->byte2 & SI_EVPD)
-	 && (cdb->byte2 & SI_CMDDT)))
-#endif
-	if (cdb->byte2 & SI_CMDDT) {
-		/*
-		 * Point to the SI_CMDDT bit.  We might change this
-		 * when we support SI_CMDDT, but since both bits would be
-		 * "wrong", this should probably just stay as-is then.
-		 */
+	cdb = (struct scsi_inquiry *)ctsio->cdb;
+	if (cdb->byte2 & SI_EVPD)
+		retval = ctl_inquiry_evpd(ctsio);
+	else if (cdb->page_code == 0)
+		retval = ctl_inquiry_std(ctsio);
+	else {
 		ctl_set_invalid_field(ctsio,
 				      /*sks_valid*/ 1,
 				      /*command*/ 1,
-				      /*field*/ 1,
-				      /*bit_valid*/ 1,
-				      /*bit*/ 1);
+				      /*field*/ 2,
+				      /*bit_valid*/ 0,
+				      /*bit*/ 0);
 		ctl_done((union ctl_io *)ctsio);
 		return (CTL_RETVAL_COMPLETE);
 	}
-	if (cdb->byte2 & SI_EVPD)
-		retval = ctl_inquiry_evpd(ctsio);
-#ifdef notyet
-	else if (cdb->byte2 & SI_CMDDT)
-		retval = ctl_inquiry_cmddt(ctsio);
-#endif
-	else
-		retval = ctl_inquiry_std(ctsio);
 
 	return (retval);
 }
 
+int
+ctl_get_config(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_get_config_header *hdr;
+	struct scsi_get_config_feature *feature;
+	struct scsi_get_config *cdb;
+	uint32_t alloc_len, data_len;
+	int rt, starting;
+
+	cdb = (struct scsi_get_config *)ctsio->cdb;
+	rt = (cdb->rt & SGC_RT_MASK);
+	starting = scsi_2btoul(cdb->starting_feature);
+	alloc_len = scsi_2btoul(cdb->length);
+
+	data_len = sizeof(struct scsi_get_config_header) +
+	    sizeof(struct scsi_get_config_feature) + 8 +
+	    sizeof(struct scsi_get_config_feature) + 8 +
+	    sizeof(struct scsi_get_config_feature) + 4 +
+	    sizeof(struct scsi_get_config_feature) + 4 +
+	    sizeof(struct scsi_get_config_feature) + 8 +
+	    sizeof(struct scsi_get_config_feature) +
+	    sizeof(struct scsi_get_config_feature) + 4 +
+	    sizeof(struct scsi_get_config_feature) + 4 +
+	    sizeof(struct scsi_get_config_feature) + 4 +
+	    sizeof(struct scsi_get_config_feature) + 4 +
+	    sizeof(struct scsi_get_config_feature) + 4 +
+	    sizeof(struct scsi_get_config_feature) + 4;
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+
+	hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
+	if (lun->flags & CTL_LUN_NO_MEDIA)
+		scsi_ulto2b(0x0000, hdr->current_profile);
+	else
+		scsi_ulto2b(0x0010, hdr->current_profile);
+	feature = (struct scsi_get_config_feature *)(hdr + 1);
+
+	if (starting > 0x003b)
+		goto done;
+	if (starting > 0x003a)
+		goto f3b;
+	if (starting > 0x002b)
+		goto f3a;
+	if (starting > 0x002a)
+		goto f2b;
+	if (starting > 0x001f)
+		goto f2a;
+	if (starting > 0x001e)
+		goto f1f;
+	if (starting > 0x001d)
+		goto f1e;
+	if (starting > 0x0010)
+		goto f1d;
+	if (starting > 0x0003)
+		goto f10;
+	if (starting > 0x0002)
+		goto f3;
+	if (starting > 0x0001)
+		goto f2;
+	if (starting > 0x0000)
+		goto f1;
+
+	/* Profile List */
+	scsi_ulto2b(0x0000, feature->feature_code);
+	feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT;
+	feature->add_length = 8;
+	scsi_ulto2b(0x0008, &feature->feature_data[0]);	/* CD-ROM */
+	feature->feature_data[2] = 0x00;
+	scsi_ulto2b(0x0010, &feature->feature_data[4]);	/* DVD-ROM */
+	feature->feature_data[6] = 0x01;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f1:	/* Core */
+	scsi_ulto2b(0x0001, feature->feature_code);
+	feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT;
+	feature->add_length = 8;
+	scsi_ulto4b(0x00000000, &feature->feature_data[0]);
+	feature->feature_data[4] = 0x03;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f2:	/* Morphing */
+	scsi_ulto2b(0x0002, feature->feature_code);
+	feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
+	feature->add_length = 4;
+	feature->feature_data[0] = 0x02;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f3:	/* Removable Medium */
+	scsi_ulto2b(0x0003, feature->feature_code);
+	feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
+	feature->add_length = 4;
+	feature->feature_data[0] = 0x39;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+	if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA))
+		goto done;
+
+f10:	/* Random Read */
+	scsi_ulto2b(0x0010, feature->feature_code);
+	feature->flags = 0x00;
+	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
+		feature->flags |= SGC_F_CURRENT;
+	feature->add_length = 8;
+	scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]);
+	scsi_ulto2b(1, &feature->feature_data[4]);
+	feature->feature_data[6] = 0x00;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f1d:	/* Multi-Read */
+	scsi_ulto2b(0x001D, feature->feature_code);
+	feature->flags = 0x00;
+	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
+		feature->flags |= SGC_F_CURRENT;
+	feature->add_length = 0;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f1e:	/* CD Read */
+	scsi_ulto2b(0x001E, feature->feature_code);
+	feature->flags = 0x00;
+	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
+		feature->flags |= SGC_F_CURRENT;
+	feature->add_length = 4;
+	feature->feature_data[0] = 0x00;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f1f:	/* DVD Read */
+	scsi_ulto2b(0x001F, feature->feature_code);
+	feature->flags = 0x08;
+	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
+		feature->flags |= SGC_F_CURRENT;
+	feature->add_length = 4;
+	feature->feature_data[0] = 0x01;
+	feature->feature_data[2] = 0x03;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f2a:	/* DVD+RW */
+	scsi_ulto2b(0x002A, feature->feature_code);
+	feature->flags = 0x04;
+	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
+		feature->flags |= SGC_F_CURRENT;
+	feature->add_length = 4;
+	feature->feature_data[0] = 0x00;
+	feature->feature_data[1] = 0x00;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f2b:	/* DVD+R */
+	scsi_ulto2b(0x002B, feature->feature_code);
+	feature->flags = 0x00;
+	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
+		feature->flags |= SGC_F_CURRENT;
+	feature->add_length = 4;
+	feature->feature_data[0] = 0x00;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f3a:	/* DVD+RW Dual Layer */
+	scsi_ulto2b(0x003A, feature->feature_code);
+	feature->flags = 0x00;
+	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
+		feature->flags |= SGC_F_CURRENT;
+	feature->add_length = 4;
+	feature->feature_data[0] = 0x00;
+	feature->feature_data[1] = 0x00;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+f3b:	/* DVD+R Dual Layer */
+	scsi_ulto2b(0x003B, feature->feature_code);
+	feature->flags = 0x00;
+	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
+		feature->flags |= SGC_F_CURRENT;
+	feature->add_length = 4;
+	feature->feature_data[0] = 0x00;
+	feature = (struct scsi_get_config_feature *)
+	    &feature->feature_data[feature->add_length];
+
+done:
+	data_len = (uint8_t *)feature - (uint8_t *)hdr;
+	if (rt == SGC_RT_SPECIFIC && data_len > 4) {
+		feature = (struct scsi_get_config_feature *)(hdr + 1);
+		if (scsi_2btoul(feature->feature_code) == starting)
+			feature = (struct scsi_get_config_feature *)
+			    &feature->feature_data[feature->add_length];
+		data_len = (uint8_t *)feature - (uint8_t *)hdr;
+	}
+	scsi_ulto4b(data_len - 4, hdr->data_length);
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
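
All of the MMC service routines added here share one convention: build the
complete response, record its true size in the response header, then clip the
actual data movement to the initiator's allocation length.  A sketch of that
pattern (the names are illustrative, not CTL's):

#include <stdint.h>
#include <stdio.h>

/* Header reports the full size; the wire carries at most alloc_len. */
static uint32_t
clip_transfer(uint32_t data_len, uint32_t alloc_len, uint32_t *hdr_len)
{
	*hdr_len = data_len - 4;	/* length field excludes itself */
	return (data_len < alloc_len ? data_len : alloc_len);
}

int main(void)
{
	uint32_t hdr_len, moved;

	moved = clip_transfer(96, 32, &hdr_len);
	printf("header %u, moved %u\n", hdr_len, moved);	/* 92, 32 */
	return (0);
}
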
+
+int
+ctl_get_event_status(struct ctl_scsiio *ctsio)
+{
+	struct scsi_get_event_status_header *hdr;
+	struct scsi_get_event_status *cdb;
+	uint32_t alloc_len, data_len;
+	int notif_class;
+
+	cdb = (struct scsi_get_event_status *)ctsio->cdb;
+	if ((cdb->byte2 & SGESN_POLLED) == 0) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+		    /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
+		ctl_done((union ctl_io *)ctsio);
+		return (CTL_RETVAL_COMPLETE);
+	}
+	notif_class = cdb->notif_class;
+	alloc_len = scsi_2btoul(cdb->length);
+
+	data_len = sizeof(struct scsi_get_event_status_header);
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr;
+	scsi_ulto2b(0, hdr->descr_length);
+	hdr->nea_class = SGESN_NEA;
+	hdr->supported_class = 0;
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_mechanism_status(struct ctl_scsiio *ctsio)
+{
+	struct scsi_mechanism_status_header *hdr;
+	struct scsi_mechanism_status *cdb;
+	uint32_t alloc_len, data_len;
+
+	cdb = (struct scsi_mechanism_status *)ctsio->cdb;
+	alloc_len = scsi_2btoul(cdb->length);
+
+	data_len = sizeof(struct scsi_mechanism_status_header);
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr;
+	hdr->state1 = 0x00;
+	hdr->state2 = 0xe0;
+	scsi_ulto3b(0, hdr->lba);
+	hdr->slots_num = 0;
+	scsi_ulto2b(0, hdr->slots_length);
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
+
+static void
+ctl_ultomsf(uint32_t lba, uint8_t *buf)
+{
+
+	lba += 150;
+	buf[0] = 0;
+	buf[1] = bin2bcd((lba / 75) / 60);
+	buf[2] = bin2bcd((lba / 75) % 60);
+	buf[3] = bin2bcd(lba % 75);
+}
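
ctl_ultomsf() applies the standard CD addressing rule: MSF time is offset from
the LBA by the 150-frame (two second) pregap, at 75 frames per second, with
each field encoded in BCD.  A standalone check of the conversion, with a local
bin2bcd() standing in for the kernel's:

#include <stdint.h>
#include <stdio.h>

static uint8_t
bin2bcd(uint8_t b)
{
	return (((b / 10) << 4) | (b % 10));
}

int main(void)
{
	uint32_t lba = 0;	/* first user data block */

	lba += 150;		/* 2 s pregap * 75 frames/s */
	printf("%02x:%02x:%02x\n", bin2bcd((lba / 75) / 60),
	    bin2bcd((lba / 75) % 60), bin2bcd(lba % 75));	/* 00:02:00 */
	return (0);
}
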
+
+int
+ctl_read_toc(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_read_toc_hdr *hdr;
+	struct scsi_read_toc_type01_descr *descr;
+	struct scsi_read_toc *cdb;
+	uint32_t alloc_len, data_len;
+	int format, msf;
+
+	cdb = (struct scsi_read_toc *)ctsio->cdb;
+	msf = (cdb->byte2 & CD_MSF) != 0;
+	format = cdb->format;
+	alloc_len = scsi_2btoul(cdb->data_len);
+
+	data_len = sizeof(struct scsi_read_toc_hdr);
+	if (format == 0)
+		data_len += 2 * sizeof(struct scsi_read_toc_type01_descr);
+	else
+		data_len += sizeof(struct scsi_read_toc_type01_descr);
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr;
+	if (format == 0) {
+		scsi_ulto2b(0x12, hdr->data_length);
+		hdr->first = 1;
+		hdr->last = 1;
+		descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
+		descr->addr_ctl = 0x14;
+		descr->track_number = 1;
+		if (msf)
+			ctl_ultomsf(0, descr->track_start);
+		else
+			scsi_ulto4b(0, descr->track_start);
+		descr++;
+		descr->addr_ctl = 0x14;
+		descr->track_number = 0xaa;
+		if (msf)
+			ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start);
+		else
+			scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start);
+	} else {
+		scsi_ulto2b(0x0a, hdr->data_length);
+		hdr->first = 1;
+		hdr->last = 1;
+		descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
+		descr->addr_ctl = 0x14;
+		descr->track_number = 1;
+		if (msf)
+			ctl_ultomsf(0, descr->track_start);
+		else
+			scsi_ulto4b(0, descr->track_start);
+	}
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
+
 /*
  * For known CDB types, parse the LBA and length.
  */
 static int
-ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len)
+ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
 {
 	if (io->io_hdr.io_type != CTL_IO_SCSI)
 		return (1);
 
 	switch (io->scsiio.cdb[0]) {
+	case COMPARE_AND_WRITE: {
+		struct scsi_compare_and_write *cdb;
+
+		cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;
+
+		*lba = scsi_8btou64(cdb->addr);
+		*len = cdb->length;
+		break;
+	}
 	case READ_6:
 	case WRITE_6: {
 		struct scsi_rw_6 *cdb;
@@ -9638,16 +10580,82 @@
 		*len = scsi_4btoul(cdb->length);
 		break;
 	}
+	case WRITE_ATOMIC_16: {
+		struct scsi_write_atomic_16 *cdb;
+
+		cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb;
+
+		*lba = scsi_8btou64(cdb->addr);
+		*len = scsi_2btoul(cdb->length);
+		break;
+	}
 	case WRITE_VERIFY_16: {
 		struct scsi_write_verify_16 *cdb;
 
 		cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
 
-		
 		*lba = scsi_8btou64(cdb->addr);
 		*len = scsi_4btoul(cdb->length);
 		break;
 	}
+	case WRITE_SAME_10: {
+		struct scsi_write_same_10 *cdb;
+
+		cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;
+
+		*lba = scsi_4btoul(cdb->addr);
+		*len = scsi_2btoul(cdb->length);
+		break;
+	}
+	case WRITE_SAME_16: {
+		struct scsi_write_same_16 *cdb;
+
+		cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;
+
+		*lba = scsi_8btou64(cdb->addr);
+		*len = scsi_4btoul(cdb->length);
+		break;
+	}
+	case VERIFY_10: {
+		struct scsi_verify_10 *cdb;
+
+		cdb = (struct scsi_verify_10 *)io->scsiio.cdb;
+
+		*lba = scsi_4btoul(cdb->addr);
+		*len = scsi_2btoul(cdb->length);
+		break;
+	}
+	case VERIFY_12: {
+		struct scsi_verify_12 *cdb;
+
+		cdb = (struct scsi_verify_12 *)io->scsiio.cdb;
+
+		*lba = scsi_4btoul(cdb->addr);
+		*len = scsi_4btoul(cdb->length);
+		break;
+	}
+	case VERIFY_16: {
+		struct scsi_verify_16 *cdb;
+
+		cdb = (struct scsi_verify_16 *)io->scsiio.cdb;
+
+		*lba = scsi_8btou64(cdb->addr);
+		*len = scsi_4btoul(cdb->length);
+		break;
+	}
+	case UNMAP: {
+		*lba = 0;
+		*len = UINT64_MAX;
+		break;
+	}
+	case SERVICE_ACTION_IN: {	/* GET LBA STATUS */
+		struct scsi_get_lba_status *cdb;
+
+		cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
+		*lba = scsi_8btou64(cdb->addr);
+		*len = UINT32_MAX;
+		break;
+	}
 	default:
 		return (1);
 		break; /* NOTREACHED */
@@ -9657,44 +10665,99 @@
 }
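
Each case above is plain big-endian field extraction per the CDB layout;
READ(10), for example, carries a 4-byte LBA at bytes 2-5 and a 2-byte transfer
length at bytes 7-8.  The same decode without the CTL structures (CDB contents
hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* READ(10): opcode 0x28, LBA 0x12345678, 16 blocks. */
	uint8_t cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0x00, 0x10, 0 };
	uint64_t lba = ((uint64_t)cdb[2] << 24) | (cdb[3] << 16) |
	    (cdb[4] << 8) | cdb[5];
	uint64_t len = (cdb[7] << 8) | cdb[8];

	printf("lba=%#llx len=%llu\n", (unsigned long long)lba,
	    (unsigned long long)len);	/* lba=0x12345678 len=16 */
	return (0);
}
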
 
 static ctl_action
-ctl_extent_check_lba(uint64_t lba1, uint32_t len1, uint64_t lba2, uint32_t len2)
+ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
+    bool seq)
 {
 	uint64_t endlba1, endlba2;
 
-	endlba1 = lba1 + len1 - 1;
+	endlba1 = lba1 + len1 - (seq ? 0 : 1);
 	endlba2 = lba2 + len2 - 1;
 
-	if ((endlba1 < lba2)
-	 || (endlba2 < lba1))
+	if ((endlba1 < lba2) || (endlba2 < lba1))
 		return (CTL_ACTION_PASS);
 	else
 		return (CTL_ACTION_BLOCK);
 }
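
The interval test is the classic one: two inclusive LBA ranges are disjoint
exactly when one ends before the other begins.  With seq set, endlba1 is
extended by one block, so a command that starts exactly where the previous one
ends still counts as a collision and stays ordered behind it.  A quick
self-contained check:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool
extents_overlap(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
    bool seq)
{
	uint64_t end1 = lba1 + len1 - (seq ? 0 : 1);
	uint64_t end2 = lba2 + len2 - 1;

	return (!(end1 < lba2 || end2 < lba1));
}

int main(void)
{
	assert(!extents_overlap(0, 8, 8, 8, false));	/* [0,7] vs [8,15] */
	assert(extents_overlap(0, 8, 8, 8, true));	/* seq: back-to-back */
	assert(extents_overlap(0, 8, 7, 4, false));	/* share LBA 7 */
	return (0);
}
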
 
+static int
+ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
+{
+	struct ctl_ptr_len_flags *ptrlen;
+	struct scsi_unmap_desc *buf, *end, *range;
+	uint64_t lba;
+	uint32_t len;
+
+	/* If not UNMAP -- fall through to the regular extent check. */
+	if (io->io_hdr.io_type != CTL_IO_SCSI ||
+	    io->scsiio.cdb[0] != UNMAP)
+		return (CTL_ACTION_ERROR);
+
+	/* If UNMAP without data -- block and wait for data. */
+	ptrlen = (struct ctl_ptr_len_flags *)
+	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+	if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
+	    ptrlen->ptr == NULL)
+		return (CTL_ACTION_BLOCK);
+
+	/* UNMAP with data -- check for collision. */
+	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
+	end = buf + ptrlen->len / sizeof(*buf);
+	for (range = buf; range < end; range++) {
+		lba = scsi_8btou64(range->lba);
+		len = scsi_4btoul(range->length);
+		if ((lba < lba2 + len2) && (lba + len > lba2))
+			return (CTL_ACTION_BLOCK);
+	}
+	return (CTL_ACTION_PASS);
+}
+
 static ctl_action
-ctl_extent_check(union ctl_io *io1, union ctl_io *io2)
+ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
 {
 	uint64_t lba1, lba2;
-	uint32_t len1, len2;
+	uint64_t len1, len2;
 	int retval;
 
-	retval = ctl_get_lba_len(io1, &lba1, &len1);
-	if (retval != 0)
+	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
 		return (CTL_ACTION_ERROR);
 
-	retval = ctl_get_lba_len(io2, &lba2, &len2);
-	if (retval != 0)
+	retval = ctl_extent_check_unmap(io1, lba2, len2);
+	if (retval != CTL_ACTION_ERROR)
+		return (retval);
+
+	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
 		return (CTL_ACTION_ERROR);
 
-	return (ctl_extent_check_lba(lba1, len1, lba2, len2));
+	if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
+		seq = FALSE;
+	return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
 }
 
 static ctl_action
-ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io)
+ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
 {
-	struct ctl_cmd_entry *pending_entry, *ooa_entry;
-	ctl_serialize_action *serialize_row;
+	uint64_t lba1, lba2;
+	uint64_t len1, len2;
 
+	if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
+		return (CTL_ACTION_PASS);
+	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
+		return (CTL_ACTION_ERROR);
+	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
+		return (CTL_ACTION_ERROR);
+
+	if (lba1 + len1 == lba2)
+		return (CTL_ACTION_BLOCK);
+	return (CTL_ACTION_PASS);
+}
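
The sequential check serializes only the streaming pattern: an I/O is held
back just when it begins at the block immediately after the one in flight, so
ordinary random I/O keeps running concurrently.  The predicate reduces to an
adjacency test:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool
is_sequential(uint64_t lba1, uint64_t len1, uint64_t lba2)
{
	return (lba1 + len1 == lba2);	/* io2 starts right after io1 */
}

int main(void)
{
	assert(is_sequential(100, 8, 108));
	assert(!is_sequential(100, 8, 109));	/* gap: no serialization */
	return (0);
}
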
+
+static ctl_action
+ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
+    union ctl_io *ooa_io)
+{
+	const struct ctl_cmd_entry *pending_entry, *ooa_entry;
+	const ctl_serialize_action *serialize_row;
+
 	/*
 	 * The initiator attempted multiple untagged commands at the same
 	 * time.  Can't do that.
@@ -9703,9 +10766,10 @@
 	 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
 	 && ((pending_io->io_hdr.nexus.targ_port ==
 	      ooa_io->io_hdr.nexus.targ_port)
-	  && (pending_io->io_hdr.nexus.initid.id ==
-	      ooa_io->io_hdr.nexus.initid.id))
-	 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
+	  && (pending_io->io_hdr.nexus.initid ==
+	      ooa_io->io_hdr.nexus.initid))
+	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
+	      CTL_FLAG_STATUS_SENT)) == 0))
 		return (CTL_ACTION_OVERLAP);
 
 	/*
@@ -9724,9 +10788,10 @@
 	 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
 	 && ((pending_io->io_hdr.nexus.targ_port ==
 	      ooa_io->io_hdr.nexus.targ_port)
-	  && (pending_io->io_hdr.nexus.initid.id ==
-	      ooa_io->io_hdr.nexus.initid.id))
-	 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
+	  && (pending_io->io_hdr.nexus.initid ==
+	      ooa_io->io_hdr.nexus.initid))
+	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
+	      CTL_FLAG_STATUS_SENT)) == 0))
 		return (CTL_ACTION_OVERLAP_TAG);
 
 	/*
@@ -9765,8 +10830,18 @@
 	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
 		return (CTL_ACTION_BLOCK);
 
-	pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]];
-	ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]];
+	pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
+	KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT,
+	    ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p",
+	     __func__, pending_entry->seridx, pending_io->scsiio.cdb[0],
+	     pending_io->scsiio.cdb[1], pending_io));
+	ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);
+	if (ooa_entry->seridx == CTL_SERIDX_INVLD)
+		return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */
+	KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT,
+	    ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p",
+	     __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0],
+	     ooa_io->scsiio.cdb[1], ooa_io));
 
 	serialize_row = ctl_serialize_table[ooa_entry->seridx];
 
@@ -9773,20 +10848,33 @@
 	switch (serialize_row[pending_entry->seridx]) {
 	case CTL_SER_BLOCK:
 		return (CTL_ACTION_BLOCK);
-		break; /* NOTREACHED */
 	case CTL_SER_EXTENT:
-		return (ctl_extent_check(pending_io, ooa_io));
-		break; /* NOTREACHED */
+		return (ctl_extent_check(ooa_io, pending_io,
+		    (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
+	case CTL_SER_EXTENTOPT:
+		if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
+		    SCP_QUEUE_ALG_UNRESTRICTED)
+			return (ctl_extent_check(ooa_io, pending_io,
+			    (lun->be_lun &&
+			     lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
+		return (CTL_ACTION_PASS);
+	case CTL_SER_EXTENTSEQ:
+		if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
+			return (ctl_extent_check_seq(ooa_io, pending_io));
+		return (CTL_ACTION_PASS);
 	case CTL_SER_PASS:
 		return (CTL_ACTION_PASS);
-		break; /* NOTREACHED */
+	case CTL_SER_BLOCKOPT:
+		if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
+		    SCP_QUEUE_ALG_UNRESTRICTED)
+			return (CTL_ACTION_BLOCK);
+		return (CTL_ACTION_PASS);
 	case CTL_SER_SKIP:
 		return (CTL_ACTION_SKIP);
-		break;
 	default:
-		panic("invalid serialization value %d",
-		      serialize_row[pending_entry->seridx]);
-		break; /* NOTREACHED */
+		panic("%s: Invalid serialization value %d for %d => %d",
+		    __func__, serialize_row[pending_entry->seridx],
+		    pending_entry->seridx, ooa_entry->seridx);
 	}
 
 	return (CTL_ACTION_ERROR);
@@ -9795,7 +10883,6 @@
 /*
  * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
  * Assumptions:
- * - caller holds ctl_lock
  * - pending_io is generally either incoming, or on the blocked queue
  * - starting I/O is the I/O we want to start the check with.
  */
@@ -9806,6 +10893,8 @@
 	union ctl_io *ooa_io;
 	ctl_action action;
 
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+
 	/*
 	 * Run back along the OOA queue, starting with the current
 	 * blocked I/O and going through every I/O before it on the
@@ -9822,7 +10911,7 @@
 		 * of it in the queue.  It doesn't queue/dequeue
 		 * cur_blocked.
 		 */
-		action = ctl_check_for_blockage(pending_io, ooa_io);
+		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
 		switch (action) {
 		case CTL_ACTION_BLOCK:
 		case CTL_ACTION_OVERLAP:
@@ -9834,8 +10923,7 @@
 		case CTL_ACTION_PASS:
 			break;
 		default:
-			panic("invalid action %d", action);
-			break;  /* NOTREACHED */
+			panic("%s: Invalid action %d\n", __func__, action);
 		}
 	}
 
@@ -9846,13 +10934,15 @@
  * Assumptions:
  * - An I/O has just completed, and has been removed from the per-LUN OOA
  *   queue, so some items on the blocked queue may now be unblocked.
- * - The caller holds ctl_softc->ctl_lock
  */
 static int
 ctl_check_blocked(struct ctl_lun *lun)
 {
+	struct ctl_softc *softc = lun->ctl_softc;
 	union ctl_io *cur_blocked, *next_blocked;
 
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+
 	/*
 	 * Run forward from the head of the blocked queue, checking each
 	 * entry against the I/Os prior to it on the OOA queue to see if
@@ -9893,11 +10983,7 @@
 			break;
 		case CTL_ACTION_PASS:
 		case CTL_ACTION_SKIP: {
-			struct ctl_softc *softc;
-			struct ctl_cmd_entry *entry;
-			uint32_t initidx;
-			uint8_t opcode;
-			int isc_retval;
+			const struct ctl_cmd_entry *entry;
 
 			/*
 			 * The skip case shouldn't happen, this transaction
@@ -9913,7 +10999,8 @@
 				     blocked_links);
 			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
 
-			if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
+			if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
+			    (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){
 				/*
 				 * Need to send IO back to original side to
 				 * run
@@ -9920,25 +11007,17 @@
 				 */
 				union ctl_ha_msg msg_info;
 
+				cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
 				msg_info.hdr.original_sc =
 					cur_blocked->io_hdr.original_sc;
 				msg_info.hdr.serializing_sc = cur_blocked;
 				msg_info.hdr.msg_type = CTL_MSG_R2R;
-				if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-				     &msg_info, sizeof(msg_info), 0)) >
-				     CTL_HA_STATUS_SUCCESS) {
-					printf("CTL:Check Blocked error from "
-					       "ctl_ha_msg_send %d\n",
-					       isc_retval);
-				}
+				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+				    sizeof(msg_info.hdr), M_NOWAIT);
 				break;
 			}
-			opcode = cur_blocked->scsiio.cdb[0];
-			entry = &ctl_cmd_table[opcode];
-			softc = control_softc;
+			entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);
 
-			initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus);
-
 			/*
 			 * Check this I/O for LUN state changes that may
 			 * have happened while this command was blocked.
@@ -9947,32 +11026,13 @@
 			 * for any states that can be caused by SCSI
 			 * commands.
 			 */
-			if (ctl_scsiio_lun_check(softc, lun, entry,
+			if (ctl_scsiio_lun_check(lun, entry,
 						 &cur_blocked->scsiio) == 0) {
 				cur_blocked->io_hdr.flags |=
 				                      CTL_FLAG_IS_WAS_ON_RTR;
-				STAILQ_INSERT_TAIL(&lun->ctl_softc->rtr_queue,
-						   &cur_blocked->io_hdr, links);
-				/*
-				 * In the non CTL_DONE_THREAD case, we need
-				 * to wake up the work thread here.  When
-				 * we're processing completed requests from
-				 * the work thread context, we'll pop back
-				 * around and end up pulling things off the
-				 * RtR queue.  When we aren't processing
-				 * things from the work thread context,
-				 * though, we won't ever check the RtR queue.
-				 * So we need to wake up the thread to clear
-				 * things off the queue.  Otherwise this
-				 * transaction will just sit on the RtR queue
-				 * until a new I/O comes in.  (Which may or
-				 * may not happen...)
-				 */
-#ifndef CTL_DONE_THREAD
-				ctl_wakeup_thread();
-#endif
+				ctl_enqueue_rtr(cur_blocked);
 			} else
-				ctl_done_lock(cur_blocked, /*have_lock*/ 1);
+				ctl_done(cur_blocked);
 			break;
 		}
 		default:
@@ -10000,98 +11060,117 @@
  * careful attention to the placement of any new checks.
  */
 static int
-ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
-		     struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
+ctl_scsiio_lun_check(struct ctl_lun *lun,
+    const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
 {
+	struct ctl_softc *softc = lun->ctl_softc;
 	int retval;
+	uint32_t residx;
 
 	retval = 0;
 
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+
 	/*
-	 * If this shelf is a secondary shelf controller, we have to reject
-	 * any media access commands.
+	 * If this shelf is a secondary shelf controller, we may have to
+	 * reject some commands disallowed by HA mode and link state.
 	 */
-#if 0
-	/* No longer needed for HA */
-	if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0)
-	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) {
-		ctl_set_lun_standby(ctsio);
-		retval = 1;
-		goto bailout;
+	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
+		if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
+		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
+			ctl_set_lun_unavail(ctsio);
+			retval = 1;
+			goto bailout;
+		}
+		if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
+		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
+			ctl_set_lun_transit(ctsio);
+			retval = 1;
+			goto bailout;
+		}
+		if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
+		    (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
+			ctl_set_lun_standby(ctsio);
+			retval = 1;
+			goto bailout;
+		}
+
+		/* The rest of the checks are only done on the executing side */
+		if (softc->ha_mode == CTL_HA_MODE_XFER)
+			goto bailout;
 	}
-#endif
 
+	if (entry->pattern & CTL_LUN_PAT_WRITE) {
+		if (lun->be_lun &&
+		    lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
+			ctl_set_hw_write_protected(ctsio);
+			retval = 1;
+			goto bailout;
+		}
+		if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) {
+			ctl_set_sense(ctsio, /*current_error*/ 1,
+			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
+			    /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
+			retval = 1;
+			goto bailout;
+		}
+	}
+
 	/*
 	 * Check for a reservation conflict.  If this command isn't allowed
 	 * even on reserved LUNs, and if this initiator isn't the one who
 	 * reserved us, reject the command with a reservation conflict.
 	 */
+	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
 	if ((lun->flags & CTL_LUN_RESERVED)
 	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
-		if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
-		 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
-		 || (ctsio->io_hdr.nexus.targ_target.id !=
-		     lun->rsv_nexus.targ_target.id)) {
-			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
-			ctsio->io_hdr.status = CTL_SCSI_ERROR;
+		if (lun->res_idx != residx) {
+			ctl_set_reservation_conflict(ctsio);
 			retval = 1;
 			goto bailout;
 		}
 	}
 
-	if ( (lun->flags & CTL_LUN_PR_RESERVED)
-	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) {
-		uint32_t residx;
-
-		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+	if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
+	    (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
+		/* No reservation, or the command is allowed. */;
+	} else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
+	    (lun->pr_res_type == SPR_TYPE_WR_EX ||
+	     lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
+	     lun->pr_res_type == SPR_TYPE_WR_EX_AR)) {
+		/* The command is allowed for Write Exclusive resv. */;
+	} else {
 		/*
 		 * if we aren't registered or it's a res holder type
 		 * reservation and this isn't the res holder then set a
 		 * conflict.
-		 * NOTE: Commands which might be allowed on write exclusive
-		 * type reservations are checked in the particular command
-		 * for a conflict. Read and SSU are the only ones.
 		 */
-		if (!lun->per_res[residx].registered
-		 || (residx != lun->pr_res_idx && lun->res_type < 4)) {
-			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
-			ctsio->io_hdr.status = CTL_SCSI_ERROR;
+		if (ctl_get_prkey(lun, residx) == 0 ||
+		    (residx != lun->pr_res_idx && lun->pr_res_type < 4)) {
+			ctl_set_reservation_conflict(ctsio);
 			retval = 1;
 			goto bailout;
 		}
-
 	}
 
-	if ((lun->flags & CTL_LUN_OFFLINE)
-	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
-		ctl_set_lun_not_ready(ctsio);
+	if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) {
+		if (lun->flags & CTL_LUN_EJECTED)
+			ctl_set_lun_ejected(ctsio);
+		else if (lun->flags & CTL_LUN_NO_MEDIA) {
+			if (lun->flags & CTL_LUN_REMOVABLE)
+				ctl_set_lun_no_media(ctsio);
+			else
+				ctl_set_lun_int_reqd(ctsio);
+		} else if (lun->flags & CTL_LUN_STOPPED)
+			ctl_set_lun_stopped(ctsio);
+		else
+			goto bailout;
 		retval = 1;
 		goto bailout;
 	}
 
-	/*
-	 * If the LUN is stopped, see if this particular command is allowed
-	 * for a stopped lun.  Otherwise, reject it with 0x04,0x02.
-	 */
-	if ((lun->flags & CTL_LUN_STOPPED)
-	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
-		/* "Logical unit not ready, initializing cmd. required" */
-		ctl_set_lun_stopped(ctsio);
-		retval = 1;
-		goto bailout;
-	}
-
-	if ((lun->flags & CTL_LUN_INOPERABLE)
-	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
-		/* "Medium format corrupted" */
-		ctl_set_medium_format_corrupted(ctsio);
-		retval = 1;
-		goto bailout;
-	}
-
 bailout:
 	return (retval);
-
 }
 
 static void
@@ -10098,308 +11177,141 @@
 ctl_failover_io(union ctl_io *io, int have_lock)
 {
 	ctl_set_busy(&io->scsiio);
-	ctl_done_lock(io, have_lock);
+	ctl_done(io);
 }
 
 static void
-ctl_failover(void)
+ctl_failover_lun(union ctl_io *rio)
 {
+	struct ctl_softc *softc = CTL_SOFTC(rio);
 	struct ctl_lun *lun;
-	struct ctl_softc *ctl_softc;
-	union ctl_io *next_io, *pending_io;
-	union ctl_io *io;
-	int lun_idx;
-	int i;
+	struct ctl_io_hdr *io, *next_io;
+	uint32_t targ_lun;
 
-	ctl_softc = control_softc;
+	targ_lun = rio->io_hdr.nexus.targ_mapped_lun;
+	CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun));
 
-	mtx_lock(&ctl_softc->ctl_lock);
-	/*
-	 * Remove any cmds from the other SC from the rtr queue.  These
-	 * will obviously only be for LUNs for which we're the primary.
-	 * We can't send status or get/send data for these commands.
-	 * Since they haven't been executed yet, we can just remove them.
-	 * We'll either abort them or delete them below, depending on
-	 * which HA mode we're in.
-	 */
-	for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue);
-	     io != NULL; io = next_io) {
-		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
-		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
-			STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr,
-				      ctl_io_hdr, links);
+	/* Find and lock the LUN. */
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		return;
 	}
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
+	if (lun->flags & CTL_LUN_DISABLED) {
+		mtx_unlock(&lun->lun_lock);
+		return;
+	}
 
-	for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) {
-		lun = ctl_softc->ctl_luns[lun_idx];
-		if (lun==NULL)
-			continue;
-
-		/*
-		 * Processor LUNs are primary on both sides.
-		 * XXX will this always be true?
-		 */
-		if (lun->be_lun->lun_type == T_PROCESSOR)
-			continue;
-
-		if ((lun->flags & CTL_LUN_PRIMARY_SC)
-		 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
-			printf("FAILOVER: primary lun %d\n", lun_idx);
-		        /*
-			 * Remove all commands from the other SC. First from the
-			 * blocked queue then from the ooa queue. Once we have
-			 * removed them. Call ctl_check_blocked to see if there
-			 * is anything that can run.
-			 */
-			for (io = (union ctl_io *)TAILQ_FIRST(
-			     &lun->blocked_queue); io != NULL; io = next_io) {
-
-		        	next_io = (union ctl_io *)TAILQ_NEXT(
-				    &io->io_hdr, blocked_links);
-
-				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
-					TAILQ_REMOVE(&lun->blocked_queue,
-						     &io->io_hdr,blocked_links);
-					io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
-					TAILQ_REMOVE(&lun->ooa_queue,
-						     &io->io_hdr, ooa_links);
-
-					ctl_free_io_internal(io, 1);
+	if (softc->ha_mode == CTL_HA_MODE_XFER) {
+		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
+			/* We are master */
+			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+				if (io->flags & CTL_FLAG_IO_ACTIVE) {
+					io->flags |= CTL_FLAG_ABORT;
+					io->flags |= CTL_FLAG_FAILOVER;
+				} else { /* This can be only due to DATAMOVE */
+					io->msg_type = CTL_MSG_DATAMOVE_DONE;
+					io->flags &= ~CTL_FLAG_DMA_INPROG;
+					io->flags |= CTL_FLAG_IO_ACTIVE;
+					io->port_status = 31340;
+					ctl_enqueue_isc((union ctl_io *)io);
 				}
 			}
-
-			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
-	     		     io != NULL; io = next_io) {
-
-		        	next_io = (union ctl_io *)TAILQ_NEXT(
-				    &io->io_hdr, ooa_links);
-
-				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
-
-					TAILQ_REMOVE(&lun->ooa_queue,
-						&io->io_hdr,
-					     	ooa_links);
-
-					ctl_free_io_internal(io, 1);
-				}
-			}
-			ctl_check_blocked(lun);
-		} else if ((lun->flags & CTL_LUN_PRIMARY_SC)
-			&& (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
-
-			printf("FAILOVER: primary lun %d\n", lun_idx);
-			/*
-			 * Abort all commands from the other SC.  We can't
-			 * send status back for them now.  These should get
-			 * cleaned up when they are completed or come out
-			 * for a datamove operation.
-			 */
-			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
-	     		     io != NULL; io = next_io) {
-		        	next_io = (union ctl_io *)TAILQ_NEXT(
-					&io->io_hdr, ooa_links);
-
-				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
-					io->io_hdr.flags |= CTL_FLAG_ABORT;
-			}
-		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
-			&& (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
-
-			printf("FAILOVER: secondary lun %d\n", lun_idx);
-
-			lun->flags |= CTL_LUN_PRIMARY_SC;
-
-			/*
-			 * We send all I/O that was sent to this controller
-			 * and redirected to the other side back with
-			 * busy status, and have the initiator retry it.
-			 * Figuring out how much data has been transferred,
-			 * etc. and picking up where we left off would be 
-			 * very tricky.
-			 *
-			 * XXX KDM need to remove I/O from the blocked
-			 * queue as well!
-			 */
-			for (pending_io = (union ctl_io *)TAILQ_FIRST(
-			     &lun->ooa_queue); pending_io != NULL;
-			     pending_io = next_io) {
-
-				next_io =  (union ctl_io *)TAILQ_NEXT(
-					&pending_io->io_hdr, ooa_links);
-
-				pending_io->io_hdr.flags &=
-					~CTL_FLAG_SENT_2OTHER_SC;
-
-				if (pending_io->io_hdr.flags &
-				    CTL_FLAG_IO_ACTIVE) {
-					pending_io->io_hdr.flags |=
-						CTL_FLAG_FAILOVER;
+			/* We are slave */
+			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
+				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+				if (io->flags & CTL_FLAG_IO_ACTIVE) {
+					io->flags |= CTL_FLAG_FAILOVER;
 				} else {
-					ctl_set_busy(&pending_io->scsiio);
-					ctl_done_lock(pending_io,
-						      /*have_lock*/1);
+					ctl_set_busy(&((union ctl_io *)io)->
+					    scsiio);
+					ctl_done((union ctl_io *)io);
 				}
 			}
-
-			/*
-			 * Build Unit Attention
-			 */
-			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
-				lun->pending_sense[i].ua_pending |=
-				                     CTL_UA_ASYM_ACC_CHANGE;
+		}
+	} else { /* SERIALIZE modes */
+		TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
+		    next_io) {
+			/* We are master */
+			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+				TAILQ_REMOVE(&lun->blocked_queue, io,
+				    blocked_links);
+				io->flags &= ~CTL_FLAG_BLOCKED;
+				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
+				ctl_free_io((union ctl_io *)io);
 			}
-		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
-			&& (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
-			printf("FAILOVER: secondary lun %d\n", lun_idx);
-			/*
-			 * if the first io on the OOA is not on the RtR queue
-			 * add it.
-			 */
-			lun->flags |= CTL_LUN_PRIMARY_SC;
-
-			pending_io = (union ctl_io *)TAILQ_FIRST(
-			    &lun->ooa_queue);
-			if (pending_io==NULL) {
-				printf("Nothing on OOA queue\n");
-				continue;
+		}
+		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
+			/* We are master */
+			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
+				ctl_free_io((union ctl_io *)io);
 			}
-
-			pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
-			if ((pending_io->io_hdr.flags &
-			     CTL_FLAG_IS_WAS_ON_RTR) == 0) {
-				pending_io->io_hdr.flags |=
-				    CTL_FLAG_IS_WAS_ON_RTR;
-				STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
-						   &pending_io->io_hdr, links);
-			}
-#if 0
-			else
-			{
-				printf("Tag 0x%04x is running\n",
-				      pending_io->scsiio.tag_num);
-			}
-#endif
-
-			next_io = (union ctl_io *)TAILQ_NEXT(
-			    &pending_io->io_hdr, ooa_links);
-			for (pending_io=next_io; pending_io != NULL;
-			     pending_io = next_io) {
-				pending_io->io_hdr.flags &=
-				    ~CTL_FLAG_SENT_2OTHER_SC;
-				next_io = (union ctl_io *)TAILQ_NEXT(
-					&pending_io->io_hdr, ooa_links);
-				if (pending_io->io_hdr.flags &
-				    CTL_FLAG_IS_WAS_ON_RTR) {
-#if 0
-				        printf("Tag 0x%04x is running\n",
-				      		pending_io->scsiio.tag_num);
-#endif
-					continue;
+			/* We are slave */
+			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
+				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+				if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
+					ctl_set_busy(&((union ctl_io *)io)->
+					    scsiio);
+					ctl_done((union ctl_io *)io);
 				}
-
-				switch (ctl_check_ooa(lun, pending_io,
-			            (union ctl_io *)TAILQ_PREV(
-				    &pending_io->io_hdr, ctl_ooaq,
-				    ooa_links))) {
-
-				case CTL_ACTION_BLOCK:
-					TAILQ_INSERT_TAIL(&lun->blocked_queue,
-							  &pending_io->io_hdr,
-							  blocked_links);
-					pending_io->io_hdr.flags |=
-					    CTL_FLAG_BLOCKED;
-					break;
-				case CTL_ACTION_PASS:
-				case CTL_ACTION_SKIP:
-					pending_io->io_hdr.flags |=
-					    CTL_FLAG_IS_WAS_ON_RTR;
-					STAILQ_INSERT_TAIL(
-					    &ctl_softc->rtr_queue,
-					    &pending_io->io_hdr, links);
-					break;
-				case CTL_ACTION_OVERLAP:
-					ctl_set_overlapped_cmd(
-					    (struct ctl_scsiio *)pending_io);
-					ctl_done_lock(pending_io,
-						      /*have_lock*/ 1);
-					break;
-				case CTL_ACTION_OVERLAP_TAG:
-					ctl_set_overlapped_tag(
-					    (struct ctl_scsiio *)pending_io,
-					    pending_io->scsiio.tag_num & 0xff);
-					ctl_done_lock(pending_io,
-						      /*have_lock*/ 1);
-					break;
-				case CTL_ACTION_ERROR:
-				default:
-					ctl_set_internal_failure(
-						(struct ctl_scsiio *)pending_io,
-						0,  // sks_valid
-						0); //retry count
-					ctl_done_lock(pending_io,
-						      /*have_lock*/ 1);
-					break;
-				}
 			}
-
-			/*
-			 * Build Unit Attention
-			 */
-			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
-				lun->pending_sense[i].ua_pending |=
-				                     CTL_UA_ASYM_ACC_CHANGE;
-			}
-		} else {
-			panic("Unhandled HA mode failover, LUN flags = %#x, "
-			      "ha_mode = #%x", lun->flags, ctl_softc->ha_mode);
 		}
+		ctl_check_blocked(lun);
 	}
-	ctl_pause_rtr = 0;
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 }
 
 static int
-ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
+ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
 {
 	struct ctl_lun *lun;
-	struct ctl_cmd_entry *entry;
-	uint8_t opcode;
-	uint32_t initidx;
-	int retval;
+	const struct ctl_cmd_entry *entry;
+	uint32_t initidx, targ_lun;
+	int retval = 0;
 
-	retval = 0;
-
 	lun = NULL;
-
-	opcode = ctsio->cdb[0];
-
-	mtx_lock(&ctl_softc->ctl_lock);
-
-	if ((ctsio->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
-	 && (ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun] != NULL)) {
-		lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
+	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
+	if (targ_lun < CTL_MAX_LUNS)
+		lun = softc->ctl_luns[targ_lun];
+	if (lun) {
 		/*
 		 * If the LUN is invalid, pretend that it doesn't exist.
 		 * It will go away as soon as all pending I/O has been
 		 * completed.
 		 */
+		mtx_lock(&lun->lun_lock);
 		if (lun->flags & CTL_LUN_DISABLED) {
+			mtx_unlock(&lun->lun_lock);
 			lun = NULL;
-		} else {
-			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
-			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
-				lun->be_lun;
-			if (lun->be_lun->lun_type == T_PROCESSOR) {
-				ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
-			}
 		}
-	} else {
-		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
-		ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
 	}
+	CTL_LUN(ctsio) = lun;
+	if (lun) {
+		CTL_BACKEND_LUN(ctsio) = lun->be_lun;
 
-	entry = &ctl_cmd_table[opcode];
+		/*
+		 * Every I/O goes into the OOA queue for a particular LUN,
+		 * and stays there until completion.
+		 */
+#ifdef CTL_TIME_IO
+		if (TAILQ_EMPTY(&lun->ooa_queue))
+			lun->idle_time += getsbinuptime() - lun->last_busy;
+#endif
+		TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+	}
 
+	/* Get command entry and return error if it is unsupported. */
+	entry = ctl_validate_command(ctsio);
+	if (entry == NULL) {
+		if (lun)
+			mtx_unlock(&lun->lun_lock);
+		return (retval);
+	}
+
 	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
 	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
 
@@ -10411,51 +11323,26 @@
 	 * it on the rtr queue.
 	 */
 	if (lun == NULL) {
-		if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
-			goto queue_rtr;
+		if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
+			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+			ctl_enqueue_rtr((union ctl_io *)ctsio);
+			return (retval);
+		}
 
 		ctl_set_unsupported_lun(ctsio);
-		mtx_unlock(&ctl_softc->ctl_lock);
 		ctl_done((union ctl_io *)ctsio);
-		goto bailout;
+		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
+		return (retval);
 	} else {
 		/*
-		 * Every I/O goes into the OOA queue for a particular LUN, and
-		 * stays there until completion.
-		 */
-		TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
-
-		/*
 		 * Make sure we support this particular command on this LUN.
 		 * e.g., we don't support writes to the control LUN.
 		 */
-		switch (lun->be_lun->lun_type) {
-		case T_PROCESSOR:
-		 	if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
-			 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
-			      == 0)) {
-				ctl_set_invalid_opcode(ctsio);
-				mtx_unlock(&ctl_softc->ctl_lock);
-				ctl_done((union ctl_io *)ctsio);
-				goto bailout;
-			}
-			break;
-		case T_DIRECT:
-			if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0)
-			 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
-			      == 0)){
-				ctl_set_invalid_opcode(ctsio);
-				mtx_unlock(&ctl_softc->ctl_lock);
-				ctl_done((union ctl_io *)ctsio);
-				goto bailout;
-			}
-			break;
-		default:
-			printf("Unsupported CTL LUN type %d\n",
-			       lun->be_lun->lun_type);
-			panic("Unsupported CTL LUN type %d\n",
-			      lun->be_lun->lun_type);
-			break; /* NOTREACHED */
+		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
+			mtx_unlock(&lun->lun_lock);
+			ctl_set_invalid_opcode(ctsio);
+			ctl_done((union ctl_io *)ctsio);
+			return (retval);
 		}
 	}
 
@@ -10467,10 +11354,14 @@
 	 * this initiator, clear it, because it sent down a command other
 	 * than request sense.
 	 */
-	if ((opcode != REQUEST_SENSE)
-	 && (ctl_is_set(lun->have_ca, initidx)))
-		ctl_clear_mask(lun->have_ca, initidx);
+	if (ctsio->cdb[0] != REQUEST_SENSE) {
+		struct scsi_sense_data *ps;
 
+		ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
+		if (ps != NULL)
+			ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0;
+	}
+
 	/*
 	 * If the command has this flag set, it handles its own unit
 	 * attention reporting, we shouldn't do anything.  Otherwise we
@@ -10495,39 +11386,25 @@
 	 */
 	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
 		ctl_ua_type ua_type;
+		u_int sense_len = 0;
 
-		ua_type = lun->pending_sense[initidx].ua_pending;
+		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
+		    &sense_len, SSD_TYPE_NONE);
 		if (ua_type != CTL_UA_NONE) {
-			scsi_sense_data_type sense_format;
-
-			if (lun != NULL)
-				sense_format = (lun->flags &
-				    CTL_LUN_SENSE_DESC) ? SSD_TYPE_DESC :
-				    SSD_TYPE_FIXED;
-			else
-				sense_format = SSD_TYPE_FIXED;
-
-			ua_type = ctl_build_ua(ua_type, &ctsio->sense_data,
-					       sense_format);
-			if (ua_type != CTL_UA_NONE) {
-				ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
-				ctsio->io_hdr.status = CTL_SCSI_ERROR |
-						       CTL_AUTOSENSE;
-				ctsio->sense_len = SSD_FULL_SIZE;
-				lun->pending_sense[initidx].ua_pending &=
-					~ua_type;
-				mtx_unlock(&ctl_softc->ctl_lock);
-				ctl_done((union ctl_io *)ctsio);
-				goto bailout;
-			}
+			mtx_unlock(&lun->lun_lock);
+			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
+			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+			ctsio->sense_len = sense_len;
+			ctl_done((union ctl_io *)ctsio);
+			return (retval);
 		}
 	}
 
 
-	if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
-		mtx_unlock(&ctl_softc->ctl_lock);
+	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
+		mtx_unlock(&lun->lun_lock);
 		ctl_done((union ctl_io *)ctsio);
-		goto bailout;
+		return (retval);
 	}
 
 	/*
@@ -10539,45 +11416,33 @@
 	 * find it easily. Something similar will need be done on the other
 	 * side so when we are done we can find the copy.
 	 */
-	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
+	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+	    (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 &&
+	    (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) {
 		union ctl_ha_msg msg_info;
 		int isc_retval;
 
 		ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
+		ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+		mtx_unlock(&lun->lun_lock);
 
 		msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
 		msg_info.hdr.original_sc = (union ctl_io *)ctsio;
-#if 0
-		printf("1. ctsio %p\n", ctsio);
-#endif
 		msg_info.hdr.serializing_sc = NULL;
 		msg_info.hdr.nexus = ctsio->io_hdr.nexus;
 		msg_info.scsi.tag_num = ctsio->tag_num;
 		msg_info.scsi.tag_type = ctsio->tag_type;
+		msg_info.scsi.cdb_len = ctsio->cdb_len;
 		memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
 
-		ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
-
-		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-		    (void *)&msg_info, sizeof(msg_info), 0)) >
-		    CTL_HA_STATUS_SUCCESS) {
-			printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
-			       isc_retval);
-			printf("CTL:opcode is %x\n",opcode);
-		} else {
-#if 0
-			printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
-#endif
+		if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+		    sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data),
+		    M_WAITOK)) > CTL_HA_STATUS_SUCCESS) {
+			ctl_set_busy(ctsio);
+			ctl_done((union ctl_io *)ctsio);
+			return (retval);
 		}
-
-		/*
-		 * XXX KDM this I/O is off the incoming queue, but hasn't
-		 * been inserted on any other queue.  We may need to come
-		 * up with a holding queue while we wait for serialization
-		 * so that we have an idea of what we're waiting for from
-		 * the other side.
-		 */
-		goto bailout_unlock;
+		return (retval);
 	}
 
 	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
@@ -10587,59 +11452,127 @@
 		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
 		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
 				  blocked_links);
-		goto bailout_unlock;
-		break; /* NOTREACHED */
+		mtx_unlock(&lun->lun_lock);
+		return (retval);
 	case CTL_ACTION_PASS:
 	case CTL_ACTION_SKIP:
-		goto queue_rtr;
-		break; /* NOTREACHED */
+		ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+		mtx_unlock(&lun->lun_lock);
+		ctl_enqueue_rtr((union ctl_io *)ctsio);
+		break;
 	case CTL_ACTION_OVERLAP:
+		mtx_unlock(&lun->lun_lock);
 		ctl_set_overlapped_cmd(ctsio);
-		mtx_unlock(&ctl_softc->ctl_lock);
 		ctl_done((union ctl_io *)ctsio);
-		goto bailout;
-		break; /* NOTREACHED */
+		break;
 	case CTL_ACTION_OVERLAP_TAG:
+		mtx_unlock(&lun->lun_lock);
 		ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
-		mtx_unlock(&ctl_softc->ctl_lock);
 		ctl_done((union ctl_io *)ctsio);
-		goto bailout;
-		break; /* NOTREACHED */
+		break;
 	case CTL_ACTION_ERROR:
 	default:
+		mtx_unlock(&lun->lun_lock);
 		ctl_set_internal_failure(ctsio,
 					 /*sks_valid*/ 0,
 					 /*retry_count*/ 0);
-		mtx_unlock(&ctl_softc->ctl_lock);
 		ctl_done((union ctl_io *)ctsio);
-		goto bailout;
-		break; /* NOTREACHED */
+		break;
 	}
+	return (retval);
+}
 
-	goto bailout_unlock;
+const struct ctl_cmd_entry *
+ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
+{
+	const struct ctl_cmd_entry *entry;
+	int service_action;
 
-queue_rtr:
-	ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
-	STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, &ctsio->io_hdr, links);
+	entry = &ctl_cmd_table[ctsio->cdb[0]];
+	if (sa)
+		*sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
+	if (entry->flags & CTL_CMD_FLAG_SA5) {
+		service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
+		entry = &((const struct ctl_cmd_entry *)
+		    entry->execute)[service_action];
+	}
+	return (entry);
+}
 
-bailout_unlock:
-	mtx_unlock(&ctl_softc->ctl_lock);
+const struct ctl_cmd_entry *
+ctl_validate_command(struct ctl_scsiio *ctsio)
+{
+	const struct ctl_cmd_entry *entry;
+	int i, sa;
+	uint8_t diff;
 
-bailout:
-	return (retval);
+	entry = ctl_get_cmd_entry(ctsio, &sa);
+	if (entry->execute == NULL) {
+		if (sa)
+			ctl_set_invalid_field(ctsio,
+					      /*sks_valid*/ 1,
+					      /*command*/ 1,
+					      /*field*/ 1,
+					      /*bit_valid*/ 1,
+					      /*bit*/ 4);
+		else
+			ctl_set_invalid_opcode(ctsio);
+		ctl_done((union ctl_io *)ctsio);
+		return (NULL);
+	}
+	KASSERT(entry->length > 0,
+	    ("Length not defined for command 0x%02x/0x%02x",
+	     ctsio->cdb[0], ctsio->cdb[1]));
+	for (i = 1; i < entry->length; i++) {
+		diff = ctsio->cdb[i] & ~entry->usage[i - 1];
+		if (diff == 0)
+			continue;
+		ctl_set_invalid_field(ctsio,
+				      /*sks_valid*/ 1,
+				      /*command*/ 1,
+				      /*field*/ i,
+				      /*bit_valid*/ 1,
+				      /*bit*/ fls(diff) - 1);
+		ctl_done((union ctl_io *)ctsio);
+		return (NULL);
+	}
+	return (entry);
 }
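
ctl_validate_command() checks every CDB byte against a per-command usage
bitmap: any bit set outside the allowed mask is reported back to the initiator
with its field and bit position, the latter recovered with fls().  A minimal
illustration (mask values hypothetical; fls() from <strings.h> on BSD):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>

int main(void)
{
	uint8_t cdb1 = 0x13;	/* byte 1 of an incoming CDB */
	uint8_t usage = 0x03;	/* only bits 0-1 defined here */
	uint8_t diff = cdb1 & ~usage;

	if (diff != 0)		/* prints "invalid bit 4 in field 1" */
		printf("invalid bit %d in field 1\n", fls(diff) - 1);
	return (0);
}
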
 
 static int
+ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
+{
+
+	switch (lun_type) {
+	case T_DIRECT:
+		if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0)
+			return (0);
+		break;
+	case T_PROCESSOR:
+		if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
+			return (0);
+		break;
+	case T_CDROM:
+		if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0)
+			return (0);
+		break;
+	default:
+		return (0);
+	}
+	return (1);
+}
+
+static int
 ctl_scsiio(struct ctl_scsiio *ctsio)
 {
 	int retval;
-	struct ctl_cmd_entry *entry;
+	const struct ctl_cmd_entry *entry;
 
 	retval = CTL_RETVAL_COMPLETE;
 
 	CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
 
-	entry = &ctl_cmd_table[ctsio->cdb[0]];
+	entry = ctl_get_cmd_entry(ctsio, NULL);
 
 	/*
 	 * If this I/O has been aborted, just send it straight to
@@ -10660,45 +11593,42 @@
 	return (retval);
 }
 
-/*
- * Since we only implement one target right now, a bus reset simply resets
- * our single target.
- */
 static int
-ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io)
+ctl_target_reset(union ctl_io *io)
 {
-	return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET));
-}
-
-static int
-ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
-		 ctl_ua_type ua_type)
-{
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_port *port = CTL_PORT(io);
 	struct ctl_lun *lun;
-	int retval;
+	uint32_t initidx;
+	ctl_ua_type ua_type;
 
 	if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
 		union ctl_ha_msg msg_info;
 
-		io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
 		msg_info.hdr.nexus = io->io_hdr.nexus;
-		if (ua_type==CTL_UA_TARG_RESET)
-			msg_info.task.task_action = CTL_TASK_TARGET_RESET;
-		else
-			msg_info.task.task_action = CTL_TASK_BUS_RESET;
+		msg_info.task.task_action = io->taskio.task_action;
 		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
 		msg_info.hdr.original_sc = NULL;
 		msg_info.hdr.serializing_sc = NULL;
-		if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-		    (void *)&msg_info, sizeof(msg_info), 0)) {
-		}
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+		    sizeof(msg_info.task), M_WAITOK);
 	}
-	retval = 0;
 
-	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links)
-		retval += ctl_lun_reset(lun, io, ua_type);
-
-	return (retval);
+	initidx = ctl_get_initindex(&io->io_hdr.nexus);
+	if (io->taskio.task_action == CTL_TASK_TARGET_RESET)
+		ua_type = CTL_UA_TARG_RESET;
+	else
+		ua_type = CTL_UA_BUS_RESET;
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(lun, &softc->lun_list, links) {
+		if (port != NULL &&
+		    ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+			continue;
+		ctl_do_lun_reset(lun, initidx, ua_type);
+	}
+	mtx_unlock(&softc->ctl_lock);
+	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
+	return (0);
 }
 
 /*
@@ -10722,53 +11652,210 @@
  *
  * XXX KDM for now, we're setting unit attention for all initiators.
  */
-static int
-ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
+static void
+ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type)
 {
 	union ctl_io *xio;
+	int i;
+
+	mtx_lock(&lun->lun_lock);
+	/* Abort tasks. */
+	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
+	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
+		xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
+	}
+	/* Clear CA. */
+	for (i = 0; i < CTL_MAX_PORTS; i++) {
+		free(lun->pending_sense[i], M_CTL);
+		lun->pending_sense[i] = NULL;
+	}
+	/* Clear reservation. */
+	lun->flags &= ~CTL_LUN_RESERVED;
+	/* Clear prevent media removal. */
+	if (lun->prevent) {
+		for (i = 0; i < CTL_MAX_INITIATORS; i++)
+			ctl_clear_mask(lun->prevent, i);
+		lun->prevent_count = 0;
+	}
+	/* Clear TPC status */
+	ctl_tpc_lun_clear(lun, -1);
+	/* Establish UA. */
 #if 0
-	uint32_t initindex;
+	ctl_est_ua_all(lun, initidx, ua_type);
+#else
+	ctl_est_ua_all(lun, -1, ua_type);
 #endif
-	int i;
+	mtx_unlock(&lun->lun_lock);
+}
 
+static int
+ctl_lun_reset(union ctl_io *io)
+{
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_lun *lun;
+	uint32_t targ_lun, initidx;
+
+	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+	initidx = ctl_get_initindex(&io->io_hdr.nexus);
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
+		return (1);
+	}
+	ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET);
+	mtx_unlock(&softc->ctl_lock);
+	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
+
+	if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
+		union ctl_ha_msg msg_info;
+
+		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
+		msg_info.hdr.nexus = io->io_hdr.nexus;
+		msg_info.task.task_action = CTL_TASK_LUN_RESET;
+		msg_info.hdr.original_sc = NULL;
+		msg_info.hdr.serializing_sc = NULL;
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+		    sizeof(msg_info.task), M_WAITOK);
+	}
+	return (0);
+}
+
+static void
+ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
+    int other_sc)
+{
+	union ctl_io *xio;
+
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+
 	/*
-	 * Run through the OOA queue and abort each I/O.
+	 * Run through the OOA queue and attempt to find the given I/O.
+	 * The target port, initiator ID, tag type and tag number have to
+	 * match the values that we got from the initiator.  If we have an
+	 * untagged command to abort, simply abort the first untagged command
+	 * we come to.  We only allow one untagged command at a time of course.
 	 */
-#if 0
-	TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
-#endif
 	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
 	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
-		xio->io_hdr.flags |= CTL_FLAG_ABORT;
+
+		if ((targ_port == UINT32_MAX ||
+		     targ_port == xio->io_hdr.nexus.targ_port) &&
+		    (init_id == UINT32_MAX ||
+		     init_id == xio->io_hdr.nexus.initid)) {
+			if (targ_port != xio->io_hdr.nexus.targ_port ||
+			    init_id != xio->io_hdr.nexus.initid)
+				xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
+			xio->io_hdr.flags |= CTL_FLAG_ABORT;
+			if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
+				union ctl_ha_msg msg_info;
+
+				msg_info.hdr.nexus = xio->io_hdr.nexus;
+				msg_info.task.task_action = CTL_TASK_ABORT_TASK;
+				msg_info.task.tag_num = xio->scsiio.tag_num;
+				msg_info.task.tag_type = xio->scsiio.tag_type;
+				msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
+				msg_info.hdr.original_sc = NULL;
+				msg_info.hdr.serializing_sc = NULL;
+				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+				    sizeof(msg_info.task), M_NOWAIT);
+			}
+		}
 	}
+}
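
ctl_abort_tasks_lun() treats UINT32_MAX as a wildcard for both the target port and the initiator ID, which is how one helper serves ABORT TASK SET (exact nexus) and CLEAR TASK SET (match everything). The predicate, sketched standalone with a toy nexus type:

#include <stdbool.h>
#include <stdint.h>

struct toy_nexus {
	uint32_t targ_port;
	uint32_t initid;
};

static bool
nexus_matches(const struct toy_nexus *n, uint32_t port, uint32_t init)
{
	return ((port == UINT32_MAX || port == n->targ_port) &&
	    (init == UINT32_MAX || init == n->initid));
}
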
 
+static int
+ctl_abort_task_set(union ctl_io *io)
+{
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_lun *lun;
+	uint32_t targ_lun;
+
 	/*
-	 * This version sets unit attention for every
+	 * Look up the LUN.
 	 */
-#if 0
-	initindex = ctl_get_initindex(&io->io_hdr.nexus);
-	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
-		if (initindex == i)
-			continue;
-		lun->pending_sense[i].ua_pending |= ua_type;
+	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
+		return (1);
 	}
-#endif
 
-	/*
-	 * A reset (any kind, really) clears reservations established with
-	 * RESERVE/RELEASE.  It does not clear reservations established
-	 * with PERSISTENT RESERVE OUT, but we don't support that at the
-	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
-	 * reservations made with the RESERVE/RELEASE commands, because
-	 * those commands are obsolete in SPC-3.
-	 */
-	lun->flags &= ~CTL_LUN_RESERVED;
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
+	if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
+		ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
+		    io->io_hdr.nexus.initid,
+		    (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+	} else { /* CTL_TASK_CLEAR_TASK_SET */
+		ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
+		    (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+	}
+	mtx_unlock(&lun->lun_lock);
+	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
+	return (0);
+}
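
ctl_lun_reset(), ctl_abort_task_set() and the query handlers below all use the same lock handoff: look the LUN up under the softc lock, take the per-LUN lock, then drop the softc lock so per-LUN work does not serialize the whole target. A user-space sketch of the pattern, with pthreads standing in for mtx(9) and toy structures:

#include <pthread.h>
#include <stddef.h>

#define TOY_MAX_LUNS	64

struct toy_lun   { pthread_mutex_t lock; };
struct toy_softc { pthread_mutex_t lock; struct toy_lun *luns[TOY_MAX_LUNS]; };

static struct toy_lun *
lookup_and_lock(struct toy_softc *sc, unsigned idx)
{
	struct toy_lun *l;

	pthread_mutex_lock(&sc->lock);
	if (idx >= TOY_MAX_LUNS || (l = sc->luns[idx]) == NULL) {
		pthread_mutex_unlock(&sc->lock);
		return (NULL);			/* LUN does not exist */
	}
	pthread_mutex_lock(&l->lock);		/* take lun lock first... */
	pthread_mutex_unlock(&sc->lock);	/* ...then drop softc lock */
	return (l);				/* caller unlocks l->lock */
}
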
 
-	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
-		ctl_clear_mask(lun->have_ca, i);
-		lun->pending_sense[i].ua_pending |= ua_type;
+static void
+ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx,
+    ctl_ua_type ua_type)
+{
+	struct ctl_lun *lun;
+	struct scsi_sense_data *ps;
+	uint32_t p, i;
+
+	p = initidx / CTL_MAX_INIT_PER_PORT;
+	i = initidx % CTL_MAX_INIT_PER_PORT;
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(lun, &softc->lun_list, links) {
+		mtx_lock(&lun->lun_lock);
+		/* Abort tasks. */
+		ctl_abort_tasks_lun(lun, p, i, 1);
+		/* Clear CA. */
+		ps = lun->pending_sense[p];
+		if (ps != NULL)
+			ps[i].error_code = 0;
+		/* Clear reservation. */
+		if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
+			lun->flags &= ~CTL_LUN_RESERVED;
+		/* Clear prevent media removal. */
+		if (lun->prevent && ctl_is_set(lun->prevent, initidx)) {
+			ctl_clear_mask(lun->prevent, initidx);
+			lun->prevent_count--;
+		}
+		/* Clear TPC status */
+		ctl_tpc_lun_clear(lun, initidx);
+		/* Establish UA. */
+		ctl_est_ua(lun, initidx, ua_type);
+		mtx_unlock(&lun->lun_lock);
 	}
+	mtx_unlock(&softc->ctl_lock);
+}
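
ctl_i_t_nexus_loss() splits the flat initiator index into a port number and a per-port slot before walking the LUN list. The decomposition, sketched with an assumed per-port count (the real CTL_MAX_INIT_PER_PORT constant lives in the CTL headers):

#include <stdint.h>

#define TOY_INIT_PER_PORT	2048	/* value assumed for illustration */

static void
split_initidx(uint32_t initidx, uint32_t *port, uint32_t *slot)
{
	*port = initidx / TOY_INIT_PER_PORT;	/* which port */
	*slot = initidx % TOY_INIT_PER_PORT;	/* initiator on that port */
}
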
 
+static int
+ctl_i_t_nexus_reset(union ctl_io *io)
+{
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	uint32_t initidx;
+
+	if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
+		union ctl_ha_msg msg_info;
+
+		msg_info.hdr.nexus = io->io_hdr.nexus;
+		msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET;
+		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
+		msg_info.hdr.original_sc = NULL;
+		msg_info.hdr.serializing_sc = NULL;
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+		    sizeof(msg_info.task), M_WAITOK);
+	}
+
+	initidx = ctl_get_initindex(&io->io_hdr.nexus);
+	ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS);
+	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
 	return (0);
 }
 
@@ -10775,26 +11862,29 @@
 static int
 ctl_abort_task(union ctl_io *io)
 {
+	struct ctl_softc *softc = CTL_SOFTC(io);
 	union ctl_io *xio;
 	struct ctl_lun *lun;
-	struct ctl_softc *ctl_softc;
 #if 0
 	struct sbuf sb;
 	char printbuf[128];
 #endif
 	int found;
+	uint32_t targ_lun;
 
-	ctl_softc = control_softc;
 	found = 0;
 
 	/*
 	 * Look up the LUN.
 	 */
-	if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
-	 && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL))
-		lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
-	else
-		goto bailout;
+	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
+		return (1);
+	}
 
 #if 0
 	printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
@@ -10801,6 +11891,8 @@
 	       lun->lun, io->taskio.tag_num, io->taskio.tag_type);
 #endif
 
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
 	/*
 	 * Run through the OOA queue and attempt to find the given I/O.
 	 * The target port, initiator ID, tag type and tag number have to
@@ -10808,9 +11900,6 @@
 	 * untagged command to abort, simply abort the first untagged command
 	 * we come to.  We only allow one untagged command at a time of course.
 	 */
-#if 0
-	TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
-#endif
 	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
 	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
 #if 0
@@ -10824,77 +11913,66 @@
 			    (xio->io_hdr.flags &
 			    CTL_FLAG_DMA_INPROG) ? " DMA" : "",
 			    (xio->io_hdr.flags &
-			    CTL_FLAG_ABORT) ? " ABORT" : ""),
+			    CTL_FLAG_ABORT) ? " ABORT" : "",
 			    (xio->io_hdr.flags &
-			    CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "");
+			    CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
 		ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
 		sbuf_finish(&sb);
 		printf("%s\n", sbuf_data(&sb));
 #endif
 
-		if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port)
-		 && (xio->io_hdr.nexus.initid.id ==
-		     io->io_hdr.nexus.initid.id)) {
-			/*
-			 * If the abort says that the task is untagged, the
-			 * task in the queue must be untagged.  Otherwise,
-			 * we just check to see whether the tag numbers
-			 * match.  This is because the QLogic firmware
-			 * doesn't pass back the tag type in an abort
-			 * request.
-			 */
+		if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
+		 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
+		 || (xio->io_hdr.flags & CTL_FLAG_ABORT))
+			continue;
+
+		/*
+		 * If the abort says that the task is untagged, the
+		 * task in the queue must be untagged.  Otherwise,
+		 * we just check to see whether the tag numbers
+		 * match.  This is because the QLogic firmware
+		 * doesn't pass back the tag type in an abort
+		 * request.
+		 */
 #if 0
-			if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
-			  && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
-			 || (xio->scsiio.tag_num == io->taskio.tag_num)) {
+		if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
+		  && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
+		 || (xio->scsiio.tag_num == io->taskio.tag_num))
 #endif
-			/*
-			 * XXX KDM we've got problems with FC, because it
-			 * doesn't send down a tag type with aborts.  So we
-			 * can only really go by the tag number...
-			 * This may cause problems with parallel SCSI.
-			 * Need to figure that out!!
-			 */
-			if (xio->scsiio.tag_num == io->taskio.tag_num) {
-				xio->io_hdr.flags |= CTL_FLAG_ABORT;
-				found = 1;
-				if ((io->io_hdr.flags &
-				     CTL_FLAG_FROM_OTHER_SC) == 0 &&
-				    !(lun->flags & CTL_LUN_PRIMARY_SC)) {
-					union ctl_ha_msg msg_info;
+		/*
+		 * XXX KDM we've got problems with FC, because it
+		 * doesn't send down a tag type with aborts.  So we
+		 * can only really go by the tag number...
+		 * This may cause problems with parallel SCSI.
+		 * Need to figure that out!!
+		 */
+		if (xio->scsiio.tag_num == io->taskio.tag_num) {
+			xio->io_hdr.flags |= CTL_FLAG_ABORT;
+			found = 1;
+			if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
+			    !(lun->flags & CTL_LUN_PRIMARY_SC)) {
+				union ctl_ha_msg msg_info;
 
-					io->io_hdr.flags |=
-					                CTL_FLAG_SENT_2OTHER_SC;
-					msg_info.hdr.nexus = io->io_hdr.nexus;
-					msg_info.task.task_action =
-						CTL_TASK_ABORT_TASK;
-					msg_info.task.tag_num =
-						io->taskio.tag_num;
-					msg_info.task.tag_type =
-						io->taskio.tag_type;
-					msg_info.hdr.msg_type =
-						CTL_MSG_MANAGE_TASKS;
-					msg_info.hdr.original_sc = NULL;
-					msg_info.hdr.serializing_sc = NULL;
+				msg_info.hdr.nexus = io->io_hdr.nexus;
+				msg_info.task.task_action = CTL_TASK_ABORT_TASK;
+				msg_info.task.tag_num = io->taskio.tag_num;
+				msg_info.task.tag_type = io->taskio.tag_type;
+				msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
+				msg_info.hdr.original_sc = NULL;
+				msg_info.hdr.serializing_sc = NULL;
 #if 0
-					printf("Sent Abort to other side\n");
+				printf("Sent Abort to other side\n");
 #endif
-					if (CTL_HA_STATUS_SUCCESS !=
-					        ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-		    				(void *)&msg_info,
-						sizeof(msg_info), 0)) {
-					}
-				}
+				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+				    sizeof(msg_info.task), M_NOWAIT);
+			}
 #if 0
-				printf("ctl_abort_task: found I/O to abort\n");
+			printf("ctl_abort_task: found I/O to abort\n");
 #endif
-				break;
-			}
 		}
 	}
+	mtx_unlock(&lun->lun_lock);
 
-bailout:
-
 	if (found == 0) {
 		/*
 		 * This isn't really an error.  It's entirely possible for
@@ -10903,171 +11981,138 @@
 		 */
 #if 0
 		printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
-		       "%d:%d:%d:%d tag %d type %d\n",
-		       io->io_hdr.nexus.initid.id,
+		       "%u:%u:%u tag %d type %d\n",
+		       io->io_hdr.nexus.initid,
 		       io->io_hdr.nexus.targ_port,
-		       io->io_hdr.nexus.targ_target.id,
 		       io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
 		       io->taskio.tag_type);
 #endif
-		return (1);
-	} else
-		return (0);
+	}
+	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
+	return (0);
 }
 
-/*
- * Assumptions:  caller holds ctl_softc->ctl_lock
- *
- * This routine cannot block!  It must be callable from an interrupt
- * handler as well as from the work thread.
- */
-static void
-ctl_run_task_queue(struct ctl_softc *ctl_softc)
+static int
+ctl_query_task(union ctl_io *io, int task_set)
 {
-	union ctl_io *io, *next_io;
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	union ctl_io *xio;
+	struct ctl_lun *lun;
+	int found = 0;
+	uint32_t targ_lun;
 
-	CTL_DEBUG_PRINT(("ctl_run_task_queue\n"));
+	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
+		return (1);
+	}
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
+	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
+	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
 
-	for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->task_queue);
-	     io != NULL; io = next_io) {
-		int retval;
-		const char *task_desc;
+		if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
+		 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
+		 || (xio->io_hdr.flags & CTL_FLAG_ABORT))
+			continue;
 
-		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
+		if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
+			found = 1;
+			break;
+		}
+	}
+	mtx_unlock(&lun->lun_lock);
+	if (found)
+		io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
+	else
+		io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
+	return (0);
+}
 
-		retval = 0;
+static int
+ctl_query_async_event(union ctl_io *io)
+{
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_lun *lun;
+	ctl_ua_type ua;
+	uint32_t targ_lun, initidx;
 
-		switch (io->io_hdr.io_type) {
-		case CTL_IO_TASK: {
-			task_desc = ctl_scsi_task_string(&io->taskio);
-			if (task_desc != NULL) {
-#ifdef NEEDTOPORT
-				csevent_log(CSC_CTL | CSC_SHELF_SW |
-					    CTL_TASK_REPORT,
-					    csevent_LogType_Trace,
-					    csevent_Severity_Information,
-					    csevent_AlertLevel_Green,
-					    csevent_FRU_Firmware,
-					    csevent_FRU_Unknown,
-					    "CTL: received task: %s",task_desc);
-#endif
-			} else {
-#ifdef NEEDTOPORT
-				csevent_log(CSC_CTL | CSC_SHELF_SW |
-					    CTL_TASK_REPORT,
-					    csevent_LogType_Trace,
-					    csevent_Severity_Information,
-					    csevent_AlertLevel_Green,
-					    csevent_FRU_Firmware,
-					    csevent_FRU_Unknown,
-					    "CTL: received unknown task "
-					    "type: %d (%#x)",
-					    io->taskio.task_action,
-					    io->taskio.task_action);
-#endif
-			}
-			switch (io->taskio.task_action) {
-			case CTL_TASK_ABORT_TASK:
-				retval = ctl_abort_task(io);
-				break;
-			case CTL_TASK_ABORT_TASK_SET:
-				break;
-			case CTL_TASK_CLEAR_ACA:
-				break;
-			case CTL_TASK_CLEAR_TASK_SET:
-				break;
-			case CTL_TASK_LUN_RESET: {
-				struct ctl_lun *lun;
-				uint32_t targ_lun;
-				int retval;
+	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
+		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
+		return (1);
+	}
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
+	initidx = ctl_get_initindex(&io->io_hdr.nexus);
+	ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
+	mtx_unlock(&lun->lun_lock);
+	if (ua != CTL_UA_NONE)
+		io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
+	else
+		io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
+	return (0);
+}
 
-				targ_lun = io->io_hdr.nexus.targ_lun;
+static void
+ctl_run_task(union ctl_io *io)
+{
+	int retval = 1;
 
-				if ((targ_lun < CTL_MAX_LUNS)
-				 && (ctl_softc->ctl_luns[targ_lun] != NULL))
-					lun = ctl_softc->ctl_luns[targ_lun];
-				else {
-					retval = 1;
-					break;
-				}
-
-				if (!(io->io_hdr.flags &
-				    CTL_FLAG_FROM_OTHER_SC)) {
-					union ctl_ha_msg msg_info;
-
-					io->io_hdr.flags |=
-						CTL_FLAG_SENT_2OTHER_SC;
-					msg_info.hdr.msg_type =
-						CTL_MSG_MANAGE_TASKS;
-					msg_info.hdr.nexus = io->io_hdr.nexus;
-					msg_info.task.task_action =
-						CTL_TASK_LUN_RESET;
-					msg_info.hdr.original_sc = NULL;
-					msg_info.hdr.serializing_sc = NULL;
-					if (CTL_HA_STATUS_SUCCESS !=
-					    ctl_ha_msg_send(CTL_HA_CHAN_CTL,
-					    (void *)&msg_info,
-					    sizeof(msg_info), 0)) {
-					}
-				}
-
-				retval = ctl_lun_reset(lun, io,
-						       CTL_UA_LUN_RESET);
-				break;
-			}
-			case CTL_TASK_TARGET_RESET:
-				retval = ctl_target_reset(ctl_softc, io,
-							  CTL_UA_TARG_RESET);
-				break;
-			case CTL_TASK_BUS_RESET:
-				retval = ctl_bus_reset(ctl_softc, io);
-				break;
-			case CTL_TASK_PORT_LOGIN:
-				break;
-			case CTL_TASK_PORT_LOGOUT:
-				break;
-			default:
-				printf("ctl_run_task_queue: got unknown task "
-				       "management event %d\n",
-				       io->taskio.task_action);
-				break;
-			}
-			if (retval == 0)
-				io->io_hdr.status = CTL_SUCCESS;
-			else
-				io->io_hdr.status = CTL_ERROR;
-
-			STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
-				      ctl_io_hdr, links);
-			/*
-			 * This will queue this I/O to the done queue, but the
-			 * work thread won't be able to process it until we
-			 * return and the lock is released.
-			 */
-			ctl_done_lock(io, /*have_lock*/ 1);
-			break;
-		}
-		default: {
-
-			printf("%s: invalid I/O type %d msg %d cdb %x"
-			       " iptl: %ju:%d:%ju:%d tag 0x%04x\n",
-			       __func__, io->io_hdr.io_type,
-			       io->io_hdr.msg_type, io->scsiio.cdb[0],
-			       (uintmax_t)io->io_hdr.nexus.initid.id,
-			       io->io_hdr.nexus.targ_port,
-			       (uintmax_t)io->io_hdr.nexus.targ_target.id,
-			       io->io_hdr.nexus.targ_lun,
-			       (io->io_hdr.io_type == CTL_IO_TASK) ?
-			       io->taskio.tag_num : io->scsiio.tag_num);
-			STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
-				      ctl_io_hdr, links);
-			ctl_free_io_internal(io, 1);
-			break;
-		}
-		}
+	CTL_DEBUG_PRINT(("ctl_run_task\n"));
+	KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
+	    ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type));
+	io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
+	bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
+	switch (io->taskio.task_action) {
+	case CTL_TASK_ABORT_TASK:
+		retval = ctl_abort_task(io);
+		break;
+	case CTL_TASK_ABORT_TASK_SET:
+	case CTL_TASK_CLEAR_TASK_SET:
+		retval = ctl_abort_task_set(io);
+		break;
+	case CTL_TASK_CLEAR_ACA:
+		break;
+	case CTL_TASK_I_T_NEXUS_RESET:
+		retval = ctl_i_t_nexus_reset(io);
+		break;
+	case CTL_TASK_LUN_RESET:
+		retval = ctl_lun_reset(io);
+		break;
+	case CTL_TASK_TARGET_RESET:
+	case CTL_TASK_BUS_RESET:
+		retval = ctl_target_reset(io);
+		break;
+	case CTL_TASK_PORT_LOGIN:
+		break;
+	case CTL_TASK_PORT_LOGOUT:
+		break;
+	case CTL_TASK_QUERY_TASK:
+		retval = ctl_query_task(io, 0);
+		break;
+	case CTL_TASK_QUERY_TASK_SET:
+		retval = ctl_query_task(io, 1);
+		break;
+	case CTL_TASK_QUERY_ASYNC_EVENT:
+		retval = ctl_query_async_event(io);
+		break;
+	default:
+		printf("%s: got unknown task management event %d\n",
+		       __func__, io->taskio.task_action);
+		break;
 	}
-
-	ctl_softc->flags &= ~CTL_FLAG_TASK_PENDING;
+	if (retval == 0)
+		io->io_hdr.status = CTL_SUCCESS;
+	else
+		io->io_hdr.status = CTL_ERROR;
+	ctl_done(io);
 }
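
Note the shape of ctl_run_task(): task_status is preset to CTL_TASK_FUNCTION_NOT_SUPPORTED and each handler overwrites it, so unknown or unimplemented task actions fall through with the right answer. A toy equivalent of that preset-then-overwrite dispatch (hypothetical names):

enum toy_status { TOY_NOT_SUPPORTED, TOY_COMPLETE };

static enum toy_status
run_toy_task(int action)
{
	enum toy_status st = TOY_NOT_SUPPORTED;	/* pessimistic default */

	switch (action) {
	case 0:					/* a supported action */
		st = TOY_COMPLETE;		/* handler overwrites */
		break;
	default:				/* unknown: default stands */
		break;
	}
	return (st);
}
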
 
 /*
@@ -11077,84 +12122,72 @@
 static void
 ctl_handle_isc(union ctl_io *io)
 {
-	int free_io;
+	struct ctl_softc *softc = CTL_SOFTC(io);
 	struct ctl_lun *lun;
-	struct ctl_softc *ctl_softc;
+	const struct ctl_cmd_entry *entry;
+	uint32_t targ_lun;
 
-	ctl_softc = control_softc;
-
-	lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
-
+	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
 	switch (io->io_hdr.msg_type) {
 	case CTL_MSG_SERIALIZE:
-		free_io = ctl_serialize_other_sc_cmd(&io->scsiio,
-						     /*have_lock*/ 0);
+		ctl_serialize_other_sc_cmd(&io->scsiio);
 		break;
-	case CTL_MSG_R2R: {
-		uint8_t opcode;
-		struct ctl_cmd_entry *entry;
-
-		/*
-		 * This is only used in SER_ONLY mode.
-		 */
-		free_io = 0;
-		opcode = io->scsiio.cdb[0];
-		entry = &ctl_cmd_table[opcode];
-		mtx_lock(&ctl_softc->ctl_lock);
-		if (ctl_scsiio_lun_check(ctl_softc, lun,
-		    entry, (struct ctl_scsiio *)io) != 0) {
-			ctl_done_lock(io, /*have_lock*/ 1);
-			mtx_unlock(&ctl_softc->ctl_lock);
+	case CTL_MSG_R2R:		/* Only used in SER_ONLY mode. */
+		entry = ctl_get_cmd_entry(&io->scsiio, NULL);
+		if (targ_lun >= CTL_MAX_LUNS ||
+		    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+			ctl_done(io);
 			break;
 		}
+		mtx_lock(&lun->lun_lock);
+		if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
+			mtx_unlock(&lun->lun_lock);
+			ctl_done(io);
+			break;
+		}
 		io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
-		STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
-				   &io->io_hdr, links);
-		mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
+		ctl_enqueue_rtr(io);
 		break;
-	}
 	case CTL_MSG_FINISH_IO:
-		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
-			free_io = 0;
-			ctl_done_lock(io, /*have_lock*/ 0);
-		} else {
-			free_io = 1;
-			mtx_lock(&ctl_softc->ctl_lock);
-			TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
-				     ooa_links);
-			STAILQ_REMOVE(&ctl_softc->task_queue,
-				      &io->io_hdr, ctl_io_hdr, links);
-			ctl_check_blocked(lun);
-			mtx_unlock(&ctl_softc->ctl_lock);
+		if (softc->ha_mode == CTL_HA_MODE_XFER) {
+			ctl_done(io);
+			break;
 		}
+		if (targ_lun >= CTL_MAX_LUNS ||
+		    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+			ctl_free_io(io);
+			break;
+		}
+		mtx_lock(&lun->lun_lock);
+		TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
+		ctl_check_blocked(lun);
+		mtx_unlock(&lun->lun_lock);
+		ctl_free_io(io);
 		break;
 	case CTL_MSG_PERS_ACTION:
-		ctl_hndl_per_res_out_on_other_sc(
-			(union ctl_ha_msg *)&io->presio.pr_msg);
-		free_io = 1;
+		ctl_hndl_per_res_out_on_other_sc(io);
+		ctl_free_io(io);
 		break;
 	case CTL_MSG_BAD_JUJU:
-		free_io = 0;
-		ctl_done_lock(io, /*have_lock*/ 0);
+		ctl_done(io);
 		break;
-	case CTL_MSG_DATAMOVE:
-		/* Only used in XFER mode */
-		free_io = 0;
+	case CTL_MSG_DATAMOVE:		/* Only used in XFER mode */
 		ctl_datamove_remote(io);
 		break;
-	case CTL_MSG_DATAMOVE_DONE:
-		/* Only used in XFER mode */
-		free_io = 0;
+	case CTL_MSG_DATAMOVE_DONE:	/* Only used in XFER mode */
 		io->scsiio.be_move_done(io);
 		break;
+	case CTL_MSG_FAILOVER:
+		ctl_failover_lun(io);
+		ctl_free_io(io);
+		break;
 	default:
-		free_io = 1;
 		printf("%s: Invalid message type %d\n",
 		       __func__, io->io_hdr.msg_type);
+		ctl_free_io(io);
 		break;
 	}
-	if (free_io)
-		ctl_free_io_internal(io, 0);
 
 }
 
@@ -11166,9 +12199,8 @@
 static ctl_lun_error_pattern
 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
 {
-	struct ctl_cmd_entry *entry;
+	const struct ctl_cmd_entry *entry;
 	ctl_lun_error_pattern filtered_pattern, pattern;
-	uint8_t opcode;
 
 	pattern = desc->error_pattern;
 
@@ -11183,8 +12215,7 @@
 	if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
 		return (CTL_LUN_PAT_ANY);
 
-	opcode = ctsio->cdb[0];
-	entry = &ctl_cmd_table[opcode];
+	entry = ctl_get_cmd_entry(ctsio, NULL);
 
 	filtered_pattern = entry->pattern & pattern;
 
@@ -11206,7 +12237,7 @@
 	 */
 	if (filtered_pattern & CTL_LUN_PAT_RANGE) {
 		uint64_t lba1;
-		uint32_t len1;
+		uint64_t len1;
 		ctl_action action;
 		int retval;
 
@@ -11215,7 +12246,7 @@
 			return (CTL_LUN_PAT_NONE);
 
 		action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
-					      desc->lba_range.len);
+					      desc->lba_range.len, FALSE);
 		/*
 		 * A "pass" means that the LBA ranges don't overlap, so
 		 * this doesn't match the user's range criteria.
@@ -11227,14 +12258,13 @@
 	return (filtered_pattern);
 }
 
-/*
- * Called with the CTL lock held.
- */
 static void
 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
 {
 	struct ctl_error_desc *desc, *desc2;
 
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+
 	STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
 		ctl_lun_error_pattern pattern;
 		/*
@@ -11250,7 +12280,9 @@
 			ctl_set_aborted(&io->scsiio);
 			break;
 		case CTL_LUN_INJ_MEDIUM_ERR:
-			ctl_set_medium_error(&io->scsiio);
+			ctl_set_medium_error(&io->scsiio,
+			    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) !=
+			     CTL_FLAG_DATA_OUT);
 			break;
 		case CTL_LUN_INJ_UA:
 			/* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET
@@ -11264,8 +12296,8 @@
 			 * checks.
 			 */
 			bcopy(&desc->custom_sense, &io->scsiio.sense_data,
-			      ctl_min(sizeof(desc->custom_sense),
-				      sizeof(io->scsiio.sense_data)));
+			      MIN(sizeof(desc->custom_sense),
+				  sizeof(io->scsiio.sense_data)));
 			io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
 			io->scsiio.sense_len = SSD_FULL_SIZE;
 			io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
@@ -11304,16 +12336,18 @@
 }
 #endif /* CTL_IO_DELAY */
 
-/*
- * Assumption:  caller does NOT hold ctl_lock
- */
 void
 ctl_datamove(union ctl_io *io)
 {
 	void (*fe_datamove)(union ctl_io *io);
 
+	mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
+
 	CTL_DEBUG_PRINT(("ctl_datamove\n"));
 
+	/* No data transferred yet.  Frontend must update this when done. */
+	io->scsiio.kern_data_resid = io->scsiio.kern_data_len;
+
 #ifdef CTL_TIME_IO
 	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
 		char str[256];
@@ -11338,9 +12372,8 @@
 				    io->taskio.tag_num, io->taskio.tag_type);
 			break;
 		default:
-			printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
-			panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
-			break;
+			panic("%s: Invalid CTL I/O type %d\n",
+			    __func__, io->io_hdr.io_type);
 		}
 		sbuf_cat(&sb, path_str);
 		sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
@@ -11350,56 +12383,28 @@
 	}
 #endif /* CTL_TIME_IO */
 
-	mtx_lock(&control_softc->ctl_lock);
 #ifdef CTL_IO_DELAY
 	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
-		struct ctl_lun *lun;
-
-		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
 	} else {
 		struct ctl_lun *lun;
 
-		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+		lun = CTL_LUN(io);
 		if ((lun != NULL)
 		 && (lun->delay_info.datamove_delay > 0)) {
-			struct callout *callout;
 
-			callout = (struct callout *)&io->io_hdr.timer_bytes;
-			callout_init(callout, /*mpsafe*/ 1);
+			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
 			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
-			callout_reset(callout,
+			callout_reset(&io->io_hdr.delay_callout,
 				      lun->delay_info.datamove_delay * hz,
 				      ctl_datamove_timer_wakeup, io);
 			if (lun->delay_info.datamove_type ==
 			    CTL_DELAY_TYPE_ONESHOT)
 				lun->delay_info.datamove_delay = 0;
-			mtx_unlock(&control_softc->ctl_lock);
 			return;
 		}
 	}
 #endif
-	/*
-	 * If we have any pending task management commands, process them
-	 * first.  This is necessary to eliminate a race condition with the
-	 * FETD:
-	 *
-	 * - FETD submits a task management command, like an abort.
-	 * - Back end calls fe_datamove() to move the data for the aborted
-	 *   command.  The FETD can't really accept it, but if it did, it
-	 *   would end up transmitting data for a command that the initiator
-	 *   told us to abort.
-	 *
-	 * We close the race by processing all pending task management
-	 * commands here (we can't block!), and then check this I/O to see
-	 * if it has been aborted.  If so, return it to the back end with
-	 * bad status, so the back end can say return an error to the back end
-	 * and then when the back end returns an error, we can return the
-	 * aborted command to the FETD, so it can clean up its resources.
-	 */
-	if (control_softc->flags & CTL_FLAG_TASK_PENDING)
-		ctl_run_task_queue(control_softc);
 
 	/*
 	 * This command has been aborted.  Set the port status, so we fail
@@ -11406,14 +12411,11 @@
 	 * the data move.
 	 */
 	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
-		printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n",
-		       io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id,
+		printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n",
+		       io->scsiio.tag_num, io->io_hdr.nexus.initid,
 		       io->io_hdr.nexus.targ_port,
-		       (uintmax_t)io->io_hdr.nexus.targ_target.id,
 		       io->io_hdr.nexus.targ_lun);
-		io->io_hdr.status = CTL_CMD_ABORTED;
 		io->io_hdr.port_status = 31337;
-		mtx_unlock(&control_softc->ctl_lock);
 		/*
 		 * Note that the backend, in this case, will get the
 		 * callback in its context.  In other cases it may get
@@ -11423,176 +12425,14 @@
 		return;
 	}
 
-	/*
-	 * If we're in XFER mode and this I/O is from the other shelf
-	 * controller, we need to send the DMA to the other side to
-	 * actually transfer the data to/from the host.  In serialize only
-	 * mode the transfer happens below CTL and ctl_datamove() is only
-	 * called on the machine that originally received the I/O.
-	 */
-	if ((control_softc->ha_mode == CTL_HA_MODE_XFER)
-	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
-		union ctl_ha_msg msg;
-		uint32_t sg_entries_sent;
-		int do_sg_copy;
-		int i;
+	/* Don't confuse frontend with zero length data move. */
+	if (io->scsiio.kern_data_len == 0) {
+		io->scsiio.be_move_done(io);
+		return;
+	}
 
-		memset(&msg, 0, sizeof(msg));
-		msg.hdr.msg_type = CTL_MSG_DATAMOVE;
-		msg.hdr.original_sc = io->io_hdr.original_sc;
-		msg.hdr.serializing_sc = io;
-		msg.hdr.nexus = io->io_hdr.nexus;
-		msg.dt.flags = io->io_hdr.flags;
-		/*
-		 * We convert everything into a S/G list here.  We can't
-		 * pass by reference, only by value between controllers.
-		 * So we can't pass a pointer to the S/G list, only as many
-		 * S/G entries as we can fit in here.  If it's possible for
-		 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
-		 * then we need to break this up into multiple transfers.
-		 */
-		if (io->scsiio.kern_sg_entries == 0) {
-			msg.dt.kern_sg_entries = 1;
-			/*
-			 * If this is in cached memory, flush the cache
-			 * before we send the DMA request to the other
-			 * controller.  We want to do this in either the
-			 * read or the write case.  The read case is
-			 * straightforward.  In the write case, we want to
-			 * make sure nothing is in the local cache that
-			 * could overwrite the DMAed data.
-			 */
-			if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
-				/*
-				 * XXX KDM use bus_dmamap_sync() here.
-				 */
-			}
-
-			/*
-			 * Convert to a physical address if this is a
-			 * virtual address.
-			 */
-			if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
-				msg.dt.sg_list[0].addr =
-					io->scsiio.kern_data_ptr;
-			} else {
-				/*
-				 * XXX KDM use busdma here!
-				 */
-#if 0
-				msg.dt.sg_list[0].addr = (void *)
-					vtophys(io->scsiio.kern_data_ptr);
-#endif
-			}
-
-			msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
-			do_sg_copy = 0;
-		} else {
-			struct ctl_sg_entry *sgl;
-
-			do_sg_copy = 1;
-			msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
-			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
-			if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
-				/*
-				 * XXX KDM use bus_dmamap_sync() here.
-				 */
-			}
-		}
-
-		msg.dt.kern_data_len = io->scsiio.kern_data_len;
-		msg.dt.kern_total_len = io->scsiio.kern_total_len;
-		msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
-		msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
-		msg.dt.sg_sequence = 0;
-
-		/*
-		 * Loop until we've sent all of the S/G entries.  On the
-		 * other end, we'll recompose these S/G entries into one
-		 * contiguous list before passing it to the
-		 */
-		for (sg_entries_sent = 0; sg_entries_sent <
-		     msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
-			msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/
-				sizeof(msg.dt.sg_list[0])),
-				msg.dt.kern_sg_entries - sg_entries_sent);
-
-			if (do_sg_copy != 0) {
-				struct ctl_sg_entry *sgl;
-				int j;
-
-				sgl = (struct ctl_sg_entry *)
-					io->scsiio.kern_data_ptr;
-				/*
-				 * If this is in cached memory, flush the cache
-				 * before we send the DMA request to the other
-				 * controller.  We want to do this in either
-				 * the * read or the write case.  The read
-				 * case is straightforward.  In the write
-				 * case, we want to make sure nothing is
-				 * in the local cache that could overwrite
-				 * the DMAed data.
-				 */
-
-				for (i = sg_entries_sent, j = 0;
-				     i < msg.dt.cur_sg_entries; i++, j++) {
-					if ((io->io_hdr.flags &
-					     CTL_FLAG_NO_DATASYNC) == 0) {
-						/*
-						 * XXX KDM use bus_dmamap_sync()
-						 */
-					}
-					if ((io->io_hdr.flags &
-					     CTL_FLAG_BUS_ADDR) == 0) {
-						/*
-						 * XXX KDM use busdma.
-						 */
-#if 0
-						msg.dt.sg_list[j].addr =(void *)
-						       vtophys(sgl[i].addr);
-#endif
-					} else {
-						msg.dt.sg_list[j].addr =
-							sgl[i].addr;
-					}
-					msg.dt.sg_list[j].len = sgl[i].len;
-				}
-			}
-
-			sg_entries_sent += msg.dt.cur_sg_entries;
-			if (sg_entries_sent >= msg.dt.kern_sg_entries)
-				msg.dt.sg_last = 1;
-			else
-				msg.dt.sg_last = 0;
-
-			/*
-			 * XXX KDM drop and reacquire the lock here?
-			 */
-			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
-			    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
-				/*
-				 * XXX do something here.
-				 */
-			}
-
-			msg.dt.sent_sg_entries = sg_entries_sent;
-		}
-		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
-		if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
-			ctl_failover_io(io, /*have_lock*/ 1);
-
-	} else {
-
-		/*
-		 * Lookup the fe_datamove() function for this particular
-		 * front end.
-		 */
-		fe_datamove =
-		    control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
-		mtx_unlock(&control_softc->ctl_lock);
-
-		fe_datamove(io);
-	}
+	fe_datamove = CTL_PORT(io)->fe_datamove;
+	fe_datamove(io);
 }
 
 static void
@@ -11599,36 +12439,39 @@
 ctl_send_datamove_done(union ctl_io *io, int have_lock)
 {
 	union ctl_ha_msg msg;
-	int isc_status;
+#ifdef CTL_TIME_IO
+	struct bintime cur_bt;
+#endif
 
 	memset(&msg, 0, sizeof(msg));
-
 	msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
 	msg.hdr.original_sc = io;
 	msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
 	msg.hdr.nexus = io->io_hdr.nexus;
 	msg.hdr.status = io->io_hdr.status;
+	msg.scsi.kern_data_resid = io->scsiio.kern_data_resid;
 	msg.scsi.tag_num = io->scsiio.tag_num;
 	msg.scsi.tag_type = io->scsiio.tag_type;
 	msg.scsi.scsi_status = io->scsiio.scsi_status;
 	memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
-	       sizeof(io->scsiio.sense_data));
+	       io->scsiio.sense_len);
 	msg.scsi.sense_len = io->scsiio.sense_len;
-	msg.scsi.sense_residual = io->scsiio.sense_residual;
-	msg.scsi.fetd_status = io->io_hdr.port_status;
-	msg.scsi.residual = io->scsiio.residual;
+	msg.scsi.port_status = io->io_hdr.port_status;
 	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
-
 	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
 		ctl_failover_io(io, /*have_lock*/ have_lock);
 		return;
 	}
+	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+	    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
+	    msg.scsi.sense_len, M_WAITOK);
 
-	isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
-	if (isc_status > CTL_HA_STATUS_SUCCESS) {
-		/* XXX do something if this fails */
-	}
-
+#ifdef CTL_TIME_IO
+	getbinuptime(&cur_bt);
+	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
+	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
+#endif
+	io->io_hdr.num_dmas++;
 }
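
ctl_send_datamove_done() now trims the HA message to the bytes actually used: the fixed part of msg.scsi plus only sense_len bytes of the sense buffer, rather than the whole union. The size computation, sketched with a toy struct of the same shape:

#include <stddef.h>
#include <stdint.h>

struct toy_scsi_msg {
	uint32_t tag_num;
	uint8_t  scsi_status;
	uint8_t  sense_len;
	uint8_t  sense_data[252];	/* fixed buffer, partly used */
};

static size_t
wire_size(const struct toy_scsi_msg *m)
{
	/* whole struct minus the unused tail of sense_data */
	return (sizeof(*m) - sizeof(m->sense_data) + m->sense_len);
}
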
 
 /*
@@ -11639,6 +12482,7 @@
 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
 {
 	union ctl_io *io;
+	uint32_t i;
 
 	io = rq->context;
 
@@ -11652,15 +12496,13 @@
 
 	ctl_dt_req_free(rq);
 
+	for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+		free(io->io_hdr.local_sglist[i].addr, M_CTL);
+	free(io->io_hdr.remote_sglist, M_CTL);
+	io->io_hdr.remote_sglist = NULL;
+	io->io_hdr.local_sglist = NULL;
+
 	/*
-	 * In this case, we had to malloc the memory locally.  Free it.
-	 */
-	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
-		int i;
-		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
-			free(io->io_hdr.local_sglist[i].addr, M_CTL);
-	}
-	/*
 	 * The data is in local and remote memory, so now we need to send
 	 * status (good or bad) back to the other side.
 	 */
@@ -11676,11 +12518,8 @@
 {
 	int retval;
 
-	retval = 0;
-
 	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
 					  ctl_datamove_remote_write_cb);
-
 	return (retval);
 }
 
@@ -11709,12 +12548,8 @@
 	 */
 	io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
 
-	fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
-
+	fe_datamove = CTL_PORT(io)->fe_datamove;
 	fe_datamove(io);
-
-	return;
-
 }
 
 static int
@@ -11725,15 +12560,13 @@
 	char path_str[64];
 	struct sbuf sb;
 #endif
+	uint32_t i;
 
-	/*
-	 * In this case, we had to malloc the memory locally.  Free it.
-	 */
-	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
-		int i;
-		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
-			free(io->io_hdr.local_sglist[i].addr, M_CTL);
-	}
+	for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+		free(io->io_hdr.local_sglist[i].addr, M_CTL);
+	free(io->io_hdr.remote_sglist, M_CTL);
+	io->io_hdr.remote_sglist = NULL;
+	io->io_hdr.local_sglist = NULL;
 
 #if 0
 	scsi_path_string(io, path_str, sizeof(path_str));
@@ -11770,7 +12603,7 @@
 	io = rq->context;
 
 	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
-		printf("%s: ISC DMA read failed with error %d", __func__,
+		printf("%s: ISC DMA read failed with error %d\n", __func__,
 		       rq->ret);
 		ctl_set_internal_failure(&io->scsiio,
 					 /*sks_valid*/ 1,
@@ -11790,8 +12623,7 @@
 
 	/* XXX KDM add checks like the ones in ctl_datamove? */
 
-	fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
-
+	fe_datamove = CTL_PORT(io)->fe_datamove;
 	fe_datamove(io);
 }
 
@@ -11798,137 +12630,43 @@
 static int
 ctl_datamove_remote_sgl_setup(union ctl_io *io)
 {
-	struct ctl_sg_entry *local_sglist, *remote_sglist;
-	struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist;
-	struct ctl_softc *softc;
+	struct ctl_sg_entry *local_sglist;
+	uint32_t len_to_go;
 	int retval;
 	int i;
 
 	retval = 0;
-	softc = control_softc;
-
 	local_sglist = io->io_hdr.local_sglist;
-	local_dma_sglist = io->io_hdr.local_dma_sglist;
-	remote_sglist = io->io_hdr.remote_sglist;
-	remote_dma_sglist = io->io_hdr.remote_dma_sglist;
+	len_to_go = io->scsiio.kern_data_len;
 
-	if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) {
-		for (i = 0; i < io->scsiio.kern_sg_entries; i++) {
-			local_sglist[i].len = remote_sglist[i].len;
+	/*
+	 * The difficult thing here is that the size of the various
+	 * S/G segments may be different than the size from the
+	 * remote controller.  That'll make it harder when DMAing
+	 * the data back to the other side.
+	 */
+	for (i = 0; len_to_go > 0; i++) {
+		local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT);
+		local_sglist[i].addr =
+		    malloc(local_sglist[i].len, M_CTL, M_WAITOK);
 
-			/*
-			 * XXX Detect the situation where the RS-level I/O
-			 * redirector on the other side has already read the
-			 * data off of the AOR RS on this side, and
-			 * transferred it to remote (mirror) memory on the
-			 * other side.  Since we already have the data in
-			 * memory here, we just need to use it.
-			 *
-			 * XXX KDM this can probably be removed once we
-			 * get the cache device code in and take the
-			 * current AOR implementation out.
-			 */
-#ifdef NEEDTOPORT
-			if ((remote_sglist[i].addr >=
-			     (void *)vtophys(softc->mirr->addr))
-			 && (remote_sglist[i].addr <
-			     ((void *)vtophys(softc->mirr->addr) +
-			     CacheMirrorOffset))) {
-				local_sglist[i].addr = remote_sglist[i].addr -
-					CacheMirrorOffset;
-				if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
-				     CTL_FLAG_DATA_IN)
-					io->io_hdr.flags |= CTL_FLAG_REDIR_DONE;
-			} else {
-				local_sglist[i].addr = remote_sglist[i].addr +
-					CacheMirrorOffset;
-			}
-#endif
-#if 0
-			printf("%s: local %p, remote %p, len %d\n",
-			       __func__, local_sglist[i].addr,
-			       remote_sglist[i].addr, local_sglist[i].len);
-#endif
-		}
-	} else {
-		uint32_t len_to_go;
+		len_to_go -= local_sglist[i].len;
+	}
+	/*
+	 * Reset the number of S/G entries accordingly.  The original
+	 * number of S/G entries is available in rem_sg_entries.
+	 */
+	io->scsiio.kern_sg_entries = i;
 
-		/*
-		 * In this case, we don't have automatically allocated
-		 * memory for this I/O on this controller.  This typically
-		 * happens with internal CTL I/O -- e.g. inquiry, mode
-		 * sense, etc.  Anything coming from RAIDCore will have
-		 * a mirror area available.
-		 */
-		len_to_go = io->scsiio.kern_data_len;
-
-		/*
-		 * Clear the no datasync flag, we have to use malloced
-		 * buffers.
-		 */
-		io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC;
-
-		/*
-		 * The difficult thing here is that the size of the various
-		 * S/G segments may be different than the size from the
-		 * remote controller.  That'll make it harder when DMAing
-		 * the data back to the other side.
-		 */
-		for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
-		     sizeof(io->io_hdr.remote_sglist[0])) &&
-		     (len_to_go > 0); i++) {
-			local_sglist[i].len = ctl_min(len_to_go, 131072);
-			CTL_SIZE_8B(local_dma_sglist[i].len,
-				    local_sglist[i].len);
-			local_sglist[i].addr =
-				malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK);
-
-			local_dma_sglist[i].addr = local_sglist[i].addr;
-
-			if (local_sglist[i].addr == NULL) {
-				int j;
-
-				printf("malloc failed for %zd bytes!",
-				       local_dma_sglist[i].len);
-				for (j = 0; j < i; j++) {
-					free(local_sglist[j].addr, M_CTL);
-				}
-				ctl_set_internal_failure(&io->scsiio,
-							 /*sks_valid*/ 1,
-							 /*retry_count*/ 4857);
-				retval = 1;
-				goto bailout_error;
-				
-			}
-			/* XXX KDM do we need a sync here? */
-
-			len_to_go -= local_sglist[i].len;
-		}
-		/*
-		 * Reset the number of S/G entries accordingly.  The
-		 * original number of S/G entries is available in
-		 * rem_sg_entries.
-		 */
-		io->scsiio.kern_sg_entries = i;
-
 #if 0
-		printf("%s: kern_sg_entries = %d\n", __func__,
-		       io->scsiio.kern_sg_entries);
-		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
-			printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
-			       local_sglist[i].addr, local_sglist[i].len,
-			       local_dma_sglist[i].len);
+	printf("%s: kern_sg_entries = %d\n", __func__,
+	       io->scsiio.kern_sg_entries);
+	for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+		printf("%s: sg[%d] = %p, %lu\n", __func__, i,
+		       local_sglist[i].addr, local_sglist[i].len);
 #endif
-	}
 
-
 	return (retval);
-
-bailout_error:
-
-	ctl_send_datamove_done(io, /*have_lock*/ 0);
-
-	return (retval);
 }
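
ctl_datamove_remote_sgl_setup() now simply chops kern_data_len into CTL_HA_DATAMOVE_SEGMENT-sized pieces and allocates each one. The chunking loop as a standalone sketch (segment size assumed here, allocation failures ignored as M_WAITOK cannot fail in the kernel):

#include <stdlib.h>

#define TOY_SEGMENT	(128 * 1024)	/* stand-in for CTL_HA_DATAMOVE_SEGMENT */

struct toy_sg { void *addr; size_t len; };

static int
build_sglist(struct toy_sg *sg, size_t len_to_go)
{
	int i;

	for (i = 0; len_to_go > 0; i++) {
		sg[i].len = len_to_go < TOY_SEGMENT ? len_to_go : TOY_SEGMENT;
		sg[i].addr = malloc(sg[i].len);	/* M_WAITOK in the kernel */
		len_to_go -= sg[i].len;
	}
	return (i);	/* entries used; becomes kern_sg_entries */
}
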
 
 static int
@@ -11937,13 +12675,9 @@
 {
 	struct ctl_ha_dt_req *rq;
 	struct ctl_sg_entry *remote_sglist, *local_sglist;
-	struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
 	uint32_t local_used, remote_used, total_used;
-	int retval;
-	int i, j;
+	int i, j, isc_ret;
 
-	retval = 0;
-
 	rq = ctl_dt_req_alloc();
 
 	/*
@@ -11952,10 +12686,12 @@
 	 * failure.
 	 */
 	if ((rq == NULL)
-	 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE))
+	 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
+	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS))
 		ctl_set_busy(&io->scsiio);
 
-	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
+	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
+	    (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
 
 		if (rq != NULL)
 			ctl_dt_req_free(rq);
@@ -11968,26 +12704,15 @@
 
 		ctl_send_datamove_done(io, /*have_lock*/ 0);
 
-		retval = 1;
-
-		goto bailout;
+		return (1);
 	}
 
 	local_sglist = io->io_hdr.local_sglist;
-	local_dma_sglist = io->io_hdr.local_dma_sglist;
 	remote_sglist = io->io_hdr.remote_sglist;
-	remote_dma_sglist = io->io_hdr.remote_dma_sglist;
 	local_used = 0;
 	remote_used = 0;
 	total_used = 0;
 
-	if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
-		rq->ret = CTL_HA_STATUS_SUCCESS;
-		rq->context = io;
-		callback(rq);
-		goto bailout;
-	}
-
 	/*
 	 * Pull/push the data over the wire from/to the other controller.
 	 * This takes into account the possibility that the local and
@@ -11998,12 +12723,11 @@
 	 * both the local and remote sglists is identical.  Otherwise, we've
 	 * essentially got a coding error of some sort.
 	 */
+	isc_ret = CTL_HA_STATUS_SUCCESS;
 	for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
-		int isc_ret;
-		uint32_t cur_len, dma_length;
+		uint32_t cur_len;
 		uint8_t *tmp_ptr;
 
-		rq->id = CTL_HA_DATA_CTL;
 		rq->command = command;
 		rq->context = io;
 
@@ -12013,54 +12737,25 @@
 		 * also have enough slack left over at the end, though,
 		 * to round up to the next 8 byte boundary.
 		 */
-		cur_len = ctl_min(local_sglist[i].len - local_used,
-				  remote_sglist[j].len - remote_used);
+		cur_len = MIN(local_sglist[i].len - local_used,
+			      remote_sglist[j].len - remote_used);
+		rq->size = cur_len;
 
-		/*
-		 * In this case, we have a size issue and need to decrease
-		 * the size, except in the case where we actually have less
-		 * than 8 bytes left.  In that case, we need to increase
-		 * the DMA length to get the last bit.
-		 */
-		if ((cur_len & 0x7) != 0) {
-			if (cur_len > 0x7) {
-				cur_len = cur_len - (cur_len & 0x7);
-				dma_length = cur_len;
-			} else {
-				CTL_SIZE_8B(dma_length, cur_len);
-			}
-
-		} else
-			dma_length = cur_len;
-
-		/*
-		 * If we had to allocate memory for this I/O, instead of using
-		 * the non-cached mirror memory, we'll need to flush the cache
-		 * before trying to DMA to the other controller.
-		 *
-		 * We could end up doing this multiple times for the same
-		 * segment if we have a larger local segment than remote
-		 * segment.  That shouldn't be an issue.
-		 */
-		if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
-			/*
-			 * XXX KDM use bus_dmamap_sync() here.
-			 */
-		}
-
-		rq->size = dma_length;
-
 		tmp_ptr = (uint8_t *)local_sglist[i].addr;
 		tmp_ptr += local_used;
 
+#if 0
 		/* Use physical addresses when talking to ISC hardware */
 		if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
 			/* XXX KDM use busdma */
-#if 0
 			rq->local = vtophys(tmp_ptr);
-#endif
 		} else
 			rq->local = tmp_ptr;
+#else
+		KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
+		    ("HA does not support BUS_ADDR"));
+		rq->local = tmp_ptr;
+#endif
 
 		tmp_ptr = (uint8_t *)remote_sglist[j].addr;
 		tmp_ptr += remote_used;
@@ -12084,40 +12779,22 @@
 		if (total_used >= io->scsiio.kern_data_len)
 			rq->callback = callback;
 
-		if ((rq->size & 0x7) != 0) {
-			printf("%s: warning: size %d is not on 8b boundary\n",
-			       __func__, rq->size);
-		}
-		if (((uintptr_t)rq->local & 0x7) != 0) {
-			printf("%s: warning: local %p not on 8b boundary\n",
-			       __func__, rq->local);
-		}
-		if (((uintptr_t)rq->remote & 0x7) != 0) {
-			printf("%s: warning: remote %p not on 8b boundary\n",
-			       __func__, rq->local);
-		}
 #if 0
-		printf("%s: %s: local %#x remote %#x size %d\n", __func__,
+		printf("%s: %s: local %p remote %p size %d\n", __func__,
 		       (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
 		       rq->local, rq->remote, rq->size);
 #endif
 
 		isc_ret = ctl_dt_single(rq);
-		if (isc_ret == CTL_HA_STATUS_WAIT)
-			continue;
-
-		if (isc_ret == CTL_HA_STATUS_DISCONNECT) {
-			rq->ret = CTL_HA_STATUS_SUCCESS;
-		} else {
-			rq->ret = isc_ret;
-		}
+		if (isc_ret > CTL_HA_STATUS_SUCCESS)
+			break;
+	}
+	if (isc_ret != CTL_HA_STATUS_WAIT) {
+		rq->ret = isc_ret;
 		callback(rq);
-		goto bailout;
 	}
 
-bailout:
-	return (retval);
-
+	return (0);
 }
 
 static void
@@ -12124,7 +12801,7 @@
 ctl_datamove_remote_read(union ctl_io *io)
 {
 	int retval;
-	int i;
+	uint32_t i;
 
 	/*
 	 * This will send an error to the other controller in the case of a
@@ -12136,8 +12813,7 @@
 
 	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
 					  ctl_datamove_remote_read_cb);
-	if ((retval != 0)
-	 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) {
+	if (retval != 0) {
 		/*
 		 * Make sure we free memory if there was an error..  The
 		 * ctl_datamove_remote_xfer() function will send the
@@ -12146,9 +12822,10 @@
 		 */
 		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
 			free(io->io_hdr.local_sglist[i].addr, M_CTL);
+		free(io->io_hdr.remote_sglist, M_CTL);
+		io->io_hdr.remote_sglist = NULL;
+		io->io_hdr.local_sglist = NULL;
 	}
-
-	return;
 }
 
 /*
@@ -12157,111 +12834,56 @@
  * first.  Once that is complete, the data gets DMAed into the remote
  * controller's memory.  For reads, we DMA from the remote controller's
  * memory into our memory first, and then move it out to the FETD.
- *
- * Should be called without the ctl_lock held.
  */
 static void
 ctl_datamove_remote(union ctl_io *io)
 {
-	struct ctl_softc *softc;
 
-	softc = control_softc;
+	mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
 
+	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
+		ctl_failover_io(io, /*have_lock*/ 0);
+		return;
+	}
+
 	/*
 	 * Note that we look for an aborted I/O here, but don't do some of
-	 * the other checks that ctl_datamove() normally does.  We don't
-	 * need to run the task queue, because this I/O is on the ISC
-	 * queue, which is executed by the work thread after the task queue.
+	 * the other checks that ctl_datamove() normally does.
 	 * We don't need to run the datamove delay code, since that should
 	 * have been done if need be on the other controller.
 	 */
-	mtx_lock(&softc->ctl_lock);
-
 	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
-
-		printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
-		       io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
+		printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__,
+		       io->scsiio.tag_num, io->io_hdr.nexus.initid,
 		       io->io_hdr.nexus.targ_port,
-		       io->io_hdr.nexus.targ_target.id,
 		       io->io_hdr.nexus.targ_lun);
-		io->io_hdr.status = CTL_CMD_ABORTED;
 		io->io_hdr.port_status = 31338;
-
-		mtx_unlock(&softc->ctl_lock);
-
 		ctl_send_datamove_done(io, /*have_lock*/ 0);
-
 		return;
 	}
 
-	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
-		mtx_unlock(&softc->ctl_lock);
+	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
 		ctl_datamove_remote_write(io);
-	} else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
-		mtx_unlock(&softc->ctl_lock);
+	else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
 		ctl_datamove_remote_read(io);
-	} else {
-		union ctl_ha_msg msg;
-		struct scsi_sense_data *sense;
-		uint8_t sks[3];
-		int retry_count;
-
-		memset(&msg, 0, sizeof(msg));
-
-		msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
-		msg.hdr.status = CTL_SCSI_ERROR;
-		msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
-
-		retry_count = 4243;
-
-		sense = &msg.scsi.sense_data;
-		sks[0] = SSD_SCS_VALID;
-		sks[1] = (retry_count >> 8) & 0xff;
-		sks[2] = retry_count & 0xff;
-
-		/* "Internal target failure" */
-		scsi_set_sense_data(sense,
-				    /*sense_format*/ SSD_TYPE_NONE,
-				    /*current_error*/ 1,
-				    /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
-				    /*asc*/ 0x44,
-				    /*ascq*/ 0x00,
-				    /*type*/ SSD_ELEM_SKS,
-				    /*size*/ sizeof(sks),
-				    /*data*/ sks,
-				    SSD_ELEM_NONE);
-
-		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
-		if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
-			ctl_failover_io(io, /*have_lock*/ 1);
-			mtx_unlock(&softc->ctl_lock);
-			return;
-		}
-
-		mtx_unlock(&softc->ctl_lock);
-
-		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
-		    CTL_HA_STATUS_SUCCESS) {
-			/* XXX KDM what to do if this fails? */
-		}
-		return;
+	else {
+		io->io_hdr.port_status = 31339;
+		ctl_send_datamove_done(io, /*have_lock*/ 0);
 	}
-	
 }
 
-static int
-ctl_process_done(union ctl_io *io, int have_lock)
+static void
+ctl_process_done(union ctl_io *io)
 {
-	struct ctl_lun *lun;
-	struct ctl_softc *ctl_softc;
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_port *port = CTL_PORT(io);
+	struct ctl_lun *lun = CTL_LUN(io);
 	void (*fe_done)(union ctl_io *io);
-	uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);
+	union ctl_ha_msg msg;
 
 	CTL_DEBUG_PRINT(("ctl_process_done\n"));
+	fe_done = port->fe_done;
 
-	fe_done =
-	    control_softc->ctl_ports[targ_port]->fe_done;
-
 #ifdef CTL_TIME_IO
 	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
 		char str[256];
@@ -12286,9 +12908,8 @@
 				    io->taskio.tag_num, io->taskio.tag_type);
 			break;
 		default:
-			printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
-			panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
-			break;
+			panic("%s: Invalid CTL I/O type %d\n",
+			    __func__, io->io_hdr.io_type);
 		}
 		sbuf_cat(&sb, path_str);
 		sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
@@ -12302,42 +12923,58 @@
 	case CTL_IO_SCSI:
 		break;
 	case CTL_IO_TASK:
-		ctl_io_error_print(io, NULL);
-		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
-			ctl_free_io_internal(io, /*have_lock*/ 0);
-		else
-			fe_done(io);
-		return (CTL_RETVAL_COMPLETE);
-		break;
+		if (ctl_debug & CTL_DEBUG_INFO)
+			ctl_io_error_print(io, NULL);
+		fe_done(io);
+		return;
 	default:
-		printf("ctl_process_done: invalid io type %d\n",
-		       io->io_hdr.io_type);
-		panic("ctl_process_done: invalid io type %d\n",
-		      io->io_hdr.io_type);
-		break; /* NOTREACHED */
+		panic("%s: Invalid CTL I/O type %d\n",
+		    __func__, io->io_hdr.io_type);
 	}
 
-	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
 	if (lun == NULL) {
 		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
-				 io->io_hdr.nexus.targ_lun));
-		fe_done(io);
+				 io->io_hdr.nexus.targ_mapped_lun));
 		goto bailout;
 	}
-	ctl_softc = lun->ctl_softc;
 
+	mtx_lock(&lun->lun_lock);
+
 	/*
-	 * Remove this from the OOA queue.
+	 * Check to see if we have any informational exception and status
+	 * of this command can be modified to report it in form of either
+	 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field.
 	 */
-	if (have_lock == 0)
-		mtx_lock(&ctl_softc->ctl_lock);
+	if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
+	    io->io_hdr.status == CTL_SUCCESS &&
+	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
+		uint8_t mrie = lun->MODE_IE.mrie;
+		uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
+		    (lun->MODE_VER.byte3 & SMS_VER_PER));
+		if (((mrie == SIEP_MRIE_REC_COND && per) ||
+		     mrie == SIEP_MRIE_REC_UNCOND ||
+		     mrie == SIEP_MRIE_NO_SENSE) &&
+		    (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
+		     CTL_CMD_FLAG_NO_SENSE) == 0) {
+			ctl_set_sense(&io->scsiio,
+			      /*current_error*/ 1,
+			      /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
+			        SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
+			      /*asc*/ lun->ie_asc,
+			      /*ascq*/ lun->ie_ascq,
+			      SSD_ELEM_NONE);
+			lun->ie_reported = 1;
+		}
+	} else if (lun->ie_reported < 0)
+		lun->ie_reported = 0;
 
 	/*
 	 * Check to see if we have any errors to inject here.  We only
 	 * inject errors for commands that don't already have errors set.
 	 */
-	if ((STAILQ_FIRST(&lun->error_list) != NULL)
-	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))
+	if (!STAILQ_EMPTY(&lun->error_list) &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
+	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
 		ctl_inject_error(lun, io);
 
 	/*
@@ -12346,135 +12983,66 @@
 	 *
 	 * XXX KDM should we also track I/O latency?
 	 */
-	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
-		uint32_t blocksize;
+	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
+	    io->io_hdr.io_type == CTL_IO_SCSI) {
+		int type;
 #ifdef CTL_TIME_IO
-		struct bintime cur_bt;
+		struct bintime bt;
+
+		getbinuptime(&bt);
+		bintime_sub(&bt, &io->io_hdr.start_bt);
 #endif
-
-		if ((lun->be_lun != NULL)
-		 && (lun->be_lun->blocksize != 0))
-			blocksize = lun->be_lun->blocksize;
+		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+		    CTL_FLAG_DATA_IN)
+			type = CTL_STATS_READ;
+		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+		    CTL_FLAG_DATA_OUT)
+			type = CTL_STATS_WRITE;
 		else
-			blocksize = 512;
+			type = CTL_STATS_NO_IO;
 
-		switch (io->io_hdr.io_type) {
-		case CTL_IO_SCSI: {
-			int isread;
-			struct ctl_lba_len lbalen;
-
-			isread = 0;
-			switch (io->scsiio.cdb[0]) {
-			case READ_6:
-			case READ_10:
-			case READ_12:
-			case READ_16:
-				isread = 1;
-				/* FALLTHROUGH */
-			case WRITE_6:
-			case WRITE_10:
-			case WRITE_12:
-			case WRITE_16:
-			case WRITE_VERIFY_10:
-			case WRITE_VERIFY_12:
-			case WRITE_VERIFY_16:
-				memcpy(&lbalen, io->io_hdr.ctl_private[
-				       CTL_PRIV_LBA_LEN].bytes, sizeof(lbalen));
-
-				if (isread) {
-					lun->stats.ports[targ_port].bytes[CTL_STATS_READ] +=
-						lbalen.len * blocksize;
-					lun->stats.ports[targ_port].operations[CTL_STATS_READ]++;
-
+#ifdef CTL_LEGACY_STATS
+		uint32_t targ_port = port->targ_port;
+		lun->legacy_stats.ports[targ_port].bytes[type] +=
+		    io->scsiio.kern_total_len;
+		lun->legacy_stats.ports[targ_port].operations[type] ++;
+		lun->legacy_stats.ports[targ_port].num_dmas[type] +=
+		    io->io_hdr.num_dmas;
 #ifdef CTL_TIME_IO
-					bintime_add(
-					   &lun->stats.ports[targ_port].dma_time[CTL_STATS_READ],
-					   &io->io_hdr.dma_bt);
-					lun->stats.ports[targ_port].num_dmas[CTL_STATS_READ] +=
-						io->io_hdr.num_dmas;
-					getbintime(&cur_bt);
-					bintime_sub(&cur_bt,
-						    &io->io_hdr.start_bt);
-
-					bintime_add(
-					    &lun->stats.ports[targ_port].time[CTL_STATS_READ],
-					    &cur_bt);
-
-#if 0
-					cs_prof_gettime(&cur_ticks);
-					lun->stats.time[CTL_STATS_READ] +=
-						cur_ticks -
-						io->io_hdr.start_ticks;
+		bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type],
+		   &io->io_hdr.dma_bt);
+		bintime_add(&lun->legacy_stats.ports[targ_port].time[type],
+		    &bt);
 #endif
-#if 0
-					lun->stats.time[CTL_STATS_READ] +=
-						jiffies - io->io_hdr.start_time;
-#endif
-#endif /* CTL_TIME_IO */
-				} else {
-					lun->stats.ports[targ_port].bytes[CTL_STATS_WRITE] +=
-						lbalen.len * blocksize;
-					lun->stats.ports[targ_port].operations[
-						CTL_STATS_WRITE]++;
+#endif /* CTL_LEGACY_STATS */
 
+		lun->stats.bytes[type] += io->scsiio.kern_total_len;
+		lun->stats.operations[type] ++;
+		lun->stats.dmas[type] += io->io_hdr.num_dmas;
 #ifdef CTL_TIME_IO
-					bintime_add(
-					  &lun->stats.ports[targ_port].dma_time[CTL_STATS_WRITE],
-					  &io->io_hdr.dma_bt);
-					lun->stats.ports[targ_port].num_dmas[CTL_STATS_WRITE] +=
-						io->io_hdr.num_dmas;
-					getbintime(&cur_bt);
-					bintime_sub(&cur_bt,
-						    &io->io_hdr.start_bt);
-
-					bintime_add(
-					    &lun->stats.ports[targ_port].time[CTL_STATS_WRITE],
-					    &cur_bt);
-#if 0
-					cs_prof_gettime(&cur_ticks);
-					lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
-						cur_ticks -
-						io->io_hdr.start_ticks;
-					lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
-						jiffies - io->io_hdr.start_time;
+		bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
+		bintime_add(&lun->stats.time[type], &bt);
 #endif
-#endif /* CTL_TIME_IO */
-				}
-				break;
-			default:
-				lun->stats.ports[targ_port].operations[CTL_STATS_NO_IO]++;
 
+		mtx_lock(&port->port_lock);
+		port->stats.bytes[type] += io->scsiio.kern_total_len;
+		port->stats.operations[type] ++;
+		port->stats.dmas[type] += io->io_hdr.num_dmas;
 #ifdef CTL_TIME_IO
-				bintime_add(
-				  &lun->stats.ports[targ_port].dma_time[CTL_STATS_NO_IO],
-				  &io->io_hdr.dma_bt);
-				lun->stats.ports[targ_port].num_dmas[CTL_STATS_NO_IO] +=
-					io->io_hdr.num_dmas;
-				getbintime(&cur_bt);
-				bintime_sub(&cur_bt, &io->io_hdr.start_bt);
-
-				bintime_add(&lun->stats.ports[targ_port].time[CTL_STATS_NO_IO],
-					    &cur_bt);
-
-#if 0
-				cs_prof_gettime(&cur_ticks);
-				lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
-					cur_ticks -
-					io->io_hdr.start_ticks;
-				lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
-					jiffies - io->io_hdr.start_time;
+		bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
+		bintime_add(&port->stats.time[type], &bt);
 #endif
-#endif /* CTL_TIME_IO */
-				break;
-			}
-			break;
-		}
-		default:
-			break;
-		}
+		mtx_unlock(&port->port_lock);
 	}
 
+	/*
+	 * Remove this from the OOA queue.
+	 */
 	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
+#ifdef CTL_TIME_IO
+	if (TAILQ_EMPTY(&lun->ooa_queue))
+		lun->last_busy = getsbinuptime();
+#endif
 
 	/*
 	 * Run through the blocked queue on this LUN and see if anything
@@ -12487,9 +13055,16 @@
 	 * left on its OOA queue.
 	 */
 	if ((lun->flags & CTL_LUN_INVALID)
-	 && (TAILQ_FIRST(&lun->ooa_queue) == NULL))
+	 && TAILQ_EMPTY(&lun->ooa_queue)) {
+		mtx_unlock(&lun->lun_lock);
+		mtx_lock(&softc->ctl_lock);
 		ctl_free_lun(lun);
+		mtx_unlock(&softc->ctl_lock);
+	} else
+		mtx_unlock(&lun->lun_lock);
 
+bailout:
+
 	/*
 	 * If this command has been aborted, make sure we set the status
 	 * properly.  The FETD is responsible for freeing the I/O and doing
@@ -12496,140 +13071,32 @@
 	 * whatever it needs to do to clean up its state.
 	 */
 	if (io->io_hdr.flags & CTL_FLAG_ABORT)
-		io->io_hdr.status = CTL_CMD_ABORTED;
+		ctl_set_task_aborted(&io->scsiio);
 
 	/*
-	 * We print out status for every task management command.  For SCSI
-	 * commands, we filter out any unit attention errors; they happen
-	 * on every boot, and would clutter up the log.  Note:  task
-	 * management commands aren't printed here, they are printed above,
-	 * since they should never even make it down here.
+	 * If enabled, print command error status.
 	 */
-	switch (io->io_hdr.io_type) {
-	case CTL_IO_SCSI: {
-		int error_code, sense_key, asc, ascq;
-
-		sense_key = 0;
-
-		if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)
-		 && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
-			/*
-			 * Since this is just for printing, no need to
-			 * show errors here.
-			 */
-			scsi_extract_sense_len(&io->scsiio.sense_data,
-					       io->scsiio.sense_len,
-					       &error_code,
-					       &sense_key,
-					       &asc,
-					       &ascq,
-					       /*show_errors*/ 0);
-		}
-
-		if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
-		 && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR)
-		  || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND)
-		  || (sense_key != SSD_KEY_UNIT_ATTENTION))) {
-
-			if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){
-				ctl_softc->skipped_prints++;
-				if (have_lock == 0)
-					mtx_unlock(&ctl_softc->ctl_lock);
-			} else {
-				uint32_t skipped_prints;
-
-				skipped_prints = ctl_softc->skipped_prints;
-
-				ctl_softc->skipped_prints = 0;
-				ctl_softc->last_print_jiffies = time_uptime;
-
-				if (have_lock == 0)
-					mtx_unlock(&ctl_softc->ctl_lock);
-				if (skipped_prints > 0) {
-#ifdef NEEDTOPORT
-					csevent_log(CSC_CTL | CSC_SHELF_SW |
-					    CTL_ERROR_REPORT,
-					    csevent_LogType_Trace,
-					    csevent_Severity_Information,
-					    csevent_AlertLevel_Green,
-					    csevent_FRU_Firmware,
-					    csevent_FRU_Unknown,
-					    "High CTL error volume, %d prints "
-					    "skipped", skipped_prints);
-#endif
-				}
-				ctl_io_error_print(io, NULL);
-			}
-		} else {
-			if (have_lock == 0)
-				mtx_unlock(&ctl_softc->ctl_lock);
-		}
-		break;
-	}
-	case CTL_IO_TASK:
-		if (have_lock == 0)
-			mtx_unlock(&ctl_softc->ctl_lock);
+	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
+	    (ctl_debug & CTL_DEBUG_INFO) != 0)
 		ctl_io_error_print(io, NULL);
-		break;
-	default:
-		if (have_lock == 0)
-			mtx_unlock(&ctl_softc->ctl_lock);
-		break;
-	}
 
 	/*
 	 * Tell the FETD or the other shelf controller we're done with this
 	 * command.  Note that only SCSI commands get to this point.  Task
 	 * management commands are completed above.
-	 *
-	 * We only send status to the other controller if we're in XFER
-	 * mode.  In SER_ONLY mode, the I/O is done on the controller that
-	 * received the I/O (from CTL's perspective), and so the status is
-	 * generated there.
-	 * 
-	 * XXX KDM if we hold the lock here, we could cause a deadlock
-	 * if the frontend comes back in in this context to queue
-	 * something.
 	 */
-	if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER)
-	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
-		union ctl_ha_msg msg;
-
+	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
+	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
 		memset(&msg, 0, sizeof(msg));
 		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
-		msg.hdr.original_sc = io->io_hdr.original_sc;
+		msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
 		msg.hdr.nexus = io->io_hdr.nexus;
-		msg.hdr.status = io->io_hdr.status;
-		msg.scsi.scsi_status = io->scsiio.scsi_status;
-		msg.scsi.tag_num = io->scsiio.tag_num;
-		msg.scsi.tag_type = io->scsiio.tag_type;
-		msg.scsi.sense_len = io->scsiio.sense_len;
-		msg.scsi.sense_residual = io->scsiio.sense_residual;
-		msg.scsi.residual = io->scsiio.residual;
-		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
-		       sizeof(io->scsiio.sense_data));
-		/*
-		 * We copy this whether or not this is an I/O-related
-		 * command.  Otherwise, we'd have to go and check to see
-		 * whether it's a read/write command, and it really isn't
-		 * worth it.
-		 */
-		memcpy(&msg.scsi.lbalen,
-		       &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
-		       sizeof(msg.scsi.lbalen));
+		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
+		    M_WAITOK);
+	}
 
-		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
-				sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
-			/* XXX do something here */
-		}
-
-		ctl_free_io_internal(io, /*have_lock*/ 0);
-	} else 
-		fe_done(io);
-
-bailout:
-
-	return (CTL_RETVAL_COMPLETE);
+	fe_done(io);
 }
 
 /*
@@ -12639,14 +13106,16 @@
 int
 ctl_queue_sense(union ctl_io *io)
 {
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_port *port = CTL_PORT(io);
 	struct ctl_lun *lun;
-	struct ctl_softc *ctl_softc;
-	uint32_t initidx;
+	struct scsi_sense_data *ps;
+	uint32_t initidx, p, targ_lun;
 
-	ctl_softc = control_softc;
-
 	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
 
+	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
+
 	/*
 	 * LUN lookup will likely move to the ctl_work_thread() once we
 	 * have our new queueing infrastructure (that doesn't put things on
@@ -12653,37 +13122,32 @@
 	 * a per-LUN queue initially).  That is so that we can handle
 	 * things like an INQUIRY to a LUN that we don't have enabled.  We
 	 * can't deal with that right now.
+	 * If we don't have a LUN for this, just toss the sense information.
 	 */
-	mtx_lock(&ctl_softc->ctl_lock);
-
-	/*
-	 * If we don't have a LUN for this, just toss the sense
-	 * information.
-	 */
-	if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
-	 && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL))
-		lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
-	else
+	mtx_lock(&softc->ctl_lock);
+	if (targ_lun >= CTL_MAX_LUNS ||
+	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
+		mtx_unlock(&softc->ctl_lock);
 		goto bailout;
+	}
+	mtx_lock(&lun->lun_lock);
+	mtx_unlock(&softc->ctl_lock);
 
 	initidx = ctl_get_initindex(&io->io_hdr.nexus);
+	p = initidx / CTL_MAX_INIT_PER_PORT;
+	if (lun->pending_sense[p] == NULL) {
+		lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT,
+		    M_CTL, M_NOWAIT | M_ZERO);
+	}
+	if ((ps = lun->pending_sense[p]) != NULL) {
+		ps += initidx % CTL_MAX_INIT_PER_PORT;
+		memset(ps, 0, sizeof(*ps));
+		memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len);
+	}
+	mtx_unlock(&lun->lun_lock);
 
-	/*
-	 * Already have CA set for this LUN...toss the sense information.
-	 */
-	if (ctl_is_set(lun->have_ca, initidx))
-		goto bailout;
-
-	memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data,
-	       ctl_min(sizeof(lun->pending_sense[initidx].sense),
-	       sizeof(io->scsiio.sense_data)));
-	ctl_set_mask(lun->have_ca, initidx);
-
 bailout:
-	mtx_unlock(&ctl_softc->ctl_lock);
-
 	ctl_free_io(io);
-
 	return (CTL_RETVAL_COMPLETE);
 }
 
@@ -12694,54 +13158,31 @@
 int
 ctl_queue(union ctl_io *io)
 {
-	struct ctl_softc *ctl_softc;
+	struct ctl_port *port = CTL_PORT(io);
 
 	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
 
-	ctl_softc = control_softc;
-
 #ifdef CTL_TIME_IO
 	io->io_hdr.start_time = time_uptime;
-	getbintime(&io->io_hdr.start_bt);
+	getbinuptime(&io->io_hdr.start_bt);
 #endif /* CTL_TIME_IO */
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	/* Map FE-specific LUN ID into global one. */
+	io->io_hdr.nexus.targ_mapped_lun =
+	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
 
 	switch (io->io_hdr.io_type) {
 	case CTL_IO_SCSI:
-		STAILQ_INSERT_TAIL(&ctl_softc->incoming_queue, &io->io_hdr,
-				   links);
-		break;
 	case CTL_IO_TASK:
-		STAILQ_INSERT_TAIL(&ctl_softc->task_queue, &io->io_hdr, links);
-		/*
-		 * Set the task pending flag.  This is necessary to close a
-		 * race condition with the FETD:
-		 *
-		 * - FETD submits a task management command, like an abort.
-		 * - Back end calls fe_datamove() to move the data for the
-		 *   aborted command.  The FETD can't really accept it, but
-		 *   if it did, it would end up transmitting data for a
-		 *   command that the initiator told us to abort.
-		 *
-		 * We close the race condition by setting the flag here,
-		 * and checking it in ctl_datamove(), before calling the
-		 * FETD's fe_datamove routine.  If we've got a task
-		 * pending, we run the task queue and then check to see
-		 * whether our particular I/O has been aborted.
-		 */
-		ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
+		if (ctl_debug & CTL_DEBUG_CDB)
+			ctl_io_print(io);
+		ctl_enqueue_incoming(io);
 		break;
 	default:
-		mtx_unlock(&ctl_softc->ctl_lock);
 		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
-		return (-EINVAL);
-		break; /* NOTREACHED */
+		return (EINVAL);
 	}
-	mtx_unlock(&ctl_softc->ctl_lock);
 
-	ctl_wakeup_thread();
-
 	return (CTL_RETVAL_COMPLETE);
 }
 
@@ -12752,22 +13193,27 @@
 	union ctl_io *io;
 
 	io = (union ctl_io *)arg;
-	ctl_done_lock(io, /*have_lock*/ 0);
+	ctl_done(io);
 }
 #endif /* CTL_IO_DELAY */
 
 void
-ctl_done_lock(union ctl_io *io, int have_lock)
+ctl_serseq_done(union ctl_io *io)
 {
-	struct ctl_softc *ctl_softc;
-#ifndef CTL_DONE_THREAD
-	union ctl_io *xio;
-#endif /* !CTL_DONE_THREAD */
+	struct ctl_lun *lun = CTL_LUN(io);
 
-	ctl_softc = control_softc;
+	if (lun->be_lun == NULL ||
+	    lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
+		return;
+	mtx_lock(&lun->lun_lock);
+	io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
+	ctl_check_blocked(lun);
+	mtx_unlock(&lun->lun_lock);
+}
 
-	if (have_lock == 0)
-		mtx_lock(&ctl_softc->ctl_lock);
+void
+ctl_done(union ctl_io *io)
+{
 
 	/*
 	 * Enable this to catch duplicate completion issues.
@@ -12775,15 +13221,14 @@
 #if 0
 	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
 		printf("%s: type %d msg %d cdb %x iptl: "
-		       "%d:%d:%d:%d tag 0x%04x "
+		       "%u:%u:%u tag 0x%04x "
 		       "flag %#x status %x\n",
 			__func__,
 			io->io_hdr.io_type,
 			io->io_hdr.msg_type,
 			io->scsiio.cdb[0],
-			io->io_hdr.nexus.initid.id,
+			io->io_hdr.nexus.initid,
 			io->io_hdr.nexus.targ_port,
-			io->io_hdr.nexus.targ_target.id,
 			io->io_hdr.nexus.targ_lun,
 			(io->io_hdr.io_type ==
 			CTL_IO_TASK) ?
@@ -12799,128 +13244,46 @@
 	 * This is an internal copy of an I/O, and should not go through
 	 * the normal done processing logic.
 	 */
-	if (io->io_hdr.flags & CTL_FLAG_INT_COPY) {
-		if (have_lock == 0)
-			mtx_unlock(&ctl_softc->ctl_lock);
+	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
 		return;
-	}
 
-	/*
-	 * We need to send a msg to the serializing shelf to finish the IO
-	 * as well.  We don't send a finish message to the other shelf if
-	 * this is a task management command.  Task management commands
-	 * aren't serialized in the OOA queue, but rather just executed on
-	 * both shelf controllers for commands that originated on that
-	 * controller.
-	 */
-	if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
-	 && (io->io_hdr.io_type != CTL_IO_TASK)) {
-		union ctl_ha_msg msg_io;
-
-		msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
-		msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
-		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
-		    sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) {
-		}
-		/* continue on to finish IO */
-	}
 #ifdef CTL_IO_DELAY
 	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
-		struct ctl_lun *lun;
-
-		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
 	} else {
-		struct ctl_lun *lun;
+		struct ctl_lun *lun = CTL_LUN(io);
 
-		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
 		if ((lun != NULL)
 		 && (lun->delay_info.done_delay > 0)) {
-			struct callout *callout;
 
-			callout = (struct callout *)&io->io_hdr.timer_bytes;
-			callout_init(callout, /*mpsafe*/ 1);
+			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
 			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
-			callout_reset(callout,
+			callout_reset(&io->io_hdr.delay_callout,
 				      lun->delay_info.done_delay * hz,
 				      ctl_done_timer_wakeup, io);
 			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
 				lun->delay_info.done_delay = 0;
-			if (have_lock == 0)
-				mtx_unlock(&ctl_softc->ctl_lock);
 			return;
 		}
 	}
 #endif /* CTL_IO_DELAY */
 
-	STAILQ_INSERT_TAIL(&ctl_softc->done_queue, &io->io_hdr, links);
-
-#ifdef CTL_DONE_THREAD
-	if (have_lock == 0)
-		mtx_unlock(&ctl_softc->ctl_lock);
-
-	ctl_wakeup_thread();
-#else /* CTL_DONE_THREAD */
-	for (xio = (union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue);
-	     xio != NULL;
-	     xio =(union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue)) {
-
-		STAILQ_REMOVE_HEAD(&ctl_softc->done_queue, links);
-
-		ctl_process_done(xio, /*have_lock*/ 1);
-	}
-	if (have_lock == 0)
-		mtx_unlock(&ctl_softc->ctl_lock);
-#endif /* CTL_DONE_THREAD */
+	ctl_enqueue_done(io);
 }
 
-void
-ctl_done(union ctl_io *io)
-{
-	ctl_done_lock(io, /*have_lock*/ 0);
-}
-
-int
-ctl_isc(struct ctl_scsiio *ctsio)
-{
-	struct ctl_lun *lun;
-	int retval;
-
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
-	CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));
-
-	CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));
-
-	retval = lun->backend->data_submit((union ctl_io *)ctsio);
-
-	return (retval);
-}
-
-
 static void
 ctl_work_thread(void *arg)
 {
-	struct ctl_softc *softc;
+	struct ctl_thread *thr = (struct ctl_thread *)arg;
+	struct ctl_softc *softc = thr->ctl_softc;
 	union ctl_io *io;
-	struct ctl_be_lun *be_lun;
 	int retval;
 
 	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
 
-	softc = (struct ctl_softc *)arg;
-	if (softc == NULL)
-		return;
-
-	mtx_lock(&softc->ctl_lock);
-	for (;;) {
-		retval = 0;
-
+	while (!softc->shutdown) {
 		/*
 		 * We handle the queues in this order:
-		 * - task management
 		 * - ISC
 		 * - done queue (to free up resources, unblock other commands)
 		 * - RtR queue
@@ -12929,193 +13292,235 @@
 		 * If those queues are empty, we break out of the loop and
 		 * go to sleep.
 		 */
-		io = (union ctl_io *)STAILQ_FIRST(&softc->task_queue);
+		mtx_lock(&thr->queue_lock);
+		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
 		if (io != NULL) {
-			ctl_run_task_queue(softc);
+			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
+			mtx_unlock(&thr->queue_lock);
+			ctl_handle_isc(io);
 			continue;
 		}
-		io = (union ctl_io *)STAILQ_FIRST(&softc->isc_queue);
+		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
 		if (io != NULL) {
-			STAILQ_REMOVE_HEAD(&softc->isc_queue, links);
-			ctl_handle_isc(io);
+			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
+			/* clear any blocked commands, call fe_done */
+			mtx_unlock(&thr->queue_lock);
+			ctl_process_done(io);
 			continue;
 		}
-		io = (union ctl_io *)STAILQ_FIRST(&softc->done_queue);
+		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
 		if (io != NULL) {
-			STAILQ_REMOVE_HEAD(&softc->done_queue, links);
-			/* clear any blocked commands, call fe_done */
-			mtx_unlock(&softc->ctl_lock);
-			/*
-			 * XXX KDM
-			 * Call this without a lock for now.  This will
-			 * depend on whether there is any way the FETD can
-			 * sleep or deadlock if called with the CTL lock
-			 * held.
-			 */
-			retval = ctl_process_done(io, /*have_lock*/ 0);
-			mtx_lock(&softc->ctl_lock);
+			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
+			mtx_unlock(&thr->queue_lock);
+			if (io->io_hdr.io_type == CTL_IO_TASK)
+				ctl_run_task(io);
+			else
+				ctl_scsiio_precheck(softc, &io->scsiio);
 			continue;
 		}
-		if (!ctl_pause_rtr) {
-			io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
-			if (io != NULL) {
-				STAILQ_REMOVE_HEAD(&softc->rtr_queue, links);
-				mtx_unlock(&softc->ctl_lock);
-				goto execute;
-			}
-		}
-		io = (union ctl_io *)STAILQ_FIRST(&softc->incoming_queue);
+		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
 		if (io != NULL) {
-			STAILQ_REMOVE_HEAD(&softc->incoming_queue, links);
-			mtx_unlock(&softc->ctl_lock);
-			ctl_scsiio_precheck(softc, &io->scsiio);
-			mtx_lock(&softc->ctl_lock);
+			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
+			mtx_unlock(&thr->queue_lock);
+			retval = ctl_scsiio(&io->scsiio);
+			if (retval != CTL_RETVAL_COMPLETE)
+				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
 			continue;
 		}
-		/*
-		 * We might want to move this to a separate thread, so that
-		 * configuration requests (in this case LUN creations)
-		 * won't impact the I/O path.
-		 */
+
+		/* Sleep until we have something to do. */
+		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
+	}
+	thr->thread = NULL;
+	kthread_exit();
+}
+
+static void
+ctl_lun_thread(void *arg)
+{
+	struct ctl_softc *softc = (struct ctl_softc *)arg;
+	struct ctl_be_lun *be_lun;
+
+	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
+
+	while (!softc->shutdown) {
+		mtx_lock(&softc->ctl_lock);
 		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
 		if (be_lun != NULL) {
 			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
 			mtx_unlock(&softc->ctl_lock);
 			ctl_create_lun(be_lun);
-			mtx_lock(&softc->ctl_lock);
 			continue;
 		}
 
-		/* XXX KDM use the PDROP flag?? */
 		/* Sleep until we have something to do. */
-		mtx_sleep(softc, &softc->ctl_lock, PRIBIO, "ctl_work", 0);
-
-		/* Back to the top of the loop to see what woke us up. */
-		continue;
-
-execute:
-		retval = ctl_scsiio(&io->scsiio);
-		switch (retval) {
-		case CTL_RETVAL_COMPLETE:
-			break;
-		default:
-			/*
-			 * Probably need to make sure this doesn't happen.
-			 */
-			break;
-		}
-		mtx_lock(&softc->ctl_lock);
+		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
+		    PDROP | PRIBIO, "-", 0);
 	}
+	softc->lun_thread = NULL;
+	kthread_exit();
 }
 
-void
-ctl_wakeup_thread()
+static void
+ctl_thresh_thread(void *arg)
 {
-	struct ctl_softc *softc;
+	struct ctl_softc *softc = (struct ctl_softc *)arg;
+	struct ctl_lun *lun;
+	struct ctl_logical_block_provisioning_page *page;
+	const char *attr;
+	union ctl_ha_msg msg;
+	uint64_t thres, val;
+	int i, e, set;
 
-	softc = control_softc;
+	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
 
-	wakeup(softc);
+	while (!softc->shutdown) {
+		mtx_lock(&softc->ctl_lock);
+		STAILQ_FOREACH(lun, &softc->lun_list, links) {
+			if ((lun->flags & CTL_LUN_DISABLED) ||
+			    (lun->flags & CTL_LUN_NO_MEDIA) ||
+			    lun->backend->lun_attr == NULL)
+				continue;
+			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+			    softc->ha_mode == CTL_HA_MODE_XFER)
+				continue;
+			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
+				continue;
+			e = 0;
+			page = &lun->MODE_LBP;
+			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
+				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
+					continue;
+				thres = scsi_4btoul(page->descr[i].count);
+				thres <<= CTL_LBP_EXPONENT;
+				switch (page->descr[i].resource) {
+				case 0x01:
+					attr = "blocksavail";
+					break;
+				case 0x02:
+					attr = "blocksused";
+					break;
+				case 0xf1:
+					attr = "poolblocksavail";
+					break;
+				case 0xf2:
+					attr = "poolblocksused";
+					break;
+				default:
+					continue;
+				}
+				mtx_unlock(&softc->ctl_lock); // XXX
+				val = lun->backend->lun_attr(
+				    lun->be_lun->be_lun, attr);
+				mtx_lock(&softc->ctl_lock);
+				if (val == UINT64_MAX)
+					continue;
+				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
+				    == SLBPPD_ARMING_INC)
+					e = (val >= thres);
+				else
+					e = (val <= thres);
+				if (e)
+					break;
+			}
+			mtx_lock(&lun->lun_lock);
+			if (e) {
+				scsi_u64to8b((uint8_t *)&page->descr[i] -
+				    (uint8_t *)page, lun->ua_tpt_info);
+				if (lun->lasttpt == 0 ||
+				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
+					lun->lasttpt = time_uptime;
+					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
+					set = 1;
+				} else
+					set = 0;
+			} else {
+				lun->lasttpt = 0;
+				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
+				set = -1;
+			}
+			mtx_unlock(&lun->lun_lock);
+			if (set != 0 &&
+			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+				/* Send msg to other side. */
+				bzero(&msg.ua, sizeof(msg.ua));
+				msg.hdr.msg_type = CTL_MSG_UA;
+				msg.hdr.nexus.initid = -1;
+				msg.hdr.nexus.targ_port = -1;
+				msg.hdr.nexus.targ_lun = lun->lun;
+				msg.hdr.nexus.targ_mapped_lun = lun->lun;
+				msg.ua.ua_all = 1;
+				msg.ua.ua_set = (set > 0);
+				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
+				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
+				mtx_unlock(&softc->ctl_lock); // XXX
+				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+				    sizeof(msg.ua), M_WAITOK);
+				mtx_lock(&softc->ctl_lock);
+			}
+		}
+		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
+		    PDROP | PRIBIO, "-", CTL_LBP_PERIOD * hz);
+	}
+	softc->thresh_thread = NULL;
+	kthread_exit();
 }
 
-/* Initialization and failover */
+static void
+ctl_enqueue_incoming(union ctl_io *io)
+{
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_thread *thr;
+	u_int idx;
 
-void
-ctl_init_isc_msg(void)
-{
-	printf("CTL: Still calling this thing\n");
+	idx = (io->io_hdr.nexus.targ_port * 127 +
+	       io->io_hdr.nexus.initid) % worker_threads;
+	thr = &softc->threads[idx];
+	mtx_lock(&thr->queue_lock);
+	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
+	mtx_unlock(&thr->queue_lock);
+	wakeup(thr);
 }
 
-/*
- * Init component
- * 	Initializes component into configuration defined by bootMode
- *	(see hasc-sv.c)
- *  	returns hasc_Status:
- * 		OK
- *		ERROR - fatal error
- */
-static ctl_ha_comp_status
-ctl_isc_init(struct ctl_ha_component *c)
+static void
+ctl_enqueue_rtr(union ctl_io *io)
 {
-	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_thread *thr;
 
-	c->status = ret;
-	return ret;
+	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
+	mtx_lock(&thr->queue_lock);
+	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
+	mtx_unlock(&thr->queue_lock);
+	wakeup(thr);
 }
 
-/* Start component
- * 	Starts component in state requested. If component starts successfully,
- *	it must set its own state to the requestrd state
- *	When requested state is HASC_STATE_HA, the component may refine it
- * 	by adding _SLAVE or _MASTER flags.
- *	Currently allowed state transitions are:
- *	UNKNOWN->HA		- initial startup
- *	UNKNOWN->SINGLE - initial startup when no parter detected
- *	HA->SINGLE		- failover
- * returns ctl_ha_comp_status:
- * 		OK	- component successfully started in requested state
- *		FAILED  - could not start the requested state, failover may
- * 			  be possible
- *		ERROR	- fatal error detected, no future startup possible
- */
-static ctl_ha_comp_status
-ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
+static void
+ctl_enqueue_done(union ctl_io *io)
 {
-	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_thread *thr;
 
-	// UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
-	if (c->state == CTL_HA_STATE_UNKNOWN ) {
-		ctl_is_single = 0;
-		if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
-		    != CTL_HA_STATUS_SUCCESS) {
-			printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
-			ret = CTL_HA_COMP_STATUS_ERROR;
-		}
-	} else if (CTL_HA_STATE_IS_HA(c->state)
-		&& CTL_HA_STATE_IS_SINGLE(state)){
-		// HA->SINGLE transition
-	        ctl_failover();
-		ctl_is_single = 1;
-	} else {
-		printf("ctl_isc_start:Invalid state transition %X->%X\n",
-		       c->state, state);
-		ret = CTL_HA_COMP_STATUS_ERROR;
-	}
-	if (CTL_HA_STATE_IS_SINGLE(state))
-		ctl_is_single = 1;
-
-	c->state = state;
-	c->status = ret;
-	return ret;
+	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
+	mtx_lock(&thr->queue_lock);
+	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
+	mtx_unlock(&thr->queue_lock);
+	wakeup(thr);
 }
 
-/*
- * Quiesce component
- * The component must clear any error conditions (set status to OK) and
- * prepare itself to another Start call
- * returns ctl_ha_comp_status:
- * 	OK
- *	ERROR
- */
-static ctl_ha_comp_status
-ctl_isc_quiesce(struct ctl_ha_component *c)
+static void
+ctl_enqueue_isc(union ctl_io *io)
 {
-	int ret = CTL_HA_COMP_STATUS_OK;
+	struct ctl_softc *softc = CTL_SOFTC(io);
+	struct ctl_thread *thr;
 
-	ctl_pause_rtr = 1;
-	c->status = ret;
-	return ret;
+	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
+	mtx_lock(&thr->queue_lock);
+	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
+	mtx_unlock(&thr->queue_lock);
+	wakeup(thr);
 }
 
-struct ctl_ha_component ctl_ha_component_ctlisc =
-{
-	.name = "CTL ISC",
-	.state = CTL_HA_STATE_UNKNOWN,
-	.init = ctl_isc_init,
-	.start = ctl_isc_start,
-	.quiesce = ctl_isc_quiesce
-};
-
 /*
  *  vim: ts=8
  */

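The ctl.c rework above replaces the single softc-wide queues with
per-worker queues, and the selection policy is visible in the new
ctl_enqueue_*() helpers: incoming commands hash on the port/initiator
nexus, keeping per-initiator ordering on one thread, while the RtR,
done and ISC stages hash on the mapped LUN so later processing is
serialized per LUN.  A user-space sketch of that policy (the thread
count and sample nexus values are illustrative, not taken from the
driver):

	/*
	 * User-space sketch of the queue selection policy used by the
	 * new ctl_enqueue_*() helpers; WORKER_THREADS and the sample
	 * values below are illustrative only.
	 */
	#include <stdio.h>

	#define	WORKER_THREADS	4

	/* ctl_enqueue_incoming(): hash on the port/initiator nexus. */
	static unsigned
	incoming_worker(unsigned targ_port, unsigned initid)
	{
		return ((targ_port * 127 + initid) % WORKER_THREADS);
	}

	/* ctl_enqueue_rtr/done/isc(): hash on the mapped LUN. */
	static unsigned
	lun_worker(unsigned targ_mapped_lun)
	{
		return (targ_mapped_lun % WORKER_THREADS);
	}

	int
	main(void)
	{
		printf("port 1, initiator 3 -> worker %u\n",
		    incoming_worker(1, 3));
		printf("mapped LUN 5        -> worker %u\n",
		    lun_worker(5));
		return (0);
	}
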
Modified: trunk/sys/cam/ctl/ctl.h
===================================================================
--- trunk/sys/cam/ctl/ctl.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
  * All rights reserved.
@@ -27,8 +28,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.h#5 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl.h 312581 2017-01-21 08:37:53Z mav $
  */
 /*
  * Function definitions used both within CTL and potentially in various CTL
@@ -40,7 +41,6 @@
 #ifndef	_CTL_H_
 #define	_CTL_H_
 
-#define	ctl_min(x,y)	(((x) < (y)) ? (x) : (y))
 #define	CTL_RETVAL_COMPLETE	0
 #define	CTL_RETVAL_QUEUED	1
 #define	CTL_RETVAL_ALLOCATED	2
@@ -52,6 +52,8 @@
 	CTL_PORT_SCSI		= 0x02,
 	CTL_PORT_IOCTL		= 0x04,
 	CTL_PORT_INTERNAL	= 0x08,
+	CTL_PORT_ISCSI		= 0x10,
+	CTL_PORT_SAS		= 0x20,
 	CTL_PORT_ALL		= 0xff,
 	CTL_PORT_ISC		= 0x100 // FC port for inter-shelf communication
 } ctl_port_type;
@@ -73,18 +75,12 @@
 struct ctl_modepage_header {
 	uint8_t page_code;
 	uint8_t subpage;
-	int32_t len_used;
-	int32_t len_left;
+	uint16_t len_used;
+	uint16_t len_left;
 };
 
-struct ctl_modepage_aps {
-	struct ctl_modepage_header header;
-	uint8_t lock_active;
-};
-
 union ctl_modepage_info {
 	struct ctl_modepage_header header;
-	struct ctl_modepage_aps aps;
 };
 
 /*
@@ -95,12 +91,15 @@
 /*
  * Device ID length, for VPD page 0x83.
  */
-#define	CTL_DEVID_LEN	16
+#define	CTL_DEVID_LEN	64
+#define	CTL_DEVID_MIN_LEN	16
 /*
  * WWPN length, for VPD page 0x83.
  */
 #define CTL_WWPN_LEN   8
 
+#define	CTL_DRIVER_NAME_LEN	32
+
 /*
  * Unit attention types. ASC/ASCQ values for these should be placed in
  * ctl_build_ua.  These are also listed in order of reporting priority.
@@ -111,17 +110,20 @@
 	CTL_UA_POWERON		= 0x0001,
 	CTL_UA_BUS_RESET	= 0x0002,
 	CTL_UA_TARG_RESET	= 0x0004,
-	CTL_UA_LUN_RESET	= 0x0008,
-	CTL_UA_LUN_CHANGE	= 0x0010,
-	CTL_UA_MODE_CHANGE	= 0x0020,
-	CTL_UA_LOG_CHANGE	= 0x0040,
-	CTL_UA_LVD		= 0x0080,
-	CTL_UA_SE		= 0x0100,
-	CTL_UA_RES_PREEMPT	= 0x0200,
-	CTL_UA_RES_RELEASE	= 0x0400,
-	CTL_UA_REG_PREEMPT  	= 0x0800,
-	CTL_UA_ASYM_ACC_CHANGE  = 0x1000,
-	CTL_UA_CAPACITY_CHANGED = 0x2000
+	CTL_UA_I_T_NEXUS_LOSS	= 0x0008,
+	CTL_UA_LUN_RESET	= 0x0010,
+	CTL_UA_LUN_CHANGE	= 0x0020,
+	CTL_UA_MODE_CHANGE	= 0x0040,
+	CTL_UA_LOG_CHANGE	= 0x0080,
+	CTL_UA_INQ_CHANGE	= 0x0100,
+	CTL_UA_RES_PREEMPT	= 0x0400,
+	CTL_UA_RES_RELEASE	= 0x0800,
+	CTL_UA_REG_PREEMPT	= 0x1000,
+	CTL_UA_ASYM_ACC_CHANGE	= 0x2000,
+	CTL_UA_CAPACITY_CHANGE	= 0x4000,
+	CTL_UA_THIN_PROV_THRES	= 0x8000,
+	CTL_UA_MEDIUM_CHANGE	= 0x10000,
+	CTL_UA_IE		= 0x20000
 } ctl_ua_type;
 
 #ifdef	_KERNEL
@@ -128,21 +130,6 @@
 
 MALLOC_DECLARE(M_CTL);
 
-typedef enum {
-	CTL_THREAD_NONE		= 0x00,
-	CTL_THREAD_WAKEUP	= 0x01
-} ctl_thread_flags;
-
-struct ctl_thread {
-	void			(*thread_func)(void *arg);
-	void			*arg;
-	struct cv		wait_queue;
-	const char		*thread_name;
-	ctl_thread_flags	thread_flags;
-	struct completion	*thread_event;
-	struct task_struct	*task;
-};
-
 struct ctl_page_index;
 
 #ifdef SYSCTL_DECL	/* from sysctl.h */
@@ -149,65 +136,84 @@
 SYSCTL_DECL(_kern_cam_ctl);
 #endif
 
-/*
- * Call these routines to enable or disable front end ports.
- */
-int ctl_port_enable(ctl_port_type port_type);
-int ctl_port_disable(ctl_port_type port_type);
-/*
- * This routine grabs a list of frontend ports.
- */
-int ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
-		  int *num_entries_filled, int *num_entries_dropped,
-		  ctl_port_type port_type, int no_virtual);
+struct ctl_lun;
+struct ctl_port;
+struct ctl_softc;
 
 /*
  * Put a string into an sbuf, escaping characters that are illegal or not
  * recommended in XML.  Note this doesn't escape everything, just > < and &.
  */
-int ctl_sbuf_printf_esc(struct sbuf *sb, char *str);
+int ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size);
 
-int ctl_ffz(uint32_t *mask, uint32_t size);
+int ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last);
 int ctl_set_mask(uint32_t *mask, uint32_t bit);
 int ctl_clear_mask(uint32_t *mask, uint32_t bit);
 int ctl_is_set(uint32_t *mask, uint32_t bit);
-int ctl_control_page_handler(struct ctl_scsiio *ctsio,
+int ctl_default_page_handler(struct ctl_scsiio *ctsio,
 			     struct ctl_page_index *page_index,
 			     uint8_t *page_ptr);
-/**
-int ctl_failover_sp_handler(struct ctl_scsiio *ctsio,
-			    struct ctl_page_index *page_index,
-			    uint8_t *page_ptr);
-**/
-int ctl_power_sp_handler(struct ctl_scsiio *ctsio,
-			 struct ctl_page_index *page_index, uint8_t *page_ptr);
-int ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
-			       struct ctl_page_index *page_index, int pc);
-int ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
-		       struct ctl_page_index *page_index, uint8_t *page_ptr);
-int ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
+int ctl_ie_page_handler(struct ctl_scsiio *ctsio,
+			struct ctl_page_index *page_index,
+			uint8_t *page_ptr);
+int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
 				   struct ctl_page_index *page_index,
 				   int pc);
-int ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
-				    struct ctl_page_index *page_index,
-				    uint8_t *page_ptr);
+int ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
+				   struct ctl_page_index *page_index,
+				   int pc);
+int ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
+				   struct ctl_page_index *page_index,
+				   int pc);
 int ctl_config_move_done(union ctl_io *io);
 void ctl_datamove(union ctl_io *io);
+void ctl_serseq_done(union ctl_io *io);
 void ctl_done(union ctl_io *io);
+void ctl_data_submit_done(union ctl_io *io);
+void ctl_config_read_done(union ctl_io *io);
 void ctl_config_write_done(union ctl_io *io);
-#if 0
-int ctl_thread(void *arg);
-#endif
-void ctl_wakeup_thread(void);
-#if 0
-struct ctl_thread *ctl_create_thread(void (*thread_func)
-	(void *thread_arg), void *thread_arg, const char *thread_name);
-void ctl_signal_thread(struct ctl_thread *thread);
-void ctl_shutdown_thread(struct ctl_thread *thread);
-#endif
 void ctl_portDB_changed(int portnum);
-void ctl_init_isc_msg(void);
+int ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+		 struct thread *td);
 
+void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
+void ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except,
+    ctl_ua_type ua);
+void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
+void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
+void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
+void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
+    ctl_ua_type ua_type);
+
+uint32_t ctl_decode_lun(uint64_t encoded);
+uint64_t ctl_encode_lun(uint32_t decoded);
+
+void ctl_isc_announce_lun(struct ctl_lun *lun);
+void ctl_isc_announce_port(struct ctl_port *port);
+void ctl_isc_announce_iid(struct ctl_port *port, int iid);
+void ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
+    uint8_t page, uint8_t subpage);
+
+/*
+ * KPI to manipulate LUN/port options
+ */
+
+struct ctl_option {
+	STAILQ_ENTRY(ctl_option)	links;
+	char			*name;
+	char			*value;
+};
+typedef STAILQ_HEAD(ctl_options, ctl_option) ctl_options_t;
+
+struct ctl_be_arg;
+void ctl_init_opts(ctl_options_t *opts, int num_args, struct ctl_be_arg *args);
+void ctl_update_opts(ctl_options_t *opts, int num_args,
+    struct ctl_be_arg *args);
+void ctl_free_opts(ctl_options_t *opts);
+char * ctl_get_opt(ctl_options_t *opts, const char *name);
+int ctl_get_opt_number(ctl_options_t *opts, const char *name, uint64_t *num);
+int ctl_expand_number(const char *buf, uint64_t *num);
+
 #endif	/* _KERNEL */
 
 #endif	/* _CTL_H_ */

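The option KPI declared above (ctl_init_opts(), ctl_get_opt(),
ctl_get_opt_number() and friends) is how backends consume name/value
arguments handed down from userland.  A hedged sketch of a consumer,
assuming hypothetical "file" and "size" option names and a helper that
is not part of CTL; per the ctl_backend.c implementation below,
ctl_get_opt_number() returns -2 when the option is absent:

	/*
	 * Sketch only: "file" and "size" are made-up option names and
	 * example_parse_lun_opts() is not a CTL function.  Kernel
	 * headers and error handling are abbreviated.
	 */
	static int
	example_parse_lun_opts(struct ctl_be_lun *cbe_lun, char **pathp,
	    uint64_t *sizep)
	{
		char *path;

		path = ctl_get_opt(&cbe_lun->options, "file");
		if (path == NULL)
			return (EINVAL);	/* required option missing */

		/* ctl_get_opt_number() returns -2 if the option is absent. */
		if (ctl_get_opt_number(&cbe_lun->options, "size", sizep) != 0)
			*sizep = 0;		/* fall back to media size */

		*pathp = path;
		return (0);
	}
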
Modified: trunk/sys/cam/ctl/ctl_backend.c
===================================================================
--- trunk/sys/cam/ctl/ctl_backend.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_backend.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
  * All rights reserved.
@@ -27,7 +28,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_backend.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend.c#3 $
  */
 /*
  * CTL backend driver registration routines
@@ -36,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_backend.c 313369 2017-02-07 01:56:26Z mav $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -55,7 +56,6 @@
 #include <cam/ctl/ctl.h>
 #include <cam/ctl/ctl_frontend.h>
 #include <cam/ctl/ctl_backend.h>
-#include <cam/ctl/ctl_frontend_internal.h>
 #include <cam/ctl/ctl_ioctl.h>
 #include <cam/ctl/ctl_ha.h>
 #include <cam/ctl/ctl_private.h>
@@ -62,64 +62,41 @@
 #include <cam/ctl/ctl_debug.h>
 
 extern struct ctl_softc *control_softc;
-extern int ctl_disable;
 
 int
 ctl_backend_register(struct ctl_backend_driver *be)
 {
-	struct ctl_softc *ctl_softc;
+	struct ctl_softc *softc = control_softc;
 	struct ctl_backend_driver *be_tmp;
+	int error;
 
-	ctl_softc = control_softc;
-
-	/* Don't continue if CTL is disabled */
-	if (ctl_disable != 0)
-		return (0);
-
-	mtx_lock(&ctl_softc->ctl_lock);
-	/*
-	 * Sanity check, make sure this isn't a duplicate registration.
-	 */
-	STAILQ_FOREACH(be_tmp, &ctl_softc->be_list, links) {
+	/* Sanity check, make sure this isn't a duplicate registration. */
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(be_tmp, &softc->be_list, links) {
 		if (strcmp(be_tmp->name, be->name) == 0) {
-			mtx_unlock(&ctl_softc->ctl_lock);
+			mtx_unlock(&softc->ctl_lock);
 			return (-1);
 		}
 	}
-	mtx_unlock(&ctl_softc->ctl_lock);
-
-	/*
-	 * Call the backend's initialization routine.
-	 */
-	be->init();
-
-	mtx_lock(&ctl_softc->ctl_lock);
-	
-	STAILQ_INSERT_TAIL(&ctl_softc->be_list, be, links);
-
-	ctl_softc->num_backends++;
-
-	/*
-	 * Don't want to increment the usage count for internal consumers,
-	 * we won't be able to unload otherwise.
-	 */
-	/* XXX KDM find a substitute for this? */
-#if 0
-	if ((be->flags & CTL_BE_FLAG_INTERNAL) == 0)
-		MOD_INC_USE_COUNT;
-#endif
-
+	mtx_unlock(&softc->ctl_lock);
 #ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED
 	be->config_move_done = ctl_config_move_done;
 #endif
-	/* XXX KDM fix this! */
 	be->num_luns = 0;
-#if 0
-	atomic_set(&be->num_luns, 0);
-#endif
 
-	mtx_unlock(&ctl_softc->ctl_lock);
+	/* Call the backend's initialization routine. */
+	if (be->init != NULL) {
+		if ((error = be->init()) != 0) {
+			printf("%s backend init error: %d\n",
+			    be->name, error);
+			return (error);
+		}
+	}
 
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_INSERT_TAIL(&softc->be_list, be, links);
+	softc->num_backends++;
+	mtx_unlock(&softc->ctl_lock);
 	return (0);
 }
 
@@ -126,58 +103,130 @@
 int
 ctl_backend_deregister(struct ctl_backend_driver *be)
 {
-	struct ctl_softc *ctl_softc;
+	struct ctl_softc *softc = control_softc;
+	int error;
 
-	ctl_softc = control_softc;
+	/* Call the backend's shutdown routine. */
+	if (be->shutdown != NULL) {
+		if ((error = be->shutdown()) != 0) {
+			printf("%s backend shutdown error: %d\n",
+			    be->name, error);
+			return (error);
+		}
+	}
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_REMOVE(&softc->be_list, be, ctl_backend_driver, links);
+	softc->num_backends--;
+	mtx_unlock(&softc->ctl_lock);
+	return (0);
+}
 
-#if 0
-	if (atomic_read(&be->num_luns) != 0) {
-#endif
-	/* XXX KDM fix this! */
-	if (be->num_luns != 0) {
-		mtx_unlock(&ctl_softc->ctl_lock);
-		return (-1);
+struct ctl_backend_driver *
+ctl_backend_find(char *backend_name)
+{
+	struct ctl_softc *softc = control_softc;
+	struct ctl_backend_driver *be_tmp;
+
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(be_tmp, &softc->be_list, links) {
+		if (strcmp(be_tmp->name, backend_name) == 0) {
+			mtx_unlock(&softc->ctl_lock);
+			return (be_tmp);
+		}
 	}
+	mtx_unlock(&softc->ctl_lock);
 
-	STAILQ_REMOVE(&ctl_softc->be_list, be, ctl_backend_driver, links);
+	return (NULL);
+}
 
-	ctl_softc->num_backends--;
+void
+ctl_init_opts(ctl_options_t *opts, int num_args, struct ctl_be_arg *args)
+{
+	struct ctl_option *opt;
+	int i;
 
-	/* XXX KDM find a substitute for this? */
-#if 0
-	if ((be->flags & CTL_BE_FLAG_INTERNAL) == 0)
-		MOD_DEC_USE_COUNT;
-#endif
+	STAILQ_INIT(opts);
+	for (i = 0; i < num_args; i++) {
+		if ((args[i].flags & CTL_BEARG_RD) == 0)
+			continue;
+		if ((args[i].flags & CTL_BEARG_ASCII) == 0)
+			continue;
+		opt = malloc(sizeof(*opt), M_CTL, M_WAITOK);
+		opt->name = strdup(args[i].kname, M_CTL);
+		opt->value = strdup(args[i].kvalue, M_CTL);
+		STAILQ_INSERT_TAIL(opts, opt, links);
+	}
+}
 
-	mtx_unlock(&ctl_softc->ctl_lock);
+void
+ctl_update_opts(ctl_options_t *opts, int num_args, struct ctl_be_arg *args)
+{
+	struct ctl_option *opt;
+	int i;
 
-	return (0);
+	for (i = 0; i < num_args; i++) {
+		if ((args[i].flags & CTL_BEARG_RD) == 0)
+			continue;
+		if ((args[i].flags & CTL_BEARG_ASCII) == 0)
+			continue;
+		STAILQ_FOREACH(opt, opts, links) {
+			if (strcmp(opt->name, args[i].kname) == 0)
+				break;
+		}
+		if (args[i].kvalue != NULL &&
+		    ((char *)args[i].kvalue)[0] != 0) {
+			if (opt) {
+				free(opt->value, M_CTL);
+				opt->value = strdup(args[i].kvalue, M_CTL);
+			} else {
+				opt = malloc(sizeof(*opt), M_CTL, M_WAITOK);
+				opt->name = strdup(args[i].kname, M_CTL);
+				opt->value = strdup(args[i].kvalue, M_CTL);
+				STAILQ_INSERT_TAIL(opts, opt, links);
+			}
+		} else if (opt) {
+			STAILQ_REMOVE(opts, opt, ctl_option, links);
+			free(opt->name, M_CTL);
+			free(opt->value, M_CTL);
+			free(opt, M_CTL);
+		}
+	}
 }
 
-struct ctl_backend_driver *
-ctl_backend_find(char *backend_name)
+void
+ctl_free_opts(ctl_options_t *opts)
 {
-	struct ctl_softc *ctl_softc;
-	struct ctl_backend_driver *be_tmp;
+	struct ctl_option *opt;
 
-	ctl_softc = control_softc;
+	while ((opt = STAILQ_FIRST(opts)) != NULL) {
+		STAILQ_REMOVE_HEAD(opts, links);
+		free(opt->name, M_CTL);
+		free(opt->value, M_CTL);
+		free(opt, M_CTL);
+	}
+}
 
-	mtx_lock(&ctl_softc->ctl_lock);
+char *
+ctl_get_opt(ctl_options_t *opts, const char *name)
+{
+	struct ctl_option *opt;
 
-	STAILQ_FOREACH(be_tmp, &ctl_softc->be_list, links) {
-		if (strcmp(be_tmp->name, backend_name) == 0) {
-			mtx_unlock(&ctl_softc->ctl_lock);
-			return (be_tmp);
+	STAILQ_FOREACH(opt, opts, links) {
+		if (strcmp(opt->name, name) == 0) {
+			return (opt->value);
 		}
 	}
+	return (NULL);
+}
 
-	mtx_unlock(&ctl_softc->ctl_lock);
+int
+ctl_get_opt_number(ctl_options_t *opts, const char *name, uint64_t *val)
+{
+	const char *value;
 
-	return (NULL);
+	value = ctl_get_opt(opts, name);
+	if (value == NULL)
+		return (-2);
+	return (ctl_expand_number(value, val));
 }
-
-/*
- * vim: ts=8
- */

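ctl_backend_register() now runs the backend's init() before inserting
it on be_list and propagates any error, and ctl_backend_deregister()
gives shutdown() a chance to refuse, which is what makes MOD_UNLOAD
workable in the module handler updated in ctl_backend.h below.  A
minimal sketch of a driver shaped for that contract (all names are
illustrative; the data-path methods are omitted):

	/*
	 * Sketch of a backend under the reworked registration path:
	 * a non-zero init() aborts registration, and shutdown() can
	 * veto unload.  Names are illustrative.
	 */
	static int
	example_be_init(void)
	{
		/* Allocate global state; non-zero aborts registration. */
		return (0);
	}

	static int
	example_be_shutdown(void)
	{
		/* Return EBUSY while LUNs exist; free state otherwise. */
		return (0);
	}

	static struct ctl_backend_driver example_backend_driver = {
		.name = "example",
		.flags = CTL_BE_FLAG_HAS_CONFIG,
		.init = example_be_init,
		.shutdown = example_be_shutdown,
		/* data_submit, config_read/write, etc. omitted here */
	};
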
Modified: trunk/sys/cam/ctl/ctl_backend.h
===================================================================
--- trunk/sys/cam/ctl/ctl_backend.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_backend.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,5 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2017 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,8 +29,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_backend.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend.h#2 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_backend.h 313369 2017-02-07 01:56:26Z mav $
  */
 /*
  * CTL backend driver definitions
@@ -39,48 +41,13 @@
 #ifndef	_CTL_BACKEND_H_
 #define	_CTL_BACKEND_H_
 
-/*
- * XXX KDM move this to another header file?
- */
-#define	CTL_BE_NAME_LEN		32
+#include <cam/ctl/ctl_ioctl.h>
 
-/*
- * The ID_REQ flag is used to say that the caller has requested a
- * particular LUN ID in the req_lun_id field.  If we cannot allocate that
- * LUN ID, the ctl_add_lun() call will fail.
- *
- * The POWERED_OFF flag tells us that the LUN should default to the powered
- * off state.  It will return 0x04,0x02 until it is powered up.  ("Logical
- * unit not ready, initializing command required.")
- *
- * The INOPERABLE flag tells us that this LUN is not operable for whatever
- * reason.  This means that user data may have been (or has been?) lost.
- * We will return 0x31,0x00 ("Medium format corrupted") until the host
- * issues a FORMAT UNIT command to clear the error.
- *
- * The PRIMARY flag tells us that this LUN is registered as a Primary LUN
- * which is accessible via the Master shelf controller in an HA. This flag
- * being set indicates a Primary LUN. This flag being reset represents a
- * Secondary LUN controlled by the Secondary controller in an HA
- * configuration. Flag is applicable at this time to T_DIRECT types. 
- *
- * The SERIAL_NUM flag tells us that the serial_num field is filled in and
- * valid for use in SCSI INQUIRY VPD page 0x80.
- *
- * The DEVID flag tells us that the device_id field is filled in and
- * valid for use in SCSI INQUIRY VPD page 0x83.
- *
- * The DEV_TYPE flag tells us that the device_type field is filled in.
- */
 typedef enum {
-	CTL_LUN_FLAG_ID_REQ		= 0x01,
-	CTL_LUN_FLAG_POWERED_OFF	= 0x02,
-	CTL_LUN_FLAG_INOPERABLE		= 0x04,
-	CTL_LUN_FLAG_PRIMARY		= 0x08,
-	CTL_LUN_FLAG_SERIAL_NUM		= 0x10,
-	CTL_LUN_FLAG_DEVID		= 0x20,
-	CTL_LUN_FLAG_DEV_TYPE		= 0x40
-} ctl_backend_lun_flags;
+	CTL_LUN_SERSEQ_OFF,
+	CTL_LUN_SERSEQ_READ,
+	CTL_LUN_SERSEQ_ON
+} ctl_lun_serseq;
 
 #ifdef _KERNEL
 
@@ -89,12 +56,13 @@
 	{ \
 		switch (type) { \
 		case MOD_LOAD: \
-			ctl_backend_register( \
-				(struct ctl_backend_driver *)data); \
+			return (ctl_backend_register( \
+				(struct ctl_backend_driver *)data)); \
 			break; \
 		case MOD_UNLOAD: \
-			printf(#name " module unload - not possible for this module type\n"); \
-			return EINVAL; \
+			return (ctl_backend_deregister( \
+				(struct ctl_backend_driver *)data)); \
+			break; \
 		default: \
 			return EOPNOTSUPP; \
 		} \
@@ -137,6 +105,18 @@
  * this should be 512.  In theory CTL should be able to handle other block
  * sizes.  Host application software may not deal with it very well, though.
  *
+ * pblockexp is the log2() of number of LBAs on the LUN per physical sector.
+ *
+ * pblockoff is the lowest LBA on the LUN aligned to physical sector.
+ *
+ * ublockexp is the log2() of number of LBAs on the LUN per UNMAP block.
+ *
+ * ublockoff is the lowest LBA on the LUN aligned to UNMAP block.
+ *
+ * atomicblock is the number of blocks that can be written atomically.
+ *
+ * opttxferlen is the number of blocks that can be written in one operation.
+ *
  * req_lun_id is the requested LUN ID.  CTL only pays attention to this
  * field if the CTL_LUN_FLAG_ID_REQ flag is set.  If the requested LUN ID is
  * not available, the LUN addition will fail.  If a particular LUN ID isn't
@@ -176,9 +156,16 @@
 struct ctl_be_lun {
 	uint8_t			lun_type;	/* passed to CTL */
 	ctl_backend_lun_flags	flags;		/* passed to CTL */
+	ctl_lun_serseq		serseq;		/* passed to CTL */
 	void			*be_lun;	/* passed to CTL */
 	uint64_t		maxlba;		/* passed to CTL */
 	uint32_t		blocksize;	/* passed to CTL */
+	uint16_t		pblockexp;	/* passed to CTL */
+	uint16_t		pblockoff;	/* passed to CTL */
+	uint16_t		ublockexp;	/* passed to CTL */
+	uint16_t		ublockoff;	/* passed to CTL */
+	uint32_t		atomicblock;	/* passed to CTL */
+	uint32_t		opttxferlen;	/* passed to CTL */
 	uint32_t		req_lun_id;	/* passed to CTL */
 	uint32_t		lun_id;		/* returned from CTL */
 	uint8_t			serial_num[CTL_SN_LEN];	 /* passed to CTL */
@@ -187,6 +174,7 @@
 	be_lun_config_t		lun_config_status; /* passed to CTL */
 	struct ctl_backend_driver *be;		/* passed to CTL */
 	void			*ctl_lun;	/* used by CTL */
+	ctl_options_t		options;	/* passed to CTL */
 	STAILQ_ENTRY(ctl_be_lun) links;		/* used by CTL */
 };
 
@@ -193,20 +181,22 @@
 typedef enum {
 	CTL_BE_FLAG_NONE	= 0x00,	/* no flags */
 	CTL_BE_FLAG_HAS_CONFIG	= 0x01,	/* can do config reads, writes */
-	CTL_BE_FLAG_INTERNAL	= 0x02	/* don't inc mod refcount */
 } ctl_backend_flags;
 
 typedef int (*be_init_t)(void);
+typedef int (*be_shutdown_t)(void);
 typedef int (*be_func_t)(union ctl_io *io);
 typedef void (*be_vfunc_t)(union ctl_io *io);
 typedef int (*be_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
 			  struct thread *td);
 typedef int (*be_luninfo_t)(void *be_lun, struct sbuf *sb);
+typedef uint64_t (*be_lunattr_t)(void *be_lun, const char *attrname);
 
 struct ctl_backend_driver {
 	char		  name[CTL_BE_NAME_LEN]; /* passed to CTL */
 	ctl_backend_flags flags;	         /* passed to CTL */
 	be_init_t	  init;			 /* passed to CTL */
+	be_shutdown_t	  shutdown;		 /* passed to CTL */
 	be_func_t	  data_submit;		 /* passed to CTL */
 	be_func_t	  data_move_done;	 /* passed to CTL */
 	be_func_t	  config_read;		 /* passed to CTL */
@@ -213,6 +203,7 @@
 	be_func_t	  config_write;		 /* passed to CTL */
 	be_ioctl_t	  ioctl;		 /* passed to CTL */
 	be_luninfo_t	  lun_info;		 /* passed to CTL */
+	be_lunattr_t	  lun_attr;		 /* passed to CTL */
 #ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED
 	be_func_t	  config_move_done;	 /* passed to backend */
 #endif
@@ -254,35 +245,21 @@
 int ctl_stop_lun(struct ctl_be_lun *be_lun);
 
 /*
- * If a LUN is inoperable, call ctl_lun_inoperable().  Generally the LUN
- * will become operable once again when the user issues the SCSI FORMAT UNIT
- * command.  (CTL will automatically clear the inoperable flag.)  If we
- * need to re-enable the LUN, we can call ctl_lun_operable() to enable it
- * without a SCSI command.
+ * Methods to notify about media and tray status changes.
  */
-int ctl_lun_inoperable(struct ctl_be_lun *be_lun);
-int ctl_lun_operable(struct ctl_be_lun *be_lun);
+int ctl_lun_no_media(struct ctl_be_lun *be_lun);
+int ctl_lun_has_media(struct ctl_be_lun *be_lun);
+int ctl_lun_ejected(struct ctl_be_lun *be_lun);
 
 /*
- * If a LUN is locked on or unlocked from a power/APS standpoint, call
- * ctl_lun_power_lock() to update the current status in CTL's APS subpage.
- * Set the lock flag to 1 to lock the LUN, set it to 0 to unlock the LUN.
+ * Called on LUN HA role change.
  */
-int ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
-		       int lock);
+int ctl_lun_primary(struct ctl_be_lun *be_lun);
+int ctl_lun_secondary(struct ctl_be_lun *be_lun);
 
 /*
- * To take a LUN offline, call ctl_lun_offline().  Generally the LUN will
- * be online again once the user sends a SCSI START STOP UNIT command with
- * the start and on/offline bits set.  The backend can bring the LUN back
- * online via the ctl_lun_online() function, if necessary.
+ * Let the backend notify the initiators about changes.
  */
-int ctl_lun_offline(struct ctl_be_lun *be_lun);
-int ctl_lun_online(struct ctl_be_lun *be_lun);
-
-/*
- * Let the backend notify the initiator about changed capacity.
- */
 void ctl_lun_capacity_changed(struct ctl_be_lun *be_lun);
 
 #endif /* _KERNEL */

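The new ctl_be_lun geometry fields are exponents: per the comments
above, pblockexp and ublockexp are log2() of the number of LBAs per
physical sector and per UNMAP block, with pblockoff/ublockoff giving
the lowest aligned LBA.  A worked example for one assumed geometry
(512-byte logical blocks, 4 KiB physical sectors, 1 MiB UNMAP blocks):

	/*
	 * User-space sketch computing the exponent fields for one
	 * assumed geometry; offsets are zero for an aligned device.
	 */
	#include <stdio.h>

	static unsigned
	ilog2(unsigned n)
	{
		unsigned e;

		for (e = 0; n > 1; n >>= 1)
			e++;
		return (e);
	}

	int
	main(void)
	{
		unsigned blocksize = 512;

		/* 4096 / 512 = 8 LBAs per physical sector -> exponent 3 */
		printf("pblockexp = %u\n", ilog2(4096 / blocksize));
		/* 1048576 / 512 = 2048 LBAs per UNMAP block -> exponent 11 */
		printf("ublockexp = %u\n", ilog2(1048576 / blocksize));
		return (0);
	}
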
Modified: trunk/sys/cam/ctl/ctl_backend_block.c
===================================================================
--- trunk/sys/cam/ctl/ctl_backend_block.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_backend_block.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,7 +1,9 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
  * Copyright (c) 2009-2011 Spectra Logic Corporation
  * Copyright (c) 2012 The FreeBSD Foundation
+ * Copyright (c) 2014-2015 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Portions of this software were developed by Edward Tomasz Napierala
@@ -32,7 +34,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_backend_block.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
  */
 /*
  * CAM Target Layer driver backend for block devices.
@@ -40,7 +42,7 @@
  * Author: Ken Merry <ken at FreeBSD.org>
  */
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_backend_block.c 313369 2017-02-07 01:56:26Z mav $");
 
 #include <opt_kdtrace.h>
 
@@ -51,6 +53,7 @@
 #include <sys/kthread.h>
 #include <sys/bio.h>
 #include <sys/fcntl.h>
+#include <sys/limits.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/condvar.h>
@@ -69,6 +72,7 @@
 #include <sys/disk.h>
 #include <sys/fcntl.h>
 #include <sys/filedesc.h>
+#include <sys/filio.h>
 #include <sys/proc.h>
 #include <sys/pcpu.h>
 #include <sys/module.h>
@@ -84,17 +88,21 @@
 #include <cam/ctl/ctl_io.h>
 #include <cam/ctl/ctl.h>
 #include <cam/ctl/ctl_backend.h>
-#include <cam/ctl/ctl_frontend_internal.h>
 #include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
 #include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_private.h>
 #include <cam/ctl/ctl_error.h>
 
 /*
- * The idea here is that we'll allocate enough S/G space to hold a 16MB
- * I/O.  If we get an I/O larger than that, we'll reject it.
+ * The idea here is that we'll allocate enough S/G space to hold a 1MB
+ * I/O.  If we get an I/O larger than that, we'll split it.
  */
-#define	CTLBLK_MAX_IO_SIZE	(16 * 1024 * 1024)
-#define	CTLBLK_MAX_SEGS		(CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1
+#define	CTLBLK_HALF_IO_SIZE	(512 * 1024)
+#define	CTLBLK_MAX_IO_SIZE	(CTLBLK_HALF_IO_SIZE * 2)
+#define	CTLBLK_MAX_SEG		MAXPHYS
+#define	CTLBLK_HALF_SEGS	MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1)
+#define	CTLBLK_MAX_SEGS		(CTLBLK_HALF_SEGS * 2)
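
A quick sanity check of these constants: with the common 128KB MAXPHYS,
each half works out to 4 segments, and the full 8-segment S/G list covers
exactly the 1MB I/O limit.  A standalone sketch (the macro values are
copied from the patch; the assumed MAXPHYS and the main() harness are
illustrative only):

	#include <assert.h>
	#include <stdio.h>

	#define MAXPHYS			(128 * 1024)	/* assumed typical value */
	#define MAX(a, b)		((a) > (b) ? (a) : (b))
	#define CTLBLK_HALF_IO_SIZE	(512 * 1024)
	#define CTLBLK_MAX_IO_SIZE	(CTLBLK_HALF_IO_SIZE * 2)
	#define CTLBLK_MAX_SEG		MAXPHYS
	#define CTLBLK_HALF_SEGS	MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1)
	#define CTLBLK_MAX_SEGS		(CTLBLK_HALF_SEGS * 2)

	int
	main(void)
	{
		/* 512KB half / 128KB segments = 4 segments per half. */
		assert(CTLBLK_HALF_SEGS == 4);
		assert(CTLBLK_MAX_SEGS == 8);
		/* The full S/G list covers exactly the 1MB maximum I/O. */
		assert(CTLBLK_MAX_SEGS * CTLBLK_MAX_SEG == CTLBLK_MAX_IO_SIZE);
		printf("segs=%d seg=%dKB io=%dKB\n", CTLBLK_MAX_SEGS,
		    CTLBLK_MAX_SEG / 1024, CTLBLK_MAX_IO_SIZE / 1024);
		return (0);
	}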
 
 #ifdef CTLBLK_DEBUG
 #define DPRINTF(fmt, args...) \
@@ -103,6 +111,11 @@
 #define DPRINTF(fmt, args...) do {} while(0)
 #endif
 
+#define PRIV(io)	\
+    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
+#define ARGS(io)	\
+    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
+
 SDT_PROVIDER_DEFINE(cbb);
 
 typedef enum {
@@ -109,7 +122,6 @@
 	CTL_BE_BLOCK_LUN_UNCONFIGURED	= 0x01,
 	CTL_BE_BLOCK_LUN_CONFIG_ERR	= 0x02,
 	CTL_BE_BLOCK_LUN_WAITING	= 0x04,
-	CTL_BE_BLOCK_LUN_MULTI_THREAD	= 0x08
 } ctl_be_block_lun_flags;
 
 typedef enum {
@@ -118,18 +130,11 @@
 	CTL_BE_BLOCK_FILE
 } ctl_be_block_type;
 
-struct ctl_be_block_devdata {
-	struct cdev *cdev;
-	struct cdevsw *csw;
-	int dev_ref;
-};
-
 struct ctl_be_block_filedata {
 	struct ucred *cred;
 };
 
 union ctl_be_block_bedata {
-	struct ctl_be_block_devdata dev;
 	struct ctl_be_block_filedata file;
 };
 
@@ -138,6 +143,8 @@
 
 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
 			       struct ctl_be_block_io *beio);
+typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun,
+				  const char *attrname);
 
 /*
  * Backend LUN structure.  There is a 1:1 mapping between a block device
@@ -144,7 +151,7 @@
  * and a backend block LUN, and between a backend block LUN and a CTL LUN.
  */
 struct ctl_be_block_lun {
-	struct ctl_block_disk *disk;
+	struct ctl_lun_create_params params;
 	char lunname[32];
 	char *dev_path;
 	ctl_be_block_type dev_type;
@@ -152,23 +159,26 @@
 	union ctl_be_block_bedata backend;
 	cbb_dispatch_t dispatch;
 	cbb_dispatch_t lun_flush;
-	struct mtx lock;
+	cbb_dispatch_t unmap;
+	cbb_dispatch_t get_lba_status;
+	cbb_getattr_t getattr;
 	uma_zone_t lun_zone;
 	uint64_t size_blocks;
 	uint64_t size_bytes;
-	uint32_t blocksize;
-	int blocksize_shift;
 	struct ctl_be_block_softc *softc;
 	struct devstat *disk_stats;
 	ctl_be_block_lun_flags flags;
 	STAILQ_ENTRY(ctl_be_block_lun) links;
-	struct ctl_be_lun ctl_be_lun;
+	struct ctl_be_lun cbe_lun;
 	struct taskqueue *io_taskqueue;
 	struct task io_task;
 	int num_threads;
 	STAILQ_HEAD(, ctl_io_hdr) input_queue;
+	STAILQ_HEAD(, ctl_io_hdr) config_read_queue;
 	STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
 	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
+	struct mtx_padalign io_lock;
+	struct mtx_padalign queue_lock;
 };
 
 /*
@@ -175,11 +185,8 @@
  * Overall softc structure for the block backend module.
  */
 struct ctl_be_block_softc {
-	STAILQ_HEAD(, ctl_be_block_io)   beio_free_queue;
 	struct mtx			 lock;
-	int				 prealloc_beio;
-	int				 num_disks;
-	STAILQ_HEAD(, ctl_block_disk)	 disk_list;
+	uma_zone_t			 beio_zone;
 	int				 num_luns;
 	STAILQ_HEAD(, ctl_be_block_lun)	 lun_list;
 };
@@ -194,22 +201,25 @@
 	struct ctl_sg_entry		sg_segs[CTLBLK_MAX_SEGS];
 	struct iovec			xiovecs[CTLBLK_MAX_SEGS];
 	int				bio_cmd;
-	int				bio_flags;
 	int				num_segs;
 	int				num_bios_sent;
 	int				num_bios_done;
 	int				send_complete;
-	int				num_errors;
+	int				first_error;
+	uint64_t			first_error_offset;
 	struct bintime			ds_t0;
 	devstat_tag_type		ds_tag_type;
 	devstat_trans_flags		ds_trans_type;
 	uint64_t			io_len;
 	uint64_t			io_offset;
+	int				io_arg;
 	struct ctl_be_block_softc	*softc;
 	struct ctl_be_block_lun		*lun;
-	STAILQ_ENTRY(ctl_be_block_io)	links;
+	void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */
 };
 
+extern struct ctl_softc *control_softc;
+
 static int cbb_num_threads = 14;
 TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
@@ -219,10 +229,6 @@
 
 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
 static void ctl_free_beio(struct ctl_be_block_io *beio);
-static int ctl_grow_beio(struct ctl_be_block_softc *softc, int count);
-#if 0
-static void ctl_shrink_beio(struct ctl_be_block_softc *softc);
-#endif
 static void ctl_complete_beio(struct ctl_be_block_io *beio);
 static int ctl_be_block_move_done(union ctl_io *io);
 static void ctl_be_block_biodone(struct bio *bio);
@@ -230,10 +236,20 @@
 				    struct ctl_be_block_io *beio);
 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
 				       struct ctl_be_block_io *beio);
+static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun,
+				  struct ctl_be_block_io *beio);
+static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun,
+					 const char *attrname);
 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
 				   struct ctl_be_block_io *beio);
+static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
+				   struct ctl_be_block_io *beio);
 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
 				      struct ctl_be_block_io *beio);
+static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun,
+					 const char *attrname);
+static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
+				    union ctl_io *io);
 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
 				    union ctl_io *io);
 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
@@ -247,17 +263,12 @@
 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
 				 struct ctl_lun_req *req);
 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
-static int ctl_be_block_open(struct ctl_be_block_softc *softc,
-			     struct ctl_be_block_lun *be_lun,
+static int ctl_be_block_open(struct ctl_be_block_lun *be_lun,
 			     struct ctl_lun_req *req);
 static int ctl_be_block_create(struct ctl_be_block_softc *softc,
 			       struct ctl_lun_req *req);
 static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
 			   struct ctl_lun_req *req);
-static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
-				  struct ctl_lun_req *req);
-static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
-				 struct ctl_lun_req *req);
 static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
 			   struct ctl_lun_req *req);
 static void ctl_be_block_lun_shutdown(void *be_lun);
@@ -266,7 +277,9 @@
 static int ctl_be_block_config_write(union ctl_io *io);
 static int ctl_be_block_config_read(union ctl_io *io);
 static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
-int ctl_be_block_init(void);
+static uint64_t ctl_be_block_lun_attr(void *be_lun, const char *attrname);
+static int ctl_be_block_init(void);
+static int ctl_be_block_shutdown(void);
 
 static struct ctl_backend_driver ctl_be_block_driver = 
 {
@@ -273,12 +286,14 @@
 	.name = "block",
 	.flags = CTL_BE_FLAG_HAS_CONFIG,
 	.init = ctl_be_block_init,
+	.shutdown = ctl_be_block_shutdown,
 	.data_submit = ctl_be_block_submit,
 	.data_move_done = ctl_be_block_move_done,
 	.config_read = ctl_be_block_config_read,
 	.config_write = ctl_be_block_config_write,
 	.ioctl = ctl_be_block_ioctl,
-	.lun_info = ctl_be_block_lun_info
+	.lun_info = ctl_be_block_lun_info,
+	.lun_attr = ctl_be_block_lun_attr
 };
 
 MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
@@ -288,53 +303,9 @@
 ctl_alloc_beio(struct ctl_be_block_softc *softc)
 {
 	struct ctl_be_block_io *beio;
-	int count;
 
-	mtx_lock(&softc->lock);
-
-	beio = STAILQ_FIRST(&softc->beio_free_queue);
-	if (beio != NULL) {
-		STAILQ_REMOVE(&softc->beio_free_queue, beio,
-			      ctl_be_block_io, links);
-	}
-	mtx_unlock(&softc->lock);
-
-	if (beio != NULL) {
-		bzero(beio, sizeof(*beio));
-		beio->softc = softc;
-		return (beio);
-	}
-
-	for (;;) {
-
-		count = ctl_grow_beio(softc, /*count*/ 10);
-
-		/*
-		 * This shouldn't be possible, since ctl_grow_beio() uses a
-		 * blocking malloc.
-		 */
-		if (count == 0)
-			return (NULL);
-
-		/*
-		 * Since we have to drop the lock when we're allocating beio
-		 * structures, it's possible someone else can come along and
-		 * allocate the beio's we've just allocated.
-		 */
-		mtx_lock(&softc->lock);
-		beio = STAILQ_FIRST(&softc->beio_free_queue);
-		if (beio != NULL) {
-			STAILQ_REMOVE(&softc->beio_free_queue, beio,
-				      ctl_be_block_io, links);
-		}
-		mtx_unlock(&softc->lock);
-
-		if (beio != NULL) {
-			bzero(beio, sizeof(*beio));
-			beio->softc = softc;
-			break;
-		}
-	}
+	beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO);
+	beio->softc = softc;
 	return (beio);
 }
 
@@ -341,11 +312,9 @@
 static void
 ctl_free_beio(struct ctl_be_block_io *beio)
 {
-	struct ctl_be_block_softc *softc;
 	int duplicate_free;
 	int i;
 
-	softc = beio->softc;
 	duplicate_free = 0;
 
 	for (i = 0; i < beio->num_segs; i++) {
@@ -354,6 +323,13 @@
 
 		uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
 		beio->sg_segs[i].addr = NULL;
+
+		/* For COMPARE we allocated a second, equal S/G list; free it too. */
+		if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) {
+			uma_zfree(beio->lun->lun_zone,
+			    beio->sg_segs[i + CTLBLK_HALF_SEGS].addr);
+			beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL;
+		}
 	}
 
 	if (duplicate_free > 0) {
@@ -360,69 +336,63 @@
 		printf("%s: %d duplicate frees out of %d segments\n", __func__,
 		       duplicate_free, beio->num_segs);
 	}
-	mtx_lock(&softc->lock);
-	STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
-	mtx_unlock(&softc->lock);
+
+	uma_zfree(beio->softc->beio_zone, beio);
 }
 
-static int
-ctl_grow_beio(struct ctl_be_block_softc *softc, int count)
+static void
+ctl_complete_beio(struct ctl_be_block_io *beio)
 {
-	int i;
+	union ctl_io *io = beio->io;
 
-	for (i = 0; i < count; i++) {
-		struct ctl_be_block_io *beio;
-
-		beio = (struct ctl_be_block_io *)malloc(sizeof(*beio),
-							   M_CTLBLK,
-							   M_WAITOK | M_ZERO);
-		beio->softc = softc;
-		mtx_lock(&softc->lock);
-		STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
-		mtx_unlock(&softc->lock);
+	if (beio->beio_cont != NULL) {
+		beio->beio_cont(beio);
+	} else {
+		ctl_free_beio(beio);
+		ctl_data_submit_done(io);
 	}
-
-	return (i);
 }
 
-#if 0
-static void
-ctl_shrink_beio(struct ctl_be_block_softc *softc)
+static size_t
+cmp(uint8_t *a, uint8_t *b, size_t size)
 {
-	struct ctl_be_block_io *beio, *beio_tmp;
+	size_t i;
 
-	mtx_lock(&softc->lock);
-	STAILQ_FOREACH_SAFE(beio, &softc->beio_free_queue, links, beio_tmp) {
-		STAILQ_REMOVE(&softc->beio_free_queue, beio,
-			      ctl_be_block_io, links);
-		free(beio, M_CTLBLK);
+	for (i = 0; i < size; i++) {
+		if (a[i] != b[i])
+			break;
 	}
-	mtx_unlock(&softc->lock);
+	return (i);
 }
-#endif
 
 static void
-ctl_complete_beio(struct ctl_be_block_io *beio)
+ctl_be_block_compare(union ctl_io *io)
 {
-	union ctl_io *io;
-	int io_len;
+	struct ctl_be_block_io *beio;
+	uint64_t off, res;
+	int i;
+	uint8_t info[8];
 
-	io = beio->io;
-
-	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
-		io_len = beio->io_len;
-	else
-		io_len = 0;
-
-	devstat_end_transaction(beio->lun->disk_stats,
-				/*bytes*/ io_len,
-				beio->ds_tag_type,
-				beio->ds_trans_type,
-				/*now*/ NULL,
-				/*then*/&beio->ds_t0);
-
-	ctl_free_beio(beio);
-	ctl_done(io);
+	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+	off = 0;
+	for (i = 0; i < beio->num_segs; i++) {
+		res = cmp(beio->sg_segs[i].addr,
+		    beio->sg_segs[i + CTLBLK_HALF_SEGS].addr,
+		    beio->sg_segs[i].len);
+		off += res;
+		if (res < beio->sg_segs[i].len)
+			break;
+	}
+	if (i < beio->num_segs) {
+		scsi_u64to8b(off, info);
+		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_MISCOMPARE,
+		    /*asc*/ 0x1D, /*ascq*/ 0x00,
+		    /*type*/ SSD_ELEM_INFO,
+		    /*size*/ sizeof(info), /*data*/ &info,
+		    /*type*/ SSD_ELEM_NONE);
+	} else
+		ctl_set_success(&io->scsiio);
 }
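
The per-segment walk above turns the first mismatching byte into an
absolute offset for the MISCOMPARE sense INFO field.  A userland model of
the same accounting (the segment structure is simplified and the data
values are illustrative, not the kernel code):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct seg { uint8_t *addr; size_t len; };

	/* Return the index of the first differing byte, or size if equal. */
	static size_t
	cmp(uint8_t *a, uint8_t *b, size_t size)
	{
		size_t i;

		for (i = 0; i < size; i++)
			if (a[i] != b[i])
				break;
		return (i);
	}

	int
	main(void)
	{
		uint8_t s0a[4] = {1, 2, 3, 4}, s0b[4] = {1, 2, 3, 4};
		uint8_t s1a[4] = {5, 6, 7, 8}, s1b[4] = {5, 9, 7, 8};
		struct seg a[2] = {{s0a, 4}, {s1a, 4}};
		struct seg b[2] = {{s0b, 4}, {s1b, 4}};
		uint64_t off = 0;
		size_t res;
		int i;

		for (i = 0; i < 2; i++) {
			res = cmp(a[i].addr, b[i].addr, a[i].len);
			off += res;	/* running byte offset across segments */
			if (res < a[i].len)
				break;	/* first mismatch inside segment i */
		}
		/* Prints 5: segment 0 (4 bytes) matched, byte 1 of segment 1 differs. */
		printf("first miscompare at byte %ju\n", (uintmax_t)off);
		return (0);
	}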
 
 static int
@@ -430,48 +400,49 @@
 {
 	struct ctl_be_block_io *beio;
 	struct ctl_be_block_lun *be_lun;
+	struct ctl_lba_len_flags *lbalen;
 #ifdef CTL_TIME_IO
 	struct bintime cur_bt;
-#endif  
+#endif
 
-	beio = (struct ctl_be_block_io *)
-		io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
-
+	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
 	be_lun = beio->lun;
 
 	DPRINTF("entered\n");
 
 #ifdef CTL_TIME_IO
-	getbintime(&cur_bt);
+	getbinuptime(&cur_bt);
 	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
 	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
+#endif
 	io->io_hdr.num_dmas++;
-#endif  
+	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
 
 	/*
 	 * We set status at this point for read commands, and write
 	 * commands with errors.
 	 */
-	if ((beio->bio_cmd == BIO_READ)
-	 && (io->io_hdr.port_status == 0)
-	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
-	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
-		ctl_set_success(&io->scsiio);
-	else if ((io->io_hdr.port_status != 0)
-	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
-	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
-		/*
-		 * For hardware error sense keys, the sense key
-		 * specific value is defined to be a retry count,
-		 * but we use it to pass back an internal FETD
-		 * error code.  XXX KDM  Hopefully the FETD is only
-		 * using 16 bits for an error code, since that's
-		 * all the space we have in the sks field.
-		 */
-		ctl_set_internal_failure(&io->scsiio,
-					 /*sks_valid*/ 1,
-					 /*retry_count*/
-					 io->io_hdr.port_status);
+	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
+		;
+	} else if ((io->io_hdr.port_status != 0) &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+		    /*retry_count*/ io->io_hdr.port_status);
+	} else if (io->scsiio.kern_data_resid != 0 &&
+	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_invalid_field_ciu(&io->scsiio);
+	} else if ((io->io_hdr.port_status == 0) &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
+		lbalen = ARGS(beio->io);
+		if (lbalen->flags & CTL_LLF_READ) {
+			ctl_set_success(&io->scsiio);
+		} else if (lbalen->flags & CTL_LLF_COMPARE) {
+			/* We have two data blocks ready for comparison. */
+			ctl_be_block_compare(io);
+		}
 	}
 
 	/*
@@ -492,15 +463,9 @@
 	 * This move done routine is generally called in the SIM's
 	 * interrupt context, and therefore we cannot block.
 	 */
-	mtx_lock(&be_lun->lock);
-	/*
-	 * XXX KDM make sure that links is okay to use at this point.
-	 * Otherwise, we either need to add another field to ctl_io_hdr,
-	 * or deal with resource allocation here.
-	 */
+	mtx_lock(&be_lun->queue_lock);
 	STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
-	mtx_unlock(&be_lun->lock);
-
+	mtx_unlock(&be_lun->queue_lock);
 	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
 
 	return (0);
@@ -512,6 +477,7 @@
 	struct ctl_be_block_io *beio;
 	struct ctl_be_block_lun *be_lun;
 	union ctl_io *io;
+	int error;
 
 	beio = bio->bio_caller1;
 	be_lun = beio->lun;
@@ -519,9 +485,14 @@
 
 	DPRINTF("entered\n");
 
-	mtx_lock(&be_lun->lock);
-	if (bio->bio_error != 0)
-		beio->num_errors++;
+	error = bio->bio_error;
+	mtx_lock(&be_lun->io_lock);
+	if (error != 0 &&
+	    (beio->first_error == 0 ||
+	     bio->bio_offset < beio->first_error_offset)) {
+		beio->first_error = error;
+		beio->first_error_offset = bio->bio_offset;
+	}
 
 	beio->num_bios_done++;
 
@@ -537,7 +508,7 @@
 	 */
 	if ((beio->send_complete == 0)
 	 || (beio->num_bios_done < beio->num_bios_sent)) {
-		mtx_unlock(&be_lun->lock);
+		mtx_unlock(&be_lun->io_lock);
 		return;
 	}
 
@@ -545,44 +516,55 @@
 	 * At this point, we've verified that we are the last I/O to
 	 * complete, so it's safe to drop the lock.
 	 */
-	mtx_unlock(&be_lun->lock);
+	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
+	    beio->ds_tag_type, beio->ds_trans_type,
+	    /*now*/ NULL, /*then*/&beio->ds_t0);
+	mtx_unlock(&be_lun->io_lock);
 
 	/*
 	 * If there are any errors from the backing device, we fail the
 	 * entire I/O with a medium error.
 	 */
-	if (beio->num_errors > 0) {
-		if (beio->bio_cmd == BIO_FLUSH) {
+	error = beio->first_error;
+	if (error != 0) {
+		if (error == EOPNOTSUPP) {
+			ctl_set_invalid_opcode(&io->scsiio);
+		} else if (error == ENOSPC || error == EDQUOT) {
+			ctl_set_space_alloc_fail(&io->scsiio);
+		} else if (error == EROFS || error == EACCES) {
+			ctl_set_hw_write_protected(&io->scsiio);
+		} else if (beio->bio_cmd == BIO_FLUSH) {
 			/* XXX KDM is there a better error here? */
 			ctl_set_internal_failure(&io->scsiio,
 						 /*sks_valid*/ 1,
 						 /*retry_count*/ 0xbad2);
-		} else
-			ctl_set_medium_error(&io->scsiio);
+		} else {
+			ctl_set_medium_error(&io->scsiio,
+			    beio->bio_cmd == BIO_READ);
+		}
 		ctl_complete_beio(beio);
 		return;
 	}
 
 	/*
-	 * If this is a write or a flush, we're all done.
+	 * If this is a write, a flush, a delete or verify, we're all done.
 	 * If this is a read, we can now send the data to the user.
 	 */
 	if ((beio->bio_cmd == BIO_WRITE)
-	 || (beio->bio_cmd == BIO_FLUSH)) {
+	 || (beio->bio_cmd == BIO_FLUSH)
+	 || (beio->bio_cmd == BIO_DELETE)
+	 || (ARGS(io)->flags & CTL_LLF_VERIFY)) {
 		ctl_set_success(&io->scsiio);
 		ctl_complete_beio(beio);
 	} else {
-		io->scsiio.be_move_done = ctl_be_block_move_done;
-		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
-		io->scsiio.kern_data_len = beio->io_len;
-		io->scsiio.kern_total_len = beio->io_len;
-		io->scsiio.kern_rel_offset = 0;
-		io->scsiio.kern_data_resid = 0;
-		io->scsiio.kern_sg_entries = beio->num_segs;
-		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
+		if ((ARGS(io)->flags & CTL_LLF_READ) &&
+		    beio->beio_cont == NULL) {
+			ctl_set_success(&io->scsiio);
+			ctl_serseq_done(io);
+		}
 #ifdef CTL_TIME_IO
-        	getbintime(&io->io_hdr.dma_start_bt);
-#endif  
+		getbinuptime(&io->io_hdr.dma_start_bt);
+#endif
 		ctl_datamove(io);
 	}
 }
@@ -591,36 +573,36 @@
 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
 			struct ctl_be_block_io *beio)
 {
-	union ctl_io *io;
+	union ctl_io *io = beio->io;
 	struct mount *mountpoint;
-	int vfs_is_locked, error, lock_flags;
+	int error, lock_flags;
 
 	DPRINTF("entered\n");
 
-	io = beio->io;
+	binuptime(&beio->ds_t0);
+	mtx_lock(&be_lun->io_lock);
+	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
+	mtx_unlock(&be_lun->io_lock);
 
-	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
+	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
 
-       	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
-
-	if (MNT_SHARED_WRITES(mountpoint)
-	 || ((mountpoint == NULL)
-	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
+	if (MNT_SHARED_WRITES(mountpoint) ||
+	    ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
 		lock_flags = LK_SHARED;
 	else
 		lock_flags = LK_EXCLUSIVE;
-
 	vn_lock(be_lun->vn, lock_flags | LK_RETRY);
-
-	binuptime(&beio->ds_t0);
-	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
-
-	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
+	error = VOP_FSYNC(be_lun->vn, beio->io_arg ? MNT_NOWAIT : MNT_WAIT,
+	    curthread);
 	VOP_UNLOCK(be_lun->vn, 0);
 
 	vn_finished_write(mountpoint);
 
-	VFS_UNLOCK_GIANT(vfs_is_locked);
+	mtx_lock(&be_lun->io_lock);
+	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
+	    beio->ds_tag_type, beio->ds_trans_type,
+	    /*now*/ NULL, /*then*/&beio->ds_t0);
+	mtx_unlock(&be_lun->io_lock);
 
 	if (error == 0)
 		ctl_set_success(&io->scsiio);
@@ -634,10 +616,10 @@
 	ctl_complete_beio(beio);
 }
 
-SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, file_start, "uint64_t");
-SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, file_start, "uint64_t");
-SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, file_done,"uint64_t");
-SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, file_done, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, , read, file_start, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, , write, file_start, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, , read, file_done, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, , write, file_done, "uint64_t");
 
 static void
 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
@@ -647,27 +629,27 @@
 	union ctl_io *io;
 	struct uio xuio;
 	struct iovec *xiovec;
-	int vfs_is_locked, flags;
-	int error, i;
+	size_t s;
+	int error, flags, i;
 
 	DPRINTF("entered\n");
 
 	file_data = &be_lun->backend.file;
 	io = beio->io;
-	flags = beio->bio_flags;
+	flags = 0;
+	if (ARGS(io)->flags & CTL_LLF_DPO)
+		flags |= IO_DIRECT;
+	if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
+		flags |= IO_SYNC;
 
+	bzero(&xuio, sizeof(xuio));
 	if (beio->bio_cmd == BIO_READ) {
-		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
+		SDT_PROBE0(cbb, , read, file_start);
+		xuio.uio_rw = UIO_READ;
 	} else {
-		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
+		SDT_PROBE0(cbb, , write, file_start);
+		xuio.uio_rw = UIO_WRITE;
 	}
-
-	bzero(&xuio, sizeof(xuio));
-	if (beio->bio_cmd == BIO_READ)
-		xuio.uio_rw = UIO_READ;
-	else
-		xuio.uio_rw = UIO_WRITE;
-
 	xuio.uio_offset = beio->io_offset;
 	xuio.uio_resid = beio->io_len;
 	xuio.uio_segflg = UIO_SYSSPACE;
@@ -680,13 +662,14 @@
 		xiovec->iov_len = beio->sg_segs[i].len;
 	}
 
-	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
+	binuptime(&beio->ds_t0);
+	mtx_lock(&be_lun->io_lock);
+	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
+	mtx_unlock(&be_lun->io_lock);
+
 	if (beio->bio_cmd == BIO_READ) {
 		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
 
-		binuptime(&beio->ds_t0);
-		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
-
 		/*
 		 * UFS pays attention to IO_DIRECT for reads.  If the
 		 * DIRECTIO option is configured into the kernel, it calls
@@ -703,14 +686,27 @@
 		 * ZFS pays attention to IO_SYNC (which translates into the
 		 * Solaris define FRSYNC for zfs_read()) for reads.  It
 		 * attempts to sync the file before reading.
-		 *
-		 * So, to attempt to provide some barrier semantics in the
-		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
 		 */
-		error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
-				 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);
+		error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred);
 
 		VOP_UNLOCK(be_lun->vn, 0);
+		SDT_PROBE0(cbb, , read, file_done);
+		if (error == 0 && xuio.uio_resid > 0) {
+			/*
+			 * If we red less then requested (EOF), then
+			 * we should clean the rest of the buffer.
+			 */
+			s = beio->io_len - xuio.uio_resid;
+			for (i = 0; i < beio->num_segs; i++) {
+				if (s >= beio->sg_segs[i].len) {
+					s -= beio->sg_segs[i].len;
+					continue;
+				}
+				bzero((uint8_t *)beio->sg_segs[i].addr + s,
+				    beio->sg_segs[i].len - s);
+				s = 0;
+			}
+		}
 	} else {
 		struct mount *mountpoint;
 		int lock_flags;
@@ -717,18 +713,13 @@
 
 		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
 
-		if (MNT_SHARED_WRITES(mountpoint)
-		 || ((mountpoint == NULL)
+		if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL)
 		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
 			lock_flags = LK_SHARED;
 		else
 			lock_flags = LK_EXCLUSIVE;
-
 		vn_lock(be_lun->vn, lock_flags | LK_RETRY);
 
-		binuptime(&beio->ds_t0);
-		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
-
 		/*
 		 * UFS pays attention to IO_DIRECT for writes.  The write
 		 * is done asynchronously.  (Normally the write would just
@@ -743,74 +734,285 @@
 		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
 		 * for writes.  It will flush the transaction from the
 		 * cache before returning.
-		 *
-		 * So if we've got the BIO_ORDERED flag set, we want
-		 * IO_SYNC in either the UFS or ZFS case.
 		 */
-		error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
-				  IO_SYNC : 0, file_data->cred);
+		error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred);
 		VOP_UNLOCK(be_lun->vn, 0);
 
 		vn_finished_write(mountpoint);
+		SDT_PROBE0(cbb, , write, file_done);
         }
-        VFS_UNLOCK_GIANT(vfs_is_locked);
 
+	mtx_lock(&be_lun->io_lock);
+	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
+	    beio->ds_tag_type, beio->ds_trans_type,
+	    /*now*/ NULL, /*then*/&beio->ds_t0);
+	mtx_unlock(&be_lun->io_lock);
+
 	/*
 	 * If we got an error, set the sense data to "MEDIUM ERROR" and
 	 * return the I/O to the user.
 	 */
 	if (error != 0) {
-		char path_str[32];
+		if (error == ENOSPC || error == EDQUOT) {
+			ctl_set_space_alloc_fail(&io->scsiio);
+		} else if (error == EROFS || error == EACCES) {
+			ctl_set_hw_write_protected(&io->scsiio);
+		} else {
+			ctl_set_medium_error(&io->scsiio,
+			    beio->bio_cmd == BIO_READ);
+		}
+		ctl_complete_beio(beio);
+		return;
+	}
 
-		ctl_scsi_path_string(io, path_str, sizeof(path_str));
-		/*
-		 * XXX KDM ZFS returns ENOSPC when the underlying
-		 * filesystem fills up.  What kind of SCSI error should we
-		 * return for that?
-		 */
-		printf("%s%s command returned errno %d\n", path_str,
-		       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
-		ctl_set_medium_error(&io->scsiio);
+	/*
+	 * If this is a write or a verify, we're all done.
+	 * If this is a read, we can now send the data to the user.
+	 */
+	if ((beio->bio_cmd == BIO_WRITE) ||
+	    (ARGS(io)->flags & CTL_LLF_VERIFY)) {
+		ctl_set_success(&io->scsiio);
 		ctl_complete_beio(beio);
+	} else {
+		if ((ARGS(io)->flags & CTL_LLF_READ) &&
+		    beio->beio_cont == NULL) {
+			ctl_set_success(&io->scsiio);
+			ctl_serseq_done(io);
+		}
+#ifdef CTL_TIME_IO
+		getbinuptime(&io->io_hdr.dma_start_bt);
+#endif
+		ctl_datamove(io);
+	}
+}
+
+static void
+ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun,
+			struct ctl_be_block_io *beio)
+{
+	union ctl_io *io = beio->io;
+	struct ctl_lba_len_flags *lbalen = ARGS(io);
+	struct scsi_get_lba_status_data *data;
+	off_t roff, off;
+	int error, status;
+
+	DPRINTF("entered\n");
+
+	off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize;
+	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
+	error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off,
+	    0, curthread->td_ucred, curthread);
+	if (error == 0 && off > roff)
+		status = 0;	/* mapped up to off */
+	else {
+		error = VOP_IOCTL(be_lun->vn, FIOSEEKDATA, &off,
+		    0, curthread->td_ucred, curthread);
+		if (error == 0 && off > roff)
+			status = 1;	/* deallocated up to off */
+		else {
+			status = 0;	/* unknown up to the end */
+			off = be_lun->size_bytes;
+		}
+	}
+	VOP_UNLOCK(be_lun->vn, 0);
+
+	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
+	scsi_u64to8b(lbalen->lba, data->descr[0].addr);
+	scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize -
+	    lbalen->lba), data->descr[0].length);
+	data->descr[0].status = status;
+
+	ctl_complete_beio(beio);
+}
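
The FIOSEEKHOLE/FIOSEEKDATA probing here is the kernel-side counterpart
of lseek(2)'s SEEK_HOLE/SEEK_DATA.  A hedged userland analogy of the same
mapped/deallocated classification (an illustration only, not the kernel
path; pass a file to inspect):

	#include <sys/types.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(int argc, char **argv)
	{
		off_t roff = 0, off;
		int fd;

		if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
			return (1);
		/*
		 * If the next hole starts beyond roff, everything up to
		 * it is mapped (status 0 in the CTL code).
		 */
		off = lseek(fd, roff, SEEK_HOLE);
		if (off > roff)
			printf("mapped up to %jd\n", (intmax_t)off);
		else {
			/*
			 * Otherwise look for data; everything before it
			 * is a hole, i.e. deallocated (status 1).
			 */
			off = lseek(fd, roff, SEEK_DATA);
			if (off > roff)
				printf("deallocated up to %jd\n", (intmax_t)off);
			else
				printf("unknown up to end of file\n");
		}
		close(fd);
		return (0);
	}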
+
+static uint64_t
+ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname)
+{
+	struct vattr		vattr;
+	struct statfs		statfs;
+	uint64_t		val;
+	int			error;
+
+	val = UINT64_MAX;
+	if (be_lun->vn == NULL)
+		return (val);
+	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
+	if (strcmp(attrname, "blocksused") == 0) {
+		error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
+		if (error == 0)
+			val = vattr.va_bytes / be_lun->cbe_lun.blocksize;
+	}
+	if (strcmp(attrname, "blocksavail") == 0 &&
+	    (be_lun->vn->v_iflag & VI_DOOMED) == 0) {
+		error = VFS_STATFS(be_lun->vn->v_mount, &statfs);
+		if (error == 0)
+			val = statfs.f_bavail * statfs.f_bsize /
+			    be_lun->cbe_lun.blocksize;
+	}
+	VOP_UNLOCK(be_lun->vn, 0);
+	return (val);
+}
+
+static void
+ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
+			   struct ctl_be_block_io *beio)
+{
+	union ctl_io *io;
+	struct cdevsw *csw;
+	struct cdev *dev;
+	struct uio xuio;
+	struct iovec *xiovec;
+	int error, flags, i, ref;
+
+	DPRINTF("entered\n");
+
+	io = beio->io;
+	flags = 0;
+	if (ARGS(io)->flags & CTL_LLF_DPO)
+		flags |= IO_DIRECT;
+	if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
+		flags |= IO_SYNC;
+
+	bzero(&xuio, sizeof(xuio));
+	if (beio->bio_cmd == BIO_READ) {
+		SDT_PROBE0(cbb, , read, file_start);
+		xuio.uio_rw = UIO_READ;
+	} else {
+		SDT_PROBE0(cbb, , write, file_start);
+		xuio.uio_rw = UIO_WRITE;
+	}
+	xuio.uio_offset = beio->io_offset;
+	xuio.uio_resid = beio->io_len;
+	xuio.uio_segflg = UIO_SYSSPACE;
+	xuio.uio_iov = beio->xiovecs;
+	xuio.uio_iovcnt = beio->num_segs;
+	xuio.uio_td = curthread;
+
+	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
+		xiovec->iov_base = beio->sg_segs[i].addr;
+		xiovec->iov_len = beio->sg_segs[i].len;
+	}
+
+	binuptime(&beio->ds_t0);
+	mtx_lock(&be_lun->io_lock);
+	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
+	mtx_unlock(&be_lun->io_lock);
+
+	csw = devvn_refthread(be_lun->vn, &dev, &ref);
+	if (csw) {
+		if (beio->bio_cmd == BIO_READ)
+			error = csw->d_read(dev, &xuio, flags);
+		else
+			error = csw->d_write(dev, &xuio, flags);
+		dev_relthread(dev, ref);
+	} else
+		error = ENXIO;
+
+	if (beio->bio_cmd == BIO_READ)
+		SDT_PROBE0(cbb, , read, file_done);
+	else
+		SDT_PROBE0(cbb, , write, file_done);
+
+	mtx_lock(&be_lun->io_lock);
+	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
+	    beio->ds_tag_type, beio->ds_trans_type,
+	    /*now*/ NULL, /*then*/&beio->ds_t0);
+	mtx_unlock(&be_lun->io_lock);
+
+	/*
+	 * If we got an error, map it to appropriate sense data and
+	 * return the I/O to the user.
+	 */
+	if (error != 0) {
+		if (error == ENOSPC || error == EDQUOT) {
+			ctl_set_space_alloc_fail(&io->scsiio);
+		} else if (error == EROFS || error == EACCES) {
+			ctl_set_hw_write_protected(&io->scsiio);
+		} else {
+			ctl_set_medium_error(&io->scsiio,
+			    beio->bio_cmd == BIO_READ);
+		}
+		ctl_complete_beio(beio);
 		return;
 	}
 
 	/*
-	 * If this is a write, we're all done.
+	 * If this is a write or a verify, we're all done.
 	 * If this is a read, we can now send the data to the user.
 	 */
-	if (beio->bio_cmd == BIO_WRITE) {
+	if ((beio->bio_cmd == BIO_WRITE) ||
+	    (ARGS(io)->flags & CTL_LLF_VERIFY)) {
 		ctl_set_success(&io->scsiio);
-		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
 		ctl_complete_beio(beio);
 	} else {
-		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
-		io->scsiio.be_move_done = ctl_be_block_move_done;
-		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
-		io->scsiio.kern_data_len = beio->io_len;
-		io->scsiio.kern_total_len = beio->io_len;
-		io->scsiio.kern_rel_offset = 0;
-		io->scsiio.kern_data_resid = 0;
-		io->scsiio.kern_sg_entries = beio->num_segs;
-		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
+		if ((ARGS(io)->flags & CTL_LLF_READ) &&
+		    beio->beio_cont == NULL) {
+			ctl_set_success(&io->scsiio);
+			ctl_serseq_done(io);
+		}
 #ifdef CTL_TIME_IO
-        	getbintime(&io->io_hdr.dma_start_bt);
-#endif  
+		getbinuptime(&io->io_hdr.dma_start_bt);
+#endif
 		ctl_datamove(io);
 	}
 }
 
 static void
+ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun,
+			struct ctl_be_block_io *beio)
+{
+	union ctl_io *io = beio->io;
+	struct cdevsw *csw;
+	struct cdev *dev;
+	struct ctl_lba_len_flags *lbalen = ARGS(io);
+	struct scsi_get_lba_status_data *data;
+	off_t roff, off;
+	int error, ref, status;
+
+	DPRINTF("entered\n");
+
+	csw = devvn_refthread(be_lun->vn, &dev, &ref);
+	if (csw == NULL) {
+		status = 0;	/* unknown up to the end */
+		off = be_lun->size_bytes;
+		goto done;
+	}
+	off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize;
+	error = csw->d_ioctl(dev, FIOSEEKHOLE, (caddr_t)&off, FREAD,
+	    curthread);
+	if (error == 0 && off > roff)
+		status = 0;	/* mapped up to off */
+	else {
+		error = csw->d_ioctl(dev, FIOSEEKDATA, (caddr_t)&off, FREAD,
+		    curthread);
+		if (error == 0 && off > roff)
+			status = 1;	/* deallocated up to off */
+		else {
+			status = 0;	/* unknown up to the end */
+			off = be_lun->size_bytes;
+		}
+	}
+	dev_relthread(dev, ref);
+
+done:
+	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
+	scsi_u64to8b(lbalen->lba, data->descr[0].addr);
+	scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize -
+	    lbalen->lba), data->descr[0].length);
+	data->descr[0].status = status;
+
+	ctl_complete_beio(beio);
+}
+
+static void
 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
 		       struct ctl_be_block_io *beio)
 {
 	struct bio *bio;
-	union ctl_io *io;
-	struct ctl_be_block_devdata *dev_data;
+	struct cdevsw *csw;
+	struct cdev *dev;
+	int ref;
 
-	dev_data = &be_lun->backend.dev;
-	io = beio->io;
-
 	DPRINTF("entered\n");
 
 	/* This can't fail, it's a blocking allocation. */
@@ -817,8 +1019,6 @@
 	bio = g_alloc_bio();
 
 	bio->bio_cmd	    = BIO_FLUSH;
-	bio->bio_flags	   |= BIO_ORDERED;
-	bio->bio_dev	    = dev_data->cdev;
 	bio->bio_offset	    = 0;
 	bio->bio_data	    = 0;
 	bio->bio_done	    = ctl_be_block_biodone;
@@ -834,43 +1034,128 @@
 	beio->send_complete = 1;
 
 	binuptime(&beio->ds_t0);
+	mtx_lock(&be_lun->io_lock);
 	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
+	mtx_unlock(&be_lun->io_lock);
 
-	(*dev_data->csw->d_strategy)(bio);
+	csw = devvn_refthread(be_lun->vn, &dev, &ref);
+	if (csw) {
+		bio->bio_dev = dev;
+		csw->d_strategy(bio);
+		dev_relthread(dev, ref);
+	} else {
+		bio->bio_error = ENXIO;
+		ctl_be_block_biodone(bio);
+	}
 }
 
 static void
+ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun,
+		       struct ctl_be_block_io *beio,
+		       uint64_t off, uint64_t len, int last)
+{
+	struct bio *bio;
+	uint64_t maxlen;
+	struct cdevsw *csw;
+	struct cdev *dev;
+	int ref;
+
+	csw = devvn_refthread(be_lun->vn, &dev, &ref);
+	maxlen = LONG_MAX - (LONG_MAX % be_lun->cbe_lun.blocksize);
+	while (len > 0) {
+		bio = g_alloc_bio();
+		bio->bio_cmd	    = BIO_DELETE;
+		bio->bio_dev	    = dev;
+		bio->bio_offset	    = off;
+		bio->bio_length	    = MIN(len, maxlen);
+		bio->bio_data	    = 0;
+		bio->bio_done	    = ctl_be_block_biodone;
+		bio->bio_caller1    = beio;
+		bio->bio_pblkno     = off / be_lun->cbe_lun.blocksize;
+
+		off += bio->bio_length;
+		len -= bio->bio_length;
+
+		mtx_lock(&be_lun->io_lock);
+		beio->num_bios_sent++;
+		if (last && len == 0)
+			beio->send_complete = 1;
+		mtx_unlock(&be_lun->io_lock);
+
+		if (csw) {
+			csw->d_strategy(bio);
+		} else {
+			bio->bio_error = ENXIO;
+			ctl_be_block_biodone(bio);
+		}
+	}
+	if (csw)
+		dev_relthread(dev, ref);
+}
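
Each BIO_DELETE issued above is capped at the largest blocksize-aligned
length that fits in bio_length (a long).  A standalone model of just the
splitting arithmetic (the block size and oversized request length are
illustrative):

	#include <limits.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		uint64_t blocksize = 512;
		/*
		 * bio_length is a long, so cap each chunk at the largest
		 * multiple of the block size that fits in one.
		 */
		uint64_t maxlen = LONG_MAX - (LONG_MAX % blocksize);
		uint64_t len = (uint64_t)LONG_MAX + 4096;	/* oversized request */
		int nbios = 0;

		while (len > 0) {
			uint64_t chunk = MIN(len, maxlen);

			len -= chunk;
			nbios++;
		}
		/* Prints 2: one maxlen chunk plus the 4607-byte remainder. */
		printf("split into %d delete bios\n", nbios);
		return (0);
	}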
+
+static void
+ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
+		       struct ctl_be_block_io *beio)
+{
+	union ctl_io *io;
+	struct ctl_ptr_len_flags *ptrlen;
+	struct scsi_unmap_desc *buf, *end;
+	uint64_t len;
+
+	io = beio->io;
+
+	DPRINTF("entered\n");
+
+	binuptime(&beio->ds_t0);
+	mtx_lock(&be_lun->io_lock);
+	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
+	mtx_unlock(&be_lun->io_lock);
+
+	if (beio->io_offset == -1) {
+		beio->io_len = 0;
+		ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+		buf = (struct scsi_unmap_desc *)ptrlen->ptr;
+		end = buf + ptrlen->len / sizeof(*buf);
+		for (; buf < end; buf++) {
+			len = (uint64_t)scsi_4btoul(buf->length) *
+			    be_lun->cbe_lun.blocksize;
+			beio->io_len += len;
+			ctl_be_block_unmap_dev_range(be_lun, beio,
+			    scsi_8btou64(buf->lba) * be_lun->cbe_lun.blocksize,
+			    len, (end - buf < 2) ? TRUE : FALSE);
+		}
+	} else
+		ctl_be_block_unmap_dev_range(be_lun, beio,
+		    beio->io_offset, beio->io_len, TRUE);
+}
+
+static void
 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
 			  struct ctl_be_block_io *beio)
 {
-	int i;
+	TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
 	struct bio *bio;
-	struct ctl_be_block_devdata *dev_data;
+	struct cdevsw *csw;
+	struct cdev *dev;
 	off_t cur_offset;
-	int max_iosize;
+	int i, max_iosize, ref;
 
 	DPRINTF("entered\n");
+	csw = devvn_refthread(be_lun->vn, &dev, &ref);
 
-	dev_data = &be_lun->backend.dev;
-
 	/*
 	 * We have to limit our I/O size to the maximum supported by the
 	 * backend device.  Hopefully it is MAXPHYS.  If the driver doesn't
 	 * set it properly, use DFLTPHYS.
 	 */
-	max_iosize = dev_data->cdev->si_iosize_max;
-	if (max_iosize < PAGE_SIZE)
+	if (csw) {
+		max_iosize = dev->si_iosize_max;
+		if (max_iosize < PAGE_SIZE)
+			max_iosize = DFLTPHYS;
+	} else
 		max_iosize = DFLTPHYS;
 
 	cur_offset = beio->io_offset;
-
-	/*
-	 * XXX KDM need to accurately reflect the number of I/Os outstanding
-	 * to a device.
-	 */
-	binuptime(&beio->ds_t0);
-	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
-
 	for (i = 0; i < beio->num_segs; i++) {
 		size_t cur_size;
 		uint8_t *cur_ptr;
@@ -885,49 +1170,254 @@
 			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));
 
 			bio->bio_cmd = beio->bio_cmd;
-			bio->bio_flags |= beio->bio_flags;
-			bio->bio_dev = dev_data->cdev;
+			bio->bio_dev = dev;
 			bio->bio_caller1 = beio;
 			bio->bio_length = min(cur_size, max_iosize);
 			bio->bio_offset = cur_offset;
 			bio->bio_data = cur_ptr;
 			bio->bio_done = ctl_be_block_biodone;
-			bio->bio_pblkno = cur_offset / be_lun->blocksize;
+			bio->bio_pblkno = cur_offset / be_lun->cbe_lun.blocksize;
 
 			cur_offset += bio->bio_length;
 			cur_ptr += bio->bio_length;
 			cur_size -= bio->bio_length;
 
-			/*
-			 * Make sure we set the complete bit just before we
-			 * issue the last bio so we don't wind up with a
-			 * race.
-			 *
-			 * Use the LUN mutex here instead of a combination
-			 * of atomic variables for simplicity.
-			 *
-			 * XXX KDM we could have a per-IO lock, but that
-			 * would cause additional per-IO setup and teardown
-			 * overhead.  Hopefully there won't be too much
-			 * contention on the LUN lock.
-			 */
-			mtx_lock(&be_lun->lock);
-
+			TAILQ_INSERT_TAIL(&queue, bio, bio_queue);
 			beio->num_bios_sent++;
+		}
+	}
+	binuptime(&beio->ds_t0);
+	mtx_lock(&be_lun->io_lock);
+	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
+	beio->send_complete = 1;
+	mtx_unlock(&be_lun->io_lock);
 
-			if ((i == beio->num_segs - 1)
-			 && (cur_size == 0))
-				beio->send_complete = 1;
+	/*
+	 * Fire off all allocated requests!
+	 */
+	while ((bio = TAILQ_FIRST(&queue)) != NULL) {
+		TAILQ_REMOVE(&queue, bio, bio_queue);
+		if (csw)
+			csw->d_strategy(bio);
+		else {
+			bio->bio_error = ENXIO;
+			ctl_be_block_biodone(bio);
+		}
+	}
+	if (csw)
+		dev_relthread(dev, ref);
+}
 
-			mtx_unlock(&be_lun->lock);
+static uint64_t
+ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname)
+{
+	struct diocgattr_arg	arg;
+	struct cdevsw *csw;
+	struct cdev *dev;
+	int error, ref;
 
-			(*dev_data->csw->d_strategy)(bio);
+	csw = devvn_refthread(be_lun->vn, &dev, &ref);
+	if (csw == NULL)
+		return (UINT64_MAX);
+	strlcpy(arg.name, attrname, sizeof(arg.name));
+	arg.len = sizeof(arg.value.off);
+	if (csw->d_ioctl) {
+		error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD,
+		    curthread);
+	} else
+		error = ENODEV;
+	dev_relthread(dev, ref);
+	if (error != 0)
+		return (UINT64_MAX);
+	return (arg.value.off);
+}
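
DIOCGATTR is the regular GEOM attribute ioctl, so the same query can be
made from userland.  A hedged sketch (the device path is an example, and
attribute support varies by provider; GEOM::candelete reports BIO_DELETE
support):

	#include <sys/param.h>
	#include <sys/disk.h>
	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct diocgattr_arg arg;
		int fd;

		if ((fd = open("/dev/ada0", O_RDONLY)) < 0)	/* example device */
			return (1);
		strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
		arg.len = sizeof(arg.value.i);
		if (ioctl(fd, DIOCGATTR, &arg) == 0)
			printf("candelete=%d\n", arg.value.i);
		else
			printf("attribute not supported\n");
		close(fd);
		return (0);
	}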
+
+static void
+ctl_be_block_cw_dispatch_sync(struct ctl_be_block_lun *be_lun,
+			    union ctl_io *io)
+{
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
+	struct ctl_be_block_io *beio;
+	struct ctl_lba_len_flags *lbalen;
+
+	DPRINTF("entered\n");
+	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+	lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+
+	beio->io_len = lbalen->len * cbe_lun->blocksize;
+	beio->io_offset = lbalen->lba * cbe_lun->blocksize;
+	beio->io_arg = (lbalen->flags & SSC_IMMED) != 0;
+	beio->bio_cmd = BIO_FLUSH;
+	beio->ds_trans_type = DEVSTAT_NO_DATA;
+	DPRINTF("SYNC\n");
+	be_lun->lun_flush(be_lun, beio);
+}
+
+static void
+ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio)
+{
+	union ctl_io *io;
+
+	io = beio->io;
+	ctl_free_beio(beio);
+	if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
+	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
+	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
+		ctl_config_write_done(io);
+		return;
+	}
+
+	ctl_be_block_config_write(io);
+}
+
+static void
+ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
+			    union ctl_io *io)
+{
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
+	struct ctl_be_block_io *beio;
+	struct ctl_lba_len_flags *lbalen;
+	uint64_t len_left, lba;
+	uint32_t pb, pbo, adj;
+	int i, seglen;
+	uint8_t *buf, *end;
+
+	DPRINTF("entered\n");
+
+	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+	lbalen = ARGS(beio->io);
+
+	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) ||
+	    (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) {
+		ctl_free_beio(beio);
+		ctl_set_invalid_field(&io->scsiio,
+				      /*sks_valid*/ 1,
+				      /*command*/ 1,
+				      /*field*/ 1,
+				      /*bit_valid*/ 0,
+				      /*bit*/ 0);
+		ctl_config_write_done(io);
+		return;
+	}
+
+	if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) {
+		beio->io_offset = lbalen->lba * cbe_lun->blocksize;
+		beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize;
+		beio->bio_cmd = BIO_DELETE;
+		beio->ds_trans_type = DEVSTAT_FREE;
+
+		be_lun->unmap(be_lun, beio);
+		return;
+	}
+
+	beio->bio_cmd = BIO_WRITE;
+	beio->ds_trans_type = DEVSTAT_WRITE;
+
+	DPRINTF("WRITE SAME at LBA %jx len %u\n",
+	       (uintmax_t)lbalen->lba, lbalen->len);
+
+	pb = cbe_lun->blocksize << be_lun->cbe_lun.pblockexp;
+	if (be_lun->cbe_lun.pblockoff > 0)
+		pbo = pb - cbe_lun->blocksize * be_lun->cbe_lun.pblockoff;
+	else
+		pbo = 0;
+	len_left = (uint64_t)lbalen->len * cbe_lun->blocksize;
+	for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) {
+
+		/*
+		 * Setup the S/G entry for this chunk.
+		 */
+		seglen = MIN(CTLBLK_MAX_SEG, len_left);
+		if (pb > cbe_lun->blocksize) {
+			adj = ((lbalen->lba + lba) * cbe_lun->blocksize +
+			    seglen - pbo) % pb;
+			if (seglen > adj)
+				seglen -= adj;
+			else
+				seglen -= seglen % cbe_lun->blocksize;
+		} else
+			seglen -= seglen % cbe_lun->blocksize;
+		beio->sg_segs[i].len = seglen;
+		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);
+
+		DPRINTF("segment %d addr %p len %zd\n", i,
+			beio->sg_segs[i].addr, beio->sg_segs[i].len);
+
+		beio->num_segs++;
+		len_left -= seglen;
+
+		buf = beio->sg_segs[i].addr;
+		end = buf + seglen;
+		for (; buf < end; buf += cbe_lun->blocksize) {
+			if (lbalen->flags & SWS_NDOB) {
+				memset(buf, 0, cbe_lun->blocksize);
+			} else {
+				memcpy(buf, io->scsiio.kern_data_ptr,
+				    cbe_lun->blocksize);
+			}
+			if (lbalen->flags & SWS_LBDATA)
+				scsi_ulto4b(lbalen->lba + lba, buf);
+			lba++;
 		}
 	}
+
+	beio->io_offset = lbalen->lba * cbe_lun->blocksize;
+	beio->io_len = lba * cbe_lun->blocksize;
+
+	/* We cannot do it all in one run.  Adjust the range and schedule a rerun. */
+	if (len_left > 0) {
+		lbalen->lba += lba;
+		lbalen->len -= lba;
+		beio->beio_cont = ctl_be_block_cw_done_ws;
+	}
+
+	be_lun->dispatch(be_lun, beio);
 }
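
The pb/pbo/adj arithmetic above trims each WRITE SAME chunk so it ends on
a physical-block boundary whenever the physical block is larger than the
logical one.  A worked standalone example (512-byte logical blocks with
pblockexp 3, i.e. 4KB physical blocks; the starting LBA is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint32_t blocksize = 512;
		uint32_t pblockexp = 3;			/* 4KB physical blocks */
		uint32_t pb = blocksize << pblockexp;	/* 4096 */
		uint32_t pbo = 0;			/* no physical block offset */
		uint64_t lba = 1;			/* 512B past a 4KB boundary */
		uint32_t seglen = 128 * 1024;
		uint64_t adj;

		if (pb > blocksize) {
			adj = (lba * blocksize + seglen - pbo) % pb;
			if (seglen > adj)
				seglen -= adj;	/* end on a physical boundary */
			else
				seglen -= seglen % blocksize;
		}
		/* Prints seglen=130560 end=131072: the chunk now ends 4KB-aligned. */
		printf("seglen=%u end=%ju\n", seglen,
		    (uintmax_t)(lba * blocksize + seglen));
		return (0);
	}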
 
 static void
-ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
+ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun,
+			    union ctl_io *io)
+{
+	struct ctl_be_block_io *beio;
+	struct ctl_ptr_len_flags *ptrlen;
+
+	DPRINTF("entered\n");
+
+	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+	ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+
+	if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) {
+		ctl_free_beio(beio);
+		ctl_set_invalid_field(&io->scsiio,
+				      /*sks_valid*/ 0,
+				      /*command*/ 1,
+				      /*field*/ 0,
+				      /*bit_valid*/ 0,
+				      /*bit*/ 0);
+		ctl_config_write_done(io);
+		return;
+	}
+
+	beio->io_len = 0;
+	beio->io_offset = -1;
+	beio->bio_cmd = BIO_DELETE;
+	beio->ds_trans_type = DEVSTAT_FREE;
+	DPRINTF("UNMAP\n");
+	be_lun->unmap(be_lun, beio);
+}
+
+static void
+ctl_be_block_cr_done(struct ctl_be_block_io *beio)
+{
+	union ctl_io *io;
+
+	io = beio->io;
+	ctl_free_beio(beio);
+	ctl_config_read_done(io);
+}
+
+static void
+ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
 			 union ctl_io *io)
 {
 	struct ctl_be_block_io *beio;
@@ -937,30 +1427,80 @@
 
 	softc = be_lun->softc;
 	beio = ctl_alloc_beio(softc);
-	if (beio == NULL) {
-		/*
-		 * This should not happen.  ctl_alloc_beio() will call
-		 * ctl_grow_beio() with a blocking malloc as needed.
-		 * A malloc with M_WAITOK should not fail.
-		 */
-		ctl_set_busy(&io->scsiio);
-		ctl_done(io);
-		return;
+	beio->io = io;
+	beio->lun = be_lun;
+	beio->beio_cont = ctl_be_block_cr_done;
+	PRIV(io)->ptr = (void *)beio;
+
+	switch (io->scsiio.cdb[0]) {
+	case SERVICE_ACTION_IN:		/* GET LBA STATUS */
+		beio->bio_cmd = -1;
+		beio->ds_trans_type = DEVSTAT_NO_DATA;
+		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
+		beio->io_len = 0;
+		if (be_lun->get_lba_status)
+			be_lun->get_lba_status(be_lun, beio);
+		else
+			ctl_be_block_cr_done(beio);
+		break;
+	default:
+		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
+		break;
 	}
+}
 
+static void
+ctl_be_block_cw_done(struct ctl_be_block_io *beio)
+{
+	union ctl_io *io;
+
+	io = beio->io;
+	ctl_free_beio(beio);
+	ctl_config_write_done(io);
+}
+
+static void
+ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
+			 union ctl_io *io)
+{
+	struct ctl_be_block_io *beio;
+	struct ctl_be_block_softc *softc;
+
+	DPRINTF("entered\n");
+
+	softc = be_lun->softc;
+	beio = ctl_alloc_beio(softc);
 	beio->io = io;
-	beio->softc = softc;
 	beio->lun = be_lun;
-	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
+	beio->beio_cont = ctl_be_block_cw_done;
+	switch (io->scsiio.tag_type) {
+	case CTL_TAG_ORDERED:
+		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
+		break;
+	case CTL_TAG_HEAD_OF_QUEUE:
+		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
+		break;
+	case CTL_TAG_UNTAGGED:
+	case CTL_TAG_SIMPLE:
+	case CTL_TAG_ACA:
+	default:
+		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
+		break;
+	}
+	PRIV(io)->ptr = (void *)beio;
 
 	switch (io->scsiio.cdb[0]) {
 	case SYNCHRONIZE_CACHE:
 	case SYNCHRONIZE_CACHE_16:
-		beio->ds_trans_type = DEVSTAT_NO_DATA;
-		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
-		beio->io_len = 0;
-		be_lun->lun_flush(be_lun, beio);
+		ctl_be_block_cw_dispatch_sync(be_lun, io);
 		break;
+	case WRITE_SAME_10:
+	case WRITE_SAME_16:
+		ctl_be_block_cw_dispatch_ws(be_lun, io);
+		break;
+	case UNMAP:
+		ctl_be_block_cw_dispatch_unmap(be_lun, io);
+		break;
 	default:
 		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
 		break;
@@ -967,19 +1507,46 @@
 	}
 }
 
-SDT_PROBE_DEFINE1(cbb, kernel, read, start, start, "uint64_t");
-SDT_PROBE_DEFINE1(cbb, kernel, write, start, start, "uint64_t");
-SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, alloc_done, "uint64_t");
-SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, alloc_done, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, , read, start, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, , write, start, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, , read, alloc_done, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, , write, alloc_done, "uint64_t");
 
 static void
+ctl_be_block_next(struct ctl_be_block_io *beio)
+{
+	struct ctl_be_block_lun *be_lun;
+	union ctl_io *io;
+
+	io = beio->io;
+	be_lun = beio->lun;
+	ctl_free_beio(beio);
+	if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
+	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
+	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
+		ctl_data_submit_done(io);
+		return;
+	}
+
+	io->io_hdr.status &= ~CTL_STATUS_MASK;
+	io->io_hdr.status |= CTL_STATUS_NONE;
+
+	mtx_lock(&be_lun->queue_lock);
+	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
+	mtx_unlock(&be_lun->queue_lock);
+	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
+}
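
Together with the lbas clamp in ctl_be_block_dispatch(), this requeueing
is what lets the backend split an arbitrarily large transfer into 1MB
passes instead of rejecting it, as the old 16MB-cap code did.  A minimal
model of the accounting (no actual I/O; the request size is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))
	#define CTLBLK_MAX_IO_SIZE (1024 * 1024)

	int
	main(void)
	{
		uint32_t blocksize = 512;
		uint64_t total_lbas = 5000;	/* ~2.4MB request */
		uint64_t done = 0;		/* bptrlen->len analogue */
		int passes = 0;

		while (done < total_lbas) {
			uint64_t lbas = MIN(total_lbas - done,
			    CTLBLK_MAX_IO_SIZE / blocksize);

			/* One pass: offset (lba + done) * blocksize, length lbas. */
			done += lbas;
			passes++;
		}
		/* Prints 3 passes: 2048 + 2048 + 904 LBAs. */
		printf("%d passes, %ju LBAs moved\n", passes, (uintmax_t)done);
		return (0);
	}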
+
+static void
 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
 			   union ctl_io *io)
 {
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
 	struct ctl_be_block_io *beio;
 	struct ctl_be_block_softc *softc;
-	struct ctl_lba_len lbalen;
-	uint64_t len_left, io_size_bytes;
+	struct ctl_lba_len_flags *lbalen;
+	struct ctl_ptr_len_flags *bptrlen;
+	uint64_t len_left, lbas;
 	int i;
 
 	softc = be_lun->softc;
@@ -986,66 +1553,19 @@
 
 	DPRINTF("entered\n");
 
-	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
-		SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
+	lbalen = ARGS(io);
+	if (lbalen->flags & CTL_LLF_WRITE) {
+		SDT_PROBE0(cbb, , write, start);
 	} else {
-		SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
+		SDT_PROBE0(cbb, , read, start);
 	}
 
-	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
-	       sizeof(lbalen));
-
-	io_size_bytes = lbalen.len * be_lun->blocksize;
-
-	/*
-	 * XXX KDM this is temporary, until we implement chaining of beio
-	 * structures and multiple datamove calls to move all the data in
-	 * or out.
-	 */
-	if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
-		printf("%s: IO length %ju > max io size %u\n", __func__,
-		       io_size_bytes, CTLBLK_MAX_IO_SIZE);
-		ctl_set_invalid_field(&io->scsiio,
-				      /*sks_valid*/ 0,
-				      /*command*/ 1,
-				      /*field*/ 0,
-				      /*bit_valid*/ 0,
-				      /*bit*/ 0);
-		ctl_done(io);
-		return;
-	}
-
 	beio = ctl_alloc_beio(softc);
-	if (beio == NULL) {
-		/*
-		 * This should not happen.  ctl_alloc_beio() will call
-		 * ctl_grow_beio() with a blocking malloc as needed.
-		 * A malloc with M_WAITOK should not fail.
-		 */
-		ctl_set_busy(&io->scsiio);
-		ctl_done(io);
-		return;
-	}
-
 	beio->io = io;
-	beio->softc = softc;
 	beio->lun = be_lun;
-	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
+	bptrlen = PRIV(io);
+	bptrlen->ptr = (void *)beio;
 
-	/*
-	 * If the I/O came down with an ordered or head of queue tag, set
-	 * the BIO_ORDERED attribute.  For head of queue tags, that's
-	 * pretty much the best we can do.
-	 *
-	 * XXX KDM we don't have a great way to easily know about the FUA
-	 * bit right now (it is decoded in ctl_read_write(), but we don't
-	 * pass that knowledge to the backend), and in any case we would
-	 * need to determine how to handle it.  
-	 */
-	if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
-	 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
-		beio->bio_flags = BIO_ORDERED;
-
 	switch (io->scsiio.tag_type) {
 	case CTL_TAG_ORDERED:
 		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
@@ -1061,40 +1581,61 @@
 		break;
 	}
 
-	/*
-	 * This path handles read and write only.  The config write path
-	 * handles flush operations.
-	 */
-	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
+	if (lbalen->flags & CTL_LLF_WRITE) {
+		beio->bio_cmd = BIO_WRITE;
+		beio->ds_trans_type = DEVSTAT_WRITE;
+	} else {
 		beio->bio_cmd = BIO_READ;
 		beio->ds_trans_type = DEVSTAT_READ;
-	} else {
-		beio->bio_cmd = BIO_WRITE;
-		beio->ds_trans_type = DEVSTAT_WRITE;
 	}
 
-	beio->io_len = lbalen.len * be_lun->blocksize;
-	beio->io_offset = lbalen.lba * be_lun->blocksize;
-
-	DPRINTF("%s at LBA %jx len %u\n",
+	DPRINTF("%s at LBA %jx len %u @%ju\n",
 	       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
-	       (uintmax_t)lbalen.lba, lbalen.len);
+	       (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len);
+	if (lbalen->flags & CTL_LLF_COMPARE)
+		lbas = CTLBLK_HALF_IO_SIZE;
+	else
+		lbas = CTLBLK_MAX_IO_SIZE;
+	lbas = MIN(lbalen->len - bptrlen->len, lbas / cbe_lun->blocksize);
+	beio->io_offset = (lbalen->lba + bptrlen->len) * cbe_lun->blocksize;
+	beio->io_len = lbas * cbe_lun->blocksize;
+	bptrlen->len += lbas;
 
-	for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
-	     len_left > 0; i++) {
+	for (i = 0, len_left = beio->io_len; len_left > 0; i++) {
+		KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)",
+		    i, CTLBLK_MAX_SEGS));
 
 		/*
 		 * Setup the S/G entry for this chunk.
 		 */
-		beio->sg_segs[i].len = min(MAXPHYS, len_left);
+		beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left);
 		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);
 
 		DPRINTF("segment %d addr %p len %zd\n", i,
 			beio->sg_segs[i].addr, beio->sg_segs[i].len);
 
+		/* Set up second segment for compare operation. */
+		if (lbalen->flags & CTL_LLF_COMPARE) {
+			beio->sg_segs[i + CTLBLK_HALF_SEGS].len =
+			    beio->sg_segs[i].len;
+			beio->sg_segs[i + CTLBLK_HALF_SEGS].addr =
+			    uma_zalloc(be_lun->lun_zone, M_WAITOK);
+		}
+
 		beio->num_segs++;
 		len_left -= beio->sg_segs[i].len;
 	}
+	if (bptrlen->len < lbalen->len)
+		beio->beio_cont = ctl_be_block_next;
+	io->scsiio.be_move_done = ctl_be_block_move_done;
+	/* For compare we have separate S/G lists for read and datamove. */
+	if (lbalen->flags & CTL_LLF_COMPARE)
+		io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS];
+	else
+		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
+	io->scsiio.kern_data_len = beio->io_len;
+	io->scsiio.kern_sg_entries = beio->num_segs;
+	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 
 	/*
 	 * For the read case, we need to read the data into our buffers and
@@ -1102,21 +1643,13 @@
 	 * need to get the data from the user first.
 	 */
 	if (beio->bio_cmd == BIO_READ) {
-		SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
+		SDT_PROBE0(cbb, , read, alloc_done);
 		be_lun->dispatch(be_lun, beio);
 	} else {
-		SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
-		io->scsiio.be_move_done = ctl_be_block_move_done;
-		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
-		io->scsiio.kern_data_len = beio->io_len;
-		io->scsiio.kern_total_len = beio->io_len;
-		io->scsiio.kern_rel_offset = 0;
-		io->scsiio.kern_data_resid = 0;
-		io->scsiio.kern_sg_entries = beio->num_segs;
-		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
+		SDT_PROBE0(cbb, , write, alloc_done);
 #ifdef CTL_TIME_IO
-        	getbintime(&io->io_hdr.dma_start_bt);
-#endif  
+		getbinuptime(&io->io_hdr.dma_start_bt);
+#endif
 		ctl_datamove(io);
 	}
 }
@@ -1124,66 +1657,74 @@
 static void
 ctl_be_block_worker(void *context, int pending)
 {
-	struct ctl_be_block_lun *be_lun;
-	struct ctl_be_block_softc *softc;
+	struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)context;
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
 	union ctl_io *io;
+	struct ctl_be_block_io *beio;
 
-	be_lun = (struct ctl_be_block_lun *)context;
-	softc = be_lun->softc;
-
 	DPRINTF("entered\n");
-
-	mtx_lock(&be_lun->lock);
+	/*
+	 * Fetch and process I/Os from all queues.  If we detect the
+	 * CTL_LUN_FLAG_NO_MEDIA flag here, it is the result of a race,
+	 * so make the response maximally opaque so as not to confuse
+	 * the initiator.
+	 */
 	for (;;) {
+		mtx_lock(&be_lun->queue_lock);
 		io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
 		if (io != NULL) {
-			struct ctl_be_block_io *beio;
-
 			DPRINTF("datamove queue\n");
-
 			STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
 				      ctl_io_hdr, links);
-
-			mtx_unlock(&be_lun->lock);
-
-			beio = (struct ctl_be_block_io *)
-			    io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
-
+			mtx_unlock(&be_lun->queue_lock);
+			beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+			if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
+				ctl_set_busy(&io->scsiio);
+				ctl_complete_beio(beio);
+				return;
+			}
 			be_lun->dispatch(be_lun, beio);
-
-			mtx_lock(&be_lun->lock);
 			continue;
 		}
 		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
 		if (io != NULL) {
-
 			DPRINTF("config write queue\n");
-
 			STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
 				      ctl_io_hdr, links);
-
-			mtx_unlock(&be_lun->lock);
-
+			mtx_unlock(&be_lun->queue_lock);
+			if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
+				ctl_set_busy(&io->scsiio);
+				ctl_config_write_done(io);
+				return;
+			}
 			ctl_be_block_cw_dispatch(be_lun, io);
-
-			mtx_lock(&be_lun->lock);
 			continue;
 		}
+		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue);
+		if (io != NULL) {
+			DPRINTF("config read queue\n");
+			STAILQ_REMOVE(&be_lun->config_read_queue, &io->io_hdr,
+				      ctl_io_hdr, links);
+			mtx_unlock(&be_lun->queue_lock);
+			if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
+				ctl_set_busy(&io->scsiio);
+				ctl_config_read_done(io);
+				return;
+			}
+			ctl_be_block_cr_dispatch(be_lun, io);
+			continue;
+		}
 		io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
 		if (io != NULL) {
 			DPRINTF("input queue\n");
-
 			STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
 				      ctl_io_hdr, links);
-			mtx_unlock(&be_lun->lock);
-
-			/*
-			 * We must drop the lock, since this routine and
-			 * its children may sleep.
-			 */
+			mtx_unlock(&be_lun->queue_lock);
+			if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
+				ctl_set_busy(&io->scsiio);
+				ctl_data_submit_done(io);
+				return;
+			}
 			ctl_be_block_dispatch(be_lun, io);
-
-			mtx_lock(&be_lun->lock);
 			continue;
 		}
 
@@ -1191,9 +1732,9 @@
 		 * If we get here, there is no work left in the queues, so
 		 * just break out and let the task queue go to sleep.
 		 */
+		mtx_unlock(&be_lun->queue_lock);
 		break;
 	}
-	mtx_unlock(&be_lun->lock);
 }
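
The rewritten worker manipulates the queues only under the dedicated
queue_lock and always drops that lock before dispatching, because the
dispatch routines may sleep.  A condensed single-queue sketch of the same
pattern, with invented types standing in for ctl_be_block_lun and its I/O
queues:

    struct item {
        STAILQ_ENTRY(item) links;
    };
    struct softc {
        struct mtx          queue_lock;
        STAILQ_HEAD(, item) queue;
        struct taskqueue    *tq;
        struct task         task;
    };

    static void dispatch(struct softc *, struct item *);   /* may sleep */

    static void
    worker(void *context, int pending __unused)
    {
        struct softc *sc = context;
        struct item *it;

        for (;;) {
            mtx_lock(&sc->queue_lock);
            it = STAILQ_FIRST(&sc->queue);
            if (it == NULL) {
                /* No work left; let the taskqueue sleep. */
                mtx_unlock(&sc->queue_lock);
                break;
            }
            STAILQ_REMOVE_HEAD(&sc->queue, links);
            /* Drop the lock: dispatch may sleep. */
            mtx_unlock(&sc->queue_lock);
            dispatch(sc, it);
        }
    }
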
 
 /*
@@ -1205,17 +1746,13 @@
 ctl_be_block_submit(union ctl_io *io)
 {
 	struct ctl_be_block_lun *be_lun;
-	struct ctl_be_lun *ctl_be_lun;
-	int retval;
+	struct ctl_be_lun *cbe_lun;
 
 	DPRINTF("entered\n");
 
-	retval = CTL_RETVAL_COMPLETE;
+	cbe_lun = CTL_BACKEND_LUN(io);
+	be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
 
-	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
-		CTL_PRIV_BACKEND_LUN].ptr;
-	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
-
 	/*
 	 * Make sure we only get SCSI I/O.
 	 */
@@ -1222,18 +1759,14 @@
 	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
 		"%#x) encountered", io->io_hdr.io_type));
 
-	mtx_lock(&be_lun->lock);
-	/*
-	 * XXX KDM make sure that links is okay to use at this point.
-	 * Otherwise, we either need to add another field to ctl_io_hdr,
-	 * or deal with resource allocation here.
-	 */
+	PRIV(io)->len = 0;
+
+	mtx_lock(&be_lun->queue_lock);
 	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
-	mtx_unlock(&be_lun->lock);
-
+	mtx_unlock(&be_lun->queue_lock);
 	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
 
-	return (retval);
+	return (CTL_RETVAL_COMPLETE);
 }
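
The submit path is now a thin producer: it only tags the I/O
(PRIV(io)->len = 0), queues it, and pokes the taskqueue, which is cheap when
the task is already pending.  The producer half of the hypothetical pattern
sketched after the worker above:

    static void
    submit(struct softc *sc, struct item *it)
    {
        mtx_lock(&sc->queue_lock);
        STAILQ_INSERT_TAIL(&sc->queue, it, links);
        mtx_unlock(&sc->queue_lock);
        taskqueue_enqueue(sc->tq, &sc->task);
    }
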
 
 static int
@@ -1266,7 +1799,7 @@
 		default:
 			lun_req->status = CTL_LUN_ERROR;
 			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
-				 "%s: invalid LUN request type %d", __func__,
+				 "invalid LUN request type %d",
 				 lun_req->reqtype);
 			break;
 		}
@@ -1283,18 +1816,25 @@
 static int
 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
 {
+	struct ctl_be_lun *cbe_lun;
 	struct ctl_be_block_filedata *file_data;
 	struct ctl_lun_create_params *params;
+	char			     *value;
 	struct vattr		      vattr;
+	off_t			      ps, pss, po, pos, us, uss, uo, uos;
 	int			      error;
 
-	error = 0;
+	cbe_lun = &be_lun->cbe_lun;
 	file_data = &be_lun->backend.file;
-	params = &req->reqdata.create;
+	params = &be_lun->params;
 
 	be_lun->dev_type = CTL_BE_BLOCK_FILE;
 	be_lun->dispatch = ctl_be_block_dispatch_file;
 	be_lun->lun_flush = ctl_be_block_flush_file;
+	be_lun->get_lba_status = ctl_be_block_gls_file;
+	be_lun->getattr = ctl_be_block_getattr_file;
+	be_lun->unmap = NULL;
+	cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP;
 
 	error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
 	if (error != 0) {
@@ -1304,62 +1844,71 @@
 		return (error);
 	}
 
-	/*
-	 * Verify that we have the ability to upgrade to exclusive
-	 * access on this file so we can trap errors at open instead
-	 * of reporting them during first access.
-	 */
-	if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
-		vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
-		if (be_lun->vn->v_iflag & VI_DOOMED) {
-			error = EBADF;
-			snprintf(req->error_str, sizeof(req->error_str),
-				 "error locking file %s", be_lun->dev_path);
-			return (error);
-		}
-	}
-
-
 	file_data->cred = crhold(curthread->td_ucred);
 	if (params->lun_size_bytes != 0)
 		be_lun->size_bytes = params->lun_size_bytes;
 	else
 		be_lun->size_bytes = vattr.va_size;
-	/*
-	 * We set the multi thread flag for file operations because all
-	 * filesystems (in theory) are capable of allowing multiple readers
-	 * of a file at once.  So we want to get the maximum possible
-	 * concurrency.
-	 */
-	be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;
 
 	/*
-	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
-	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
-	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
-	 * may not work with other OSes as well.  So just export a sector
-	 * size of 512 bytes, which should work with any OS or
-	 * application.  Since our backing is a file, any block size will
-	 * work fine for the backing store.
+	 * For files we can use any logical block size.  Prefer 512 bytes
+	 * for compatibility reasons.  If the file's vattr.va_blocksize
+	 * (preferred I/O block size) is larger than, and a multiple of,
+	 * the chosen logical block size, report it as the physical
+	 * block size.
 	 */
-#if 0
-	be_lun->blocksize= vattr.va_blocksize;
-#endif
 	if (params->blocksize_bytes != 0)
-		be_lun->blocksize = params->blocksize_bytes;
+		cbe_lun->blocksize = params->blocksize_bytes;
+	else if (cbe_lun->lun_type == T_CDROM)
+		cbe_lun->blocksize = 2048;
 	else
-		be_lun->blocksize = 512;
+		cbe_lun->blocksize = 512;
+	be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize;
+	cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
+	    0 : (be_lun->size_blocks - 1);
 
+	us = ps = vattr.va_blocksize;
+	uo = po = 0;
+
+	value = ctl_get_opt(&cbe_lun->options, "pblocksize");
+	if (value != NULL)
+		ctl_expand_number(value, &ps);
+	value = ctl_get_opt(&cbe_lun->options, "pblockoffset");
+	if (value != NULL)
+		ctl_expand_number(value, &po);
+	pss = ps / cbe_lun->blocksize;
+	pos = po / cbe_lun->blocksize;
+	if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) &&
+	    ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) {
+		cbe_lun->pblockexp = fls(pss) - 1;
+		cbe_lun->pblockoff = (pss - pos) % pss;
+	}
+
+	value = ctl_get_opt(&cbe_lun->options, "ublocksize");
+	if (value != NULL)
+		ctl_expand_number(value, &us);
+	value = ctl_get_opt(&cbe_lun->options, "ublockoffset");
+	if (value != NULL)
+		ctl_expand_number(value, &uo);
+	uss = us / cbe_lun->blocksize;
+	uos = uo / cbe_lun->blocksize;
+	if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) &&
+	    ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) {
+		cbe_lun->ublockexp = fls(uss) - 1;
+		cbe_lun->ublockoff = (uss - uos) % uss;
+	}
+
 	/*
 	 * Sanity check.  The media size has to be at least one
 	 * sector long.
 	 */
-	if (be_lun->size_bytes < be_lun->blocksize) {
+	if (be_lun->size_bytes < cbe_lun->blocksize) {
 		error = EINVAL;
 		snprintf(req->error_str, sizeof(req->error_str),
 			 "file %s size %ju < block size %u", be_lun->dev_path,
-			 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
+			 (uintmax_t)be_lun->size_bytes, cbe_lun->blocksize);
 	}
+
+	cbe_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / cbe_lun->blocksize;
 	return (error);
 }
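
In the physical/unmap block geometry code above, the test
(pss & (pss - 1)) == 0 admits only power-of-two ratios of physical to
logical block size, and fls(pss) - 1 then yields log2 of that ratio.  A
worked example under assumed values (512-byte logical blocks over a
4096-byte physical sector, zero offset):

    uint32_t blocksize = 512;      /* chosen logical block size */
    off_t ps = 4096, po = 0;       /* pblocksize/pblockoffset options */
    off_t pss = ps / blocksize;    /* 8 logical blocks per physical */
    off_t pos = po / blocksize;    /* 0 */

    if (pss > 0 && pss * blocksize == ps && pss >= pos &&
        (pss & (pss - 1)) == 0 && pos * blocksize == po) {
        int pblockexp = fls(pss) - 1;       /* 3, since 2^3 == 8 */
        int pblockoff = (pss - pos) % pss;  /* 0 */
    }
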
 
@@ -1366,47 +1915,52 @@
 static int
 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
 {
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
 	struct ctl_lun_create_params *params;
-	struct vattr		      vattr;
+	struct cdevsw		     *csw;
 	struct cdev		     *dev;
-	struct cdevsw		     *devsw;
-	int			      error;
+	char			     *value;
+	int			      error, atomic, maxio, ref, unmap, tmp;
+	off_t			      ps, pss, po, pos, us, uss, uo, uos, otmp;
 
-	params = &req->reqdata.create;
+	params = &be_lun->params;
 
 	be_lun->dev_type = CTL_BE_BLOCK_DEV;
-	be_lun->dispatch = ctl_be_block_dispatch_dev;
+	csw = devvn_refthread(be_lun->vn, &dev, &ref);
+	if (csw == NULL)
+		return (ENXIO);
+	if (strcmp(csw->d_name, "zvol") == 0) {
+		be_lun->dispatch = ctl_be_block_dispatch_zvol;
+		be_lun->get_lba_status = ctl_be_block_gls_zvol;
+		atomic = maxio = CTLBLK_MAX_IO_SIZE;
+	} else {
+		be_lun->dispatch = ctl_be_block_dispatch_dev;
+		be_lun->get_lba_status = NULL;
+		atomic = 0;
+		maxio = dev->si_iosize_max;
+		if (maxio <= 0)
+			maxio = DFLTPHYS;
+		if (maxio > CTLBLK_MAX_IO_SIZE)
+			maxio = CTLBLK_MAX_IO_SIZE;
+	}
 	be_lun->lun_flush = ctl_be_block_flush_dev;
-	be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
-	be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
-					     &be_lun->backend.dev.dev_ref);
-	if (be_lun->backend.dev.csw == NULL)
-		panic("Unable to retrieve device switch");
+	be_lun->getattr = ctl_be_block_getattr_dev;
+	be_lun->unmap = ctl_be_block_unmap_dev;
 
-	error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
-	if (error) {
+	if (!csw->d_ioctl) {
+		dev_relthread(dev, ref);
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: error getting vnode attributes for device %s",
-			 __func__, be_lun->dev_path);
-		return (error);
-	}
-
-	dev = be_lun->vn->v_rdev;
-	devsw = dev->si_devsw;
-	if (!devsw->d_ioctl) {
-		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: no d_ioctl for device %s!", __func__,
-			 be_lun->dev_path);
+			 "no d_ioctl for device %s!", be_lun->dev_path);
 		return (ENODEV);
 	}
 
-	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
-			       (caddr_t)&be_lun->blocksize, FREAD,
+	error = csw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD,
 			       curthread);
 	if (error) {
+		dev_relthread(dev, ref);
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
-			 "on %s!", __func__, error, be_lun->dev_path);
+			 "error %d returned for DIOCGSECTORSIZE ioctl "
+			 "on %s!", error, be_lun->dev_path);
 		return (error);
 	}
 
@@ -1416,58 +1970,122 @@
 	 * the user is asking for is an even multiple of the underlying 
 	 * device's blocksize.
 	 */
-	if ((params->blocksize_bytes != 0)
-	 && (params->blocksize_bytes > be_lun->blocksize)) {
-		uint32_t bs_multiple, tmp_blocksize;
-
-		bs_multiple = params->blocksize_bytes / be_lun->blocksize;
-
-		tmp_blocksize = bs_multiple * be_lun->blocksize;
-
-		if (tmp_blocksize == params->blocksize_bytes) {
-			be_lun->blocksize = params->blocksize_bytes;
+	if ((params->blocksize_bytes != 0) &&
+	    (params->blocksize_bytes >= tmp)) {
+		if (params->blocksize_bytes % tmp == 0) {
+			cbe_lun->blocksize = params->blocksize_bytes;
 		} else {
+			dev_relthread(dev, ref);
 			snprintf(req->error_str, sizeof(req->error_str),
-				 "%s: requested blocksize %u is not an even "
+				 "requested blocksize %u is not an even "
 				 "multiple of backing device blocksize %u",
-				 __func__, params->blocksize_bytes,
-				 be_lun->blocksize);
+				 params->blocksize_bytes, tmp);
 			return (EINVAL);
-			
 		}
-	} else if ((params->blocksize_bytes != 0)
-		&& (params->blocksize_bytes != be_lun->blocksize)) {
+	} else if (params->blocksize_bytes != 0) {
+		dev_relthread(dev, ref);
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: requested blocksize %u < backing device "
-			 "blocksize %u", __func__, params->blocksize_bytes,
-			 be_lun->blocksize);
+			 "requested blocksize %u < backing device "
+			 "blocksize %u", params->blocksize_bytes, tmp);
 		return (EINVAL);
-	}
+	} else if (cbe_lun->lun_type == T_CDROM)
+		cbe_lun->blocksize = MAX(tmp, 2048);
+	else
+		cbe_lun->blocksize = tmp;
 
-	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
-			       (caddr_t)&be_lun->size_bytes, FREAD,
-			       curthread);
+	error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD,
+			     curthread);
 	if (error) {
+		dev_relthread(dev, ref);
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: error %d returned for DIOCGMEDIASIZE "
-			 " ioctl on %s!", __func__, error,
+			 "error %d returned for DIOCGMEDIASIZE "
+			 " ioctl on %s!", error,
 			 be_lun->dev_path);
 		return (error);
 	}
 
 	if (params->lun_size_bytes != 0) {
-		if (params->lun_size_bytes > be_lun->size_bytes) {
+		if (params->lun_size_bytes > otmp) {
+			dev_relthread(dev, ref);
 			snprintf(req->error_str, sizeof(req->error_str),
-				 "%s: requested LUN size %ju > backing device "
-				 "size %ju", __func__,
+				 "requested LUN size %ju > backing device "
+				 "size %ju",
 				 (uintmax_t)params->lun_size_bytes,
-				 (uintmax_t)be_lun->size_bytes);
+				 (uintmax_t)otmp);
 			return (EINVAL);
 		}
 
 		be_lun->size_bytes = params->lun_size_bytes;
+	} else
+		be_lun->size_bytes = otmp;
+	be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize;
+	cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
+	    0 : (be_lun->size_blocks - 1);
+
+	error = csw->d_ioctl(dev, DIOCGSTRIPESIZE, (caddr_t)&ps, FREAD,
+	    curthread);
+	if (error)
+		ps = po = 0;
+	else {
+		error = csw->d_ioctl(dev, DIOCGSTRIPEOFFSET, (caddr_t)&po,
+		    FREAD, curthread);
+		if (error)
+			po = 0;
 	}
+	us = ps;
+	uo = po;
 
+	value = ctl_get_opt(&cbe_lun->options, "pblocksize");
+	if (value != NULL)
+		ctl_expand_number(value, &ps);
+	value = ctl_get_opt(&cbe_lun->options, "pblockoffset");
+	if (value != NULL)
+		ctl_expand_number(value, &po);
+	pss = ps / cbe_lun->blocksize;
+	pos = po / cbe_lun->blocksize;
+	if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) &&
+	    ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) {
+		cbe_lun->pblockexp = fls(pss) - 1;
+		cbe_lun->pblockoff = (pss - pos) % pss;
+	}
+
+	value = ctl_get_opt(&cbe_lun->options, "ublocksize");
+	if (value != NULL)
+		ctl_expand_number(value, &us);
+	value = ctl_get_opt(&cbe_lun->options, "ublockoffset");
+	if (value != NULL)
+		ctl_expand_number(value, &uo);
+	uss = us / cbe_lun->blocksize;
+	uos = uo / cbe_lun->blocksize;
+	if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) &&
+	    ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) {
+		cbe_lun->ublockexp = fls(uss) - 1;
+		cbe_lun->ublockoff = (uss - uos) % uss;
+	}
+
+	cbe_lun->atomicblock = atomic / cbe_lun->blocksize;
+	cbe_lun->opttxferlen = maxio / cbe_lun->blocksize;
+
+	if (be_lun->dispatch == ctl_be_block_dispatch_zvol) {
+		unmap = 1;
+	} else {
+		struct diocgattr_arg	arg;
+
+		strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
+		arg.len = sizeof(arg.value.i);
+		error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD,
+		    curthread);
+		unmap = (error == 0) ? arg.value.i : 0;
+	}
+	value = ctl_get_opt(&cbe_lun->options, "unmap");
+	if (value != NULL)
+		unmap = (strcmp(value, "on") == 0);
+	if (unmap)
+		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
+	else
+		cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP;
+
+	dev_relthread(dev, ref);
 	return (0);
 }
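
UNMAP support is auto-detected here: zvols always advertise it, any other
device is probed for the GEOM::candelete attribute, and the "unmap" LUN
option overrides either answer.  The same attribute can be queried from
userland through the DIOCGATTR ioctl; a small sketch, with the device path
being an assumption:

    #include <sys/disk.h>
    #include <sys/ioctl.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        struct diocgattr_arg arg;
        int fd = open("/dev/da0", O_RDONLY);    /* assumed device */

        if (fd < 0)
            return (1);
        strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
        arg.len = sizeof(arg.value.i);
        if (ioctl(fd, DIOCGATTR, &arg) == 0)
            printf("candelete: %d\n", arg.value.i);
        return (0);
    }
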
 
@@ -1474,29 +2092,13 @@
 static int
 ctl_be_block_close(struct ctl_be_block_lun *be_lun)
 {
-	DROP_GIANT();
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
+	int flags;
+
 	if (be_lun->vn) {
-		int flags = FREAD | FWRITE;
-		int vfs_is_locked = 0;
-
-		switch (be_lun->dev_type) {
-		case CTL_BE_BLOCK_DEV:
-			if (be_lun->backend.dev.csw) {
-				dev_relthread(be_lun->backend.dev.cdev,
-					      be_lun->backend.dev.dev_ref);
-				be_lun->backend.dev.csw  = NULL;
-				be_lun->backend.dev.cdev = NULL;
-			}
-			break;
-		case CTL_BE_BLOCK_FILE:
-			vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
-			break;
-		case CTL_BE_BLOCK_NONE:
-		default:
-			panic("Unexpected backend type.");
-			break;
-		}
-
+		flags = FREAD;
+		if ((cbe_lun->flags & CTL_LUN_FLAG_READONLY) == 0)
+			flags |= FWRITE;
 		(void)vn_close(be_lun->vn, flags, NOCRED, curthread);
 		be_lun->vn = NULL;
 
@@ -1504,7 +2106,6 @@
 		case CTL_BE_BLOCK_DEV:
 			break;
 		case CTL_BE_BLOCK_FILE:
-			VFS_UNLOCK_GIANT(vfs_is_locked);
 			if (be_lun->backend.file.cred != NULL) {
 				crfree(be_lun->backend.file.cred);
 				be_lun->backend.file.cred = NULL;
@@ -1511,37 +2112,30 @@
 			}
 			break;
 		case CTL_BE_BLOCK_NONE:
+			break;
 		default:
-			panic("Unexpected backend type.");
+			panic("Unexpected backend type %d", be_lun->dev_type);
 			break;
 		}
+		be_lun->dev_type = CTL_BE_BLOCK_NONE;
 	}
-	PICKUP_GIANT();
-
 	return (0);
 }
 
 static int
-ctl_be_block_open(struct ctl_be_block_softc *softc,
-		       struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
+ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
 {
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
 	struct nameidata nd;
-	int		 flags;
-	int		 error;
-	int		 vfs_is_locked;
+	char		*value;
+	int		 error, flags;
 
-	/*
-	 * XXX KDM allow a read-only option?
-	 */
-	flags = FREAD | FWRITE;
 	error = 0;
-
 	if (rootvnode == NULL) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: Root filesystem is not mounted", __func__);
+			 "Root filesystem is not mounted");
 		return (1);
 	}
-
 	if (!curthread->td_proc->p_fd->fd_cdir) {
 		curthread->td_proc->p_fd->fd_cdir = rootvnode;
 		VREF(rootvnode);
@@ -1555,9 +2149,30 @@
 		VREF(rootvnode);
 	}
 
- again:
+	value = ctl_get_opt(&cbe_lun->options, "file");
+	if (value == NULL) {
+		snprintf(req->error_str, sizeof(req->error_str),
+			 "no file argument specified");
+		return (1);
+	}
+	free(be_lun->dev_path, M_CTLBLK);
+	be_lun->dev_path = strdup(value, M_CTLBLK);
+
+	flags = FREAD;
+	value = ctl_get_opt(&cbe_lun->options, "readonly");
+	if (value != NULL) {
+		if (strcmp(value, "on") != 0)
+			flags |= FWRITE;
+	} else if (cbe_lun->lun_type == T_DIRECT)
+		flags |= FWRITE;
+
+again:
 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
 	error = vn_open(&nd, &flags, 0, NULL);
+	if ((error == EROFS || error == EACCES) && (flags & FWRITE)) {
+		flags &= ~FWRITE;
+		goto again;
+	}
 	if (error) {
 		/*
 		 * This is the only reasonable guess we can make as far as
@@ -1566,30 +2181,24 @@
 		 * full path.
 		 */
 		if (be_lun->dev_path[0] != '/') {
-			char *dev_path = "/dev/";
 			char *dev_name;
 
-			/* Try adding device path at beginning of name */
-			dev_name = malloc(strlen(be_lun->dev_path)
-					+ strlen(dev_path) + 1,
-					  M_CTLBLK, M_WAITOK);
-			if (dev_name) {
-				sprintf(dev_name, "%s%s", dev_path,
-					be_lun->dev_path);
-				free(be_lun->dev_path, M_CTLBLK);
-				be_lun->dev_path = dev_name;
-				goto again;
-			}
+			asprintf(&dev_name, M_CTLBLK, "/dev/%s",
+				be_lun->dev_path);
+			free(be_lun->dev_path, M_CTLBLK);
+			be_lun->dev_path = dev_name;
+			goto again;
 		}
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: error opening %s", __func__, be_lun->dev_path);
+		    "error opening %s: %d", be_lun->dev_path, error);
 		return (error);
 	}
+	if (flags & FWRITE)
+		cbe_lun->flags &= ~CTL_LUN_FLAG_READONLY;
+	else
+		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
 
-	vfs_is_locked = NDHASGIANT(&nd);
-
 	NDFREE(&nd, NDF_ONLY_PNBUF);
-		
 	be_lun->vn = nd.ni_vp;
 
 	/* We only support disks and files. */
@@ -1600,208 +2209,161 @@
 	} else {
 		error = EINVAL;
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s is not a disk or file", be_lun->dev_path);
+			 "%s is not a disk or plain file", be_lun->dev_path);
 	}
 	VOP_UNLOCK(be_lun->vn, 0);
-	VFS_UNLOCK_GIANT(vfs_is_locked);
 
-	if (error != 0) {
+	if (error != 0)
 		ctl_be_block_close(be_lun);
-		return (error);
-	}
-
-	be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
-	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;
-
+	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
+	if (be_lun->dispatch != ctl_be_block_dispatch_dev)
+		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
+	value = ctl_get_opt(&cbe_lun->options, "serseq");
+	if (value != NULL && strcmp(value, "on") == 0)
+		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
+	else if (value != NULL && strcmp(value, "read") == 0)
+		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
+	else if (value != NULL && strcmp(value, "off") == 0)
+		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
 	return (0);
 }
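
Note the retry around vn_open() above: unless readonly=on was given, the
open is first attempted with FWRITE, and EROFS or EACCES triggers a silent
retry without it, the result being recorded in CTL_LUN_FLAG_READONLY.  A
hypothetical userland analog of the same idiom:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdbool.h>

    static int
    open_rw_fallback(const char *path, bool *readonly)
    {
        int fd;

        *readonly = false;
        fd = open(path, O_RDWR);
        if (fd < 0 && (errno == EROFS || errno == EACCES)) {
            /* Backing store refuses writes; degrade to read-only. */
            fd = open(path, O_RDONLY);
            *readonly = (fd >= 0);
        }
        return (fd);
    }
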
 
 static int
-ctl_be_block_mem_ctor(void *mem, int size, void *arg, int flags)
-{
-	return (0);
-}
-
-static void
-ctl_be_block_mem_dtor(void *mem, int size, void *arg)
-{
-	bzero(mem, size);
-}
-
-static int
 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
 {
+	struct ctl_be_lun *cbe_lun;
 	struct ctl_be_block_lun *be_lun;
 	struct ctl_lun_create_params *params;
-	struct ctl_be_arg *file_arg;
 	char tmpstr[32];
+	char *value;
 	int retval, num_threads;
-	int i;
+	int tmp_num_threads;
 
 	params = &req->reqdata.create;
 	retval = 0;
+	req->status = CTL_LUN_OK;
 
-	num_threads = cbb_num_threads;
-
-	file_arg = NULL;
-
 	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);
-
+	cbe_lun = &be_lun->cbe_lun;
+	cbe_lun->be_lun = be_lun;
+	be_lun->params = req->reqdata.create;
 	be_lun->softc = softc;
 	STAILQ_INIT(&be_lun->input_queue);
+	STAILQ_INIT(&be_lun->config_read_queue);
 	STAILQ_INIT(&be_lun->config_write_queue);
 	STAILQ_INIT(&be_lun->datamove_queue);
 	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
-	mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);
-
-	be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS, 
-	    ctl_be_block_mem_ctor, ctl_be_block_mem_dtor, NULL, NULL,
-	    /*align*/ 0, /*flags*/0);
-
+	mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF);
+	mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF);
+	ctl_init_opts(&cbe_lun->options,
+	    req->num_be_args, req->kern_be_args);
+	be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG,
+	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
 	if (be_lun->lun_zone == NULL) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: error allocating UMA zone", __func__);
+			 "error allocating UMA zone");
 		goto bailout_error;
 	}
 
 	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
-		be_lun->ctl_be_lun.lun_type = params->device_type;
+		cbe_lun->lun_type = params->device_type;
 	else
-		be_lun->ctl_be_lun.lun_type = T_DIRECT;
+		cbe_lun->lun_type = T_DIRECT;
+	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
+	cbe_lun->flags = 0;
+	value = ctl_get_opt(&cbe_lun->options, "ha_role");
+	if (value != NULL) {
+		if (strcmp(value, "primary") == 0)
+			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
+		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
 
-	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
-		for (i = 0; i < req->num_be_args; i++) {
-			if (strcmp(req->kern_be_args[i].kname, "file") == 0) {
-				file_arg = &req->kern_be_args[i];
-				break;
+	if (cbe_lun->lun_type == T_DIRECT ||
+	    cbe_lun->lun_type == T_CDROM) {
+		be_lun->size_bytes = params->lun_size_bytes;
+		if (params->blocksize_bytes != 0)
+			cbe_lun->blocksize = params->blocksize_bytes;
+		else if (cbe_lun->lun_type == T_CDROM)
+			cbe_lun->blocksize = 2048;
+		else
+			cbe_lun->blocksize = 512;
+		be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize;
+		cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
+		    0 : (be_lun->size_blocks - 1);
+
+		if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
+		    control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
+			retval = ctl_be_block_open(be_lun, req);
+			if (retval != 0) {
+				retval = 0;
+				req->status = CTL_LUN_WARNING;
 			}
 		}
-
-		if (file_arg == NULL) {
-			snprintf(req->error_str, sizeof(req->error_str),
-				 "%s: no file argument specified", __func__);
-			goto bailout_error;
-		}
-
-		be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
-					  M_WAITOK | M_ZERO);
-
-		strlcpy(be_lun->dev_path, (char *)file_arg->kvalue,
-			file_arg->vallen);
-
-		retval = ctl_be_block_open(softc, be_lun, req);
-		if (retval != 0) {
-			retval = 0;
-			goto bailout_error;
-		}
-
-		/*
-		 * Tell the user the size of the file/device.
-		 */
-		params->lun_size_bytes = be_lun->size_bytes;
-
-		/*
-		 * The maximum LBA is the size - 1.
-		 */
-		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
+		num_threads = cbb_num_threads;
 	} else {
-		/*
-		 * For processor devices, we don't have any size.
-		 */
-		be_lun->blocksize = 0;
-		be_lun->size_blocks = 0;
-		be_lun->size_bytes = 0;
-		be_lun->ctl_be_lun.maxlba = 0;
-		params->lun_size_bytes = 0;
-
-		/*
-		 * Default to just 1 thread for processor devices.
-		 */
 		num_threads = 1;
 	}
 
-	/*
-	 * XXX This searching loop might be refactored to be combined with
-	 * the loop above,
-	 */
-	for (i = 0; i < req->num_be_args; i++) {
-		if (strcmp(req->kern_be_args[i].kname, "num_threads") == 0) {
-			struct ctl_be_arg *thread_arg;
-			char num_thread_str[16];
-			int tmp_num_threads;
+	value = ctl_get_opt(&cbe_lun->options, "num_threads");
+	if (value != NULL) {
+		tmp_num_threads = strtol(value, NULL, 0);
 
-
-			thread_arg = &req->kern_be_args[i];
-
-			strlcpy(num_thread_str, (char *)thread_arg->kvalue,
-				min(thread_arg->vallen,
-				sizeof(num_thread_str)));
-
-			tmp_num_threads = strtol(num_thread_str, NULL, 0);
-
-			/*
-			 * We don't let the user specify less than one
-			 * thread, but hope he's clueful enough not to
-			 * specify 1000 threads.
-			 */
-			if (tmp_num_threads < 1) {
-				snprintf(req->error_str, sizeof(req->error_str),
-					 "%s: invalid number of threads %s",
-				         __func__, num_thread_str);
-				goto bailout_error;
-			}
-
-			num_threads = tmp_num_threads;
+		/*
+		 * We don't let the user specify fewer than one
+		 * thread, but hope he's clueful enough not to
+		 * specify 1000 threads.
+		 */
+		if (tmp_num_threads < 1) {
+			snprintf(req->error_str, sizeof(req->error_str),
+				 "invalid number of threads %s",
+				 value);
+			goto bailout_error;
 		}
+		num_threads = tmp_num_threads;
 	}
 
-	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
-	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
-	be_lun->ctl_be_lun.be_lun = be_lun;
-	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
+	if (be_lun->vn == NULL)
+		cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
 	/* Tell the user the blocksize we ended up using */
-	params->blocksize_bytes = be_lun->blocksize;
+	params->lun_size_bytes = be_lun->size_bytes;
+	params->blocksize_bytes = cbe_lun->blocksize;
 	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
-		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
-		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
+		cbe_lun->req_lun_id = params->req_lun_id;
+		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
 	} else
-		be_lun->ctl_be_lun.req_lun_id = 0;
+		cbe_lun->req_lun_id = 0;
 
-	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
-	be_lun->ctl_be_lun.lun_config_status =
-		ctl_be_block_lun_config_status;
-	be_lun->ctl_be_lun.be = &ctl_be_block_driver;
+	cbe_lun->lun_shutdown = ctl_be_block_lun_shutdown;
+	cbe_lun->lun_config_status = ctl_be_block_lun_config_status;
+	cbe_lun->be = &ctl_be_block_driver;
 
 	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
 		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
 			 softc->num_luns);
-		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
-			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
-			sizeof(tmpstr)));
+		strncpy((char *)cbe_lun->serial_num, tmpstr,
+			MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));
 
 		/* Tell the user what we used for a serial number */
 		strncpy((char *)params->serial_num, tmpstr,
-			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
+			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
 	} else { 
-		strncpy((char *)be_lun->ctl_be_lun.serial_num,
-			params->serial_num,
-			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
+		strncpy((char *)cbe_lun->serial_num, params->serial_num,
+			MIN(sizeof(cbe_lun->serial_num),
 			sizeof(params->serial_num)));
 	}
 	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
 		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
-		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
-			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
-			sizeof(tmpstr)));
+		strncpy((char *)cbe_lun->device_id, tmpstr,
+			MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));
 
 		/* Tell the user what we used for a device ID */
 		strncpy((char *)params->device_id, tmpstr,
-			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
+			MIN(sizeof(params->device_id), sizeof(tmpstr)));
 	} else {
-		strncpy((char *)be_lun->ctl_be_lun.device_id,
-			params->device_id,
-			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
-				sizeof(params->device_id)));
+		strncpy((char *)cbe_lun->device_id, params->device_id,
+			MIN(sizeof(cbe_lun->device_id),
+			    sizeof(params->device_id)));
 	}
 
 	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);
@@ -1811,7 +2373,7 @@
 
 	if (be_lun->io_taskqueue == NULL) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: Unable to create taskqueue", __func__);
+			 "unable to create taskqueue");
 		goto bailout_error;
 	}
 
@@ -1846,7 +2408,7 @@
 
 	mtx_unlock(&softc->lock);
 
-	retval = ctl_add_lun(&be_lun->ctl_be_lun);
+	retval = ctl_add_lun(&be_lun->cbe_lun);
 	if (retval != 0) {
 		mtx_lock(&softc->lock);
 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
@@ -1854,8 +2416,8 @@
 		softc->num_luns--;
 		mtx_unlock(&softc->lock);
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: ctl_add_lun() returned error %d, see dmesg for "
-			"details", __func__, retval);
+			 "ctl_add_lun() returned error %d, see dmesg for "
+			 "details", retval);
 		retval = 0;
 		goto bailout_error;
 	}
@@ -1877,8 +2439,7 @@
 
 	if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: LUN configuration error, see dmesg for details",
-			 __func__);
+			 "LUN configuration error, see dmesg for details");
 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
 			      links);
 		softc->num_luns--;
@@ -1885,29 +2446,33 @@
 		mtx_unlock(&softc->lock);
 		goto bailout_error;
 	} else {
-		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
+		params->req_lun_id = cbe_lun->lun_id;
 	}
 
 	mtx_unlock(&softc->lock);
 
 	be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
-					       be_lun->blocksize,
+					       cbe_lun->blocksize,
 					       DEVSTAT_ALL_SUPPORTED,
-					       be_lun->ctl_be_lun.lun_type
+					       cbe_lun->lun_type
 					       | DEVSTAT_TYPE_IF_OTHER,
 					       DEVSTAT_PRIORITY_OTHER);
 
-
-	req->status = CTL_LUN_OK;
-
 	return (retval);
 
 bailout_error:
 	req->status = CTL_LUN_ERROR;
 
+	if (be_lun->io_taskqueue != NULL)
+		taskqueue_free(be_lun->io_taskqueue);
 	ctl_be_block_close(be_lun);
-
-	free(be_lun->dev_path, M_CTLBLK);
+	if (be_lun->dev_path != NULL)
+		free(be_lun->dev_path, M_CTLBLK);
+	if (be_lun->lun_zone != NULL)
+		uma_zdestroy(be_lun->lun_zone);
+	ctl_free_opts(&cbe_lun->options);
+	mtx_destroy(&be_lun->queue_lock);
+	mtx_destroy(&be_lun->io_lock);
 	free(be_lun, M_CTLBLK);
 
 	return (retval);
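
The bailout_error path now unwinds every resource the function may have
allocated before the failure, testing each for NULL so that one label
serves all failure sites.  The idiom, condensed with invented names:

    struct thing {
        struct mtx lock;
        uma_zone_t zone;
    };

    static struct thing *
    create_thing(void)
    {
        struct thing *t;

        t = malloc(sizeof(*t), M_TEMP, M_WAITOK | M_ZERO);
        mtx_init(&t->lock, "tlock", NULL, MTX_DEF);
        t->zone = uma_zcreate("tzone", 4096, NULL, NULL, NULL, NULL, 0, 0);
        if (t->zone == NULL)
            goto bailout;
        return (t);

    bailout:
        /* Release in reverse order; skip what was never allocated. */
        if (t->zone != NULL)
            uma_zdestroy(t->zone);
        mtx_destroy(&t->lock);
        free(t, M_TEMP);
        return (NULL);
    }
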
@@ -1918,61 +2483,60 @@
 {
 	struct ctl_lun_rm_params *params;
 	struct ctl_be_block_lun *be_lun;
+	struct ctl_be_lun *cbe_lun;
 	int retval;
 
 	params = &req->reqdata.rm;
 
 	mtx_lock(&softc->lock);
-
-	be_lun = NULL;
-
 	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
-		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
+		if (be_lun->cbe_lun.lun_id == params->lun_id)
 			break;
 	}
 	mtx_unlock(&softc->lock);
-
 	if (be_lun == NULL) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: LUN %u is not managed by the block backend",
-			 __func__, params->lun_id);
+			 "LUN %u is not managed by the block backend",
+			 params->lun_id);
 		goto bailout_error;
 	}
+	cbe_lun = &be_lun->cbe_lun;
 
-	retval = ctl_disable_lun(&be_lun->ctl_be_lun);
-
+	retval = ctl_disable_lun(cbe_lun);
 	if (retval != 0) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: error %d returned from ctl_disable_lun() for "
-			 "LUN %d", __func__, retval, params->lun_id);
+			 "error %d returned from ctl_disable_lun() for "
+			 "LUN %d", retval, params->lun_id);
 		goto bailout_error;
+	}
 
+	if (be_lun->vn != NULL) {
+		cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
+		ctl_lun_no_media(cbe_lun);
+		taskqueue_drain_all(be_lun->io_taskqueue);
+		ctl_be_block_close(be_lun);
 	}
 
-	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
+	retval = ctl_invalidate_lun(cbe_lun);
 	if (retval != 0) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: error %d returned from ctl_invalidate_lun() for "
-			 "LUN %d", __func__, retval, params->lun_id);
+			 "error %d returned from ctl_invalidate_lun() for "
+			 "LUN %d", retval, params->lun_id);
 		goto bailout_error;
 	}
 
 	mtx_lock(&softc->lock);
-
 	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
-
 	while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
                 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
                 if (retval == EINTR)
                         break;
         }
-
 	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
 
 	if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: interrupted waiting for LUN to be freed", 
-			 __func__);
+			 "interrupted waiting for LUN to be freed");
 		mtx_unlock(&softc->lock);
 		goto bailout_error;
 	}
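
LUN removal relies on a flag-and-wakeup handshake: the remover marks the
LUN WAITING and msleep()s until the lun_shutdown callback (further below)
marks it UNCONFIGURED and calls wakeup().  Both halves of the pattern,
reduced to their essentials (flag names shortened):

    /* Waiter, on the removal path: */
    mtx_lock(&softc->lock);
    lun->flags |= LUN_WAITING;
    while ((lun->flags & LUN_UNCONFIGURED) == 0) {
        if (msleep(lun, &softc->lock, PCATCH, "ctlblk", 0) == EINTR)
            break;      /* interrupted by a signal */
    }
    lun->flags &= ~LUN_WAITING;
    mtx_unlock(&softc->lock);

    /* Completion side, in the shutdown callback: */
    mtx_lock(&softc->lock);
    lun->flags |= LUN_UNCONFIGURED;
    if (lun->flags & LUN_WAITING)
        wakeup(lun);
    mtx_unlock(&softc->lock);
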
@@ -1982,178 +2546,119 @@
 	softc->num_luns--;
 	mtx_unlock(&softc->lock);
 
-	taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
-
+	taskqueue_drain_all(be_lun->io_taskqueue);
 	taskqueue_free(be_lun->io_taskqueue);
 
-	ctl_be_block_close(be_lun);
-
 	if (be_lun->disk_stats != NULL)
 		devstat_remove_entry(be_lun->disk_stats);
 
 	uma_zdestroy(be_lun->lun_zone);
 
+	ctl_free_opts(&cbe_lun->options);
 	free(be_lun->dev_path, M_CTLBLK);
-
+	mtx_destroy(&be_lun->queue_lock);
+	mtx_destroy(&be_lun->io_lock);
 	free(be_lun, M_CTLBLK);
 
 	req->status = CTL_LUN_OK;
-
 	return (0);
 
 bailout_error:
-
 	req->status = CTL_LUN_ERROR;
-
 	return (0);
 }
 
 static int
-ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
-			 struct ctl_lun_req *req)
-{
-	struct vattr vattr;
-	int error;
-	struct ctl_lun_modify_params *params;
-
-	params = &req->reqdata.modify;
-
-	if (params->lun_size_bytes != 0) {
-		be_lun->size_bytes = params->lun_size_bytes;
-	} else  {
-		error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
-		if (error != 0) {
-			snprintf(req->error_str, sizeof(req->error_str),
-				 "error calling VOP_GETATTR() for file %s",
-				 be_lun->dev_path);
-			return (error);
-		}
-
-		be_lun->size_bytes = vattr.va_size;
-	}
-
-	return (0);
-}
-
-static int
-ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
-			struct ctl_lun_req *req)
-{
-	struct cdev *dev;
-	struct cdevsw *devsw;
-	int error;
-	struct ctl_lun_modify_params *params;
-	uint64_t size_bytes;
-
-	params = &req->reqdata.modify;
-
-	dev = be_lun->vn->v_rdev;
-	devsw = dev->si_devsw;
-	if (!devsw->d_ioctl) {
-		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: no d_ioctl for device %s!", __func__,
-			 be_lun->dev_path);
-		return (ENODEV);
-	}
-
-	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
-			       (caddr_t)&size_bytes, FREAD,
-			       curthread);
-	if (error) {
-		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
-			 "on %s!", __func__, error, be_lun->dev_path);
-		return (error);
-	}
-
-	if (params->lun_size_bytes != 0) {
-		if (params->lun_size_bytes > size_bytes) {
-			snprintf(req->error_str, sizeof(req->error_str),
-				 "%s: requested LUN size %ju > backing device "
-				 "size %ju", __func__,
-				 (uintmax_t)params->lun_size_bytes,
-				 (uintmax_t)size_bytes);
-			return (EINVAL);
-		}
-
-		be_lun->size_bytes = params->lun_size_bytes;
-	} else {
-		be_lun->size_bytes = size_bytes;
-	}
-
-	return (0);
-}
-
-static int
 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
 {
 	struct ctl_lun_modify_params *params;
 	struct ctl_be_block_lun *be_lun;
-	int vfs_is_locked, error;
+	struct ctl_be_lun *cbe_lun;
+	char *value;
+	uint64_t oldsize;
+	int error, wasprim;
 
 	params = &req->reqdata.modify;
 
 	mtx_lock(&softc->lock);
-
-	be_lun = NULL;
-
 	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
-		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
+		if (be_lun->cbe_lun.lun_id == params->lun_id)
 			break;
 	}
 	mtx_unlock(&softc->lock);
-
 	if (be_lun == NULL) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: LUN %u is not managed by the block backend",
-			 __func__, params->lun_id);
+			 "LUN %u is not managed by the block backend",
+			 params->lun_id);
 		goto bailout_error;
 	}
+	cbe_lun = &be_lun->cbe_lun;
 
-	if (params->lun_size_bytes != 0) {
-		if (params->lun_size_bytes < be_lun->blocksize) {
-			snprintf(req->error_str, sizeof(req->error_str),
-				"%s: LUN size %ju < blocksize %u", __func__,
-				params->lun_size_bytes, be_lun->blocksize);
-			goto bailout_error;
-		}
-	}
+	if (params->lun_size_bytes != 0)
+		be_lun->params.lun_size_bytes = params->lun_size_bytes;
+	ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
 
-	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
-	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
-
-	if (be_lun->vn->v_type == VREG)
-		error = ctl_be_block_modify_file(be_lun, req);
+	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
+	value = ctl_get_opt(&cbe_lun->options, "ha_role");
+	if (value != NULL) {
+		if (strcmp(value, "primary") == 0)
+			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+		else
+			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
+	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
+		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
 	else
-		error = ctl_be_block_modify_dev(be_lun, req);
+		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
+	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
+		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
+			ctl_lun_primary(cbe_lun);
+		else
+			ctl_lun_secondary(cbe_lun);
+	}
 
-	VOP_UNLOCK(be_lun->vn, 0);
-	VFS_UNLOCK_GIANT(vfs_is_locked);
+	oldsize = be_lun->size_blocks;
+	if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
+	    control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
+		if (be_lun->vn == NULL)
+			error = ctl_be_block_open(be_lun, req);
+		else if (vn_isdisk(be_lun->vn, &error))
+			error = ctl_be_block_open_dev(be_lun, req);
+		else if (be_lun->vn->v_type == VREG) {
+			vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
+			error = ctl_be_block_open_file(be_lun, req);
+			VOP_UNLOCK(be_lun->vn, 0);
+		} else
+			error = EINVAL;
+		if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) &&
+		    be_lun->vn != NULL) {
+			cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA;
+			ctl_lun_has_media(cbe_lun);
+		} else if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) == 0 &&
+		    be_lun->vn == NULL) {
+			cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
+			ctl_lun_no_media(cbe_lun);
+		}
+		cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED;
+	} else {
+		if (be_lun->vn != NULL) {
+			cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
+			ctl_lun_no_media(cbe_lun);
+			taskqueue_drain_all(be_lun->io_taskqueue);
+			error = ctl_be_block_close(be_lun);
+		} else
+			error = 0;
+	}
+	if (be_lun->size_blocks != oldsize)
+		ctl_lun_capacity_changed(cbe_lun);
 
-	if (error != 0)
-		goto bailout_error;
-
-	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;
-
-	/*
-	 * The maximum LBA is the size - 1.
-	 *
-	 * XXX: Note that this field is being updated without locking,
-	 * 	which might cause problems on 32-bit architectures.
-	 */
-	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
-	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);
-
 	/* Tell the user the exact size we ended up using */
 	params->lun_size_bytes = be_lun->size_bytes;
 
-	req->status = CTL_LUN_OK;
-
+	req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK;
 	return (0);
 
 bailout_error:
 	req->status = CTL_LUN_ERROR;
-
 	return (0);
 }
 
@@ -2164,7 +2669,6 @@
 	struct ctl_be_block_softc *softc;
 
 	lun = (struct ctl_be_block_lun *)be_lun;
-
 	softc = lun->softc;
 
 	mtx_lock(&softc->lock);
@@ -2172,7 +2676,6 @@
 	if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
 		wakeup(lun);
 	mtx_unlock(&softc->lock);
-
 }
 
 static void
@@ -2194,9 +2697,9 @@
 		/*
 		 * We successfully added the LUN, attempt to enable it.
 		 */
-		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
+		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
 			printf("%s: ctl_enable_lun() failed!\n", __func__);
-			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
+			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
 				printf("%s: ctl_invalidate_lun() failed!\n",
 				       __func__);
 			}
@@ -2218,20 +2721,21 @@
 ctl_be_block_config_write(union ctl_io *io)
 {
 	struct ctl_be_block_lun *be_lun;
-	struct ctl_be_lun *ctl_be_lun;
+	struct ctl_be_lun *cbe_lun;
 	int retval;
 
-	retval = 0;
-
 	DPRINTF("entered\n");
 
-	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
-		CTL_PRIV_BACKEND_LUN].ptr;
-	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
+	cbe_lun = CTL_BACKEND_LUN(io);
+	be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
 
+	retval = 0;
 	switch (io->scsiio.cdb[0]) {
 	case SYNCHRONIZE_CACHE:
 	case SYNCHRONIZE_CACHE_16:
+	case WRITE_SAME_10:
+	case WRITE_SAME_16:
+	case UNMAP:
 		/*
 		 * The upper level CTL code will filter out any CDBs with
 		 * the immediate bit set and return the proper error.
@@ -2240,48 +2744,54 @@
 		 * user asked to be synced out.  When they issue a sync
 		 * cache command, we'll sync out the whole thing.
 		 */
-		mtx_lock(&be_lun->lock);
+		mtx_lock(&be_lun->queue_lock);
 		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
 				   links);
-		mtx_unlock(&be_lun->lock);
+		mtx_unlock(&be_lun->queue_lock);
 		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
 		break;
 	case START_STOP_UNIT: {
 		struct scsi_start_stop_unit *cdb;
+		struct ctl_lun_req req;
 
 		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
-
-		if (cdb->how & SSS_START)
-			retval = ctl_start_lun(ctl_be_lun);
-		else {
-			retval = ctl_stop_lun(ctl_be_lun);
-			/*
-			 * XXX KDM Copan-specific offline behavior.
-			 * Figure out a reasonable way to port this?
-			 */
-#ifdef NEEDTOPORT
-			if ((retval == 0)
-			 && (cdb->byte2 & SSS_ONOFFLINE))
-				retval = ctl_lun_offline(ctl_be_lun);
-#endif
+		if ((cdb->how & SSS_PC_MASK) != 0) {
+			ctl_set_success(&io->scsiio);
+			ctl_config_write_done(io);
+			break;
 		}
-
-		/*
-		 * In general, the above routines should not fail.  They
-		 * just set state for the LUN.  So we've got something
-		 * pretty wrong here if we can't start or stop the LUN.
-		 */
-		if (retval != 0) {
-			ctl_set_internal_failure(&io->scsiio,
-						 /*sks_valid*/ 1,
-						 /*retry_count*/ 0xf051);
-			retval = CTL_RETVAL_COMPLETE;
+		if (cdb->how & SSS_START) {
+			if ((cdb->how & SSS_LOEJ) && be_lun->vn == NULL) {
+				retval = ctl_be_block_open(be_lun, &req);
+				cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED;
+				if (retval == 0) {
+					cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA;
+					ctl_lun_has_media(cbe_lun);
+				} else {
+					cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
+					ctl_lun_no_media(cbe_lun);
+				}
+			}
+			ctl_start_lun(cbe_lun);
 		} else {
-			ctl_set_success(&io->scsiio);
+			ctl_stop_lun(cbe_lun);
+			if (cdb->how & SSS_LOEJ) {
+				cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
+				cbe_lun->flags |= CTL_LUN_FLAG_EJECTED;
+				ctl_lun_ejected(cbe_lun);
+				if (be_lun->vn != NULL)
+					ctl_be_block_close(be_lun);
+			}
 		}
+
+		ctl_set_success(&io->scsiio);
 		ctl_config_write_done(io);
 		break;
 	}
+	case PREVENT_ALLOW:
+		ctl_set_success(&io->scsiio);
+		ctl_config_write_done(io);
+		break;
 	default:
 		ctl_set_invalid_opcode(&io->scsiio);
 		ctl_config_write_done(io);
@@ -2290,13 +2800,49 @@
 	}
 
 	return (retval);
-
 }
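
The START STOP UNIT handler above now honors the load/eject bit: start with
SSS_LOEJ reopens the backing store (media load), stop with SSS_LOEJ closes
it and marks the LUN ejected, and any power-condition request is accepted
as a no-op.  A sketch of the CDB decode, using the bit macros from
<cam/scsi/scsi_all.h>:

    static const char *
    sssu_action(uint8_t how)
    {
        if ((how & SSS_PC_MASK) != 0)
            return ("power condition change only");
        if (how & SSS_START)
            return ((how & SSS_LOEJ) ? "load media and start" : "start");
        return ((how & SSS_LOEJ) ? "stop and eject" : "stop");
    }
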
 
 static int
 ctl_be_block_config_read(union ctl_io *io)
 {
-	return (0);
+	struct ctl_be_block_lun *be_lun;
+	struct ctl_be_lun *cbe_lun;
+	int retval = 0;
+
+	DPRINTF("entered\n");
+
+	cbe_lun = CTL_BACKEND_LUN(io);
+	be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
+
+	switch (io->scsiio.cdb[0]) {
+	case SERVICE_ACTION_IN:
+		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
+			mtx_lock(&be_lun->queue_lock);
+			STAILQ_INSERT_TAIL(&be_lun->config_read_queue,
+			    &io->io_hdr, links);
+			mtx_unlock(&be_lun->queue_lock);
+			taskqueue_enqueue(be_lun->io_taskqueue,
+			    &be_lun->io_task);
+			retval = CTL_RETVAL_QUEUED;
+			break;
+		}
+		ctl_set_invalid_field(&io->scsiio,
+				      /*sks_valid*/ 1,
+				      /*command*/ 1,
+				      /*field*/ 1,
+				      /*bit_valid*/ 1,
+				      /*bit*/ 4);
+		ctl_config_read_done(io);
+		retval = CTL_RETVAL_COMPLETE;
+		break;
+	default:
+		ctl_set_invalid_opcode(&io->scsiio);
+		ctl_config_read_done(io);
+		retval = CTL_RETVAL_COMPLETE;
+		break;
+	}
+
+	return (retval);
 }
 
 static int
@@ -2306,58 +2852,63 @@
 	int retval;
 
 	lun = (struct ctl_be_block_lun *)be_lun;
-	retval = 0;
 
-	retval = sbuf_printf(sb, "<num_threads>");
-
+	retval = sbuf_printf(sb, "\t<num_threads>");
 	if (retval != 0)
 		goto bailout;
-
 	retval = sbuf_printf(sb, "%d", lun->num_threads);
-
 	if (retval != 0)
 		goto bailout;
+	retval = sbuf_printf(sb, "</num_threads>\n");
 
-	retval = sbuf_printf(sb, "</num_threads>");
+bailout:
+	return (retval);
+}
 
-	/*
-	 * For processor devices, we don't have a path variable.
-	 */
-	if ((retval != 0)
-	 || (lun->dev_path == NULL))
-		goto bailout;
+static uint64_t
+ctl_be_block_lun_attr(void *be_lun, const char *attrname)
+{
+	struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)be_lun;
 
-	retval = sbuf_printf(sb, "<file>");
+	if (lun->getattr == NULL)
+		return (UINT64_MAX);
+	return (lun->getattr(lun, attrname));
+}
 
-	if (retval != 0)
-		goto bailout;
+static int
+ctl_be_block_init(void)
+{
+	struct ctl_be_block_softc *softc = &backend_block_softc;
 
-	retval = ctl_sbuf_printf_esc(sb, lun->dev_path);
+	mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF);
+	softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+	STAILQ_INIT(&softc->lun_list);
+	return (0);
+}
 
-	if (retval != 0)
-		goto bailout;
 
-	retval = sbuf_printf(sb, "</file>\n");
-
-bailout:
-
-	return (retval);
-}
-
-int
-ctl_be_block_init(void)
+static int
+ctl_be_block_shutdown(void)
 {
-	struct ctl_be_block_softc *softc;
-	int retval;
+	struct ctl_be_block_softc *softc = &backend_block_softc;
+	struct ctl_be_block_lun *lun, *next_lun;
 
-	softc = &backend_block_softc;
-	retval = 0;
+	mtx_lock(&softc->lock);
+	STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
+		/*
+		 * Drop our lock here.  Since ctl_invalidate_lun() can call
+		 * back into us, this could potentially lead to a recursive
+		 * lock of the same mutex, which would cause a hang.
+		 */
+		mtx_unlock(&softc->lock);
+		ctl_disable_lun(&lun->cbe_lun);
+		ctl_invalidate_lun(&lun->cbe_lun);
+		mtx_lock(&softc->lock);
+	}
+	mtx_unlock(&softc->lock);
 
-	mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
-	STAILQ_INIT(&softc->beio_free_queue);
-	STAILQ_INIT(&softc->disk_list);
-	STAILQ_INIT(&softc->lun_list);
-	ctl_grow_beio(softc, 200);
-
-	return (retval);
+	uma_zdestroy(softc->beio_zone);
+	mtx_destroy(&softc->lock);
+	return (0);
 }

Modified: trunk/sys/cam/ctl/ctl_backend_ramdisk.c
===================================================================
--- trunk/sys/cam/ctl/ctl_backend_ramdisk.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_backend_ramdisk.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,6 +1,8 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
  * Copyright (c) 2012 The FreeBSD Foundation
+ * Copyright (c) 2014-2017 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Portions of this software were developed by Edward Tomasz Napierala
@@ -31,16 +33,16 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_backend_ramdisk.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
  */
 /*
- * CAM Target Layer backend for a "fake" ramdisk.
+ * CAM Target Layer black hole and RAM disk backend.
  *
  * Author: Ken Merry <ken at FreeBSD.org>
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_backend_ramdisk.c 314026 2017-02-21 05:13:16Z mav $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -47,26 +49,55 @@
 #include <sys/kernel.h>
 #include <sys/condvar.h>
 #include <sys/types.h>
+#include <sys/limits.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/malloc.h>
+#include <sys/sx.h>
+#include <sys/taskqueue.h>
 #include <sys/time.h>
 #include <sys/queue.h>
 #include <sys/conf.h>
 #include <sys/ioccom.h>
 #include <sys/module.h>
+#include <sys/sysctl.h>
 
 #include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
 #include <cam/ctl/ctl_io.h>
 #include <cam/ctl/ctl.h>
 #include <cam/ctl/ctl_util.h>
 #include <cam/ctl/ctl_backend.h>
-#include <cam/ctl/ctl_frontend_internal.h>
 #include <cam/ctl/ctl_debug.h>
 #include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
 #include <cam/ctl/ctl_error.h>
 
+#define PRIV(io)	\
+    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
+#define ARGS(io)	\
+    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
+
+#define	PPP	(PAGE_SIZE / sizeof(uint8_t **))
+#ifdef __LP64__
+#define	PPPS	(PAGE_SHIFT - 3)
+#else
+#define	PPPS	(PAGE_SHIFT - 2)
+#endif
+#define	SGPP	(PAGE_SIZE / sizeof(struct ctl_sg_entry))
+
+#define	P_UNMAPPED	NULL			/* Page is unmapped. */
+#define	P_ANCHORED	((void *)(uintptr_t)1)	/* Page is anchored. */
+
 typedef enum {
+	GP_READ,	/* Return data page or zero page. */
+	GP_WRITE,	/* Return data page; try to allocate if none. */
+	GP_ANCHOR,	/* Return data page; try to anchor if none. */
+	GP_OTHER,	/* Return what is present; do not allocate/anchor. */
+} getpage_op_t;
+
+typedef enum {
 	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
 	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
 	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
@@ -73,46 +104,58 @@
 } ctl_be_ramdisk_lun_flags;
 
 struct ctl_be_ramdisk_lun {
-	uint64_t size_bytes;
-	uint64_t size_blocks;
+	struct ctl_lun_create_params params;
+	char			lunname[32];
+	int			indir;
+	uint8_t			**pages;
+	uint8_t			*zero_page;
+	struct sx		page_lock;
+	u_int			pblocksize;
+	u_int			pblockmul;
+	uint64_t		size_bytes;
+	uint64_t		size_blocks;
+	uint64_t		cap_bytes;
+	uint64_t		cap_used;
 	struct ctl_be_ramdisk_softc *softc;
 	ctl_be_ramdisk_lun_flags flags;
 	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
-	struct ctl_be_lun ctl_be_lun;
+	struct ctl_be_lun	cbe_lun;
+	struct taskqueue	*io_taskqueue;
+	struct task		io_task;
+	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
+	struct mtx_padalign	queue_lock;
 };
 
 struct ctl_be_ramdisk_softc {
 	struct mtx lock;
-	int rd_size;
-#ifdef CTL_RAMDISK_PAGES
-	uint8_t **ramdisk_pages;
-	int num_pages;
-#else
-	uint8_t *ramdisk_buffer;
-#endif
 	int num_luns;
 	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
 };
 
 static struct ctl_be_ramdisk_softc rd_softc;
+extern struct ctl_softc *control_softc;
 
-int ctl_backend_ramdisk_init(void);
-void ctl_backend_ramdisk_shutdown(void);
+static int ctl_backend_ramdisk_init(void);
+static int ctl_backend_ramdisk_shutdown(void);
 static int ctl_backend_ramdisk_move_done(union ctl_io *io);
+static void ctl_backend_ramdisk_compare(union ctl_io *io);
+static void ctl_backend_ramdisk_rw(union ctl_io *io);
 static int ctl_backend_ramdisk_submit(union ctl_io *io);
+static void ctl_backend_ramdisk_worker(void *context, int pending);
+static int ctl_backend_ramdisk_config_read(union ctl_io *io);
+static int ctl_backend_ramdisk_config_write(union ctl_io *io);
+static uint64_t ctl_backend_ramdisk_lun_attr(void *be_lun, const char *attrname);
 static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
 				     caddr_t addr, int flag, struct thread *td);
 static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
 				  struct ctl_lun_req *req);
 static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
-				      struct ctl_lun_req *req, int do_wait);
+				      struct ctl_lun_req *req);
 static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
 				  struct ctl_lun_req *req);
 static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
 static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
 						  ctl_lun_config_status status);
-static int ctl_backend_ramdisk_config_write(union ctl_io *io);
-static int ctl_backend_ramdisk_config_read(union ctl_io *io);
 
 static struct ctl_backend_driver ctl_be_ramdisk_driver = 
 {
@@ -119,249 +162,693 @@
 	.name = "ramdisk",
 	.flags = CTL_BE_FLAG_HAS_CONFIG,
 	.init = ctl_backend_ramdisk_init,
+	.shutdown = ctl_backend_ramdisk_shutdown,
 	.data_submit = ctl_backend_ramdisk_submit,
 	.data_move_done = ctl_backend_ramdisk_move_done,
 	.config_read = ctl_backend_ramdisk_config_read,
 	.config_write = ctl_backend_ramdisk_config_write,
-	.ioctl = ctl_backend_ramdisk_ioctl
+	.ioctl = ctl_backend_ramdisk_ioctl,
+	.lun_attr = ctl_backend_ramdisk_lun_attr,
 };
 
 MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
 CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
 
-int
+static int
 ctl_backend_ramdisk_init(void)
 {
-	struct ctl_be_ramdisk_softc *softc;
-#ifdef CTL_RAMDISK_PAGES
-	int i;
-#endif
+	struct ctl_be_ramdisk_softc *softc = &rd_softc;
 
-
-	softc = &rd_softc;
-
 	memset(softc, 0, sizeof(*softc));
-
-	mtx_init(&softc->lock, "ramdisk", NULL, MTX_DEF);
-
+	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
 	STAILQ_INIT(&softc->lun_list);
-	softc->rd_size = 4 * 1024 * 1024;
-#ifdef CTL_RAMDISK_PAGES
-	softc->num_pages = softc->rd_size / PAGE_SIZE;
-	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
-						  softc->num_pages, M_RAMDISK,
-						  M_WAITOK);
-	for (i = 0; i < softc->num_pages; i++)
-		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
-#else
-	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
-						  M_WAITOK);
-#endif
-
 	return (0);
 }
 
-void
+static int
 ctl_backend_ramdisk_shutdown(void)
 {
-	struct ctl_be_ramdisk_softc *softc;
+	struct ctl_be_ramdisk_softc *softc = &rd_softc;
 	struct ctl_be_ramdisk_lun *lun, *next_lun;
-#ifdef CTL_RAMDISK_PAGES
-	int i;
-#endif
 
-	softc = &rd_softc;
-
 	mtx_lock(&softc->lock);
-	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
+	STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
 		/*
-		 * Grab the next LUN.  The current LUN may get removed by
-		 * ctl_invalidate_lun(), which will call our LUN shutdown
-		 * routine, if there is no outstanding I/O for this LUN.
-		 */
-		next_lun = STAILQ_NEXT(lun, links);
-
-		/*
 		 * Drop our lock here.  Since ctl_invalidate_lun() can call
 		 * back into us, this could potentially lead to a recursive
 		 * lock of the same mutex, which would cause a hang.
 		 */
 		mtx_unlock(&softc->lock);
-		ctl_disable_lun(&lun->ctl_be_lun);
-		ctl_invalidate_lun(&lun->ctl_be_lun);
+		ctl_disable_lun(&lun->cbe_lun);
+		ctl_invalidate_lun(&lun->cbe_lun);
 		mtx_lock(&softc->lock);
 	}
 	mtx_unlock(&softc->lock);
-	
-#ifdef CTL_RAMDISK_PAGES
-	for (i = 0; i < softc->num_pages; i++)
-		free(softc->ramdisk_pages[i], M_RAMDISK);
+	mtx_destroy(&softc->lock);
+	return (0);
+}
 
-	free(softc->ramdisk_pages, M_RAMDISK);
-#else
-	free(softc->ramdisk_buffer, M_RAMDISK);
-#endif
+static uint8_t *
+ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
+    getpage_op_t op)
+{
+	uint8_t **p, ***pp;
+	off_t i;
+	int s;
 
-	if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
-		printf("ctl_backend_ramdisk_shutdown: "
-		       "ctl_backend_deregister() failed!\n");
+	if (be_lun->cap_bytes == 0) {
+		switch (op) {
+		case GP_READ:
+			return (be_lun->zero_page);
+		case GP_WRITE:
+			return ((uint8_t *)be_lun->pages);
+		case GP_ANCHOR:
+			return (P_ANCHORED);
+		default:
+			return (P_UNMAPPED);
+		}
 	}
+	if (op == GP_WRITE || op == GP_ANCHOR) {
+		sx_xlock(&be_lun->page_lock);
+		pp = &be_lun->pages;
+		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+			if (*pp == NULL) {
+				*pp = malloc(PAGE_SIZE, M_RAMDISK,
+				    M_WAITOK|M_ZERO);
+			}
+			i = pn >> s;
+			pp = (uint8_t ***)&(*pp)[i];
+			pn -= i << s;
+		}
+		if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
+			if (op == GP_WRITE) {
+				*pp = malloc(be_lun->pblocksize, M_RAMDISK,
+				    M_WAITOK|M_ZERO);
+			} else
+				*pp = P_ANCHORED;
+			be_lun->cap_used += be_lun->pblocksize;
+		} else if (*pp == P_ANCHORED && op == GP_WRITE) {
+			*pp = malloc(be_lun->pblocksize, M_RAMDISK,
+			    M_WAITOK|M_ZERO);
+		}
+		sx_xunlock(&be_lun->page_lock);
+		return ((uint8_t *)*pp);
+	} else {
+		sx_slock(&be_lun->page_lock);
+		p = be_lun->pages;
+		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+			if (p == NULL)
+				break;
+			i = pn >> s;
+			p = (uint8_t **)p[i];
+			pn -= i << s;
+		}
+		sx_sunlock(&be_lun->page_lock);
+		if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
+			return (be_lun->zero_page);
+		return ((uint8_t *)p);
+	}
+}
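
/*
 * Note: ctl_backend_ramdisk_getpage() walks be_lun->pages as an
 * indir-level radix tree: each interior page holds PPP child pointers,
 * and each level consumes PPPS = log2(PPP) bits of the page number.
 * GP_WRITE allocates missing interior and data pages (charging
 * cap_used); GP_READ maps holes to the shared zero_page.  Below is a
 * minimal userspace sketch of the same walk; the PPP/PPPS values are
 * assumed for a 4 KB page on a 64-bit build, and this is not the
 * kernel code itself.
 */
#include <stdio.h>
#include <stdlib.h>

#define PPP	512		/* assumed: PAGE_SIZE / sizeof(void *) */
#define PPPS	9		/* log2(PPP) */

static void *
radix_getpage(void **rootp, int indir, long pn, size_t pagesize)
{
	void **pp = rootp;
	long i;
	int s;

	for (s = (indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)	/* missing interior page */
			*pp = calloc(PPP, sizeof(void *));
		i = pn >> s;		/* child index at this level */
		pp = (void **)*pp + i;
		pn -= i << s;		/* remainder for the next level */
	}
	if (*pp == NULL)		/* missing data page */
		*pp = calloc(1, pagesize);
	return (*pp);
}

int
main(void)
{
	void *root = NULL;
	char *pg = radix_getpage(&root, 2, 123456, 4096);

	pg[0] = 'x';
	printf("page %p byte0=%c\n", (void *)pg, pg[0]);
	return (0);
}
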
+
+static void
+ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
+{
+	uint8_t ***pp;
+	off_t i;
+	int s;
+
+	if (be_lun->cap_bytes == 0)
+		return;
+	sx_xlock(&be_lun->page_lock);
+	pp = &be_lun->pages;
+	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+		if (*pp == NULL)
+			goto noindir;
+		i = pn >> s;
+		pp = (uint8_t ***)&(*pp)[i];
+		pn -= i << s;
+	}
+	if (*pp == P_ANCHORED) {
+		be_lun->cap_used -= be_lun->pblocksize;
+		*pp = P_UNMAPPED;
+	} else if (*pp != P_UNMAPPED) {
+		free(*pp, M_RAMDISK);
+		be_lun->cap_used -= be_lun->pblocksize;
+		*pp = P_UNMAPPED;
+	}
+noindir:
+	sx_xunlock(&be_lun->page_lock);
+}
+
+static void
+ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
+{
+	uint8_t ***pp;
+	off_t i;
+	int s;
+
+	if (be_lun->cap_bytes == 0)
+		return;
+	sx_xlock(&be_lun->page_lock);
+	pp = &be_lun->pages;
+	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+		if (*pp == NULL)
+			goto noindir;
+		i = pn >> s;
+		pp = (uint8_t ***)&(*pp)[i];
+		pn -= i << s;
+	}
+	if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
+		be_lun->cap_used += be_lun->pblocksize;
+		*pp = P_ANCHORED;
+	} else if (*pp != P_ANCHORED) {
+		free(*pp, M_RAMDISK);
+		*pp = P_ANCHORED;
+	}
+noindir:
+	sx_xunlock(&be_lun->page_lock);
+}
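
/*
 * Note: P_UNMAPPED and P_ANCHORED are assumed to be sentinel pointer
 * values stored in leaf slots, so deallocated and anchored pages need
 * no backing memory.  Anchored pages still count against cap_used;
 * unmapping releases both the memory and the capacity charge.  A tiny
 * sketch of the unmap transition on one leaf slot (sentinel values
 * invented for illustration, not the kernel definitions):
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define P_UNMAPPED	((void *)(uintptr_t)1)	/* assumed sentinel */
#define P_ANCHORED	((void *)(uintptr_t)2)	/* assumed sentinel */

struct lun {
	void	*slot;		/* one leaf slot of the radix tree */
	size_t	 cap_used;	/* bytes charged against the capacity cap */
	size_t	 pblocksize;
};

static void
unmappage(struct lun *l)
{
	if (l->slot == P_ANCHORED) {		/* anchored -> unmapped */
		l->cap_used -= l->pblocksize;
		l->slot = P_UNMAPPED;
	} else if (l->slot != P_UNMAPPED) {	/* mapped -> unmapped */
		free(l->slot);
		l->cap_used -= l->pblocksize;
		l->slot = P_UNMAPPED;
	}
}

int
main(void)
{
	struct lun l = { .pblocksize = 4096 };

	l.slot = malloc(l.pblocksize);		/* a written page */
	l.cap_used = l.pblocksize;
	unmappage(&l);
	assert(l.slot == P_UNMAPPED && l.cap_used == 0);
	printf("cap_used=%zu\n", l.cap_used);
	return (0);
}
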
+
+static void
+ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
+{
+	int i;
+
+	if (p == NULL)
+		return;
+	if (indir == 0) {
+		free(p, M_RAMDISK);
+		return;
+	}
+	for (i = 0; i < PPP; i++) {
+		if (p[i] == NULL)
+			continue;
+		ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
+	}
+	free(p, M_RAMDISK);
+}
+
+static size_t
+cmp(uint8_t *a, uint8_t *b, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; i++) {
+		if (a[i] != b[i])
+			break;
+	}
+	return (i);
 }
 
 static int
+ctl_backend_ramdisk_cmp(union ctl_io *io)
+{
+	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+	uint8_t *page;
+	uint8_t info[8];
+	uint64_t lba;
+	u_int lbaoff, lbas, res, off;
+
+	lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
+	lba = ARGS(io)->lba + PRIV(io)->len - lbas;
+	off = 0;
+	for (; lbas > 0; lbas--, lba++) {
+		page = ctl_backend_ramdisk_getpage(be_lun,
+		    lba >> cbe_lun->pblockexp, GP_READ);
+		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+		page += lbaoff * cbe_lun->blocksize;
+		res = cmp(io->scsiio.kern_data_ptr + off, page,
+		    cbe_lun->blocksize);
+		off += res;
+		if (res < cbe_lun->blocksize)
+			break;
+	}
+	if (lbas > 0) {
+		off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
+		scsi_u64to8b(off, info);
+		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_MISCOMPARE,
+		    /*asc*/ 0x1D, /*ascq*/ 0x00,
+		    /*type*/ SSD_ELEM_INFO,
+		    /*size*/ sizeof(info), /*data*/ &info,
+		    /*type*/ SSD_ELEM_NONE);
+		return (1);
+	}
+	return (0);
+}
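
/*
 * Note: on a COMPARE miscompare, cmp() returns the index of the first
 * differing byte, and the code above converts it to an offset from the
 * start of the whole transfer (kern_rel_offset rewound by the current
 * chunk) for the sense INFORMATION field, ASC 0x1D (MISCOMPARE DURING
 * VERIFY OPERATION).  A sketch of that arithmetic with made-up
 * positions:
 */
#include <stdio.h>

static size_t
first_diff(const unsigned char *a, const unsigned char *b, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (a[i] != b[i])
			break;
	return (i);
}

int
main(void)
{
	unsigned char want[8] = "ABCDEFGH";	/* initiator data */
	unsigned char disk[8] = "ABCDEFxH";	/* medium contents */
	size_t chunk_len = sizeof(want);
	size_t rel_offset = 4096 + chunk_len;	/* assumed kern_rel_offset */
	size_t off = first_diff(want, disk, chunk_len);

	if (off < chunk_len)
		printf("miscompare at absolute byte %zu\n",
		    off + rel_offset - chunk_len);
	return (0);
}
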
+
+static int
 ctl_backend_ramdisk_move_done(union ctl_io *io)
 {
+	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
 #ifdef CTL_TIME_IO
 	struct bintime cur_bt;
 #endif
 
 	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
-	if ((io->io_hdr.port_status == 0)
-	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
-	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
-		io->io_hdr.status = CTL_SUCCESS;
-	else if ((io->io_hdr.port_status != 0)
-	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
-	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){
-		/*
-		 * For hardware error sense keys, the sense key
-		 * specific value is defined to be a retry count,
-		 * but we use it to pass back an internal FETD
-		 * error code.  XXX KDM  Hopefully the FETD is only
-		 * using 16 bits for an error code, since that's
-		 * all the space we have in the sks field.
-		 */
-		ctl_set_internal_failure(&io->scsiio,
-					 /*sks_valid*/ 1,
-					 /*retry_count*/
-					 io->io_hdr.port_status);
-	}
 #ifdef CTL_TIME_IO
-	getbintime(&cur_bt);
+	getbinuptime(&cur_bt);
 	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
 	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
+#endif
 	io->io_hdr.num_dmas++;
-#endif
-
 	if (io->scsiio.kern_sg_entries > 0)
 		free(io->scsiio.kern_data_ptr, M_RAMDISK);
-	ctl_done(io);
+	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
+	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
+		;
+	} else if (io->io_hdr.port_status != 0 &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+		    /*retry_count*/ io->io_hdr.port_status);
+	} else if (io->scsiio.kern_data_resid != 0 &&
+	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_invalid_field_ciu(&io->scsiio);
+	} else if ((io->io_hdr.port_status == 0) &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
+		if (ARGS(io)->flags & CTL_LLF_COMPARE) {
+			/* We have a data block ready for comparison. */
+			if (ctl_backend_ramdisk_cmp(io))
+				goto done;
+		}
+		if (ARGS(io)->len > PRIV(io)->len) {
+			mtx_lock(&be_lun->queue_lock);
+			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
+			    &io->io_hdr, links);
+			mtx_unlock(&be_lun->queue_lock);
+			taskqueue_enqueue(be_lun->io_taskqueue,
+			    &be_lun->io_task);
+			return (0);
+		}
+		ctl_set_success(&io->scsiio);
+	}
+done:
+	ctl_data_submit_done(io);
 	return(0);
 }
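
/*
 * Note: this completion hook implements chunked transfers:
 * PRIV(io)->len tracks how many blocks have been issued so far, and the
 * I/O is requeued on the backend taskqueue until ARGS(io)->len is
 * reached.  A minimal sketch of the pattern (names assumed; the driver
 * requeues instead of looping):
 */
#include <stdio.h>

#define MAX_CHUNK	32	/* assumed per-pass limit, in blocks */

static void
issue_chunk(unsigned start, unsigned nblocks)
{
	printf("transfer blocks [%u, %u)\n", start, start + nblocks);
}

int
main(void)
{
	unsigned total = 100;	/* plays the role of ARGS(io)->len */
	unsigned done = 0;	/* plays the role of PRIV(io)->len */

	while (done < total) {
		unsigned n = total - done;

		if (n > MAX_CHUNK)
			n = MAX_CHUNK;
		issue_chunk(done, n);
		done += n;	/* move_done advances this, then requeues */
	}
	return (0);
}
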
 
-static int
-ctl_backend_ramdisk_submit(union ctl_io *io)
+static void
+ctl_backend_ramdisk_compare(union ctl_io *io)
 {
-	struct ctl_lba_len lbalen;
-#ifdef CTL_RAMDISK_PAGES
-	struct ctl_sg_entry *sg_entries;
-	int len_filled;
-	int i;
-#endif
-	int num_sg_entries, len;
-	struct ctl_be_ramdisk_softc *softc;
-	struct ctl_be_lun *ctl_be_lun;
-	struct ctl_be_ramdisk_lun *be_lun;
+	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+	u_int lbas, len;
 
-	softc = &rd_softc;
-	
-	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
-		CTL_PRIV_BACKEND_LUN].ptr;
-	be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
+	lbas = ARGS(io)->len - PRIV(io)->len;
+	lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
+	len = lbas * cbe_lun->blocksize;
 
-	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
-	       sizeof(lbalen));
+	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
+	io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
+	io->scsiio.kern_data_len = len;
+	io->scsiio.kern_sg_entries = 0;
+	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	PRIV(io)->len += lbas;
+#ifdef CTL_TIME_IO
+	getbinuptime(&io->io_hdr.dma_start_bt);
+#endif
+	ctl_datamove(io);
+}
 
-	len = lbalen.len * ctl_be_lun->blocksize;
+static void
+ctl_backend_ramdisk_rw(union ctl_io *io)
+{
+	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+	struct ctl_sg_entry *sg_entries;
+	uint8_t *page;
+	uint64_t lba;
+	u_int i, len, lbaoff, lbas, sgs, off;
+	getpage_op_t op;
 
-	/*
-	 * Kick out the request if it's bigger than we can handle.
-	 */
-	if (len > softc->rd_size) {
-		ctl_set_internal_failure(&io->scsiio,
-					 /*sks_valid*/ 0,
-					 /*retry_count*/ 0);
-		ctl_done(io);
-		return (CTL_RETVAL_COMPLETE);
+	lba = ARGS(io)->lba + PRIV(io)->len;
+	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+	lbas = ARGS(io)->len - PRIV(io)->len;
+	lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
+	sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
+	off = lbaoff * cbe_lun->blocksize;
+	op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
+	if (sgs > 1) {
+		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
+		    sgs, M_RAMDISK, M_WAITOK);
+		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+		len = lbas * cbe_lun->blocksize;
+		for (i = 0; i < sgs; i++) {
+			page = ctl_backend_ramdisk_getpage(be_lun,
+			    (lba >> cbe_lun->pblockexp) + i, op);
+			if (page == P_UNMAPPED || page == P_ANCHORED) {
+				free(io->scsiio.kern_data_ptr, M_RAMDISK);
+nospc:
+				ctl_set_space_alloc_fail(&io->scsiio);
+				ctl_data_submit_done(io);
+				return;
+			}
+			sg_entries[i].addr = page + off;
+			sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
+			len -= sg_entries[i].len;
+			off = 0;
+		}
+	} else {
+		page = ctl_backend_ramdisk_getpage(be_lun,
+		    lba >> cbe_lun->pblockexp, op);
+		if (page == P_UNMAPPED || page == P_ANCHORED)
+			goto nospc;
+		sgs = 0;
+		io->scsiio.kern_data_ptr = page + off;
 	}
 
-	/*
-	 * Kick out the request if it's larger than the device size that
-	 * the user requested.
-	 */
-	if (((lbalen.lba * ctl_be_lun->blocksize) + len) > be_lun->size_bytes) {
-		ctl_set_lba_out_of_range(&io->scsiio);
-		ctl_done(io);
+	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
+	io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
+	io->scsiio.kern_sg_entries = sgs;
+	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	PRIV(io)->len += lbas;
+#ifdef CTL_TIME_IO
+	getbinuptime(&io->io_hdr.dma_start_bt);
+#endif
+	ctl_datamove(io);
+}
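
/*
 * Note: the read/write path splits (lba, len) across backing pages:
 * the page number is lba >> pblockexp and the offset within the page is
 * the low pblockexp bits (written above as lba & ~(UINT_MAX <<
 * pblockexp)), with one S/G entry per touched page and at most SGPP
 * pages per pass.  A sketch of the split with assumed geometry:
 */
#include <stdio.h>

#define BLOCKSIZE	512u
#define PBLOCKEXP	3u	/* assumed: 8 LUN blocks per backing page */
#define PBLOCKMUL	(1u << PBLOCKEXP)

int
main(void)
{
	unsigned long long lba = 21;		/* first block */
	unsigned lbas = 20;			/* block count */
	unsigned long long pn = lba >> PBLOCKEXP;
	unsigned lbaoff = lba & (PBLOCKMUL - 1);
	unsigned off = lbaoff * BLOCKSIZE;
	unsigned len = lbas * BLOCKSIZE;
	unsigned sgs = (lbas + lbaoff + PBLOCKMUL - 1) >> PBLOCKEXP;
	unsigned i;

	for (i = 0; i < sgs; i++) {
		unsigned chunk = PBLOCKMUL * BLOCKSIZE - off;

		if (chunk > len)
			chunk = len;
		printf("sg[%u]: page %llu off %u len %u\n",
		    i, pn + i, off, chunk);
		len -= chunk;
		off = 0;	/* later pages start at offset 0 */
	}
	return (0);
}
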
+
+static int
+ctl_backend_ramdisk_submit(union ctl_io *io)
+{
+	struct ctl_lba_len_flags *lbalen = ARGS(io);
+
+	if (lbalen->flags & CTL_LLF_VERIFY) {
+		ctl_set_success(&io->scsiio);
+		ctl_data_submit_done(io);
 		return (CTL_RETVAL_COMPLETE);
 	}
+	PRIV(io)->len = 0;
+	if (lbalen->flags & CTL_LLF_COMPARE)
+		ctl_backend_ramdisk_compare(io);
+	else
+		ctl_backend_ramdisk_rw(io);
+	return (CTL_RETVAL_COMPLETE);
+}
 
-#ifdef CTL_RAMDISK_PAGES
-	num_sg_entries = len >> PAGE_SHIFT;
-	if ((len & (PAGE_SIZE - 1)) != 0)
-		num_sg_entries++;
+static void
+ctl_backend_ramdisk_worker(void *context, int pending)
+{
+	struct ctl_be_ramdisk_lun *be_lun;
+	union ctl_io *io;
 
-	if (num_sg_entries > 1) {
-		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
-						  num_sg_entries, M_RAMDISK,
-						  M_WAITOK);
-		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
-		for (i = 0, len_filled = 0; i < num_sg_entries;
-		     i++, len_filled += PAGE_SIZE) {
-			sg_entries[i].addr = softc->ramdisk_pages[i];
-			sg_entries[i].len = ctl_min(PAGE_SIZE,
-						    len - len_filled);
+	be_lun = (struct ctl_be_ramdisk_lun *)context;
+	mtx_lock(&be_lun->queue_lock);
+	for (;;) {
+		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
+		if (io != NULL) {
+			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
+				      ctl_io_hdr, links);
+			mtx_unlock(&be_lun->queue_lock);
+			if (ARGS(io)->flags & CTL_LLF_COMPARE)
+				ctl_backend_ramdisk_compare(io);
+			else
+				ctl_backend_ramdisk_rw(io);
+			mtx_lock(&be_lun->queue_lock);
+			continue;
 		}
-	} else {
-#endif /* CTL_RAMDISK_PAGES */
+
 		/*
-		 * If this is less than 1 page, don't bother allocating a
-		 * scatter/gather list for it.  This saves time/overhead.
+		 * If we get here, there is no work left in the queues, so
+		 * just break out and let the task queue go to sleep.
 		 */
-		num_sg_entries = 0;
-#ifdef CTL_RAMDISK_PAGES
-		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
-#else
-		io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
-#endif
-#ifdef CTL_RAMDISK_PAGES
+		break;
 	}
-#endif
+	mtx_unlock(&be_lun->queue_lock);
+}
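
/*
 * Note: the worker is a standard drain loop: dequeue under the lock,
 * process with the lock dropped, re-lock and repeat until the queue is
 * empty.  A userspace sketch with a linked list and a pthread mutex
 * (the driver uses an STAILQ under an MTX_DEF mutex):
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item	*next;
	int		 id;
};

static struct item *head;
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static void
worker(void)
{
	struct item *it;

	pthread_mutex_lock(&qlock);
	for (;;) {
		it = head;
		if (it == NULL)
			break;		/* drained; go back to sleep */
		head = it->next;
		pthread_mutex_unlock(&qlock);
		printf("processing %d\n", it->id);	/* lock dropped */
		free(it);
		pthread_mutex_lock(&qlock);
	}
	pthread_mutex_unlock(&qlock);
}

int
main(void)
{
	for (int i = 3; i > 0; i--) {
		struct item *it = malloc(sizeof(*it));

		it->id = i;
		it->next = head;
		head = it;
	}
	worker();
	return (0);
}
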
 
-	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
-	io->scsiio.kern_data_len = len;
-	io->scsiio.kern_total_len = len;
-	io->scsiio.kern_rel_offset = 0;
-	io->scsiio.kern_data_resid = 0;
-	io->scsiio.kern_sg_entries = num_sg_entries;
-	io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
-#ifdef CTL_TIME_IO
-	getbintime(&io->io_hdr.dma_start_bt);
-#endif
-	ctl_datamove(io);
+static int
+ctl_backend_ramdisk_gls(union ctl_io *io)
+{
+	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+	struct scsi_get_lba_status_data *data;
+	uint8_t *page;
+	u_int lbaoff;
 
+	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
+	scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
+	lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
+	scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
+	page = ctl_backend_ramdisk_getpage(be_lun,
+	    ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
+	if (page == P_UNMAPPED)
+		data->descr[0].status = 1;
+	else if (page == P_ANCHORED)
+		data->descr[0].status = 2;
+	else
+		data->descr[0].status = 0;
+	ctl_config_read_done(io);
 	return (CTL_RETVAL_COMPLETE);
 }
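
/*
 * Note: the GET LBA STATUS descriptor's provisioning status is, per
 * SBC-3: 0 = mapped, 1 = deallocated, 2 = anchored.  The lookup above
 * uses GP_OTHER so it never allocates.  Trivial sketch of the mapping
 * (sentinel values invented as before):
 */
#include <stdint.h>
#include <stdio.h>

#define P_UNMAPPED	((void *)(uintptr_t)1)	/* assumed sentinel */
#define P_ANCHORED	((void *)(uintptr_t)2)	/* assumed sentinel */

static int
lba_status(const void *page)
{
	if (page == P_UNMAPPED)
		return (1);	/* deallocated */
	if (page == P_ANCHORED)
		return (2);	/* anchored */
	return (0);		/* mapped */
}

int
main(void)
{
	printf("%d %d %d\n", lba_status("x"), lba_status(P_UNMAPPED),
	    lba_status(P_ANCHORED));
	return (0);
}
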
 
 static int
+ctl_backend_ramdisk_config_read(union ctl_io *io)
+{
+	int retval = 0;
+
+	switch (io->scsiio.cdb[0]) {
+	case SERVICE_ACTION_IN:
+		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
+			retval = ctl_backend_ramdisk_gls(io);
+			break;
+		}
+		ctl_set_invalid_field(&io->scsiio,
+				      /*sks_valid*/ 1,
+				      /*command*/ 1,
+				      /*field*/ 1,
+				      /*bit_valid*/ 1,
+				      /*bit*/ 4);
+		ctl_config_read_done(io);
+		retval = CTL_RETVAL_COMPLETE;
+		break;
+	default:
+		ctl_set_invalid_opcode(&io->scsiio);
+		ctl_config_read_done(io);
+		retval = CTL_RETVAL_COMPLETE;
+		break;
+	}
+	return (retval);
+}
+
+static void
+ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
+    int anchor)
+{
+	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+	uint8_t *page;
+	uint64_t p, lp;
+	u_int lbaoff;
+	getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;
+
+	/* Partially zero first partial page. */
+	p = lba >> cbe_lun->pblockexp;
+	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+	if (lbaoff != 0) {
+		page = ctl_backend_ramdisk_getpage(be_lun, p, op);
+		if (page != P_UNMAPPED && page != P_ANCHORED) {
+			memset(page + lbaoff * cbe_lun->blocksize, 0,
+			    min(len, be_lun->pblockmul - lbaoff) *
+			    cbe_lun->blocksize);
+		}
+		p++;
+	}
+
+	/* Partially zero last partial page. */
+	lp = (lba + len) >> cbe_lun->pblockexp;
+	lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
+	if (p <= lp && lbaoff != 0) {
+		page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
+		if (page != P_UNMAPPED && page != P_ANCHORED)
+			memset(page, 0, lbaoff * cbe_lun->blocksize);
+	}
+
+	/* Delete remaining full pages. */
+	if (anchor) {
+		for (; p < lp; p++)
+			ctl_backend_ramdisk_anchorpage(be_lun, p);
+	} else {
+		for (; p < lp; p++)
+			ctl_backend_ramdisk_unmappage(be_lun, p);
+	}
+}
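
/*
 * Note: delete() splits [lba, lba + len) into a partial head page,
 * whole middle pages, and a partial tail page; partial pages are zeroed
 * in place while whole pages are unmapped (or anchored).  A sketch of
 * the split arithmetic with assumed geometry (the head count is
 * additionally capped at len when the whole range fits in one page):
 */
#include <stdio.h>

#define PBLOCKEXP	3ULL	/* assumed: 8 LUN blocks per page */
#define PBLOCKMUL	(1ULL << PBLOCKEXP)

int
main(void)
{
	unsigned long long lba = 5, len = 30;	/* blocks [5, 35) */
	unsigned long long p = lba >> PBLOCKEXP;
	unsigned long long lp = (lba + len) >> PBLOCKEXP;
	unsigned long long headoff = lba & (PBLOCKMUL - 1);
	unsigned long long tailoff = (lba + len) & (PBLOCKMUL - 1);

	if (headoff != 0) {
		printf("zero page %llu blocks [%llu, %llu)\n",
		    p, headoff, PBLOCKMUL);
		p++;		/* head page stays allocated */
	}
	if (p <= lp && tailoff != 0)
		printf("zero page %llu blocks [0, %llu)\n", lp, tailoff);
	for (; p < lp; p++)
		printf("unmap page %llu\n", p);
	return (0);
}
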
+
+static void
+ctl_backend_ramdisk_ws(union ctl_io *io)
+{
+	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+	struct ctl_lba_len_flags *lbalen = ARGS(io);
+	uint8_t *page;
+	uint64_t lba;
+	u_int lbaoff, lbas;
+
+	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
+		ctl_set_invalid_field(&io->scsiio,
+				      /*sks_valid*/ 1,
+				      /*command*/ 1,
+				      /*field*/ 1,
+				      /*bit_valid*/ 0,
+				      /*bit*/ 0);
+		ctl_config_write_done(io);
+		return;
+	}
+	if (lbalen->flags & SWS_UNMAP) {
+		ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
+		    (lbalen->flags & SWS_ANCHOR) != 0);
+		ctl_set_success(&io->scsiio);
+		ctl_config_write_done(io);
+		return;
+	}
+
+	for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
+		page = ctl_backend_ramdisk_getpage(be_lun,
+		    lba >> cbe_lun->pblockexp, GP_WRITE);
+		if (page == P_UNMAPPED || page == P_ANCHORED) {
+			ctl_set_space_alloc_fail(&io->scsiio);
+			ctl_data_submit_done(io);
+			return;
+		}
+		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+		page += lbaoff * cbe_lun->blocksize;
+		if (lbalen->flags & SWS_NDOB) {
+			memset(page, 0, cbe_lun->blocksize);
+		} else {
+			memcpy(page, io->scsiio.kern_data_ptr,
+			    cbe_lun->blocksize);
+		}
+		if (lbalen->flags & SWS_LBDATA)
+			scsi_ulto4b(lba, page);
+	}
+	ctl_set_success(&io->scsiio);
+	ctl_config_write_done(io);
+}
+
+static void
+ctl_backend_ramdisk_unmap(union ctl_io *io)
+{
+	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+	struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
+	struct scsi_unmap_desc *buf, *end;
+
+	if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
+		ctl_set_invalid_field(&io->scsiio,
+				      /*sks_valid*/ 0,
+				      /*command*/ 0,
+				      /*field*/ 0,
+				      /*bit_valid*/ 0,
+				      /*bit*/ 0);
+		ctl_config_write_done(io);
+		return;
+	}
+
+	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
+	end = buf + ptrlen->len / sizeof(*buf);
+	for (; buf < end; buf++) {
+		ctl_backend_ramdisk_delete(cbe_lun,
+		    scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
+		    (ptrlen->flags & SU_ANCHOR) != 0);
+	}
+
+	ctl_set_success(&io->scsiio);
+	ctl_config_write_done(io);
+}
+
+static int
+ctl_backend_ramdisk_config_write(union ctl_io *io)
+{
+	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+	int retval = 0;
+
+	switch (io->scsiio.cdb[0]) {
+	case SYNCHRONIZE_CACHE:
+	case SYNCHRONIZE_CACHE_16:
+		/* We have no cache to flush. */
+		ctl_set_success(&io->scsiio);
+		ctl_config_write_done(io);
+		break;
+	case START_STOP_UNIT: {
+		struct scsi_start_stop_unit *cdb;
+
+		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
+		if ((cdb->how & SSS_PC_MASK) != 0) {
+			ctl_set_success(&io->scsiio);
+			ctl_config_write_done(io);
+			break;
+		}
+		if (cdb->how & SSS_START) {
+			if (cdb->how & SSS_LOEJ)
+				ctl_lun_has_media(cbe_lun);
+			ctl_start_lun(cbe_lun);
+		} else {
+			ctl_stop_lun(cbe_lun);
+			if (cdb->how & SSS_LOEJ)
+				ctl_lun_ejected(cbe_lun);
+		}
+		ctl_set_success(&io->scsiio);
+		ctl_config_write_done(io);
+		break;
+	}
+	case PREVENT_ALLOW:
+		ctl_set_success(&io->scsiio);
+		ctl_config_write_done(io);
+		break;
+	case WRITE_SAME_10:
+	case WRITE_SAME_16:
+		ctl_backend_ramdisk_ws(io);
+		break;
+	case UNMAP:
+		ctl_backend_ramdisk_unmap(io);
+		break;
+	default:
+		ctl_set_invalid_opcode(&io->scsiio);
+		ctl_config_write_done(io);
+		retval = CTL_RETVAL_COMPLETE;
+		break;
+	}
+
+	return (retval);
+}
+
+static uint64_t
+ctl_backend_ramdisk_lun_attr(void *arg, const char *attrname)
+{
+	struct ctl_be_ramdisk_lun *be_lun = arg;
+	uint64_t		val;
+
+	val = UINT64_MAX;
+	if (be_lun->cap_bytes == 0)
+		return (val);
+	sx_slock(&be_lun->page_lock);
+	if (strcmp(attrname, "blocksused") == 0) {
+		val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
+	} else if (strcmp(attrname, "blocksavail") == 0) {
+		val = (be_lun->cap_bytes - be_lun->cap_used) /
+		    be_lun->cbe_lun.blocksize;
+	}
+	sx_sunlock(&be_lun->page_lock);
+	return (val);
+}
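
/*
 * Note: the attribute hook exports thin-provisioning counters:
 * "blocksused" is cap_used scaled to LUN blocks and "blocksavail" is
 * the slack remaining under the "capacity" option.  Sketch of the
 * arithmetic with sample numbers:
 */
#include <stdio.h>

int
main(void)
{
	unsigned long long cap_bytes = 1ULL << 30;	/* capacity option */
	unsigned long long cap_used = 300ULL << 20;	/* charged pages */
	unsigned blocksize = 512;

	printf("blocksused=%llu blocksavail=%llu\n",
	    cap_used / blocksize, (cap_bytes - cap_used) / blocksize);
	return (0);
}
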
+
+static int
 ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
 			  int flag, struct thread *td)
 {
-	struct ctl_be_ramdisk_softc *softc;
+	struct ctl_be_ramdisk_softc *softc = &rd_softc;
+	struct ctl_lun_req *lun_req;
 	int retval;
 
 	retval = 0;
-	softc = &rd_softc;
-
 	switch (cmd) {
-	case CTL_LUN_REQ: {
-		struct ctl_lun_req *lun_req;
-
+	case CTL_LUN_REQ:
 		lun_req = (struct ctl_lun_req *)addr;
-
 		switch (lun_req->reqtype) {
 		case CTL_LUNREQ_CREATE:
-			retval = ctl_backend_ramdisk_create(softc, lun_req,
-							    /*do_wait*/ 1);
+			retval = ctl_backend_ramdisk_create(softc, lun_req);
 			break;
 		case CTL_LUNREQ_RM:
 			retval = ctl_backend_ramdisk_rm(softc, lun_req);
@@ -377,7 +864,6 @@
 			break;
 		}
 		break;
-	}
 	default:
 		retval = ENOTTY;
 		break;
@@ -394,20 +880,13 @@
 	struct ctl_lun_rm_params *params;
 	int retval;
 
-
-	retval = 0;
 	params = &req->reqdata.rm;
-
-	be_lun = NULL;
-
 	mtx_lock(&softc->lock);
-
 	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
-		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
+		if (be_lun->cbe_lun.lun_id == params->lun_id)
 			break;
 	}
 	mtx_unlock(&softc->lock);
-
 	if (be_lun == NULL) {
 		snprintf(req->error_str, sizeof(req->error_str),
 			 "%s: LUN %u is not managed by the ramdisk backend",
@@ -415,8 +894,7 @@
 		goto bailout_error;
 	}
 
-	retval = ctl_disable_lun(&be_lun->ctl_be_lun);
-
+	retval = ctl_disable_lun(&be_lun->cbe_lun);
 	if (retval != 0) {
 		snprintf(req->error_str, sizeof(req->error_str),
 			 "%s: error %d returned from ctl_disable_lun() for "
@@ -436,19 +914,21 @@
 	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
 	mtx_unlock(&softc->lock);
 
-	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
+	retval = ctl_invalidate_lun(&be_lun->cbe_lun);
 	if (retval != 0) {
 		snprintf(req->error_str, sizeof(req->error_str),
 			 "%s: error %d returned from ctl_invalidate_lun() for "
 			 "LUN %d", __func__, retval, params->lun_id);
+		mtx_lock(&softc->lock);
+		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
+		mtx_unlock(&softc->lock);
 		goto bailout_error;
 	}
 
 	mtx_lock(&softc->lock);
-
 	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
 		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
- 		if (retval == EINTR)   
+		if (retval == EINTR)
 			break;
 	}
 	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
@@ -467,142 +947,212 @@
 
 	mtx_unlock(&softc->lock);
 
-	if (retval == 0)
+	if (retval == 0) {
+		taskqueue_drain_all(be_lun->io_taskqueue);
+		taskqueue_free(be_lun->io_taskqueue);
+		ctl_free_opts(&be_lun->cbe_lun.options);
+		free(be_lun->zero_page, M_RAMDISK);
+		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
+		sx_destroy(&be_lun->page_lock);
+		mtx_destroy(&be_lun->queue_lock);
 		free(be_lun, M_RAMDISK);
+	}
 
 	req->status = CTL_LUN_OK;
-
 	return (retval);
 
 bailout_error:
-
-	/*
-	 * Don't leave the waiting flag set.
-	 */
-	mtx_lock(&softc->lock);
-	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
-	mtx_unlock(&softc->lock);
-
 	req->status = CTL_LUN_ERROR;
-
 	return (0);
 }
 
 static int
 ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
-			   struct ctl_lun_req *req, int do_wait)
+			   struct ctl_lun_req *req)
 {
 	struct ctl_be_ramdisk_lun *be_lun;
+	struct ctl_be_lun *cbe_lun;
 	struct ctl_lun_create_params *params;
-	uint32_t blocksize;
+	char *value;
 	char tmpstr[32];
+	uint64_t t;
 	int retval;
 
 	retval = 0;
 	params = &req->reqdata.create;
-	if (params->blocksize_bytes != 0)
-		blocksize = params->blocksize_bytes;
+
+	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
+	cbe_lun = &be_lun->cbe_lun;
+	cbe_lun->be_lun = be_lun;
+	be_lun->params = req->reqdata.create;
+	be_lun->softc = softc;
+	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
+	ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
+
+	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
+		cbe_lun->lun_type = params->device_type;
 	else
-		blocksize = 512;
+		cbe_lun->lun_type = T_DIRECT;
+	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
+	cbe_lun->flags = 0;
+	value = ctl_get_opt(&cbe_lun->options, "ha_role");
+	if (value != NULL) {
+		if (strcmp(value, "primary") == 0)
+			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
+		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
 
-	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
-			M_WAITOK : M_NOWAIT));
-
-	if (be_lun == NULL) {
+	be_lun->pblocksize = PAGE_SIZE;
+	value = ctl_get_opt(&cbe_lun->options, "pblocksize");
+	if (value != NULL) {
+		ctl_expand_number(value, &t);
+		be_lun->pblocksize = t;
+	}
+	if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
 		snprintf(req->error_str, sizeof(req->error_str),
-			 "%s: error allocating %zd bytes", __func__,
-			 sizeof(*be_lun));
+			 "%s: unsupported pblocksize %u", __func__,
+			 be_lun->pblocksize);
 		goto bailout_error;
 	}
 
-	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
-		be_lun->ctl_be_lun.lun_type = params->device_type;
-	else
-		be_lun->ctl_be_lun.lun_type = T_DIRECT;
-
-	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
-
-		if (params->lun_size_bytes < blocksize) {
+	if (cbe_lun->lun_type == T_DIRECT ||
+	    cbe_lun->lun_type == T_CDROM) {
+		if (params->blocksize_bytes != 0)
+			cbe_lun->blocksize = params->blocksize_bytes;
+		else if (cbe_lun->lun_type == T_CDROM)
+			cbe_lun->blocksize = 2048;
+		else
+			cbe_lun->blocksize = 512;
+		be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
+		if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
 			snprintf(req->error_str, sizeof(req->error_str),
+				 "%s: pblocksize %u not exp2 of blocksize %u",
+				 __func__,
+				 be_lun->pblocksize, cbe_lun->blocksize);
+			goto bailout_error;
+		}
+		if (params->lun_size_bytes < cbe_lun->blocksize) {
+			snprintf(req->error_str, sizeof(req->error_str),
 				 "%s: LUN size %ju < blocksize %u", __func__,
-				 params->lun_size_bytes, blocksize);
+				 params->lun_size_bytes, cbe_lun->blocksize);
 			goto bailout_error;
 		}
-
-		be_lun->size_blocks = params->lun_size_bytes / blocksize;
-		be_lun->size_bytes = be_lun->size_blocks * blocksize;
-
-		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
+		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
+		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
+		be_lun->indir = 0;
+		t = be_lun->size_bytes / be_lun->pblocksize;
+		while (t > 1) {
+			t /= PPP;
+			be_lun->indir++;
+		}
+		cbe_lun->maxlba = be_lun->size_blocks - 1;
+		cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
+		cbe_lun->pblockoff = 0;
+		cbe_lun->ublockexp = cbe_lun->pblockexp;
+		cbe_lun->ublockoff = 0;
+		cbe_lun->atomicblock = be_lun->pblocksize;
+		cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
+		value = ctl_get_opt(&cbe_lun->options, "capacity");
+		if (value != NULL)
+			ctl_expand_number(value, &be_lun->cap_bytes);
 	} else {
-		be_lun->ctl_be_lun.maxlba = 0;
-		blocksize = 0;
-		be_lun->size_bytes = 0;
-		be_lun->size_blocks = 0;
+		be_lun->pblockmul = 1;
+		cbe_lun->pblockexp = 0;
 	}
 
-	be_lun->ctl_be_lun.blocksize = blocksize;
-
 	/* Tell the user the blocksize we ended up using */
-	params->blocksize_bytes = blocksize;
-
-	/* Tell the user the exact size we ended up using */
+	params->blocksize_bytes = cbe_lun->blocksize;
 	params->lun_size_bytes = be_lun->size_bytes;
 
-	be_lun->softc = softc;
+	value = ctl_get_opt(&cbe_lun->options, "unmap");
+	if (value == NULL || strcmp(value, "off") != 0)
+		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
+	value = ctl_get_opt(&cbe_lun->options, "readonly");
+	if (value != NULL) {
+		if (strcmp(value, "on") == 0)
+			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
+	} else if (cbe_lun->lun_type != T_DIRECT)
+		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
+	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
+	value = ctl_get_opt(&cbe_lun->options, "serseq");
+	if (value != NULL && strcmp(value, "on") == 0)
+		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
+	else if (value != NULL && strcmp(value, "read") == 0)
+		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
+	else if (value != NULL && strcmp(value, "off") == 0)
+		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
 
-	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
-	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
-	be_lun->ctl_be_lun.be_lun = be_lun;
-
 	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
-		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
-		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
+		cbe_lun->req_lun_id = params->req_lun_id;
+		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
 	} else
-		be_lun->ctl_be_lun.req_lun_id = 0;
+		cbe_lun->req_lun_id = 0;
 
-	be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
-	be_lun->ctl_be_lun.lun_config_status =
-		ctl_backend_ramdisk_lun_config_status;
-	be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
+	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
+	cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
+	cbe_lun->be = &ctl_be_ramdisk_driver;
 	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
 		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
 			 softc->num_luns);
-		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
-			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
-			sizeof(tmpstr)));
+		strncpy((char *)cbe_lun->serial_num, tmpstr,
+			MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));
 
 		/* Tell the user what we used for a serial number */
 		strncpy((char *)params->serial_num, tmpstr,
-			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
+			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
 	} else { 
-		strncpy((char *)be_lun->ctl_be_lun.serial_num,
-			params->serial_num,
-			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
-			sizeof(params->serial_num)));
+		strncpy((char *)cbe_lun->serial_num, params->serial_num,
+			MIN(sizeof(cbe_lun->serial_num),
+			    sizeof(params->serial_num)));
 	}
 	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
 		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
-		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
-			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
-			sizeof(tmpstr)));
+		strncpy((char *)cbe_lun->device_id, tmpstr,
+			MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));
 
 		/* Tell the user what we used for a device ID */
 		strncpy((char *)params->device_id, tmpstr,
-			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
+			MIN(sizeof(params->device_id), sizeof(tmpstr)));
 	} else {
-		strncpy((char *)be_lun->ctl_be_lun.device_id,
-			params->device_id,
-			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
-				sizeof(params->device_id)));
+		strncpy((char *)cbe_lun->device_id, params->device_id,
+			MIN(sizeof(cbe_lun->device_id),
+			    sizeof(params->device_id)));
 	}
 
+	STAILQ_INIT(&be_lun->cont_queue);
+	sx_init(&be_lun->page_lock, "cram page lock");
+	if (be_lun->cap_bytes == 0) {
+		be_lun->indir = 0;
+		be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
+	}
+	be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
+	    M_WAITOK|M_ZERO);
+	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
+	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
+	    be_lun);
+
+	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
+	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
+	if (be_lun->io_taskqueue == NULL) {
+		snprintf(req->error_str, sizeof(req->error_str),
+			 "%s: Unable to create taskqueue", __func__);
+		goto bailout_error;
+	}
+
+	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
+					 /*num threads*/1,
+					 /*priority*/PWAIT,
+					 /*thread name*/
+					 "%s taskq", be_lun->lunname);
+	if (retval != 0)
+		goto bailout_error;
+
 	mtx_lock(&softc->lock);
 	softc->num_luns++;
 	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
-
 	mtx_unlock(&softc->lock);
 
-	retval = ctl_add_lun(&be_lun->ctl_be_lun);
+	retval = ctl_add_lun(&be_lun->cbe_lun);
 	if (retval != 0) {
 		mtx_lock(&softc->lock);
 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
@@ -616,9 +1166,6 @@
 		goto bailout_error;
 	}
 
-	if (do_wait == 0)
-		return (retval);
-
 	mtx_lock(&softc->lock);
 
 	/*
@@ -644,18 +1191,25 @@
 		mtx_unlock(&softc->lock);
 		goto bailout_error;
 	} else {
-		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
+		params->req_lun_id = cbe_lun->lun_id;
 	}
 	mtx_unlock(&softc->lock);
 
 	req->status = CTL_LUN_OK;
-
 	return (retval);
 
 bailout_error:
 	req->status = CTL_LUN_ERROR;
-	free(be_lun, M_RAMDISK);
-
+	if (be_lun != NULL) {
+		if (be_lun->io_taskqueue != NULL)
+			taskqueue_free(be_lun->io_taskqueue);
+		ctl_free_opts(&cbe_lun->options);
+		free(be_lun->zero_page, M_RAMDISK);
+		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
+		sx_destroy(&be_lun->page_lock);
+		mtx_destroy(&be_lun->queue_lock);
+		free(be_lun, M_RAMDISK);
+	}
 	return (retval);
 }
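
/*
 * Note: LUN creation derives the page geometry: pblockmul =
 * pblocksize / blocksize must be a power of two, pblockexp =
 * log2(pblockmul), and the radix depth comes from repeatedly dividing
 * the data-page count by PPP, as in the loop above.  Sketch of the
 * depth computation (PPP value assumed as before):
 */
#include <stdio.h>

#define PPP	512ULL		/* assumed pointers per directory page */

int
main(void)
{
	unsigned long long size_bytes = 1ULL << 40;	/* 1 TB LUN */
	unsigned long long pblocksize = 4096;
	unsigned long long t = size_bytes / pblocksize;	/* data pages */
	int indir = 0;

	while (t > 1) {
		t /= PPP;
		indir++;
	}
	/* 2^28 pages need four levels, since 512^3 < 2^28 <= 512^4. */
	printf("indir=%d\n", indir);
	return (0);
}
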
 
@@ -664,20 +1218,20 @@
 		       struct ctl_lun_req *req)
 {
 	struct ctl_be_ramdisk_lun *be_lun;
+	struct ctl_be_lun *cbe_lun;
 	struct ctl_lun_modify_params *params;
+	char *value;
 	uint32_t blocksize;
+	int wasprim;
 
 	params = &req->reqdata.modify;
 
-	be_lun = NULL;
-
 	mtx_lock(&softc->lock);
 	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
-		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
+		if (be_lun->cbe_lun.lun_id == params->lun_id)
 			break;
 	}
 	mtx_unlock(&softc->lock);
-
 	if (be_lun == NULL) {
 		snprintf(req->error_str, sizeof(req->error_str),
 			 "%s: LUN %u is not managed by the ramdisk backend",
@@ -684,45 +1238,50 @@
 			 __func__, params->lun_id);
 		goto bailout_error;
 	}
+	cbe_lun = &be_lun->cbe_lun;
 
-	if (params->lun_size_bytes == 0) {
-		snprintf(req->error_str, sizeof(req->error_str),
-			"%s: LUN size \"auto\" not supported "
-			"by the ramdisk backend", __func__);
-		goto bailout_error;
+	if (params->lun_size_bytes != 0)
+		be_lun->params.lun_size_bytes = params->lun_size_bytes;
+	ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
+
+	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
+	value = ctl_get_opt(&cbe_lun->options, "ha_role");
+	if (value != NULL) {
+		if (strcmp(value, "primary") == 0)
+			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+		else
+			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
+	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
+		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+	else
+		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
+	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
+		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
+			ctl_lun_primary(cbe_lun);
+		else
+			ctl_lun_secondary(cbe_lun);
 	}
 
-	blocksize = be_lun->ctl_be_lun.blocksize;
-
-	if (params->lun_size_bytes < blocksize) {
+	blocksize = be_lun->cbe_lun.blocksize;
+	if (be_lun->params.lun_size_bytes < blocksize) {
 		snprintf(req->error_str, sizeof(req->error_str),
 			"%s: LUN size %ju < blocksize %u", __func__,
-			params->lun_size_bytes, blocksize);
+			be_lun->params.lun_size_bytes, blocksize);
 		goto bailout_error;
 	}
-
-	be_lun->size_blocks = params->lun_size_bytes / blocksize;
+	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
 	be_lun->size_bytes = be_lun->size_blocks * blocksize;
+	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
+	ctl_lun_capacity_changed(&be_lun->cbe_lun);
 
-	/*
-	 * The maximum LBA is the size - 1.
-	 *
-	 * XXX: Note that this field is being updated without locking,
-	 * 	which might cause problems on 32-bit architectures.
-	 */
-	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
-	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);
-
 	/* Tell the user the exact size we ended up using */
 	params->lun_size_bytes = be_lun->size_bytes;
 
 	req->status = CTL_LUN_OK;
-
 	return (0);
 
 bailout_error:
 	req->status = CTL_LUN_ERROR;
-
 	return (0);
 }
 
@@ -738,18 +1297,15 @@
 	do_free = 0;
 
 	mtx_lock(&softc->lock);
-
 	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
-
 	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
 		wakeup(lun);
 	} else {
-		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
+		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
 			      links);
 		softc->num_luns--;
 		do_free = 1;
 	}
-
 	mtx_unlock(&softc->lock);
 
 	if (do_free != 0)
@@ -776,9 +1332,9 @@
 		/*
 		 * We successfully added the LUN, attempt to enable it.
 		 */
-		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
+		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
 			printf("%s: ctl_enable_lun() failed!\n", __func__);
-			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
+			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
 				printf("%s: ctl_invalidate_lun() failed!\n",
 				       __func__);
 			}
@@ -806,90 +1362,3 @@
 	}
 	mtx_unlock(&softc->lock);
 }
-
-static int
-ctl_backend_ramdisk_config_write(union ctl_io *io)
-{
-	struct ctl_be_ramdisk_softc *softc;
-	int retval;
-
-	retval = 0;
-	softc = &rd_softc;
-
-	switch (io->scsiio.cdb[0]) {
-	case SYNCHRONIZE_CACHE:
-	case SYNCHRONIZE_CACHE_16:
-		/*
-		 * The upper level CTL code will filter out any CDBs with
-		 * the immediate bit set and return the proper error.  It
-		 * will also not allow a sync cache command to go to a LUN
-		 * that is powered down.
-		 *
-		 * We don't really need to worry about what LBA range the
-		 * user asked to be synced out.  When they issue a sync
-		 * cache command, we'll sync out the whole thing.
-		 *
-		 * This is obviously just a stubbed out implementation.
-		 * The real implementation will be in the RAIDCore/CTL
-		 * interface, and can only really happen when RAIDCore
-		 * implements a per-array cache sync.
-		 */
-		ctl_set_success(&io->scsiio);
-		ctl_config_write_done(io);
-		break;
-	case START_STOP_UNIT: {
-		struct scsi_start_stop_unit *cdb;
-		struct ctl_be_lun *ctl_be_lun;
-		struct ctl_be_ramdisk_lun *be_lun;
-
-		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
-
-		ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
-			CTL_PRIV_BACKEND_LUN].ptr;
-		be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
-
-		if (cdb->how & SSS_START)
-			retval = ctl_start_lun(ctl_be_lun);
-		else {
-			retval = ctl_stop_lun(ctl_be_lun);
-#ifdef NEEDTOPORT
-			if ((retval == 0)
-			 && (cdb->byte2 & SSS_ONOFFLINE))
-				retval = ctl_lun_offline(ctl_be_lun);
-#endif
-		}
-
-		/*
-		 * In general, the above routines should not fail.  They
-		 * just set state for the LUN.  So we've got something
-		 * pretty wrong here if we can't start or stop the LUN.
-		 */
-		if (retval != 0) {
-			ctl_set_internal_failure(&io->scsiio,
-						 /*sks_valid*/ 1,
-						 /*retry_count*/ 0xf051);
-			retval = CTL_RETVAL_COMPLETE;
-		} else {
-			ctl_set_success(&io->scsiio);
-		}
-		ctl_config_write_done(io);
-		break;
-	}
-	default:
-		ctl_set_invalid_opcode(&io->scsiio);
-		ctl_config_write_done(io);
-		retval = CTL_RETVAL_COMPLETE;
-		break;
-	}
-
-	return (retval);
-}
-
-static int
-ctl_backend_ramdisk_config_read(union ctl_io *io)
-{
-	/*
-	 * XXX KDM need to implement!!
-	 */
-	return (0);
-}

Modified: trunk/sys/cam/ctl/ctl_cmd_table.c
===================================================================
--- trunk/sys/cam/ctl/ctl_cmd_table.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_cmd_table.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,5 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003, 2004, 2005, 2009 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2015 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,8 +29,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_cmd_table.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_cmd_table.c#4 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_cmd_table.c 312579 2017-01-21 08:36:05Z mav $
  */
 /*
  * CAM Target Layer command table.
@@ -52,22 +54,851 @@
 #include <cam/ctl/ctl.h>
 #include <cam/ctl/ctl_frontend.h>
 #include <cam/ctl/ctl_backend.h>
-#include <cam/ctl/ctl_frontend_internal.h>
 #include <cam/ctl/ctl_ioctl.h>
 #include <cam/ctl/ctl_ha.h>
 #include <cam/ctl/ctl_private.h>
 
 /*
- * Whenever support for a new command is added, it should be added to this
- * table.
+ * Whenever support for a new command is added, it should be added to these
+ * tables.
  */
-struct ctl_cmd_entry ctl_cmd_table[] =
+
+/* 3B WRITE BUFFER */
+const struct ctl_cmd_entry ctl_cmd_table_3b[32] =
 {
+/* 00 WRITE BUFFER HDR DATA */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 01 WRITE BUFFER VENDOR */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 WRITE BUFFER DATA */
+{ctl_write_buffer, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
+				      CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				      CTL_CMD_FLAG_OK_ON_STANDBY |
+				      CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE,
+ 10, {0x02, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 03 WRITE BUFFER DESCR */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 04 WRITE BUFFER DOWNLOAD */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 05 WRITE BUFFER DOWNLOAD SAVE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A WRITE BUFFER ECHO */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0B WRITE BUFFER ECHO DESCRIPTOR */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 10 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 11 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 12 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 13 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 14 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 15 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 16 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 17 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 18 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 19 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1C WRITE BUFFER ERROR HISTORY */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1d-1f */
+};
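
/*
 * Note: opcodes with service actions are dispatched through 32-entry
 * subtables like the one above, apparently indexed by the low five bits
 * of CDB byte 1 (the MODE field for WRITE BUFFER, the SERVICE ACTION
 * field elsewhere); a NULL handler marks the action unsupported.  A
 * minimal dispatch sketch under that assumption:
 */
#include <stdint.h>
#include <stdio.h>

struct cmd_entry {
	void		(*handler)(void);
	const char	*name;
};

static void
write_buffer_data(void)
{
	printf("WRITE BUFFER, mode DATA\n");
}

static const struct cmd_entry table_3b[32] = {
	[0x02] = { write_buffer_data, "WRITE BUFFER DATA" },
};

int
main(void)
{
	uint8_t cdb[10] = { 0x3b, 0x02 };	/* opcode, MODE field */
	const struct cmd_entry *e = &table_3b[cdb[1] & 0x1f];

	if (e->handler != NULL)
		e->handler();
	else
		printf("unsupported service action\n");
	return (0);
}
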
+
+/* 3C READ BUFFER(10) */
+const struct ctl_cmd_entry ctl_cmd_table_3c[32] =
+{
+/* 00 READ BUFFER(10) HDR DATA */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 01 READ BUFFER(10) VENDOR */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 READ BUFFER(10) DATA */
+{ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+				     CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				     CTL_CMD_FLAG_OK_ON_STANDBY |
+				     CTL_FLAG_DATA_IN |
+				     CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_NONE,
+ 10, {0x02, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 03 READ BUFFER(10) DESCR */
+{ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+				     CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				     CTL_CMD_FLAG_OK_ON_STANDBY |
+				     CTL_FLAG_DATA_IN |
+				     CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_NONE,
+ 10, {0x03, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 04 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 05 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A READ BUFFER(10) ECHO */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0B READ BUFFER(10) ECHO DESCRIPTOR */
+{ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+				     CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				     CTL_CMD_FLAG_OK_ON_STANDBY |
+				     CTL_FLAG_DATA_IN |
+				     CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_NONE,
+ 10, {0x0b, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 0C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 10 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 11 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 12 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 13 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 14 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 15 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 16 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 17 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 18 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 19 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1C READ BUFFER(10) ERROR HISTORY */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1d-1f */
+};
+
+/* 5E PERSISTENT RESERVE IN */
+const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
+{
+/* 00 READ KEYS */
+{ctl_persistent_reserve_in, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+						CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_FLAG_DATA_IN |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x00, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
+
+/* 01 READ RESERVATION */
+{ctl_persistent_reserve_in, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+						CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_FLAG_DATA_IN |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x01, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
+
+/* 02 REPORT CAPABILITIES */
+{ctl_persistent_reserve_in, CTL_SERIDX_INQ, CTL_CMD_FLAG_ALLOW_ON_RESV |
+					    CTL_CMD_FLAG_OK_ON_BOTH |
+					    CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+					    CTL_CMD_FLAG_OK_ON_STANDBY |
+					    CTL_FLAG_DATA_IN |
+					    CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x02, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
+
+/* 03 READ FULL STATUS */
+{ctl_persistent_reserve_in, CTL_SERIDX_INQ, CTL_CMD_FLAG_ALLOW_ON_RESV |
+					    CTL_CMD_FLAG_OK_ON_BOTH |
+					    CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+					    CTL_CMD_FLAG_OK_ON_STANDBY |
+					    CTL_FLAG_DATA_IN |
+					    CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x03, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
+
+/* 04-1f */
+};
+
+/* 5F PERSISTENT RESERVE OUT */
+const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
+{
+/* 00 REGISTER */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+						CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_FLAG_DATA_OUT |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x00, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 01 RESERVE */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+						CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_FLAG_DATA_OUT |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x01, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 02 RELEASE */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+						CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_FLAG_DATA_OUT |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x02, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 03 CLEAR */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+						CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_FLAG_DATA_OUT |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x03, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 04 PREEMPT */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+						CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_FLAG_DATA_OUT |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x04, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 05 PREEMPT AND ABORT */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+						CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_FLAG_DATA_OUT |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x05, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 06 REGISTER AND IGNORE EXISTING KEY */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+						CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_FLAG_DATA_OUT |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x06, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 07 REGISTER AND MOVE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08-1f */
+};
+
+/* 83 EXTENDED COPY */
+const struct ctl_cmd_entry ctl_cmd_table_83[32] =
+{
+/* 00 EXTENDED COPY (LID1) */
+{ctl_extended_copy_lid1, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH |
+					    CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+					    CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 01 EXTENDED COPY (LID4) */
+{ctl_extended_copy_lid4, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH |
+					    CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+					    CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 02 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 03 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 04 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 05 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 10 POPULATE TOKEN */
+{ctl_populate_token, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_DIRECT |
+					CTL_FLAG_DATA_OUT |
+					CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x10, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 11 WRITE USING TOKEN */
+{ctl_write_using_token, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_DIRECT |
+					CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x11, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 12 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 13 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 14 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 15 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 16 SET TAPE STREAM MIRRORING */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 17 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 18 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 19 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1C COPY OPERATION ABORT */
+{ctl_copy_operation_abort, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH |
+					      CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+					      CTL_FLAG_DATA_NONE,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x1c, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x07}},
+
+/* 1D COPY OPERATION CLOSE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1e-1f */
+};
+
+/* 84 RECEIVE COPY STATUS */
+const struct ctl_cmd_entry ctl_cmd_table_84[32] =
+{
+/* 00 RECEIVE COPY STATUS (LID1) */
+{ctl_receive_copy_status_lid1, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 16, {0x00, 0xff, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 01 RECEIVE COPY DATA (LID1) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 03 RECEIVE COPY OPERATING PARAMETERS */
+{ctl_receive_copy_operating_parameters, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 16, {0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 04 RECEIVE COPY FAILURE DETAILS (LID1) */
+{ctl_receive_copy_failure_details, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 16, {0x04, 0xff, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 05 RECEIVE COPY STATUS (LID4) */
+{ctl_receive_copy_status_lid4, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 16, {0x05, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 06 RECEIVE COPY DATA (LID4)*/
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 RECEIVE ROD TOKEN INFORMATION */
+{ctl_receive_rod_token_information, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 16, {0x07, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 08 REPORT ALL ROD TOKENS */
+{ctl_report_all_rod_tokens, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 16, {0x08, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 10 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 11 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 12 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 13 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 14 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 15 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 16 REPORT TAPE STREAM MIRRORING */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 17-1f */
+};
+
+/* 9B READ BUFFER(16) */
+const struct ctl_cmd_entry ctl_cmd_table_9b[32] =
+{
+/* 00 READ BUFFER(16) HDR DATA */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 01 READ BUFFER(16) VENDOR */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 READ BUFFER(16) DATA */
+{ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+				     CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				     CTL_CMD_FLAG_OK_ON_STANDBY |
+				     CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 16, {0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 03 READ BUFFER(16) DESCR */
+{ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+				     CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				     CTL_CMD_FLAG_OK_ON_STANDBY |
+				     CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 16, {0x03, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 04 READ BUFFER(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 05 READ BUFFER(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A READ BUFFER(16) ECHO */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0B READ BUFFER(16) ECHO DESCRIPTOR */
+{ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+				     CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				     CTL_CMD_FLAG_OK_ON_STANDBY |
+				     CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 16, {0x0b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 0C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 10 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 11 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 12 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 13 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 14 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 15 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 16 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 17 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 18 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 19 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1C READ BUFFER(16) ERROR HISTORY */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1d-1f */
+};
+
+
+/* 9E SERVICE ACTION IN(16) */
+const struct ctl_cmd_entry ctl_cmd_table_9e[32] =
+{
+/* 00 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 01 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 03 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 04 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 05 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 10 READ CAPACITY(16) */
+{ctl_read_capacity_16, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_DIRECT |
+					  CTL_FLAG_DATA_IN |
+					  CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READCAP,
+ 16, {0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 11 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 12 GET LBA STATUS */
+{ctl_get_lba_status, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT |
+				      CTL_FLAG_DATA_IN |
+				      CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 16, {0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 13-1f */
+};
+
+/* A3 MAINTENANCE IN */
+const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
+{
+/* 00 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 01 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 03 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 04 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 05 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A REPORT TARGET PORT GROUPS */
+{ctl_report_tagret_port_groups, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_CMD_FLAG_OK_ON_UNAVAIL |
+						CTL_FLAG_DATA_IN |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 12, {0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 0B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0C REPORT SUPPORTED OPERATION CODES */
+{ctl_report_supported_opcodes, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
+						CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+						CTL_CMD_FLAG_OK_ON_STANDBY |
+						CTL_CMD_FLAG_OK_ON_UNAVAIL |
+						CTL_FLAG_DATA_IN |
+						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 12, {0x0c, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 0D REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
+{ctl_report_supported_tmf, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
+					   CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+					   CTL_CMD_FLAG_OK_ON_STANDBY |
+					   CTL_CMD_FLAG_OK_ON_UNAVAIL |
+					   CTL_FLAG_DATA_IN |
+					   CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 12, {0x0d, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F REPORT TIMESTAMP */
+{ctl_report_timestamp, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
+					CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+					CTL_CMD_FLAG_OK_ON_STANDBY |
+					CTL_CMD_FLAG_OK_ON_UNAVAIL |
+					CTL_FLAG_DATA_IN |
+					CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 12, {0x0f, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 10-1f */
+};
+
+const struct ctl_cmd_entry ctl_cmd_table[256] =
+{
 /* 00 TEST UNIT READY */
 {ctl_tur, CTL_SERIDX_TUR, CTL_CMD_FLAG_OK_ON_BOTH |
 			  CTL_FLAG_DATA_NONE |
 			  CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_TUR},
+ CTL_LUN_PAT_TUR, 6, {0, 0, 0, 0, 0x07}},
 
 /* 01 REWIND */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -77,21 +908,21 @@
 
 /* 03 REQUEST SENSE */
 {ctl_request_sense, CTL_SERIDX_RQ_SNS, CTL_FLAG_DATA_IN |
-				       CTL_CMD_FLAG_OK_ON_ALL_LUNS |
+				       CTL_CMD_FLAG_OK_ON_NO_LUN |
+				       CTL_CMD_FLAG_OK_ON_BOTH |
 				       CTL_CMD_FLAG_ALLOW_ON_RESV |
 				       CTL_CMD_FLAG_NO_SENSE |
-				       CTL_CMD_FLAG_OK_ON_STOPPED |
-				       CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				       CTL_CMD_FLAG_OK_ON_OFFLINE |
-				       CTL_CMD_FLAG_OK_ON_SECONDARY |
-				       CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_NONE},
+				       CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				       CTL_CMD_FLAG_OK_ON_STANDBY |
+				       CTL_CMD_FLAG_OK_ON_UNAVAIL |
+				       CTL_CMD_FLAG_ALLOW_ON_PR_RESV |
+				       CTL_CMD_FLAG_RUN_HERE,
+ CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0xff, 0x07}},
 
 /* 04 FORMAT UNIT */
-{ctl_format, CTL_SERIDX_FORMAT, CTL_CMD_FLAG_OK_ON_SLUN |
-				CTL_CMD_FLAG_OK_ON_INOPERABLE |
+{ctl_format, CTL_SERIDX_FORMAT, CTL_CMD_FLAG_OK_ON_DIRECT |
 				CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0xff, 0, 0, 0, 0x07}},
 
 /* 05 READ BLOCK LIMITS */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -103,18 +934,18 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 08 READ(6) */
-{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN |
+{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT |
 				  CTL_FLAG_DATA_IN |
-				  CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+				  CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 6, {0x1f, 0xff, 0xff, 0xff, 0x07}},
 
 /* 09 */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 0A WRITE(6) */
-{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN |
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT |
 				   CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 6, {0x1f, 0xff, 0xff, 0xff, 0x07}},
 
 /* 0B SEEK(6) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -138,16 +969,16 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 12 INQUIRY */
-{ctl_inquiry, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_ALL_LUNS |
+{ctl_inquiry, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_NO_LUN |
+			      CTL_CMD_FLAG_OK_ON_BOTH |
 			      CTL_CMD_FLAG_ALLOW_ON_RESV |
 			      CTL_CMD_FLAG_NO_SENSE |
-			      CTL_CMD_FLAG_OK_ON_STOPPED |
-			      CTL_CMD_FLAG_OK_ON_INOPERABLE |
-			      CTL_CMD_FLAG_OK_ON_OFFLINE |
-			      CTL_CMD_FLAG_OK_ON_SECONDARY |
+			      CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+			      CTL_CMD_FLAG_OK_ON_STANDBY |
+			      CTL_CMD_FLAG_OK_ON_UNAVAIL |
 			      CTL_FLAG_DATA_IN |
 			      CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0xe1, 0xff, 0xff, 0xff, 0x07}},
 
 /* 13 */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -157,29 +988,26 @@
 
 /* 15 MODE SELECT(6) */
 {ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
-				     CTL_CMD_FLAG_OK_ON_STOPPED |
-				     CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				     CTL_CMD_FLAG_OK_ON_SECONDARY |
+				     CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				     CTL_CMD_FLAG_OK_ON_STANDBY |
 				     CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0x13, 0, 0, 0xff, 0x07}},
 
 /* 16 RESERVE(6) */
-{ctl_scsi_reserve, CTL_SERIDX_RESV, CTL_CMD_FLAG_ALLOW_ON_RESV |
+{ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
 				    CTL_CMD_FLAG_OK_ON_BOTH |
-				    CTL_CMD_FLAG_OK_ON_STOPPED |
-				    CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				    CTL_CMD_FLAG_OK_ON_SECONDARY |
+				    CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				    CTL_CMD_FLAG_OK_ON_STANDBY |
 				    CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}},
 
 /* 17 RELEASE(6) */
-{ctl_scsi_release, CTL_SERIDX_REL, CTL_CMD_FLAG_ALLOW_ON_RESV |
+{ctl_scsi_release, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
 				   CTL_CMD_FLAG_OK_ON_BOTH |
-				   CTL_CMD_FLAG_OK_ON_STOPPED |
-				   CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				   CTL_CMD_FLAG_OK_ON_SECONDARY |
+				   CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				   CTL_CMD_FLAG_OK_ON_STANDBY |
 				   CTL_FLAG_DATA_NONE,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}},
 
 /* 18 COPY */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -189,20 +1017,19 @@
 
 /* 1A MODE SENSE(6) */
 {ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
-				    CTL_CMD_FLAG_OK_ON_STOPPED |
-				    CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				    CTL_CMD_FLAG_OK_ON_SECONDARY |
-				    CTL_FLAG_DATA_IN,
- CTL_LUN_PAT_NONE},
+				    CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				    CTL_CMD_FLAG_OK_ON_STANDBY |
+				    CTL_FLAG_DATA_IN |
+				    CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_NONE, 6, {0x08, 0xff, 0xff, 0xff, 0x07}},
 
 /* 1B START STOP UNIT */
-{ctl_start_stop, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
-				   CTL_CMD_FLAG_OK_ON_STOPPED |
-				   CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				   CTL_CMD_FLAG_OK_ON_OFFLINE |
+{ctl_start_stop, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_DIRECT |
+				   CTL_CMD_FLAG_OK_ON_CDROM |
+				   CTL_CMD_FLAG_OK_ON_NO_MEDIA |
 				   CTL_FLAG_DATA_NONE |
 				   CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0x01, 0, 0x0f, 0xf7, 0x07}},
 
 /* 1C RECEIVE DIAGNOSTIC RESULTS */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -211,7 +1038,11 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 1E PREVENT ALLOW MEDIUM REMOVAL */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_prevent_allow, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_DIRECT |
+				      CTL_CMD_FLAG_OK_ON_CDROM |
+				      CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				      CTL_FLAG_DATA_NONE,
+ CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0x03, 0x07}},
 
 /* 1F */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -232,13 +1063,11 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 25 READ CAPACITY(10) */
-{ctl_read_capacity, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN|
-				       CTL_CMD_FLAG_OK_ON_STOPPED |
-				       CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				       CTL_CMD_FLAG_OK_ON_SECONDARY |
+{ctl_read_capacity, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_DIRECT |
+				       CTL_CMD_FLAG_OK_ON_CDROM |
 				       CTL_FLAG_DATA_IN |
 				       CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READCAP},
+ CTL_LUN_PAT_READCAP, 10, {0, 0, 0, 0, 0, 0, 0, 0, 0x07}},
 
 /* 26 */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -247,17 +1076,20 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 28 READ(10) */
-{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN |
+{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT |
+				  CTL_CMD_FLAG_OK_ON_CDROM |
 				  CTL_FLAG_DATA_IN |
-				  CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+				  CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 10, {0x1a, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
 
 /* 29 READ GENERATION */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 2A WRITE(10) */
-{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 10, {0x1a, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
 
 /* 2B SEEK(10) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -269,11 +1101,16 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 2E WRITE AND VERIFY(10) */
-{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 10, {0x12, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
 
 /* 2F VERIFY(10) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_verify, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT |
+			      CTL_FLAG_DATA_OUT |
+			      CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 10, {0x16, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
 
 /* 30 SEARCH DATA HIGH(10) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -291,15 +1128,20 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 35 SYNCHRONIZE CACHE(10) */
-{ctl_sync_cache, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
-				   CTL_FLAG_DATA_NONE,
- CTL_LUN_PAT_NONE},
+{ctl_sync_cache, CTL_SERIDX_SYNC, CTL_CMD_FLAG_OK_ON_DIRECT |
+				  CTL_FLAG_DATA_NONE,
+ CTL_LUN_PAT_WRITE,
+ 10, {0x06, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
 
 /* 36 LOCK UNLOCK CACHE(10) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 37 READ DEFECT DATA(10) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_read_defect, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_DIRECT |
+				     CTL_FLAG_DATA_IN |
+				     CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_NONE,
+ 10, {0, 0x1f, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
 
 /* 38 MEDIUM SCAN */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -311,12 +1153,12 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 3B WRITE BUFFER */
-{ctl_write_buffer, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_PROC |
-				     CTL_FLAG_DATA_OUT,
+{__DECONST(ctl_opfunc *, ctl_cmd_table_3b), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
  CTL_LUN_PAT_NONE},
 
 /* 3C READ BUFFER */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{__DECONST(ctl_opfunc *, ctl_cmd_table_3c), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
+ CTL_LUN_PAT_NONE},
 
 /* 3D UPDATE BLOCK */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -331,13 +1173,22 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 41 WRITE SAME(10) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_write_same, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT |
+				   CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 10, {0x1a, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
 
-/* 42 READ SUB-CHANNEL */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+/* 42 READ SUB-CHANNEL / UNMAP */
+{ctl_unmap, CTL_SERIDX_UNMAP, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE,
+ 10, {1, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
 
 /* 43 READ TOC/PMA/ATIP */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_read_toc, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_CDROM |
+				  CTL_CMD_FLAG_ALLOW_ON_PR_WRESV |
+				  CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 10, {0x02, 0x01, 0, 0, 0, 0xff, 0xff, 0xff, 0x07}},
 
 /* 44 REPORT DENSITY SUPPORT */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -346,7 +1197,12 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 46 GET CONFIGURATION */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_get_config, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_CDROM |
+				 CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				 CTL_CMD_FLAG_ALLOW_ON_PR_RESV |
+				 CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 10, {0x03, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0x07}},
 
 /* 47 PLAY AUDIO MSF */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -358,7 +1214,12 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 4A GET EVENT STATUS NOTIFICATION */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_get_event_status, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_CDROM |
+					  CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+					  CTL_CMD_FLAG_ALLOW_ON_PR_RESV |
+					  CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 10, {0x02, 0x01, 0, 0, 0, 0xff, 0xff, 0xff, 0x07}},
 
 /* 4B PAUSE/RESUME */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -367,7 +1228,12 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 4D LOG SENSE */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_log_sense, CTL_SERIDX_LOG_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+				    CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				    CTL_CMD_FLAG_OK_ON_STANDBY |
+				    CTL_FLAG_DATA_IN |
+				    CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE, 10, {0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0x07} },
 
 /* 4E STOP PLAY/SCAN */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -392,29 +1258,26 @@
 
 /* 55 MODE SELECT(10) */
 {ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
-				     CTL_CMD_FLAG_OK_ON_STOPPED |
-				     CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				     CTL_CMD_FLAG_OK_ON_SECONDARY |
+				     CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				     CTL_CMD_FLAG_OK_ON_STANDBY |
 				     CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 10, {0x13, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07} },
 
 /* 56 RESERVE(10) */
-{ctl_scsi_reserve, CTL_SERIDX_RESV, CTL_CMD_FLAG_ALLOW_ON_RESV |
+{ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
 				    CTL_CMD_FLAG_OK_ON_BOTH |
-				    CTL_CMD_FLAG_OK_ON_STOPPED |
-				    CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				    CTL_CMD_FLAG_OK_ON_SECONDARY |
+				    CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				    CTL_CMD_FLAG_OK_ON_STANDBY |
 				    CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 10, {0, 0, 0, 0, 0, 0, 0, 0, 0x07} },
 
 /* 57 RELEASE(10) */
-{ctl_scsi_release, CTL_SERIDX_REL, CTL_CMD_FLAG_ALLOW_ON_RESV |
+{ctl_scsi_release, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
 				   CTL_CMD_FLAG_OK_ON_BOTH |
-				   CTL_CMD_FLAG_OK_ON_STOPPED |
-				   CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				   CTL_CMD_FLAG_OK_ON_SECONDARY |
+				   CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				   CTL_CMD_FLAG_OK_ON_STANDBY |
 				   CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 10, {0, 0, 0, 0, 0, 0, 0, 0, 0x07}},
 
 /* 58 REPAIR TRACK */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -424,11 +1287,11 @@
 
 /* 5A MODE SENSE(10) */
 {ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
-				    CTL_CMD_FLAG_OK_ON_STOPPED |
-				    CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				    CTL_CMD_FLAG_OK_ON_SECONDARY |
-				    CTL_FLAG_DATA_IN,
- CTL_LUN_PAT_NONE},
+				    CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				    CTL_CMD_FLAG_OK_ON_STANDBY |
+				    CTL_FLAG_DATA_IN |
+				    CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_NONE, 10, {0x18, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} },
 
 /* 5B CLOSE TRACK/SESSION */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -440,29 +1303,13 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 5E PERSISTENT RESERVE IN */
-{ctl_persistent_reserve_in, CTL_SERIDX_PRES_IN, CTL_CMD_FLAG_ALLOW_ON_RESV |
-						CTL_CMD_FLAG_OK_ON_BOTH |
-						CTL_CMD_FLAG_OK_ON_STOPPED |
-						CTL_CMD_FLAG_OK_ON_INOPERABLE |
-						CTL_CMD_FLAG_OK_ON_SECONDARY |
-						CTL_FLAG_DATA_IN |
-						CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+{__DECONST(ctl_opfunc *, ctl_cmd_table_5e), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
  CTL_LUN_PAT_NONE},
 
-//{ctl_persistent_reserve_in, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-
 /* 5F PERSISTENT RESERVE OUT */
-{ctl_persistent_reserve_out, CTL_SERIDX_PRES_OUT, CTL_CMD_FLAG_ALLOW_ON_RESV |
-						  CTL_CMD_FLAG_OK_ON_BOTH |
-						  CTL_CMD_FLAG_OK_ON_STOPPED |
-						  CTL_CMD_FLAG_OK_ON_INOPERABLE|
-						  CTL_CMD_FLAG_OK_ON_SECONDARY |
-						  CTL_FLAG_DATA_OUT |
-						  CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+{__DECONST(ctl_opfunc *, ctl_cmd_table_5f), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
  CTL_LUN_PAT_NONE},
 
-//{ctl_persistent_reserve_out, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-
 /* 60 */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
@@ -569,10 +1416,12 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 83 EXTENDED COPY */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{__DECONST(ctl_opfunc *, ctl_cmd_table_83), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
+ CTL_LUN_PAT_NONE},
 
 /* 84 RECEIVE COPY RESULTS */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{__DECONST(ctl_opfunc *, ctl_cmd_table_84), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
+ CTL_LUN_PAT_NONE},
 
 /* 85 */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -584,16 +1433,23 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 88 READ(16) */
-{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_IN |
-                                  CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_IN |
+                                  CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 16, {0x1a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
-/* 89 */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+/* 89 COMPARE AND WRITE */
+{ctl_cnw, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0, 0, 0, 0xff, 0, 0x07}},
 
 /* 8A WRITE(16) */
-{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x1a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* 8B */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -605,25 +1461,38 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 8E WRITE AND VERIFY(16) */
-{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* 8F VERIFY(16) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_verify, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT |
+			      CTL_FLAG_DATA_OUT |
+			      CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 16, {0x16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* 90 PRE-FETCH(16) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 91 SYNCHRONIZE CACHE(16) */
-{ctl_sync_cache, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
-				   CTL_FLAG_DATA_NONE,
- CTL_LUN_PAT_NONE},
+{ctl_sync_cache, CTL_SERIDX_SYNC, CTL_CMD_FLAG_OK_ON_DIRECT |
+				  CTL_FLAG_DATA_NONE,
+ CTL_LUN_PAT_WRITE,
+ 16, {0x06, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* 92 LOCK UNLOCK CACHE(16) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 93 WRITE SAME(16) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_write_same, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT |
+				   CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x1b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* 94 */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -646,39 +1515,39 @@
 /* 9A */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* 9B */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+/* 9B READ BUFFER(16) */
+{__DECONST(ctl_opfunc *, ctl_cmd_table_9b), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
+ CTL_LUN_PAT_NONE},
 
-/* 9C */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+/* 9C WRITE ATOMIC(16) */
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0x07}},
 
 /* 9D */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* 9E SERVICE ACTION IN(16) */
-/* XXX KDM not all service actions will be read capacity!! */
-{ctl_service_action_in, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN |
-					   CTL_CMD_FLAG_OK_ON_STOPPED |
-					   CTL_CMD_FLAG_OK_ON_INOPERABLE |
-					   CTL_CMD_FLAG_OK_ON_SECONDARY |
-					   CTL_FLAG_DATA_IN |
-					   CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READCAP},
+{__DECONST(ctl_opfunc *, ctl_cmd_table_9e), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
+ CTL_LUN_PAT_NONE},
 
 /* 9F SERVICE ACTION OUT(16) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* A0 REPORT LUNS */
-{ctl_report_luns, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_ALL_LUNS |
+{ctl_report_luns, CTL_SERIDX_INQ, CTL_FLAG_DATA_IN |
+				  CTL_CMD_FLAG_OK_ON_NO_LUN |
+				  CTL_CMD_FLAG_OK_ON_BOTH |
 				  CTL_CMD_FLAG_ALLOW_ON_RESV |
 				  CTL_CMD_FLAG_NO_SENSE |
-				  CTL_CMD_FLAG_OK_ON_STOPPED |
-				  CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				  CTL_CMD_FLAG_OK_ON_OFFLINE |
-				  CTL_CMD_FLAG_OK_ON_SECONDARY |
-				  CTL_FLAG_DATA_IN |
-				  CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_NONE},
+				  CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+				  CTL_CMD_FLAG_OK_ON_STANDBY |
+				  CTL_CMD_FLAG_OK_ON_UNAVAIL |
+				  CTL_CMD_FLAG_ALLOW_ON_PR_RESV |
+				  CTL_CMD_FLAG_RUN_HERE,
+ CTL_LUN_PAT_NONE,
+ 12, {0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* A1 BLANK */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -686,15 +1555,11 @@
 /* A2 SEND EVENT */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* A3 MAINTENANCE (IN) Service Action - (0A) REPORT TARGET PORT GROUP */
-{ctl_maintenance_in, CTL_SERIDX_MAIN_IN, CTL_CMD_FLAG_OK_ON_BOTH |
-				         CTL_CMD_FLAG_OK_ON_STOPPED |
-				         CTL_CMD_FLAG_OK_ON_INOPERABLE |
-				         CTL_CMD_FLAG_OK_ON_SECONDARY |
-				         CTL_FLAG_DATA_IN,
+/* A3 MAINTENANCE IN */
+{__DECONST(ctl_opfunc *, ctl_cmd_table_a3), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
  CTL_LUN_PAT_NONE},
 
-/* A4 MAINTENANCE (OUT) */
+/* A4 MAINTENANCE OUT */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* A5 MOVE MEDIUM */
@@ -707,16 +1572,20 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* A8 READ(12) */
-{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_IN |
-                                  CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT |
+				  CTL_CMD_FLAG_OK_ON_CDROM |
+				  CTL_FLAG_DATA_IN |
+				  CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 12, {0x1a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* A9 PLAY TRACK RELATIVE(12) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* AA WRITE(12) */
-{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 12, {0x1a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* AB SERVICE ACTION IN(12) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -728,11 +1597,16 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* AE WRITE AND VERIFY(12) */
-{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 12, {0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* AF VERIFY(12) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_verify, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT |
+			      CTL_FLAG_DATA_OUT |
+			      CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 12, {0x16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* B0 SEARCH DATA HIGH(12) */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -756,7 +1630,11 @@
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
 /* B7 READ DEFECT DATA(12) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_read_defect, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_DIRECT |
+				     CTL_FLAG_DATA_IN |
+				     CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
+ CTL_LUN_PAT_NONE,
+ 12, {0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
 
 /* B8 READ ELEMENT STATUS */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -764,44 +1642,41 @@
 /* B9 READ CD MSF */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* BA REDUNDANCY GROUP (IN) */
+/* BA REDUNDANCY GROUP IN */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* BB REDUNDANCY GROUP (OUT) */
+/* BB REDUNDANCY GROUP OUT */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* BC SPARE (IN) */
+/* BC SPARE IN */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* BD SPARE (OUT) */
+/* BD SPARE OUT / MECHANISM STATUS */
+{ctl_mechanism_status, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_CDROM |
+					  CTL_CMD_FLAG_OK_ON_NO_MEDIA |
+					  CTL_CMD_FLAG_ALLOW_ON_PR_RESV |
+					  CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 12, {0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 0x07}},
+
+/* BE VOLUME SET IN */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* BE VOLUME SET (IN) */
+/* BF VOLUME SET OUT */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* BF VOLUME SET (OUT) */
+/* C0 */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* C0 - ISC_SEND_MSG_SHORT */
-//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_NONE,
- CTL_LUN_PAT_NONE},
+/* C1 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* C1 - ISC_SEND_MSG */
-//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+/* C2 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* C2 - ISC_WRITE */
-//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+/* C3 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 
-/* C3 - ISC_READ */
-//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_IN,
- CTL_LUN_PAT_NONE},
-
 /* C4 */
 {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
 

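The opcodes with SPC service actions (3B, 3C, 5E, 5F, 83, 84, 9B, 9E, A3)
no longer carry a handler of their own: CTL_CMD_FLAG_SA5 marks the entry,
and the execute slot is overloaded via __DECONST to point at one of the
32-entry sub-tables defined above, indexed by the service-action field in
CDB byte 1.  A minimal sketch of the resolution step, assuming only the
flags/execute members visible in these initializers (the real lookup
lives in ctl.c):

static const struct ctl_cmd_entry *
cmd_entry_for_cdb(const uint8_t *cdb)
{
	const struct ctl_cmd_entry *entry;

	entry = &ctl_cmd_table[cdb[0]];
	if (entry->flags & CTL_CMD_FLAG_SA5) {
		/* The service action is in bits 4:0 of CDB byte 1. */
		entry = &((const struct ctl_cmd_entry *)
		    entry->execute)[cdb[1] & 0x1f];
	}
	return (entry);
}
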
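Each live entry also gained a trailing CDB length plus a per-byte usage
mask: mask byte 0 covers CDB byte 1, and the final 0x07 covers the
control byte.  Those bytes double as the CDB USAGE DATA returned by
REPORT SUPPORTED OPERATION CODES, and they let the target reject CDBs
with reserved bits set.  A hedged sketch of that check, assuming the
fields are named length and usage:

static int
cdb_bits_ok(const uint8_t *cdb, const struct ctl_cmd_entry *entry)
{
	int i;

	for (i = 1; i < entry->length; i++) {
		/* Any bit outside the usage mask is reserved. */
		if ((cdb[i] & ~entry->usage[i - 1]) != 0)
			return (0);
	}
	return (1);
}
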
Modified: trunk/sys/cam/ctl/ctl_debug.h
===================================================================
--- trunk/sys/cam/ctl/ctl_debug.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_debug.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
  * All rights reserved.
@@ -27,8 +28,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_debug.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_debug.h#2 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_debug.h 273531 2014-10-23 07:36:33Z mav $
  */
 /*
  * CAM Target Layer debugging interface.
@@ -39,6 +40,16 @@
 #ifndef	_CTL_DEBUG_H_
 #define	_CTL_DEBUG_H_
 
+/*
+ * Debugging flags.
+ */
+typedef enum {
+	CTL_DEBUG_NONE		= 0x00,	/* no debugging */
+	CTL_DEBUG_INFO		= 0x01,	/* SCSI errors */
+	CTL_DEBUG_CDB		= 0x02,	/* SCSI CDBs and tasks */
+	CTL_DEBUG_CDB_DATA	= 0x04	/* SCSI CDB DATA */
+} ctl_debug_flags;
+
 #ifdef	CAM_CTL_DEBUG
 #define	CTL_DEBUG_PRINT(X)		\
 	do {				\

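The new ctl_debug_flags values are independent bits meant to gate
categories of diagnostics at run time (in FreeBSD they appear to back
the kern.cam.ctl.debug sysctl), while CTL_DEBUG_PRINT() compiles away
entirely unless the kernel is built with options CAM_CTL_DEBUG.  A
usage sketch, assuming the usual macro body that expands "printf X"
(hence the doubled parentheses); ctsio here is a placeholder for
whatever struct ctl_scsiio the caller holds:

	CTL_DEBUG_PRINT(("%s: opcode %#x\n", __func__, ctsio->cdb[0]));
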
Modified: trunk/sys/cam/ctl/ctl_error.c
===================================================================
--- trunk/sys/cam/ctl/ctl_error.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_error.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,6 +1,8 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003-2009 Silicon Graphics International Corp.
  * Copyright (c) 2011 Spectra Logic Corporation
+ * Copyright (c) 2014-2015 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -28,7 +30,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_error.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_error.c#2 $
  */
 /*
  * CAM Target Layer error reporting routines.
@@ -37,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_error.c 314380 2017-02-28 06:32:01Z mav $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -57,7 +59,6 @@
 #include <cam/ctl/ctl_io.h>
 #include <cam/ctl/ctl.h>
 #include <cam/ctl/ctl_frontend.h>
-#include <cam/ctl/ctl_frontend_internal.h>
 #include <cam/ctl/ctl_backend.h>
 #include <cam/ctl/ctl_ioctl.h>
 #include <cam/ctl/ctl_error.h>
@@ -65,9 +66,9 @@
 #include <cam/ctl/ctl_private.h>
 
 void
-ctl_set_sense_data_va(struct scsi_sense_data *sense_data, void *lunptr,
-		      scsi_sense_data_type sense_format, int current_error,
-		      int sense_key, int asc, int ascq, va_list ap) 
+ctl_set_sense_data_va(struct scsi_sense_data *sense_data, u_int *sense_len,
+    void *lunptr, scsi_sense_data_type sense_format, int current_error,
+    int sense_key, int asc, int ascq, va_list ap)
 {
 	struct ctl_lun *lun;
 
@@ -83,27 +84,36 @@
 		 * sense if the LUN exists and descriptor sense is turned
 		 * on for that LUN.
 		 */
-		if ((lun != NULL)
-		 && (lun->flags & CTL_LUN_SENSE_DESC))
+		if ((lun != NULL) && (lun->MODE_CTRL.rlec & SCP_DSENSE))
 			sense_format = SSD_TYPE_DESC;
 		else
 			sense_format = SSD_TYPE_FIXED;
 	}
 
-	scsi_set_sense_data_va(sense_data, sense_format, current_error,
-			       sense_key, asc, ascq, ap);
+	/*
+	 * Determine maximum sense data length to return.
+	 */
+	if (*sense_len == 0) {
+		if ((lun != NULL) && (lun->MODE_CTRLE.max_sense != 0))
+			*sense_len = lun->MODE_CTRLE.max_sense;
+		else
+			*sense_len = SSD_FULL_SIZE;
+	}
+
+	scsi_set_sense_data_va(sense_data, sense_len, sense_format,
+	    current_error, sense_key, asc, ascq, ap);
 }
 
 void
-ctl_set_sense_data(struct scsi_sense_data *sense_data, void *lunptr,
-		   scsi_sense_data_type sense_format, int current_error,
-		   int sense_key, int asc, int ascq, ...) 
+ctl_set_sense_data(struct scsi_sense_data *sense_data, u_int *sense_len,
+    void *lunptr, scsi_sense_data_type sense_format, int current_error,
+    int sense_key, int asc, int ascq, ...)
 {
 	va_list ap;
 
 	va_start(ap, ascq);
-	ctl_set_sense_data_va(sense_data, lunptr, sense_format, current_error,
-			      sense_key, asc, ascq, ap);
+	ctl_set_sense_data_va(sense_data, sense_len, lunptr, sense_format,
+	    current_error, sense_key, asc, ascq, ap);
 	va_end(ap);
 }
 
@@ -113,6 +123,7 @@
 {
 	va_list ap;
 	struct ctl_lun *lun;
+	u_int sense_len;
 
 	/*
 	 * The LUN can't go away until all of the commands have been
@@ -119,10 +130,11 @@
 	 * completed.  Therefore we can safely access the LUN structure and
 	 * flags without the lock.
 	 */
-	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+	lun = CTL_LUN(ctsio);
 
 	va_start(ap, ascq);
-	ctl_set_sense_data_va(&ctsio->sense_data,
+	sense_len = 0;
+	ctl_set_sense_data_va(&ctsio->sense_data, &sense_len,
 			      lun,
 			      SSD_TYPE_NONE,
 			      current_error,
@@ -133,7 +145,7 @@
 	va_end(ap);
 
 	ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
-	ctsio->sense_len = SSD_FULL_SIZE;
+	ctsio->sense_len = sense_len;
 	ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
 }
 
@@ -149,6 +161,7 @@
 {
 	struct scsi_sense_stream stream_sense;
 	int current_error;
+	u_int sense_len;
 	uint8_t stream_bits;
 
 	bzero(sense_dest, sizeof(*sense_dest));
@@ -174,7 +187,8 @@
 	 * value is set in the fixed sense data, set it in the descriptor
 	 * data.  Otherwise, skip it.
 	 */
-	ctl_set_sense_data((struct scsi_sense_data *)sense_dest,
+	sense_len = SSD_FULL_SIZE;
+	ctl_set_sense_data((struct scsi_sense_data *)sense_dest, &sense_len,
 			   /*lun*/ NULL,
 			   /*sense_format*/ SSD_TYPE_DESC,
 			   current_error,
@@ -182,8 +196,8 @@
 			   /*asc*/ sense_src->add_sense_code,
 			   /*ascq*/ sense_src->add_sense_code_qual,
 
-			   /* Information Bytes */ 
-			   (scsi_4btoul(sense_src->info) != 0) ?
+			   /* Information Bytes */
+			   (sense_src->error_code & SSD_ERRCODE_VALID) ?
 			   SSD_ELEM_INFO : SSD_ELEM_SKIP,
 			   sizeof(sense_src->info),
 			   sense_src->info,
@@ -234,6 +248,7 @@
 	int info_size = 0, cmd_size = 0, fru_size = 0;
 	int sks_size = 0, stream_size = 0;
 	int pos;
+	u_int sense_len;
 
 	if ((sense_src->error_code & SSD_ERRCODE) == SSD_DESC_CURRENT_ERROR)
 		current_error = 1;
@@ -319,7 +334,8 @@
 		}
 	}
 
-	ctl_set_sense_data((struct scsi_sense_data *)sense_dest,
+	sense_len = SSD_FULL_SIZE;
+	ctl_set_sense_data((struct scsi_sense_data *)sense_dest, &sense_len,
 			   /*lun*/ NULL,
 			   /*sense_format*/ SSD_TYPE_FIXED,
 			   current_error,
@@ -366,41 +382,36 @@
 		      SSD_ELEM_NONE);
 }
 
-ctl_ua_type
-ctl_build_ua(ctl_ua_type ua_type, struct scsi_sense_data *sense,
-	     scsi_sense_data_type sense_format)
+static void
+ctl_ua_to_ascq(struct ctl_lun *lun, ctl_ua_type ua_to_build, int *asc,
+    int *ascq, ctl_ua_type *ua_to_clear, uint8_t **info)
 {
-	ctl_ua_type ua_to_build;
-	int i, asc, ascq;
 
-	if (ua_type == CTL_UA_NONE)
-		return (ua_type);
-
-	ua_to_build = CTL_UA_NONE;
-
-	for (i = 0; i < (sizeof(ua_type) * 8); i++) {
-		if (ua_type & (1 << i)) {
-			ua_to_build = 1 << i;
-			break;
-		}
-	}
-
 	switch (ua_to_build) {
 	case CTL_UA_POWERON:
 		/* 29h/01h  POWER ON OCCURRED */
-		asc = 0x29;
-		ascq = 0x01;
+		*asc = 0x29;
+		*ascq = 0x01;
+		*ua_to_clear = ~0;
 		break;
 	case CTL_UA_BUS_RESET:
 		/* 29h/02h  SCSI BUS RESET OCCURRED */
-		asc = 0x29;
-		ascq = 0x02;
+		*asc = 0x29;
+		*ascq = 0x02;
+		*ua_to_clear = ~0;
 		break;
 	case CTL_UA_TARG_RESET:
 		/* 29h/03h  BUS DEVICE RESET FUNCTION OCCURRED*/
-		asc = 0x29;
-		ascq = 0x03;
+		*asc = 0x29;
+		*ascq = 0x03;
+		*ua_to_clear = ~0;
 		break;
+	case CTL_UA_I_T_NEXUS_LOSS:
+		/* 29h/07h  I_T NEXUS LOSS OCCURRED */
+		*asc = 0x29;
+		*ascq = 0x07;
+		*ua_to_clear = ~0;
+		break;
 	case CTL_UA_LUN_RESET:
 		/* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
 		/*
@@ -407,77 +418,168 @@
 		 * Since we don't have a specific ASC/ASCQ pair for a LUN
 		 * reset, just return the generic reset code.
 		 */
-		asc = 0x29;
-		ascq = 0x00;
+		*asc = 0x29;
+		*ascq = 0x00;
 		break;
 	case CTL_UA_LUN_CHANGE:
 		/* 3Fh/0Eh  REPORTED LUNS DATA HAS CHANGED */
-		asc = 0x3F;
-		ascq = 0x0E;
+		*asc = 0x3F;
+		*ascq = 0x0E;
 		break;
 	case CTL_UA_MODE_CHANGE:
 		/* 2Ah/01h  MODE PARAMETERS CHANGED */
-		asc = 0x2A;
-		ascq = 0x01;
+		*asc = 0x2A;
+		*ascq = 0x01;
 		break;
 	case CTL_UA_LOG_CHANGE:
 		/* 2Ah/02h  LOG PARAMETERS CHANGED */
-		asc = 0x2A;
-		ascq = 0x02;
+		*asc = 0x2A;
+		*ascq = 0x02;
 		break;
-	case CTL_UA_LVD:
-		/* 29h/06h  TRANSCEIVER MODE CHANGED TO LVD */
-		asc = 0x29;
-		ascq = 0x06;
+	case CTL_UA_INQ_CHANGE:
+		/* 3Fh/03h  INQUIRY DATA HAS CHANGED */
+		*asc = 0x3F;
+		*ascq = 0x03;
 		break;
-	case CTL_UA_SE:
-		/* 29h/05h  TRANSCEIVER MODE CHANGED TO SINGLE-ENDED */
-		asc = 0x29;
-		ascq = 0x05;
-		break;
 	case CTL_UA_RES_PREEMPT:
 		/* 2Ah/03h  RESERVATIONS PREEMPTED */
-		asc = 0x2A;
-		ascq = 0x03;
+		*asc = 0x2A;
+		*ascq = 0x03;
 		break;
 	case CTL_UA_RES_RELEASE:
 		/* 2Ah/04h  RESERVATIONS RELEASED */
-		asc = 0x2A;
-		ascq = 0x04;
+		*asc = 0x2A;
+		*ascq = 0x04;
 		break;
 	case CTL_UA_REG_PREEMPT:
 		/* 2Ah/05h  REGISTRATIONS PREEMPTED */
-		asc = 0x2A;
-		ascq = 0x05;
+		*asc = 0x2A;
+		*ascq = 0x05;
 		break;
 	case CTL_UA_ASYM_ACC_CHANGE:
-	        /* 2Ah/06n  ASYMMETRIC ACCESS STATE CHANGED */
-		asc = 0x2A;
-		ascq = 0x06;
+		/* 2Ah/06h  ASYMMETRIC ACCESS STATE CHANGED */
+		*asc = 0x2A;
+		*ascq = 0x06;
 		break;
-	case CTL_UA_CAPACITY_CHANGED:
-	        /* 2Ah/09n  CAPACITY DATA HAS CHANGED */
-		asc = 0x2A;
-		ascq = 0x09;
+	case CTL_UA_CAPACITY_CHANGE:
+		/* 2Ah/09h  CAPACITY DATA HAS CHANGED */
+		*asc = 0x2A;
+		*ascq = 0x09;
 		break;
+	case CTL_UA_THIN_PROV_THRES:
+		/* 38h/07h  THIN PROVISIONING SOFT THRESHOLD REACHED */
+		*asc = 0x38;
+		*ascq = 0x07;
+		*info = lun->ua_tpt_info;
+		break;
+	case CTL_UA_MEDIUM_CHANGE:
+		/* 28h/00h  NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
+		*asc = 0x28;
+		*ascq = 0x00;
+		break;
+	case CTL_UA_IE:
+		/* Informational exception */
+		*asc = lun->ie_asc;
+		*ascq = lun->ie_ascq;
+		break;
 	default:
-		ua_to_build = CTL_UA_NONE;
-		return (ua_to_build);
-		break; /* NOTREACHED */
+		panic("%s: Unknown UA %x", __func__, ua_to_build);
 	}
+}
 
-	ctl_set_sense_data(sense,
-			   /*lun*/ NULL,
-			   sense_format,
-			   /*current_error*/ 1,
-			   /*sense_key*/ SSD_KEY_UNIT_ATTENTION,
-			   asc,
-			   ascq,
-			   SSD_ELEM_NONE);
+ctl_ua_type
+ctl_build_qae(struct ctl_lun *lun, uint32_t initidx, uint8_t *resp)
+{
+	ctl_ua_type ua;
+	ctl_ua_type ua_to_build, ua_to_clear;
+	uint8_t *info;
+	int asc, ascq;
+	uint32_t p, i;
 
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+	p = initidx / CTL_MAX_INIT_PER_PORT;
+	i = initidx % CTL_MAX_INIT_PER_PORT;
+	if (lun->pending_ua[p] == NULL)
+		ua = CTL_UA_POWERON;
+	else
+		ua = lun->pending_ua[p][i];
+	if (ua == CTL_UA_NONE)
+		return (CTL_UA_NONE);
+
+	ua_to_build = (1 << (ffs(ua) - 1));
+	ua_to_clear = ua_to_build;
+	info = NULL;
+	ctl_ua_to_ascq(lun, ua_to_build, &asc, &ascq, &ua_to_clear, &info);
+
+	resp[0] = SSD_KEY_UNIT_ATTENTION;
+	if (ua_to_build == ua)
+		resp[0] |= 0x10;
+	else
+		resp[0] |= 0x20;
+	resp[1] = asc;
+	resp[2] = ascq;
 	return (ua_to_build);
 }
 
+ctl_ua_type
+ctl_build_ua(struct ctl_lun *lun, uint32_t initidx,
+    struct scsi_sense_data *sense, u_int *sense_len,
+    scsi_sense_data_type sense_format)
+{
+	ctl_ua_type *ua;
+	ctl_ua_type ua_to_build, ua_to_clear;
+	uint8_t *info;
+	int asc, ascq;
+	uint32_t p, i;
+
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+	mtx_assert(&lun->ctl_softc->ctl_lock, MA_NOTOWNED);
+	p = initidx / CTL_MAX_INIT_PER_PORT;
+	if ((ua = lun->pending_ua[p]) == NULL) {
+		mtx_unlock(&lun->lun_lock);
+		ua = malloc(sizeof(ctl_ua_type) * CTL_MAX_INIT_PER_PORT,
+		    M_CTL, M_WAITOK);
+		mtx_lock(&lun->lun_lock);
+		if (lun->pending_ua[p] == NULL) {
+			lun->pending_ua[p] = ua;
+			for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++)
+				ua[i] = CTL_UA_POWERON;
+		} else {
+			free(ua, M_CTL);
+			ua = lun->pending_ua[p];
+		}
+	}
+	i = initidx % CTL_MAX_INIT_PER_PORT;
+	if (ua[i] == CTL_UA_NONE)
+		return (CTL_UA_NONE);
+
+	ua_to_build = (1 << (ffs(ua[i]) - 1));
+	ua_to_clear = ua_to_build;
+	info = NULL;
+	ctl_ua_to_ascq(lun, ua_to_build, &asc, &ascq, &ua_to_clear, &info);
+
+	ctl_set_sense_data(sense, sense_len, lun, sense_format, 1,
+	    /*sense_key*/ SSD_KEY_UNIT_ATTENTION, asc, ascq,
+	    ((info != NULL) ? SSD_ELEM_INFO : SSD_ELEM_SKIP), 8, info,
+	    SSD_ELEM_NONE);
+
+	/* We're reporting this UA, so clear it */
+	ua[i] &= ~ua_to_clear;
+
+	if (ua_to_build == CTL_UA_LUN_CHANGE) {
+		mtx_unlock(&lun->lun_lock);
+		mtx_lock(&lun->ctl_softc->ctl_lock);
+		ctl_clr_ua_allluns(lun->ctl_softc, initidx, ua_to_build);
+		mtx_unlock(&lun->ctl_softc->ctl_lock);
+		mtx_lock(&lun->lun_lock);
+	} else if (ua_to_build == CTL_UA_THIN_PROV_THRES &&
+	    (lun->MODE_LBP.main.flags & SLBPP_SITUA) != 0) {
+		ctl_clr_ua_all(lun, -1, ua_to_build);
+	}
+
+	return (ua_to_build);
+}
+
 void
 ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio)
 {
@@ -540,15 +642,24 @@
 		      /*data*/ sks,
 		      SSD_ELEM_NONE);
 }
+void
+ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio)
+{
 
+	/* "Invalid field in command information unit" */
+	ctl_set_sense(ctsio,
+		      /*current_error*/ 1,
+		      /*sense_key*/ SSD_KEY_ABORTED_COMMAND,
+		      /*asc*/ 0x0E,
+		      /*ascq*/ 0x03,
+		      SSD_ELEM_NONE);
+}
+
 void
 ctl_set_invalid_opcode(struct ctl_scsiio *ctsio)
 {
-	struct scsi_sense_data *sense;
 	uint8_t sks[3];
 
-	sense = &ctsio->sense_data;
-
 	sks[0] = SSD_SCS_VALID | SSD_FIELDPTR_CMD;
 	scsi_ulto2b(0, &sks[1]);
 
@@ -625,9 +736,9 @@
 }
 
 void
-ctl_set_medium_error(struct ctl_scsiio *ctsio)
+ctl_set_medium_error(struct ctl_scsiio *ctsio, int read)
 {
-	if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
+	if (read) {
 		/* "Unrecovered read error" */
 		ctl_set_sense(ctsio,
 			      /*current_error*/ 1,
@@ -658,8 +769,12 @@
 }
 
 void
-ctl_set_lba_out_of_range(struct ctl_scsiio *ctsio)
+ctl_set_lba_out_of_range(struct ctl_scsiio *ctsio, uint64_t lba)
 {
+	uint8_t	info[8];
+
+	scsi_u64to8b(lba, info);
+
 	/* "Logical block address out of range" */
 	ctl_set_sense(ctsio,
 		      /*current_error*/ 1,
@@ -666,6 +781,8 @@
 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
 		      /*asc*/ 0x21,
 		      /*ascq*/ 0x00,
+		      /*type*/ (lba != 0) ? SSD_ELEM_INFO : SSD_ELEM_SKIP,
+		      /*size*/ sizeof(info), /*data*/ &info,
 		      SSD_ELEM_NONE);
 }
 
@@ -682,7 +799,7 @@
 }
 
 void
-ctl_set_lun_not_ready(struct ctl_scsiio *ctsio)
+ctl_set_lun_int_reqd(struct ctl_scsiio *ctsio)
 {
 	/* "Logical unit not ready, manual intervention required" */
 	ctl_set_sense(ctsio,
@@ -689,11 +806,35 @@
 		      /*current_error*/ 1,
 		      /*sense_key*/ SSD_KEY_NOT_READY,
 		      /*asc*/ 0x04,
-		      /*ascq*/ 0x05,
+		      /*ascq*/ 0x03,
 		      SSD_ELEM_NONE);
 }
 
 void
+ctl_set_lun_ejected(struct ctl_scsiio *ctsio)
+{
+	/* "Medium not present - tray open" */
+	ctl_set_sense(ctsio,
+		      /*current_error*/ 1,
+		      /*sense_key*/ SSD_KEY_NOT_READY,
+		      /*asc*/ 0x3A,
+		      /*ascq*/ 0x02,
+		      SSD_ELEM_NONE);
+}
+
+void
+ctl_set_lun_no_media(struct ctl_scsiio *ctsio)
+{
+	/* "Medium not present - tray closed" */
+	ctl_set_sense(ctsio,
+		      /*current_error*/ 1,
+		      /*sense_key*/ SSD_KEY_NOT_READY,
+		      /*asc*/ 0x3A,
+		      /*ascq*/ 0x01,
+		      SSD_ELEM_NONE);
+}
+
+void
 ctl_set_illegal_pr_release(struct ctl_scsiio *ctsio)
 {
 	/* "Invalid release of persistent reservation" */
@@ -706,6 +847,18 @@
 }
 
 void
+ctl_set_lun_transit(struct ctl_scsiio *ctsio)
+{
+	/* "Logical unit not ready, asymmetric access state transition" */
+	ctl_set_sense(ctsio,
+		      /*current_error*/ 1,
+		      /*sense_key*/ SSD_KEY_NOT_READY,
+		      /*asc*/ 0x04,
+		      /*ascq*/ 0x0a,
+		      SSD_ELEM_NONE);
+}
+
+void
 ctl_set_lun_standby(struct ctl_scsiio *ctsio)
 {
 	/* "Logical unit not ready, target port in standby state" */
@@ -718,6 +871,18 @@
 }
 
 void
+ctl_set_lun_unavail(struct ctl_scsiio *ctsio)
+{
+	/* "Logical unit not ready, target port in unavailable state" */
+	ctl_set_sense(ctsio,
+		      /*current_error*/ 1,
+		      /*sense_key*/ SSD_KEY_NOT_READY,
+		      /*asc*/ 0x04,
+		      /*ascq*/ 0x0c,
+		      SSD_ELEM_NONE);
+}
+
+void
 ctl_set_medium_format_corrupted(struct ctl_scsiio *ctsio)
 {
 	/* "Medium format corrupted" */
@@ -756,10 +921,7 @@
 void
 ctl_set_reservation_conflict(struct ctl_scsiio *ctsio)
 {
-	struct scsi_sense_data *sense;
 
-	sense = &ctsio->sense_data;
-	memset(sense, 0, sizeof(*sense));
 	ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
 	ctsio->sense_len = 0;
 	ctsio->io_hdr.status = CTL_SCSI_ERROR;
@@ -768,10 +930,7 @@
 void
 ctl_set_queue_full(struct ctl_scsiio *ctsio)
 {
-	struct scsi_sense_data *sense;
 
-	sense = &ctsio->sense_data;
-	memset(sense, 0, sizeof(*sense));
 	ctsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
 	ctsio->sense_len = 0;
 	ctsio->io_hdr.status = CTL_SCSI_ERROR;
@@ -780,10 +939,7 @@
 void
 ctl_set_busy(struct ctl_scsiio *ctsio)
 {
-	struct scsi_sense_data *sense;
 
-	sense = &ctsio->sense_data;
-	memset(sense, 0, sizeof(*sense));
 	ctsio->scsi_status = SCSI_STATUS_BUSY;
 	ctsio->sense_len = 0;
 	ctsio->io_hdr.status = CTL_SCSI_ERROR;
@@ -790,12 +946,42 @@
 }
 
 void
+ctl_set_task_aborted(struct ctl_scsiio *ctsio)
+{
+
+	ctsio->scsi_status = SCSI_STATUS_TASK_ABORTED;
+	ctsio->sense_len = 0;
+	ctsio->io_hdr.status = CTL_CMD_ABORTED;
+}
+
+void
+ctl_set_hw_write_protected(struct ctl_scsiio *ctsio)
+{
+	/* "Hardware write protected" */
+	ctl_set_sense(ctsio,
+		      /*current_error*/ 1,
+		      /*sense_key*/ SSD_KEY_DATA_PROTECT,
+		      /*asc*/ 0x27,
+		      /*ascq*/ 0x01,
+		      SSD_ELEM_NONE);
+}
+
+void
+ctl_set_space_alloc_fail(struct ctl_scsiio *ctsio)
+{
+	/* "Space allocation failed write protect" */
+	ctl_set_sense(ctsio,
+		      /*current_error*/ 1,
+		      /*sense_key*/ SSD_KEY_DATA_PROTECT,
+		      /*asc*/ 0x27,
+		      /*ascq*/ 0x07,
+		      SSD_ELEM_NONE);
+}
+
+void
 ctl_set_success(struct ctl_scsiio *ctsio)
 {
-	struct scsi_sense_data *sense;
 
-	sense = &ctsio->sense_data;
-	memset(sense, 0, sizeof(*sense));
 	ctsio->scsi_status = SCSI_STATUS_OK;
 	ctsio->sense_len = 0;
 	ctsio->io_hdr.status = CTL_SUCCESS;

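Both ctl_build_qae() and ctl_build_ua() reduce the pending bitmask to a
single unit attention with 1 << (ffs(ua) - 1), so the lowest-numbered
pending UA is reported first and, once delivered, cleared (or widened
to ~0 by ctl_ua_to_ascq() for the reset-class UAs).  A standalone
sketch of that selection step:

#include <strings.h>	/* ffs(); the kernel gets it from libkern */

static unsigned int
pick_and_clear_ua(unsigned int *pending)
{
	unsigned int ua;

	if (*pending == 0)
		return (0);
	ua = 1U << (ffs(*pending) - 1);	/* isolate lowest set bit */
	*pending &= ~ua;		/* mark it delivered */
	return (ua);
}
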
Modified: trunk/sys/cam/ctl/ctl_error.h
===================================================================
--- trunk/sys/cam/ctl/ctl_error.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_error.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,5 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2015 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,8 +29,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_error.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_error.h#1 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_error.h 313365 2017-02-07 01:42:53Z mav $
  */
 /*
  * Function definitions for various error reporting routines used both
@@ -42,12 +44,14 @@
 #ifndef	_CTL_ERROR_H_
 #define	_CTL_ERROR_H_
 
-void ctl_set_sense_data_va(struct scsi_sense_data *sense_data, void *lun,
-			   scsi_sense_data_type sense_format, int current_error,
-			   int sense_key, int asc, int ascq, va_list ap); 
-void ctl_set_sense_data(struct scsi_sense_data *sense_data, void *lun,
-			scsi_sense_data_type sense_format, int current_error,
-			int sense_key, int asc, int ascq, ...); 
+struct ctl_lun;
+
+void ctl_set_sense_data_va(struct scsi_sense_data *sense_data, u_int *sense_len,
+    void *lun, scsi_sense_data_type sense_format, int current_error,
+    int sense_key, int asc, int ascq, va_list ap);
+void ctl_set_sense_data(struct scsi_sense_data *sense_data, u_int *sense_len,
+    void *lun, scsi_sense_data_type sense_format, int current_error,
+    int sense_key, int asc, int ascq, ...);
 void ctl_set_sense(struct ctl_scsiio *ctsio, int current_error, int sense_key,
 		   int asc, int ascq, ...);
 void ctl_sense_to_desc(struct scsi_sense_data_fixed *sense_src,
@@ -55,24 +59,31 @@
 void ctl_sense_to_fixed(struct scsi_sense_data_desc *sense_src,
 			struct scsi_sense_data_fixed *sense_dest);
 void ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq);
-ctl_ua_type ctl_build_ua(ctl_ua_type ua_type, struct scsi_sense_data *sense,
-			 scsi_sense_data_type sense_format);
+ctl_ua_type ctl_build_qae(struct ctl_lun *lun, uint32_t initidx, uint8_t *resp);
+ctl_ua_type ctl_build_ua(struct ctl_lun *lun, uint32_t initidx,
+    struct scsi_sense_data *sense, u_int *sense_len,
+    scsi_sense_data_type sense_format);
 void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio);
 void ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag);
 void ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
 			   int field, int bit_valid, int bit);
+void ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio);
 void ctl_set_invalid_opcode(struct ctl_scsiio *ctsio);
 void ctl_set_param_len_error(struct ctl_scsiio *ctsio);
 void ctl_set_already_locked(struct ctl_scsiio *ctsio);
 void ctl_set_unsupported_lun(struct ctl_scsiio *ctsio);
+void ctl_set_lun_transit(struct ctl_scsiio *ctsio);
 void ctl_set_lun_standby(struct ctl_scsiio *ctsio);
+void ctl_set_lun_unavail(struct ctl_scsiio *ctsio);
 void ctl_set_internal_failure(struct ctl_scsiio *ctsio, int sks_valid,
 			      uint16_t retry_count);
-void ctl_set_medium_error(struct ctl_scsiio *ctsio);
+void ctl_set_medium_error(struct ctl_scsiio *ctsio, int read);
 void ctl_set_aborted(struct ctl_scsiio *ctsio);
-void ctl_set_lba_out_of_range(struct ctl_scsiio *ctsio);
+void ctl_set_lba_out_of_range(struct ctl_scsiio *ctsio, uint64_t lba);
 void ctl_set_lun_stopped(struct ctl_scsiio *ctsio);
-void ctl_set_lun_not_ready(struct ctl_scsiio *ctsio);
+void ctl_set_lun_int_reqd(struct ctl_scsiio *ctsio);
+void ctl_set_lun_ejected(struct ctl_scsiio *ctsio);
+void ctl_set_lun_no_media(struct ctl_scsiio *ctsio);
 void ctl_set_illegal_pr_release(struct ctl_scsiio *ctsio);
 void ctl_set_medium_format_corrupted(struct ctl_scsiio *ctsio);
 void ctl_set_medium_magazine_inaccessible(struct ctl_scsiio *ctsio);
@@ -80,6 +91,9 @@
 void ctl_set_reservation_conflict(struct ctl_scsiio *ctsio);
 void ctl_set_queue_full(struct ctl_scsiio *ctsio);
 void ctl_set_busy(struct ctl_scsiio *ctsio);
+void ctl_set_task_aborted(struct ctl_scsiio *ctsio);
+void ctl_set_hw_write_protected(struct ctl_scsiio *ctsio);
+void ctl_set_space_alloc_fail(struct ctl_scsiio *ctsio);
 void ctl_set_success(struct ctl_scsiio *ctsio);
 
 #endif	/* _CTL_ERROR_H_ */

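Note the widened error API above: ctl_set_medium_error() now
distinguishes reads from writes, and ctl_set_lba_out_of_range() takes
the offending LBA so it can be reported in the sense INFORMATION
field.  A hypothetical caller, not from this commit (maxlba stands in
for the backend's last valid block):

	if (lba + num_blocks > maxlba + 1) {
		/* Report the first block past the end of the LUN. */
		ctl_set_lba_out_of_range(ctsio, MAX(lba, maxlba + 1));
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}
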
Modified: trunk/sys/cam/ctl/ctl_frontend.c
===================================================================
--- trunk/sys/cam/ctl/ctl_frontend.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_frontend.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,5 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2017 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,7 +29,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_frontend.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.c#4 $
  */
 /*
  * CAM Target Layer front end interface code
@@ -36,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_frontend.c 314755 2017-03-06 06:36:45Z mav $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -55,7 +57,6 @@
 #include <cam/ctl/ctl_io.h>
 #include <cam/ctl/ctl.h>
 #include <cam/ctl/ctl_frontend.h>
-#include <cam/ctl/ctl_frontend_internal.h>
 #include <cam/ctl/ctl_backend.h>
 /* XXX KDM move defines from ctl_ioctl.h to somewhere else */
 #include <cam/ctl/ctl_ioctl.h>
@@ -66,28 +67,118 @@
 extern struct ctl_softc *control_softc;
 
 int
-ctl_frontend_register(struct ctl_frontend *fe, int master_shelf)
+ctl_frontend_register(struct ctl_frontend *fe)
 {
-	struct ctl_io_pool *pool;
+	struct ctl_softc *softc = control_softc;
+	struct ctl_frontend *fe_tmp;
+	int error;
+
+	KASSERT(softc != NULL, ("CTL is not initialized"));
+
+	/* Sanity check, make sure this isn't a duplicate registration. */
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(fe_tmp, &softc->fe_list, links) {
+		if (strcmp(fe_tmp->name, fe->name) == 0) {
+			mtx_unlock(&softc->ctl_lock);
+			return (-1);
+		}
+	}
+	mtx_unlock(&softc->ctl_lock);
+	STAILQ_INIT(&fe->port_list);
+
+	/* Call the frontend's initialization routine. */
+	if (fe->init != NULL) {
+		if ((error = fe->init()) != 0) {
+			printf("%s frontend init error: %d\n",
+			    fe->name, error);
+			return (error);
+		}
+	}
+
+	mtx_lock(&softc->ctl_lock);
+	softc->num_frontends++;
+	STAILQ_INSERT_TAIL(&softc->fe_list, fe, links);
+	mtx_unlock(&softc->ctl_lock);
+	return (0);
+}
+
+int
+ctl_frontend_deregister(struct ctl_frontend *fe)
+{
+	struct ctl_softc *softc = control_softc;
+	int error;
+
+	/* Call the frontend's shutdown routine. */
+	if (fe->shutdown != NULL) {
+		if ((error = fe->shutdown()) != 0) {
+			printf("%s frontend shutdown error: %d\n",
+			    fe->name, error);
+			return (error);
+		}
+	}
+
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_REMOVE(&softc->fe_list, fe, ctl_frontend, links);
+	softc->num_frontends--;
+	mtx_unlock(&softc->ctl_lock);
+	return (0);
+}
+
+struct ctl_frontend *
+ctl_frontend_find(char *frontend_name)
+{
+	struct ctl_softc *softc = control_softc;
+	struct ctl_frontend *fe;
+
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_FOREACH(fe, &softc->fe_list, links) {
+		if (strcmp(fe->name, frontend_name) == 0) {
+			mtx_unlock(&softc->ctl_lock);
+			return (fe);
+		}
+	}
+	mtx_unlock(&softc->ctl_lock);
+	return (NULL);
+}
+
+int
+ctl_port_register(struct ctl_port *port)
+{
+	struct ctl_softc *softc = control_softc;
+	struct ctl_port *tport, *nport;
+	void *pool;
 	int port_num;
 	int retval;
 
-	retval = 0;
+	KASSERT(softc != NULL, ("CTL is not initialized"));
+	port->ctl_softc = softc;
 
-	KASSERT(control_softc != NULL, ("CTL is not initialized"));
-
-	mtx_lock(&control_softc->ctl_lock);
-	port_num = ctl_ffz(&control_softc->ctl_port_mask, CTL_MAX_PORTS);
-	if ((port_num == -1)
-	 || (ctl_set_mask(&control_softc->ctl_port_mask, port_num) == -1)) {
-		fe->targ_port = -1;
-		mtx_unlock(&control_softc->ctl_lock);
+	mtx_lock(&softc->ctl_lock);
+	if (port->targ_port >= 0)
+		port_num = port->targ_port;
+	else
+		port_num = ctl_ffz(softc->ctl_port_mask,
+		    softc->port_min, softc->port_max);
+	if ((port_num < 0) ||
+	    (ctl_set_mask(softc->ctl_port_mask, port_num) < 0)) {
+		mtx_unlock(&softc->ctl_lock);
 		return (1);
 	}
-	control_softc->num_frontends++;
+	softc->num_ports++;
+	mtx_unlock(&softc->ctl_lock);
 
-	mtx_unlock(&control_softc->ctl_lock);
 	/*
+	 * Initialize the initiator and portname mappings
+	 */
+	port->max_initiators = CTL_MAX_INIT_PER_PORT;
+	port->wwpn_iid = malloc(sizeof(*port->wwpn_iid) * port->max_initiators,
+	    M_CTL, M_NOWAIT | M_ZERO);
+	if (port->wwpn_iid == NULL) {
+		retval = ENOMEM;
+		goto error;
+	}
+
+	/*
 	 * We add 20 to whatever the caller requests, so he doesn't get
 	 * burned by queueing things back to the pending sense queue.  In
 	 * theory, there should probably only be one outstanding item, at
@@ -95,92 +186,203 @@
 	 * pending sense queue on the next command, whether or not it is
 	 * a REQUEST SENSE.
 	 */
-	retval = ctl_pool_create(control_softc,
-				 (fe->port_type != CTL_PORT_IOCTL) ?
-				 CTL_POOL_FETD : CTL_POOL_IOCTL,
-				 fe->num_requested_ctl_io + 20, &pool);
+	retval = ctl_pool_create(softc, port->port_name,
+				 port->num_requested_ctl_io + 20, &pool);
 	if (retval != 0) {
-		fe->targ_port = -1;
-		mtx_lock(&control_softc->ctl_lock);
-		ctl_clear_mask(&control_softc->ctl_port_mask, port_num);
-		mtx_unlock(&control_softc->ctl_lock);
+		free(port->wwpn_iid, M_CTL);
+error:
+		port->targ_port = -1;
+		mtx_lock(&softc->ctl_lock);
+		ctl_clear_mask(softc->ctl_port_mask, port_num);
+		mtx_unlock(&softc->ctl_lock);
 		return (retval);
 	}
+	port->targ_port = port_num;
+	port->ctl_pool_ref = pool;
+	if (port->options.stqh_first == NULL)
+		STAILQ_INIT(&port->options);
+	port->stats.item = port_num;
+	mtx_init(&port->port_lock, "CTL port", NULL, MTX_DEF);
 
-	mtx_lock(&control_softc->ctl_lock);
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_INSERT_TAIL(&port->frontend->port_list, port, fe_links);
+	for (tport = NULL, nport = STAILQ_FIRST(&softc->port_list);
+	    nport != NULL && nport->targ_port < port_num;
+	    tport = nport, nport = STAILQ_NEXT(tport, links)) {
+	}
+	if (tport)
+		STAILQ_INSERT_AFTER(&softc->port_list, tport, port, links);
+	else
+		STAILQ_INSERT_HEAD(&softc->port_list, port, links);
+	softc->ctl_ports[port->targ_port] = port;
+	mtx_unlock(&softc->ctl_lock);
 
-	/* For now assume master shelf */
-	//fe->targ_port = port_num;
-	fe->targ_port = port_num + (master_shelf!=0 ? 0 : CTL_MAX_PORTS);
-	fe->max_initiators = CTL_MAX_INIT_PER_PORT;
-	STAILQ_INSERT_TAIL(&control_softc->fe_list, fe, links);
-	ctl_pool_acquire(pool);
-	control_softc->ctl_ports[port_num] = fe;
-
-	mtx_unlock(&control_softc->ctl_lock);
-
-	fe->ctl_pool_ref = pool;
-
 	return (retval);
 }
 
 int
-ctl_frontend_deregister(struct ctl_frontend *fe)
+ctl_port_deregister(struct ctl_port *port)
 {
-	struct ctl_io_pool *pool;
-	int port_num;
-	int retval;
+	struct ctl_softc *softc = port->ctl_softc;
+	struct ctl_io_pool *pool = (struct ctl_io_pool *)port->ctl_pool_ref;
+	int i;
 
-	retval = 0;
+	if (port->targ_port == -1)
+		return (1);
 
-	pool = (struct ctl_io_pool *)fe->ctl_pool_ref;
+	mtx_lock(&softc->ctl_lock);
+	STAILQ_REMOVE(&softc->port_list, port, ctl_port, links);
+	STAILQ_REMOVE(&port->frontend->port_list, port, ctl_port, fe_links);
+	softc->num_ports--;
+	ctl_clear_mask(softc->ctl_port_mask, port->targ_port);
+	softc->ctl_ports[port->targ_port] = NULL;
+	mtx_unlock(&softc->ctl_lock);
 
-	if (fe->targ_port == -1) {
-		retval = 1;
-		goto bailout;
-	}
+	ctl_pool_free(pool);
+	ctl_free_opts(&port->options);
 
-	mtx_lock(&control_softc->ctl_lock);
+	ctl_lun_map_deinit(port);
+	free(port->port_devid, M_CTL);
+	port->port_devid = NULL;
+	free(port->target_devid, M_CTL);
+	port->target_devid = NULL;
+	free(port->init_devid, M_CTL);
+	port->init_devid = NULL;
+	for (i = 0; i < port->max_initiators; i++)
+		free(port->wwpn_iid[i].name, M_CTL);
+	free(port->wwpn_iid, M_CTL);
+	mtx_destroy(&port->port_lock);
 
-	ctl_pool_invalidate(pool);
-	ctl_pool_release(pool);
-
-	STAILQ_REMOVE(&control_softc->fe_list, fe, ctl_frontend, links);
-	control_softc->num_frontends--;
-	port_num = (fe->targ_port < CTL_MAX_PORTS) ? fe->targ_port :
-	                                             fe->targ_port - CTL_MAX_PORTS;
-	ctl_clear_mask(&control_softc->ctl_port_mask, port_num);
-	control_softc->ctl_ports[port_num] = NULL;
-	mtx_unlock(&control_softc->ctl_lock);
-bailout:
-	return (retval);
+	return (0);
 }
 
 void
-ctl_frontend_set_wwns(struct ctl_frontend *fe, int wwnn_valid, uint64_t wwnn,
+ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid, uint64_t wwnn,
 		      int wwpn_valid, uint64_t wwpn)
 {
-	if (wwnn_valid)
-		fe->wwnn = wwnn;
+	struct scsi_vpd_id_descriptor *desc;
+	int len, proto;
 
-	if (wwpn_valid)
-		fe->wwpn = wwpn;
+	if (port->port_type == CTL_PORT_FC)
+		proto = SCSI_PROTO_FC << 4;
+	else if (port->port_type == CTL_PORT_SAS)
+		proto = SCSI_PROTO_SAS << 4;
+	else if (port->port_type == CTL_PORT_ISCSI)
+		proto = SCSI_PROTO_ISCSI << 4;
+	else
+		proto = SCSI_PROTO_SPI << 4;
+
+	if (wwnn_valid) {
+		port->wwnn = wwnn;
+
+		free(port->target_devid, M_CTL);
+
+		len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN;
+		port->target_devid = malloc(sizeof(struct ctl_devid) + len,
+		    M_CTL, M_WAITOK | M_ZERO);
+		port->target_devid->len = len;
+		desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data;
+		desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+		desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET |
+		    SVPD_ID_TYPE_NAA;
+		desc->length = CTL_WWPN_LEN;
+		scsi_u64to8b(port->wwnn, desc->identifier);
+	}
+
+	if (wwpn_valid) {
+		port->wwpn = wwpn;
+
+		free(port->port_devid, M_CTL);
+
+		len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN;
+		port->port_devid = malloc(sizeof(struct ctl_devid) + len,
+		    M_CTL, M_WAITOK | M_ZERO);
+		port->port_devid->len = len;
+		desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data;
+		desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+		desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+		    SVPD_ID_TYPE_NAA;
+		desc->length = CTL_WWPN_LEN;
+		scsi_u64to8b(port->wwpn, desc->identifier);
+	}
 }
 
 void
-ctl_frontend_online(struct ctl_frontend *fe)
+ctl_port_online(struct ctl_port *port)
 {
-	fe->port_online(fe->onoff_arg);
-	/* XXX KDM need a lock here? */
-	fe->status |= CTL_PORT_STATUS_ONLINE;
+	struct ctl_softc *softc = port->ctl_softc;
+	struct ctl_lun *lun;
+	const char *value;
+	uint32_t l;
+
+	if (port->lun_enable != NULL) {
+		if (port->lun_map) {
+			for (l = 0; l < port->lun_map_size; l++) {
+				if (ctl_lun_map_from_port(port, l) ==
+				    UINT32_MAX)
+					continue;
+				port->lun_enable(port->targ_lun_arg, l);
+			}
+		} else {
+			STAILQ_FOREACH(lun, &softc->lun_list, links)
+				port->lun_enable(port->targ_lun_arg, lun->lun);
+		}
+	}
+	if (port->port_online != NULL)
+		port->port_online(port->onoff_arg);
+	mtx_lock(&softc->ctl_lock);
+	if (softc->is_single == 0) {
+		value = ctl_get_opt(&port->options, "ha_shared");
+		if (value != NULL && strcmp(value, "on") == 0)
+			port->status |= CTL_PORT_STATUS_HA_SHARED;
+		else
+			port->status &= ~CTL_PORT_STATUS_HA_SHARED;
+	}
+	port->status |= CTL_PORT_STATUS_ONLINE;
+	STAILQ_FOREACH(lun, &softc->lun_list, links) {
+		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+			continue;
+		mtx_lock(&lun->lun_lock);
+		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
+		mtx_unlock(&lun->lun_lock);
+	}
+	mtx_unlock(&softc->ctl_lock);
+	ctl_isc_announce_port(port);
 }
 
 void
-ctl_frontend_offline(struct ctl_frontend *fe)
+ctl_port_offline(struct ctl_port *port)
 {
-	fe->port_offline(fe->onoff_arg);
-	/* XXX KDM need a lock here? */
-	fe->status &= ~CTL_PORT_STATUS_ONLINE;
+	struct ctl_softc *softc = port->ctl_softc;
+	struct ctl_lun *lun;
+	uint32_t l;
+
+	if (port->port_offline != NULL)
+		port->port_offline(port->onoff_arg);
+	if (port->lun_disable != NULL) {
+		if (port->lun_map) {
+			for (l = 0; l < port->lun_map_size; l++) {
+				if (ctl_lun_map_from_port(port, l) ==
+				    UINT32_MAX)
+					continue;
+				port->lun_disable(port->targ_lun_arg, l);
+			}
+		} else {
+			STAILQ_FOREACH(lun, &softc->lun_list, links)
+				port->lun_disable(port->targ_lun_arg, lun->lun);
+		}
+	}
+	mtx_lock(&softc->ctl_lock);
+	port->status &= ~CTL_PORT_STATUS_ONLINE;
+	STAILQ_FOREACH(lun, &softc->lun_list, links) {
+		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+			continue;
+		mtx_lock(&lun->lun_lock);
+		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
+		mtx_unlock(&lun->lun_lock);
+	}
+	mtx_unlock(&softc->ctl_lock);
+	ctl_isc_announce_port(port);
 }
 
 /*

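Registration is now two-stage: a driver registers a single struct
ctl_frontend at module load (whose init/shutdown hooks run from the module
event handler) and then one struct ctl_port per transport instance;
ctl_port_register() above picks the port number, allocates the wwpn_iid
table and the per-port I/O pool. A sketch of the bring-up sequence a FETD
follows under the new API; the myfe_* names and softc are illustrative and
error handling is abbreviated:

	/*
	 * Illustrative port bring-up under the split frontend/port
	 * API.  myfe_frontend, myfe_datamove and myfe_done are
	 * assumed to exist elsewhere in the driver.
	 */
	static int
	myfe_attach(struct myfe_softc *sc)
	{
		struct ctl_port *port = &sc->port;

		port->frontend = &myfe_frontend;
		port->port_type = CTL_PORT_INTERNAL;
		port->num_requested_ctl_io = 256;
		port->port_name = "myfe0";
		port->fe_datamove = myfe_datamove;
		port->fe_done = myfe_done;
		port->targ_port = -1;	/* let CTL choose the port number */

		if (ctl_port_register(port) != 0)
			return (ENXIO);
		ctl_port_online(port);	/* begin accepting I/O */
		return (0);
	}
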
Modified: trunk/sys/cam/ctl/ctl_frontend.h
===================================================================
--- trunk/sys/cam/ctl/ctl_frontend.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_frontend.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,5 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2017 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,8 +29,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_frontend.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.h#2 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_frontend.h 313369 2017-02-07 01:56:26Z mav $
  */
 /*
  * CAM Target Layer front end registration hooks
@@ -39,17 +41,55 @@
 #ifndef	_CTL_FRONTEND_H_
 #define	_CTL_FRONTEND_H_
 
+#include <cam/ctl/ctl_ioctl.h>
+
 typedef enum {
 	CTL_PORT_STATUS_NONE		= 0x00,
 	CTL_PORT_STATUS_ONLINE		= 0x01,
-	CTL_PORT_STATUS_TARG_ONLINE	= 0x02,
-	CTL_PORT_STATUS_LUN_ONLINE	= 0x04
+	CTL_PORT_STATUS_HA_SHARED	= 0x02
 } ctl_port_status;
 
+typedef int (*fe_init_t)(void);
+typedef int (*fe_shutdown_t)(void);
 typedef void (*port_func_t)(void *onoff_arg);
-typedef int (*targ_func_t)(void *arg, struct ctl_id targ_id);
-typedef	int (*lun_func_t)(void *arg, struct ctl_id targ_id, int lun_id);
+typedef int (*port_info_func_t)(void *onoff_arg, struct sbuf *sb);
+typedef	int (*lun_func_t)(void *arg, int lun_id);
+typedef int (*fe_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+			  struct thread *td);
 
+#define CTL_FRONTEND_DECLARE(name, driver) \
+	static int name ## _modevent(module_t mod, int type, void *data) \
+	{ \
+		switch (type) { \
+		case MOD_LOAD: \
+			return (ctl_frontend_register( \
+				(struct ctl_frontend *)data)); \
+			break; \
+		case MOD_UNLOAD: \
+			return (ctl_frontend_deregister( \
+				(struct ctl_frontend *)data)); \
+			break; \
+		default: \
+			return EOPNOTSUPP; \
+		} \
+		return 0; \
+	} \
+	static moduledata_t name ## _mod = { \
+		#name, \
+		name ## _modevent, \
+		(void *)&driver \
+	}; \
+	DECLARE_MODULE(name, name ## _mod, SI_SUB_CONFIGURE, SI_ORDER_FOURTH); \
+	MODULE_DEPEND(name, ctl, 1, 1, 1); \
+	MODULE_DEPEND(name, cam, 1, 1, 1)
+
+struct ctl_wwpn_iid {
+	int in_use;
+	time_t last_use;
+	uint64_t wwpn;
+	char *name;
+};
+
 /*
  * The ctl_frontend structure is the registration mechanism between a FETD
  * (Front End Target Driver) and the CTL layer.  Here is a description of
@@ -91,41 +131,17 @@
  * port_online():	  This function is called, with onoff_arg as its
  *			  argument, by the CTL layer when it wants the FETD
  *			  to start responding to selections on the specified
- * 			  target ID.  (targ_target)
+ * 			  target ID.
  *
  * port_offline():	  This function is called, with onoff_arg as its
  *			  argument, by the CTL layer when it wants the FETD
  * 			  to stop responding to selection on the specified
- * 			  target ID.  (targ_target)
+ * 			  target ID.
  *
  * onoff_arg:		  This is supplied as an argument to port_online()
  *			  and port_offline().  This is specified by the
  *			  FETD.
  *
- * targ_enable():	  This function is called, with targ_lun_arg and a
- * 			  target ID as its arguments, by CTL when it wants
- *			  the FETD to enable a particular target.  targ_enable()
- *			  will always be called for a particular target ID
- * 			  before any LUN is enabled for that target.  If the
- *			  FETD does not support enabling targets, but rather
- *			  LUNs, it should ignore this call and return 0.  If
- *			  the FETD does support enabling targets, it should
- *			  return 0 for success and non-zero if it cannot
- *			  enable the given target.
- *
- *			  TODO:  Add the ability to specify a WWID here.
- *
- * targ_disable():	  This function is called, with targ_lun_arg and a
- *			  target ID as its arguments, by CTL when it wants
- *			  the FETD to disable a particular target.
- *			  targ_disable() will always be called for a
- *			  particular target ID after all LUNs are disabled
- *			  on that particular target.  If the FETD does not
- *			  support enabling targets, it should ignore this
- *			  call and return 0.  If the FETD does support
- *			  enabling targets, it should return 0 for success,
- *			  and non-zero if it cannot disable the given target.
- *
  * lun_enable():	  This function is called, with targ_lun_arg, a target
  *			  ID and a LUN ID as its arguments, by CTL when it
  *			  wants the FETD to enable a particular LUN.  If the
@@ -200,7 +216,9 @@
  * links:		  Linked list pointers, used by CTL.  The FETD
  *			  shouldn't touch this field.
  */
-struct ctl_frontend {
+struct ctl_port {
+	struct ctl_softc *ctl_softc;
+	struct ctl_frontend *frontend;
 	ctl_port_type	port_type;		/* passed to CTL */
 	int		num_requested_ctl_io;	/* passed to CTL */
 	char		*port_name;		/* passed to CTL */
@@ -208,23 +226,41 @@
 	int		virtual_port;		/* passed to CTL */
 	port_func_t	port_online;		/* passed to CTL */
 	port_func_t	port_offline;		/* passed to CTL */
+	port_info_func_t port_info;		/* passed to CTL */
 	void		*onoff_arg;		/* passed to CTL */
-	targ_func_t	targ_enable;		/* passed to CTL */
-	targ_func_t	targ_disable;		/* passed to CTL */
 	lun_func_t	lun_enable;		/* passed to CTL */
 	lun_func_t	lun_disable;		/* passed to CTL */
+	int		lun_map_size;		/* passed to CTL */
+	uint32_t	*lun_map;		/* passed to CTL */
 	void		*targ_lun_arg;		/* passed to CTL */
 	void		(*fe_datamove)(union ctl_io *io); /* passed to CTL */
 	void		(*fe_done)(union ctl_io *io); /* passed to CTL */
-	void		(*fe_dump)(void);	/* passed to CTL */
 	int		max_targets;		/* passed to CTL */
 	int		max_target_id;		/* passed to CTL */
 	int32_t		targ_port;		/* passed back to FETD */
 	void		*ctl_pool_ref;		/* passed back to FETD */
 	uint32_t	max_initiators;		/* passed back to FETD */
+	struct ctl_wwpn_iid *wwpn_iid;		/* used by CTL */
 	uint64_t	wwnn;			/* set by CTL before online */
 	uint64_t	wwpn;			/* set by CTL before online */
 	ctl_port_status	status;			/* used by CTL */
+	ctl_options_t	options;		/* passed to CTL */
+	struct ctl_devid *port_devid;		/* passed to CTL */
+	struct ctl_devid *target_devid;		/* passed to CTL */
+	struct ctl_devid *init_devid;		/* passed to CTL */
+	struct ctl_io_stats stats;		/* used by CTL */
+	struct mtx	port_lock;		/* used by CTL */
+	STAILQ_ENTRY(ctl_port) fe_links;	/* used by CTL */
+	STAILQ_ENTRY(ctl_port) links;		/* used by CTL */
+};
+
+struct ctl_frontend {
+	char		name[CTL_DRIVER_NAME_LEN];	/* passed to CTL */
+	fe_init_t	init;			/* passed to CTL */
+	fe_ioctl_t	ioctl;			/* passed to CTL */
+	void		(*fe_dump)(void);	/* passed to CTL */
+	fe_shutdown_t	shutdown;		/* passed to CTL */
+	STAILQ_HEAD(, ctl_port) port_list;	/* used by CTL */
 	STAILQ_ENTRY(ctl_frontend) links;	/* used by CTL */
 };
 
@@ -232,7 +268,7 @@
  * This may block until resources are allocated.  Called at FETD module load
  * time. Returns 0 for success, non-zero for failure.
  */
-int ctl_frontend_register(struct ctl_frontend *fe, int master_SC);
+int ctl_frontend_register(struct ctl_frontend *fe);
 
 /*
  * Called at FETD module unload time.
@@ -241,20 +277,37 @@
 int ctl_frontend_deregister(struct ctl_frontend *fe);
 
 /*
+ * Find the frontend by its name. Returns NULL if not found.
+ */
+struct ctl_frontend * ctl_frontend_find(char *frontend_name);
+
+/*
+ * This may block until resources are allocated.  Called at FETD module load
+ * time. Returns 0 for success, non-zero for failure.
+ */
+int ctl_port_register(struct ctl_port *port);
+
+/*
+ * Called at FETD module unload time.
+ * Returns 0 for success, non-zero for failure.
+ */
+int ctl_port_deregister(struct ctl_port *port);
+
+/*
  * Called to set the WWNN and WWPN for a particular frontend.
  */
-void ctl_frontend_set_wwns(struct ctl_frontend *fe, int wwnn_valid,
+void ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid,
 			   uint64_t wwnn, int wwpn_valid, uint64_t wwpn);
 
 /*
  * Called to bring a particular frontend online.
  */
-void ctl_frontend_online(struct ctl_frontend *fe);
+void ctl_port_online(struct ctl_port *fe);
 
 /*
  * Called to take a particular frontend offline.
  */
-void ctl_frontend_offline(struct ctl_frontend *fe);
+void ctl_port_offline(struct ctl_port *fe);
 
 /*
  * This routine queues I/O and task management requests from the FETD to the
@@ -275,21 +328,18 @@
 int ctl_queue_sense(union ctl_io *io);
 
 /*
- * This routine adds an initiator to CTL's port database.  The WWPN should
- * be the FC WWPN, if available.  The targ_port field should be the same as
- * the targ_port passed back from CTL in the ctl_frontend structure above.
+ * This routine adds an initiator to CTL's port database.
  * The iid field should be the same as the iid passed in the nexus of each
  * ctl_io from this initiator.
+ * The WWPN should be the FC WWPN, if available.
  */
-int ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid);
+int ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name);
 
 /*
- * This routine will remove an initiator from CTL's port database.  The
- * targ_port field should be the same as the targ_port passed back in the
- * ctl_frontend structure above.  The iid field should be the same as the
- * iid passed in the nexus of each ctl_io from this initiator.
+ * This routine will remove an initiator from CTL's port database.
+ * The iid field should be the same as the iid passed in the nexus of each
+ * ctl_io from this initiator.
  */
-int
-ctl_remove_initiator(int32_t targ_port, uint32_t iid);
+int ctl_remove_initiator(struct ctl_port *port, int iid);
 
 #endif	/* _CTL_FRONTEND_H_ */

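The CTL_FRONTEND_DECLARE() macro above folds the module boilerplate the
camsim frontend used to carry by hand into one line: MOD_LOAD registers
the frontend (which invokes its .init hook) and MOD_UNLOAD deregisters it
(invoking .shutdown). Usage mirrors the camsim and ioctl frontends added
in this commit; the cfex_* names here are hypothetical:

	/*
	 * Declare a frontend module.  cfex_init()/cfex_shutdown()
	 * are this driver's load/unload hooks, called through
	 * ctl_frontend_register()/ctl_frontend_deregister().
	 */
	static int cfex_init(void);
	static int cfex_shutdown(void);

	static struct ctl_frontend cfex_frontend = {
		.name = "example",
		.init = cfex_init,
		.shutdown = cfex_shutdown,
	};
	CTL_FRONTEND_DECLARE(ctlcfex, cfex_frontend);
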
Modified: trunk/sys/cam/ctl/ctl_frontend_cam_sim.c
===================================================================
--- trunk/sys/cam/ctl/ctl_frontend_cam_sim.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_frontend_cam_sim.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2009 Silicon Graphics International Corp.
  * All rights reserved.
@@ -27,7 +28,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_frontend_cam_sim.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_cam_sim.c#4 $
  */
 /*
  * CTL frontend to CAM SIM interface.  This allows access to CTL LUNs via
@@ -37,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_frontend_cam_sim.c 315813 2017-03-23 06:41:13Z mav $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -64,8 +65,6 @@
 #include <cam/ctl/ctl_io.h>
 #include <cam/ctl/ctl.h>
 #include <cam/ctl/ctl_frontend.h>
-#include <cam/ctl/ctl_frontend_internal.h>
-#include <cam/ctl/ctl_mem_pool.h>
 #include <cam/ctl/ctl_debug.h>
 
 #define	io_ptr		spriv_ptr1
@@ -75,13 +74,12 @@
 };
 
 struct cfcs_softc {
-	struct ctl_frontend fe;
+	struct ctl_port port;
 	char port_name[32];
 	struct cam_sim *sim;
 	struct cam_devq *devq;
 	struct cam_path *path;
 	struct mtx lock;
-	char lock_desc[32];
 	uint64_t wwnn;
 	uint64_t wwpn;
 	uint32_t cur_tag_num;
@@ -93,33 +91,26 @@
  * handle physical addresses yet.  That would require mapping things in
  * order to do the copy.
  */
-#define	CFCS_BAD_CCB_FLAGS (CAM_DATA_PHYS | CAM_SG_LIST_PHYS | \
-	CAM_MSG_BUF_PHYS | CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR |\
+#define	CFCS_BAD_CCB_FLAGS (CAM_DATA_ISPHYS | CAM_MSG_BUF_PHYS |	\
+	CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR |		\
 	CAM_SENSE_PHYS)
 
-int cfcs_init(void);
-void cfcs_shutdown(void);
+static int cfcs_init(void);
+static int cfcs_shutdown(void);
 static void cfcs_poll(struct cam_sim *sim);
 static void cfcs_online(void *arg);
 static void cfcs_offline(void *arg);
-static int cfcs_targ_enable(void *arg, struct ctl_id targ_id);
-static int cfcs_targ_disable(void *arg, struct ctl_id targ_id);
-static int cfcs_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
-static int cfcs_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
 static void cfcs_datamove(union ctl_io *io);
 static void cfcs_done(union ctl_io *io);
 void cfcs_action(struct cam_sim *sim, union ccb *ccb);
-static void cfcs_async(void *callback_arg, uint32_t code,
-		       struct cam_path *path, void *arg);
 
 struct cfcs_softc cfcs_softc;
 /*
- * This is primarly intended to allow for error injection to test the CAM
+ * This is primarily intended to allow for error injection to test the CAM
  * sense data and sense residual handling code.  This sets the maximum
  * amount of SCSI sense data that we will report to CAM.
  */
 static int cfcs_max_sense = sizeof(struct scsi_sense_data);
-extern int ctl_disable;
 
 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl2cam, CTLFLAG_RD, 0,
 	    "CAM Target Layer SIM frontend");
@@ -126,65 +117,47 @@
 SYSCTL_INT(_kern_cam_ctl2cam, OID_AUTO, max_sense, CTLFLAG_RW,
            &cfcs_max_sense, 0, "Maximum sense data size");
 
-static int cfcs_module_event_handler(module_t, int /*modeventtype_t*/, void *);
-
-static moduledata_t cfcs_moduledata = {
-	"ctlcfcs",
-	cfcs_module_event_handler,
-	NULL
+static struct ctl_frontend cfcs_frontend =
+{
+	.name = "camsim",
+	.init = cfcs_init,
+	.shutdown = cfcs_shutdown,
 };
+CTL_FRONTEND_DECLARE(ctlcfcs, cfcs_frontend);
 
-DECLARE_MODULE(ctlcfcs, cfcs_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
-MODULE_VERSION(ctlcfcs, 1);
-MODULE_DEPEND(ctlcfi, ctl, 1, 1, 1);
-MODULE_DEPEND(ctlcfi, cam, 1, 1, 1);
-
-int
+static int
 cfcs_init(void)
 {
 	struct cfcs_softc *softc;
-	struct ccb_setasync csa;
-	struct ctl_frontend *fe;
-#ifdef NEEDTOPORT
-	char wwnn[8];
-#endif
+	struct ctl_port *port;
 	int retval;
 
-	/* Don't continue if CTL is disabled */
-	if (ctl_disable != 0)
-		return (0);
-
 	softc = &cfcs_softc;
-	retval = 0;
 	bzero(softc, sizeof(*softc));
-	sprintf(softc->lock_desc, "ctl2cam");
-	mtx_init(&softc->lock, softc->lock_desc, NULL, MTX_DEF);
-	fe = &softc->fe;
+	mtx_init(&softc->lock, "ctl2cam", NULL, MTX_DEF);
+	port = &softc->port;
 
-	fe->port_type = CTL_PORT_INTERNAL;
+	port->frontend = &cfcs_frontend;
+	port->port_type = CTL_PORT_INTERNAL;
 	/* XXX KDM what should the real number be here? */
-	fe->num_requested_ctl_io = 4096;
-	snprintf(softc->port_name, sizeof(softc->port_name), "ctl2cam");
-	fe->port_name = softc->port_name;
-	fe->port_online = cfcs_online;
-	fe->port_offline = cfcs_offline;
-	fe->onoff_arg = softc;
-	fe->targ_enable = cfcs_targ_enable;
-	fe->targ_disable = cfcs_targ_disable;
-	fe->lun_enable = cfcs_lun_enable;
-	fe->lun_disable = cfcs_lun_disable;
-	fe->targ_lun_arg = softc;
-	fe->fe_datamove = cfcs_datamove;
-	fe->fe_done = cfcs_done;
+	port->num_requested_ctl_io = 4096;
+	snprintf(softc->port_name, sizeof(softc->port_name), "camsim");
+	port->port_name = softc->port_name;
+	port->port_online = cfcs_online;
+	port->port_offline = cfcs_offline;
+	port->onoff_arg = softc;
+	port->fe_datamove = cfcs_datamove;
+	port->fe_done = cfcs_done;
 
 	/* XXX KDM what should we report here? */
 	/* XXX These should probably be fetched from CTL. */
-	fe->max_targets = 1;
-	fe->max_target_id = 15;
+	port->max_targets = 1;
+	port->max_target_id = 15;
+	port->targ_port = -1;
 
-	retval = ctl_frontend_register(fe, /*master_SC*/ 1);
+	retval = ctl_port_register(port);
 	if (retval != 0) {
-		printf("%s: ctl_frontend_register() failed with error %d!\n",
+		printf("%s: ctl_port_register() failed with error %d!\n",
 		       __func__, retval);
 		mtx_destroy(&softc->lock);
 		return (retval);
@@ -191,19 +164,10 @@
 	}
 
 	/*
-	 * Get the WWNN out of the database, and create a WWPN as well.
-	 */
-#ifdef NEEDTOPORT
-	ddb_GetWWNN((char *)wwnn);
-	softc->wwnn = be64dec(wwnn);
-	softc->wwpn = softc->wwnn + (softc->fe.targ_port & 0xff);
-#endif
-
-	/*
 	 * If the CTL frontend didn't tell us what our WWNN/WWPN is, go
 	 * ahead and set something random.
 	 */
-	if (fe->wwnn == 0) {
+	if (port->wwnn == 0) {
 		uint64_t random_bits;
 
 		arc4rand(&random_bits, sizeof(random_bits), 0);
@@ -210,16 +174,15 @@
 		softc->wwnn = (random_bits & 0x0000000fffffff00ULL) |
 			/* Company ID */ 0x5000000000000000ULL |
 			/* NL-Port */    0x0300;
-		softc->wwpn = softc->wwnn + fe->targ_port + 1;
-		fe->wwnn = softc->wwnn;
-		fe->wwpn = softc->wwpn;
+		softc->wwpn = softc->wwnn + port->targ_port + 1;
+		ctl_port_set_wwns(port, true, softc->wwnn, true, softc->wwpn);
 	} else {
-		softc->wwnn = fe->wwnn;
-		softc->wwpn = fe->wwpn;
+		softc->wwnn = port->wwnn;
+		softc->wwpn = port->wwpn;
 	}
 
 	mtx_lock(&softc->lock);
-	softc->devq = cam_simq_alloc(fe->num_requested_ctl_io);
+	softc->devq = cam_simq_alloc(port->num_requested_ctl_io);
 	if (softc->devq == NULL) {
 		printf("%s: error allocating devq\n", __func__);
 		retval = ENOMEM;
@@ -228,7 +191,7 @@
 
 	softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name,
 				   softc, /*unit*/ 0, &softc->lock, 1,
-				   fe->num_requested_ctl_io, softc->devq);
+				   port->num_requested_ctl_io, softc->devq);
 	if (softc->sim == NULL) {
 		printf("%s: error allocating SIM\n", __func__);
 		retval = ENOMEM;
@@ -251,13 +214,6 @@
 		goto bailout;
 	}
 
-	xpt_setup_ccb(&csa.ccb_h, softc->path, CAM_PRIORITY_NONE);
-	csa.ccb_h.func_code = XPT_SASYNC_CB;
-	csa.event_enable = AC_LOST_DEVICE;
-	csa.callback = cfcs_async;
-        csa.callback_arg = softc->sim;
-        xpt_action((union ccb *)&csa);
-
 	mtx_unlock(&softc->lock);
 
 	return (retval);
@@ -273,30 +229,31 @@
 	return (retval);
 }
 
-static void
-cfcs_poll(struct cam_sim *sim)
+static int
+cfcs_shutdown(void)
 {
+	struct cfcs_softc *softc = &cfcs_softc;
+	struct ctl_port *port = &softc->port;
+	int error;
 
-}
+	ctl_port_offline(port);
 
-void
-cfcs_shutdown(void)
-{
+	mtx_lock(&softc->lock);
+	xpt_free_path(softc->path);
+	xpt_bus_deregister(cam_sim_path(softc->sim));
+	cam_sim_free(softc->sim, /*free_devq*/ TRUE);
+	mtx_unlock(&softc->lock);
+	mtx_destroy(&softc->lock);
 
+	if ((error = ctl_port_deregister(port)) != 0)
+		printf("%s: cam_sim port deregistration failed\n", __func__);
+	return (error);
 }
 
-static int
-cfcs_module_event_handler(module_t mod, int what, void *arg)
+static void
+cfcs_poll(struct cam_sim *sim)
 {
 
-	switch (what) {
-	case MOD_LOAD:
-		return (cfcs_init());
-	case MOD_UNLOAD:
-		return (EBUSY);
-	default:
-		return (EOPNOTSUPP);
-	}
 }
 
 static void
@@ -316,7 +273,7 @@
 		goto bailout;
 	}
 
-	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
+	if (xpt_create_path(&ccb->ccb_h.path, NULL,
 			    cam_sim_path(softc->sim), CAM_TARGET_WILDCARD,
 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 		printf("%s: can't allocate path for rescan\n", __func__);
@@ -341,29 +298,6 @@
 	cfcs_onoffline(arg, /*online*/ 0);
 }
 
-static int
-cfcs_targ_enable(void *arg, struct ctl_id targ_id)
-{
-	return (0);
-}
-
-static int
-cfcs_targ_disable(void *arg, struct ctl_id targ_id)
-{
-	return (0);
-}
-
-static int
-cfcs_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
-{
-	return (0);
-}
-static int
-cfcs_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
-{
-	return (0);
-}
-
 /*
  * This function is very similar to ctl_ioctl_do_datamove().  Is there a
  * way to combine the functionality?
@@ -380,14 +314,10 @@
 	struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
 	int cam_sg_count, ctl_sg_count, cam_sg_start;
 	int cam_sg_offset;
-	int len_to_copy, len_copied;
+	int len_to_copy;
 	int ctl_watermark, cam_watermark;
 	int i, j;
 
-
-	cam_sg_offset = 0;
-	cam_sg_start = 0;
-
 	ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
 
 	/*
@@ -404,30 +334,28 @@
 	 * Simplify things on both sides by putting single buffers into a
 	 * single entry S/G list.
 	 */
-	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
-		if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) {
-			/* We should filter this out on entry */
-			panic("%s: physical S/G list, should not get here",
-			      __func__);
-		} else {
-			int len_seen;
+	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
+	case CAM_DATA_SG: {
+		int len_seen;
 
-			cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
-			cam_sg_count = ccb->csio.sglist_cnt;
+		cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
+		cam_sg_count = ccb->csio.sglist_cnt;
+		cam_sg_start = cam_sg_count;
+		cam_sg_offset = 0;
 
-			for (i = 0, len_seen = 0; i < cam_sg_count; i++) {
-				if ((len_seen + cam_sglist[i].ds_len) >=
-				     io->scsiio.kern_rel_offset) {
-					cam_sg_start = i;
-					cam_sg_offset =
-						io->scsiio.kern_rel_offset -
-						len_seen;
-					break;
-				}
-				len_seen += cam_sglist[i].ds_len;
+		for (i = 0, len_seen = 0; i < cam_sg_count; i++) {
+			if ((len_seen + cam_sglist[i].ds_len) >=
+			     io->scsiio.kern_rel_offset) {
+				cam_sg_start = i;
+				cam_sg_offset = io->scsiio.kern_rel_offset -
+					len_seen;
+				break;
 			}
+			len_seen += cam_sglist[i].ds_len;
 		}
-	} else {
+		break;
+	}
+	case CAM_DATA_VADDR:
 		cam_sglist = &cam_sg_entry;
 		cam_sglist[0].ds_len = ccb->csio.dxfer_len;
 		cam_sglist[0].ds_addr = (bus_addr_t)ccb->csio.data_ptr;
@@ -434,6 +362,9 @@
 		cam_sg_count = 1;
 		cam_sg_start = 0;
 		cam_sg_offset = io->scsiio.kern_rel_offset;
+		break;
+	default:
+		panic("Invalid CAM flags %#x", ccb->ccb_h.flags);
 	}
 
 	if (io->scsiio.kern_sg_entries > 0) {
@@ -448,13 +379,12 @@
 
 	ctl_watermark = 0;
 	cam_watermark = cam_sg_offset;
-	len_copied = 0;
 	for (i = cam_sg_start, j = 0;
 	     i < cam_sg_count && j < ctl_sg_count;) {
 		uint8_t *cam_ptr, *ctl_ptr;
 
-		len_to_copy = ctl_min(cam_sglist[i].ds_len - cam_watermark,
-				      ctl_sglist[j].len - ctl_watermark);
+		len_to_copy = MIN(cam_sglist[i].ds_len - cam_watermark,
+				  ctl_sglist[j].len - ctl_watermark);
 
 		cam_ptr = (uint8_t *)cam_sglist[i].ds_addr;
 		cam_ptr = cam_ptr + cam_watermark;
@@ -470,9 +400,6 @@
 			ctl_ptr = (uint8_t *)ctl_sglist[j].addr;
 		ctl_ptr = ctl_ptr + ctl_watermark;
 
-		ctl_watermark += len_to_copy;
-		cam_watermark += len_to_copy;
-
 		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
 		     CTL_FLAG_DATA_IN) {
 			CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n",
@@ -488,13 +415,16 @@
 			bcopy(cam_ptr, ctl_ptr, len_to_copy);
 		}
 
-		len_copied += len_to_copy;
+		io->scsiio.ext_data_filled += len_to_copy;
+		io->scsiio.kern_data_resid -= len_to_copy;
 
+		cam_watermark += len_to_copy;
 		if (cam_sglist[i].ds_len == cam_watermark) {
 			i++;
 			cam_watermark = 0;
 		}
 
+		ctl_watermark += len_to_copy;
 		if (ctl_sglist[j].len == ctl_watermark) {
 			j++;
 			ctl_watermark = 0;
@@ -501,7 +431,15 @@
 		}
 	}
 
-	io->scsiio.ext_data_filled += len_copied;
+	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
+		io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
+		ccb->csio.resid = ccb->csio.dxfer_len -
+		    io->scsiio.ext_data_filled;
+		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
+		ccb->ccb_h.status |= CAM_REQ_CMP;
+		xpt_done(ccb);
+	}
 
 	io->scsiio.be_move_done(io);
 }
@@ -510,14 +448,13 @@
 cfcs_done(union ctl_io *io)
 {
 	union ccb *ccb;
-	struct cfcs_softc *softc;
-	struct cam_sim *sim;
 
 	ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+	if (ccb == NULL) {
+		ctl_free_io(io);
+		return;
+	}
 
-	sim = xpt_path_sim(ccb->ccb_h.path);
-	softc = (struct cfcs_softc *)cam_sim_softc(sim);
-
 	/*
 	 * At this point we should have status.  If we don't, that's a bug.
 	 */
@@ -527,12 +464,17 @@
 	/*
 	 * Translate CTL status to CAM status.
 	 */
+	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
+		ccb->csio.resid = ccb->csio.dxfer_len -
+		    io->scsiio.ext_data_filled;
+	}
+	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 	switch (io->io_hdr.status & CTL_STATUS_MASK) {
 	case CTL_SUCCESS:
-		ccb->ccb_h.status = CAM_REQ_CMP;
+		ccb->ccb_h.status |= CAM_REQ_CMP;
 		break;
 	case CTL_SCSI_ERROR:
-		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
+		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
 		ccb->csio.scsi_status = io->scsiio.scsi_status;
 		bcopy(&io->scsiio.sense_data, &ccb->csio.sense_data,
 		      min(io->scsiio.sense_len, ccb->csio.sense_len));
@@ -548,18 +490,19 @@
 		}
 		break;
 	case CTL_CMD_ABORTED:
-		ccb->ccb_h.status = CAM_REQ_ABORTED;
+		ccb->ccb_h.status |= CAM_REQ_ABORTED;
 		break;
 	case CTL_ERROR:
 	default:
-		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
 		break;
 	}
-
-	mtx_lock(sim->mtx);
+	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
+	    (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
+		xpt_freeze_devq(ccb->ccb_h.path, 1);
+		ccb->ccb_h.status |= CAM_DEV_QFRZN;
+	}
 	xpt_done(ccb);
-	mtx_unlock(sim->mtx);
-
 	ctl_free_io(io);
 }
 
@@ -601,7 +544,7 @@
 			return;
 		}
 
-		io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+		io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
 		if (io == NULL) {
 			printf("%s: can't allocate ctl_io\n", __func__);
 			ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
@@ -619,14 +562,11 @@
 		 * down via the XPT_RESET_BUS/LUN CCBs below.
 		 */
 		io->io_hdr.io_type = CTL_IO_SCSI;
-		io->io_hdr.nexus.initid.id = 1;
-		io->io_hdr.nexus.targ_port = softc->fe.targ_port;
+		io->io_hdr.nexus.initid = 1;
+		io->io_hdr.nexus.targ_port = softc->port.targ_port;
+		io->io_hdr.nexus.targ_lun = ctl_decode_lun(
+		    CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
 		/*
-		 * XXX KDM how do we handle target IDs?
-		 */
-		io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
-		io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
-		/*
 		 * This tag scheme isn't the best, since we could in theory
 		 * have a very long-lived I/O and tag collision, especially
 		 * in a high I/O environment.  But it should work well
@@ -662,9 +602,9 @@
 			       __func__, csio->cdb_len, sizeof(io->scsiio.cdb));
 		}
 		io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb));
-		bcopy(csio->cdb_io.cdb_bytes, io->scsiio.cdb,
-		      io->scsiio.cdb_len);
+		bcopy(scsiio_cdb_ptr(csio), io->scsiio.cdb, io->scsiio.cdb_len);
 
+		ccb->ccb_h.status |= CAM_SIM_QUEUED;
 		err = ctl_queue(io);
 		if (err != CTL_RETVAL_COMPLETE) {
 			printf("%s: func %d: error %d returned by "
@@ -671,8 +611,9 @@
 			       "ctl_queue()!\n", __func__,
 			       ccb->ccb_h.func_code, err);
 			ctl_free_io(io);
-		} else {
-			ccb->ccb_h.status |= CAM_SIM_QUEUED;
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+			xpt_done(ccb);
+			return;
 		}
 		break;
 	}
@@ -696,7 +637,7 @@
 			return;
 		}
 
-		io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+		io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
 		if (io == NULL) {
 			ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
 			xpt_freeze_devq(ccb->ccb_h.path, 1);
@@ -710,10 +651,10 @@
 		ccb->ccb_h.io_ptr = io;
 
 		io->io_hdr.io_type = CTL_IO_TASK;
-		io->io_hdr.nexus.initid.id = 1;
-		io->io_hdr.nexus.targ_port = softc->fe.targ_port;
-		io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
-		io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
+		io->io_hdr.nexus.initid = 1;
+		io->io_hdr.nexus.targ_port = softc->port.targ_port;
+		io->io_hdr.nexus.targ_lun = ctl_decode_lun(
+		    CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
 		io->taskio.task_action = CTL_TASK_ABORT_TASK;
 		io->taskio.tag_num = abort_ccb->csio.tag_id;
 		switch (abort_ccb->csio.tag_action) {
@@ -768,7 +709,7 @@
 		fc->bitrate = 800000;
 		fc->wwnn = softc->wwnn;
 		fc->wwpn = softc->wwpn;
-       		fc->port = softc->fe.targ_port;
+		fc->port = softc->port.targ_port;
 		fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN |
 			CTS_FC_VALID_PORT; 
 		ccb->ccb_h.status = CAM_REQ_CMP;
@@ -791,7 +732,7 @@
 			return;
 		}
 
-		io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+		io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
 		if (io == NULL) {
 			ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
 			xpt_freeze_devq(ccb->ccb_h.path, 1);
@@ -801,14 +742,15 @@
 
 		ctl_zero_io(io);
 		/* Save pointers on both sides */
-		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
+		if (ccb->ccb_h.func_code == XPT_RESET_DEV)
+			io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
 		ccb->ccb_h.io_ptr = io;
 
 		io->io_hdr.io_type = CTL_IO_TASK;
-		io->io_hdr.nexus.initid.id = 0;
-		io->io_hdr.nexus.targ_port = softc->fe.targ_port;
-		io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
-		io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
+		io->io_hdr.nexus.initid = 1;
+		io->io_hdr.nexus.targ_port = softc->port.targ_port;
+		io->io_hdr.nexus.targ_lun = ctl_decode_lun(
+		    CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
 		if (ccb->ccb_h.func_code == XPT_RESET_BUS)
 			io->taskio.task_action = CTL_TASK_BUS_RESET;
 		else
@@ -835,7 +777,7 @@
 		cpi->version_num = 0;
 		cpi->hba_inquiry = PI_TAG_ABLE;
 		cpi->target_sprt = 0;
-		cpi->hba_misc = 0;
+		cpi->hba_misc = PIM_EXTLUNS;
 		cpi->hba_eng_cnt = 0;
 		cpi->max_target = 1;
 		cpi->max_lun = 1024;
@@ -845,9 +787,9 @@
 		cpi->hpath_id = 0;
 		cpi->initiator_id = 0;
 
-		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
-		strncpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN);
-		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+		strlcpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN);
+		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
 		cpi->unit_number = 0;
 		cpi->bus_id = 0;
 		cpi->base_transfer_speed = 800000;
@@ -860,7 +802,7 @@
 		cpi->transport_version = 0;
 		cpi->xport_specific.fc.wwnn = softc->wwnn;
 		cpi->xport_specific.fc.wwpn = softc->wwpn;
-		cpi->xport_specific.fc.port = softc->fe.targ_port;
+		cpi->xport_specific.fc.port = softc->port.targ_port;
 		cpi->xport_specific.fc.bitrate = 8 * 1000 * 1000;
 		cpi->ccb_h.status = CAM_REQ_CMP;
 		break;
@@ -873,9 +815,3 @@
 		break;
 	}
 }
-
-static void
-cfcs_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
-{
-
-}

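The cfcs_datamove() rewrite above replaces the old len_copied accumulator
with per-segment watermarks, updating ext_data_filled and kern_data_resid
as it goes. The core of that loop, reduced to a standalone sketch; the seg
type is a stand-in for the CAM and CTL scatter/gather entries, and MIN()
comes from sys/param.h:

	/*
	 * Standalone sketch of the watermark merge copy: walk two
	 * scatter/gather lists at independent offsets, copying the
	 * overlap of the current segment pair each iteration.
	 */
	struct seg { uint8_t *ptr; size_t len; };

	static size_t
	sg_copy(struct seg *src, int nsrc, struct seg *dst, int ndst)
	{
		size_t sw = 0, dw = 0, total = 0; /* segment watermarks */
		int i = 0, j = 0;

		while (i < nsrc && j < ndst) {
			size_t n = MIN(src[i].len - sw, dst[j].len - dw);

			memcpy(dst[j].ptr + dw, src[i].ptr + sw, n);
			total += n;
			sw += n;
			if (sw == src[i].len) { i++; sw = 0; }
			dw += n;
			if (dw == dst[j].len) { j++; dw = 0; }
		}
		return (total);
	}
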
Added: trunk/sys/cam/ctl/ctl_frontend_ioctl.c
===================================================================
--- trunk/sys/cam/ctl/ctl_frontend_ioctl.c	                        (rev 0)
+++ trunk/sys/cam/ctl/ctl_frontend_ioctl.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -0,0 +1,437 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003-2009 Silicon Graphics International Corp.
+ * Copyright (c) 2012 The FreeBSD Foundation
+ * Copyright (c) 2015 Alexander Motin <mav at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_frontend_ioctl.c 313369 2017-02-07 01:56:26Z mav $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_error.h>
+
+typedef enum {
+	CTL_IOCTL_INPROG,
+	CTL_IOCTL_DATAMOVE,
+	CTL_IOCTL_DONE
+} ctl_fe_ioctl_state;
+
+struct ctl_fe_ioctl_params {
+	struct cv		sem;
+	struct mtx		ioctl_mtx;
+	ctl_fe_ioctl_state	state;
+};
+
+struct cfi_softc {
+	uint32_t		cur_tag_num;
+	struct ctl_port		port;
+};
+
+static struct cfi_softc cfi_softc;
+
+static int cfi_init(void);
+static int cfi_shutdown(void);
+static void cfi_datamove(union ctl_io *io);
+static void cfi_done(union ctl_io *io);
+
+static struct ctl_frontend cfi_frontend =
+{
+	.name = "ioctl",
+	.init = cfi_init,
+	.shutdown = cfi_shutdown,
+};
+CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend);
+
+static int
+cfi_init(void)
+{
+	struct cfi_softc *isoftc = &cfi_softc;
+	struct ctl_port *port;
+	int error = 0;
+
+	memset(isoftc, 0, sizeof(*isoftc));
+
+	port = &isoftc->port;
+	port->frontend = &cfi_frontend;
+	port->port_type = CTL_PORT_IOCTL;
+	port->num_requested_ctl_io = 100;
+	port->port_name = "ioctl";
+	port->fe_datamove = cfi_datamove;
+	port->fe_done = cfi_done;
+	port->max_targets = 1;
+	port->max_target_id = 0;
+	port->targ_port = -1;
+	port->max_initiators = 1;
+
+	if ((error = ctl_port_register(port)) != 0) {
+		printf("%s: ioctl port registration failed\n", __func__);
+		return (error);
+	}
+	ctl_port_online(port);
+	return (0);
+}
+
+static int
+cfi_shutdown(void)
+{
+	struct cfi_softc *isoftc = &cfi_softc;
+	struct ctl_port *port = &isoftc->port;
+	int error = 0;
+
+	ctl_port_offline(port);
+	if ((error = ctl_port_deregister(port)) != 0)
+		printf("%s: ioctl port deregistration failed\n", __func__);
+	return (error);
+}
+
+/*
+ * Data movement routine for the CTL ioctl frontend port.
+ */
+static int
+ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
+{
+	struct ctl_sg_entry *ext_sglist, *kern_sglist;
+	struct ctl_sg_entry ext_entry, kern_entry;
+	int ext_sglen, ext_sg_entries, kern_sg_entries;
+	int ext_sg_start, ext_offset;
+	int len_to_copy;
+	int kern_watermark, ext_watermark;
+	int ext_sglist_malloced;
+	int i, j;
+
+	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
+
+	/*
+	 * If this flag is set, fake the data transfer.
+	 */
+	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
+		ext_sglist_malloced = 0;
+		ctsio->ext_data_filled += ctsio->kern_data_len;
+		ctsio->kern_data_resid = 0;
+		goto bailout;
+	}
+
+	/*
+	 * To simplify things here, if we have a single buffer, stick it in
+	 * a S/G entry and just make it a single entry S/G list.
+	 */
+	if (ctsio->ext_sg_entries > 0) {
+		int len_seen;
+
+		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
+		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
+							   M_WAITOK);
+		ext_sglist_malloced = 1;
+		if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
+			ctsio->io_hdr.port_status = 31343;
+			goto bailout;
+		}
+		ext_sg_entries = ctsio->ext_sg_entries;
+		ext_sg_start = ext_sg_entries;
+		ext_offset = 0;
+		len_seen = 0;
+		for (i = 0; i < ext_sg_entries; i++) {
+			if ((len_seen + ext_sglist[i].len) >=
+			     ctsio->ext_data_filled) {
+				ext_sg_start = i;
+				ext_offset = ctsio->ext_data_filled - len_seen;
+				break;
+			}
+			len_seen += ext_sglist[i].len;
+		}
+	} else {
+		ext_sglist = &ext_entry;
+		ext_sglist_malloced = 0;
+		ext_sglist->addr = ctsio->ext_data_ptr;
+		ext_sglist->len = ctsio->ext_data_len;
+		ext_sg_entries = 1;
+		ext_sg_start = 0;
+		ext_offset = ctsio->ext_data_filled;
+	}
+
+	if (ctsio->kern_sg_entries > 0) {
+		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
+		kern_sg_entries = ctsio->kern_sg_entries;
+	} else {
+		kern_sglist = &kern_entry;
+		kern_sglist->addr = ctsio->kern_data_ptr;
+		kern_sglist->len = ctsio->kern_data_len;
+		kern_sg_entries = 1;
+	}
+
+	kern_watermark = 0;
+	ext_watermark = ext_offset;
+	for (i = ext_sg_start, j = 0;
+	     i < ext_sg_entries && j < kern_sg_entries;) {
+		uint8_t *ext_ptr, *kern_ptr;
+
+		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
+				  kern_sglist[j].len - kern_watermark);
+
+		ext_ptr = (uint8_t *)ext_sglist[i].addr;
+		ext_ptr = ext_ptr + ext_watermark;
+		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
+			/*
+			 * XXX KDM fix this!
+			 */
+			panic("need to implement bus address support");
+#if 0
+			kern_ptr = bus_to_virt(kern_sglist[j].addr);
+#endif
+		} else
+			kern_ptr = (uint8_t *)kern_sglist[j].addr;
+		kern_ptr = kern_ptr + kern_watermark;
+
+		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+		     CTL_FLAG_DATA_IN) {
+			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
+					 "bytes to user\n", len_to_copy));
+			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
+					 "to %p\n", kern_ptr, ext_ptr));
+			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
+				ctsio->io_hdr.port_status = 31344;
+				goto bailout;
+			}
+		} else {
+			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
+					 "bytes from user\n", len_to_copy));
+			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
+					 "to %p\n", ext_ptr, kern_ptr));
+			if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){
+				ctsio->io_hdr.port_status = 31345;
+				goto bailout;
+			}
+		}
+
+		ctsio->ext_data_filled += len_to_copy;
+		ctsio->kern_data_resid -= len_to_copy;
+
+		ext_watermark += len_to_copy;
+		if (ext_sglist[i].len == ext_watermark) {
+			i++;
+			ext_watermark = 0;
+		}
+
+		kern_watermark += len_to_copy;
+		if (kern_sglist[j].len == kern_watermark) {
+			j++;
+			kern_watermark = 0;
+		}
+	}
+
+	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
+			 "kern_sg_entries: %d\n", ext_sg_entries,
+			 kern_sg_entries));
+	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
+			 "kern_data_len = %d\n", ctsio->ext_data_len,
+			 ctsio->kern_data_len));
+
+bailout:
+	if (ext_sglist_malloced != 0)
+		free(ext_sglist, M_CTL);
+
+	return (CTL_RETVAL_COMPLETE);
+}
+
+static void
+cfi_datamove(union ctl_io *io)
+{
+	struct ctl_fe_ioctl_params *params;
+
+	params = (struct ctl_fe_ioctl_params *)
+		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+
+	mtx_lock(&params->ioctl_mtx);
+	params->state = CTL_IOCTL_DATAMOVE;
+	cv_broadcast(&params->sem);
+	mtx_unlock(&params->ioctl_mtx);
+}
+
+static void
+cfi_done(union ctl_io *io)
+{
+	struct ctl_fe_ioctl_params *params;
+
+	params = (struct ctl_fe_ioctl_params *)
+		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+
+	mtx_lock(&params->ioctl_mtx);
+	params->state = CTL_IOCTL_DONE;
+	cv_broadcast(&params->sem);
+	mtx_unlock(&params->ioctl_mtx);
+}
+
+static int
+cfi_submit_wait(union ctl_io *io)
+{
+	struct ctl_fe_ioctl_params params;
+	ctl_fe_ioctl_state last_state;
+	int done, retval;
+
+	bzero(&params, sizeof(params));
+	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
+	cv_init(&params.sem, "ctlioccv");
+	params.state = CTL_IOCTL_INPROG;
+	last_state = params.state;
+
+	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;
+
+	CTL_DEBUG_PRINT(("cfi_submit_wait\n"));
+
+	/* This shouldn't happen */
+	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
+		return (retval);
+
+	done = 0;
+
+	do {
+		mtx_lock(&params.ioctl_mtx);
+		/*
+		 * Check the state here, and don't sleep if the state has
+		 * already changed (i.e. wakeup has already occurred, but we
+		 * weren't waiting yet).
+		 */
+		if (params.state == last_state) {
+			/* XXX KDM cv_wait_sig instead? */
+			cv_wait(&params.sem, &params.ioctl_mtx);
+		}
+		last_state = params.state;
+
+		switch (params.state) {
+		case CTL_IOCTL_INPROG:
+			/* Why did we wake up? */
+			/* XXX KDM error here? */
+			mtx_unlock(&params.ioctl_mtx);
+			break;
+		case CTL_IOCTL_DATAMOVE:
+			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));
+
+			/*
+			 * change last_state back to INPROG to avoid
+			 * deadlock on subsequent data moves.
+			 */
+			params.state = last_state = CTL_IOCTL_INPROG;
+
+			mtx_unlock(&params.ioctl_mtx);
+			ctl_ioctl_do_datamove(&io->scsiio);
+			/*
+			 * Note that in some cases, most notably writes,
+			 * this will queue the I/O and call us back later.
+			 * In other cases, generally reads, this routine
+			 * will immediately call back and wake us up,
+			 * probably using our own context.
+			 */
+			io->scsiio.be_move_done(io);
+			break;
+		case CTL_IOCTL_DONE:
+			mtx_unlock(&params.ioctl_mtx);
+			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
+			done = 1;
+			break;
+		default:
+			mtx_unlock(&params.ioctl_mtx);
+			/* XXX KDM error here? */
+			break;
+		}
+	} while (done == 0);
+
+	mtx_destroy(&params.ioctl_mtx);
+	cv_destroy(&params.sem);
+
+	return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+    struct thread *td)
+{
+	union ctl_io *io;
+	void *pool_tmp, *sc_tmp;
+	int retval = 0;
+
+	/*
+	 * If we haven't been "enabled", don't allow any SCSI I/O
+	 * to this FETD.
+	 */
+	if ((cfi_softc.port.status & CTL_PORT_STATUS_ONLINE) == 0)
+		return (EPERM);
+
+	io = ctl_alloc_io(cfi_softc.port.ctl_pool_ref);
+
+	/*
+	 * Need to save the pool reference so it doesn't get
+	 * spammed by the user's ctl_io.
+	 */
+	pool_tmp = io->io_hdr.pool;
+	sc_tmp = CTL_SOFTC(io);
+	memcpy(io, (void *)addr, sizeof(*io));
+	io->io_hdr.pool = pool_tmp;
+	CTL_SOFTC(io) = sc_tmp;
+
+	/*
+	 * No status yet, so make sure the status is set properly.
+	 */
+	io->io_hdr.status = CTL_STATUS_NONE;
+
+	/*
+	 * The user sets the initiator ID, target and LUN IDs.
+	 */
+	io->io_hdr.nexus.targ_port = cfi_softc.port.targ_port;
+	io->io_hdr.flags |= CTL_FLAG_USER_REQ;
+	if ((io->io_hdr.io_type == CTL_IO_SCSI) &&
+	    (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
+		io->scsiio.tag_num = cfi_softc.cur_tag_num++;
+
+	retval = cfi_submit_wait(io);
+	if (retval == 0)
+		memcpy((void *)addr, io, sizeof(*io));
+	ctl_free_io(io);
+	return (retval);
+}


Property changes on: trunk/sys/cam/ctl/ctl_frontend_ioctl.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
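cfi_submit_wait() above is a sleeping state machine: the ioctl thread
parks on a condition variable while cfi_datamove() and cfi_done() run from
CTL's context, advance params->state, and broadcast. The essential
handshake is shown below, including the check that prevents a lost wakeup
when a callback fires before the submitter reaches cv_wait(); field names
mirror struct ctl_fe_ioctl_params, and the loop is a condensed paraphrase
of the code above, not a drop-in replacement:

	/*
	 * Wait until a callback has advanced 'state' past what we
	 * last saw.  Re-checking under the mutex before sleeping
	 * means a broadcast that already happened is never missed.
	 */
	mtx_lock(&params->ioctl_mtx);
	while (params->state == last_state)
		cv_wait(&params->sem, &params->ioctl_mtx);
	last_state = params->state;
	mtx_unlock(&params->ioctl_mtx);
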
Added: trunk/sys/cam/ctl/ctl_frontend_iscsi.c
===================================================================
--- trunk/sys/cam/ctl/ctl_frontend_iscsi.c	                        (rev 0)
+++ trunk/sys/cam/ctl/ctl_frontend_iscsi.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -0,0 +1,2917 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2012 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Edward Tomasz Napierala under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_frontend_iscsi.c 317320 2017-04-23 07:35:51Z mav $
+ */
+
+/*
+ * CTL frontend for the iSCSI protocol.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_frontend_iscsi.c 317320 2017-04-23 07:35:51Z mav $");
+
+#include <sys/param.h>
+#include <sys/capsicum.h>
+#include <sys/condvar.h>
+#include <sys/endian.h>
+#include <sys/file.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/unistd.h>
+#include <vm/uma.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_error.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_private.h>
+
+#include <dev/iscsi/icl.h>
+#include <dev/iscsi/iscsi_proto.h>
+#include <cam/ctl/ctl_frontend_iscsi.h>
+
+#ifdef ICL_KERNEL_PROXY
+#include <sys/socketvar.h>
+#endif
+
+#ifdef ICL_KERNEL_PROXY
+FEATURE(cfiscsi_kernel_proxy, "iSCSI target built with ICL_KERNEL_PROXY");
+#endif
+
+static MALLOC_DEFINE(M_CFISCSI, "cfiscsi", "Memory used for CTL iSCSI frontend");
+static uma_zone_t cfiscsi_data_wait_zone;
+
+SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD, 0,
+    "CAM Target Layer iSCSI Frontend");
+static int debug = 1;
+TUNABLE_INT("kern.cam.ctl.iscsi.debug", &debug);
+SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN,
+    &debug, 1, "Enable debug messages");
+static int ping_timeout = 5;
+TUNABLE_INT("kern.cam.ctl.iscsi.ping_timeout", &ping_timeout);
+SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN,
+    &ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds");
+static int login_timeout = 60;
+TUNABLE_INT("kern.cam.ctl.iscsi.login_timeout", &login_timeout);
+SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN,
+    &login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds");
+static int maxcmdsn_delta = 256;
+TUNABLE_INT("kern.cam.ctl.iscsi.maxcmdsn_delta", &maxcmdsn_delta);
+SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN,
+    &maxcmdsn_delta, 256, "Number of commands the initiator can send "
+    "without confirmation");
+
+#define	CFISCSI_DEBUG(X, ...)						\
+	do {								\
+		if (debug > 1) {					\
+			printf("%s: " X "\n",				\
+			    __func__, ## __VA_ARGS__);			\
+		}							\
+	} while (0)
+
+#define	CFISCSI_WARN(X, ...)						\
+	do {								\
+		if (debug > 0) {					\
+			printf("WARNING: %s: " X "\n",			\
+			    __func__, ## __VA_ARGS__);			\
+		}							\
+	} while (0)
+
+#define	CFISCSI_SESSION_DEBUG(S, X, ...)				\
+	do {								\
+		if (debug > 1) {					\
+			printf("%s: %s (%s): " X "\n",			\
+			    __func__, S->cs_initiator_addr,		\
+			    S->cs_initiator_name, ## __VA_ARGS__);	\
+		}							\
+	} while (0)
+
+#define	CFISCSI_SESSION_WARN(S, X, ...)					\
+	do {								\
+		if (debug > 0) {					\
+			printf("WARNING: %s (%s): " X "\n",		\
+			    S->cs_initiator_addr,			\
+			    S->cs_initiator_name, ## __VA_ARGS__);	\
+		}							\
+	} while (0)
+
+#define CFISCSI_SESSION_LOCK(X)		mtx_lock(&X->cs_lock)
+#define CFISCSI_SESSION_UNLOCK(X)	mtx_unlock(&X->cs_lock)
+#define CFISCSI_SESSION_LOCK_ASSERT(X)	mtx_assert(&X->cs_lock, MA_OWNED)
+
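+/*
+ * The ip_prv* words are the ICL layer's per-PDU private storage; the
+ * target side uses them to track the expected DataSN, the total
+ * transfer length, and the next R2TSN for a task.
+ */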
+#define	CONN_SESSION(X)			((struct cfiscsi_session *)(X)->ic_prv0)
+#define	PDU_SESSION(X)			CONN_SESSION((X)->ip_conn)
+#define	PDU_EXPDATASN(X)		(X)->ip_prv0
+#define	PDU_TOTAL_TRANSFER_LEN(X)	(X)->ip_prv1
+#define	PDU_R2TSN(X)			(X)->ip_prv2
+
+static int	cfiscsi_init(void);
+static int	cfiscsi_shutdown(void);
+static void	cfiscsi_online(void *arg);
+static void	cfiscsi_offline(void *arg);
+static int	cfiscsi_info(void *arg, struct sbuf *sb);
+static int	cfiscsi_ioctl(struct cdev *dev,
+		    u_long cmd, caddr_t addr, int flag, struct thread *td);
+static void	cfiscsi_datamove(union ctl_io *io);
+static void	cfiscsi_datamove_in(union ctl_io *io);
+static void	cfiscsi_datamove_out(union ctl_io *io);
+static void	cfiscsi_done(union ctl_io *io);
+static bool	cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request);
+static void	cfiscsi_pdu_handle_nop_out(struct icl_pdu *request);
+static void	cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request);
+static void	cfiscsi_pdu_handle_task_request(struct icl_pdu *request);
+static void	cfiscsi_pdu_handle_data_out(struct icl_pdu *request);
+static void	cfiscsi_pdu_handle_logout_request(struct icl_pdu *request);
+static void	cfiscsi_session_terminate(struct cfiscsi_session *cs);
+static struct cfiscsi_target	*cfiscsi_target_find(struct cfiscsi_softc
+		    *softc, const char *name, uint16_t tag);
+static struct cfiscsi_target	*cfiscsi_target_find_or_create(
+    struct cfiscsi_softc *softc, const char *name, const char *alias,
+    uint16_t tag);
+static void	cfiscsi_target_release(struct cfiscsi_target *ct);
+static void	cfiscsi_session_delete(struct cfiscsi_session *cs);
+
+static struct cfiscsi_softc cfiscsi_softc;
+
+static struct ctl_frontend cfiscsi_frontend =
+{
+	.name = "iscsi",
+	.init = cfiscsi_init,
+	.ioctl = cfiscsi_ioctl,
+	.shutdown = cfiscsi_shutdown,
+};
+CTL_FRONTEND_DECLARE(ctlcfiscsi, cfiscsi_frontend);
+MODULE_DEPEND(ctlcfiscsi, icl, 1, 1, 1);
+
+static struct icl_pdu *
+cfiscsi_pdu_new_response(struct icl_pdu *request, int flags)
+{
+
+	return (icl_pdu_new(request->ip_conn, flags));
+}
+
+static bool
+cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request)
+{
+	const struct iscsi_bhs_scsi_command *bhssc;
+	struct cfiscsi_session *cs;
+	uint32_t cmdsn, expstatsn;
+
+	cs = PDU_SESSION(request);
+
+	/*
+	 * Every incoming PDU - not just NOP-Out - resets the ping timer.
+	 * The purpose of the timeout is to reset the connection when it stalls;
+	 * we don't want this to happen when NOP-In or NOP-Out ends up delayed
+	 * in some queue.
+	 *
+	 * XXX: Locking?
+	 */
+	cs->cs_timeout = 0;
+
+	/*
+	 * Data-Out PDUs don't contain CmdSN.
+	 */
+	if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+	    ISCSI_BHS_OPCODE_SCSI_DATA_OUT)
+		return (false);
+
+	/*
+	 * We're only using fields common to all the request
+	 * (initiator -> target) PDUs.
+	 */
+	bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs;
+	cmdsn = ntohl(bhssc->bhssc_cmdsn);
+	expstatsn = ntohl(bhssc->bhssc_expstatsn);
+
+	CFISCSI_SESSION_LOCK(cs);
+#if 0
+	if (expstatsn != cs->cs_statsn) {
+		CFISCSI_SESSION_DEBUG(cs, "received PDU with ExpStatSN %d, "
+		    "while current StatSN is %d", expstatsn,
+		    cs->cs_statsn);
+	}
+#endif
+
+	if ((request->ip_bhs->bhs_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) == 0) {
+		/*
+		 * The target MUST silently ignore any non-immediate command
+		 * outside of this range.
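+		 * The comparison uses serial number arithmetic (ISCSI_SNLT/
+		 * ISCSI_SNGT), so the window is evaluated correctly across
+		 * 32-bit CmdSN wraparound.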
+		 */
+		if (ISCSI_SNLT(cmdsn, cs->cs_cmdsn) ||
+		    ISCSI_SNGT(cmdsn, cs->cs_cmdsn + maxcmdsn_delta)) {
+			CFISCSI_SESSION_UNLOCK(cs);
+			CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, "
+			    "while expected %u", cmdsn, cs->cs_cmdsn);
+			return (true);
+		}
+
+		/*
+		 * We don't support multiple connections now, so any
+		 * discontinuity in CmdSN means lost PDUs.  Since we don't
+		 * support PDU retransmission -- terminate the connection.
+		 */
+		if (cmdsn != cs->cs_cmdsn) {
+			CFISCSI_SESSION_UNLOCK(cs);
+			CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, "
+			    "while expected %u; dropping connection",
+			    cmdsn, cs->cs_cmdsn);
+			cfiscsi_session_terminate(cs);
+			return (true);
+		}
+		cs->cs_cmdsn++;
+	}
+
+	CFISCSI_SESSION_UNLOCK(cs);
+
+	return (false);
+}
+
+static void
+cfiscsi_pdu_handle(struct icl_pdu *request)
+{
+	struct cfiscsi_session *cs;
+	bool ignore;
+
+	cs = PDU_SESSION(request);
+
+	ignore = cfiscsi_pdu_update_cmdsn(request);
+	if (ignore) {
+		icl_pdu_free(request);
+		return;
+	}
+
+	/*
+	 * Handle the PDU; this includes e.g. receiving the remaining
+	 * part of PDU and submitting the SCSI command to CTL
+	 * or queueing a reply.  The handling routine is responsible
+	 * for freeing the PDU when it's no longer needed.
+	 */
+	switch (request->ip_bhs->bhs_opcode &
+	    ~ISCSI_BHS_OPCODE_IMMEDIATE) {
+	case ISCSI_BHS_OPCODE_NOP_OUT:
+		cfiscsi_pdu_handle_nop_out(request);
+		break;
+	case ISCSI_BHS_OPCODE_SCSI_COMMAND:
+		cfiscsi_pdu_handle_scsi_command(request);
+		break;
+	case ISCSI_BHS_OPCODE_TASK_REQUEST:
+		cfiscsi_pdu_handle_task_request(request);
+		break;
+	case ISCSI_BHS_OPCODE_SCSI_DATA_OUT:
+		cfiscsi_pdu_handle_data_out(request);
+		break;
+	case ISCSI_BHS_OPCODE_LOGOUT_REQUEST:
+		cfiscsi_pdu_handle_logout_request(request);
+		break;
+	default:
+		CFISCSI_SESSION_WARN(cs, "received PDU with unsupported "
+		    "opcode 0x%x; dropping connection",
+		    request->ip_bhs->bhs_opcode);
+		icl_pdu_free(request);
+		cfiscsi_session_terminate(cs);
+	}
+
+}
+
+static void
+cfiscsi_receive_callback(struct icl_pdu *request)
+{
+#ifdef ICL_KERNEL_PROXY
+	struct cfiscsi_session *cs;
+
+	cs = PDU_SESSION(request);
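+	/*
+	 * With the kernel proxy, Login Phase PDUs are not handled here;
+	 * they are stashed for ctld(8), which picks them up through
+	 * cfiscsi_ioctl_receive().
+	 */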
+	if (cs->cs_waiting_for_ctld || cs->cs_login_phase) {
+		if (cs->cs_login_pdu == NULL)
+			cs->cs_login_pdu = request;
+		else
+			icl_pdu_free(request);
+		cv_signal(&cs->cs_login_cv);
+		return;
+	}
+#endif
+
+	cfiscsi_pdu_handle(request);
+}
+
+static void
+cfiscsi_error_callback(struct icl_conn *ic)
+{
+	struct cfiscsi_session *cs;
+
+	cs = CONN_SESSION(ic);
+
+	CFISCSI_SESSION_WARN(cs, "connection error; dropping connection");
+	cfiscsi_session_terminate(cs);
+}
+
+static int
+cfiscsi_pdu_prepare(struct icl_pdu *response)
+{
+	struct cfiscsi_session *cs;
+	struct iscsi_bhs_scsi_response *bhssr;
+	bool advance_statsn = true;
+
+	cs = PDU_SESSION(response);
+
+	CFISCSI_SESSION_LOCK_ASSERT(cs);
+
+	/*
+	 * We're only using fields common to all the response
+	 * (target -> initiator) PDUs.
+	 */
+	bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs;
+
+	/*
+	 * 10.8.3: "The StatSN for this connection is not advanced
+	 * after this PDU is sent."
+	 */
+	if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_R2T)
+		advance_statsn = false;
+
+	/*
+	 * 10.19.2: "However, when the Initiator Task Tag is set to 0xffffffff,
+	 * StatSN for the connection is not advanced after this PDU is sent."
+	 */
+	if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_NOP_IN && 
+	    bhssr->bhssr_initiator_task_tag == 0xffffffff)
+		advance_statsn = false;
+
+	/*
+	 * See the comment below - StatSN is not meaningful and must
+	 * not be advanced.
+	 */
+	if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_SCSI_DATA_IN &&
+	    (bhssr->bhssr_flags & BHSDI_FLAGS_S) == 0)
+		advance_statsn = false;
+
+	/*
+	 * 10.7.3: "The fields StatSN, Status, and Residual Count
+	 * only have meaningful content if the S bit is set to 1."
+	 */
+	if (bhssr->bhssr_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_IN ||
+	    (bhssr->bhssr_flags & BHSDI_FLAGS_S))
+		bhssr->bhssr_statsn = htonl(cs->cs_statsn);
+	bhssr->bhssr_expcmdsn = htonl(cs->cs_cmdsn);
+	bhssr->bhssr_maxcmdsn = htonl(cs->cs_cmdsn + maxcmdsn_delta);
+
+	if (advance_statsn)
+		cs->cs_statsn++;
+
+	return (0);
+}
+
+static void
+cfiscsi_pdu_queue(struct icl_pdu *response)
+{
+	struct cfiscsi_session *cs;
+
+	cs = PDU_SESSION(response);
+
+	CFISCSI_SESSION_LOCK(cs);
+	cfiscsi_pdu_prepare(response);
+	icl_pdu_queue(response);
+	CFISCSI_SESSION_UNLOCK(cs);
+}
+
+static void
+cfiscsi_pdu_handle_nop_out(struct icl_pdu *request)
+{
+	struct cfiscsi_session *cs;
+	struct iscsi_bhs_nop_out *bhsno;
+	struct iscsi_bhs_nop_in *bhsni;
+	struct icl_pdu *response;
+	void *data = NULL;
+	size_t datasize;
+	int error;
+
+	cs = PDU_SESSION(request);
+	bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs;
+
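+	/*
+	 * An initiator task tag of 0xffffffff marks the NOP-Out as a reply
+	 * to our NOP-In, which needs no response; any other NOP-Out must be
+	 * echoed back as a NOP-In carrying the same data segment.
+	 */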
+	if (bhsno->bhsno_initiator_task_tag == 0xffffffff) {
+		/*
+		 * Nothing to do; cfiscsi_pdu_update_cmdsn() has already
+		 * zeroed the timeout.
+		 */
+		icl_pdu_free(request);
+		return;
+	}
+
+	datasize = icl_pdu_data_segment_length(request);
+	if (datasize > 0) {
+		data = malloc(datasize, M_CFISCSI, M_NOWAIT | M_ZERO);
+		if (data == NULL) {
+			CFISCSI_SESSION_WARN(cs, "failed to allocate memory; "
+			    "dropping connection");
+			icl_pdu_free(request);
+			cfiscsi_session_terminate(cs);
+			return;
+		}
+		icl_pdu_get_data(request, 0, data, datasize);
+	}
+
+	response = cfiscsi_pdu_new_response(request, M_NOWAIT);
+	if (response == NULL) {
+		CFISCSI_SESSION_WARN(cs, "failed to allocate memory; "
+		    "dropping connection");
+		free(data, M_CFISCSI);
+		icl_pdu_free(request);
+		cfiscsi_session_terminate(cs);
+		return;
+	}
+	bhsni = (struct iscsi_bhs_nop_in *)response->ip_bhs;
+	bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN;
+	bhsni->bhsni_flags = 0x80;
+	bhsni->bhsni_initiator_task_tag = bhsno->bhsno_initiator_task_tag;
+	bhsni->bhsni_target_transfer_tag = 0xffffffff;
+	if (datasize > 0) {
+		error = icl_pdu_append_data(response, data, datasize, M_NOWAIT);
+		if (error != 0) {
+			CFISCSI_SESSION_WARN(cs, "failed to allocate memory; "
+			    "dropping connection");
+			free(data, M_CFISCSI);
+			icl_pdu_free(request);
+			icl_pdu_free(response);
+			cfiscsi_session_terminate(cs);
+			return;
+		}
+		free(data, M_CFISCSI);
+	}
+
+	icl_pdu_free(request);
+	cfiscsi_pdu_queue(response);
+}
+
+static void
+cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request)
+{
+	struct iscsi_bhs_scsi_command *bhssc;
+	struct cfiscsi_session *cs;
+	union ctl_io *io;
+	int error;
+
+	cs = PDU_SESSION(request);
+	bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs;
+	//CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x",
+	//    bhssc->bhssc_initiator_task_tag);
+
+	if (request->ip_data_len > 0 && cs->cs_immediate_data == false) {
+		CFISCSI_SESSION_WARN(cs, "unsolicited data with "
+		    "ImmediateData=No; dropping connection");
+		icl_pdu_free(request);
+		cfiscsi_session_terminate(cs);
+		return;
+	}
+	io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
+	ctl_zero_io(io);
+	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request;
+	io->io_hdr.io_type = CTL_IO_SCSI;
+	io->io_hdr.nexus.initid = cs->cs_ctl_initid;
+	io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
+	io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhssc->bhssc_lun));
+	io->scsiio.tag_num = bhssc->bhssc_initiator_task_tag;
+	switch ((bhssc->bhssc_flags & BHSSC_FLAGS_ATTR)) {
+	case BHSSC_FLAGS_ATTR_UNTAGGED:
+		io->scsiio.tag_type = CTL_TAG_UNTAGGED;
+		break;
+	case BHSSC_FLAGS_ATTR_SIMPLE:
+		io->scsiio.tag_type = CTL_TAG_SIMPLE;
+		break;
+	case BHSSC_FLAGS_ATTR_ORDERED:
+		io->scsiio.tag_type = CTL_TAG_ORDERED;
+		break;
+	case BHSSC_FLAGS_ATTR_HOQ:
+		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
+		break;
+	case BHSSC_FLAGS_ATTR_ACA:
+		io->scsiio.tag_type = CTL_TAG_ACA;
+		break;
+	default:
+		io->scsiio.tag_type = CTL_TAG_UNTAGGED;
+		CFISCSI_SESSION_WARN(cs, "unhandled tag type %d",
+		    bhssc->bhssc_flags & BHSSC_FLAGS_ATTR);
+		break;
+	}
+	io->scsiio.cdb_len = sizeof(bhssc->bhssc_cdb); /* Which is 16. */
+	memcpy(io->scsiio.cdb, bhssc->bhssc_cdb, sizeof(bhssc->bhssc_cdb));
+	refcount_acquire(&cs->cs_outstanding_ctl_pdus);
+	error = ctl_queue(io);
+	if (error != CTL_RETVAL_COMPLETE) {
+		CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; "
+		    "dropping connection", error);
+		ctl_free_io(io);
+		refcount_release(&cs->cs_outstanding_ctl_pdus);
+		icl_pdu_free(request);
+		cfiscsi_session_terminate(cs);
+	}
+}
+
+static void
+cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
+{
+	struct iscsi_bhs_task_management_request *bhstmr;
+	struct iscsi_bhs_task_management_response *bhstmr2;
+	struct icl_pdu *response;
+	struct cfiscsi_session *cs;
+	union ctl_io *io;
+	int error;
+
+	cs = PDU_SESSION(request);
+	bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs;
+	io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
+	ctl_zero_io(io);
+	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request;
+	io->io_hdr.io_type = CTL_IO_TASK;
+	io->io_hdr.nexus.initid = cs->cs_ctl_initid;
+	io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
+	io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhstmr->bhstmr_lun));
+	io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */
+
+	switch (bhstmr->bhstmr_function & ~0x80) {
+	case BHSTMR_FUNCTION_ABORT_TASK:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK");
+#endif
+		io->taskio.task_action = CTL_TASK_ABORT_TASK;
+		io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag;
+		break;
+	case BHSTMR_FUNCTION_ABORT_TASK_SET:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK_SET");
+#endif
+		io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
+		break;
+	case BHSTMR_FUNCTION_CLEAR_TASK_SET:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_CLEAR_TASK_SET");
+#endif
+		io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
+		break;
+	case BHSTMR_FUNCTION_LOGICAL_UNIT_RESET:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_LOGICAL_UNIT_RESET");
+#endif
+		io->taskio.task_action = CTL_TASK_LUN_RESET;
+		break;
+	case BHSTMR_FUNCTION_TARGET_WARM_RESET:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_WARM_RESET");
+#endif
+		io->taskio.task_action = CTL_TASK_TARGET_RESET;
+		break;
+	case BHSTMR_FUNCTION_TARGET_COLD_RESET:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_COLD_RESET");
+#endif
+		io->taskio.task_action = CTL_TASK_TARGET_RESET;
+		break;
+	case BHSTMR_FUNCTION_QUERY_TASK:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK");
+#endif
+		io->taskio.task_action = CTL_TASK_QUERY_TASK;
+		io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag;
+		break;
+	case BHSTMR_FUNCTION_QUERY_TASK_SET:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK_SET");
+#endif
+		io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
+		break;
+	case BHSTMR_FUNCTION_I_T_NEXUS_RESET:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_I_T_NEXUS_RESET");
+#endif
+		io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
+		break;
+	case BHSTMR_FUNCTION_QUERY_ASYNC_EVENT:
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_ASYNC_EVENT");
+#endif
+		io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT;
+		break;
+	default:
+		CFISCSI_SESSION_DEBUG(cs, "unsupported function 0x%x",
+		    bhstmr->bhstmr_function & ~0x80);
+		ctl_free_io(io);
+
+		response = cfiscsi_pdu_new_response(request, M_NOWAIT);
+		if (response == NULL) {
+			CFISCSI_SESSION_WARN(cs, "failed to allocate memory; "
+			    "dropping connection");
+			icl_pdu_free(request);
+			cfiscsi_session_terminate(cs);
+			return;
+		}
+		bhstmr2 = (struct iscsi_bhs_task_management_response *)
+		    response->ip_bhs;
+		bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE;
+		bhstmr2->bhstmr_flags = 0x80;
+		bhstmr2->bhstmr_response =
+		    BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED;
+		bhstmr2->bhstmr_initiator_task_tag =
+		    bhstmr->bhstmr_initiator_task_tag;
+		icl_pdu_free(request);
+		cfiscsi_pdu_queue(response);
+		return;
+	}
+
+	refcount_acquire(&cs->cs_outstanding_ctl_pdus);
+	error = ctl_queue(io);
+	if (error != CTL_RETVAL_COMPLETE) {
+		CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; "
+		    "dropping connection", error);
+		ctl_free_io(io);
+		refcount_release(&cs->cs_outstanding_ctl_pdus);
+		icl_pdu_free(request);
+		cfiscsi_session_terminate(cs);
+	}
+}
+
+static bool
+cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *cdw)
+{
+	struct iscsi_bhs_data_out *bhsdo;
+	struct cfiscsi_session *cs;
+	struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
+	size_t copy_len, len, off, buffer_offset;
+	int ctl_sg_count;
+	union ctl_io *io;
+
+	cs = PDU_SESSION(request);
+
+	KASSERT((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+	    ISCSI_BHS_OPCODE_SCSI_DATA_OUT ||
+	    (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+	    ISCSI_BHS_OPCODE_SCSI_COMMAND,
+	    ("bad opcode 0x%x", request->ip_bhs->bhs_opcode));
+
+	/*
+	 * We're only using fields common to Data-Out and SCSI Command PDUs.
+	 */
+	bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs;
+
+	io = cdw->cdw_ctl_io;
+	KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN,
+	    ("CTL_FLAG_DATA_IN"));
+
+#if 0
+	CFISCSI_SESSION_DEBUG(cs, "received %zd bytes out of %d",
+	    request->ip_data_len, io->scsiio.kern_total_len);
+#endif
+
+	if (io->scsiio.kern_sg_entries > 0) {
+		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+		ctl_sg_count = io->scsiio.kern_sg_entries;
+	} else {
+		ctl_sglist = &ctl_sg_entry;
+		ctl_sglist->addr = io->scsiio.kern_data_ptr;
+		ctl_sglist->len = io->scsiio.kern_data_len;
+		ctl_sg_count = 1;
+	}
+
+	if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+	    ISCSI_BHS_OPCODE_SCSI_DATA_OUT)
+		buffer_offset = ntohl(bhsdo->bhsdo_buffer_offset);
+	else
+		buffer_offset = 0;
+	len = icl_pdu_data_segment_length(request);
+
+	/*
+	 * Make sure the offset, as sent by the initiator, matches the offset
+	 * we're supposed to be at in the scatter-gather list.
+	 */
+	if (buffer_offset >
+	    io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled ||
+	    buffer_offset + len <=
+	    io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled) {
+		CFISCSI_SESSION_WARN(cs, "received bad buffer offset %zd, "
+		    "expected %zd; dropping connection", buffer_offset,
+		    (size_t)io->scsiio.kern_rel_offset +
+		    (size_t)io->scsiio.ext_data_filled);
+		ctl_set_data_phase_error(&io->scsiio);
+		cfiscsi_session_terminate(cs);
+		return (true);
+	}
+
+	/*
+	 * This is the offset within the PDU data segment, as opposed
+	 * to buffer_offset, which is the offset within the task (SCSI
+	 * command).
+	 */
+	off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled -
+	    buffer_offset;
+
+	/*
+	 * Iterate over the scatter/gather segments, filling them with data
+	 * from the PDU data segment.  Note that this can get called multiple
+	 * times for one SCSI command; the cdw structure holds state for the
+	 * scatter/gather list.
+	 */
+	for (;;) {
+		KASSERT(cdw->cdw_sg_index < ctl_sg_count,
+		    ("cdw->cdw_sg_index >= ctl_sg_count"));
+		if (cdw->cdw_sg_len == 0) {
+			cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr;
+			cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len;
+		}
+		KASSERT(off <= len, ("off > len"));
+		copy_len = len - off;
+		if (copy_len > cdw->cdw_sg_len)
+			copy_len = cdw->cdw_sg_len;
+
+		icl_pdu_get_data(request, off, cdw->cdw_sg_addr, copy_len);
+		cdw->cdw_sg_addr += copy_len;
+		cdw->cdw_sg_len -= copy_len;
+		off += copy_len;
+		io->scsiio.ext_data_filled += copy_len;
+		io->scsiio.kern_data_resid -= copy_len;
+
+		if (cdw->cdw_sg_len == 0) {
+			/*
+			 * End of current segment.
+			 */
+			if (cdw->cdw_sg_index == ctl_sg_count - 1) {
+				/*
+				 * Last segment in scatter/gather list.
+				 */
+				break;
+			}
+			cdw->cdw_sg_index++;
+		}
+
+		if (off == len) {
+			/*
+			 * End of PDU payload.
+			 */
+			break;
+		}
+	}
+
+	if (len > off) {
+		/*
+		 * In case of unsolicited data, it's possible that the buffer
+		 * provided by CTL is smaller than the negotiated
+		 * FirstBurstLength.  Just ignore the superfluous data; we will
+		 * ask for it with R2T on the next call to cfiscsi_datamove().
+		 *
+		 * This obviously can only happen with a SCSI Command PDU.
+		 */
+		if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+		    ISCSI_BHS_OPCODE_SCSI_COMMAND)
+			return (true);
+
+		CFISCSI_SESSION_WARN(cs, "received too much data: got %zd bytes, "
+		    "expected %zd; dropping connection",
+		    icl_pdu_data_segment_length(request), off);
+		ctl_set_data_phase_error(&io->scsiio);
+		cfiscsi_session_terminate(cs);
+		return (true);
+	}
+
+	if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end &&
+	    (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) == 0) {
+		CFISCSI_SESSION_WARN(cs, "got the final packet without "
+		    "the F flag; flags = 0x%x; dropping connection",
+		    bhsdo->bhsdo_flags);
+		ctl_set_data_phase_error(&io->scsiio);
+		cfiscsi_session_terminate(cs);
+		return (true);
+	}
+
+	if (io->scsiio.ext_data_filled != cdw->cdw_r2t_end &&
+	    (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) != 0) {
+		if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+		    ISCSI_BHS_OPCODE_SCSI_DATA_OUT) {
+			CFISCSI_SESSION_WARN(cs, "got the final packet, but the "
+			    "transmitted size was %zd bytes instead of %d; "
+			    "dropping connection",
+			    (size_t)io->scsiio.ext_data_filled,
+			    cdw->cdw_r2t_end);
+			ctl_set_data_phase_error(&io->scsiio);
+			cfiscsi_session_terminate(cs);
+			return (true);
+		} else {
+			/*
+			 * For SCSI Command PDU, this just means we need to
+			 * solicit more data by sending R2T.
+			 */
+			return (false);
+		}
+	}
+
+	if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end) {
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "no longer expecting Data-Out with target "
+		    "transfer tag 0x%x", cdw->cdw_target_transfer_tag);
+#endif
+
+		return (true);
+	}
+
+	return (false);
+}
+
+static void
+cfiscsi_pdu_handle_data_out(struct icl_pdu *request)
+{
+	struct iscsi_bhs_data_out *bhsdo;
+	struct cfiscsi_session *cs;
+	struct cfiscsi_data_wait *cdw = NULL;
+	union ctl_io *io;
+	bool done;
+
+	cs = PDU_SESSION(request);
+	bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs;
+
+	CFISCSI_SESSION_LOCK(cs);
+	TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) {
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "have ttt 0x%x, itt 0x%x; looking for "
+		    "ttt 0x%x, itt 0x%x",
+		    bhsdo->bhsdo_target_transfer_tag,
+		    bhsdo->bhsdo_initiator_task_tag,
+		    cdw->cdw_target_transfer_tag, cdw->cdw_initiator_task_tag);
+#endif
+		if (bhsdo->bhsdo_target_transfer_tag ==
+		    cdw->cdw_target_transfer_tag)
+			break;
+	}
+	CFISCSI_SESSION_UNLOCK(cs);
+	if (cdw == NULL) {
+		CFISCSI_SESSION_WARN(cs, "Data-Out PDU with target transfer tag "
+		    "0x%x, initiator task tag 0x%x not found; dropping "
+		    "connection", bhsdo->bhsdo_target_transfer_tag,
+		    bhsdo->bhsdo_initiator_task_tag);
+		icl_pdu_free(request);
+		cfiscsi_session_terminate(cs);
+		return;
+	}
+
+	if (cdw->cdw_datasn != ntohl(bhsdo->bhsdo_datasn)) {
+		CFISCSI_SESSION_WARN(cs, "received Data-Out PDU with "
+		    "DataSN %u, while expected %u; dropping connection",
+		    ntohl(bhsdo->bhsdo_datasn), cdw->cdw_datasn);
+		icl_pdu_free(request);
+		cfiscsi_session_terminate(cs);
+		return;
+	}
+	cdw->cdw_datasn++;
+
+	io = cdw->cdw_ctl_io;
+	KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN,
+	    ("CTL_FLAG_DATA_IN"));
+
+	done = cfiscsi_handle_data_segment(request, cdw);
+	if (done) {
+		CFISCSI_SESSION_LOCK(cs);
+		TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next);
+		CFISCSI_SESSION_UNLOCK(cs);
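+		/*
+		 * Complete the data move if the transfer ended prematurely
+		 * (ext_data_filled short of the R2T boundary means an error
+		 * was flagged above) or the whole buffer has been filled;
+		 * otherwise solicit the next burst with another R2T.
+		 */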
+		done = (io->scsiio.ext_data_filled != cdw->cdw_r2t_end ||
+		    io->scsiio.ext_data_filled == io->scsiio.kern_data_len);
+		uma_zfree(cfiscsi_data_wait_zone, cdw);
+		io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
+		if (done)
+			io->scsiio.be_move_done(io);
+		else
+			cfiscsi_datamove_out(io);
+	}
+
+	icl_pdu_free(request);
+}
+
+static void
+cfiscsi_pdu_handle_logout_request(struct icl_pdu *request)
+{
+	struct iscsi_bhs_logout_request *bhslr;
+	struct iscsi_bhs_logout_response *bhslr2;
+	struct icl_pdu *response;
+	struct cfiscsi_session *cs;
+
+	cs = PDU_SESSION(request);
+	bhslr = (struct iscsi_bhs_logout_request *)request->ip_bhs;
+	switch (bhslr->bhslr_reason & 0x7f) {
+	case BHSLR_REASON_CLOSE_SESSION:
+	case BHSLR_REASON_CLOSE_CONNECTION:
+		response = cfiscsi_pdu_new_response(request, M_NOWAIT);
+		if (response == NULL) {
+			CFISCSI_SESSION_DEBUG(cs, "failed to allocate memory");
+			icl_pdu_free(request);
+			cfiscsi_session_terminate(cs);
+			return;
+		}
+		bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs;
+		bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE;
+		bhslr2->bhslr_flags = 0x80;
+		bhslr2->bhslr_response = BHSLR_RESPONSE_CLOSED_SUCCESSFULLY;
+		bhslr2->bhslr_initiator_task_tag =
+		    bhslr->bhslr_initiator_task_tag;
+		icl_pdu_free(request);
+		cfiscsi_pdu_queue(response);
+		cfiscsi_session_terminate(cs);
+		break;
+	case BHSLR_REASON_REMOVE_FOR_RECOVERY:
+		response = cfiscsi_pdu_new_response(request, M_NOWAIT);
+		if (response == NULL) {
+			CFISCSI_SESSION_WARN(cs,
+			    "failed to allocate memory; dropping connection");
+			icl_pdu_free(request);
+			cfiscsi_session_terminate(cs);
+			return;
+		}
+		bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs;
+		bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE;
+		bhslr2->bhslr_flags = 0x80;
+		bhslr2->bhslr_response = BHSLR_RESPONSE_RECOVERY_NOT_SUPPORTED;
+		bhslr2->bhslr_initiator_task_tag =
+		    bhslr->bhslr_initiator_task_tag;
+		icl_pdu_free(request);
+		cfiscsi_pdu_queue(response);
+		break;
+	default:
+		CFISCSI_SESSION_WARN(cs, "invalid reason 0x%x; dropping connection",
+		    bhslr->bhslr_reason);
+		icl_pdu_free(request);
+		cfiscsi_session_terminate(cs);
+		break;
+	}
+}
+
+static void
+cfiscsi_callout(void *context)
+{
+	struct icl_pdu *cp;
+	struct iscsi_bhs_nop_in *bhsni;
+	struct cfiscsi_session *cs;
+
+	cs = context;
+
+	if (cs->cs_terminating) 
+		return;
+
+	callout_schedule(&cs->cs_callout, 1 * hz);
+
+	atomic_add_int(&cs->cs_timeout, 1);
+
+#ifdef ICL_KERNEL_PROXY
+	if (cs->cs_waiting_for_ctld || cs->cs_login_phase) {
+		if (login_timeout > 0 && cs->cs_timeout > login_timeout) {
+			CFISCSI_SESSION_WARN(cs, "login timed out after "
+			    "%d seconds; dropping connection", cs->cs_timeout);
+			cfiscsi_session_terminate(cs);
+		}
+		return;
+	}
+#endif
+
+	if (ping_timeout <= 0) {
+		/*
+		 * Pings are disabled.  Don't send NOP-In in this case;
+		 * user might have disabled pings to work around problems
+		 * with certain initiators that can't properly handle
+		 * NOP-In, such as iPXE.  Reset the timeout, to avoid
+		 * triggering reconnection, should the user decide to
+		 * reenable them.
+		 */
+		cs->cs_timeout = 0;
+		return;
+	}
+
+	if (cs->cs_timeout >= ping_timeout) {
+		CFISCSI_SESSION_WARN(cs, "no ping reply (NOP-Out) after %d seconds; "
+		    "dropping connection", ping_timeout);
+		cfiscsi_session_terminate(cs);
+		return;
+	}
+
+	/*
+	 * If the timeout counter was reset less than one second ago - which
+	 * means that we've received some PDU during the last second - assume
+	 * the traffic flows correctly and don't bother sending a NOP-Out.
+	 *
+	 * (The threshold is 2: one for the second itself, and one for the
+	 * cs_timeout increment earlier in this routine.)
+	 */
+	if (cs->cs_timeout < 2)
+		return;
+
+	cp = icl_pdu_new(cs->cs_conn, M_NOWAIT);
+	if (cp == NULL) {
+		CFISCSI_SESSION_WARN(cs, "failed to allocate memory");
+		return;
+	}
+	bhsni = (struct iscsi_bhs_nop_in *)cp->ip_bhs;
+	bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN;
+	bhsni->bhsni_flags = 0x80;
+	bhsni->bhsni_initiator_task_tag = 0xffffffff;
+
+	cfiscsi_pdu_queue(cp);
+}
+
+static void
+cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs)
+{
+	struct cfiscsi_data_wait *cdw;
+	union ctl_io *io;
+	int error, last, wait;
+
+	if (cs->cs_target == NULL)
+		return;		/* No target yet, so nothing to do. */
+	io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
+	ctl_zero_io(io);
+	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = cs;
+	io->io_hdr.io_type = CTL_IO_TASK;
+	io->io_hdr.nexus.initid = cs->cs_ctl_initid;
+	io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
+	io->io_hdr.nexus.targ_lun = 0;
+	io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */
+	io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
+	wait = cs->cs_outstanding_ctl_pdus;
+	refcount_acquire(&cs->cs_outstanding_ctl_pdus);
+	error = ctl_queue(io);
+	if (error != CTL_RETVAL_COMPLETE) {
+		CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d", error);
+		refcount_release(&cs->cs_outstanding_ctl_pdus);
+		ctl_free_io(io);
+	}
+
+	CFISCSI_SESSION_LOCK(cs);
+	while ((cdw = TAILQ_FIRST(&cs->cs_waiting_for_data_out)) != NULL) {
+		TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next);
+		CFISCSI_SESSION_UNLOCK(cs);
+		/*
+		 * Set nonzero port status; this prevents backends from
+		 * assuming that the data transfer actually succeeded
+		 * and writing uninitialized data to disk.
+		 */
+		cdw->cdw_ctl_io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
+		cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 42;
+		cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io);
+		uma_zfree(cfiscsi_data_wait_zone, cdw);
+		CFISCSI_SESSION_LOCK(cs);
+	}
+	CFISCSI_SESSION_UNLOCK(cs);
+
+	/*
+	 * Wait for CTL to terminate all the tasks.
+	 */
+	if (wait > 0)
+		CFISCSI_SESSION_WARN(cs,
+		    "waiting for CTL to terminate %d tasks", wait);
+	for (;;) {
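+		/*
+		 * Take and immediately drop a temporary reference;
+		 * refcount_release() returns non-zero only when it drops the
+		 * last reference, i.e. once all outstanding CTL PDUs for this
+		 * session have completed.
+		 */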
+		refcount_acquire(&cs->cs_outstanding_ctl_pdus);
+		last = refcount_release(&cs->cs_outstanding_ctl_pdus);
+		if (last != 0)
+			break;
+		tsleep(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus),
+		    0, "cfiscsi_terminate", hz / 100);
+	}
+	if (wait > 0)
+		CFISCSI_SESSION_WARN(cs, "tasks terminated");
+}
+
+static void
+cfiscsi_maintenance_thread(void *arg)
+{
+	struct cfiscsi_session *cs;
+
+	cs = arg;
+
+	for (;;) {
+		CFISCSI_SESSION_LOCK(cs);
+		if (cs->cs_terminating == false)
+			cv_wait(&cs->cs_maintenance_cv, &cs->cs_lock);
+		CFISCSI_SESSION_UNLOCK(cs);
+
+		if (cs->cs_terminating) {
+			/*
+			 * We used to wait up to 30 seconds to deliver queued
+			 * PDUs to the initiator.  We also tried hard to deliver
+			 * SCSI Responses for the aborted PDUs.  We don't do
+			 * that anymore.  We might need to revisit that.
+			 */
+			callout_drain(&cs->cs_callout);
+			icl_conn_close(cs->cs_conn);
+
+			/*
+			 * At this point ICL receive thread is no longer
+			 * running; no new tasks can be queued.
+			 */
+			cfiscsi_session_terminate_tasks(cs);
+			cfiscsi_session_delete(cs);
+			kthread_exit();
+			return;
+		}
+		CFISCSI_SESSION_DEBUG(cs, "nothing to do");
+	}
+}
+
+static void
+cfiscsi_session_terminate(struct cfiscsi_session *cs)
+{
+
+	if (cs->cs_terminating)
+		return;
+	cs->cs_terminating = true;
+	cv_signal(&cs->cs_maintenance_cv);
+#ifdef ICL_KERNEL_PROXY
+	cv_signal(&cs->cs_login_cv);
+#endif
+}
+
+static int
+cfiscsi_session_register_initiator(struct cfiscsi_session *cs)
+{
+	struct cfiscsi_target *ct;
+	char *name;
+	int i;
+
+	KASSERT(cs->cs_ctl_initid == -1, ("already registered"));
+
+	ct = cs->cs_target;
+	name = strdup(cs->cs_initiator_id, M_CTL);
+	i = ctl_add_initiator(&ct->ct_port, -1, 0, name);
+	if (i < 0) {
+		CFISCSI_SESSION_WARN(cs, "ctl_add_initiator failed with error %d",
+		    i);
+		cs->cs_ctl_initid = -1;
+		return (1);
+	}
+	cs->cs_ctl_initid = i;
+#if 0
+	CFISCSI_SESSION_DEBUG(cs, "added initiator id %d", i);
+#endif
+
+	return (0);
+}
+
+static void
+cfiscsi_session_unregister_initiator(struct cfiscsi_session *cs)
+{
+	int error;
+
+	if (cs->cs_ctl_initid == -1)
+		return;
+
+	error = ctl_remove_initiator(&cs->cs_target->ct_port, cs->cs_ctl_initid);
+	if (error != 0) {
+		CFISCSI_SESSION_WARN(cs, "ctl_remove_initiator failed with error %d",
+		    error);
+	}
+	cs->cs_ctl_initid = -1;
+}
+
+static struct cfiscsi_session *
+cfiscsi_session_new(struct cfiscsi_softc *softc)
+{
+	struct cfiscsi_session *cs;
+	int error;
+
+	cs = malloc(sizeof(*cs), M_CFISCSI, M_NOWAIT | M_ZERO);
+	if (cs == NULL) {
+		CFISCSI_WARN("malloc failed");
+		return (NULL);
+	}
+	cs->cs_ctl_initid = -1;
+
+	refcount_init(&cs->cs_outstanding_ctl_pdus, 0);
+	TAILQ_INIT(&cs->cs_waiting_for_data_out);
+	mtx_init(&cs->cs_lock, "cfiscsi_lock", NULL, MTX_DEF);
+	cv_init(&cs->cs_maintenance_cv, "cfiscsi_mt");
+#ifdef ICL_KERNEL_PROXY
+	cv_init(&cs->cs_login_cv, "cfiscsi_login");
+#endif
+
+	cs->cs_conn = icl_conn_new("cfiscsi", &cs->cs_lock);
+	cs->cs_conn->ic_receive = cfiscsi_receive_callback;
+	cs->cs_conn->ic_error = cfiscsi_error_callback;
+	cs->cs_conn->ic_prv0 = cs;
+
+	error = kthread_add(cfiscsi_maintenance_thread, cs, NULL, NULL, 0, 0, "cfiscsimt");
+	if (error != 0) {
+		CFISCSI_SESSION_WARN(cs, "kthread_add(9) failed with error %d", error);
+		free(cs, M_CFISCSI);
+		return (NULL);
+	}
+
+	mtx_lock(&softc->lock);
+	cs->cs_id = ++softc->last_session_id;
+	TAILQ_INSERT_TAIL(&softc->sessions, cs, cs_next);
+	mtx_unlock(&softc->lock);
+
+	/*
+	 * Start pinging the initiator.
+	 */
+	callout_init(&cs->cs_callout, 1);
+	callout_reset(&cs->cs_callout, 1 * hz, cfiscsi_callout, cs);
+
+	return (cs);
+}
+
+static void
+cfiscsi_session_delete(struct cfiscsi_session *cs)
+{
+	struct cfiscsi_softc *softc;
+
+	softc = &cfiscsi_softc;
+
+	KASSERT(cs->cs_outstanding_ctl_pdus == 0,
+	    ("destroying session with outstanding CTL pdus"));
+	KASSERT(TAILQ_EMPTY(&cs->cs_waiting_for_data_out),
+	    ("destroying session with non-empty queue"));
+
+	cfiscsi_session_unregister_initiator(cs);
+	if (cs->cs_target != NULL)
+		cfiscsi_target_release(cs->cs_target);
+	icl_conn_close(cs->cs_conn);
+	icl_conn_free(cs->cs_conn);
+
+	mtx_lock(&softc->lock);
+	TAILQ_REMOVE(&softc->sessions, cs, cs_next);
+	cv_signal(&softc->sessions_cv);
+	mtx_unlock(&softc->lock);
+
+	free(cs, M_CFISCSI);
+}
+
+static int
+cfiscsi_init(void)
+{
+	struct cfiscsi_softc *softc;
+
+	softc = &cfiscsi_softc;
+	bzero(softc, sizeof(*softc));
+	mtx_init(&softc->lock, "cfiscsi", NULL, MTX_DEF);
+
+	cv_init(&softc->sessions_cv, "cfiscsi_sessions");
+#ifdef ICL_KERNEL_PROXY
+	cv_init(&softc->accept_cv, "cfiscsi_accept");
+#endif
+	TAILQ_INIT(&softc->sessions);
+	TAILQ_INIT(&softc->targets);
+
+	cfiscsi_data_wait_zone = uma_zcreate("cfiscsi_data_wait",
+	    sizeof(struct cfiscsi_data_wait), NULL, NULL, NULL, NULL,
+	    UMA_ALIGN_PTR, 0);
+
+	return (0);
+}
+
+static int
+cfiscsi_shutdown(void)
+{
+	struct cfiscsi_softc *softc = &cfiscsi_softc;
+
+	if (!TAILQ_EMPTY(&softc->sessions) || !TAILQ_EMPTY(&softc->targets))
+		return (EBUSY);
+
+	uma_zdestroy(cfiscsi_data_wait_zone);
+#ifdef ICL_KERNEL_PROXY
+	cv_destroy(&softc->accept_cv);
+#endif
+	cv_destroy(&softc->sessions_cv);
+	mtx_destroy(&softc->lock);
+	return (0);
+}
+
+#ifdef ICL_KERNEL_PROXY
+static void
+cfiscsi_accept(struct socket *so, struct sockaddr *sa, int portal_id)
+{
+	struct cfiscsi_session *cs;
+
+	cs = cfiscsi_session_new(&cfiscsi_softc);
+	if (cs == NULL) {
+		CFISCSI_WARN("failed to create session");
+		return;
+	}
+
+	icl_conn_handoff_sock(cs->cs_conn, so);
+	cs->cs_initiator_sa = sa;
+	cs->cs_portal_id = portal_id;
+	cs->cs_waiting_for_ctld = true;
+	cv_signal(&cfiscsi_softc.accept_cv);
+}
+#endif
+
+static void
+cfiscsi_online(void *arg)
+{
+	struct cfiscsi_softc *softc;
+	struct cfiscsi_target *ct;
+	int online;
+
+	ct = (struct cfiscsi_target *)arg;
+	softc = ct->ct_softc;
+
+	mtx_lock(&softc->lock);
+	if (ct->ct_online) {
+		mtx_unlock(&softc->lock);
+		return;
+	}
+	ct->ct_online = 1;
+	online = softc->online++;
+	mtx_unlock(&softc->lock);
+	if (online > 0)
+		return;
+
+#ifdef ICL_KERNEL_PROXY
+	if (softc->listener != NULL)
+		icl_listen_free(softc->listener);
+	softc->listener = icl_listen_new(cfiscsi_accept);
+#endif
+}
+
+static void
+cfiscsi_offline(void *arg)
+{
+	struct cfiscsi_softc *softc;
+	struct cfiscsi_target *ct;
+	struct cfiscsi_session *cs;
+	int online;
+
+	ct = (struct cfiscsi_target *)arg;
+	softc = ct->ct_softc;
+
+	mtx_lock(&softc->lock);
+	if (!ct->ct_online) {
+		mtx_unlock(&softc->lock);
+		return;
+	}
+	ct->ct_online = 0;
+	online = --softc->online;
+
+	TAILQ_FOREACH(cs, &softc->sessions, cs_next) {
+		if (cs->cs_target == ct)
+			cfiscsi_session_terminate(cs);
+	}
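+	/*
+	 * Wait until the terminated sessions are actually gone;
+	 * cfiscsi_session_delete() signals sessions_cv.  Bail out early if
+	 * the port is brought back online in the meantime.
+	 */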
+	do {
+		TAILQ_FOREACH(cs, &softc->sessions, cs_next) {
+			if (cs->cs_target == ct)
+				break;
+		}
+		if (cs != NULL)
+			cv_wait(&softc->sessions_cv, &softc->lock);
+	} while (cs != NULL && ct->ct_online == 0);
+	mtx_unlock(&softc->lock);
+	if (online > 0)
+		return;
+
+#ifdef ICL_KERNEL_PROXY
+	icl_listen_free(softc->listener);
+	softc->listener = NULL;
+#endif
+}
+
+static int
+cfiscsi_info(void *arg, struct sbuf *sb)
+{
+	struct cfiscsi_target *ct = (struct cfiscsi_target *)arg;
+	int retval;
+
+	retval = sbuf_printf(sb, "\t<cfiscsi_state>%d</cfiscsi_state>\n",
+	    ct->ct_state);
+	return (retval);
+}
+
+static void
+cfiscsi_ioctl_handoff(struct ctl_iscsi *ci)
+{
+	struct cfiscsi_softc *softc;
+	struct cfiscsi_session *cs, *cs2;
+	struct cfiscsi_target *ct;
+	struct ctl_iscsi_handoff_params *cihp;
+	int error;
+
+	cihp = (struct ctl_iscsi_handoff_params *)&(ci->data);
+	softc = &cfiscsi_softc;
+
+	CFISCSI_DEBUG("new connection from %s (%s) to %s",
+	    cihp->initiator_name, cihp->initiator_addr,
+	    cihp->target_name);
+
+	ct = cfiscsi_target_find(softc, cihp->target_name,
+	    cihp->portal_group_tag);
+	if (ct == NULL) {
+		ci->status = CTL_ISCSI_ERROR;
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "%s: target not found", __func__);
+		return;
+	}
+
+#ifdef ICL_KERNEL_PROXY
+	if (cihp->socket > 0 && cihp->connection_id > 0) {
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "both socket and connection_id set");
+		ci->status = CTL_ISCSI_ERROR;
+		cfiscsi_target_release(ct);
+		return;
+	}
+	if (cihp->socket == 0) {
+		mtx_lock(&cfiscsi_softc.lock);
+		TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) {
+			if (cs->cs_id == cihp->connection_id)
+				break;
+		}
+		if (cs == NULL) {
+			mtx_unlock(&cfiscsi_softc.lock);
+			snprintf(ci->error_str, sizeof(ci->error_str),
+			    "connection not found");
+			ci->status = CTL_ISCSI_ERROR;
+			cfiscsi_target_release(ct);
+			return;
+		}
+		mtx_unlock(&cfiscsi_softc.lock);
+	} else {
+#endif
+		cs = cfiscsi_session_new(softc);
+		if (cs == NULL) {
+			ci->status = CTL_ISCSI_ERROR;
+			snprintf(ci->error_str, sizeof(ci->error_str),
+			    "%s: cfiscsi_session_new failed", __func__);
+			cfiscsi_target_release(ct);
+			return;
+		}
+#ifdef ICL_KERNEL_PROXY
+	}
+#endif
+
+	/*
+	 * The first PDU of the Full Feature phase has the same CmdSN as the
+	 * last PDU of the Login Phase received from the initiator, so the
+	 * CmdSN handed over by ctld(8) is used as-is.
+	 */
+	cs->cs_cmdsn = cihp->cmdsn;
+	cs->cs_statsn = cihp->statsn;
+	cs->cs_max_data_segment_length = cihp->max_recv_data_segment_length;
+	cs->cs_max_burst_length = cihp->max_burst_length;
+	cs->cs_immediate_data = !!cihp->immediate_data;
+	if (cihp->header_digest == CTL_ISCSI_DIGEST_CRC32C)
+		cs->cs_conn->ic_header_crc32c = true;
+	if (cihp->data_digest == CTL_ISCSI_DIGEST_CRC32C)
+		cs->cs_conn->ic_data_crc32c = true;
+
+	strlcpy(cs->cs_initiator_name,
+	    cihp->initiator_name, sizeof(cs->cs_initiator_name));
+	strlcpy(cs->cs_initiator_addr,
+	    cihp->initiator_addr, sizeof(cs->cs_initiator_addr));
+	strlcpy(cs->cs_initiator_alias,
+	    cihp->initiator_alias, sizeof(cs->cs_initiator_alias));
+	memcpy(cs->cs_initiator_isid,
+	    cihp->initiator_isid, sizeof(cs->cs_initiator_isid));
+	snprintf(cs->cs_initiator_id, sizeof(cs->cs_initiator_id),
+	    "%s,i,0x%02x%02x%02x%02x%02x%02x", cs->cs_initiator_name,
+	    cihp->initiator_isid[0], cihp->initiator_isid[1],
+	    cihp->initiator_isid[2], cihp->initiator_isid[3],
+	    cihp->initiator_isid[4], cihp->initiator_isid[5]);
+
+	mtx_lock(&softc->lock);
+	if (ct->ct_online == 0) {
+		mtx_unlock(&softc->lock);
+		cfiscsi_session_terminate(cs);
+		cfiscsi_target_release(ct);
+		ci->status = CTL_ISCSI_ERROR;
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "%s: port offline", __func__);
+		return;
+	}
+	cs->cs_target = ct;
+	mtx_unlock(&softc->lock);
+
+	refcount_acquire(&cs->cs_outstanding_ctl_pdus);
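+	/*
+	 * Session reinstatement (RFC 3720): a new session from the same
+	 * initiator ID to the same target replaces the old one.  Terminate
+	 * any such session and wait until it is gone before proceeding.
+	 */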
+restart:
+	if (!cs->cs_terminating) {
+		mtx_lock(&softc->lock);
+		TAILQ_FOREACH(cs2, &softc->sessions, cs_next) {
+			if (cs2 != cs && cs2->cs_tasks_aborted == false &&
+			    cs->cs_target == cs2->cs_target &&
+			    strcmp(cs->cs_initiator_id, cs2->cs_initiator_id) == 0) {
+				if (strcmp(cs->cs_initiator_addr,
+				    cs2->cs_initiator_addr) != 0) {
+					CFISCSI_SESSION_WARN(cs2,
+					    "session reinstatement from "
+					    "different address %s",
+					    cs->cs_initiator_addr);
+				} else {
+					CFISCSI_SESSION_DEBUG(cs2,
+					    "session reinstatement");
+				}
+				cfiscsi_session_terminate(cs2);
+				mtx_unlock(&softc->lock);
+				pause("cfiscsi_reinstate", 1);
+				goto restart;
+			}
+		}
+		mtx_unlock(&softc->lock);
+	}
+
+	/*
+	 * Register initiator with CTL.
+	 */
+	cfiscsi_session_register_initiator(cs);
+
+#ifdef ICL_KERNEL_PROXY
+	if (cihp->socket > 0) {
+#endif
+		error = icl_conn_handoff(cs->cs_conn, cihp->socket);
+		if (error != 0) {
+			cfiscsi_session_terminate(cs);
+			refcount_release(&cs->cs_outstanding_ctl_pdus);
+			ci->status = CTL_ISCSI_ERROR;
+			snprintf(ci->error_str, sizeof(ci->error_str),
+			    "%s: icl_conn_handoff failed with error %d",
+			    __func__, error);
+			return;
+		}
+#ifdef ICL_KERNEL_PROXY
+	}
+#endif
+
+#ifdef ICL_KERNEL_PROXY
+	cs->cs_login_phase = false;
+
+	/*
+	 * First PDU of the Full Feature phase has likely already arrived.
+	 * We have to pick it up and execute properly.
+	 */
+	if (cs->cs_login_pdu != NULL) {
+		CFISCSI_SESSION_DEBUG(cs, "picking up first PDU");
+		cfiscsi_pdu_handle(cs->cs_login_pdu);
+		cs->cs_login_pdu = NULL;
+	}
+#endif
+
+	refcount_release(&cs->cs_outstanding_ctl_pdus);
+	ci->status = CTL_ISCSI_OK;
+}
+
+static void
+cfiscsi_ioctl_list(struct ctl_iscsi *ci)
+{
+	struct ctl_iscsi_list_params *cilp;
+	struct cfiscsi_session *cs;
+	struct cfiscsi_softc *softc;
+	struct sbuf *sb;
+	int error;
+
+	cilp = (struct ctl_iscsi_list_params *)&(ci->data);
+	softc = &cfiscsi_softc;
+
+	sb = sbuf_new(NULL, NULL, cilp->alloc_len, SBUF_FIXEDLEN);
+	if (sb == NULL) {
+		ci->status = CTL_ISCSI_ERROR;
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "Unable to allocate %d bytes for iSCSI session list",
+		    cilp->alloc_len);
+		return;
+	}
+
+	sbuf_printf(sb, "<ctlislist>\n");
+	mtx_lock(&softc->lock);
+	TAILQ_FOREACH(cs, &softc->sessions, cs_next) {
+#ifdef ICL_KERNEL_PROXY
+		if (cs->cs_target == NULL)
+			continue;
+#endif
+		error = sbuf_printf(sb, "<connection id=\"%d\">"
+		    "<initiator>%s</initiator>"
+		    "<initiator_addr>%s</initiator_addr>"
+		    "<initiator_alias>%s</initiator_alias>"
+		    "<target>%s</target>"
+		    "<target_alias>%s</target_alias>"
+		    "<target_portal_group_tag>%u</target_portal_group_tag>"
+		    "<header_digest>%s</header_digest>"
+		    "<data_digest>%s</data_digest>"
+		    "<max_data_segment_length>%zd</max_data_segment_length>"
+		    "<immediate_data>%d</immediate_data>"
+		    "<iser>%d</iser>"
+		    "</connection>\n",
+		    cs->cs_id,
+		    cs->cs_initiator_name, cs->cs_initiator_addr, cs->cs_initiator_alias,
+		    cs->cs_target->ct_name, cs->cs_target->ct_alias,
+		    cs->cs_target->ct_tag,
+		    cs->cs_conn->ic_header_crc32c ? "CRC32C" : "None",
+		    cs->cs_conn->ic_data_crc32c ? "CRC32C" : "None",
+		    cs->cs_max_data_segment_length,
+		    cs->cs_immediate_data,
+		    cs->cs_conn->ic_iser);
+		if (error != 0)
+			break;
+	}
+	mtx_unlock(&softc->lock);
+	error = sbuf_printf(sb, "</ctlislist>\n");
+	if (error != 0) {
+		sbuf_delete(sb);
+		ci->status = CTL_ISCSI_LIST_NEED_MORE_SPACE;
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "Out of space, %d bytes is too small", cilp->alloc_len);
+		return;
+	}
+	sbuf_finish(sb);
+
+	error = copyout(sbuf_data(sb), cilp->conn_xml, sbuf_len(sb) + 1);
+	if (error != 0) {
+		sbuf_delete(sb);
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "copyout failed with error %d", error);
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+	cilp->fill_len = sbuf_len(sb) + 1;
+	ci->status = CTL_ISCSI_OK;
+	sbuf_delete(sb);
+}
+
+static void
+cfiscsi_ioctl_logout(struct ctl_iscsi *ci)
+{
+	struct icl_pdu *response;
+	struct iscsi_bhs_asynchronous_message *bhsam;
+	struct ctl_iscsi_logout_params *cilp;
+	struct cfiscsi_session *cs;
+	struct cfiscsi_softc *softc;
+	int found = 0;
+
+	cilp = (struct ctl_iscsi_logout_params *)&(ci->data);
+	softc = &cfiscsi_softc;
+
+	mtx_lock(&softc->lock);
+	TAILQ_FOREACH(cs, &softc->sessions, cs_next) {
+		if (cilp->all == 0 && cs->cs_id != cilp->connection_id &&
+		    strcmp(cs->cs_initiator_name, cilp->initiator_name) != 0 &&
+		    strcmp(cs->cs_initiator_addr, cilp->initiator_addr) != 0)
+			continue;
+
+		response = icl_pdu_new(cs->cs_conn, M_NOWAIT);
+		if (response == NULL) {
+			ci->status = CTL_ISCSI_ERROR;
+			snprintf(ci->error_str, sizeof(ci->error_str),
+			    "Unable to allocate memory");
+			mtx_unlock(&softc->lock);
+			return;
+		}
+		bhsam =
+		    (struct iscsi_bhs_asynchronous_message *)response->ip_bhs;
+		bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE;
+		bhsam->bhsam_flags = 0x80;
+		bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_REQUESTS_LOGOUT;
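+		/*
+		 * Parameter3 is the time, in seconds, the initiator has
+		 * to log out before the target does it for them.
+		 */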
+		bhsam->bhsam_parameter3 = htons(10);
+		cfiscsi_pdu_queue(response);
+		found++;
+	}
+	mtx_unlock(&softc->lock);
+
+	if (found == 0) {
+		ci->status = CTL_ISCSI_SESSION_NOT_FOUND;
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "No matching connections found");
+		return;
+	}
+
+	ci->status = CTL_ISCSI_OK;
+}
+
+static void
+cfiscsi_ioctl_terminate(struct ctl_iscsi *ci)
+{
+	struct icl_pdu *response;
+	struct iscsi_bhs_asynchronous_message *bhsam;
+	struct ctl_iscsi_terminate_params *citp;
+	struct cfiscsi_session *cs;
+	struct cfiscsi_softc *softc;
+	int found = 0;
+
+	citp = (struct ctl_iscsi_terminate_params *)&(ci->data);
+	softc = &cfiscsi_softc;
+
+	mtx_lock(&softc->lock);
+	TAILQ_FOREACH(cs, &softc->sessions, cs_next) {
+		if (citp->all == 0 && cs->cs_id != citp->connection_id &&
+		    strcmp(cs->cs_initiator_name, citp->initiator_name) != 0 &&
+		    strcmp(cs->cs_initiator_addr, citp->initiator_addr) != 0)
+			continue;
+
+		response = icl_pdu_new(cs->cs_conn, M_NOWAIT);
+		if (response == NULL) {
+			/*
+			 * Oh well.  Just terminate the connection.
+			 */
+		} else {
+			bhsam = (struct iscsi_bhs_asynchronous_message *)
+			    response->ip_bhs;
+			bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE;
+			bhsam->bhsam_flags = 0x80;
+			bhsam->bhsam_0xffffffff = 0xffffffff;
+			bhsam->bhsam_async_event =
+			    BHSAM_EVENT_TARGET_TERMINATES_SESSION;
+			cfiscsi_pdu_queue(response);
+		}
+		cfiscsi_session_terminate(cs);
+		found++;
+	}
+	mtx_unlock(&softc->lock);
+
+	if (found == 0) {
+		ci->status = CTL_ISCSI_SESSION_NOT_FOUND;
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "No matching connections found");
+		return;
+	}
+
+	ci->status = CTL_ISCSI_OK;
+}
+
+#ifdef ICL_KERNEL_PROXY
+static void
+cfiscsi_ioctl_listen(struct ctl_iscsi *ci)
+{
+	struct ctl_iscsi_listen_params *cilp;
+	struct sockaddr *sa;
+	int error;
+
+	cilp = (struct ctl_iscsi_listen_params *)&(ci->data);
+
+	if (cfiscsi_softc.listener == NULL) {
+		CFISCSI_DEBUG("no listener");
+		snprintf(ci->error_str, sizeof(ci->error_str), "no listener");
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+
+	error = getsockaddr(&sa, (void *)cilp->addr, cilp->addrlen);
+	if (error != 0) {
+		CFISCSI_DEBUG("getsockaddr, error %d", error);
+		snprintf(ci->error_str, sizeof(ci->error_str), "getsockaddr failed");
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+
+	error = icl_listen_add(cfiscsi_softc.listener, cilp->iser, cilp->domain,
+	    cilp->socktype, cilp->protocol, sa, cilp->portal_id);
+	if (error != 0) {
+		free(sa, M_SONAME);
+		CFISCSI_DEBUG("icl_listen_add, error %d", error);
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "icl_listen_add failed, error %d", error);
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+
+	ci->status = CTL_ISCSI_OK;
+}
+
+static void
+cfiscsi_ioctl_accept(struct ctl_iscsi *ci)
+{
+	struct ctl_iscsi_accept_params *ciap;
+	struct cfiscsi_session *cs;
+	int error;
+
+	ciap = (struct ctl_iscsi_accept_params *)&(ci->data);
+
+	mtx_lock(&cfiscsi_softc.lock);
+	for (;;) {
+		TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) {
+			if (cs->cs_waiting_for_ctld)
+				break;
+		}
+		if (cs != NULL)
+			break;
+		error = cv_wait_sig(&cfiscsi_softc.accept_cv, &cfiscsi_softc.lock);
+		if (error != 0) {
+			mtx_unlock(&cfiscsi_softc.lock);
+			snprintf(ci->error_str, sizeof(ci->error_str), "interrupted");
+			ci->status = CTL_ISCSI_ERROR;
+			return;
+		}
+	}
+	mtx_unlock(&cfiscsi_softc.lock);
+
+	cs->cs_waiting_for_ctld = false;
+	cs->cs_login_phase = true;
+
+	ciap->connection_id = cs->cs_id;
+	ciap->portal_id = cs->cs_portal_id;
+	ciap->initiator_addrlen = cs->cs_initiator_sa->sa_len;
+	error = copyout(cs->cs_initiator_sa, ciap->initiator_addr,
+	    cs->cs_initiator_sa->sa_len);
+	if (error != 0) {
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "copyout failed with error %d", error);
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+
+	ci->status = CTL_ISCSI_OK;
+}
+
+static void
+cfiscsi_ioctl_send(struct ctl_iscsi *ci)
+{
+	struct ctl_iscsi_send_params *cisp;
+	struct cfiscsi_session *cs;
+	struct icl_pdu *ip;
+	size_t datalen;
+	void *data;
+	int error;
+
+	cisp = (struct ctl_iscsi_send_params *)&(ci->data);
+
+	mtx_lock(&cfiscsi_softc.lock);
+	TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) {
+		if (cs->cs_id == cisp->connection_id)
+			break;
+	}
+	if (cs == NULL) {
+		mtx_unlock(&cfiscsi_softc.lock);
+		snprintf(ci->error_str, sizeof(ci->error_str), "connection not found");
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+	mtx_unlock(&cfiscsi_softc.lock);
+
+#if 0
+	if (cs->cs_login_phase == false)
+		return (EBUSY);
+#endif
+
+	if (cs->cs_terminating) {
+		snprintf(ci->error_str, sizeof(ci->error_str), "connection is terminating");
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+
+	datalen = cisp->data_segment_len;
+	/*
+	 * XXX
+	 */
+	//if (datalen > CFISCSI_MAX_DATA_SEGMENT_LENGTH) {
+	if (datalen > 65535) {
+		snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big");
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+	if (datalen > 0) {
+		data = malloc(datalen, M_CFISCSI, M_WAITOK);
+		error = copyin(cisp->data_segment, data, datalen);
+		if (error != 0) {
+			free(data, M_CFISCSI);
+			snprintf(ci->error_str, sizeof(ci->error_str), "copyin error %d", error);
+			ci->status = CTL_ISCSI_ERROR;
+			return;
+		}
+	}
+
+	ip = icl_pdu_new(cs->cs_conn, M_WAITOK);
+	memcpy(ip->ip_bhs, cisp->bhs, sizeof(*ip->ip_bhs));
+	if (datalen > 0) {
+		icl_pdu_append_data(ip, data, datalen, M_WAITOK);
+		free(data, M_CFISCSI);
+	}
+	CFISCSI_SESSION_LOCK(cs);
+	icl_pdu_queue(ip);
+	CFISCSI_SESSION_UNLOCK(cs);
+	ci->status = CTL_ISCSI_OK;
+}
+
+static void
+cfiscsi_ioctl_receive(struct ctl_iscsi *ci)
+{
+	struct ctl_iscsi_receive_params *cirp;
+	struct cfiscsi_session *cs;
+	struct icl_pdu *ip;
+	void *data;
+	int error;
+
+	cirp = (struct ctl_iscsi_receive_params *)&(ci->data);
+
+	mtx_lock(&cfiscsi_softc.lock);
+	TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) {
+		if (cs->cs_id == cirp->connection_id)
+			break;
+	}
+	if (cs == NULL) {
+		mtx_unlock(&cfiscsi_softc.lock);
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "connection not found");
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+	mtx_unlock(&cfiscsi_softc.lock);
+
+#if 0
+	if (is->is_login_phase == false)
+		return (EBUSY);
+#endif
+
+	CFISCSI_SESSION_LOCK(cs);
+	while (cs->cs_login_pdu == NULL && cs->cs_terminating == false) {
+		error = cv_wait_sig(&cs->cs_login_cv, &cs->cs_lock);
+		if (error != 0) {
+			CFISCSI_SESSION_UNLOCK(cs);
+			snprintf(ci->error_str, sizeof(ci->error_str),
+			    "interrupted by signal");
+			ci->status = CTL_ISCSI_ERROR;
+			return;
+		}
+	}
+
+	if (cs->cs_terminating) {
+		CFISCSI_SESSION_UNLOCK(cs);
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "connection terminating");
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+	ip = cs->cs_login_pdu;
+	cs->cs_login_pdu = NULL;
+	CFISCSI_SESSION_UNLOCK(cs);
+
+	if (ip->ip_data_len > cirp->data_segment_len) {
+		icl_pdu_free(ip);
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "data segment too big");
+		ci->status = CTL_ISCSI_ERROR;
+		return;
+	}
+
+	copyout(ip->ip_bhs, cirp->bhs, sizeof(*ip->ip_bhs));
+	if (ip->ip_data_len > 0) {
+		data = malloc(ip->ip_data_len, M_CFISCSI, M_WAITOK);
+		icl_pdu_get_data(ip, 0, data, ip->ip_data_len);
+		copyout(data, cirp->data_segment, ip->ip_data_len);
+		free(data, M_CFISCSI);
+	}
+
+	icl_pdu_free(ip);
+	ci->status = CTL_ISCSI_OK;
+}
+
+#endif /* ICL_KERNEL_PROXY */
+
+static void
+cfiscsi_ioctl_port_create(struct ctl_req *req)
+{
+	struct cfiscsi_target *ct;
+	struct ctl_port *port;
+	const char *target, *alias, *tags;
+	struct scsi_vpd_id_descriptor *desc;
+	ctl_options_t opts;
+	int retval, len, idlen;
+	uint16_t tag;
+
+	ctl_init_opts(&opts, req->num_args, req->kern_args);
+	target = ctl_get_opt(&opts, "cfiscsi_target");
+	alias = ctl_get_opt(&opts, "cfiscsi_target_alias");
+	tags = ctl_get_opt(&opts, "cfiscsi_portal_group_tag");
+	if (target == NULL || tags == NULL) {
+		req->status = CTL_LUN_ERROR;
+		snprintf(req->error_str, sizeof(req->error_str),
+		    "Missing required argument");
+		ctl_free_opts(&opts);
+		return;
+	}
+	tag = strtol(tags, (char **)NULL, 10);
+	ct = cfiscsi_target_find_or_create(&cfiscsi_softc, target, alias, tag);
+	if (ct == NULL) {
+		req->status = CTL_LUN_ERROR;
+		snprintf(req->error_str, sizeof(req->error_str),
+		    "failed to create target \"%s\"", target);
+		ctl_free_opts(&opts);
+		return;
+	}
+	if (ct->ct_state == CFISCSI_TARGET_STATE_ACTIVE) {
+		req->status = CTL_LUN_ERROR;
+		snprintf(req->error_str, sizeof(req->error_str),
+		    "target \"%s\" for portal group tag %u already exists",
+		    target, tag);
+		cfiscsi_target_release(ct);
+		ctl_free_opts(&opts);
+		return;
+	}
+	port = &ct->ct_port;
+	if (ct->ct_state == CFISCSI_TARGET_STATE_DYING)
+		goto done;
+
+	port->frontend = &cfiscsi_frontend;
+	port->port_type = CTL_PORT_ISCSI;
+	/* XXX KDM what should the real number be here? */
+	port->num_requested_ctl_io = 4096;
+	port->port_name = "iscsi";
+	port->physical_port = tag;
+	port->virtual_port = ct->ct_target_id;
+	port->port_online = cfiscsi_online;
+	port->port_offline = cfiscsi_offline;
+	port->port_info = cfiscsi_info;
+	port->onoff_arg = ct;
+	port->fe_datamove = cfiscsi_datamove;
+	port->fe_done = cfiscsi_done;
+
+	/* XXX KDM what should we report here? */
+	/* XXX These should probably be fetched from CTL. */
+	port->max_targets = 1;
+	port->max_target_id = 15;
+	port->targ_port = -1;
+
+	port->options = opts;
+	STAILQ_INIT(&opts);
+
+	/* Generate Port ID. */
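+	/*
+	 * The identifier is the conventional iSCSI port SCSI name string,
+	 * "<target>,t,0x<portal group tag>", matching the snprintf() below.
+	 */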
+	idlen = strlen(target) + strlen(",t,0x0001") + 1;
+	idlen = roundup2(idlen, 4);
+	len = sizeof(struct scsi_vpd_device_id) + idlen;
+	port->port_devid = malloc(sizeof(struct ctl_devid) + len,
+	    M_CTL, M_WAITOK | M_ZERO);
+	port->port_devid->len = len;
+	desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data;
+	desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8;
+	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+	    SVPD_ID_TYPE_SCSI_NAME;
+	desc->length = idlen;
+	snprintf(desc->identifier, idlen, "%s,t,0x%4.4x", target, tag);
+
+	/* Generate Target ID. */
+	idlen = strlen(target) + 1;
+	idlen = roundup2(idlen, 4);
+	len = sizeof(struct scsi_vpd_device_id) + idlen;
+	port->target_devid = malloc(sizeof(struct ctl_devid) + len,
+	    M_CTL, M_WAITOK | M_ZERO);
+	port->target_devid->len = len;
+	desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data;
+	desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8;
+	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET |
+	    SVPD_ID_TYPE_SCSI_NAME;
+	desc->length = idlen;
+	strlcpy(desc->identifier, target, idlen);
+
+	retval = ctl_port_register(port);
+	if (retval != 0) {
+		ctl_free_opts(&port->options);
+		free(port->port_devid, M_CTL);
+		free(port->target_devid, M_CTL);
+		cfiscsi_target_release(ct);
+		req->status = CTL_LUN_ERROR;
+		snprintf(req->error_str, sizeof(req->error_str),
+		    "ctl_port_register() failed with error %d", retval);
+		return;
+	}
+done:
+	ct->ct_state = CFISCSI_TARGET_STATE_ACTIVE;
+	req->status = CTL_LUN_OK;
+	memcpy(req->kern_args[0].kvalue, &port->targ_port,
+	    sizeof(port->targ_port));	/* XXX */
+}
+
+static void
+cfiscsi_ioctl_port_remove(struct ctl_req *req)
+{
+	struct cfiscsi_target *ct;
+	const char *target, *tags;
+	ctl_options_t opts;
+	uint16_t tag;
+
+	ctl_init_opts(&opts, req->num_args, req->kern_args);
+	target = ctl_get_opt(&opts, "cfiscsi_target");
+	tags = ctl_get_opt(&opts, "cfiscsi_portal_group_tag");
+	if (target == NULL || tags == NULL) {
+		ctl_free_opts(&opts);
+		req->status = CTL_LUN_ERROR;
+		snprintf(req->error_str, sizeof(req->error_str),
+		    "Missing required argument");
+		return;
+	}
+	tag = strtol(tags, (char **)NULL, 10);
+	ct = cfiscsi_target_find(&cfiscsi_softc, target, tag);
+	if (ct == NULL) {
+		ctl_free_opts(&opts);
+		req->status = CTL_LUN_ERROR;
+		snprintf(req->error_str, sizeof(req->error_str),
+		    "can't find target \"%s\"", target);
+		return;
+	}
+	if (ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) {
+		ctl_free_opts(&opts);
+		req->status = CTL_LUN_ERROR;
+		snprintf(req->error_str, sizeof(req->error_str),
+		    "target \"%s\" is already dying", target);
+		return;
+	}
+	ctl_free_opts(&opts);
+
+	ct->ct_state = CFISCSI_TARGET_STATE_DYING;
+	ctl_port_offline(&ct->ct_port);
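+	/*
+	 * Release twice: once for the reference taken by
+	 * cfiscsi_target_find() above, and once for the original
+	 * reference created in cfiscsi_ioctl_port_create().
+	 */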
+	cfiscsi_target_release(ct);
+	cfiscsi_target_release(ct);
+	req->status = CTL_LUN_OK;
+}
+
+static int
+cfiscsi_ioctl(struct cdev *dev,
+    u_long cmd, caddr_t addr, int flag, struct thread *td)
+{
+	struct ctl_iscsi *ci;
+	struct ctl_req *req;
+
+	if (cmd == CTL_PORT_REQ) {
+		req = (struct ctl_req *)addr;
+		switch (req->reqtype) {
+		case CTL_REQ_CREATE:
+			cfiscsi_ioctl_port_create(req);
+			break;
+		case CTL_REQ_REMOVE:
+			cfiscsi_ioctl_port_remove(req);
+			break;
+		default:
+			req->status = CTL_LUN_ERROR;
+			snprintf(req->error_str, sizeof(req->error_str),
+			    "Unsupported request type %d", req->reqtype);
+		}
+		return (0);
+	}
+
+	if (cmd != CTL_ISCSI)
+		return (ENOTTY);
+
+	ci = (struct ctl_iscsi *)addr;
+	switch (ci->type) {
+	case CTL_ISCSI_HANDOFF:
+		cfiscsi_ioctl_handoff(ci);
+		break;
+	case CTL_ISCSI_LIST:
+		cfiscsi_ioctl_list(ci);
+		break;
+	case CTL_ISCSI_LOGOUT:
+		cfiscsi_ioctl_logout(ci);
+		break;
+	case CTL_ISCSI_TERMINATE:
+		cfiscsi_ioctl_terminate(ci);
+		break;
+#ifdef ICL_KERNEL_PROXY
+	case CTL_ISCSI_LISTEN:
+		cfiscsi_ioctl_listen(ci);
+		break;
+	case CTL_ISCSI_ACCEPT:
+		cfiscsi_ioctl_accept(ci);
+		break;
+	case CTL_ISCSI_SEND:
+		cfiscsi_ioctl_send(ci);
+		break;
+	case CTL_ISCSI_RECEIVE:
+		cfiscsi_ioctl_receive(ci);
+		break;
+#else
+	case CTL_ISCSI_LISTEN:
+	case CTL_ISCSI_ACCEPT:
+	case CTL_ISCSI_SEND:
+	case CTL_ISCSI_RECEIVE:
+		ci->status = CTL_ISCSI_ERROR;
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "%s: CTL compiled without ICL_KERNEL_PROXY",
+		    __func__);
+		break;
+#endif /* !ICL_KERNEL_PROXY */
+	default:
+		ci->status = CTL_ISCSI_ERROR;
+		snprintf(ci->error_str, sizeof(ci->error_str),
+		    "%s: invalid iSCSI request type %d", __func__, ci->type);
+		break;
+	}
+
+	return (0);
+}
+
+static void
+cfiscsi_target_hold(struct cfiscsi_target *ct)
+{
+
+	refcount_acquire(&ct->ct_refcount);
+}
+
+static void
+cfiscsi_target_release(struct cfiscsi_target *ct)
+{
+	struct cfiscsi_softc *softc;
+
+	softc = ct->ct_softc;
+	mtx_lock(&softc->lock);
+	if (refcount_release(&ct->ct_refcount)) {
+		TAILQ_REMOVE(&softc->targets, ct, ct_next);
+		mtx_unlock(&softc->lock);
+		if (ct->ct_state != CFISCSI_TARGET_STATE_INVALID) {
+			ct->ct_state = CFISCSI_TARGET_STATE_INVALID;
+			if (ctl_port_deregister(&ct->ct_port) != 0)
+				printf("%s: ctl_port_deregister() failed\n",
+				    __func__);
+		}
+		free(ct, M_CFISCSI);
+
+		return;
+	}
+	mtx_unlock(&softc->lock);
+}
+
+static struct cfiscsi_target *
+cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name, uint16_t tag)
+{
+	struct cfiscsi_target *ct;
+
+	mtx_lock(&softc->lock);
+	TAILQ_FOREACH(ct, &softc->targets, ct_next) {
+		if (ct->ct_tag != tag ||
+		    strcmp(name, ct->ct_name) != 0 ||
+		    ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE)
+			continue;
+		cfiscsi_target_hold(ct);
+		mtx_unlock(&softc->lock);
+		return (ct);
+	}
+	mtx_unlock(&softc->lock);
+
+	return (NULL);
+}
+
+static struct cfiscsi_target *
+cfiscsi_target_find_or_create(struct cfiscsi_softc *softc, const char *name,
+    const char *alias, uint16_t tag)
+{
+	struct cfiscsi_target *ct, *newct;
+
+	if (name[0] == '\0' || strlen(name) >= CTL_ISCSI_NAME_LEN)
+		return (NULL);
+
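+	/*
+	 * Allocate the candidate target up front with M_WAITOK; malloc()
+	 * may sleep, which is not permitted while holding the mutex below.
+	 */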
+	newct = malloc(sizeof(*newct), M_CFISCSI, M_WAITOK | M_ZERO);
+
+	mtx_lock(&softc->lock);
+	TAILQ_FOREACH(ct, &softc->targets, ct_next) {
+		if (ct->ct_tag != tag ||
+		    strcmp(name, ct->ct_name) != 0 ||
+		    ct->ct_state == CFISCSI_TARGET_STATE_INVALID)
+			continue;
+		cfiscsi_target_hold(ct);
+		mtx_unlock(&softc->lock);
+		free(newct, M_CFISCSI);
+		return (ct);
+	}
+
+	strlcpy(newct->ct_name, name, sizeof(newct->ct_name));
+	if (alias != NULL)
+		strlcpy(newct->ct_alias, alias, sizeof(newct->ct_alias));
+	newct->ct_tag = tag;
+	refcount_init(&newct->ct_refcount, 1);
+	newct->ct_softc = softc;
+	if (TAILQ_EMPTY(&softc->targets))
+		softc->last_target_id = 0;
+	newct->ct_target_id = ++softc->last_target_id;
+	TAILQ_INSERT_TAIL(&softc->targets, newct, ct_next);
+	mtx_unlock(&softc->lock);
+
+	return (newct);
+}
+
+static void
+cfiscsi_datamove_in(union ctl_io *io)
+{
+	struct cfiscsi_session *cs;
+	struct icl_pdu *request, *response;
+	const struct iscsi_bhs_scsi_command *bhssc;
+	struct iscsi_bhs_data_in *bhsdi;
+	struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
+	size_t len, expected_len, sg_len, buffer_offset;
+	const char *sg_addr;
+	int ctl_sg_count, error, i;
+
+	request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+	cs = PDU_SESSION(request);
+
+	bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs;
+	KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+	    ISCSI_BHS_OPCODE_SCSI_COMMAND,
+	    ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND"));
+
+	if (io->scsiio.kern_sg_entries > 0) {
+		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+		ctl_sg_count = io->scsiio.kern_sg_entries;
+	} else {
+		ctl_sglist = &ctl_sg_entry;
+		ctl_sglist->addr = io->scsiio.kern_data_ptr;
+		ctl_sglist->len = io->scsiio.kern_data_len;
+		ctl_sg_count = 1;
+	}
+
+	/*
+	 * This is the total amount of data to be transferred within the current
+	 * SCSI command.  We need to record it so that we can properly report
+	 * underflow/overflow.
+	 */
+	PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len;
+
+	/*
+	 * This is the offset within the current SCSI command; for the first
+	 * call to cfiscsi_datamove() it will be 0, and for subsequent ones
+	 * it will be the sum of lengths of previous ones.
+	 */
+	buffer_offset = io->scsiio.kern_rel_offset;
+
+	/*
+	 * This is the transfer length expected by the initiator.  In theory,
+	 * it could be different from the correct amount of data from the SCSI
+	 * point of view, even if that doesn't make any sense.
+	 */
+	expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length);
+#if 0
+	if (expected_len != io->scsiio.kern_total_len) {
+		CFISCSI_SESSION_DEBUG(cs, "expected transfer length %zd, "
+		    "actual length %zd", expected_len,
+		    (size_t)io->scsiio.kern_total_len);
+	}
+#endif
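+	/*
+	 * Editor's illustration: for a 192kB read with a 64kB
+	 * cs_max_data_segment_length, the loop below slices the S/G list
+	 * into three Data-In PDUs, queueing each one as it fills up.
+	 */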
+
+	if (buffer_offset >= expected_len) {
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "buffer_offset = %zd, "
+		    "already sent the expected len", buffer_offset);
+#endif
+		io->scsiio.be_move_done(io);
+		return;
+	}
+
+	i = 0;
+	sg_addr = NULL;
+	sg_len = 0;
+	response = NULL;
+	bhsdi = NULL;
+	for (;;) {
+		if (response == NULL) {
+			response = cfiscsi_pdu_new_response(request, M_NOWAIT);
+			if (response == NULL) {
+				CFISCSI_SESSION_WARN(cs, "failed to "
+				    "allocate memory; dropping connection");
+				ctl_set_busy(&io->scsiio);
+				io->scsiio.be_move_done(io);
+				cfiscsi_session_terminate(cs);
+				return;
+			}
+			bhsdi = (struct iscsi_bhs_data_in *)response->ip_bhs;
+			bhsdi->bhsdi_opcode = ISCSI_BHS_OPCODE_SCSI_DATA_IN;
+			bhsdi->bhsdi_initiator_task_tag =
+			    bhssc->bhssc_initiator_task_tag;
+			bhsdi->bhsdi_target_transfer_tag = 0xffffffff;
+			bhsdi->bhsdi_datasn = htonl(PDU_EXPDATASN(request));
+			PDU_EXPDATASN(request)++;
+			bhsdi->bhsdi_buffer_offset = htonl(buffer_offset);
+		}
+
+		KASSERT(i < ctl_sg_count, ("i >= ctl_sg_count"));
+		if (sg_len == 0) {
+			sg_addr = ctl_sglist[i].addr;
+			sg_len = ctl_sglist[i].len;
+			KASSERT(sg_len > 0, ("sg_len <= 0"));
+		}
+
+		len = sg_len;
+
+		/*
+		 * Truncate to maximum data segment length.
+		 */
+		KASSERT(response->ip_data_len < cs->cs_max_data_segment_length,
+		    ("ip_data_len %zd >= max_data_segment_length %zd",
+		    response->ip_data_len, cs->cs_max_data_segment_length));
+		if (response->ip_data_len + len >
+		    cs->cs_max_data_segment_length) {
+			len = cs->cs_max_data_segment_length -
+			    response->ip_data_len;
+			KASSERT(len <= sg_len, ("len %zd > sg_len %zd",
+			    len, sg_len));
+		}
+
+		/*
+		 * Truncate to expected data transfer length.
+		 */
+		KASSERT(buffer_offset + response->ip_data_len < expected_len,
+		    ("buffer_offset %zd + ip_data_len %zd >= expected_len %zd",
+		    buffer_offset, response->ip_data_len, expected_len));
+		if (buffer_offset + response->ip_data_len + len > expected_len) {
+			CFISCSI_SESSION_DEBUG(cs, "truncating from %zd "
+			    "to expected data transfer length %zd",
+			    buffer_offset + response->ip_data_len + len, expected_len);
+			len = expected_len - (buffer_offset + response->ip_data_len);
+			KASSERT(len <= sg_len, ("len %zd > sg_len %zd",
+			    len, sg_len));
+		}
+
+		error = icl_pdu_append_data(response, sg_addr, len, M_NOWAIT);
+		if (error != 0) {
+			CFISCSI_SESSION_WARN(cs, "failed to "
+			    "allocate memory; dropping connection");
+			icl_pdu_free(response);
+			ctl_set_busy(&io->scsiio);
+			io->scsiio.be_move_done(io);
+			cfiscsi_session_terminate(cs);
+			return;
+		}
+		sg_addr += len;
+		sg_len -= len;
+		io->scsiio.kern_data_resid -= len;
+
+		KASSERT(buffer_offset + response->ip_data_len <= expected_len,
+		    ("buffer_offset %zd + ip_data_len %zd > expected_len %zd",
+		    buffer_offset, response->ip_data_len, expected_len));
+		if (buffer_offset + response->ip_data_len == expected_len) {
+			/*
+			 * Already have the amount of data the initiator wanted.
+			 */
+			break;
+		}
+
+		if (sg_len == 0) {
+			/*
+			 * End of scatter-gather segment;
+			 * proceed to the next one...
+			 */
+			if (i == ctl_sg_count - 1) {
+				/*
+				 * ... unless this was the last one.
+				 */
+				break;
+			}
+			i++;
+		}
+
+		if (response->ip_data_len == cs->cs_max_data_segment_length) {
+			/*
+			 * Can't stuff more data into the current PDU;
+			 * queue it.  Note that it is not enough to check
+			 * for kern_data_resid == 0 instead; there
+			 * may be several Data-In PDUs for the final
+			 * call to cfiscsi_datamove(), and we want
+			 * to set the F flag only on the last of them.
+			 */
+			buffer_offset += response->ip_data_len;
+			if (buffer_offset == io->scsiio.kern_total_len ||
+			    buffer_offset == expected_len) {
+				buffer_offset -= response->ip_data_len;
+				break;
+			}
+			cfiscsi_pdu_queue(response);
+			response = NULL;
+			bhsdi = NULL;
+		}
+	}
+	if (response != NULL) {
+		buffer_offset += response->ip_data_len;
+		if (buffer_offset == io->scsiio.kern_total_len ||
+		    buffer_offset == expected_len) {
+			bhsdi->bhsdi_flags |= BHSDI_FLAGS_F;
+			if (io->io_hdr.status == CTL_SUCCESS) {
+				bhsdi->bhsdi_flags |= BHSDI_FLAGS_S;
+				if (PDU_TOTAL_TRANSFER_LEN(request) <
+				    ntohl(bhssc->bhssc_expected_data_transfer_length)) {
+					bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW;
+					bhsdi->bhsdi_residual_count =
+					    htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) -
+					    PDU_TOTAL_TRANSFER_LEN(request));
+				} else if (PDU_TOTAL_TRANSFER_LEN(request) >
+				    ntohl(bhssc->bhssc_expected_data_transfer_length)) {
+					bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW;
+					bhsdi->bhsdi_residual_count =
+					    htonl(PDU_TOTAL_TRANSFER_LEN(request) -
+					    ntohl(bhssc->bhssc_expected_data_transfer_length));
+				}
+				bhsdi->bhsdi_status = io->scsiio.scsi_status;
+				io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
+			}
+		}
+		KASSERT(response->ip_data_len > 0, ("sending empty Data-In"));
+		cfiscsi_pdu_queue(response);
+	}
+
+	io->scsiio.be_move_done(io);
+}
+
+static void
+cfiscsi_datamove_out(union ctl_io *io)
+{
+	struct cfiscsi_session *cs;
+	struct icl_pdu *request, *response;
+	const struct iscsi_bhs_scsi_command *bhssc;
+	struct iscsi_bhs_r2t *bhsr2t;
+	struct cfiscsi_data_wait *cdw;
+	struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
+	uint32_t expected_len, datamove_len, r2t_off, r2t_len;
+	uint32_t target_transfer_tag;
+	bool done;
+
+	request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+	cs = PDU_SESSION(request);
+
+	bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs;
+	KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+	    ISCSI_BHS_OPCODE_SCSI_COMMAND,
+	    ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND"));
+
+	/*
+	 * We need to record it so that we can properly report
+	 * underflow/overflow.
+	 */
+	PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len;
+
+	/*
+	 * Complete write underflow: not a single byte to read; just return.
+	 */
+	expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length);
+	if (io->scsiio.kern_rel_offset >= expected_len) {
+		io->scsiio.be_move_done(io);
+		return;
+	}
+	datamove_len = MIN(io->scsiio.kern_data_len,
+	    expected_len - io->scsiio.kern_rel_offset);
+
+	target_transfer_tag =
+	    atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1);
+
+#if 0
+	CFISCSI_SESSION_DEBUG(cs, "expecting Data-Out with initiator "
+	    "task tag 0x%x, target transfer tag 0x%x",
+	    bhssc->bhssc_initiator_task_tag, target_transfer_tag);
+#endif
+	cdw = uma_zalloc(cfiscsi_data_wait_zone, M_NOWAIT | M_ZERO);
+	if (cdw == NULL) {
+		CFISCSI_SESSION_WARN(cs, "failed to "
+		    "allocate memory; dropping connection");
+		ctl_set_busy(&io->scsiio);
+		io->scsiio.be_move_done(io);
+		cfiscsi_session_terminate(cs);
+		return;
+	}
+	cdw->cdw_ctl_io = io;
+	cdw->cdw_target_transfer_tag = target_transfer_tag;
+	cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
+	cdw->cdw_r2t_end = datamove_len;
+	cdw->cdw_datasn = 0;
+
+	/* Set initial data pointer for the CDW respecting ext_data_filled. */
+	if (io->scsiio.kern_sg_entries > 0) {
+		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+	} else {
+		ctl_sglist = &ctl_sg_entry;
+		ctl_sglist->addr = io->scsiio.kern_data_ptr;
+		ctl_sglist->len = datamove_len;
+	}
+	cdw->cdw_sg_index = 0;
+	cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr;
+	cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len;
+	r2t_off = io->scsiio.ext_data_filled;
+	while (r2t_off > 0) {
+		if (r2t_off >= cdw->cdw_sg_len) {
+			r2t_off -= cdw->cdw_sg_len;
+			cdw->cdw_sg_index++;
+			cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr;
+			cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len;
+			continue;
+		}
+		cdw->cdw_sg_addr += r2t_off;
+		cdw->cdw_sg_len -= r2t_off;
+		r2t_off = 0;
+	}
+
+	if (cs->cs_immediate_data &&
+	    io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled <
+	    icl_pdu_data_segment_length(request)) {
+		done = cfiscsi_handle_data_segment(request, cdw);
+		if (done) {
+			uma_zfree(cfiscsi_data_wait_zone, cdw);
+			io->scsiio.be_move_done(io);
+			return;
+		}
+	}
+
+	r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled;
+	r2t_len = MIN(datamove_len - io->scsiio.ext_data_filled,
+	    cs->cs_max_burst_length);
+	cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len;
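+	/*
+	 * Editor's note: r2t_off is the absolute offset of the first byte
+	 * still to be solicited, and r2t_len is capped by the negotiated
+	 * MaxBurstLength; e.g. a 1MB write with a 256kB burst limit is
+	 * solicited in four R2T rounds.
+	 */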
+
+	CFISCSI_SESSION_LOCK(cs);
+	TAILQ_INSERT_TAIL(&cs->cs_waiting_for_data_out, cdw, cdw_next);
+	CFISCSI_SESSION_UNLOCK(cs);
+
+	/*
+	 * XXX: We should limit the number of outstanding R2T PDUs
+	 * 	per task to MaxOutstandingR2T.
+	 */
+	response = cfiscsi_pdu_new_response(request, M_NOWAIT);
+	if (response == NULL) {
+		CFISCSI_SESSION_WARN(cs, "failed to "
+		    "allocate memory; dropping connection");
+		ctl_set_busy(&io->scsiio);
+		io->scsiio.be_move_done(io);
+		cfiscsi_session_terminate(cs);
+		return;
+	}
+	io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
+	bhsr2t = (struct iscsi_bhs_r2t *)response->ip_bhs;
+	bhsr2t->bhsr2t_opcode = ISCSI_BHS_OPCODE_R2T;
+	bhsr2t->bhsr2t_flags = 0x80;
+	bhsr2t->bhsr2t_lun = bhssc->bhssc_lun;
+	bhsr2t->bhsr2t_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
+	bhsr2t->bhsr2t_target_transfer_tag = target_transfer_tag;
+	/*
+	 * XXX: Here we assume that cfiscsi_datamove() won't ever
+	 *	be running concurrently on several CPUs for a given
+	 *	command.
+	 */
+	bhsr2t->bhsr2t_r2tsn = htonl(PDU_R2TSN(request));
+	PDU_R2TSN(request)++;
+	/*
+	 * This is the offset within the current SCSI command;
+	 * i.e. for the first call of datamove(), it will be 0,
+	 * and for subsequent ones it will be the sum of lengths
+	 * of previous ones.
+	 *
+	 * The ext_data_filled is to account for unsolicited
+	 * (immediate) data that might have already arrived.
+	 */
+	bhsr2t->bhsr2t_buffer_offset = htonl(r2t_off);
+	/*
+	 * This is the total length (sum of S/G lengths) this call
+	 * to cfiscsi_datamove() is supposed to handle, limited by
+	 * MaxBurstLength.
+	 */
+	bhsr2t->bhsr2t_desired_data_transfer_length = htonl(r2t_len);
+	cfiscsi_pdu_queue(response);
+}
+
+static void
+cfiscsi_datamove(union ctl_io *io)
+{
+
+	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
+		cfiscsi_datamove_in(io);
+	else {
+		/* We haven't received anything for this datamove yet. */
+		io->scsiio.ext_data_filled = 0;
+		cfiscsi_datamove_out(io);
+	}
+}
+
+static void
+cfiscsi_scsi_command_done(union ctl_io *io)
+{
+	struct icl_pdu *request, *response;
+	struct iscsi_bhs_scsi_command *bhssc;
+	struct iscsi_bhs_scsi_response *bhssr;
+#ifdef DIAGNOSTIC
+	struct cfiscsi_data_wait *cdw;
+#endif
+	struct cfiscsi_session *cs;
+	uint16_t sense_length;
+
+	request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+	cs = PDU_SESSION(request);
+	bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs;
+	KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+	    ISCSI_BHS_OPCODE_SCSI_COMMAND,
+	    ("replying to wrong opcode 0x%x", bhssc->bhssc_opcode));
+
+	//CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x",
+	//    bhssc->bhssc_initiator_task_tag);
+
+#ifdef DIAGNOSTIC
+	CFISCSI_SESSION_LOCK(cs);
+	TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next)
+		KASSERT(bhssc->bhssc_initiator_task_tag !=
+		    cdw->cdw_initiator_task_tag, ("dangling cdw"));
+	CFISCSI_SESSION_UNLOCK(cs);
+#endif
+
+	/*
+	 * Do not return status for aborted commands.
+	 * There are exceptions, but none supported by CTL yet.
+	 */
+	if (((io->io_hdr.flags & CTL_FLAG_ABORT) &&
+	     (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) ||
+	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT)) {
+		ctl_free_io(io);
+		icl_pdu_free(request);
+		return;
+	}
+
+	response = cfiscsi_pdu_new_response(request, M_WAITOK);
+	bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs;
+	bhssr->bhssr_opcode = ISCSI_BHS_OPCODE_SCSI_RESPONSE;
+	bhssr->bhssr_flags = 0x80;
+	/*
+	 * XXX: We don't deal with bidirectional under/overflows;
+	 *	does anything actually support those?
+	 */
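+	/*
+	 * Editor's illustration: an initiator that expected 4096 bytes for
+	 * a command that actually moved 512 gets the underflow flag and a
+	 * residual count of 3584 below.
+	 */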
+	if (PDU_TOTAL_TRANSFER_LEN(request) <
+	    ntohl(bhssc->bhssc_expected_data_transfer_length)) {
+		bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW;
+		bhssr->bhssr_residual_count =
+		    htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) -
+		    PDU_TOTAL_TRANSFER_LEN(request));
+		//CFISCSI_SESSION_DEBUG(cs, "underflow; residual count %d",
+		//    ntohl(bhssr->bhssr_residual_count));
+	} else if (PDU_TOTAL_TRANSFER_LEN(request) > 
+	    ntohl(bhssc->bhssc_expected_data_transfer_length)) {
+		bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW;
+		bhssr->bhssr_residual_count =
+		    htonl(PDU_TOTAL_TRANSFER_LEN(request) -
+		    ntohl(bhssc->bhssc_expected_data_transfer_length));
+		//CFISCSI_SESSION_DEBUG(cs, "overflow; residual count %d",
+		//    ntohl(bhssr->bhssr_residual_count));
+	}
+	bhssr->bhssr_response = BHSSR_RESPONSE_COMMAND_COMPLETED;
+	bhssr->bhssr_status = io->scsiio.scsi_status;
+	bhssr->bhssr_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
+	bhssr->bhssr_expdatasn = htonl(PDU_EXPDATASN(request));
+
+	if (io->scsiio.sense_len > 0) {
+#if 0
+		CFISCSI_SESSION_DEBUG(cs, "returning %d bytes of sense data",
+		    io->scsiio.sense_len);
+#endif
+		sense_length = htons(io->scsiio.sense_len);
+		icl_pdu_append_data(response,
+		    &sense_length, sizeof(sense_length), M_WAITOK);
+		icl_pdu_append_data(response,
+		    &io->scsiio.sense_data, io->scsiio.sense_len, M_WAITOK);
+	}
+
+	ctl_free_io(io);
+	icl_pdu_free(request);
+	cfiscsi_pdu_queue(response);
+}
+
+static void
+cfiscsi_task_management_done(union ctl_io *io)
+{
+	struct icl_pdu *request, *response;
+	struct iscsi_bhs_task_management_request *bhstmr;
+	struct iscsi_bhs_task_management_response *bhstmr2;
+	struct cfiscsi_data_wait *cdw, *tmpcdw;
+	struct cfiscsi_session *cs, *tcs;
+	struct cfiscsi_softc *softc;
+	int cold_reset = 0;
+
+	request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+	cs = PDU_SESSION(request);
+	bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs;
+	KASSERT((bhstmr->bhstmr_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
+	    ISCSI_BHS_OPCODE_TASK_REQUEST,
+	    ("replying to wrong opcode 0x%x", bhstmr->bhstmr_opcode));
+
+#if 0
+	CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x; referenced task tag 0x%x",
+	    bhstmr->bhstmr_initiator_task_tag,
+	    bhstmr->bhstmr_referenced_task_tag);
+#endif
+
+	if ((bhstmr->bhstmr_function & ~0x80) ==
+	    BHSTMR_FUNCTION_ABORT_TASK) {
+		/*
+		 * Make sure we no longer wait for Data-Out for this command.
+		 */
+		CFISCSI_SESSION_LOCK(cs);
+		TAILQ_FOREACH_SAFE(cdw,
+		    &cs->cs_waiting_for_data_out, cdw_next, tmpcdw) {
+			if (bhstmr->bhstmr_referenced_task_tag !=
+			    cdw->cdw_initiator_task_tag)
+				continue;
+
+#if 0
+			CFISCSI_SESSION_DEBUG(cs, "removing cdw for initiator task "
+			    "tag 0x%x", bhstmr->bhstmr_initiator_task_tag);
+#endif
+			TAILQ_REMOVE(&cs->cs_waiting_for_data_out,
+			    cdw, cdw_next);
+			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
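+			/* Nonzero port_status marks the transfer failed. */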
+			cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 43;
+			cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io);
+			uma_zfree(cfiscsi_data_wait_zone, cdw);
+		}
+		CFISCSI_SESSION_UNLOCK(cs);
+	}
+	if ((bhstmr->bhstmr_function & ~0x80) ==
+	    BHSTMR_FUNCTION_TARGET_COLD_RESET &&
+	    io->io_hdr.status == CTL_SUCCESS)
+		cold_reset = 1;
+
+	response = cfiscsi_pdu_new_response(request, M_WAITOK);
+	bhstmr2 = (struct iscsi_bhs_task_management_response *)
+	    response->ip_bhs;
+	bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE;
+	bhstmr2->bhstmr_flags = 0x80;
+	switch (io->taskio.task_status) {
+	case CTL_TASK_FUNCTION_COMPLETE:
+		bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_COMPLETE;
+		break;
+	case CTL_TASK_FUNCTION_SUCCEEDED:
+		bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_SUCCEEDED;
+		break;
+	case CTL_TASK_LUN_DOES_NOT_EXIST:
+		bhstmr2->bhstmr_response = BHSTMR_RESPONSE_LUN_DOES_NOT_EXIST;
+		break;
+	case CTL_TASK_FUNCTION_NOT_SUPPORTED:
+	default:
+		bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED;
+		break;
+	}
+	memcpy(bhstmr2->bhstmr_additional_reponse_information,
+	    io->taskio.task_resp, sizeof(io->taskio.task_resp));
+	bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag;
+
+	ctl_free_io(io);
+	icl_pdu_free(request);
+	cfiscsi_pdu_queue(response);
+
+	if (cold_reset) {
+		softc = cs->cs_target->ct_softc;
+		mtx_lock(&softc->lock);
+		TAILQ_FOREACH(tcs, &softc->sessions, cs_next) {
+			if (tcs->cs_target == cs->cs_target)
+				cfiscsi_session_terminate(tcs);
+		}
+		mtx_unlock(&softc->lock);
+	}
+}
+
+static void
+cfiscsi_done(union ctl_io *io)
+{
+	struct icl_pdu *request;
+	struct cfiscsi_session *cs;
+
+	KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE),
+		("invalid CTL status %#x", io->io_hdr.status));
+
+	if (io->io_hdr.io_type == CTL_IO_TASK &&
+	    io->taskio.task_action == CTL_TASK_I_T_NEXUS_RESET) {
+		/*
+		 * Implicit task termination has just completed; nothing to do.
+		 */
+		cs = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+		cs->cs_tasks_aborted = true;
+		refcount_release(&cs->cs_outstanding_ctl_pdus);
+		wakeup(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus));
+		ctl_free_io(io);
+		return;
+	}
+
+	request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+	cs = PDU_SESSION(request);
+	refcount_release(&cs->cs_outstanding_ctl_pdus);
+
+	switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) {
+	case ISCSI_BHS_OPCODE_SCSI_COMMAND:
+		cfiscsi_scsi_command_done(io);
+		break;
+	case ISCSI_BHS_OPCODE_TASK_REQUEST:
+		cfiscsi_task_management_done(io);
+		break;
+	default:
+		panic("cfiscsi_done called with wrong opcode 0x%x",
+		    request->ip_bhs->bhs_opcode);
+	}
+}


Property changes on: trunk/sys/cam/ctl/ctl_frontend_iscsi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/cam/ctl/ctl_frontend_iscsi.h
===================================================================
--- trunk/sys/cam/ctl/ctl_frontend_iscsi.h	                        (rev 0)
+++ trunk/sys/cam/ctl/ctl_frontend_iscsi.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -0,0 +1,126 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2012 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Edward Tomasz Napierala under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_frontend_iscsi.h 279003 2015-02-19 14:33:46Z mav $
+ */
+
+#ifndef CTL_FRONTEND_ISCSI_H
+#define	CTL_FRONTEND_ISCSI_H
+
+#define CFISCSI_TARGET_STATE_INVALID	0
+#define CFISCSI_TARGET_STATE_ACTIVE	1
+#define CFISCSI_TARGET_STATE_DYING	2
+
+struct cfiscsi_target {
+	TAILQ_ENTRY(cfiscsi_target)	ct_next;
+	struct cfiscsi_softc		*ct_softc;
+	volatile u_int			ct_refcount;
+	char				ct_name[CTL_ISCSI_NAME_LEN];
+	char				ct_alias[CTL_ISCSI_ALIAS_LEN];
+	uint16_t			ct_tag;
+	int				ct_state;
+	int				ct_online;
+	int				ct_target_id;
+	struct ctl_port			ct_port;
+};
+
+struct cfiscsi_data_wait {
+	TAILQ_ENTRY(cfiscsi_data_wait)	cdw_next;
+	union ctl_io			*cdw_ctl_io;
+	uint32_t			cdw_target_transfer_tag;
+	uint32_t			cdw_initiator_task_tag;
+	int				cdw_sg_index;
+	char				*cdw_sg_addr;
+	size_t				cdw_sg_len;
+	uint32_t			cdw_r2t_end;
+	uint32_t			cdw_datasn;
+};
+
+#define CFISCSI_SESSION_STATE_INVALID		0
+#define CFISCSI_SESSION_STATE_BHS		1
+#define CFISCSI_SESSION_STATE_AHS		2
+#define CFISCSI_SESSION_STATE_HEADER_DIGEST	3
+#define CFISCSI_SESSION_STATE_DATA		4
+#define CFISCSI_SESSION_STATE_DATA_DIGEST	5
+
+struct cfiscsi_session {
+	TAILQ_ENTRY(cfiscsi_session)	cs_next;
+	struct mtx			cs_lock;
+	struct icl_conn			*cs_conn;
+	uint32_t			cs_cmdsn;
+	uint32_t			cs_statsn;
+	uint32_t			cs_target_transfer_tag;
+	volatile u_int			cs_outstanding_ctl_pdus;
+	TAILQ_HEAD(, cfiscsi_data_wait)	cs_waiting_for_data_out;
+	struct cfiscsi_target		*cs_target;
+	struct callout			cs_callout;
+	int				cs_timeout;
+	struct cv			cs_maintenance_cv;
+	bool				cs_terminating;
+	bool				cs_tasks_aborted;
+	size_t				cs_max_data_segment_length;
+	size_t				cs_max_burst_length;
+	bool				cs_immediate_data;
+	char				cs_initiator_name[CTL_ISCSI_NAME_LEN];
+	char				cs_initiator_addr[CTL_ISCSI_ADDR_LEN];
+	char				cs_initiator_alias[CTL_ISCSI_ALIAS_LEN];
+	char				cs_initiator_isid[6];
+	char				cs_initiator_id[CTL_ISCSI_NAME_LEN + 5 + 6 + 1];
+	unsigned int			cs_id;
+	int				cs_ctl_initid;
+#ifdef ICL_KERNEL_PROXY
+	struct sockaddr			*cs_initiator_sa;
+	int				cs_portal_id;
+	bool				cs_login_phase;
+	bool				cs_waiting_for_ctld;
+	struct cv			cs_login_cv;
+	struct icl_pdu			*cs_login_pdu;
+#endif
+};
+
+#ifdef ICL_KERNEL_PROXY
+struct icl_listen;
+#endif
+
+struct cfiscsi_softc {
+	struct mtx			lock;
+	char				port_name[32];
+	int				online;
+	int				last_target_id;
+	unsigned int			last_session_id;
+	TAILQ_HEAD(, cfiscsi_target)	targets;
+	TAILQ_HEAD(, cfiscsi_session)	sessions;
+	struct cv			sessions_cv;
+#ifdef ICL_KERNEL_PROXY
+	struct icl_listen		*listener;
+	struct cv			accept_cv;
+#endif
+};
+
+#endif /* !CTL_FRONTEND_ISCSI_H */


Property changes on: trunk/sys/cam/ctl/ctl_frontend_iscsi.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/cam/ctl/ctl_ha.c
===================================================================
--- trunk/sys/cam/ctl/ctl_ha.c	                        (rev 0)
+++ trunk/sys/cam/ctl/ctl_ha.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -0,0 +1,1031 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Alexander Motin <mav at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_ha.c 312587 2017-01-21 08:43:41Z mav $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/types.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/proc.h>
+#include <sys/conf.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/uio.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <vm/uma.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_error.h>
+
+#if (__FreeBSD_version < 1100000)
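+/*
+ * Minimal compatibility shim for kernels that predate the mbufq API.
+ */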
+struct mbufq {
+	struct mbuf *head;
+	struct mbuf *tail;
+};
+
+static void
+mbufq_init(struct mbufq *q, int limit)
+{
+
+	q->head = q->tail = NULL;
+}
+
+static void
+mbufq_drain(struct mbufq *q)
+{
+	struct mbuf *m;
+
+	while ((m = q->head) != NULL) {
+		q->head = m->m_nextpkt;
+		m_freem(m);
+	}
+	q->tail = NULL;
+}
+
+static struct mbuf *
+mbufq_dequeue(struct mbufq *q)
+{
+	struct mbuf *m;
+
+	m = q->head;
+	if (m) {
+		if (q->tail == m)
+			q->tail = NULL;
+		q->head = m->m_nextpkt;
+		m->m_nextpkt = NULL;
+	}
+	return (m);
+}
+
+static void
+mbufq_enqueue(struct mbufq *q, struct mbuf *m)
+{
+
+	m->m_nextpkt = NULL;
+	if (q->tail)
+		q->tail->m_nextpkt = m;
+	else
+		q->head = m;
+	q->tail = m;
+}
+
+static u_int
+sbavail(struct sockbuf *sb)
+{
+	return (sb->sb_cc);
+}
+
+#if (__FreeBSD_version < 1000000)
+#define	mtodo(m, o)	((void *)(((m)->m_data) + (o)))
+#endif
+#endif
+
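+/*
+ * Wire format: each HA message is a struct ha_msg_wire header followed
+ * by "length" payload bytes for the given channel.
+ */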
+struct ha_msg_wire {
+	uint32_t	 channel;
+	uint32_t	 length;
+};
+
+struct ha_dt_msg_wire {
+	ctl_ha_dt_cmd	command;
+	uint32_t	size;
+	uint8_t		*local;
+	uint8_t		*remote;
+};
+
+struct ha_softc {
+	struct ctl_softc *ha_ctl_softc;
+	ctl_evt_handler	 ha_handler[CTL_HA_CHAN_MAX];
+	char		 ha_peer[128];
+	struct sockaddr_in  ha_peer_in;
+	struct socket	*ha_lso;
+	struct socket	*ha_so;
+	struct mbufq	 ha_sendq;
+	struct mbuf	*ha_sending;
+	struct mtx	 ha_lock;
+	int		 ha_connect;
+	int		 ha_listen;
+	int		 ha_connected;
+	int		 ha_receiving;
+	int		 ha_wakeup;
+	int		 ha_disconnect;
+	int		 ha_shutdown;
+	eventhandler_tag ha_shutdown_eh;
+	TAILQ_HEAD(, ctl_ha_dt_req) ha_dts;
+} ha_softc;
+
+static void
+ctl_ha_conn_wake(struct ha_softc *softc)
+{
+
+	mtx_lock(&softc->ha_lock);
+	softc->ha_wakeup = 1;
+	mtx_unlock(&softc->ha_lock);
+	wakeup(&softc->ha_wakeup);
+}
+
+static int
+ctl_ha_lupcall(struct socket *so, void *arg, int waitflag)
+{
+	struct ha_softc *softc = arg;
+
+	ctl_ha_conn_wake(softc);
+	return (SU_OK);
+}
+
+static int
+ctl_ha_rupcall(struct socket *so, void *arg, int waitflag)
+{
+	struct ha_softc *softc = arg;
+
+	wakeup(&softc->ha_receiving);
+	return (SU_OK);
+}
+
+static int
+ctl_ha_supcall(struct socket *so, void *arg, int waitflag)
+{
+	struct ha_softc *softc = arg;
+
+	ctl_ha_conn_wake(softc);
+	return (SU_OK);
+}
+
+static void
+ctl_ha_evt(struct ha_softc *softc, ctl_ha_channel ch, ctl_ha_event evt,
+    int param)
+{
+	int i;
+
+	if (ch < CTL_HA_CHAN_MAX) {
+		if (softc->ha_handler[ch])
+			softc->ha_handler[ch](ch, evt, param);
+		return;
+	}
+	for (i = 0; i < CTL_HA_CHAN_MAX; i++) {
+		if (softc->ha_handler[i])
+			softc->ha_handler[i](i, evt, param);
+	}
+}
+
+static void
+ctl_ha_close(struct ha_softc *softc)
+{
+	struct socket *so = softc->ha_so;
+	int report = 0;
+
+	if (softc->ha_connected || softc->ha_disconnect) {
+		softc->ha_connected = 0;
+		mbufq_drain(&softc->ha_sendq);
+		m_freem(softc->ha_sending);
+		softc->ha_sending = NULL;
+		report = 1;
+	}
+	if (so) {
+		SOCKBUF_LOCK(&so->so_rcv);
+		soupcall_clear(so, SO_RCV);
+		while (softc->ha_receiving) {
+			wakeup(&softc->ha_receiving);
+			msleep(&softc->ha_receiving, SOCKBUF_MTX(&so->so_rcv),
+			    0, "ha_rx exit", 0);
+		}
+		SOCKBUF_UNLOCK(&so->so_rcv);
+		SOCKBUF_LOCK(&so->so_snd);
+		soupcall_clear(so, SO_SND);
+		SOCKBUF_UNLOCK(&so->so_snd);
+		softc->ha_so = NULL;
+		if (softc->ha_connect)
+			pause("reconnect", hz / 2);
+		soclose(so);
+	}
+	if (report) {
+		ctl_ha_evt(softc, CTL_HA_CHAN_MAX, CTL_HA_EVT_LINK_CHANGE,
+		    (softc->ha_connect || softc->ha_listen) ?
+		    CTL_HA_LINK_UNKNOWN : CTL_HA_LINK_OFFLINE);
+	}
+}
+
+static void
+ctl_ha_lclose(struct ha_softc *softc)
+{
+
+	if (softc->ha_lso) {
+		SOCKBUF_LOCK(&softc->ha_lso->so_rcv);
+		soupcall_clear(softc->ha_lso, SO_RCV);
+		SOCKBUF_UNLOCK(&softc->ha_lso->so_rcv);
+		soclose(softc->ha_lso);
+		softc->ha_lso = NULL;
+	}
+}
+
+static void
+ctl_ha_rx_thread(void *arg)
+{
+	struct ha_softc *softc = arg;
+	struct socket *so = softc->ha_so;
+	struct ha_msg_wire wire_hdr;
+	struct uio uio;
+	struct iovec iov;
+	int error, flags, next;
+
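+	/*
+	 * Alternate between reading a struct ha_msg_wire header and
+	 * notifying the channel handler, which consumes the payload via
+	 * ctl_ha_msg_recv().
+	 */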
+	bzero(&wire_hdr, sizeof(wire_hdr));
+	while (1) {
+		if (wire_hdr.length > 0)
+			next = wire_hdr.length;
+		else
+			next = sizeof(wire_hdr);
+		SOCKBUF_LOCK(&so->so_rcv);
+		while (sbavail(&so->so_rcv) < next || softc->ha_disconnect) {
+			if (softc->ha_connected == 0 || softc->ha_disconnect ||
+			    so->so_error ||
+			    (so->so_rcv.sb_state & SBS_CANTRCVMORE)) {
+				goto errout;
+			}
+			so->so_rcv.sb_lowat = next;
+			msleep(&softc->ha_receiving, SOCKBUF_MTX(&so->so_rcv),
+			    0, "-", 0);
+		}
+		SOCKBUF_UNLOCK(&so->so_rcv);
+
+		if (wire_hdr.length == 0) {
+			iov.iov_base = &wire_hdr;
+			iov.iov_len = sizeof(wire_hdr);
+			uio.uio_iov = &iov;
+			uio.uio_iovcnt = 1;
+			uio.uio_rw = UIO_READ;
+			uio.uio_segflg = UIO_SYSSPACE;
+			uio.uio_td = curthread;
+			uio.uio_resid = sizeof(wire_hdr);
+			flags = MSG_DONTWAIT;
+			error = soreceive(softc->ha_so, NULL, &uio, NULL,
+			    NULL, &flags);
+			if (error != 0) {
+				printf("%s: header receive error %d\n",
+				    __func__, error);
+				SOCKBUF_LOCK(&so->so_rcv);
+				goto errout;
+			}
+		} else {
+			ctl_ha_evt(softc, wire_hdr.channel,
+			    CTL_HA_EVT_MSG_RECV, wire_hdr.length);
+			wire_hdr.length = 0;
+		}
+	}
+
+errout:
+	softc->ha_receiving = 0;
+	wakeup(&softc->ha_receiving);
+	SOCKBUF_UNLOCK(&so->so_rcv);
+	ctl_ha_conn_wake(softc);
+	kthread_exit();
+}
+
+static void
+ctl_ha_send(struct ha_softc *softc)
+{
+	struct socket *so = softc->ha_so;
+	int error;
+
+	while (1) {
+		if (softc->ha_sending == NULL) {
+			mtx_lock(&softc->ha_lock);
+			softc->ha_sending = mbufq_dequeue(&softc->ha_sendq);
+			mtx_unlock(&softc->ha_lock);
+			if (softc->ha_sending == NULL) {
+				so->so_snd.sb_lowat = so->so_snd.sb_hiwat + 1;
+				break;
+			}
+		}
+		SOCKBUF_LOCK(&so->so_snd);
+		if (sbspace(&so->so_snd) < softc->ha_sending->m_pkthdr.len) {
+			so->so_snd.sb_lowat = softc->ha_sending->m_pkthdr.len;
+			SOCKBUF_UNLOCK(&so->so_snd);
+			break;
+		}
+		SOCKBUF_UNLOCK(&so->so_snd);
+		error = sosend(softc->ha_so, NULL, NULL, softc->ha_sending,
+		    NULL, MSG_DONTWAIT, curthread);
+		softc->ha_sending = NULL;
+		if (error != 0) {
+			printf("%s: sosend() error %d\n", __func__, error);
+			return;
+		}
+	}
+}
+
+static void
+ctl_ha_sock_setup(struct ha_softc *softc)
+{
+	struct sockopt opt;
+	struct socket *so = softc->ha_so;
+	int error, val;
+
+	val = 1024 * 1024;
+	error = soreserve(so, val, val);
+	if (error)
+		printf("%s: soreserve failed %d\n", __func__, error);
+
+	SOCKBUF_LOCK(&so->so_rcv);
+	so->so_rcv.sb_lowat = sizeof(struct ha_msg_wire);
+	soupcall_set(so, SO_RCV, ctl_ha_rupcall, softc);
+	SOCKBUF_UNLOCK(&so->so_rcv);
+	SOCKBUF_LOCK(&so->so_snd);
+	so->so_snd.sb_lowat = sizeof(struct ha_msg_wire);
+	soupcall_set(so, SO_SND, ctl_ha_supcall, softc);
+	SOCKBUF_UNLOCK(&so->so_snd);
+
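+	/*
+	 * Aggressive TCP keepalive (3s connect, 1s idle, 1s interval,
+	 * 5 probes) so a dead peer is noticed within a few seconds.
+	 */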
+	bzero(&opt, sizeof(struct sockopt));
+	opt.sopt_dir = SOPT_SET;
+	opt.sopt_level = SOL_SOCKET;
+	opt.sopt_name = SO_KEEPALIVE;
+	opt.sopt_val = &val;
+	opt.sopt_valsize = sizeof(val);
+	val = 1;
+	error = sosetopt(so, &opt);
+	if (error)
+		printf("%s: KEEPALIVE setting failed %d\n", __func__, error);
+
+	opt.sopt_level = IPPROTO_TCP;
+	opt.sopt_name = TCP_NODELAY;
+	val = 1;
+	error = sosetopt(so, &opt);
+	if (error)
+		printf("%s: NODELAY setting failed %d\n", __func__, error);
+
+	opt.sopt_name = TCP_KEEPINIT;
+	val = 3;
+	error = sosetopt(so, &opt);
+	if (error)
+		printf("%s: KEEPINIT setting failed %d\n", __func__, error);
+
+	opt.sopt_name = TCP_KEEPIDLE;
+	val = 1;
+	error = sosetopt(so, &opt);
+	if (error)
+		printf("%s: KEEPIDLE setting failed %d\n", __func__, error);
+
+	opt.sopt_name = TCP_KEEPINTVL;
+	val = 1;
+	error = sosetopt(so, &opt);
+	if (error)
+		printf("%s: KEEPINTVL setting failed %d\n", __func__, error);
+
+	opt.sopt_name = TCP_KEEPCNT;
+	val = 5;
+	error = sosetopt(so, &opt);
+	if (error)
+		printf("%s: KEEPCNT setting failed %d\n", __func__, error);
+}
+
+static int
+ctl_ha_connect(struct ha_softc *softc)
+{
+	struct thread *td = curthread;
+	struct sockaddr_in sa;
+	struct socket *so;
+	int error;
+
+	/* Create the socket */
+	error = socreate(PF_INET, &so, SOCK_STREAM,
+	    IPPROTO_TCP, td->td_ucred, td);
+	if (error != 0) {
+		printf("%s: socreate() error %d\n", __func__, error);
+		return (error);
+	}
+	softc->ha_so = so;
+	ctl_ha_sock_setup(softc);
+
+	memcpy(&sa, &softc->ha_peer_in, sizeof(sa));
+	error = soconnect(so, (struct sockaddr *)&sa, td);
+	if (error != 0) {
+		if (bootverbose)
+			printf("%s: soconnect() error %d\n", __func__, error);
+		goto out;
+	}
+	return (0);
+
+out:
+	ctl_ha_close(softc);
+	return (error);
+}
+
+static int
+ctl_ha_accept(struct ha_softc *softc)
+{
+	struct socket *so;
+	struct sockaddr *sap;
+	int error;
+
+	ACCEPT_LOCK();
+	if (softc->ha_lso->so_rcv.sb_state & SBS_CANTRCVMORE)
+		softc->ha_lso->so_error = ECONNABORTED;
+	if (softc->ha_lso->so_error) {
+		error = softc->ha_lso->so_error;
+		softc->ha_lso->so_error = 0;
+		ACCEPT_UNLOCK();
+		printf("%s: socket error %d\n", __func__, error);
+		goto out;
+	}
+	so = TAILQ_FIRST(&softc->ha_lso->so_comp);
+	if (so == NULL) {
+		ACCEPT_UNLOCK();
+		return (EWOULDBLOCK);
+	}
+	KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
+	KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
+
+	/*
+	 * Before changing the flags on the socket, we have to bump the
+	 * reference count.  Otherwise, if the protocol calls sofree(),
+	 * the socket will be released due to a zero refcount.
+	 */
+	SOCK_LOCK(so);			/* soref() and so_state update */
+	soref(so);			/* file descriptor reference */
+
+	TAILQ_REMOVE(&softc->ha_lso->so_comp, so, so_list);
+	softc->ha_lso->so_qlen--;
+	so->so_state |= SS_NBIO;
+	so->so_qstate &= ~SQ_COMP;
+	so->so_head = NULL;
+
+	SOCK_UNLOCK(so);
+	ACCEPT_UNLOCK();
+
+	sap = NULL;
+	error = soaccept(so, &sap);
+	if (error != 0) {
+		printf("%s: soaccept() error %d\n", __func__, error);
+		if (sap != NULL)
+			free(sap, M_SONAME);
+		goto out;
+	}
+	if (sap != NULL)
+		free(sap, M_SONAME);
+	softc->ha_so = so;
+	ctl_ha_sock_setup(softc);
+	return (0);
+
+out:
+	ctl_ha_lclose(softc);
+	return (error);
+}
+
+static int
+ctl_ha_listen(struct ha_softc *softc)
+{
+	struct thread *td = curthread;
+	struct sockaddr_in sa;
+	struct sockopt opt;
+	int error, val;
+
+	/* Create the socket */
+	if (softc->ha_lso == NULL) {
+		error = socreate(PF_INET, &softc->ha_lso, SOCK_STREAM,
+		    IPPROTO_TCP, td->td_ucred, td);
+		if (error != 0) {
+			printf("%s: socreate() error %d\n", __func__, error);
+			return (error);
+		}
+		bzero(&opt, sizeof(struct sockopt));
+		opt.sopt_dir = SOPT_SET;
+		opt.sopt_level = SOL_SOCKET;
+		opt.sopt_name = SO_REUSEADDR;
+		opt.sopt_val = &val;
+		opt.sopt_valsize = sizeof(val);
+		val = 1;
+		error = sosetopt(softc->ha_lso, &opt);
+		if (error) {
+			printf("%s: REUSEADDR setting failed %d\n",
+			    __func__, error);
+		}
+		bzero(&opt, sizeof(struct sockopt));
+		opt.sopt_dir = SOPT_SET;
+		opt.sopt_level = SOL_SOCKET;
+		opt.sopt_name = SO_REUSEPORT;
+		opt.sopt_val = &val;
+		opt.sopt_valsize = sizeof(val);
+		val = 1;
+		error = sosetopt(softc->ha_lso, &opt);
+		if (error) {
+			printf("%s: REUSEPORT setting failed %d\n",
+			    __func__, error);
+		}
+		SOCKBUF_LOCK(&softc->ha_lso->so_rcv);
+		soupcall_set(softc->ha_lso, SO_RCV, ctl_ha_lupcall, softc);
+		SOCKBUF_UNLOCK(&softc->ha_lso->so_rcv);
+	}
+
+	memcpy(&sa, &softc->ha_peer_in, sizeof(sa));
+	error = sobind(softc->ha_lso, (struct sockaddr *)&sa, td);
+	if (error != 0) {
+		printf("%s: sobind() error %d\n", __func__, error);
+		goto out;
+	}
+	error = solisten(softc->ha_lso, 1, td);
+	if (error != 0) {
+		printf("%s: solisten() error %d\n", __func__, error);
+		goto out;
+	}
+	return (0);
+
+out:
+	ctl_ha_lclose(softc);
+	return (error);
+}
+
+static void
+ctl_ha_conn_thread(void *arg)
+{
+	struct ha_softc *softc = arg;
+	int error;
+
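+	/*
+	 * Connection state machine: this thread handles (re)connect,
+	 * listen/accept and transmit; receive runs in a separate "ha_rx"
+	 * kthread.
+	 */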
+	while (1) {
+		if (softc->ha_disconnect || softc->ha_shutdown) {
+			ctl_ha_close(softc);
+			if (softc->ha_disconnect == 2 || softc->ha_shutdown)
+				ctl_ha_lclose(softc);
+			softc->ha_disconnect = 0;
+			if (softc->ha_shutdown)
+				break;
+		} else if (softc->ha_so != NULL &&
+		    (softc->ha_so->so_error ||
+		     softc->ha_so->so_rcv.sb_state & SBS_CANTRCVMORE))
+			ctl_ha_close(softc);
+		if (softc->ha_so == NULL) {
+			if (softc->ha_lso != NULL)
+				ctl_ha_accept(softc);
+			else if (softc->ha_listen)
+				ctl_ha_listen(softc);
+			else if (softc->ha_connect)
+				ctl_ha_connect(softc);
+		}
+		if (softc->ha_so != NULL) {
+			if (softc->ha_connected == 0 &&
+			    softc->ha_so->so_error == 0 &&
+			    (softc->ha_so->so_state & SS_ISCONNECTING) == 0) {
+				softc->ha_connected = 1;
+				ctl_ha_evt(softc, CTL_HA_CHAN_MAX,
+				    CTL_HA_EVT_LINK_CHANGE,
+				    CTL_HA_LINK_ONLINE);
+				softc->ha_receiving = 1;
+				error = kproc_kthread_add(ctl_ha_rx_thread,
+				    softc, &softc->ha_ctl_softc->ctl_proc,
+				    NULL, 0, 0, "ctl", "ha_rx");
+				if (error != 0) {
+					printf("Error creating CTL HA rx thread!\n");
+					softc->ha_receiving = 0;
+					softc->ha_disconnect = 1;
+				}
+			}
+			ctl_ha_send(softc);
+		}
+		mtx_lock(&softc->ha_lock);
+		if (softc->ha_so != NULL &&
+		    (softc->ha_so->so_error ||
+		     softc->ha_so->so_rcv.sb_state & SBS_CANTRCVMORE))
+			;
+		else if (!softc->ha_wakeup)
+			msleep(&softc->ha_wakeup, &softc->ha_lock, 0, "-", hz);
+		softc->ha_wakeup = 0;
+		mtx_unlock(&softc->ha_lock);
+	}
+	mtx_lock(&softc->ha_lock);
+	softc->ha_shutdown = 2;
+	wakeup(&softc->ha_wakeup);
+	mtx_unlock(&softc->ha_lock);
+	kthread_exit();
+}
+
+static int
+ctl_ha_peer_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	struct ha_softc *softc = (struct ha_softc *)arg1;
+	struct sockaddr_in *sa;
+	int error, b1, b2, b3, b4, p, num;
+	char buf[128];
+
+	strlcpy(buf, softc->ha_peer, sizeof(buf));
+	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+	if ((error != 0) || (req->newptr == NULL) ||
+	    strncmp(buf, softc->ha_peer, sizeof(buf)) == 0)
+		return (error);
+
+	sa = &softc->ha_peer_in;
+	mtx_lock(&softc->ha_lock);
+	if ((num = sscanf(buf, "connect %d.%d.%d.%d:%d",
+	    &b1, &b2, &b3, &b4, &p)) >= 4) {
+		softc->ha_connect = 1;
+		softc->ha_listen = 0;
+	} else if ((num = sscanf(buf, "listen %d.%d.%d.%d:%d",
+	    &b1, &b2, &b3, &b4, &p)) >= 4) {
+		softc->ha_connect = 0;
+		softc->ha_listen = 1;
+	} else {
+		softc->ha_connect = 0;
+		softc->ha_listen = 0;
+		if (buf[0] != 0) {
+			buf[0] = 0;
+			error = EINVAL;
+		}
+	}
+	strlcpy(softc->ha_peer, buf, sizeof(softc->ha_peer));
+	if (softc->ha_connect || softc->ha_listen) {
+		memset(sa, 0, sizeof(*sa));
+		sa->sin_len = sizeof(struct sockaddr_in);
+		sa->sin_family = AF_INET;
+		sa->sin_port = htons((num >= 5) ? p : 999);
+		sa->sin_addr.s_addr =
+		    htonl((b1 << 24) + (b2 << 16) + (b3 << 8) + b4);
+	}
+	softc->ha_disconnect = 2;
+	softc->ha_wakeup = 1;
+	mtx_unlock(&softc->ha_lock);
+	wakeup(&softc->ha_wakeup);
+	return (error);
+}
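+
+/*
+ * Editor's note: a minimal usage sketch (addresses hypothetical); if the
+ * port is omitted, the default 999 above is used:
+ *
+ *	sysctl kern.cam.ctl.ha_peer="listen 10.0.0.1:999"	(on one node)
+ *	sysctl kern.cam.ctl.ha_peer="connect 10.0.0.1:999"	(on the peer)
+ */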
+
+ctl_ha_status
+ctl_ha_msg_register(ctl_ha_channel channel, ctl_evt_handler handler)
+{
+	struct ha_softc *softc = &ha_softc;
+
+	KASSERT(channel < CTL_HA_CHAN_MAX,
+	    ("Wrong CTL HA channel %d", channel));
+	softc->ha_handler[channel] = handler;
+	return (CTL_HA_STATUS_SUCCESS);
+}
+
+ctl_ha_status
+ctl_ha_msg_deregister(ctl_ha_channel channel)
+{
+	struct ha_softc *softc = &ha_softc;
+
+	KASSERT(channel < CTL_HA_CHAN_MAX,
+	    ("Wrong CTL HA channel %d", channel));
+	softc->ha_handler[channel] = NULL;
+	return (CTL_HA_STATUS_SUCCESS);
+}
+
+/*
+ * Receive a message of the specified size.
+ */
+ctl_ha_status
+ctl_ha_msg_recv(ctl_ha_channel channel, void *addr, size_t len,
+		int wait)
+{
+	struct ha_softc *softc = &ha_softc;
+	struct uio uio;
+	struct iovec iov;
+	int error, flags;
+
+	if (!softc->ha_connected)
+		return (CTL_HA_STATUS_DISCONNECT);
+
+	iov.iov_base = addr;
+	iov.iov_len = len;
+	uio.uio_iov = &iov;
+	uio.uio_iovcnt = 1;
+	uio.uio_rw = UIO_READ;
+	uio.uio_segflg = UIO_SYSSPACE;
+	uio.uio_td = curthread;
+	uio.uio_resid = len;
+	flags = wait ? 0 : MSG_DONTWAIT;
+	error = soreceive(softc->ha_so, NULL, &uio, NULL, NULL, &flags);
+	if (error == 0)
+		return (CTL_HA_STATUS_SUCCESS);
+
+	/* Consider all errors fatal for HA sanity. */
+	mtx_lock(&softc->ha_lock);
+	if (softc->ha_connected) {
+		softc->ha_disconnect = 1;
+		softc->ha_wakeup = 1;
+		wakeup(&softc->ha_wakeup);
+	}
+	mtx_unlock(&softc->ha_lock);
+	return (CTL_HA_STATUS_ERROR);
+}
+
+/*
+ * Send a message of the specified size.
+ */
+ctl_ha_status
+ctl_ha_msg_send2(ctl_ha_channel channel, const void *addr, size_t len,
+    const void *addr2, size_t len2, int wait)
+{
+	struct ha_softc *softc = &ha_softc;
+	struct mbuf *mb, *newmb;
+	struct ha_msg_wire hdr;
+	size_t copylen, off;
+
+	if (!softc->ha_connected)
+		return (CTL_HA_STATUS_DISCONNECT);
+
+	newmb = m_getm2(NULL, sizeof(hdr) + len + len2, wait, MT_DATA,
+	    M_PKTHDR);
+	if (newmb == NULL) {
+		/* Consider all errors fatal for HA sanity. */
+		mtx_lock(&softc->ha_lock);
+		if (softc->ha_connected) {
+			softc->ha_disconnect = 1;
+			softc->ha_wakeup = 1;
+			wakeup(&softc->ha_wakeup);
+		}
+		mtx_unlock(&softc->ha_lock);
+		printf("%s: Can't allocate mbuf chain\n", __func__);
+		return (CTL_HA_STATUS_ERROR);
+	}
+	hdr.channel = channel;
+	hdr.length = len + len2;
+	mb = newmb;
+	memcpy(mtodo(mb, 0), &hdr, sizeof(hdr));
+	mb->m_len += sizeof(hdr);
+	off = 0;
+	for (; mb != NULL && off < len; mb = mb->m_next) {
+		copylen = min(M_TRAILINGSPACE(mb), len - off);
+		memcpy(mtodo(mb, mb->m_len), (const char *)addr + off, copylen);
+		mb->m_len += copylen;
+		off += copylen;
+		if (off == len)
+			break;
+	}
+	KASSERT(off == len, ("%s: off (%zu) != len (%zu)", __func__,
+	    off, len));
+	off = 0;
+	for (; mb != NULL && off < len2; mb = mb->m_next) {
+		copylen = min(M_TRAILINGSPACE(mb), len2 - off);
+		memcpy(mtodo(mb, mb->m_len), (const char *)addr2 + off, copylen);
+		mb->m_len += copylen;
+		off += copylen;
+	}
+	KASSERT(off == len2, ("%s: off (%zu) != len2 (%zu)", __func__,
+	    off, len2));
+	newmb->m_pkthdr.len = sizeof(hdr) + len + len2;
+
+	mtx_lock(&softc->ha_lock);
+	if (!softc->ha_connected) {
+		mtx_unlock(&softc->ha_lock);
+		m_freem(newmb);
+		return (CTL_HA_STATUS_DISCONNECT);
+	}
+	mbufq_enqueue(&softc->ha_sendq, newmb);
+	softc->ha_wakeup = 1;
+	mtx_unlock(&softc->ha_lock);
+	wakeup(&softc->ha_wakeup);
+	return (CTL_HA_STATUS_SUCCESS);
+}
+
+ctl_ha_status
+ctl_ha_msg_send(ctl_ha_channel channel, const void *addr, size_t len,
+    int wait)
+{
+
+	return (ctl_ha_msg_send2(channel, addr, len, NULL, 0, wait));
+}
+
+ctl_ha_status
+ctl_ha_msg_abort(ctl_ha_channel channel)
+{
+	struct ha_softc *softc = &ha_softc;
+
+	mtx_lock(&softc->ha_lock);
+	softc->ha_disconnect = 1;
+	softc->ha_wakeup = 1;
+	mtx_unlock(&softc->ha_lock);
+	wakeup(&softc->ha_wakeup);
+	return (CTL_HA_STATUS_SUCCESS);
+}
+
+/*
+ * Allocate a data transfer request structure.
+ */
+struct ctl_ha_dt_req *
+ctl_dt_req_alloc(void)
+{
+
+	return (malloc(sizeof(struct ctl_ha_dt_req), M_CTL, M_WAITOK | M_ZERO));
+}
+
+/*
+ * Free a data transfer request structure.
+ */
+void
+ctl_dt_req_free(struct ctl_ha_dt_req *req)
+{
+
+	free(req, M_CTL);
+}
+
+/*
+ * Issue a DMA request for a single buffer.
+ */
+ctl_ha_status
+ctl_dt_single(struct ctl_ha_dt_req *req)
+{
+	struct ha_softc *softc = &ha_softc;
+	struct ha_dt_msg_wire wire_dt;
+	ctl_ha_status status;
+
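+	/*
+	 * Asynchronous READs (those with a callback) are parked on the
+	 * ha_dts list until the peer's WRITE reply arrives; see
+	 * ctl_dt_event_handler().
+	 */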
+	wire_dt.command = req->command;
+	wire_dt.size = req->size;
+	wire_dt.local = req->local;
+	wire_dt.remote = req->remote;
+	if (req->command == CTL_HA_DT_CMD_READ && req->callback != NULL) {
+		mtx_lock(&softc->ha_lock);
+		TAILQ_INSERT_TAIL(&softc->ha_dts, req, links);
+		mtx_unlock(&softc->ha_lock);
+		ctl_ha_msg_send(CTL_HA_CHAN_DATA, &wire_dt, sizeof(wire_dt),
+		    M_WAITOK);
+		return (CTL_HA_STATUS_WAIT);
+	}
+	if (req->command == CTL_HA_DT_CMD_READ) {
+		status = ctl_ha_msg_send(CTL_HA_CHAN_DATA, &wire_dt,
+		    sizeof(wire_dt), M_WAITOK);
+	} else {
+		status = ctl_ha_msg_send2(CTL_HA_CHAN_DATA, &wire_dt,
+		    sizeof(wire_dt), req->local, req->size, M_WAITOK);
+	}
+	return (status);
+}
+
+static void
+ctl_dt_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
+{
+	struct ha_softc *softc = &ha_softc;
+	struct ctl_ha_dt_req *req;
+	ctl_ha_status isc_status;
+
+	if (event == CTL_HA_EVT_MSG_RECV) {
+		struct ha_dt_msg_wire wire_dt;
+		uint8_t *tmp;
+		int size;
+
+		size = min(sizeof(wire_dt), param);
+		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_DATA, &wire_dt,
+					     size, M_WAITOK);
+		if (isc_status != CTL_HA_STATUS_SUCCESS) {
+			printf("%s: Error receiving message: %d\n",
+			    __func__, isc_status);
+			return;
+		}
+
+		if (wire_dt.command == CTL_HA_DT_CMD_READ) {
+			wire_dt.command = CTL_HA_DT_CMD_WRITE;
+			tmp = wire_dt.local;
+			wire_dt.local = wire_dt.remote;
+			wire_dt.remote = tmp;
+			ctl_ha_msg_send2(CTL_HA_CHAN_DATA, &wire_dt,
+			    sizeof(wire_dt), wire_dt.local, wire_dt.size,
+			    M_WAITOK);
+		} else if (wire_dt.command == CTL_HA_DT_CMD_WRITE) {
+			isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_DATA,
+			    wire_dt.remote, wire_dt.size, M_WAITOK);
+			mtx_lock(&softc->ha_lock);
+			TAILQ_FOREACH(req, &softc->ha_dts, links) {
+				if (req->local == wire_dt.remote) {
+					TAILQ_REMOVE(&softc->ha_dts, req, links);
+					break;
+				}
+			}
+			mtx_unlock(&softc->ha_lock);
+			if (req) {
+				req->ret = isc_status;
+				req->callback(req);
+			}
+		}
+	} else if (event == CTL_HA_EVT_LINK_CHANGE) {
+		CTL_DEBUG_PRINT(("%s: Link state change to %d\n", __func__,
+		    param));
+		if (param != CTL_HA_LINK_ONLINE) {
+			mtx_lock(&softc->ha_lock);
+			while ((req = TAILQ_FIRST(&softc->ha_dts)) != NULL) {
+				TAILQ_REMOVE(&softc->ha_dts, req, links);
+				mtx_unlock(&softc->ha_lock);
+				req->ret = CTL_HA_STATUS_DISCONNECT;
+				req->callback(req);
+				mtx_lock(&softc->ha_lock);
+			}
+			mtx_unlock(&softc->ha_lock);
+		}
+	} else {
+		printf("%s: Unknown event %d\n", __func__, event);
+	}
+}
+
+
+ctl_ha_status
+ctl_ha_msg_init(struct ctl_softc *ctl_softc)
+{
+	struct ha_softc *softc = &ha_softc;
+	int error;
+
+	softc->ha_ctl_softc = ctl_softc;
+	mtx_init(&softc->ha_lock, "CTL HA mutex", NULL, MTX_DEF);
+	mbufq_init(&softc->ha_sendq, INT_MAX);
+	TAILQ_INIT(&softc->ha_dts);
+	error = kproc_kthread_add(ctl_ha_conn_thread, softc,
+	    &ctl_softc->ctl_proc, NULL, 0, 0, "ctl", "ha_tx");
+	if (error != 0) {
+		printf("error creating CTL HA connection thread!\n");
+		mtx_destroy(&softc->ha_lock);
+		return (CTL_HA_STATUS_ERROR);
+	}
+	softc->ha_shutdown_eh = EVENTHANDLER_REGISTER(shutdown_pre_sync,
+	    ctl_ha_msg_shutdown, ctl_softc, SHUTDOWN_PRI_FIRST);
+	SYSCTL_ADD_PROC(&ctl_softc->sysctl_ctx,
+	    SYSCTL_CHILDREN(ctl_softc->sysctl_tree),
+	    OID_AUTO, "ha_peer", CTLTYPE_STRING | CTLFLAG_RWTUN,
+	    softc, 0, ctl_ha_peer_sysctl, "A", "HA peer connection method");
+
+	if (ctl_ha_msg_register(CTL_HA_CHAN_DATA, ctl_dt_event_handler)
+	    != CTL_HA_STATUS_SUCCESS) {
+		printf("%s: ctl_ha_msg_register failed.\n", __func__);
+	}
+
+	return (CTL_HA_STATUS_SUCCESS);
+}
+
+void
+ctl_ha_msg_shutdown(struct ctl_softc *ctl_softc)
+{
+	struct ha_softc *softc = &ha_softc;
+
+	/* Disconnect and shutdown threads. */
+	mtx_lock(&softc->ha_lock);
+	if (softc->ha_shutdown < 2) {
+		softc->ha_shutdown = 1;
+		softc->ha_wakeup = 1;
+		wakeup(&softc->ha_wakeup);
+		while (softc->ha_shutdown < 2 && !SCHEDULER_STOPPED()) {
+			msleep(&softc->ha_wakeup, &softc->ha_lock, 0,
+			    "shutdown", hz);
+		}
+	}
+	mtx_unlock(&softc->ha_lock);
+}
+
+ctl_ha_status
+ctl_ha_msg_destroy(struct ctl_softc *ctl_softc)
+{
+	struct ha_softc *softc = &ha_softc;
+
+	if (softc->ha_shutdown_eh != NULL) {
+		EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
+		    softc->ha_shutdown_eh);
+		softc->ha_shutdown_eh = NULL;
+	}
+
+	ctl_ha_msg_shutdown(ctl_softc);	/* Just in case. */
+
+	if (ctl_ha_msg_deregister(CTL_HA_CHAN_DATA) != CTL_HA_STATUS_SUCCESS)
+		printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
+
+	mtx_destroy(&softc->ha_lock);
+	return (CTL_HA_STATUS_SUCCESS);
+}
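
The DT interface above is small enough that its intended call pattern is
worth showing.  A minimal sketch, assuming a hypothetical backend caller
(only ctl_dt_req_alloc(), the ctl_ha_dt_req fields, and ctl_dt_single()
come from the code above; the function names and buffers are illustrative):

	/*
	 * Pull 'size' bytes from the peer's buffer at 'remote' into our
	 * 'local' buffer.  A CTL_HA_DT_CMD_READ with a callback returns
	 * CTL_HA_STATUS_WAIT; ctl_dt_event_handler() later matches the
	 * completion by its local address and invokes the callback with
	 * req->ret filled in.
	 */
	static void
	example_dt_done(struct ctl_ha_dt_req *req)
	{

		if (req->ret != CTL_HA_STATUS_SUCCESS)
			printf("HA DMA failed: %d\n", req->ret);
		ctl_dt_req_free(req);
	}

	static ctl_ha_status
	example_dt_read(uint8_t *local, uint8_t *remote, uint32_t size)
	{
		struct ctl_ha_dt_req *req;

		req = ctl_dt_req_alloc();	/* M_WAITOK, so never NULL */
		req->command = CTL_HA_DT_CMD_READ;
		req->local = local;
		req->remote = remote;
		req->size = size;
		req->callback = example_dt_done;
		return (ctl_dt_single(req));	/* CTL_HA_STATUS_WAIT */
	}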


Property changes on: trunk/sys/cam/ctl/ctl_ha.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Modified: trunk/sys/cam/ctl/ctl_ha.h
===================================================================
--- trunk/sys/cam/ctl/ctl_ha.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_ha.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,6 +1,8 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003-2009 Silicon Graphics International Corp.
  * Copyright (c) 2011 Spectra Logic Corporation
+ * Copyright (c) 2015 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -28,8 +30,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_ha.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ha.h#1 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_ha.h 288789 2015-10-05 10:53:13Z mav $
  */
 
 #ifndef _CTL_HA_H_
@@ -38,77 +40,27 @@
 /*
  * CTL High Availability Modes:
  *
- * CTL_HA_MODE_SER_ONLY:  Commands are serialized to the other side.  Write
- *			  mirroring and read re-direction are assumed to
- * 			  happen in the back end.
- * CTL_HA_MODE_XFER:	  Commands are serialized and data is transferred
- *			  for write mirroring and read re-direction.
+ * CTL_HA_MODE_ACT_STBY:  Commands are serialized to the master side.
+ *			  No media access commands on slave side (Standby).
+ * CTL_HA_MODE_SER_ONLY:  Commands are serialized to the master side.
+ *			  Media can be accessed on both sides.
+ * CTL_HA_MODE_XFER:	  Commands and data are forwarded to the
+ *			  master side for execution.
  */
-
 typedef enum {
+	CTL_HA_MODE_ACT_STBY,
 	CTL_HA_MODE_SER_ONLY,
 	CTL_HA_MODE_XFER
 } ctl_ha_mode;
 
-
 /*
- * This is a stubbed out High Availability interface.  It assumes two nodes
- * staying in sync.
- *
- * The reason this interface is here, and stubbed out, is that CTL was
- * originally written with support for Copan's (now SGI) high availability
- * framework.  That framework was not released by SGI, and would not have
- * been generally applicable to FreeBSD anyway.
- *
- * The idea here is to show the kind of API that would need to be in place
- * in a HA framework to work with CTL's HA hooks.  This API is very close
- * to the Copan/SGI API, so that the code using it could stay in place
- * as-is.
- *
- * So, in summary, this is a shell without real substance, and much more
- * work would be needed to actually make HA work.  The implementation
- * inside CTL will also need to change to fit the eventual implementation.
- * The additional pieces we would need are:
- *
- *  - HA "Supervisor" framework that can startup the components of the
- *    system, and initiate failover (i.e. active/active to single mode)
- *    and failback (single to active/active mode) state transitions.
- *    This framework would be able to recognize when an event happens
- *    that requires it to initiate state transitions in the components it
- *    manages.
- *
- *  - HA communication framework.  This framework should have the following
- *    features:
- *	- Separate channels for separate system components.  The CTL
- *	  instance on one node should communicate with the CTL instance
- *	  on another node.
- *	- Short message passing.  These messages would be fixed length, so
- *	  they could be preallocated and easily passed between the nodes.
- *	  i.e. conceptually like an ethernet packet.
- *	- DMA/large buffer capability.  This would require some negotiation
- *	  with the other node to define the destination.  It could
- *	  allow for "push" (i.e. initiated by the requesting node) DMA or
- * 	  "pull" (i.e. initiated by the target controller) DMA or both.
- *	- Communication channel status change notification.
- *  - HA capability in other portions of the storage stack.  Having two CTL
- *    instances communicate is just one part of an overall HA solution.
- *    State needs to be synchronized at multiple levels of the system in
- *    order for failover to actually work.  For instance, if CTL is using a
- *    file on a ZFS filesystem as its backing store, the ZFS array state
- *    should be synchronized with the other node, so that the other node
- *    can immediately take over if the node that is primary for a particular
- *    array fails.
- */
-
-/*
  * Communication channel IDs for various system components.  This is to
  * make sure one CTL instance talks with another, one ZFS instance talks
  * with another, etc.
  */
 typedef enum {
-	CTL_HA_CHAN_NONE,
 	CTL_HA_CHAN_CTL,
-	CTL_HA_CHAN_ZFS,
+	CTL_HA_CHAN_DATA,
 	CTL_HA_CHAN_MAX
 } ctl_ha_channel;
 
@@ -117,18 +69,12 @@
  * HA communication subsystem.
  *
  * CTL_HA_EVT_MSG_RECV:		Message received by the other node.
- * CTL_HA_EVT_MSG_SENT:		Message sent to the other node.
- * CTL_HA_EVT_DISCONNECT:	Communication channel disconnected.
- * CTL_HA_EVT_DMA_SENT:		DMA successfully sent to other node (push).
- * CTL_HA_EVT_DMA_RECEIVED:	DMA successfully received by other node (pull).
+ * CTL_HA_EVT_LINK_CHANGE:	Communication channel status changed.
  */
 typedef enum {
 	CTL_HA_EVT_NONE,
 	CTL_HA_EVT_MSG_RECV,
-	CTL_HA_EVT_MSG_SENT,
-	CTL_HA_EVT_DISCONNECT,
-	CTL_HA_EVT_DMA_SENT,
-	CTL_HA_EVT_DMA_RECEIVED,
+	CTL_HA_EVT_LINK_CHANGE,
 	CTL_HA_EVT_MAX
 } ctl_ha_event;
 
@@ -143,12 +89,6 @@
 } ctl_ha_status;
 
 typedef enum {
-	CTL_HA_DATA_CTL,
-	CTL_HA_DATA_ZFS,
-	CTL_HA_DATA_MAX
-} ctl_ha_dtid;
-
-typedef enum {
 	CTL_HA_DT_CMD_READ,
 	CTL_HA_DT_CMD_WRITE,
 } ctl_ha_dt_cmd;
@@ -161,110 +101,42 @@
 	ctl_ha_dt_cmd	command;
 	void		*context;
 	ctl_ha_dt_cb	callback;
-	ctl_ha_dtid	id;
 	int		ret;
 	uint32_t	size;
 	uint8_t		*local;
 	uint8_t		*remote;
+	TAILQ_ENTRY(ctl_ha_dt_req)	 links;
 };
 
+struct ctl_softc;
+ctl_ha_status ctl_ha_msg_init(struct ctl_softc *softc);
+void ctl_ha_msg_shutdown(struct ctl_softc *softc);
+ctl_ha_status ctl_ha_msg_destroy(struct ctl_softc *softc);
+
 typedef void (*ctl_evt_handler)(ctl_ha_channel channel, ctl_ha_event event,
 				int param);
 void ctl_ha_register_evthandler(ctl_ha_channel channel,
 				ctl_evt_handler handler);
 
-static inline ctl_ha_status
-ctl_ha_msg_create(ctl_ha_channel channel, ctl_evt_handler handler)
-{
-	return (CTL_HA_STATUS_SUCCESS);
-}
+ctl_ha_status ctl_ha_msg_register(ctl_ha_channel channel,
+    ctl_evt_handler handler);
+ctl_ha_status ctl_ha_msg_recv(ctl_ha_channel channel, void *addr,
+    size_t len, int wait);
+ctl_ha_status ctl_ha_msg_send(ctl_ha_channel channel, const void *addr,
+    size_t len, int wait);
+ctl_ha_status ctl_ha_msg_send2(ctl_ha_channel channel, const void *addr,
+    size_t len, const void *addr2, size_t len2, int wait);
+ctl_ha_status ctl_ha_msg_abort(ctl_ha_channel channel);
+ctl_ha_status ctl_ha_msg_deregister(ctl_ha_channel channel);
 
-/*
- * Receive a message of the specified size.
- */
-static inline ctl_ha_status
-ctl_ha_msg_recv(ctl_ha_channel channel, void *buffer, unsigned int size,
-		int wait)
-{
-	return (CTL_HA_STATUS_SUCCESS);
-}
+struct ctl_ha_dt_req * ctl_dt_req_alloc(void);
+void ctl_dt_req_free(struct ctl_ha_dt_req *req);
+ctl_ha_status ctl_dt_single(struct ctl_ha_dt_req *req);
 
-/*
- * Send a message of the specified size.
- */
-static inline ctl_ha_status
-ctl_ha_msg_send(ctl_ha_channel channel, void *buffer, unsigned int size,
-		int wait)
-{
-	return (CTL_HA_STATUS_SUCCESS);
-}
-
-/*
- * Allocate a data transfer request structure.
- */
-static inline struct ctl_ha_dt_req *
-ctl_dt_req_alloc(void)
-{
-	return (NULL);
-}
-
-/*
- * Free a data transfer request structure.
- */
-static inline void
-ctl_dt_req_free(struct ctl_ha_dt_req *req)
-{
-	return;
-}
-
-/*
- * Issue a DMA request for a single buffer.
- */
-static inline ctl_ha_status
-ctl_dt_single(struct ctl_ha_dt_req *req)
-{
-	return (CTL_HA_STATUS_WAIT);
-}
-
-/*
- * SINGLE:	   One node
- * HA:		   Two nodes (Active/Active implied)
- * SLAVE/MASTER:   The component can set these flags to indicate which side
- *		   is in control.  It has no effect on the HA framework.
- */
 typedef enum {
-	CTL_HA_STATE_UNKNOWN	= 0x00,
-	CTL_HA_STATE_SINGLE	= 0x01,
-	CTL_HA_STATE_HA		= 0x02,
-	CTL_HA_STATE_MASK	= 0x0F,
-	CTL_HA_STATE_SLAVE	= 0x10,
-	CTL_HA_STATE_MASTER	= 0x20
-} ctl_ha_state;
+	CTL_HA_LINK_OFFLINE	= 0x00,
+	CTL_HA_LINK_UNKNOWN	= 0x01,
+	CTL_HA_LINK_ONLINE	= 0x02
+} ctl_ha_link_state;
 
-typedef enum {
-	CTL_HA_COMP_STATUS_OK,
-	CTL_HA_COMP_STATUS_FAILED,
-	CTL_HA_COMP_STATUS_ERROR
-} ctl_ha_comp_status;
-
-struct ctl_ha_component;
-
-typedef ctl_ha_comp_status (*ctl_hacmp_init_t)(struct ctl_ha_component *);
-typedef ctl_ha_comp_status (*ctl_hacmp_start_t)(struct ctl_ha_component *,
-						ctl_ha_state);
-
-struct ctl_ha_component {
-	char			*name;
-	ctl_ha_state 		state;
-	ctl_ha_comp_status	status;
-	ctl_hacmp_init_t	init;
-	ctl_hacmp_start_t	start;
-	ctl_hacmp_init_t	quiesce;
-};
-
-#define	CTL_HA_STATE_IS_SINGLE(state)	((state & CTL_HA_STATE_MASK) == \
-					  CTL_HA_STATE_SINGLE)
-#define	CTL_HA_STATE_IS_HA(state)	((state & CTL_HA_STATE_MASK) == \
-					  CTL_HA_STATE_HA)
-
 #endif /* _CTL_HA_H_ */
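
The registration half of this API pairs with the receive half in the
obvious way.  A minimal sketch, assuming a hypothetical consumer of
CTL_HA_CHAN_CTL (the handler body is illustrative; union ctl_ha_msg comes
from ctl_io.h, and the prototypes are the ones declared above):

	static void
	example_evt_handler(ctl_ha_channel channel, ctl_ha_event event,
	    int param)
	{
		union ctl_ha_msg msg;

		if (event == CTL_HA_EVT_MSG_RECV) {
			/* 'param' is the length of the queued message. */
			if (ctl_ha_msg_recv(channel, &msg,
			    min(sizeof(msg), param), M_WAITOK) !=
			    CTL_HA_STATUS_SUCCESS)
				return;
			/* ... dispatch on msg.hdr.msg_type ... */
		} else if (event == CTL_HA_EVT_LINK_CHANGE) {
			/* 'param' carries a ctl_ha_link_state value. */
		}
	}

	static void
	example_register(void)
	{

		if (ctl_ha_msg_register(CTL_HA_CHAN_CTL,
		    example_evt_handler) != CTL_HA_STATUS_SUCCESS)
			printf("%s: HA channel registration failed\n",
			    __func__);
	}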

Modified: trunk/sys/cam/ctl/ctl_io.h
===================================================================
--- trunk/sys/cam/ctl/ctl_io.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_io.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,5 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2015 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,8 +29,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_io.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_io.h#5 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_io.h 313367 2017-02-07 01:44:18Z mav $
  */
 /*
  * CAM Target Layer data movement structures/interface.
@@ -58,13 +60,12 @@
 #endif
 
 /*
- * Uncomment these next two lines to enable the CTL I/O delay feature.  You
+ * Uncomment this next line to enable the CTL I/O delay feature.  You
  * can delay I/O at two different points -- datamove and done.  This is
  * useful for diagnosing abort conditions (for hosts that send an abort on a
  * timeout), and for determining how long a host's timeout is.
  */
-#define	CTL_IO_DELAY
-#define	CTL_TIMER_BYTES		sizeof(struct callout)
+//#define	CTL_IO_DELAY
 
 typedef enum {
 	CTL_STATUS_NONE,	/* No status */
@@ -80,7 +81,7 @@
 
 /*
  * WARNING:  Keep the data in/out/none flags where they are.  They're used
- * in conjuction with ctl_cmd_flags.  See comment above ctl_cmd_flags
+ * in conjunction with ctl_cmd_flags.  See comment above ctl_cmd_flags
  * definition in ctl_private.h.
  */
 typedef enum {
@@ -89,16 +90,13 @@
 	CTL_FLAG_DATA_OUT	= 0x00000002,	/* DATA OUT */
 	CTL_FLAG_DATA_NONE	= 0x00000003,	/* no data */
 	CTL_FLAG_DATA_MASK	= 0x00000003,
-	CTL_FLAG_KDPTR_SGLIST	= 0x00000008, 	/* kern_data_ptr is S/G list*/
-	CTL_FLAG_EDPTR_SGLIST	= 0x00000010,	/* ext_data_ptr is S/G list */
 	CTL_FLAG_DO_AUTOSENSE	= 0x00000020,	/* grab sense info */
 	CTL_FLAG_USER_REQ	= 0x00000040,	/* request came from userland */
-	CTL_FLAG_CONTROL_DEV	= 0x00000080,	/* processor device */
 	CTL_FLAG_ALLOCATED	= 0x00000100,	/* data space allocated */
 	CTL_FLAG_BLOCKED	= 0x00000200,	/* on the blocked queue */
+	CTL_FLAG_ABORT_STATUS	= 0x00000400,	/* return TASK ABORTED status */
 	CTL_FLAG_ABORT		= 0x00000800,	/* this I/O should be aborted */
 	CTL_FLAG_DMA_INPROG	= 0x00001000,	/* DMA in progress */
-	CTL_FLAG_NO_DATASYNC	= 0x00002000,	/* don't cache flush data */
 	CTL_FLAG_DELAY_DONE	= 0x00004000,	/* delay injection done */
 	CTL_FLAG_INT_COPY	= 0x00008000,	/* internal copy, no done call*/
 	CTL_FLAG_SENT_2OTHER_SC	= 0x00010000,
@@ -108,9 +106,6 @@
 						   addresses, not virtual ones*/
 	CTL_FLAG_IO_CONT	= 0x00100000,	/* Continue I/O instead of
 						   completing */
-	CTL_FLAG_AUTO_MIRROR	= 0x00200000,	/* Automatically use memory
-						   from the RC cache mirrored
-						   address area. */
 #if 0
 	CTL_FLAG_ALREADY_DONE	= 0x00200000	/* I/O already completed */
 #endif
@@ -118,14 +113,10 @@
 	CTL_FLAG_DMA_QUEUED	= 0x00800000,	/* DMA queued but not started*/
 	CTL_FLAG_STATUS_QUEUED	= 0x01000000,	/* Status queued but not sent*/
 
-	CTL_FLAG_REDIR_DONE	= 0x02000000,	/* Redirection has already
-						   been done. */
 	CTL_FLAG_FAILOVER	= 0x04000000,	/* Killed by a failover */
 	CTL_FLAG_IO_ACTIVE	= 0x08000000,	/* I/O active on this SC */
-	CTL_FLAG_RDMA_MASK	= CTL_FLAG_NO_DATASYNC | CTL_FLAG_BUS_ADDR |
-				  CTL_FLAG_AUTO_MIRROR | CTL_FLAG_REDIR_DONE
-						/* Flags we care about for
-						   remote DMA */
+	CTL_FLAG_STATUS_SENT	= 0x10000000,	/* Status sent by datamove */
+	CTL_FLAG_SERSEQ_DONE	= 0x20000000	/* All storage I/O started */
 } ctl_io_flags;
 
 
@@ -134,10 +125,30 @@
 	uint32_t len;
 };
 
+struct ctl_lba_len_flags {
+	uint64_t lba;
+	uint32_t len;
+	uint32_t flags;
+#define CTL_LLF_FUA	0x04000000
+#define CTL_LLF_DPO	0x08000000
+#define CTL_LLF_READ	0x10000000
+#define CTL_LLF_WRITE	0x20000000
+#define CTL_LLF_VERIFY	0x40000000
+#define CTL_LLF_COMPARE	0x80000000
+};
+
+struct ctl_ptr_len_flags {
+	uint8_t *ptr;
+	uint32_t len;
+	uint32_t flags;
+};
+
 union ctl_priv {
 	uint8_t		bytes[sizeof(uint64_t) * 2];
 	uint64_t	integer;
+	uint64_t	integers[2];
 	void		*ptr;
+	void		*ptrs[2];
 };
 
 /*
@@ -153,35 +164,17 @@
 #define	CTL_PRIV_MODEPAGE	1	/* Modepage info for config write */
 #define	CTL_PRIV_BACKEND	2	/* Reserved for block, RAIDCore */
 #define	CTL_PRIV_BACKEND_LUN	3	/* Backend LUN pointer */
-#define	CTL_PRIV_FRONTEND	4	/* LSI driver, ioctl front end */
-#define	CTL_PRIV_USER		5	/* Userland use */
+#define	CTL_PRIV_FRONTEND	4	/* Frontend storage */
+#define	CTL_PRIV_FRONTEND2	5	/* Another frontend storage */
 
+#define CTL_LUN(io)	((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[0])
+#define CTL_SOFTC(io)	((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[1])
+#define CTL_BACKEND_LUN(io)	((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptrs[0])
+#define CTL_PORT(io)	(((struct ctl_softc *)CTL_SOFTC(io))->	\
+    ctl_ports[(io)->io_hdr.nexus.targ_port])
+
 #define CTL_INVALID_PORTNAME 0xFF
 #define CTL_UNMAPPED_IID     0xFF
-/*
- * XXX KDM this size is for the port_priv variable in struct ctl_io_hdr
- * below.  This should be defined in terms of the size of struct
- * ctlfe_lun_cmd_info at the moment:
- * struct ctlfe_lun_cmd_info {
- *	int cur_transfer_index;
- * 	ctlfe_cmd_flags flags;
- * 	bus_dma_segment_t cam_sglist[32];
- * };
- *
- * This isn't really the way I'd prefer to do it, but it does make some
- * sense, AS LONG AS we can guarantee that there will always only be one
- * outstanding DMA request per ctl_io.  If that assumption isn't valid,
- * then we've got problems.
- *
- * At some point it may be nice switch CTL over to using CCBs for
- * everything.  At that point we can probably use the ATIO/CTIO model, so
- * that multiple simultaneous DMAs per command will just work.
- *
- * Also note that the current size, 600, is appropriate for 64-bit
- * architectures, but is overkill for 32-bit architectures.  Need a way to
- * figure out the size at compile time, or just get rid of this altogether.
- */
-#define	CTL_PORT_PRIV_SIZE	600
 
 struct ctl_sg_entry {
 	void	*addr;
@@ -188,11 +181,6 @@
 	size_t	len;
 };
 
-struct ctl_id {
-	uint32_t		id;
-	uint64_t		wwid[2];
-};
-
 typedef enum {
 	CTL_IO_NONE,
 	CTL_IO_SCSI,
@@ -200,10 +188,10 @@
 } ctl_io_type;
 
 struct ctl_nexus {
-	struct ctl_id initid;		/* Initiator ID */
+	uint32_t initid;		/* Initiator ID */
 	uint32_t targ_port;		/* Target port, filled in by PORT */
-	struct ctl_id targ_target;	/* Destination target */
 	uint32_t targ_lun;		/* Destination lun */
+	uint32_t targ_mapped_lun;	/* Destination lun CTL-wide */
 };
 
 typedef enum {
@@ -213,16 +201,19 @@
 	CTL_MSG_BAD_JUJU,
 	CTL_MSG_MANAGE_TASKS,
 	CTL_MSG_PERS_ACTION,
-	CTL_MSG_SYNC_FE,
-	CTL_MSG_APS_LOCK,
 	CTL_MSG_DATAMOVE,
-	CTL_MSG_DATAMOVE_DONE
+	CTL_MSG_DATAMOVE_DONE,
+	CTL_MSG_UA,			/* Set/clear UA on secondary. */
+	CTL_MSG_PORT_SYNC,		/* Information about port. */
+	CTL_MSG_LUN_SYNC,		/* Information about LUN. */
+	CTL_MSG_IID_SYNC,		/* Information about initiator. */
+	CTL_MSG_LOGIN,			/* Information about HA peer. */
+	CTL_MSG_MODE_SYNC,		/* Mode page current content. */
+	CTL_MSG_FAILOVER		/* Fake, never sent though the wire */
 } ctl_msg_type;
 
 struct ctl_scsiio;
 
-#define	CTL_NUM_SG_ENTRIES	9
-
 struct ctl_io_hdr {
 	uint32_t	  version;	/* interface version XXX */
 	ctl_io_type	  io_type;	/* task I/O, SCSI I/O, etc. */
@@ -235,7 +226,7 @@
 	uint32_t	  timeout;	/* timeout in ms */
 	uint32_t	  retries;	/* retry count */
 #ifdef CTL_IO_DELAY
-	uint8_t		  timer_bytes[CTL_TIMER_BYTES]; /* timer kludge */
+	struct callout	  delay_callout;
 #endif /* CTL_IO_DELAY */
 #ifdef CTL_TIME_IO
 	time_t		  start_time;	/* I/O start time */
@@ -242,17 +233,14 @@
 	struct bintime	  start_bt;	/* Timer start ticks */
 	struct bintime	  dma_start_bt;	/* DMA start ticks */
 	struct bintime	  dma_bt;	/* DMA total ticks */
+#endif /* CTL_TIME_IO */
 	uint32_t	  num_dmas;	/* Number of DMAs */
-#endif /* CTL_TIME_IO */
 	union ctl_io	  *original_sc;
 	union ctl_io	  *serializing_sc;
 	void		  *pool;	/* I/O pool */
 	union ctl_priv	  ctl_private[CTL_NUM_PRIV];/* CTL private area */
-	uint8_t		  port_priv[CTL_PORT_PRIV_SIZE];/* PORT private area*/
-	struct ctl_sg_entry remote_sglist[CTL_NUM_SG_ENTRIES];
-	struct ctl_sg_entry remote_dma_sglist[CTL_NUM_SG_ENTRIES];
-	struct ctl_sg_entry local_sglist[CTL_NUM_SG_ENTRIES];
-	struct ctl_sg_entry local_dma_sglist[CTL_NUM_SG_ENTRIES];
+	struct ctl_sg_entry *remote_sglist;
+	struct ctl_sg_entry *local_sglist;
 	STAILQ_ENTRY(ctl_io_hdr) links;	/* linked list pointer */
 	TAILQ_ENTRY(ctl_io_hdr) ooa_links;
 	TAILQ_ENTRY(ctl_io_hdr) blocked_links;
@@ -279,22 +267,61 @@
  */
 struct ctl_scsiio {
 	struct ctl_io_hdr io_hdr;	/* common to all I/O types */
+
+	/*
+	 * The ext_* fields are generally intended for frontend use; CTL itself
+	 * doesn't modify or use them.
+	 */
 	uint32_t   ext_sg_entries;	/* 0 = no S/G list, > 0 = num entries */
 	uint8_t	   *ext_data_ptr;	/* data buffer or S/G list */
 	uint32_t   ext_data_len;	/* Data transfer length */
 	uint32_t   ext_data_filled;	/* Amount of data filled so far */
-	uint32_t   kern_sg_entries;	/* 0 = no S/G list, > 0 = num entries */
-	uint32_t   rem_sg_entries;	/* 0 = no S/G list, > 0 = num entries */
-	uint8_t    *kern_data_ptr;	/* data buffer or S/G list */
-	uint32_t   kern_data_len;	/* Length of this S/G list/buffer */
-	uint32_t   kern_total_len;	/* Total length of this transaction */
-	uint32_t   kern_data_resid;	/* Length left to transfer after this*/
-	uint32_t   kern_rel_offset;	/* Byte Offset of this transfer */
+
+	/*
+	 * The number of scatter/gather entries in the list pointed to
+	 * by kern_data_ptr.  0 means there is no list, just a data pointer.
+	 */
+	uint32_t   kern_sg_entries;
+
+	uint32_t   rem_sg_entries;	/* Unused. */
+
+	/*
+	 * The data pointer or a pointer to the scatter/gather list.
+	 */
+	uint8_t    *kern_data_ptr;
+
+	/*
+	 * Length of the data buffer or scatter/gather list.  It's also
+	 * the length of this particular piece of the data transfer,
+	 * i.e. the number of bytes expected to be transferred by the
+	 * current invocation of the frontend's datamove() callback.  It's
+	 * always less than or equal to kern_total_len.
+	 */
+	uint32_t   kern_data_len;
+
+	/*
+	 * Total length of data to be transferred during this particular
+	 * SCSI command, as decoded from SCSI CDB.
+	 */
+	uint32_t   kern_total_len;
+
+	/*
+	 * Amount of data left after the current data transfer.
+	 */
+	uint32_t   kern_data_resid;
+
+	/*
+	 * Byte offset of this transfer, equal to the amount of data
+	 * already transferred for this SCSI command during previous
+	 * datamove() invocations.
+	 */
+	uint32_t   kern_rel_offset;
+
 	struct     scsi_sense_data sense_data;	/* sense data */
 	uint8_t	   sense_len;		/* Returned sense length */
 	uint8_t	   scsi_status;		/* SCSI status byte */
-	uint8_t	   sense_residual;	/* sense residual length */
-	uint32_t   residual;		/* data residual length */
+	uint8_t	   sense_residual;	/* Unused. */
+	uint32_t   residual;		/* Unused. */
 	uint32_t   tag_num;		/* tag number */
 	ctl_tag_type tag_type;		/* simple, ordered, head of queue,etc.*/
 	uint8_t    cdb_len;		/* CDB length */
@@ -308,13 +335,25 @@
 	CTL_TASK_ABORT_TASK_SET,
 	CTL_TASK_CLEAR_ACA,
 	CTL_TASK_CLEAR_TASK_SET,
+	CTL_TASK_I_T_NEXUS_RESET,
 	CTL_TASK_LUN_RESET,
 	CTL_TASK_TARGET_RESET,
 	CTL_TASK_BUS_RESET,
 	CTL_TASK_PORT_LOGIN,
-	CTL_TASK_PORT_LOGOUT
+	CTL_TASK_PORT_LOGOUT,
+	CTL_TASK_QUERY_TASK,
+	CTL_TASK_QUERY_TASK_SET,
+	CTL_TASK_QUERY_ASYNC_EVENT
 } ctl_task_type;
 
+typedef enum {
+	CTL_TASK_FUNCTION_COMPLETE,
+	CTL_TASK_FUNCTION_SUCCEEDED,
+	CTL_TASK_FUNCTION_REJECTED,
+	CTL_TASK_LUN_DOES_NOT_EXIST,
+	CTL_TASK_FUNCTION_NOT_SUPPORTED
+} ctl_task_status;
+
 /*
  * Task management I/O structure.  Aborts, bus resets, etc., are sent using
  * this structure.
@@ -327,8 +366,29 @@
 	ctl_task_type		task_action; /* Target Reset, Abort, etc.  */
 	uint32_t		tag_num;     /* tag number */
 	ctl_tag_type		tag_type;    /* simple, ordered, etc. */
+	uint8_t			task_status; /* Complete, Succeeded, etc. */
+	uint8_t			task_resp[3];/* Response information */
 };
 
+
+/*
+ * HA link messages.
+ */
+#define	CTL_HA_VERSION		3
+
+/*
+ * Used for CTL_MSG_LOGIN.
+ */
+struct ctl_ha_msg_login {
+	ctl_msg_type		msg_type;
+	int			version;
+	int			ha_mode;
+	int			ha_id;
+	int			max_luns;
+	int			max_ports;
+	int			max_init_per_port;
+};
+
 typedef enum {
 	CTL_PR_REG_KEY,
 	CTL_PR_UNREG_KEY,
@@ -349,34 +409,37 @@
 	ctl_pr_action        action;
 	uint8_t              sa_res_key[8];
 	uint8_t              res_type;
-	uint16_t             residx;
+	uint32_t             residx;
 };
 
 struct ctl_ha_msg_hdr {
 	ctl_msg_type		msg_type;
+	uint32_t		status;	     /* transaction status */
 	union ctl_io		*original_sc;
 	union ctl_io		*serializing_sc;
 	struct ctl_nexus	nexus;	     /* Initiator, port, target, lun */
-	uint32_t		status;	     /* transaction status */
-	TAILQ_ENTRY(ctl_ha_msg_hdr) links;
 };
 
 #define	CTL_HA_MAX_SG_ENTRIES	16
+#define	CTL_HA_DATAMOVE_SEGMENT	131072
 
 /*
- * Used for CTL_MSG_APS_LOCK.
+ * Used for CTL_MSG_PERS_ACTION.
  */
-struct ctl_ha_msg_aps {
+struct ctl_ha_msg_pr {
 	struct ctl_ha_msg_hdr	hdr;
-	uint8_t			lock_flag;
+	struct ctl_pr_info	pr_info;
 };
 
 /*
- * Used for CTL_MSG_PERS_ACTION.
+ * Used for CTL_MSG_UA.
  */
-struct ctl_ha_msg_pr {
+struct ctl_ha_msg_ua {
 	struct ctl_ha_msg_hdr	hdr;
-	struct ctl_pr_info	pr_info;
+	int			ua_all;
+	int			ua_set;
+	int			ua_type;
+	uint8_t			ua_info[8];
 };
 
 /*
@@ -407,21 +470,21 @@
 };
 
 /*
- * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU.
+ * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU,
+ * and CTL_MSG_DATAMOVE_DONE.
  */
 struct ctl_ha_msg_scsi {
 	struct ctl_ha_msg_hdr	hdr;
-	uint8_t			cdb[CTL_MAX_CDBLEN];	/* CDB */
 	uint32_t		tag_num;     /* tag number */
 	ctl_tag_type		tag_type;    /* simple, ordered, etc. */
+	uint8_t			cdb[CTL_MAX_CDBLEN];	/* CDB */
+	uint8_t			cdb_len;	/* CDB length */
 	uint8_t			scsi_status; /* SCSI status byte */
-	struct scsi_sense_data	sense_data;  /* sense data */
 	uint8_t			sense_len;   /* Returned sense length */
-	uint8_t			sense_residual;	/* sense residual length */
-	uint32_t		residual;    /* data residual length */
-	uint32_t		fetd_status; /* trans status, set by FETD,
+	uint32_t		port_status; /* trans status, set by FETD,
 						0 = good*/
-	struct ctl_lba_len	lbalen;      /* used for stats */
+	uint32_t		kern_data_resid; /* for DATAMOVE_DONE */
+	struct scsi_sense_data	sense_data;  /* sense data */
 };
 
 /* 
@@ -434,6 +497,64 @@
 	ctl_tag_type		tag_type;    /* simple, ordered, etc. */
 };
 
+/*
+ * Used for CTL_MSG_PORT_SYNC.
+ */
+struct ctl_ha_msg_port {
+	struct ctl_ha_msg_hdr	hdr;
+	int			port_type;
+	int			physical_port;
+	int			virtual_port;
+	int			status;
+	int			name_len;
+	int			lun_map_len;
+	int			port_devid_len;
+	int			target_devid_len;
+	int			init_devid_len;
+	uint8_t			data[];
+};
+
+/*
+ * Used for CTL_MSG_LUN_SYNC.
+ */
+struct ctl_ha_msg_lun {
+	struct ctl_ha_msg_hdr	hdr;
+	int			flags;
+	unsigned int		pr_generation;
+	uint32_t		pr_res_idx;
+	uint8_t			pr_res_type;
+	int			lun_devid_len;
+	int			pr_key_count;
+	uint8_t			data[];
+};
+
+struct ctl_ha_msg_lun_pr_key {
+	uint32_t		pr_iid;
+	uint64_t		pr_key;
+};
+
+/*
+ * Used for CTL_MSG_IID_SYNC.
+ */
+struct ctl_ha_msg_iid {
+	struct ctl_ha_msg_hdr	hdr;
+	int			in_use;
+	int			name_len;
+	uint64_t		wwpn;
+	uint8_t			data[];
+};
+
+/*
+ * Used for CTL_MSG_MODE_SYNC.
+ */
+struct ctl_ha_msg_mode {
+	struct ctl_ha_msg_hdr	hdr;
+	uint8_t			page_code;
+	uint8_t			subpage;
+	uint16_t		page_len;
+	uint8_t			data[];
+};
+
 union ctl_ha_msg {
 	struct ctl_ha_msg_hdr	hdr;
 	struct ctl_ha_msg_task	task;
@@ -440,17 +561,19 @@
 	struct ctl_ha_msg_scsi	scsi;
 	struct ctl_ha_msg_dt	dt;
 	struct ctl_ha_msg_pr	pr;
-	struct ctl_ha_msg_aps	aps;
+	struct ctl_ha_msg_ua	ua;
+	struct ctl_ha_msg_port	port;
+	struct ctl_ha_msg_lun	lun;
+	struct ctl_ha_msg_iid	iid;
+	struct ctl_ha_msg_login	login;
+	struct ctl_ha_msg_mode	mode;
 };
 
-
 struct ctl_prio {
 	struct ctl_io_hdr  io_hdr;
 	struct ctl_ha_msg_pr pr_msg;
 };
 
-
-
 union ctl_io {
 	struct ctl_io_hdr io_hdr;	/* common to all I/O types */
 	struct ctl_scsiio scsiio;	/* Normal SCSI commands */
@@ -461,9 +584,9 @@
 #ifdef _KERNEL
 
 union ctl_io *ctl_alloc_io(void *pool_ref);
+union ctl_io *ctl_alloc_io_nowait(void *pool_ref);
 void ctl_free_io(union ctl_io *io);
 void ctl_zero_io(union ctl_io *io);
-void ctl_copy_io(union ctl_io *src, union ctl_io *dest);
 
 #endif /* _KERNEL */
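
The kern_* comments in ctl_scsiio above imply one invariant worth spelling
out: each datamove() invocation is handed a piece of kern_data_len bytes
starting kern_rel_offset bytes into the kern_total_len transfer, and the
scatter/gather entries (if any) sum to exactly kern_data_len.  A hedged
sketch of a frontend's bookkeeping (the function is hypothetical;
be_move_done is the completion hook from the full ctl_scsiio definition,
not shown in this hunk):

	static void
	example_fe_datamove(union ctl_io *io)
	{
		struct ctl_scsiio *ctsio = &io->scsiio;
		struct ctl_sg_entry one, *sgl;
		size_t moved;
		int i, nseg;

		if (ctsio->kern_sg_entries > 0) {
			/* kern_data_ptr points at a scatter/gather list. */
			sgl = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
			nseg = ctsio->kern_sg_entries;
		} else {
			/* Flat buffer: treat it as a one-entry list. */
			one.addr = ctsio->kern_data_ptr;
			one.len = ctsio->kern_data_len;
			sgl = &one;
			nseg = 1;
		}
		for (i = 0, moved = 0; i < nseg; i++) {
			/*
			 * This piece covers bytes [kern_rel_offset,
			 * kern_rel_offset + kern_data_len) of the
			 * kern_total_len transfer; push sgl[i].addr /
			 * sgl[i].len to the wire here.
			 */
			moved += sgl[i].len;
		}
		KASSERT(moved == ctsio->kern_data_len,
		    ("S/G entries do not sum to kern_data_len"));
		ctsio->be_move_done(io);
	}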
 

Modified: trunk/sys/cam/ctl/ctl_ioctl.h
===================================================================
--- trunk/sys/cam/ctl/ctl_ioctl.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_ioctl.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,6 +1,8 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
  * Copyright (c) 2011 Spectra Logic Corporation
+ * Copyright (c) 2014-2017 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -28,8 +30,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_ioctl.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ioctl.h#4 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_ioctl.h 312841 2017-01-26 21:00:49Z mav $
  */
 /*
  * CAM Target Layer ioctl interface.
@@ -40,6 +42,12 @@
 #ifndef	_CTL_IOCTL_H_
 #define	_CTL_IOCTL_H_
 
+#ifdef ICL_KERNEL_PROXY
+#include <sys/socket.h>
+#endif
+
+#include <sys/ioccom.h>
+
 #define	CTL_DEFAULT_DEV		"/dev/cam/ctl"
 /*
  * Maximum number of targets we support.
@@ -54,17 +62,17 @@
 /*
  * Maximum number of LUNs we support at the moment.  MUST be a power of 2.
  */
-#define	CTL_MAX_LUNS		256
+#define	CTL_MAX_LUNS		1024
 
 /*
  * Maximum number of initiators per port.
  */
-#define	CTL_MAX_INIT_PER_PORT	2048 // Was 16
+#define	CTL_MAX_INIT_PER_PORT	2048
 
 /*
  * Maximum number of ports registered at one time.
  */
-#define	CTL_MAX_PORTS		32
+#define	CTL_MAX_PORTS		256
 
 /*
  * Maximum number of initiators we support.
@@ -74,35 +82,9 @@
 /* Hopefully this won't conflict with new misc devices that pop up */
 #define	CTL_MINOR	225
 
-typedef enum {
-	CTL_OOA_INVALID_LUN,
-	CTL_OOA_SUCCESS
-} ctl_ooa_status;
+/* Legacy statistics accumulated for every port for every LU. */
+#define CTL_LEGACY_STATS	1
 
-struct ctl_ooa_info {
-	uint32_t target_id;	/* Passed in to CTL */
-	uint32_t lun_id;	/* Passed in to CTL */
-	uint32_t num_entries;	/* Returned from CTL */
-	ctl_ooa_status status;	/* Returned from CTL */
-};
-
-struct ctl_hard_startstop_info {
-	cfi_mt_status status;
-	int total_luns;
-	int luns_complete;
-	int luns_failed;
-};
-
-struct ctl_bbrread_info {
-	int			lun_num;	/* Passed in to CTL */
-	uint64_t		lba;		/* Passed in to CTL */
-	int			len;		/* Passed in to CTL */
-	cfi_mt_status		status;		/* Returned from CTL */
-	cfi_bbrread_status	bbr_status;	/* Returned from CTL */
-	uint8_t			scsi_status;	/* Returned from CTL */
-	struct scsi_sense_data	sense_data;	/* Returned from CTL */
-};
-
 typedef enum {
 	CTL_DELAY_TYPE_NONE,
 	CTL_DELAY_TYPE_CONT,
@@ -125,7 +107,6 @@
 } ctl_delay_status;
 
 struct ctl_io_delay_info {
-	uint32_t		target_id;
 	uint32_t		lun_id;
 	ctl_delay_type		delay_type;
 	ctl_delay_location	delay_loc;
@@ -134,23 +115,6 @@
 };
 
 typedef enum {
-	CTL_GS_SYNC_NONE,
-	CTL_GS_SYNC_OK,
-	CTL_GS_SYNC_NO_LUN
-} ctl_gs_sync_status;
-
-/*
- * The target and LUN id specify which device to modify.  The sync interval
- * means that we will let through every N SYNCHRONIZE CACHE commands.
- */
-struct ctl_sync_info {
-	uint32_t		target_id;	/* passed to kernel */
-	uint32_t		lun_id;		/* passed to kernel */
-	int			sync_interval;	/* depends on whether get/set */
-	ctl_gs_sync_status	status;		/* passed from kernel */
-};
-
-typedef enum {
 	CTL_STATS_NO_IO,
 	CTL_STATS_READ,
 	CTL_STATS_WRITE
@@ -158,6 +122,18 @@
 #define	CTL_STATS_NUM_TYPES	3
 
 typedef enum {
+	CTL_SS_OK,
+	CTL_SS_NEED_MORE_SPACE,
+	CTL_SS_ERROR
+} ctl_stats_status;
+
+typedef enum {
+	CTL_STATS_FLAG_NONE		= 0x00,
+	CTL_STATS_FLAG_TIME_VALID	= 0x01
+} ctl_stats_flags;
+
+#ifdef CTL_LEGACY_STATS
+typedef enum {
 	CTL_LUN_STATS_NO_BLOCKSIZE	= 0x01
 } ctl_lun_stats_flags;
 
@@ -178,17 +154,6 @@
 	struct ctl_lun_io_port_stats	ports[CTL_MAX_PORTS];
 };
 
-typedef enum {
-	CTL_SS_OK,
-	CTL_SS_NEED_MORE_SPACE,
-	CTL_SS_ERROR
-} ctl_stats_status;
-
-typedef enum {
-	CTL_STATS_FLAG_NONE		= 0x00,
-	CTL_STATS_FLAG_TIME_VALID	= 0x01
-} ctl_stats_flags;
-
 struct ctl_stats {
 	int			alloc_len;	/* passed to kernel */
 	struct ctl_lun_io_stats	*lun_stats;	/* passed to/from kernel */
@@ -198,7 +163,28 @@
 	ctl_stats_flags		flags;		/* passed to userland */
 	struct timespec		timestamp;	/* passed to userland */
 };
+#endif /* CTL_LEGACY_STATS */
 
+struct ctl_io_stats {
+	uint32_t			item;
+	uint64_t			bytes[CTL_STATS_NUM_TYPES];
+	uint64_t			operations[CTL_STATS_NUM_TYPES];
+	uint64_t			dmas[CTL_STATS_NUM_TYPES];
+	struct bintime			time[CTL_STATS_NUM_TYPES];
+	struct bintime			dma_time[CTL_STATS_NUM_TYPES];
+};
+
+struct ctl_get_io_stats {
+	struct ctl_io_stats	*stats;		/* passed to/from kernel */
+	size_t			alloc_len;	/* passed to kernel */
+	size_t			fill_len;	/* passed to userland */
+	int			first_item;	/* passed to kernel */
+	int			num_items;	/* passed to userland */
+	ctl_stats_status	status;		/* passed to userland */
+	ctl_stats_flags		flags;		/* passed to userland */
+	struct timespec		timestamp;	/* passed to userland */
+};
+
 /*
  * The types of errors that can be injected:
  *
@@ -273,7 +259,6 @@
 /*
  * Error injection descriptor.
  *
- * target_id:	   Target ID to act on.
  * lun_id	   LUN to act on.
  * lun_error:	   The type of error to inject.  See above for descriptions.
  * error_pattern:  What kind of command to act on.  See above.
@@ -284,7 +269,6 @@
  * links:	   Kernel use only.
  */
 struct ctl_error_desc {
-	uint32_t			target_id;	/* To kernel */
 	uint32_t			lun_id;		/* To kernel */
 	ctl_lun_error			lun_error;	/* To kernel */
 	ctl_lun_error_pattern		error_pattern;	/* To kernel */
@@ -338,26 +322,10 @@
 };
 
 typedef enum {
-	CTL_PORT_LIST_NONE,
-	CTL_PORT_LIST_OK,
-	CTL_PORT_LIST_NEED_MORE_SPACE,
-	CTL_PORT_LIST_ERROR
-} ctl_port_list_status;
-
-struct ctl_port_list {
-	uint32_t		alloc_len;	/* passed to kernel */
-	uint32_t		alloc_num;	/* passed to kernel */
-	struct ctl_port_entry   *entries;	/* filled in kernel */
-	uint32_t		fill_len;	/* passed to userland */
-	uint32_t		fill_num;	/* passed to userland */
-	uint32_t		dropped_num;	/* passed to userland */
-	ctl_port_list_status	status;		/* passed to userland */
-};
-
-typedef enum {
 	CTL_LUN_NOSTATUS,
 	CTL_LUN_OK,
-	CTL_LUN_ERROR
+	CTL_LUN_ERROR,
+	CTL_LUN_WARNING
 } ctl_lun_status;
 
 #define	CTL_ERROR_STR_LEN	160
@@ -372,7 +340,7 @@
  *
  * namelen:	Length of the name field, including the terminating NUL.
  *
- * name:	Name of the paramter.  This must be NUL-terminated.
+ * name:	Name of the parameter.  This must be NUL-terminated.
  *
  * flags:	Flags for the parameter, see above for values.
  *
@@ -401,12 +369,54 @@
 	CTL_LUNREQ_MODIFY,
 } ctl_lunreq_type;
 
+/*
+ * The ID_REQ flag is used to say that the caller has requested a
+ * particular LUN ID in the req_lun_id field.  If we cannot allocate that
+ * LUN ID, the ctl_add_lun() call will fail.
+ *
+ * The STOPPED flag tells us that the LUN should default to the powered
+ * off state.  It will return 0x04,0x02 until it is powered up.  ("Logical
+ * unit not ready, initializing command required.")
+ *
+ * The NO_MEDIA flag tells us that the LUN has no media inserted.
+ *
+ * The PRIMARY flag tells us that this LUN is registered as a Primary LUN,
+ * accessible via the Master shelf controller in an HA configuration.  When
+ * the flag is clear, the LUN is a Secondary LUN controlled by the Secondary
+ * controller.  At this time the flag is applicable only to T_DIRECT types.
+ *
+ * The SERIAL_NUM flag tells us that the serial_num field is filled in and
+ * valid for use in SCSI INQUIRY VPD page 0x80.
+ *
+ * The DEVID flag tells us that the device_id field is filled in and
+ * valid for use in SCSI INQUIRY VPD page 0x83.
+ *
+ * The DEV_TYPE flag tells us that the device_type field is filled in.
+ *
+ * The EJECTED flag tells us that the removable LUN has its tray open.
+ *
+ * The UNMAP flag tells us that this LUN supports UNMAP.
+ *
+ * The READONLY flag tells us that this LUN is read-only.
+ */
+typedef enum {
+	CTL_LUN_FLAG_ID_REQ		= 0x01,
+	CTL_LUN_FLAG_STOPPED		= 0x02,
+	CTL_LUN_FLAG_NO_MEDIA		= 0x04,
+	CTL_LUN_FLAG_PRIMARY		= 0x08,
+	CTL_LUN_FLAG_SERIAL_NUM		= 0x10,
+	CTL_LUN_FLAG_DEVID		= 0x20,
+	CTL_LUN_FLAG_DEV_TYPE		= 0x40,
+	CTL_LUN_FLAG_UNMAP		= 0x80,
+	CTL_LUN_FLAG_EJECTED		= 0x100,
+	CTL_LUN_FLAG_READONLY		= 0x200
+} ctl_backend_lun_flags;
 
 /*
  * LUN creation parameters:
  *
- * flags:		Various LUN flags, see ctl_backend.h for a
- *			description of the flag values and meanings.
+ * flags:		Various LUN flags, see above.
  *
  * device_type:		The SCSI device type.  e.g. 0 for Direct Access,
  *			3 for Processor, etc.  Only certain backends may
@@ -524,6 +534,7 @@
  * kern_be_args:	For kernel use only.
  */
 struct ctl_lun_req {
+#define	CTL_BE_NAME_LEN		32
 	char			backend[CTL_BE_NAME_LEN];
 	ctl_lunreq_type		reqtype;
 	union ctl_lunreq_data	reqdata;
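
A hedged userland sketch of driving this request interface (the member
names inside union ctl_lunreq_data -- reqdata.create.flags, .device_type,
.lun_size_bytes -- and the enum value CTL_LUNREQ_CREATE are not shown in
this hunk and are assumptions here, as is the "ramdisk" backend name):

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	/* <cam/ctl/ctl_ioctl.h> provides the definitions above. */

	static int
	example_create_lun(void)
	{
		struct ctl_lun_req req;
		int fd;

		if ((fd = open(CTL_DEFAULT_DEV, O_RDWR)) == -1)
			return (-1);
		memset(&req, 0, sizeof(req));
		strlcpy(req.backend, "ramdisk", sizeof(req.backend));
		req.reqtype = CTL_LUNREQ_CREATE;
		/* Assumed stable/10 field names within the union: */
		req.reqdata.create.flags = CTL_LUN_FLAG_DEV_TYPE;
		req.reqdata.create.device_type = 0;	/* Direct Access */
		req.reqdata.create.lun_size_bytes = 1024 * 1024;
		if (ioctl(fd, CTL_LUN_REQ, &req) == -1 ||
		    req.status != CTL_LUN_OK)
			fprintf(stderr, "LUN create failed: %s\n",
			    req.error_str);
		close(fd);
		return (0);
	}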
@@ -544,7 +555,7 @@
  * NEED_MORE_SPACE:	The allocated length of the entries field is too
  * 			small for the available data.
  *
- * ERROR:		An error occured, look at the error string for a
+ * ERROR:		An error occurred, look at the error string for a
  *			description of the error.
  */
 typedef enum {
@@ -588,30 +599,240 @@
 						/* passed to userland */
 };
 
+/*
+ * Port request interface:
+ *
+ * driver:		This is required, and is a NUL-terminated string
+ *			naming the frontend, like "iscsi".
+ *
+ * reqtype:		The type of request: CTL_REQ_CREATE to create a
+ *			port, CTL_REQ_REMOVE to delete a port, or
+ *			CTL_REQ_MODIFY to modify a port.
+ *
+ * num_args:		This is the number of frontend-specific arguments
+ *			in the args array.
+ *
+ * args:		This is an array of frontend-specific arguments.
+ *			See above for a description of the fields in this
+ *			structure.
+ *
+ * status:		Status of the request.
+ *
+ * error_str:		If the status is CTL_LUN_ERROR, this will
+ *			contain a string describing the error.
+ *
+ * kern_args:		For kernel use only.
+ */
+typedef enum {
+	CTL_REQ_CREATE,
+	CTL_REQ_REMOVE,
+	CTL_REQ_MODIFY,
+} ctl_req_type;
+
+struct ctl_req {
+	char			driver[CTL_DRIVER_NAME_LEN];
+	ctl_req_type		reqtype;
+	int			num_args;
+	struct ctl_be_arg	*args;
+	ctl_lun_status		status;
+	char			error_str[CTL_ERROR_STR_LEN];
+	struct ctl_be_arg	*kern_args;
+};
+
+/*
+ * iSCSI status
+ *
+ * OK:			Request completed successfully.
+ *
+ * ERROR:		An error occurred, look at the error string for a
+ *			description of the error.
+ *
+ * CTL_ISCSI_LIST_NEED_MORE_SPACE:
+ * 			User has to pass larger buffer for CTL_ISCSI_LIST ioctl.
+ */
+typedef enum {
+	CTL_ISCSI_OK,
+	CTL_ISCSI_ERROR,
+	CTL_ISCSI_LIST_NEED_MORE_SPACE,
+	CTL_ISCSI_SESSION_NOT_FOUND
+} ctl_iscsi_status;
+
+typedef enum {
+	CTL_ISCSI_HANDOFF,
+	CTL_ISCSI_LIST,
+	CTL_ISCSI_LOGOUT,
+	CTL_ISCSI_TERMINATE,
+#if defined(ICL_KERNEL_PROXY) || 1
+	/*
+	 * We actually need these in all cases, but keep the ICL_KERNEL_PROXY
+	 * check as a reminder to remove them along with the rest of the
+	 * proxy code, eventually.
+	 */
+	CTL_ISCSI_LISTEN,
+	CTL_ISCSI_ACCEPT,
+	CTL_ISCSI_SEND,
+	CTL_ISCSI_RECEIVE,
+#endif
+} ctl_iscsi_type;
+
+typedef enum {
+	CTL_ISCSI_DIGEST_NONE,
+	CTL_ISCSI_DIGEST_CRC32C
+} ctl_iscsi_digest;
+
+#define	CTL_ISCSI_NAME_LEN	224	/* 223 bytes, by RFC 3720, + '\0' */
+#define	CTL_ISCSI_ADDR_LEN	47	/* INET6_ADDRSTRLEN + '\0' */
+#define	CTL_ISCSI_ALIAS_LEN	128	/* Arbitrary. */
+
+struct ctl_iscsi_handoff_params {
+	char			initiator_name[CTL_ISCSI_NAME_LEN];
+	char			initiator_addr[CTL_ISCSI_ADDR_LEN];
+	char			initiator_alias[CTL_ISCSI_ALIAS_LEN];
+	uint8_t			initiator_isid[6];
+	char			target_name[CTL_ISCSI_NAME_LEN];
+	int			socket;
+	int			portal_group_tag;
+	
+	/*
+	 * Connection parameters negotiated by ctld(8).
+	 */
+	ctl_iscsi_digest	header_digest;
+	ctl_iscsi_digest	data_digest;
+	uint32_t		cmdsn;
+	uint32_t		statsn;
+	uint32_t		max_recv_data_segment_length;
+	uint32_t		max_burst_length;
+	uint32_t		first_burst_length;
+	uint32_t		immediate_data;
+#ifdef ICL_KERNEL_PROXY
+	int			connection_id;
+	int			spare[3];
+#else
+	int			spare[4];
+#endif
+};
+
+struct ctl_iscsi_list_params {
+	uint32_t		alloc_len;	/* passed to kernel */
+	char                   *conn_xml;	/* filled in kernel */
+	uint32_t		fill_len;	/* passed to userland */
+	int			spare[4];
+};
+
+struct ctl_iscsi_logout_params {
+	int			connection_id;	/* passed to kernel */
+	char			initiator_name[CTL_ISCSI_NAME_LEN];
+						/* passed to kernel */
+	char			initiator_addr[CTL_ISCSI_ADDR_LEN];
+						/* passed to kernel */
+	int			all;		/* passed to kernel */
+	int			spare[4];
+};
+
+struct ctl_iscsi_terminate_params {
+	int			connection_id;	/* passed to kernel */
+	char			initiator_name[CTL_ISCSI_NAME_LEN];
+						/* passed to kernel */
+	char			initiator_addr[CTL_ISCSI_NAME_LEN];
+						/* passed to kernel */
+	int			all;		/* passed to kernel */
+	int			spare[4];
+};
+
+#ifdef ICL_KERNEL_PROXY
+struct ctl_iscsi_listen_params {
+	int				iser;
+	int				domain;
+	int				socktype;
+	int				protocol;
+	struct sockaddr			*addr;
+	socklen_t			addrlen;
+	int				portal_id;
+	int				spare[4];
+};
+
+struct ctl_iscsi_accept_params {
+	int				connection_id;
+	int				portal_id;
+	struct sockaddr			*initiator_addr;
+	socklen_t			initiator_addrlen;
+	int				spare[4];
+};
+
+struct ctl_iscsi_send_params {
+	int				connection_id;
+	void				*bhs;
+	size_t				spare;
+	void				*spare2;
+	size_t				data_segment_len;
+	void				*data_segment;
+	int				spare3[4];
+};
+
+struct ctl_iscsi_receive_params {
+	int				connection_id;
+	void				*bhs;
+	size_t				spare;
+	void				*spare2;
+	size_t				data_segment_len;
+	void				*data_segment;
+	int				spare3[4];
+};
+
+#endif /* ICL_KERNEL_PROXY */
+
+union ctl_iscsi_data {
+	struct ctl_iscsi_handoff_params		handoff;
+	struct ctl_iscsi_list_params		list;
+	struct ctl_iscsi_logout_params		logout;
+	struct ctl_iscsi_terminate_params	terminate;
+#ifdef ICL_KERNEL_PROXY
+	struct ctl_iscsi_listen_params		listen;
+	struct ctl_iscsi_accept_params		accept;
+	struct ctl_iscsi_send_params		send;
+	struct ctl_iscsi_receive_params		receive;
+#endif
+};
+
+/*
+ * iSCSI interface
+ *
+ * status:		The status of the request.  See above for the 
+ *			description of the values of this field.
+ *
+ * error_str:		If the status indicates an error, this string will
+ *			be filled in to describe the error.
+ */
+struct ctl_iscsi {
+	ctl_iscsi_type		type;		/* passed to kernel */
+	union ctl_iscsi_data	data;		/* passed to kernel */
+	ctl_iscsi_status	status;		/* passed to userland */
+	char			error_str[CTL_ERROR_STR_LEN];
+						/* passed to userland */
+};
+
+struct ctl_lun_map {
+	uint32_t		port;
+	uint32_t		plun;
+	uint32_t		lun;
+};
+
 #define	CTL_IO			_IOWR(CTL_MINOR, 0x00, union ctl_io)
 #define	CTL_ENABLE_PORT		_IOW(CTL_MINOR, 0x04, struct ctl_port_entry)
 #define	CTL_DISABLE_PORT	_IOW(CTL_MINOR, 0x05, struct ctl_port_entry)
-#define	CTL_DUMP_OOA		_IO(CTL_MINOR, 0x06)
-#define	CTL_CHECK_OOA		_IOWR(CTL_MINOR, 0x07, struct ctl_ooa_info)
-#define	CTL_HARD_STOP		_IOR(CTL_MINOR, 0x08, \
-				     struct ctl_hard_startstop_info)
-#define	CTL_HARD_START		_IOR(CTL_MINOR, 0x09, \
-				     struct ctl_hard_startstop_info)
 #define	CTL_DELAY_IO		_IOWR(CTL_MINOR, 0x10, struct ctl_io_delay_info)
-#define	CTL_REALSYNC_GET	_IOR(CTL_MINOR, 0x11, int)
-#define	CTL_REALSYNC_SET	_IOW(CTL_MINOR, 0x12, int)
-#define	CTL_SETSYNC		_IOWR(CTL_MINOR, 0x13, struct ctl_sync_info)
-#define	CTL_GETSYNC		_IOWR(CTL_MINOR, 0x14, struct ctl_sync_info)
 #define	CTL_GETSTATS		_IOWR(CTL_MINOR, 0x15, struct ctl_stats)
 #define	CTL_ERROR_INJECT	_IOWR(CTL_MINOR, 0x16, struct ctl_error_desc)
-#define	CTL_BBRREAD		_IOWR(CTL_MINOR, 0x17, struct ctl_bbrread_info)
 #define	CTL_GET_OOA		_IOWR(CTL_MINOR, 0x18, struct ctl_ooa)
 #define	CTL_DUMP_STRUCTS	_IO(CTL_MINOR, 0x19)
-#define	CTL_GET_PORT_LIST	_IOWR(CTL_MINOR, 0x20, struct ctl_port_list)
 #define	CTL_LUN_REQ		_IOWR(CTL_MINOR, 0x21, struct ctl_lun_req)
 #define	CTL_LUN_LIST		_IOWR(CTL_MINOR, 0x22, struct ctl_lun_list)
 #define	CTL_ERROR_INJECT_DELETE	_IOW(CTL_MINOR, 0x23, struct ctl_error_desc)
 #define	CTL_SET_PORT_WWNS	_IOW(CTL_MINOR, 0x24, struct ctl_port_entry)
+#define	CTL_ISCSI		_IOWR(CTL_MINOR, 0x25, struct ctl_iscsi)
+#define	CTL_PORT_REQ		_IOWR(CTL_MINOR, 0x26, struct ctl_req)
+#define	CTL_PORT_LIST		_IOWR(CTL_MINOR, 0x27, struct ctl_lun_list)
+#define	CTL_LUN_MAP		_IOW(CTL_MINOR, 0x28, struct ctl_lun_map)
+#define	CTL_GET_LUN_STATS	_IOWR(CTL_MINOR, 0x29, struct ctl_get_io_stats)
+#define	CTL_GET_PORT_STATS	_IOWR(CTL_MINOR, 0x2a, struct ctl_get_io_stats)
 
 #endif /* _CTL_IOCTL_H_ */
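
As a usage sketch for the new per-LUN statistics interface (the buffer
sizing policy is arbitrary; struct ctl_get_io_stats, CTL_GET_LUN_STATS, and
CTL_DEFAULT_DEV are all from this header):

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	/* <cam/ctl/ctl_ioctl.h> provides the definitions above. */

	static int
	example_dump_lun_stats(void)
	{
		struct ctl_get_io_stats st;
		struct ctl_io_stats *buf;
		int fd, i, n = 128;	/* arbitrary initial guess */

		if ((fd = open(CTL_DEFAULT_DEV, O_RDWR)) == -1)
			return (-1);
		if ((buf = calloc(n, sizeof(*buf))) == NULL) {
			close(fd);
			return (-1);
		}
		memset(&st, 0, sizeof(st));
		st.stats = buf;
		st.alloc_len = n * sizeof(*buf);
		st.first_item = 0;
		if (ioctl(fd, CTL_GET_LUN_STATS, &st) == 0 &&
		    st.status == CTL_SS_OK) {
			for (i = 0; i < st.num_items; i++)
				printf("lun %u: %ju bytes read\n",
				    buf[i].item,
				    (uintmax_t)buf[i].bytes[CTL_STATS_READ]);
		}
		/* CTL_SS_NEED_MORE_SPACE means grow the buffer and retry. */
		free(buf);
		close(fd);
		return (0);
	}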
 

Modified: trunk/sys/cam/ctl/ctl_private.h
===================================================================
--- trunk/sys/cam/ctl/ctl_private.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_private.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,5 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003, 2004, 2005, 2008 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2017 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,8 +29,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_private.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_private.h#7 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_private.h 314767 2017-03-06 06:47:05Z mav $
  */
 /*
  * CAM Target Layer driver private data structures/definitions.
@@ -39,6 +41,10 @@
 #ifndef	_CTL_PRIVATE_H_
 #define	_CTL_PRIVATE_H_
 
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_cd.h>
+#include <cam/scsi/scsi_da.h>
+
 /*
  * SCSI vendor and product names.
  */
@@ -45,78 +51,24 @@
 #define	CTL_VENDOR		"FREEBSD "
 #define	CTL_DIRECT_PRODUCT	"CTLDISK         "
 #define	CTL_PROCESSOR_PRODUCT	"CTLPROCESSOR    "
+#define	CTL_CDROM_PRODUCT	"CTLCDROM        "
 #define	CTL_UNKNOWN_PRODUCT	"CTLDEVICE       "
 
-struct ctl_fe_ioctl_startstop_info {
-	struct cv			sem;
-	struct ctl_hard_startstop_info	hs_info;
-};
-
-struct ctl_fe_ioctl_bbrread_info {
-	struct cv			sem;
-	struct ctl_bbrread_info		*bbr_info;
-	int				wakeup_done;
-	struct mtx			*lock;
-};
-
-typedef enum {
-	CTL_IOCTL_INPROG,
-	CTL_IOCTL_DATAMOVE,
-	CTL_IOCTL_DONE
-} ctl_fe_ioctl_state;
-
-struct ctl_fe_ioctl_params {
-	struct cv		sem;
-	struct mtx		ioctl_mtx;
-	ctl_fe_ioctl_state	state;
-};
-
-#define	CTL_POOL_ENTRIES_INTERNAL	200
-#define	CTL_POOL_ENTRIES_EMERGENCY	300
 #define CTL_POOL_ENTRIES_OTHER_SC   200
 
-typedef enum {
-	CTL_POOL_INTERNAL,
-	CTL_POOL_FETD,
-	CTL_POOL_EMERGENCY,
-	CTL_POOL_IOCTL,
-	CTL_POOL_4OTHERSC
-} ctl_pool_type;
-
-typedef enum {
-	CTL_POOL_FLAG_NONE	= 0x00,
-	CTL_POOL_FLAG_INVALID	= 0x01
-} ctl_pool_flags;
-
 struct ctl_io_pool {
-	ctl_pool_type			type;
-	ctl_pool_flags			flags;
+	char				name[64];
 	uint32_t			id;
 	struct ctl_softc		*ctl_softc;
-	uint32_t			refcount;
-	uint64_t			total_allocated;
-	uint64_t			total_freed;
-	int32_t				total_ctl_io;
-	int32_t				free_ctl_io;
-	STAILQ_HEAD(, ctl_io_hdr)	free_queue;
-	STAILQ_ENTRY(ctl_io_pool)	links;
+	struct uma_zone			*zone;
 };
 
 typedef enum {
-	CTL_IOCTL_FLAG_NONE	= 0x00,
-	CTL_IOCTL_FLAG_ENABLED	= 0x01
-} ctl_ioctl_flags;
-
-struct ctl_ioctl_info {
-	ctl_ioctl_flags		flags;
-	uint32_t		cur_tag_num;
-	struct ctl_frontend	fe;
-	char			port_name[24];
-};
-
-typedef enum {
 	CTL_SER_BLOCK,
+	CTL_SER_BLOCKOPT,
 	CTL_SER_EXTENT,
+	CTL_SER_EXTENTOPT,
+	CTL_SER_EXTENTSEQ,
 	CTL_SER_PASS,
 	CTL_SER_SKIP
 } ctl_serialize_action;
@@ -134,22 +86,26 @@
  * WARNING:  Keep the bottom nibble here free, we OR in the data direction
  * flags for each command.
  *
- * Note:  "OK_ON_ALL_LUNS" == we don't have to have a lun configured
+ * Note:  "OK_ON_NO_LUN"   == we don't have to have a lun configured
  *        "OK_ON_BOTH"     == we have to have a lun configured
+ *        "SA5"            == command has 5-bit service action at byte 1
  */
 typedef enum {
 	CTL_CMD_FLAG_NONE		= 0x0000,
 	CTL_CMD_FLAG_NO_SENSE		= 0x0010,
-	CTL_CMD_FLAG_OK_ON_ALL_LUNS	= 0x0020,
-	CTL_CMD_FLAG_ALLOW_ON_RESV	= 0x0040,
+	CTL_CMD_FLAG_ALLOW_ON_RESV	= 0x0020,
+	CTL_CMD_FLAG_ALLOW_ON_PR_RESV	= 0x0040,
+	CTL_CMD_FLAG_ALLOW_ON_PR_WRESV	= 0x0080,
 	CTL_CMD_FLAG_OK_ON_PROC		= 0x0100,
-	CTL_CMD_FLAG_OK_ON_SLUN		= 0x0200,
-	CTL_CMD_FLAG_OK_ON_BOTH		= 0x0300,
-	CTL_CMD_FLAG_OK_ON_STOPPED	= 0x0400,
-	CTL_CMD_FLAG_OK_ON_INOPERABLE	= 0x0800,
-	CTL_CMD_FLAG_OK_ON_OFFLINE	= 0x1000,
-	CTL_CMD_FLAG_OK_ON_SECONDARY	= 0x2000,
-	CTL_CMD_FLAG_ALLOW_ON_PR_RESV   = 0x4000
+	CTL_CMD_FLAG_OK_ON_DIRECT	= 0x0200,
+	CTL_CMD_FLAG_OK_ON_CDROM	= 0x0400,
+	CTL_CMD_FLAG_OK_ON_BOTH		= 0x0700,
+	CTL_CMD_FLAG_OK_ON_NO_LUN	= 0x0800,
+	CTL_CMD_FLAG_OK_ON_NO_MEDIA	= 0x1000,
+	CTL_CMD_FLAG_OK_ON_STANDBY	= 0x2000,
+	CTL_CMD_FLAG_OK_ON_UNAVAIL	= 0x4000,
+	CTL_CMD_FLAG_SA5		= 0x8000,
+	CTL_CMD_FLAG_RUN_HERE		= 0x10000
 } ctl_cmd_flags;
 
 typedef enum {
@@ -156,19 +112,17 @@
 	CTL_SERIDX_TUR	= 0,
 	CTL_SERIDX_READ,
 	CTL_SERIDX_WRITE,
+	CTL_SERIDX_UNMAP,
+	CTL_SERIDX_SYNC,
 	CTL_SERIDX_MD_SNS,
 	CTL_SERIDX_MD_SEL,
 	CTL_SERIDX_RQ_SNS,
 	CTL_SERIDX_INQ,
 	CTL_SERIDX_RD_CAP,
-	CTL_SERIDX_RESV,
-	CTL_SERIDX_REL,
+	CTL_SERIDX_RES,
 	CTL_SERIDX_LOG_SNS,
 	CTL_SERIDX_FORMAT,
 	CTL_SERIDX_START,
-	CTL_SERIDX_PRES_IN,
-	CTL_SERIDX_PRES_OUT,
-	CTL_SERIDX_MAIN_IN,
 	/* TBD: others to be filled in as needed */
 	CTL_SERIDX_COUNT, /* LAST, not a normal code, provides # codes */
 	CTL_SERIDX_INVLD = CTL_SERIDX_COUNT
@@ -181,6 +135,9 @@
 	ctl_seridx		seridx;
 	ctl_cmd_flags		flags;
 	ctl_lun_error_pattern	pattern;
+	uint8_t			length;		/* CDB length */
+	uint8_t			usage[15];	/* Mask of allowed CDB bits
+						 * after the opcode byte. */
 };
 
 typedef enum {
@@ -191,11 +148,13 @@
 	CTL_LUN_DISABLED	= 0x008,
 	CTL_LUN_MALLOCED	= 0x010,
 	CTL_LUN_STOPPED		= 0x020,
-	CTL_LUN_INOPERABLE	= 0x040,
-	CTL_LUN_OFFLINE		= 0x080,
+	CTL_LUN_NO_MEDIA	= 0x040,
+	CTL_LUN_EJECTED		= 0x080,
 	CTL_LUN_PR_RESERVED	= 0x100,
 	CTL_LUN_PRIMARY_SC	= 0x200,
-	CTL_LUN_SENSE_DESC	= 0x400
+	CTL_LUN_READONLY	= 0x800,
+	CTL_LUN_PEER_SC_PRIMARY	= 0x1000,
+	CTL_LUN_REMOVABLE	= 0x2000
 } ctl_lun_flags;
 
 typedef enum {
@@ -266,7 +225,7 @@
 #define	CTL_DEFAULT_SECTORS_PER_TRACK	256
 #define	CTL_DEFAULT_HEADS		128
 
-#define	CTL_DEFAULT_ROTATION_RATE	10000
+#define	CTL_DEFAULT_ROTATION_RATE	SVPD_NON_ROTATING
 
 struct ctl_page_index;
 
@@ -279,7 +238,10 @@
 
 typedef enum {
 	CTL_PAGE_FLAG_NONE	 = 0x00,
-	CTL_PAGE_FLAG_DISK_ONLY	 = 0x01
+	CTL_PAGE_FLAG_DIRECT	 = 0x01,
+	CTL_PAGE_FLAG_PROC	 = 0x02,
+	CTL_PAGE_FLAG_CDROM	 = 0x04,
+	CTL_PAGE_FLAG_ALL	 = 0x07
 } ctl_page_flags;
 
 struct ctl_page_index {
@@ -297,24 +259,42 @@
 #define	CTL_PAGE_DEFAULT	0x02
 #define	CTL_PAGE_SAVED		0x03
 
+#define CTL_NUM_LBP_PARAMS	4
+#define CTL_NUM_LBP_THRESH	4
+#define CTL_LBP_EXPONENT	11	/* 2048 sectors */
+#define CTL_LBP_PERIOD		10	/* 10 seconds */
+#define CTL_LBP_UA_PERIOD	300	/* 5 minutes */
+
+struct ctl_logical_block_provisioning_page {
+	struct scsi_logical_block_provisioning_page	main;
+	struct scsi_logical_block_provisioning_page_descr descr[CTL_NUM_LBP_THRESH];
+};
+
 static const struct ctl_page_index page_index_template[] = {
+	{SMS_RW_ERROR_RECOVERY_PAGE, 0, sizeof(struct scsi_da_rw_recovery_page), NULL,
+	 CTL_PAGE_FLAG_DIRECT | CTL_PAGE_FLAG_CDROM, NULL, ctl_default_page_handler},
 	{SMS_FORMAT_DEVICE_PAGE, 0, sizeof(struct scsi_format_page), NULL,
-	 CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
+	 CTL_PAGE_FLAG_DIRECT, NULL, NULL},
 	{SMS_RIGID_DISK_PAGE, 0, sizeof(struct scsi_rigid_disk_page), NULL,
-	 CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
+	 CTL_PAGE_FLAG_DIRECT, NULL, NULL},
+	{SMS_VERIFY_ERROR_RECOVERY_PAGE, 0, sizeof(struct scsi_da_verify_recovery_page), NULL,
+	 CTL_PAGE_FLAG_DIRECT | CTL_PAGE_FLAG_CDROM, NULL, ctl_default_page_handler},
 	{SMS_CACHING_PAGE, 0, sizeof(struct scsi_caching_page), NULL,
-	 CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
+	 CTL_PAGE_FLAG_DIRECT | CTL_PAGE_FLAG_CDROM,
+	 NULL, ctl_default_page_handler},
 	{SMS_CONTROL_MODE_PAGE, 0, sizeof(struct scsi_control_page), NULL,
-	 CTL_PAGE_FLAG_NONE, NULL, ctl_control_page_handler},
-   	{SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, PWR_SUBPAGE_CODE,
-	 sizeof(struct copan_power_subpage), NULL, CTL_PAGE_FLAG_NONE,
-	 ctl_power_sp_sense_handler, ctl_power_sp_handler},
-	{SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, APS_SUBPAGE_CODE,
-	 sizeof(struct copan_aps_subpage), NULL, CTL_PAGE_FLAG_NONE,
-	 NULL, ctl_aps_sp_handler},
-	{SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, DBGCNF_SUBPAGE_CODE,
-	 sizeof(struct copan_debugconf_subpage), NULL, CTL_PAGE_FLAG_NONE,
-	 ctl_debugconf_sp_sense_handler, ctl_debugconf_sp_select_handler},
+	 CTL_PAGE_FLAG_ALL, NULL, ctl_default_page_handler},
+	{SMS_CONTROL_MODE_PAGE | SMPH_SPF, 0x01,
+	 sizeof(struct scsi_control_ext_page), NULL,
+	 CTL_PAGE_FLAG_ALL, NULL, ctl_default_page_handler},
+	{SMS_INFO_EXCEPTIONS_PAGE, 0, sizeof(struct scsi_info_exceptions_page), NULL,
+	 CTL_PAGE_FLAG_ALL, NULL, ctl_ie_page_handler},
+	{SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 0x02,
+	 sizeof(struct ctl_logical_block_provisioning_page), NULL,
+	 CTL_PAGE_FLAG_DIRECT, NULL, ctl_default_page_handler},
+	{SMS_CDDVD_CAPS_PAGE, 0,
+	 sizeof(struct scsi_cddvd_capabilities_page), NULL,
+	 CTL_PAGE_FLAG_CDROM, NULL, NULL},
 };
 
 #define	CTL_NUM_MODE_PAGES sizeof(page_index_template)/   \
@@ -321,21 +301,59 @@
 			   sizeof(page_index_template[0])
 
 struct ctl_mode_pages {
+	struct scsi_da_rw_recovery_page	rw_er_page[4];
 	struct scsi_format_page		format_page[4];
 	struct scsi_rigid_disk_page	rigid_disk_page[4];
+	struct scsi_da_verify_recovery_page	verify_er_page[4];
 	struct scsi_caching_page	caching_page[4];
 	struct scsi_control_page	control_page[4];
-	struct copan_power_subpage	power_subpage[4];
-	struct copan_aps_subpage	aps_subpage[4];
-	struct copan_debugconf_subpage	debugconf_subpage[4];
+	struct scsi_control_ext_page	control_ext_page[4];
+	struct scsi_info_exceptions_page ie_page[4];
+	struct ctl_logical_block_provisioning_page lbp_page[4];
+	struct scsi_cddvd_capabilities_page cddvd_page[4];
 	struct ctl_page_index		index[CTL_NUM_MODE_PAGES];
 };
 
-struct ctl_pending_sense {
-	ctl_ua_type		ua_pending;
-	struct scsi_sense_data	sense;
+#define	MODE_RWER	mode_pages.rw_er_page[CTL_PAGE_CURRENT]
+#define	MODE_FMT	mode_pages.format_page[CTL_PAGE_CURRENT]
+#define	MODE_RDISK	mode_pages.rigid_disk_page[CTL_PAGE_CURRENT]
+#define	MODE_VER	mode_pages.verify_er_page[CTL_PAGE_CURRENT]
+#define	MODE_CACHING	mode_pages.caching_page[CTL_PAGE_CURRENT]
+#define	MODE_CTRL	mode_pages.control_page[CTL_PAGE_CURRENT]
+#define	MODE_CTRLE	mode_pages.control_ext_page[CTL_PAGE_CURRENT]
+#define	MODE_IE		mode_pages.ie_page[CTL_PAGE_CURRENT]
+#define	MODE_LBP	mode_pages.lbp_page[CTL_PAGE_CURRENT]
+#define	MODE_CDDVD	mode_pages.cddvd_page[CTL_PAGE_CURRENT]
+
+static const struct ctl_page_index log_page_index_template[] = {
+	{SLS_SUPPORTED_PAGES_PAGE, 0, 0, NULL,
+	 CTL_PAGE_FLAG_ALL, NULL, NULL},
+	{SLS_SUPPORTED_PAGES_PAGE, SLS_SUPPORTED_SUBPAGES_SUBPAGE, 0, NULL,
+	 CTL_PAGE_FLAG_ALL, NULL, NULL},
+	{SLS_LOGICAL_BLOCK_PROVISIONING, 0, 0, NULL,
+	 CTL_PAGE_FLAG_DIRECT, ctl_lbp_log_sense_handler, NULL},
+	{SLS_STAT_AND_PERF, 0, 0, NULL,
+	 CTL_PAGE_FLAG_ALL, ctl_sap_log_sense_handler, NULL},
+	{SLS_IE_PAGE, 0, 0, NULL,
+	 CTL_PAGE_FLAG_ALL, ctl_ie_log_sense_handler, NULL},
 };
 
+#define	CTL_NUM_LOG_PAGES sizeof(log_page_index_template)/   \
+			  sizeof(log_page_index_template[0])
+
+struct ctl_log_pages {
+	uint8_t				pages_page[CTL_NUM_LOG_PAGES];
+	uint8_t				subpages_page[CTL_NUM_LOG_PAGES * 2];
+	uint8_t				lbp_page[12*CTL_NUM_LBP_PARAMS];
+	struct stat_page {
+		struct scsi_log_stat_and_perf sap;
+		struct scsi_log_idle_time it;
+		struct scsi_log_time_interval ti;
+	} stat_page;
+	struct scsi_log_informational_exceptions	ie_page;
+	struct ctl_page_index		index[CTL_NUM_LOG_PAGES];
+};
+
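[Both CTL_NUM_MODE_PAGES and CTL_NUM_LOG_PAGES above use the classic
sizeof-based element-count idiom; newer FreeBSD code spells the same thing
nitems() from <sys/param.h>. A trivial standalone illustration:]

#include <stdio.h>

struct page_index {
	int page_code;
	int subpage;
};

static const struct page_index template[] = {
	{ 0x01, 0 },
	{ 0x08, 0 },
	{ 0x0a, 0 },
};

#define NUM_PAGES	(sizeof(template) / sizeof(template[0]))

int
main(void)
{
	printf("%zu pages\n", NUM_PAGES);	/* prints "3 pages" */
	return (0);
}
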
 struct ctl_lun_delay_info {
 	ctl_delay_type		datamove_type;
 	uint32_t		datamove_delay;
@@ -343,34 +361,21 @@
 	uint32_t		done_delay;
 };
 
-typedef enum {
-	CTL_ERR_INJ_NONE	= 0x00,
-	CTL_ERR_INJ_ABORTED	= 0x01
-} ctl_err_inject_flags;
+#define CTL_PR_ALL_REGISTRANTS  0xFFFFFFFF
+#define CTL_PR_NO_RESERVATION   0xFFFFFFF0
 
-typedef enum {
-	CTL_PR_FLAG_NONE	= 0x00,
-	CTL_PR_FLAG_REGISTERED	= 0x01,
-	CTL_PR_FLAG_ACTIVE_RES	= 0x02
-} ctl_per_res_flags;
-
-struct ctl_per_res_info {
-	struct scsi_per_res_key res_key;
-	uint8_t  registered;
+struct ctl_devid {
+	int		len;
+	uint8_t		data[];
 };
 
-#define CTL_PR_ALL_REGISTRANTS  0xFFFF
-#define CTL_PR_NO_RESERVATION   0xFFF0
+#define NUM_HA_SHELVES		2
 
-/*
- * For report target port groups.
- */
-#define NUM_TARGET_PORT_GROUPS	2
-#define NUM_PORTS_PER_GRP	2
+#define CTL_WRITE_BUFFER_SIZE	262144
 
+struct tpc_list;
 struct ctl_lun {
 	struct mtx			lun_lock;
-	struct ctl_id			target;
 	uint64_t			lun;
 	ctl_lun_flags			flags;
 	STAILQ_HEAD(,ctl_error_desc)	error_list;
@@ -378,115 +383,167 @@
 	struct ctl_softc		*ctl_softc;
 	struct ctl_be_lun		*be_lun;
 	struct ctl_backend_driver	*backend;
-	int				io_count;
 	struct ctl_lun_delay_info	delay_info;
-	int				sync_interval;
-	int				sync_count;
+#ifdef CTL_TIME_IO
+	sbintime_t			idle_time;
+	sbintime_t			last_busy;
+#endif
 	TAILQ_HEAD(ctl_ooaq, ctl_io_hdr)  ooa_queue;
 	TAILQ_HEAD(ctl_blockq,ctl_io_hdr) blocked_queue;
 	STAILQ_ENTRY(ctl_lun)		links;
-	STAILQ_ENTRY(ctl_lun)		run_links;
-	struct ctl_nexus		rsv_nexus;
-	uint32_t			have_ca[CTL_MAX_INITIATORS >> 5];
-	struct ctl_pending_sense	pending_sense[CTL_MAX_INITIATORS];
+	struct scsi_sense_data		*pending_sense[CTL_MAX_PORTS];
+	ctl_ua_type			*pending_ua[CTL_MAX_PORTS];
+	uint8_t				ua_tpt_info[8];
+	time_t				lasttpt;
+	uint8_t				ie_asc;	/* Informational exceptions */
+	uint8_t				ie_ascq;
+	int				ie_reported;	/* Already reported */
+	uint32_t			ie_reportcnt;	/* REPORT COUNT */
+	struct callout			ie_callout;	/* INTERVAL TIMER */
 	struct ctl_mode_pages		mode_pages;
-	struct ctl_lun_io_stats		stats;
-	struct ctl_per_res_info		per_res[2*CTL_MAX_INITIATORS];
-	unsigned int			PRGeneration;
+	struct ctl_log_pages		log_pages;
+#ifdef CTL_LEGACY_STATS
+	struct ctl_lun_io_stats		legacy_stats;
+#endif /* CTL_LEGACY_STATS */
+	struct ctl_io_stats		stats;
+	uint32_t			res_idx;
+	uint32_t			pr_generation;
+	uint64_t			*pr_keys[CTL_MAX_PORTS];
 	int				pr_key_count;
-	uint16_t        		pr_res_idx;
-	uint8_t				res_type;
-	uint8_t				write_buffer[524288];
+	uint32_t			pr_res_idx;
+	uint8_t				pr_res_type;
+	int				prevent_count;
+	uint32_t			*prevent;
+	uint8_t				*write_buffer;
+	struct ctl_devid		*lun_devid;
+	TAILQ_HEAD(tpc_lists, tpc_list) tpc_lists;
 };
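[The reworked struct ctl_lun replaces the old fixed per_res array with
pr_keys[CTL_MAX_PORTS], an array of per-port key arrays allocated only when
an initiator on that port registers. A rough user-space sketch of that
two-level lookup, with made-up sizes and helper name, not the driver's API:]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PORTS	4
#define INIT_PER_PORT	8

struct lun_keys {
	/* [port] -> array of INIT_PER_PORT keys, NULL until first use */
	uint64_t *pr_keys[MAX_PORTS];
};

static uint64_t
pr_key_get(const struct lun_keys *lk, int port, int init)
{
	if (lk->pr_keys[port] == NULL)
		return (0);	/* no initiator registered via this port */
	return (lk->pr_keys[port][init]);
}

int
main(void)
{
	struct lun_keys lk = { { NULL } };

	/* Allocate the per-port key array only on first registration. */
	if ((lk.pr_keys[1] = calloc(INIT_PER_PORT, sizeof(uint64_t))) == NULL)
		return (1);
	lk.pr_keys[1][3] = 0x1234;
	printf("%llx %llx\n", (unsigned long long)pr_key_get(&lk, 0, 3),
	    (unsigned long long)pr_key_get(&lk, 1, 3));
	free(lk.pr_keys[1]);
	return (0);
}
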
 
 typedef enum {
-	CTL_FLAG_TASK_PENDING	= 0x01,
-	CTL_FLAG_REAL_SYNC	= 0x02,
-	CTL_FLAG_MASTER_SHELF	= 0x04
+	CTL_FLAG_ACTIVE_SHELF	= 0x04
 } ctl_gen_flags;
 
-struct ctl_wwpn_iid {
-	int in_use;
-	uint64_t wwpn;
-	uint32_t iid;
-	int32_t port;
+#define CTL_MAX_THREADS		16
+
+struct ctl_thread {
+	struct mtx_padalign queue_lock;
+	struct ctl_softc	*ctl_softc;
+	struct thread		*thread;
+	STAILQ_HEAD(, ctl_io_hdr) incoming_queue;
+	STAILQ_HEAD(, ctl_io_hdr) rtr_queue;
+	STAILQ_HEAD(, ctl_io_hdr) done_queue;
+	STAILQ_HEAD(, ctl_io_hdr) isc_queue;
 };
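[Each ctl_thread above owns its incoming/rtr/done/isc queues behind a padded
lock. A minimal user-space sketch of the queue mechanics only, using the same
STAILQ macros from <sys/queue.h>; locking and the real ctl_io_hdr are elided:]

#include <sys/queue.h>
#include <stdio.h>

struct io_hdr {
	int id;
	STAILQ_ENTRY(io_hdr) links;
};

STAILQ_HEAD(io_queue, io_hdr);

int
main(void)
{
	struct io_queue incoming = STAILQ_HEAD_INITIALIZER(incoming);
	struct io_hdr *io, a = { .id = 1 }, b = { .id = 2 };

	STAILQ_INSERT_TAIL(&incoming, &a, links);
	STAILQ_INSERT_TAIL(&incoming, &b, links);
	while ((io = STAILQ_FIRST(&incoming)) != NULL) {
		STAILQ_REMOVE_HEAD(&incoming, links);
		printf("dequeued io %d\n", io->id);
	}
	return (0);
}
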
 
+struct tpc_token;
 struct ctl_softc {
 	struct mtx ctl_lock;
 	struct cdev *dev;
-	int open_count;
-	struct ctl_id target;
-	int num_disks;
 	int num_luns;
 	ctl_gen_flags flags;
 	ctl_ha_mode ha_mode;
-	int inquiry_pq_no_lun;
+	int ha_id;
+	int is_single;
+	ctl_ha_link_state ha_link;
+	int port_min;
+	int port_max;
+	int port_cnt;
+	int init_min;
+	int init_max;
 	struct sysctl_ctx_list sysctl_ctx;
 	struct sysctl_oid *sysctl_tree;
-	struct ctl_ioctl_info ioctl_info;
-	struct ctl_lun lun;
-	struct ctl_io_pool *internal_pool;
-	struct ctl_io_pool *emergency_pool;
-	struct ctl_io_pool *othersc_pool;
-	struct proc *work_thread;
-	int targ_online;
-	uint32_t ctl_lun_mask[CTL_MAX_LUNS >> 5];
+	void *othersc_pool;
+	struct proc *ctl_proc;
+	uint32_t ctl_lun_mask[(CTL_MAX_LUNS + 31) / 32];
 	struct ctl_lun *ctl_luns[CTL_MAX_LUNS];
-	struct ctl_wwpn_iid wwpn_iid[CTL_MAX_PORTS][CTL_MAX_INIT_PER_PORT];
-	uint32_t ctl_port_mask;
-	uint64_t aps_locked_lun;
+	uint32_t ctl_port_mask[(CTL_MAX_PORTS + 31) / 32];
 	STAILQ_HEAD(, ctl_lun) lun_list;
 	STAILQ_HEAD(, ctl_be_lun) pending_lun_queue;
-	STAILQ_HEAD(, ctl_io_hdr) task_queue;
-	STAILQ_HEAD(, ctl_io_hdr) incoming_queue;
-	STAILQ_HEAD(, ctl_io_hdr) rtr_queue;
-	STAILQ_HEAD(, ctl_io_hdr) done_queue;
-	STAILQ_HEAD(, ctl_io_hdr) isc_queue;
 	uint32_t num_frontends;
 	STAILQ_HEAD(, ctl_frontend) fe_list;
-	struct ctl_frontend *ctl_ports[CTL_MAX_PORTS];
+	uint32_t num_ports;
+	STAILQ_HEAD(, ctl_port) port_list;
+	struct ctl_port *ctl_ports[CTL_MAX_PORTS];
 	uint32_t num_backends;
 	STAILQ_HEAD(, ctl_backend_driver) be_list;
-	uint32_t num_pools;
+	struct uma_zone *io_zone;
 	uint32_t cur_pool_id;
-	STAILQ_HEAD(, ctl_io_pool) io_pools;
-	time_t last_print_jiffies;
-	uint32_t skipped_prints;
+	int shutdown;
+	struct ctl_thread threads[CTL_MAX_THREADS];
+	struct thread *lun_thread;
+	struct thread *thresh_thread;
+	TAILQ_HEAD(tpc_tokens, tpc_token) tpc_tokens;
+	struct callout tpc_timeout;
+	struct mtx tpc_lock;
 };
 
 #ifdef _KERNEL
 
-extern struct ctl_cmd_entry ctl_cmd_table[];
+extern const struct ctl_cmd_entry ctl_cmd_table[256];
 
 uint32_t ctl_get_initindex(struct ctl_nexus *nexus);
-int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
-		    uint32_t total_ctl_io, struct ctl_io_pool **npool);
-int ctl_pool_acquire(struct ctl_io_pool *pool);
-int ctl_pool_invalidate(struct ctl_io_pool *pool);
-int ctl_pool_release(struct ctl_io_pool *pool);
-void ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool);
+int ctl_lun_map_init(struct ctl_port *port);
+int ctl_lun_map_deinit(struct ctl_port *port);
+int ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun);
+int ctl_lun_map_unset(struct ctl_port *port, uint32_t plun);
+uint32_t ctl_lun_map_from_port(struct ctl_port *port, uint32_t plun);
+uint32_t ctl_lun_map_to_port(struct ctl_port *port, uint32_t glun);
+int ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
+		    uint32_t total_ctl_io, void **npool);
+void ctl_pool_free(struct ctl_io_pool *pool);
 int ctl_scsi_release(struct ctl_scsiio *ctsio);
 int ctl_scsi_reserve(struct ctl_scsiio *ctsio);
 int ctl_start_stop(struct ctl_scsiio *ctsio);
+int ctl_prevent_allow(struct ctl_scsiio *ctsio);
 int ctl_sync_cache(struct ctl_scsiio *ctsio);
 int ctl_format(struct ctl_scsiio *ctsio);
+int ctl_read_buffer(struct ctl_scsiio *ctsio);
 int ctl_write_buffer(struct ctl_scsiio *ctsio);
+int ctl_write_same(struct ctl_scsiio *ctsio);
+int ctl_unmap(struct ctl_scsiio *ctsio);
 int ctl_mode_select(struct ctl_scsiio *ctsio);
 int ctl_mode_sense(struct ctl_scsiio *ctsio);
+int ctl_log_sense(struct ctl_scsiio *ctsio);
 int ctl_read_capacity(struct ctl_scsiio *ctsio);
-int ctl_service_action_in(struct ctl_scsiio *ctsio);
+int ctl_read_capacity_16(struct ctl_scsiio *ctsio);
+int ctl_read_defect(struct ctl_scsiio *ctsio);
+int ctl_read_toc(struct ctl_scsiio *ctsio);
 int ctl_read_write(struct ctl_scsiio *ctsio);
+int ctl_cnw(struct ctl_scsiio *ctsio);
 int ctl_report_luns(struct ctl_scsiio *ctsio);
 int ctl_request_sense(struct ctl_scsiio *ctsio);
 int ctl_tur(struct ctl_scsiio *ctsio);
+int ctl_verify(struct ctl_scsiio *ctsio);
 int ctl_inquiry(struct ctl_scsiio *ctsio);
+int ctl_get_config(struct ctl_scsiio *ctsio);
+int ctl_get_event_status(struct ctl_scsiio *ctsio);
+int ctl_mechanism_status(struct ctl_scsiio *ctsio);
 int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio);
 int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio);
-int ctl_maintenance_in(struct ctl_scsiio *ctsio);
-void ctl_done_lock(union ctl_io *io, int have_lock);
-int ctl_isc(struct ctl_scsiio *ctsio);
+int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio);
+int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio);
+int ctl_report_supported_tmf(struct ctl_scsiio *ctsio);
+int ctl_report_timestamp(struct ctl_scsiio *ctsio);
+int ctl_get_lba_status(struct ctl_scsiio *ctsio);
 
+void ctl_tpc_init(struct ctl_softc *softc);
+void ctl_tpc_shutdown(struct ctl_softc *softc);
+void ctl_tpc_lun_init(struct ctl_lun *lun);
+void ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx);
+void ctl_tpc_lun_shutdown(struct ctl_lun *lun);
+int ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len);
+int ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio);
+int ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio);
+int ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio);
+int ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio);
+int ctl_extended_copy_lid1(struct ctl_scsiio *ctsio);
+int ctl_extended_copy_lid4(struct ctl_scsiio *ctsio);
+int ctl_copy_operation_abort(struct ctl_scsiio *ctsio);
+int ctl_populate_token(struct ctl_scsiio *ctsio);
+int ctl_write_using_token(struct ctl_scsiio *ctsio);
+int ctl_receive_rod_token_information(struct ctl_scsiio *ctsio);
+int ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio);
+
 #endif	/* _KERNEL */
 
 #endif	/* _CTL_PRIVATE_H_ */
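[The new ctl_lun_map_* prototypes above introduce per-port LUN maps that
translate port-visible LUN numbers to global ones. A simplified stand-in for
what such a lookup does, assuming UINT32_MAX as the "unmapped" sentinel and a
NULL map meaning identity translation; names here are illustrative:]

#include <stdint.h>
#include <stdio.h>

#define NO_LUN	UINT32_MAX

static uint32_t
lun_map_from_port(const uint32_t *map, size_t size, uint32_t plun)
{
	if (map == NULL)		/* no map: identity translation */
		return (plun);
	if (plun >= size)
		return (NO_LUN);
	return (map[plun]);
}

int
main(void)
{
	uint32_t map[4] = { 7, NO_LUN, 3, NO_LUN };

	printf("port LUN 0 -> global %u\n", lun_map_from_port(map, 4, 0));
	printf("port LUN 1 -> %s\n",
	    lun_map_from_port(map, 4, 1) == NO_LUN ? "unmapped" : "mapped");
	return (0);
}
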

Modified: trunk/sys/cam/ctl/ctl_scsi_all.c
===================================================================
--- trunk/sys/cam/ctl/ctl_scsi_all.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_scsi_all.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Implementation of Utility functions for all SCSI device types.
  *
@@ -26,12 +27,12 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: ctl_scsi_all.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_scsi_all.c#2 $
  */
 
 #include <sys/param.h>
 
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_scsi_all.c 288731 2015-10-05 08:55:59Z mav $");
 
 #include <sys/types.h>
 #ifdef _KERNEL
@@ -112,32 +113,10 @@
 void
 ctl_scsi_path_string(union ctl_io *io, char *path_str, int len)
 {
-	if (io->io_hdr.nexus.targ_target.wwid[0] == 0) {
-		snprintf(path_str, len, "(%ju:%d:%ju:%d): ",
-			 (uintmax_t)io->io_hdr.nexus.initid.id,
-			 io->io_hdr.nexus.targ_port,
-			 (uintmax_t)io->io_hdr.nexus.targ_target.id,
-			 io->io_hdr.nexus.targ_lun);
-	} else {
-		/*
-		 * XXX KDM find a better way to display FC WWIDs.
-		 */
-#ifdef _KERNEL
-		snprintf(path_str, len, "(%ju:%d:%#jx,%#jx:%d): ",
-			 (uintmax_t)io->io_hdr.nexus.initid.id,
-			 io->io_hdr.nexus.targ_port,
-			 (intmax_t)io->io_hdr.nexus.targ_target.wwid[0],
-			 (intmax_t)io->io_hdr.nexus.targ_target.wwid[1],
-			 io->io_hdr.nexus.targ_lun);
-#else /* _KERNEL */
-		snprintf(path_str, len, "(%ju:%d:%#jx,%#jx:%d): ",
-			 (uintmax_t)io->io_hdr.nexus.initid.id,
-			 io->io_hdr.nexus.targ_port,
-			 (intmax_t)io->io_hdr.nexus.targ_target.wwid[0],
-			 (intmax_t)io->io_hdr.nexus.targ_target.wwid[1],
-			 io->io_hdr.nexus.targ_lun);
-#endif /* _KERNEL */
-	}
+
+	snprintf(path_str, len, "(%u:%u:%u/%u): ",
+	    io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port,
+	    io->io_hdr.nexus.targ_lun, io->io_hdr.nexus.targ_mapped_lun);
 }
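[The rewritten ctl_scsi_path_string() prints the nexus as
"(initiator:port:lun/mapped_lun): ". A user-space sketch of the same format
with a stand-in nexus struct:]

#include <stdio.h>
#include <stdint.h>

struct nexus {
	uint32_t initid, targ_port, targ_lun, targ_mapped_lun;
};

int
main(void)
{
	struct nexus n = { 2, 1, 0, 5 };
	char path[32];

	snprintf(path, sizeof(path), "(%u:%u:%u/%u): ",
	    n.initid, n.targ_port, n.targ_lun, n.targ_mapped_lun);
	printf("%s\n", path);	/* "(2:1:0/5): " */
	return (0);
}
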
 
 /*

Modified: trunk/sys/cam/ctl/ctl_scsi_all.h
===================================================================
--- trunk/sys/cam/ctl/ctl_scsi_all.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_scsi_all.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
  * Copyright (c) 1997, 1998, 2003 Kenneth D. Merry.
@@ -24,10 +25,10 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: ctl_scsi_all.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_scsi_all.h#2 $
  */
 
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_scsi_all.h 229997 2012-01-12 00:34:33Z ken $");
 
 __BEGIN_DECLS
 const char *	ctl_scsi_status_string(struct ctl_scsiio *ctsio);

Modified: trunk/sys/cam/ctl/ctl_ser_table.c
===================================================================
--- trunk/sys/cam/ctl/ctl_ser_table.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_ser_table.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
  * All rights reserved.
@@ -27,8 +28,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_ser_table.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ser_table.c#1 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_ser_table.c 288802 2015-10-05 11:05:04Z mav $
  */
 
 /*
@@ -54,28 +55,29 @@
 /****************************************************************************/
 
 #define	sK	CTL_SER_SKIP		/* Skip */
-#define	pS	CTL_SER_PASS		/* pS */
+#define	pS	CTL_SER_PASS		/* Pass */
 #define	bK	CTL_SER_BLOCK		/* Blocked */
+#define	bO	CTL_SER_BLOCKOPT	/* Optional block */
 #define	xT	CTL_SER_EXTENT		/* Extent check */
+#define	xO	CTL_SER_EXTENTOPT	/* Optional extent check */
+#define	xS	CTL_SER_EXTENTSEQ	/* Sequential extent check */
 
-static ctl_serialize_action
+static const ctl_serialize_action
 ctl_serialize_table[CTL_SERIDX_COUNT][CTL_SERIDX_COUNT] = {
-/**>IDX_ :: 2nd:TUR RD  WRT  MDSN MDSL RQSN INQ  RDCP RES  REL LSNS FMT STR PRIN PROT MAININ*/
-/*TUR     */{   pS, pS, pS,  bK,  bK,  bK,  pS,  pS,  bK,  bK, pS,  bK, bK, bK,  bK,  bK},
-/*READ    */{   pS, pS, xT,  bK,  bK,  bK,  pS,  pS,  bK,  bK, pS,  bK, bK, bK,  bK,  bK},
-/*WRITE   */{   pS, xT, xT,  bK,  bK,  bK,  pS,  pS,  bK,  bK, pS,  bK, bK, bK,  bK,  bK},
-/*MD_SNS  */{   bK, bK, bK,  pS,  bK,  bK,  pS,  pS,  bK,  bK, pS,  bK, bK, bK,  bK,  bK},
-/*MD_SEL  */{   bK, bK, bK,  bK,  bK,  bK,  pS,  pS,  bK,  bK, pS,  bK, bK, bK,  bK,  bK},
-/*RQ_SNS  */{   pS, pS, pS,  pS,  pS,  bK,  pS,  pS,  bK,  bK, pS,  bK, bK, bK,  bK,  bK},
-/*INQ     */{   pS, pS, pS,  pS,  pS,  bK,  pS,  pS,  bK,  bK, pS,  bK, bK, bK,  bK,  bK},
-/*RD_CAP  */{   pS, pS, pS,  pS,  pS,  bK,  pS,  pS,  bK,  bK, pS,  bK, bK, bK,  bK,  bK},
-/*RESV    */{   bK, bK, bK,  bK,  bK,  bK,  bK,  bK,  bK,  bK, bK,  bK, bK, bK,  bK,  bK},
-/*REL     */{   bK, bK, bK,  bK,  bK,  bK,  bK,  bK,  bK,  bK, bK,  bK, bK, bK,  bK,  bK},
-/*LOG_SNS */{   pS, pS, pS,  pS,  bK,  bK,  pS,  pS,  bK,  bK, pS,  bK, bK, bK,  bK,  bK},
-/*FORMAT  */{   pS, bK, bK,  bK,  bK,  pS,  pS,  bK,  bK,  bK, bK,  bK, bK, bK,  bK,  bK},
-/*START   */{   bK, bK, bK,  bK,  bK,  bK,  pS,  bK,  bK,  bK, bK,  bK, bK, bK,  bK,  bK},
-/*PRES_IN */{   bK, bK, bK,  bK,  bK,  bK,  bK,  bK,  bK,  bK, bK,  bK, bK, bK,  bK,  bK},
-/*PRES_OUT*/{   bK, bK, bK,  bK,  bK,  bK,  bK,  bK,  bK,  bK, bK,  bK, bK, bK,  bK,  bK},
-/*MAIN_IN */{   bK, bK, bK,  bK,  bK,  bK,  pS,  bK,  bK,  bK, bK,  bK, bK, bK,  bK,  pS}
+/**>IDX_ :: 2nd:TUR RD  WRT UNM SYN MDSN MDSL RQSN INQ RDCP RES LSNS FMT STR*/
+/*TUR     */{   pS, pS, pS, pS, pS, bK,  bK,  bK,  pS, pS,  bK, pS,  bK, bK},
+/*READ    */{   pS, xS, xT, bO, pS, bK,  bK,  bK,  pS, pS,  bK, pS,  bK, bK},
+/*WRITE   */{   pS, xT, xT, bO, bO, bK,  bK,  bK,  pS, pS,  bK, pS,  bK, bK},
+/*UNMAP   */{   pS, xO, xO, pS, pS, bK,  bK,  bK,  pS, pS,  bK, pS,  bK, bK},
+/*SYNC    */{   pS, pS, pS, pS, pS, bK,  bK,  bK,  pS, pS,  bK, pS,  bK, bK},
+/*MD_SNS  */{   bK, bK, bK, bK, bK, pS,  bK,  bK,  pS, pS,  bK, pS,  bK, bK},
+/*MD_SEL  */{   bK, bK, bK, bK, bK, bK,  bK,  bK,  pS, pS,  bK, pS,  bK, bK},
+/*RQ_SNS  */{   pS, pS, pS, pS, pS, pS,  pS,  bK,  pS, pS,  bK, pS,  bK, bK},
+/*INQ     */{   pS, pS, pS, pS, pS, pS,  pS,  bK,  pS, pS,  pS, pS,  bK, bK},
+/*RD_CAP  */{   pS, pS, pS, pS, pS, pS,  pS,  bK,  pS, pS,  pS, pS,  bK, pS},
+/*RES     */{   bK, bK, bK, bK, bK, bK,  bK,  bK,  pS, bK,  bK, bK,  bK, bK},
+/*LOG_SNS */{   pS, pS, pS, pS, pS, pS,  bK,  bK,  pS, pS,  bK, pS,  bK, bK},
+/*FORMAT  */{   pS, bK, bK, bK, bK, bK,  bK,  pS,  pS, bK,  bK, bK,  bK, bK},
+/*START   */{   bK, bK, bK, bK, bK, bK,  bK,  bK,  pS, bK,  bK, bK,  bK, bK},
 };
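[Per the "IDX_ :: 2nd:" header, the table row is the command already in the
task set and the column the newly arriving one; the cell decides pass, block,
or extent check. A reduced 3x3 toy version of that lookup, assuming the same
row/column orientation; the real matrix is CTL_SERIDX_COUNT squared:]

#include <stdio.h>

typedef enum { SER_PASS, SER_BLOCK, SER_EXTENT } ser_action;
enum { IDX_TUR, IDX_READ, IDX_WRITE, IDX_COUNT };

static const ser_action table[IDX_COUNT][IDX_COUNT] = {
	/* 2nd:       TUR       READ        WRITE      */
	/* TUR   */ { SER_PASS, SER_PASS,   SER_PASS   },
	/* READ  */ { SER_PASS, SER_PASS,   SER_EXTENT },
	/* WRITE */ { SER_PASS, SER_EXTENT, SER_EXTENT },
};

int
main(void)
{
	/* A WRITE arriving behind a queued READ needs an extent check. */
	printf("%d\n", table[IDX_READ][IDX_WRITE] == SER_EXTENT);
	return (0);
}
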
 

Added: trunk/sys/cam/ctl/ctl_tpc.c
===================================================================
--- trunk/sys/cam/ctl/ctl_tpc.c	                        (rev 0)
+++ trunk/sys/cam/ctl/ctl_tpc.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -0,0 +1,2472 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Alexander Motin <mav at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_tpc.c 314767 2017-03-06 06:47:05Z mav $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <machine/atomic.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_tpc.h>
+#include <cam/ctl/ctl_error.h>
+
+#define	TPC_MAX_CSCDS	64
+#define	TPC_MAX_SEGS	64
+#define	TPC_MAX_SEG	0
+#define	TPC_MAX_LIST	8192
+#define	TPC_MAX_INLINE	0
+#define	TPC_MAX_LISTS	255
+#define	TPC_MAX_IO_SIZE	(1024 * 1024)
+#define	TPC_MAX_IOCHUNK_SIZE	(TPC_MAX_IO_SIZE * 16)
+#define	TPC_MIN_TOKEN_TIMEOUT	1
+#define	TPC_DFL_TOKEN_TIMEOUT	60
+#define	TPC_MAX_TOKEN_TIMEOUT	600
+
+MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");
+
+typedef enum {
+	TPC_ERR_RETRY		= 0x000,
+	TPC_ERR_FAIL		= 0x001,
+	TPC_ERR_MASK		= 0x0ff,
+	TPC_ERR_NO_DECREMENT	= 0x100
+} tpc_error_action;
+
+struct tpc_list;
+TAILQ_HEAD(runl, tpc_io);
+struct tpc_io {
+	union ctl_io		*io;
+	uint8_t			 target;
+	uint32_t		 cscd;
+	uint64_t		 lun;
+	uint8_t			*buf;
+	struct tpc_list		*list;
+	struct runl		 run;
+	TAILQ_ENTRY(tpc_io)	 rlinks;
+	TAILQ_ENTRY(tpc_io)	 links;
+};
+
+struct tpc_token {
+	uint8_t			 token[512];
+	uint64_t		 lun;
+	uint32_t		 blocksize;
+	uint8_t			*params;
+	struct scsi_range_desc	*range;
+	int			 nrange;
+	int			 active;
+	time_t			 last_active;
+	uint32_t		 timeout;
+	TAILQ_ENTRY(tpc_token)	 links;
+};
+
+struct tpc_list {
+	uint8_t			 service_action;
+	int			 init_port;
+	uint32_t		 init_idx;
+	uint32_t		 list_id;
+	uint8_t			 flags;
+	uint8_t			*params;
+	struct scsi_ec_cscd	*cscd;
+	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
+	uint8_t			*inl;
+	int			 ncscd;
+	int			 nseg;
+	int			 leninl;
+	struct tpc_token	*token;
+	struct scsi_range_desc	*range;
+	int			 nrange;
+	off_t			 offset_into_rod;
+
+	int			 curseg;
+	off_t			 cursectors;
+	off_t			 curbytes;
+	int			 curops;
+	int			 stage;
+	off_t			 segsectors;
+	off_t			 segbytes;
+	int			 tbdio;
+	int			 error;
+	int			 abort;
+	int			 completed;
+	time_t			 last_active;
+	TAILQ_HEAD(, tpc_io)	 allio;
+	struct scsi_sense_data	 fwd_sense_data;
+	uint8_t			 fwd_sense_len;
+	uint8_t			 fwd_scsi_status;
+	uint8_t			 fwd_target;
+	uint16_t		 fwd_cscd;
+	struct scsi_sense_data	 sense_data;
+	uint8_t			 sense_len;
+	uint8_t			 scsi_status;
+	struct ctl_scsiio	*ctsio;
+	struct ctl_lun		*lun;
+	int			 res_token_valid;
+	uint8_t			 res_token[512];
+	TAILQ_ENTRY(tpc_list)	 links;
+};
+
+static void
+tpc_timeout(void *arg)
+{
+	struct ctl_softc *softc = arg;
+	struct ctl_lun *lun;
+	struct tpc_token *token, *ttoken;
+	struct tpc_list *list, *tlist;
+
+	/* Free completed lists with expired timeout. */
+	STAILQ_FOREACH(lun, &softc->lun_list, links) {
+		mtx_lock(&lun->lun_lock);
+		TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
+			if (!list->completed || time_uptime < list->last_active +
+			    TPC_DFL_TOKEN_TIMEOUT)
+				continue;
+			TAILQ_REMOVE(&lun->tpc_lists, list, links);
+			free(list, M_CTL);
+		}
+		mtx_unlock(&lun->lun_lock);
+	}
+
+	/* Free inactive ROD tokens with expired timeout. */
+	mtx_lock(&softc->tpc_lock);
+	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
+		if (token->active ||
+		    time_uptime < token->last_active + token->timeout + 1)
+			continue;
+		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
+		free(token->params, M_CTL);
+		free(token, M_CTL);
+	}
+	mtx_unlock(&softc->tpc_lock);
+	callout_schedule(&softc->tpc_timeout, hz);
+}
+
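[Both expiry tests in tpc_timeout() reduce to "the object survives while
now < last_active + timeout". A trivial standalone check of that predicate,
with time() standing in for the kernel's time_uptime:]

#include <stdio.h>
#include <time.h>

static int
expired(time_t now, time_t last_active, time_t timeout)
{
	return (now >= last_active + timeout);
}

int
main(void)
{
	time_t now = time(NULL);

	printf("%d\n", expired(now, now - 120, 60));	/* 1: expired  */
	printf("%d\n", expired(now, now - 30, 60));	/* 0: still ok */
	return (0);
}
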
+void
+ctl_tpc_init(struct ctl_softc *softc)
+{
+
+	mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
+	TAILQ_INIT(&softc->tpc_tokens);
+	callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
+	callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc);
+}
+
+void
+ctl_tpc_shutdown(struct ctl_softc *softc)
+{
+	struct tpc_token *token;
+
+	callout_drain(&softc->tpc_timeout);
+
+	/* Free ROD tokens. */
+	mtx_lock(&softc->tpc_lock);
+	while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
+		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
+		free(token->params, M_CTL);
+		free(token, M_CTL);
+	}
+	mtx_unlock(&softc->tpc_lock);
+	mtx_destroy(&softc->tpc_lock);
+}
+
+void
+ctl_tpc_lun_init(struct ctl_lun *lun)
+{
+
+	TAILQ_INIT(&lun->tpc_lists);
+}
+
+void
+ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx)
+{
+	struct tpc_list *list, *tlist;
+
+	TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
+		if (initidx != -1 && list->init_idx != initidx)
+			continue;
+		if (!list->completed)
+			continue;
+		TAILQ_REMOVE(&lun->tpc_lists, list, links);
+		free(list, M_CTL);
+	}
+}
+
+void
+ctl_tpc_lun_shutdown(struct ctl_lun *lun)
+{
+	struct ctl_softc *softc = lun->ctl_softc;
+	struct tpc_list *list;
+	struct tpc_token *token, *ttoken;
+
+	/* Free lists for this LUN. */
+	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
+		TAILQ_REMOVE(&lun->tpc_lists, list, links);
+		KASSERT(list->completed,
+		    ("Not completed TPC (%p) on shutdown", list));
+		free(list, M_CTL);
+	}
+
+	/* Free ROD tokens for this LUN. */
+	mtx_lock(&softc->tpc_lock);
+	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
+		if (token->lun != lun->lun || token->active)
+			continue;
+		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
+		free(token->params, M_CTL);
+		free(token, M_CTL);
+	}
+	mtx_unlock(&softc->tpc_lock);
+}
+
+int
+ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_vpd_tpc *tpc_ptr;
+	struct scsi_vpd_tpc_descriptor *d_ptr;
+	struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
+	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
+	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
+	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
+	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
+	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
+	struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
+	struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
+	struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
+	struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
+	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
+	int data_len;
+
+	data_len = sizeof(struct scsi_vpd_tpc) +
+	    sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
+	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
+	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
+	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
+	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
+	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
+	    sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
+	     sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
+	    sizeof(struct scsi_vpd_tpc_descriptor_srt) +
+	     2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
+	    sizeof(struct scsi_vpd_tpc_descriptor_gco);
+
+	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_data_len = min(data_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	/*
+	 * The control device is always connected.  The disk device, on the
+	 * other hand, may not be online all the time.
+	 */
+	if (lun != NULL)
+		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+				     lun->be_lun->lun_type;
+	else
+		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+	tpc_ptr->page_code = SVPD_SCSI_TPC;
+	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);
+
+	/* Block Device ROD Limits */
+	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
+	bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
+	scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
+	scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
+	scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
+	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
+	    bdrl_ptr->maximum_inactivity_timeout);
+	scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
+	    bdrl_ptr->default_inactivity_timeout);
+	scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
+	scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);
+
+	/* Supported commands */
+	d_ptr = (struct scsi_vpd_tpc_descriptor *)
+	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
+	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
+	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
+	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
+	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
+	scd_ptr = &sc_ptr->descr[0];
+	scd_ptr->opcode = EXTENDED_COPY;
+	scd_ptr->sa_length = 5;
+	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
+	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
+	scd_ptr->supported_service_actions[2] = EC_PT;
+	scd_ptr->supported_service_actions[3] = EC_WUT;
+	scd_ptr->supported_service_actions[4] = EC_COA;
+	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
+	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
+	scd_ptr->opcode = RECEIVE_COPY_STATUS;
+	scd_ptr->sa_length = 6;
+	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
+	scd_ptr->supported_service_actions[1] = RCS_RCFD;
+	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
+	scd_ptr->supported_service_actions[3] = RCS_RCOP;
+	scd_ptr->supported_service_actions[4] = RCS_RRTI;
+	scd_ptr->supported_service_actions[5] = RCS_RART;
+
+	/* Parameter data. */
+	d_ptr = (struct scsi_vpd_tpc_descriptor *)
+	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
+	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
+	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
+	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
+	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
+	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
+	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
+	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);
+
+	/* Supported Descriptors */
+	d_ptr = (struct scsi_vpd_tpc_descriptor *)
+	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
+	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
+	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
+	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
+	sd_ptr->list_length = 4;
+	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
+	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
+	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
+	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;
+
+	/* Supported CSCD Descriptor IDs */
+	d_ptr = (struct scsi_vpd_tpc_descriptor *)
+	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
+	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
+	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
+	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
+	scsi_ulto2b(2, sdid_ptr->list_length);
+	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);
+
+	/* ROD Token Features */
+	d_ptr = (struct scsi_vpd_tpc_descriptor *)
+	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
+	rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
+	scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
+	scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
+	rtf_ptr->remote_tokens = 0;
+	scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
+	scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
+	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
+	    rtf_ptr->maximum_token_inactivity_timeout);
+	scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
+	rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
+	    &rtf_ptr->type_specific_features;
+	rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
+	scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
+	scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
+	scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
+	scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
+	scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
+	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
+	    rtfb_ptr->optimal_bytes_from_token_per_segment);
+
+	/* Supported ROD Tokens */
+	d_ptr = (struct scsi_vpd_tpc_descriptor *)
+	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
+	srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
+	scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
+	scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
+	scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
+	srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
+	    &srt_ptr->rod_type_descriptors;
+	scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
+	srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
+	scsi_ulto2b(0, srtd_ptr->preference_indicator);
+	srtd_ptr++;
+	scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
+	srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
+	scsi_ulto2b(0, srtd_ptr->preference_indicator);
+
+	/* General Copy Operations */
+	d_ptr = (struct scsi_vpd_tpc_descriptor *)
+	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
+	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
+	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
+	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
+	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
+	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
+	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
+	gco_ptr->data_segment_granularity = 0;
+	gco_ptr->inline_data_granularity = 0;
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+
+	return (CTL_RETVAL_COMPLETE);
+}
+
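[ctl_inquiry_evpd_tpc() above repeatedly advances d_ptr past each packed VPD
descriptor: a 4-byte type/length header followed by desc_length parameter
bytes. A simplified standalone sketch of that pointer walk, using host-endian
lengths instead of scsi_2btoul():]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct descr {
	uint16_t desc_type;
	uint16_t desc_length;		/* bytes of parameters[] */
	uint8_t	 parameters[];
};

static struct descr *
next_descr(struct descr *d)
{
	return ((struct descr *)(&d->parameters[0] + d->desc_length));
}

int
main(void)
{
	uint8_t buf[64];
	struct descr *d = (struct descr *)buf;

	memset(buf, 0, sizeof(buf));
	d->desc_length = 8;		/* first descriptor: 8 param bytes */
	printf("next at offset %td\n",
	    (uint8_t *)next_descr(d) - buf);	/* 4 + 8 = 12 */
	return (0);
}
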
+int
+ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
+{
+	struct scsi_receive_copy_operating_parameters *cdb;
+	struct scsi_receive_copy_operating_parameters_data *data;
+	int retval;
+	int alloc_len, total_len;
+
+	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));
+
+	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
+
+	retval = CTL_RETVAL_COMPLETE;
+
+	total_len = sizeof(*data) + 4;
+	alloc_len = scsi_4btoul(cdb->length);
+
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
+	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
+	data->snlid = RCOP_SNLID;
+	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
+	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
+	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
+	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
+	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
+	scsi_ulto4b(0, data->held_data_limit);
+	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
+	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
+	data->maximum_concurrent_copies = TPC_MAX_LISTS;
+	data->data_segment_granularity = 0;
+	data->inline_data_granularity = 0;
+	data->held_data_granularity = 0;
+	data->implemented_descriptor_list_length = 4;
+	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
+	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
+	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
+	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (retval);
+}
+
+static struct tpc_list *
+tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
+{
+	struct tpc_list *list;
+
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
+		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
+		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
+		    list->init_idx == init_idx)
+			break;
+	}
+	return (list);
+}
+
+int
+ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_receive_copy_status_lid1 *cdb;
+	struct scsi_receive_copy_status_lid1_data *data;
+	struct tpc_list *list;
+	struct tpc_list list_copy;
+	int retval;
+	int alloc_len, total_len;
+	uint32_t list_id;
+
+	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));
+
+	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
+	retval = CTL_RETVAL_COMPLETE;
+
+	list_id = cdb->list_identifier;
+	mtx_lock(&lun->lun_lock);
+	list = tpc_find_list(lun, list_id,
+	    ctl_get_initindex(&ctsio->io_hdr.nexus));
+	if (list == NULL) {
+		mtx_unlock(&lun->lun_lock);
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		ctl_done((union ctl_io *)ctsio);
+		return (retval);
+	}
+	list_copy = *list;
+	if (list->completed) {
+		TAILQ_REMOVE(&lun->tpc_lists, list, links);
+		free(list, M_CTL);
+	}
+	mtx_unlock(&lun->lun_lock);
+
+	total_len = sizeof(*data);
+	alloc_len = scsi_4btoul(cdb->length);
+
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
+	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
+	if (list_copy.completed) {
+		if (list_copy.error || list_copy.abort)
+			data->copy_command_status = RCS_CCS_ERROR;
+		else
+			data->copy_command_status = RCS_CCS_COMPLETED;
+	} else
+		data->copy_command_status = RCS_CCS_INPROG;
+	scsi_ulto2b(list_copy.curseg, data->segments_processed);
+	if (list_copy.curbytes <= UINT32_MAX) {
+		data->transfer_count_units = RCS_TC_BYTES;
+		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
+	} else {
+		data->transfer_count_units = RCS_TC_MBYTES;
+		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
+	}
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (retval);
+}
+
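[The kern_data_len = min(total_len, alloc_len) lines in these handlers
implement the standard SCSI rule: the target transfers at most the
initiator's ALLOCATION LENGTH while available_data still reports the full
size. A trivial standalone illustration:]

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	int total_len = 44;	/* bytes the target has to send     */
	int alloc_len = 16;	/* bytes the initiator asked for    */

	printf("transfer %d of %d bytes\n",
	    MIN(total_len, alloc_len), total_len);
	return (0);
}
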
+int
+ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_receive_copy_failure_details *cdb;
+	struct scsi_receive_copy_failure_details_data *data;
+	struct tpc_list *list;
+	struct tpc_list list_copy;
+	int retval;
+	int alloc_len, total_len;
+	uint32_t list_id;
+
+	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));
+
+	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
+	retval = CTL_RETVAL_COMPLETE;
+
+	list_id = cdb->list_identifier;
+	mtx_lock(&lun->lun_lock);
+	list = tpc_find_list(lun, list_id,
+	    ctl_get_initindex(&ctsio->io_hdr.nexus));
+	if (list == NULL || !list->completed) {
+		mtx_unlock(&lun->lun_lock);
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		ctl_done((union ctl_io *)ctsio);
+		return (retval);
+	}
+	list_copy = *list;
+	TAILQ_REMOVE(&lun->tpc_lists, list, links);
+	free(list, M_CTL);
+	mtx_unlock(&lun->lun_lock);
+
+	total_len = sizeof(*data) + list_copy.sense_len;
+	alloc_len = scsi_4btoul(cdb->length);
+
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
+	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
+		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
+		    data->available_data);
+		data->copy_command_status = RCS_CCS_ERROR;
+	} else
+		scsi_ulto4b(0, data->available_data);
+	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
+	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (retval);
+}
+
+int
+ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_receive_copy_status_lid4 *cdb;
+	struct scsi_receive_copy_status_lid4_data *data;
+	struct tpc_list *list;
+	struct tpc_list list_copy;
+	int retval;
+	int alloc_len, total_len;
+	uint32_t list_id;
+
+	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));
+
+	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
+	retval = CTL_RETVAL_COMPLETE;
+
+	list_id = scsi_4btoul(cdb->list_identifier);
+	mtx_lock(&lun->lun_lock);
+	list = tpc_find_list(lun, list_id,
+	    ctl_get_initindex(&ctsio->io_hdr.nexus));
+	if (list == NULL) {
+		mtx_unlock(&lun->lun_lock);
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		ctl_done((union ctl_io *)ctsio);
+		return (retval);
+	}
+	list_copy = *list;
+	if (list->completed) {
+		TAILQ_REMOVE(&lun->tpc_lists, list, links);
+		free(list, M_CTL);
+	}
+	mtx_unlock(&lun->lun_lock);
+
+	total_len = sizeof(*data) + list_copy.sense_len;
+	alloc_len = scsi_4btoul(cdb->length);
+
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
+	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
+	    data->available_data);
+	data->response_to_service_action = list_copy.service_action;
+	if (list_copy.completed) {
+		if (list_copy.error)
+			data->copy_command_status = RCS_CCS_ERROR;
+		else if (list_copy.abort)
+			data->copy_command_status = RCS_CCS_ABORTED;
+		else
+			data->copy_command_status = RCS_CCS_COMPLETED;
+	} else
+		data->copy_command_status = RCS_CCS_INPROG_FG;
+	scsi_ulto2b(list_copy.curops, data->operation_counter);
+	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
+	data->transfer_count_units = RCS_TC_BYTES;
+	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
+	scsi_ulto2b(list_copy.curseg, data->segments_processed);
+	data->length_of_the_sense_data_field = list_copy.sense_len;
+	data->sense_data_length = list_copy.sense_len;
+	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
+
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (retval);
+}
+
+int
+ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_copy_operation_abort *cdb;
+	struct tpc_list *list;
+	int retval;
+	uint32_t list_id;
+
+	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));
+
+	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
+	retval = CTL_RETVAL_COMPLETE;
+
+	list_id = scsi_4btoul(cdb->list_identifier);
+	mtx_lock(&lun->lun_lock);
+	list = tpc_find_list(lun, list_id,
+	    ctl_get_initindex(&ctsio->io_hdr.nexus));
+	if (list == NULL) {
+		mtx_unlock(&lun->lun_lock);
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		ctl_done((union ctl_io *)ctsio);
+		return (retval);
+	}
+	list->abort = 1;
+	mtx_unlock(&lun->lun_lock);
+
+	ctl_set_success(ctsio);
+	ctl_done((union ctl_io *)ctsio);
+	return (retval);
+}
+
+static uint64_t
+tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
+    uint32_t *pb, uint32_t *pbo)
+{
+
+	if (idx == 0xffff) {
+		if (ss && list->lun->be_lun)
+			*ss = list->lun->be_lun->blocksize;
+		if (pb && list->lun->be_lun)
+			*pb = list->lun->be_lun->blocksize <<
+			    list->lun->be_lun->pblockexp;
+		if (pbo && list->lun->be_lun)
+			*pbo = list->lun->be_lun->blocksize *
+			    list->lun->be_lun->pblockoff;
+		return (list->lun->lun);
+	}
+	if (idx >= list->ncscd)
+		return (UINT64_MAX);
+	return (tpcl_resolve(list->lun->ctl_softc,
+	    list->init_port, &list->cscd[idx], ss, pb, pbo));
+}
+
+static void
+tpc_set_io_error_sense(struct tpc_list *list)
+{
+	int flen;
+	uint8_t csi[4];
+	uint8_t sks[3];
+	uint8_t fbuf[4 + 64];
+
+	scsi_ulto4b(list->curseg, csi);
+	if (list->fwd_cscd <= 0x07ff) {
+		sks[0] = SSD_SKS_SEGMENT_VALID;
+		scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] -
+		    list->params, &sks[1]);
+	} else
+		sks[0] = 0;
+	if (list->fwd_scsi_status) {
+		fbuf[0] = 0x0c;
+		fbuf[2] = list->fwd_target;
+		flen = list->fwd_sense_len;
+		if (flen > 64) {
+			flen = 64;
+			fbuf[2] |= SSD_FORWARDED_FSDT;
+		}
+		fbuf[1] = 2 + flen;
+		fbuf[3] = list->fwd_scsi_status;
+		bcopy(&list->fwd_sense_data, &fbuf[4], flen);
+		flen += 4;
+	} else
+		flen = 0;
+	ctl_set_sense(list->ctsio, /*current_error*/ 1,
+	    /*sense_key*/ SSD_KEY_COPY_ABORTED,
+	    /*asc*/ 0x0d, /*ascq*/ 0x01,
+	    SSD_ELEM_COMMAND, sizeof(csi), csi,
+	    sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks,
+	    flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf,
+	    SSD_ELEM_NONE);
+}
+
+static int
+tpc_process_b2b(struct tpc_list *list)
+{
+	struct scsi_ec_segment_b2b *seg;
+	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
+	struct tpc_io *tior, *tiow;
+	struct runl run;
+	uint64_t sl, dl;
+	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
+	int numlba;
+	uint32_t srcblock, dstblock, pb, pbo, adj;
+	uint16_t scscd, dcscd;
+	uint8_t csi[4];
+
+	scsi_ulto4b(list->curseg, csi);
+	if (list->stage == 1) {
+		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
+			TAILQ_REMOVE(&list->allio, tior, links);
+			ctl_free_io(tior->io);
+			free(tior->buf, M_CTL);
+			free(tior, M_CTL);
+		}
+		if (list->abort) {
+			ctl_set_task_aborted(list->ctsio);
+			return (CTL_RETVAL_ERROR);
+		} else if (list->error) {
+			tpc_set_io_error_sense(list);
+			return (CTL_RETVAL_ERROR);
+		}
+		list->cursectors += list->segsectors;
+		list->curbytes += list->segbytes;
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	TAILQ_INIT(&list->allio);
+	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
+	scscd = scsi_2btoul(seg->src_cscd);
+	dcscd = scsi_2btoul(seg->dst_cscd);
+	sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
+	dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
+	if (sl == UINT64_MAX || dl == UINT64_MAX) {
+		ctl_set_sense(list->ctsio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
+		    /*asc*/ 0x08, /*ascq*/ 0x04,
+		    SSD_ELEM_COMMAND, sizeof(csi), csi,
+		    SSD_ELEM_NONE);
+		return (CTL_RETVAL_ERROR);
+	}
+	if (pbo > 0)
+		pbo = pb - pbo;
+	sdstp = &list->cscd[scscd].dtsp;
+	if (scsi_3btoul(sdstp->block_length) != 0)
+		srcblock = scsi_3btoul(sdstp->block_length);
+	ddstp = &list->cscd[dcscd].dtsp;
+	if (scsi_3btoul(ddstp->block_length) != 0)
+		dstblock = scsi_3btoul(ddstp->block_length);
+	numlba = scsi_2btoul(seg->number_of_blocks);
+	if (seg->flags & EC_SEG_DC)
+		numbytes = (off_t)numlba * dstblock;
+	else
+		numbytes = (off_t)numlba * srcblock;
+	srclba = scsi_8btou64(seg->src_lba);
+	dstlba = scsi_8btou64(seg->dst_lba);
+
+//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
+//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
+//	    dl, scsi_8btou64(seg->dst_lba));
+
+	if (numbytes == 0)
+		return (CTL_RETVAL_COMPLETE);
+
+	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
+		ctl_set_sense(list->ctsio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
+		    /*asc*/ 0x26, /*ascq*/ 0x0A,
+		    SSD_ELEM_COMMAND, sizeof(csi), csi,
+		    SSD_ELEM_NONE);
+		return (CTL_RETVAL_ERROR);
+	}
+
+	list->segbytes = numbytes;
+	list->segsectors = numbytes / dstblock;
+	donebytes = 0;
+	TAILQ_INIT(&run);
+	list->tbdio = 0;
+	while (donebytes < numbytes) {
+		roundbytes = numbytes - donebytes;
+		if (roundbytes > TPC_MAX_IO_SIZE) {
+			roundbytes = TPC_MAX_IO_SIZE;
+			roundbytes -= roundbytes % dstblock;
+			if (pb > dstblock) {
+				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
+				if (roundbytes > adj)
+					roundbytes -= adj;
+			}
+		}
+
+		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
+		TAILQ_INIT(&tior->run);
+		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
+		tior->list = list;
+		TAILQ_INSERT_TAIL(&list->allio, tior, links);
+		tior->io = tpcl_alloc_io();
+		ctl_scsi_read_write(tior->io,
+				    /*data_ptr*/ tior->buf,
+				    /*data_len*/ roundbytes,
+				    /*read_op*/ 1,
+				    /*byte2*/ 0,
+				    /*minimum_cdb_size*/ 0,
+				    /*lba*/ srclba,
+				    /*num_blocks*/ roundbytes / srcblock,
+				    /*tag_type*/ CTL_TAG_SIMPLE,
+				    /*control*/ 0);
+		tior->io->io_hdr.retries = 3;
+		tior->target = SSD_FORWARDED_SDS_EXSRC;
+		tior->cscd = scscd;
+		tior->lun = sl;
+		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
+
+		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
+		TAILQ_INIT(&tiow->run);
+		tiow->list = list;
+		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
+		tiow->io = tpcl_alloc_io();
+		ctl_scsi_read_write(tiow->io,
+				    /*data_ptr*/ tior->buf,
+				    /*data_len*/ roundbytes,
+				    /*read_op*/ 0,
+				    /*byte2*/ 0,
+				    /*minimum_cdb_size*/ 0,
+				    /*lba*/ dstlba,
+				    /*num_blocks*/ roundbytes / dstblock,
+				    /*tag_type*/ CTL_TAG_SIMPLE,
+				    /*control*/ 0);
+		tiow->io->io_hdr.retries = 3;
+		tiow->target = SSD_FORWARDED_SDS_EXDST;
+		tiow->cscd = dcscd;
+		tiow->lun = dl;
+		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
+
+		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
+		TAILQ_INSERT_TAIL(&run, tior, rlinks);
+		list->tbdio++;
+		donebytes += roundbytes;
+		srclba += roundbytes / srcblock;
+		dstlba += roundbytes / dstblock;
+	}
+
+	while ((tior = TAILQ_FIRST(&run)) != NULL) {
+		TAILQ_REMOVE(&run, tior, rlinks);
+		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
+			panic("tpcl_queue() error");
+	}
+
+	list->stage++;
+	return (CTL_RETVAL_QUEUED);
+}
+
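[The copy loop in tpc_process_b2b() caps each round at TPC_MAX_IO_SIZE,
rounds down to the destination logical block, then trims so the chunk ends on
a physical-block boundary when pb > dstblock. The same arithmetic as a
standalone sketch with toy numbers:]

#include <stdio.h>
#include <stdint.h>

#define MAX_IO	(1024 * 1024)

static int64_t
round_size(int64_t remaining, uint64_t dstlba, uint32_t dstblock,
    uint32_t pb, uint32_t pbo)
{
	int64_t roundbytes = remaining;
	uint32_t adj;

	if (roundbytes > MAX_IO) {
		roundbytes = MAX_IO;
		roundbytes -= roundbytes % dstblock;
		if (pb > dstblock) {
			adj = (dstlba * dstblock + roundbytes - pbo) % pb;
			if (roundbytes > adj)
				roundbytes -= adj;
		}
	}
	return (roundbytes);
}

int
main(void)
{
	/* 512-byte blocks on a 4K-physical drive, copy 3 MB from LBA 3. */
	printf("%lld\n", (long long)round_size(3 << 20, 3, 512, 4096, 0));
	return (0);			/* 1047040: ends on a 4K boundary */
}
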
+static int
+tpc_process_verify(struct tpc_list *list)
+{
+	struct scsi_ec_segment_verify *seg;
+	struct tpc_io *tio;
+	uint64_t sl;
+	uint16_t cscd;
+	uint8_t csi[4];
+
+	scsi_ulto4b(list->curseg, csi);
+	if (list->stage == 1) {
+		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
+			TAILQ_REMOVE(&list->allio, tio, links);
+			ctl_free_io(tio->io);
+			free(tio, M_CTL);
+		}
+		if (list->abort) {
+			ctl_set_task_aborted(list->ctsio);
+			return (CTL_RETVAL_ERROR);
+		} else if (list->error) {
+			tpc_set_io_error_sense(list);
+			return (CTL_RETVAL_ERROR);
+		} else
+			return (CTL_RETVAL_COMPLETE);
+	}
+
+	TAILQ_INIT(&list->allio);
+	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
+	cscd = scsi_2btoul(seg->src_cscd);
+	sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
+	if (sl == UINT64_MAX) {
+		ctl_set_sense(list->ctsio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
+		    /*asc*/ 0x08, /*ascq*/ 0x04,
+		    SSD_ELEM_COMMAND, sizeof(csi), csi,
+		    SSD_ELEM_NONE);
+		return (CTL_RETVAL_ERROR);
+	}
+
+//	printf("Verify %ju\n", sl);
+
+	if ((seg->tur & 0x01) == 0)
+		return (CTL_RETVAL_COMPLETE);
+
+	list->tbdio = 1;
+	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
+	TAILQ_INIT(&tio->run);
+	tio->list = list;
+	TAILQ_INSERT_TAIL(&list->allio, tio, links);
+	tio->io = tpcl_alloc_io();
+	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
+	tio->io->io_hdr.retries = 3;
+	tio->target = SSD_FORWARDED_SDS_EXSRC;
+	tio->cscd = cscd;
+	tio->lun = sl;
+	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
+	list->stage++;
+	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
+		panic("tpcl_queue() error");
+	return (CTL_RETVAL_QUEUED);
+}
+
+static int
+tpc_process_register_key(struct tpc_list *list)
+{
+	struct scsi_ec_segment_register_key *seg;
+	struct tpc_io *tio;
+	uint64_t dl;
+	int datalen;
+	uint16_t cscd;
+	uint8_t csi[4];
+
+	scsi_ulto4b(list->curseg, csi);
+	if (list->stage == 1) {
+		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
+			TAILQ_REMOVE(&list->allio, tio, links);
+			ctl_free_io(tio->io);
+			free(tio->buf, M_CTL);
+			free(tio, M_CTL);
+		}
+		if (list->abort) {
+			ctl_set_task_aborted(list->ctsio);
+			return (CTL_RETVAL_ERROR);
+		} else if (list->error) {
+			tpc_set_io_error_sense(list);
+			return (CTL_RETVAL_ERROR);
+		} else
+			return (CTL_RETVAL_COMPLETE);
+	}
+
+	TAILQ_INIT(&list->allio);
+	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
+	cscd = scsi_2btoul(seg->dst_cscd);
+	dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
+	if (dl == UINT64_MAX) {
+		ctl_set_sense(list->ctsio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
+		    /*asc*/ 0x08, /*ascq*/ 0x04,
+		    SSD_ELEM_COMMAND, sizeof(csi), csi,
+		    SSD_ELEM_NONE);
+		return (CTL_RETVAL_ERROR);
+	}
+
+//	printf("Register Key %ju\n", dl);
+
+	list->tbdio = 1;
+	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
+	TAILQ_INIT(&tio->run);
+	tio->list = list;
+	TAILQ_INSERT_TAIL(&list->allio, tio, links);
+	tio->io = tpcl_alloc_io();
+	datalen = sizeof(struct scsi_per_res_out_parms);
+	tio->buf = malloc(datalen, M_CTL, M_WAITOK);
+	ctl_scsi_persistent_res_out(tio->io,
+	    tio->buf, datalen, SPRO_REGISTER, -1,
+	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
+	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
+	tio->io->io_hdr.retries = 3;
+	tio->target = SSD_FORWARDED_SDS_EXDST;
+	tio->cscd = cscd;
+	tio->lun = dl;
+	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
+	list->stage++;
+	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
+		panic("tpcl_queue() error");
+	return (CTL_RETVAL_QUEUED);
+}
+
+static off_t
+tpc_ranges_length(struct scsi_range_desc *range, int nrange)
+{
+	off_t length = 0;
+	int r;
+
+	for (r = 0; r < nrange; r++)
+		length += scsi_4btoul(range[r].length);
+	return (length);
+}
+
+static int
+tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba,
+    uint64_t *lba)
+{
+	uint64_t b1;
+	uint32_t l1;
+	int i;
+
+	for (i = 0; i < nrange; i++) {
+		b1 = scsi_8btou64(range[i].lba);
+		l1 = scsi_4btoul(range[i].length);
+		if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) {
+			*lba = MAX(b1, maxlba + 1);
+			return (-1);
+		}
+	}
+	return (0);
+}
+
+static int
+tpc_check_ranges_x(struct scsi_range_desc *range, int nrange)
+{
+	uint64_t b1, b2;
+	uint32_t l1, l2;
+	int i, j;
+
+	for (i = 0; i < nrange - 1; i++) {
+		b1 = scsi_8btou64(range[i].lba);
+		l1 = scsi_4btoul(range[i].length);
+		for (j = i + 1; j < nrange; j++) {
+			b2 = scsi_8btou64(range[j].lba);
+			l2 = scsi_4btoul(range[j].length);
+			if (b1 + l1 > b2 && b2 + l2 > b1)
+				return (-1);
+		}
+	}
+	return (0);
+}
+
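[The test in tpc_check_ranges_x() is the standard half-open interval
intersection: [b1, b1+l1) and [b2, b2+l2) overlap iff b1+l1 > b2 and
b2+l2 > b1. A quick standalone check:]

#include <stdio.h>
#include <stdint.h>

static int
overlaps(uint64_t b1, uint32_t l1, uint64_t b2, uint32_t l2)
{
	return (b1 + l1 > b2 && b2 + l2 > b1);
}

int
main(void)
{
	printf("%d\n", overlaps(0, 8, 4, 8));	/* 1: [0,8) meets [4,12)   */
	printf("%d\n", overlaps(0, 8, 8, 8));	/* 0: adjacent, no overlap */
	return (0);
}
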
+static int
+tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
+    int *srange, off_t *soffset)
+{
+	off_t off;
+	int r;
+
+	r = 0;
+	off = 0;
+	while (r < nrange) {
+		if (skip - off < scsi_4btoul(range[r].length)) {
+			*srange = r;
+			*soffset = skip - off;
+			return (0);
+		}
+		off += scsi_4btoul(range[r].length);
+		r++;
+	}
+	return (-1);
+}
+
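[tpc_skip_ranges() converts a linear sector count into a (range index,
offset-within-range) pair by walking the descriptor lengths. The same walk
with plain arrays:]

#include <stdio.h>

static int
skip_ranges(const unsigned *lengths, int nrange, long skip,
    int *srange, long *soffset)
{
	long off = 0;
	int r;

	for (r = 0; r < nrange; r++) {
		if (skip - off < (long)lengths[r]) {
			*srange = r;
			*soffset = skip - off;
			return (0);
		}
		off += lengths[r];
	}
	return (-1);			/* past the end of all ranges */
}

int
main(void)
{
	unsigned lengths[3] = { 10, 20, 30 };
	int r;
	long o;

	if (skip_ranges(lengths, 3, 25, &r, &o) == 0)
		printf("range %d, offset %ld\n", r, o);	/* range 1, 15 */
	return (0);
}
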
+static int
+tpc_process_wut(struct tpc_list *list)
+{
+	struct tpc_io *tio, *tior, *tiow;
+	struct runl run;
+	int drange, srange;
+	off_t doffset, soffset;
+	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
+	uint32_t srcblock, dstblock, pb, pbo, adj;
+
+	if (list->stage > 0) {
+		/* Cleanup after previous rounds. */
+		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
+			TAILQ_REMOVE(&list->allio, tio, links);
+			ctl_free_io(tio->io);
+			free(tio->buf, M_CTL);
+			free(tio, M_CTL);
+		}
+		if (list->abort) {
+			ctl_set_task_aborted(list->ctsio);
+			return (CTL_RETVAL_ERROR);
+		} else if (list->error) {
+			if (list->fwd_scsi_status) {
+				list->ctsio->io_hdr.status =
+				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
+				list->ctsio->scsi_status = list->fwd_scsi_status;
+				list->ctsio->sense_data = list->fwd_sense_data;
+				list->ctsio->sense_len = list->fwd_sense_len;
+			} else {
+				ctl_set_invalid_field(list->ctsio,
+				    /*sks_valid*/ 0, /*command*/ 0,
+				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
+			}
+			return (CTL_RETVAL_ERROR);
+		}
+		list->cursectors += list->segsectors;
+		list->curbytes += list->segbytes;
+	}
+
+	/* Check where we are on destination ranges list. */
+	if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
+	    &drange, &doffset) != 0)
+		return (CTL_RETVAL_COMPLETE);
+	dstblock = list->lun->be_lun->blocksize;
+	pb = dstblock << list->lun->be_lun->pblockexp;
+	if (list->lun->be_lun->pblockoff > 0)
+		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
+	else
+		pbo = 0;
+
+	/* Check where we are on source ranges list. */
+	srcblock = list->token->blocksize;
+	if (tpc_skip_ranges(list->token->range, list->token->nrange,
+	    list->offset_into_rod + list->cursectors * dstblock / srcblock,
+	    &srange, &soffset) != 0) {
+		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
+		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
+		return (CTL_RETVAL_ERROR);
+	}
+
+	srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
+	dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
+	numbytes = srcblock *
+	    (scsi_4btoul(list->token->range[srange].length) - soffset);
+	numbytes = omin(numbytes, dstblock *
+	    (scsi_4btoul(list->range[drange].length) - doffset));
+	if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
+		numbytes = TPC_MAX_IOCHUNK_SIZE;
+		numbytes -= numbytes % dstblock;
+		if (pb > dstblock) {
+			adj = (dstlba * dstblock + numbytes - pbo) % pb;
+			if (numbytes > adj)
+				numbytes -= adj;
+		}
+	}
+
+	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
+		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
+		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
+		return (CTL_RETVAL_ERROR);
+	}
+
+	list->segbytes = numbytes;
+	list->segsectors = numbytes / dstblock;
+//printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
+//    srclba, dstlba);
+	donebytes = 0;
+	TAILQ_INIT(&run);
+	list->tbdio = 0;
+	TAILQ_INIT(&list->allio);
+	while (donebytes < numbytes) {
+		roundbytes = numbytes - donebytes;
+		if (roundbytes > TPC_MAX_IO_SIZE) {
+			roundbytes = TPC_MAX_IO_SIZE;
+			roundbytes -= roundbytes % dstblock;
+			if (pb > dstblock) {
+				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
+				if (roundbytes > adj)
+					roundbytes -= adj;
+			}
+		}
+
+		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
+		TAILQ_INIT(&tior->run);
+		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
+		tior->list = list;
+		TAILQ_INSERT_TAIL(&list->allio, tior, links);
+		tior->io = tpcl_alloc_io();
+		ctl_scsi_read_write(tior->io,
+				    /*data_ptr*/ tior->buf,
+				    /*data_len*/ roundbytes,
+				    /*read_op*/ 1,
+				    /*byte2*/ 0,
+				    /*minimum_cdb_size*/ 0,
+				    /*lba*/ srclba,
+				    /*num_blocks*/ roundbytes / srcblock,
+				    /*tag_type*/ CTL_TAG_SIMPLE,
+				    /*control*/ 0);
+		tior->io->io_hdr.retries = 3;
+		tior->lun = list->token->lun;
+		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
+
+		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
+		TAILQ_INIT(&tiow->run);
+		tiow->list = list;
+		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
+		tiow->io = tpcl_alloc_io();
+		ctl_scsi_read_write(tiow->io,
+				    /*data_ptr*/ tior->buf,
+				    /*data_len*/ roundbytes,
+				    /*read_op*/ 0,
+				    /*byte2*/ 0,
+				    /*minimum_cdb_size*/ 0,
+				    /*lba*/ dstlba,
+				    /*num_blocks*/ roundbytes / dstblock,
+				    /*tag_type*/ CTL_TAG_SIMPLE,
+				    /*control*/ 0);
+		tiow->io->io_hdr.retries = 3;
+		tiow->lun = list->lun->lun;
+		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
+
+		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
+		TAILQ_INSERT_TAIL(&run, tior, rlinks);
+		list->tbdio++;
+		donebytes += roundbytes;
+		srclba += roundbytes / srcblock;
+		dstlba += roundbytes / dstblock;
+	}
+
+	while ((tior = TAILQ_FIRST(&run)) != NULL) {
+		TAILQ_REMOVE(&run, tior, rlinks);
+		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
+			panic("tpcl_queue() error");
+	}
+
+	list->stage++;
+	return (CTL_RETVAL_QUEUED);
+}
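+
+/*
+ * Worked example of the physical-block alignment above, assuming
+ * 512-byte logical and 4096-byte physical blocks (pblockexp = 3,
+ * pblockoff = 0, so pb = 4096 and pbo = 0): if a chunk were clamped
+ * to numbytes = 16384 with dstlba = 3, then
+ * adj = (3 * 512 + 16384) % 4096 = 1536, so the chunk is trimmed to
+ * 14848 bytes and ends exactly on a physical block boundary, leaving
+ * the next round aligned.
+ */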
+
+static int
+tpc_process_zero_wut(struct tpc_list *list)
+{
+	struct tpc_io *tio, *tiow;
+	struct runl run, *prun;
+	int r;
+	uint32_t dstblock, len;
+
+	if (list->stage > 0) {
+complete:
+		/* Cleanup after previous rounds. */
+		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
+			TAILQ_REMOVE(&list->allio, tio, links);
+			ctl_free_io(tio->io);
+			free(tio, M_CTL);
+		}
+		if (list->abort) {
+			ctl_set_task_aborted(list->ctsio);
+			return (CTL_RETVAL_ERROR);
+		} else if (list->error) {
+			if (list->fwd_scsi_status) {
+				list->ctsio->io_hdr.status =
+				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
+				list->ctsio->scsi_status = list->fwd_scsi_status;
+				list->ctsio->sense_data = list->fwd_sense_data;
+				list->ctsio->sense_len = list->fwd_sense_len;
+			} else {
+				ctl_set_invalid_field(list->ctsio,
+				    /*sks_valid*/ 0, /*command*/ 0,
+				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
+			}
+			return (CTL_RETVAL_ERROR);
+		}
+		list->cursectors += list->segsectors;
+		list->curbytes += list->segbytes;
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	dstblock = list->lun->be_lun->blocksize;
+	TAILQ_INIT(&run);
+	prun = &run;
+	list->tbdio = 1;
+	TAILQ_INIT(&list->allio);
+	list->segsectors = 0;
+	for (r = 0; r < list->nrange; r++) {
+		len = scsi_4btoul(list->range[r].length);
+		if (len == 0)
+			continue;
+
+		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
+		TAILQ_INIT(&tiow->run);
+		tiow->list = list;
+		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
+		tiow->io = tpcl_alloc_io();
+		ctl_scsi_write_same(tiow->io,
+				    /*data_ptr*/ NULL,
+				    /*data_len*/ 0,
+				    /*byte2*/ SWS_NDOB,
+				    /*lba*/ scsi_8btou64(list->range[r].lba),
+				    /*num_blocks*/ len,
+				    /*tag_type*/ CTL_TAG_SIMPLE,
+				    /*control*/ 0);
+		tiow->io->io_hdr.retries = 3;
+		tiow->lun = list->lun->lun;
+		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
+
+		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
+		prun = &tiow->run;
+		list->segsectors += len;
+	}
+	list->segbytes = list->segsectors * dstblock;
+
+	if (TAILQ_EMPTY(&run))
+		goto complete;
+
+	while ((tiow = TAILQ_FIRST(&run)) != NULL) {
+		TAILQ_REMOVE(&run, tiow, rlinks);
+		if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
+			panic("tpcl_queue() error");
+	}
+
+	list->stage++;
+	return (CTL_RETVAL_QUEUED);
+}
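+
+/*
+ * Unlike tpc_process_wut(), which queues every read at once and chains
+ * only the matching write behind it, the prun threading above links
+ * each WRITE SAME into the previous one's run queue, so the zeroing
+ * I/Os are issued strictly one after another and at most one is in
+ * flight at a time.
+ */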
+
+static void
+tpc_process(struct tpc_list *list)
+{
+	struct ctl_lun *lun = list->lun;
+	struct ctl_softc *softc = lun->ctl_softc;
+	struct scsi_ec_segment *seg;
+	struct ctl_scsiio *ctsio = list->ctsio;
+	int retval = CTL_RETVAL_COMPLETE;
+	uint8_t csi[4];
+
+	if (list->service_action == EC_WUT) {
+		if (list->token != NULL)
+			retval = tpc_process_wut(list);
+		else
+			retval = tpc_process_zero_wut(list);
+		if (retval == CTL_RETVAL_QUEUED)
+			return;
+		if (retval == CTL_RETVAL_ERROR) {
+			list->error = 1;
+			goto done;
+		}
+	} else {
+//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
+		while (list->curseg < list->nseg) {
+			seg = list->seg[list->curseg];
+			switch (seg->type_code) {
+			case EC_SEG_B2B:
+				retval = tpc_process_b2b(list);
+				break;
+			case EC_SEG_VERIFY:
+				retval = tpc_process_verify(list);
+				break;
+			case EC_SEG_REGISTER_KEY:
+				retval = tpc_process_register_key(list);
+				break;
+			default:
+				scsi_ulto4b(list->curseg, csi);
+				ctl_set_sense(ctsio, /*current_error*/ 1,
+				    /*sense_key*/ SSD_KEY_COPY_ABORTED,
+				    /*asc*/ 0x26, /*ascq*/ 0x09,
+				    SSD_ELEM_COMMAND, sizeof(csi), csi,
+				    SSD_ELEM_NONE);
+				goto done;
+			}
+			if (retval == CTL_RETVAL_QUEUED)
+				return;
+			if (retval == CTL_RETVAL_ERROR) {
+				list->error = 1;
+				goto done;
+			}
+			list->curseg++;
+			list->stage = 0;
+		}
+	}
+
+	ctl_set_success(ctsio);
+
+done:
+//printf("ZZZ done\n");
+	free(list->params, M_CTL);
+	list->params = NULL;
+	if (list->token) {
+		mtx_lock(&softc->tpc_lock);
+		if (--list->token->active == 0)
+			list->token->last_active = time_uptime;
+		mtx_unlock(&softc->tpc_lock);
+		list->token = NULL;
+	}
+	mtx_lock(&lun->lun_lock);
+	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
+		TAILQ_REMOVE(&lun->tpc_lists, list, links);
+		free(list, M_CTL);
+	} else {
+		list->completed = 1;
+		list->last_active = time_uptime;
+		list->sense_data = ctsio->sense_data;
+		list->sense_len = ctsio->sense_len;
+		list->scsi_status = ctsio->scsi_status;
+	}
+	mtx_unlock(&lun->lun_lock);
+
+	ctl_done((union ctl_io *)ctsio);
+}
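+
+/*
+ * tpc_process() is the copy state machine's single entry point: it is
+ * called once from the command handlers and then re-entered from
+ * tpc_done() after each queued round.  CTL_RETVAL_QUEUED means I/O is
+ * still outstanding and list->stage remembers where the current
+ * segment left off; COMPLETE and ERROR fall through to final status,
+ * token release and list retirement.
+ */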
+
+/*
+ * For any sort of check condition, busy, etc., we just retry.  We do not
+ * decrement the retry count for unit attention type errors.  These are
+ * normal, and we want to save the retry count for "real" errors.  Otherwise,
+ * we could end up with situations where a command will succeed in some
+ * situations and fail in others, depending on whether a unit attention is
+ * pending.  Also, some of our error recovery actions, most notably the
+ * LUN reset action, will cause a unit attention.
+ *
+ * We can add more detail here later if necessary.
+ */
+static tpc_error_action
+tpc_checkcond_parse(union ctl_io *io)
+{
+	tpc_error_action error_action;
+	int error_code, sense_key, asc, ascq;
+
+	/*
+	 * Default to retrying the command.
+	 */
+	error_action = TPC_ERR_RETRY;
+
+	scsi_extract_sense_len(&io->scsiio.sense_data,
+			       io->scsiio.sense_len,
+			       &error_code,
+			       &sense_key,
+			       &asc,
+			       &ascq,
+			       /*show_errors*/ 1);
+
+	switch (error_code) {
+	case SSD_DEFERRED_ERROR:
+	case SSD_DESC_DEFERRED_ERROR:
+		error_action |= TPC_ERR_NO_DECREMENT;
+		break;
+	case SSD_CURRENT_ERROR:
+	case SSD_DESC_CURRENT_ERROR:
+	default:
+		switch (sense_key) {
+		case SSD_KEY_UNIT_ATTENTION:
+			error_action |= TPC_ERR_NO_DECREMENT;
+			break;
+		case SSD_KEY_HARDWARE_ERROR:
+			/*
+			 * This is our generic "something bad happened"
+			 * error code.  It often isn't recoverable.
+			 */
+			if ((asc == 0x44) && (ascq == 0x00))
+				error_action = TPC_ERR_FAIL;
+			break;
+		case SSD_KEY_NOT_READY:
+			/*
+			 * If the LUN is powered down, there likely isn't
+			 * much point in retrying right now.
+			 */
+			if ((asc == 0x04) && (ascq == 0x02))
+				error_action = TPC_ERR_FAIL;
+			/*
+			 * If the LUN is offline, there probably isn't much
+			 * point in retrying, either.
+			 */
+			if ((asc == 0x04) && (ascq == 0x03))
+				error_action = TPC_ERR_FAIL;
+			break;
+		}
+	}
+	return (error_action);
+}
+
+static tpc_error_action
+tpc_error_parse(union ctl_io *io)
+{
+	tpc_error_action error_action = TPC_ERR_RETRY;
+
+	switch (io->io_hdr.io_type) {
+	case CTL_IO_SCSI:
+		switch (io->io_hdr.status & CTL_STATUS_MASK) {
+		case CTL_SCSI_ERROR:
+			switch (io->scsiio.scsi_status) {
+			case SCSI_STATUS_CHECK_COND:
+				error_action = tpc_checkcond_parse(io);
+				break;
+			default:
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+		break;
+	case CTL_IO_TASK:
+		break;
+	default:
+		panic("%s: invalid ctl_io type %d\n", __func__,
+		      io->io_hdr.io_type);
+		break;
+	}
+	return (error_action);
+}
+
+void
+tpc_done(union ctl_io *io)
+{
+	struct tpc_io *tio, *tior;
+
+	/*
+	 * Very minimal retry logic.  We basically retry if we got an error
+	 * back, and the retry count is greater than 0.  If we ever want
+	 * more sophisticated initiator type behavior, the CAM error
+	 * recovery code in ../common might be helpful.
+	 */
+	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+	 && (io->io_hdr.retries > 0)) {
+		ctl_io_status old_status;
+		tpc_error_action error_action;
+
+		error_action = tpc_error_parse(io);
+		switch (error_action & TPC_ERR_MASK) {
+		case TPC_ERR_FAIL:
+			break;
+		case TPC_ERR_RETRY:
+		default:
+			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
+				io->io_hdr.retries--;
+			old_status = io->io_hdr.status;
+			io->io_hdr.status = CTL_STATUS_NONE;
+			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
+			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
+				printf("%s: error returned from ctl_queue()!\n",
+				       __func__);
+				io->io_hdr.status = old_status;
+			} else
+				return;
+		}
+	}
+
+	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
+		tio->list->error = 1;
+		if (io->io_hdr.io_type == CTL_IO_SCSI &&
+		    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) {
+			tio->list->fwd_scsi_status = io->scsiio.scsi_status;
+			tio->list->fwd_sense_data = io->scsiio.sense_data;
+			tio->list->fwd_sense_len = io->scsiio.sense_len;
+			tio->list->fwd_target = tio->target;
+			tio->list->fwd_cscd = tio->cscd;
+		}
+	} else
+		atomic_add_int(&tio->list->curops, 1);
+	if (!tio->list->error && !tio->list->abort) {
+		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
+			TAILQ_REMOVE(&tio->run, tior, rlinks);
+			atomic_add_int(&tio->list->tbdio, 1);
+			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
+				panic("tpcl_queue() error");
+		}
+	}
+	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
+		tpc_process(tio->list);
+}
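+
+/*
+ * The tbdio accounting in tpc_done() is the completion barrier: the
+ * segment handlers prime it with the number of chains they queue, it
+ * is bumped for every follow-up I/O launched from a finished I/O's
+ * run queue and decremented once per completion, and only the caller
+ * that drops it to zero (atomic_fetchadd_int() returning 1) re-enters
+ * tpc_process().
+ */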
+
+int
+ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_extended_copy *cdb;
+	struct scsi_extended_copy_lid1_data *data;
+	struct scsi_ec_cscd *cscd;
+	struct scsi_ec_segment *seg;
+	struct tpc_list *list, *tlist;
+	uint8_t *ptr;
+	char *value;
+	int len, off, lencscd, lenseg, leninl, nseg;
+
+	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
+
+	cdb = (struct scsi_extended_copy *)ctsio->cdb;
+	len = scsi_4btoul(cdb->length);
+
+	if (len == 0) {
+		ctl_set_success(ctsio);
+		goto done;
+	}
+	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
+	    len > sizeof(struct scsi_extended_copy_lid1_data) +
+	    TPC_MAX_LIST + TPC_MAX_INLINE) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
+		goto done;
+	}
+
+	/*
+	 * If we've got a kernel request that hasn't been malloced yet,
+	 * malloc it and tell the caller the data buffer is here.
+	 */
+	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+		ctsio->kern_data_len = len;
+		ctsio->kern_total_len = len;
+		ctsio->kern_rel_offset = 0;
+		ctsio->kern_sg_entries = 0;
+		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+		ctsio->be_move_done = ctl_config_move_done;
+		ctl_datamove((union ctl_io *)ctsio);
+
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
+	lencscd = scsi_2btoul(data->cscd_list_length);
+	lenseg = scsi_4btoul(data->segment_list_length);
+	leninl = scsi_4btoul(data->inline_data_length);
+	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
+		ctl_set_sense(ctsio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
+		goto done;
+	}
+	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
+		ctl_set_sense(ctsio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
+		goto done;
+	}
+	if (lencscd + lenseg > TPC_MAX_LIST ||
+	    leninl > TPC_MAX_INLINE ||
+	    len < sizeof(struct scsi_extended_copy_lid1_data) +
+	     lencscd + lenseg + leninl) {
+		ctl_set_param_len_error(ctsio);
+		goto done;
+	}
+
+	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
+	list->service_action = cdb->service_action;
+	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
+	if (value != NULL && strcmp(value, "on") == 0)
+		list->init_port = -1;
+	else
+		list->init_port = ctsio->io_hdr.nexus.targ_port;
+	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+	list->list_id = data->list_identifier;
+	list->flags = data->flags;
+	list->params = ctsio->kern_data_ptr;
+	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
+	ptr = &data->data[0];
+	for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
+		cscd = (struct scsi_ec_cscd *)(ptr + off);
+		if (cscd->type_code != EC_CSCD_ID) {
+			free(list, M_CTL);
+			ctl_set_sense(ctsio, /*current_error*/ 1,
+			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+			    /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
+			goto done;
+		}
+	}
+	ptr = &data->data[lencscd];
+	for (nseg = 0, off = 0; off < lenseg; nseg++) {
+		if (nseg >= TPC_MAX_SEGS) {
+			free(list, M_CTL);
+			ctl_set_sense(ctsio, /*current_error*/ 1,
+			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
+			goto done;
+		}
+		seg = (struct scsi_ec_segment *)(ptr + off);
+		if (seg->type_code != EC_SEG_B2B &&
+		    seg->type_code != EC_SEG_VERIFY &&
+		    seg->type_code != EC_SEG_REGISTER_KEY) {
+			free(list, M_CTL);
+			ctl_set_sense(ctsio, /*current_error*/ 1,
+			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
+			goto done;
+		}
+		list->seg[nseg] = seg;
+		off += sizeof(struct scsi_ec_segment) +
+		    scsi_2btoul(seg->descr_length);
+	}
+	list->inl = &data->data[lencscd + lenseg];
+	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
+	list->nseg = nseg;
+	list->leninl = leninl;
+	list->ctsio = ctsio;
+	list->lun = lun;
+	mtx_lock(&lun->lun_lock);
+	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
+		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
+		if (tlist != NULL && !tlist->completed) {
+			mtx_unlock(&lun->lun_lock);
+			free(list, M_CTL);
+			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+			    /*bit*/ 0);
+			goto done;
+		}
+		if (tlist != NULL) {
+			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
+			free(tlist, M_CTL);
+		}
+	}
+	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
+	mtx_unlock(&lun->lun_lock);
+
+	tpc_process(list);
+	return (CTL_RETVAL_COMPLETE);
+
+done:
+	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
+		free(ctsio->kern_data_ptr, M_CTL);
+		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
+	}
+	ctl_done((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
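+
+/*
+ * Note the two-pass entry protocol shared by the parameter-carrying
+ * commands here: on the first dispatch the handler only allocates the
+ * buffer and starts ctl_datamove(), and once the data transfer
+ * completes the command is re-dispatched with CTL_FLAG_ALLOCATED set,
+ * at which point the handler parses the parameter list for real.
+ */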
+
+int
+ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_extended_copy *cdb;
+	struct scsi_extended_copy_lid4_data *data;
+	struct scsi_ec_cscd *cscd;
+	struct scsi_ec_segment *seg;
+	struct tpc_list *list, *tlist;
+	uint8_t *ptr;
+	char *value;
+	int len, off, lencscd, lenseg, leninl, nseg;
+
+	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
+
+	cdb = (struct scsi_extended_copy *)ctsio->cdb;
+	len = scsi_4btoul(cdb->length);
+
+	if (len == 0) {
+		ctl_set_success(ctsio);
+		goto done;
+	}
+	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
+	    len > sizeof(struct scsi_extended_copy_lid4_data) +
+	    TPC_MAX_LIST + TPC_MAX_INLINE) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
+		goto done;
+	}
+
+	/*
+	 * If we've got a kernel request that hasn't been malloced yet,
+	 * malloc it and tell the caller the data buffer is here.
+	 */
+	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+		ctsio->kern_data_len = len;
+		ctsio->kern_total_len = len;
+		ctsio->kern_rel_offset = 0;
+		ctsio->kern_sg_entries = 0;
+		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+		ctsio->be_move_done = ctl_config_move_done;
+		ctl_datamove((union ctl_io *)ctsio);
+
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
+	lencscd = scsi_2btoul(data->cscd_list_length);
+	lenseg = scsi_2btoul(data->segment_list_length);
+	leninl = scsi_2btoul(data->inline_data_length);
+	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
+		ctl_set_sense(ctsio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
+		goto done;
+	}
+	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
+		ctl_set_sense(ctsio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
+		goto done;
+	}
+	if (lencscd + lenseg > TPC_MAX_LIST ||
+	    leninl > TPC_MAX_INLINE ||
+	    len < sizeof(struct scsi_extended_copy_lid4_data) +
+	     lencscd + lenseg + leninl) {
+		ctl_set_param_len_error(ctsio);
+		goto done;
+	}
+
+	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
+	list->service_action = cdb->service_action;
+	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
+	if (value != NULL && strcmp(value, "on") == 0)
+		list->init_port = -1;
+	else
+		list->init_port = ctsio->io_hdr.nexus.targ_port;
+	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+	list->list_id = scsi_4btoul(data->list_identifier);
+	list->flags = data->flags;
+	list->params = ctsio->kern_data_ptr;
+	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
+	ptr = &data->data[0];
+	for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
+		cscd = (struct scsi_ec_cscd *)(ptr + off);
+		if (cscd->type_code != EC_CSCD_ID) {
+			free(list, M_CTL);
+			ctl_set_sense(ctsio, /*current_error*/ 1,
+			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+			    /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
+			goto done;
+		}
+	}
+	ptr = &data->data[lencscd];
+	for (nseg = 0, off = 0; off < lenseg; nseg++) {
+		if (nseg >= TPC_MAX_SEGS) {
+			free(list, M_CTL);
+			ctl_set_sense(ctsio, /*current_error*/ 1,
+			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
+			goto done;
+		}
+		seg = (struct scsi_ec_segment *)(ptr + off);
+		if (seg->type_code != EC_SEG_B2B &&
+		    seg->type_code != EC_SEG_VERIFY &&
+		    seg->type_code != EC_SEG_REGISTER_KEY) {
+			free(list, M_CTL);
+			ctl_set_sense(ctsio, /*current_error*/ 1,
+			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
+			goto done;
+		}
+		list->seg[nseg] = seg;
+		off += sizeof(struct scsi_ec_segment) +
+		    scsi_2btoul(seg->descr_length);
+	}
+	list->inl = &data->data[lencscd + lenseg];
+	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
+	list->nseg = nseg;
+	list->leninl = leninl;
+	list->ctsio = ctsio;
+	list->lun = lun;
+	mtx_lock(&lun->lun_lock);
+	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
+		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
+		if (tlist != NULL && !tlist->completed) {
+			mtx_unlock(&lun->lun_lock);
+			free(list, M_CTL);
+			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+			    /*bit*/ 0);
+			goto done;
+		}
+		if (tlist != NULL) {
+			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
+			free(tlist, M_CTL);
+		}
+	}
+	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
+	mtx_unlock(&lun->lun_lock);
+
+	tpc_process(list);
+	return (CTL_RETVAL_COMPLETE);
+
+done:
+	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
+		free(ctsio->kern_data_ptr, M_CTL);
+		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
+	}
+	ctl_done((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
+
+static void
+tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
+    struct scsi_token *token)
+{
+	static int id = 0;
+	struct scsi_vpd_id_descriptor *idd = NULL;
+	struct scsi_ec_cscd_id *cscd;
+	struct scsi_read_capacity_data_long *dtsd;
+	int targid_len;
+
+	scsi_ulto4b(ROD_TYPE_AUR, token->type);
+	scsi_ulto2b(0x01f8, token->length);
+	scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
+	if (lun->lun_devid)
+		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
+		    lun->lun_devid->data, lun->lun_devid->len,
+		    scsi_devid_is_lun_naa);
+	if (idd == NULL && lun->lun_devid)
+		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
+		    lun->lun_devid->data, lun->lun_devid->len,
+		    scsi_devid_is_lun_eui64);
+	if (idd != NULL) {
+		cscd = (struct scsi_ec_cscd_id *)&token->body[8];
+		cscd->type_code = EC_CSCD_ID;
+		cscd->luidt_pdt = T_DIRECT;
+		memcpy(&cscd->codeset, idd, 4 + idd->length);
+		scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
+	}
+	scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
+	scsi_u64to8b(len, &token->body[48]);
+
+	/* ROD token device type specific data (RC16 without first field) */
+	dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
+	scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
+	dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
+	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
+	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
+		dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
+
+	if (port->target_devid) {
+		targid_len = port->target_devid->len;
+		memcpy(&token->body[120], port->target_devid->data, targid_len);
+	} else
+		targid_len = 32;
+	arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
+}
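+
+/*
+ * The resulting ROD token is 512 bytes in total: the ROD_TYPE_AUR type,
+ * a payload length of 0x01f8 (504, everything past the first 8 bytes)
+ * and a body carrying a local sequence number, the LUN's NAA or EUI64
+ * identifier as a CSCD ID, the byte count, RC16-style device data and
+ * the target port ID, with the tail filled from arc4rand() so that
+ * tokens are not guessable.
+ */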
+
+int
+ctl_populate_token(struct ctl_scsiio *ctsio)
+{
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_port *port = CTL_PORT(ctsio);
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_populate_token *cdb;
+	struct scsi_populate_token_data *data;
+	struct tpc_list *list, *tlist;
+	struct tpc_token *token;
+	uint64_t lba;
+	int len, lendata, lendesc;
+
+	CTL_DEBUG_PRINT(("ctl_populate_token\n"));
+
+	cdb = (struct scsi_populate_token *)ctsio->cdb;
+	len = scsi_4btoul(cdb->length);
+
+	if (len < sizeof(struct scsi_populate_token_data) ||
+	    len > sizeof(struct scsi_populate_token_data) +
+	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
+		goto done;
+	}
+
+	/*
+	 * If we've got a kernel request that hasn't been malloced yet,
+	 * malloc it and tell the caller the data buffer is here.
+	 */
+	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+		ctsio->kern_data_len = len;
+		ctsio->kern_total_len = len;
+		ctsio->kern_rel_offset = 0;
+		ctsio->kern_sg_entries = 0;
+		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+		ctsio->be_move_done = ctl_config_move_done;
+		ctl_datamove((union ctl_io *)ctsio);
+
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
+	lendata = scsi_2btoul(data->length);
+	if (lendata < sizeof(struct scsi_populate_token_data) - 2 +
+	    sizeof(struct scsi_range_desc)) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
+		goto done;
+	}
+	lendesc = scsi_2btoul(data->range_descriptor_length);
+	if (lendesc < sizeof(struct scsi_range_desc) ||
+	    len < sizeof(struct scsi_populate_token_data) + lendesc ||
+	    lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+		    /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0);
+		goto done;
+	}
+/*
+	printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
+	    scsi_4btoul(cdb->list_identifier),
+	    data->flags, scsi_4btoul(data->inactivity_timeout),
+	    scsi_4btoul(data->rod_type),
+	    scsi_2btoul(data->range_descriptor_length));
+*/
+
+	/* Validate INACTIVITY TIMEOUT field */
+	if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+		    /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		goto done;
+	}
+
+	/* Validate ROD TYPE field */
+	if ((data->flags & EC_PT_RTV) &&
+	    scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+		    /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
+		goto done;
+	}
+
+	/* Validate list of ranges */
+	if (tpc_check_ranges_l(&data->desc[0],
+	    scsi_2btoul(data->range_descriptor_length) /
+	    sizeof(struct scsi_range_desc),
+	    lun->be_lun->maxlba, &lba) != 0) {
+		ctl_set_lba_out_of_range(ctsio, lba);
+		goto done;
+	}
+	if (tpc_check_ranges_x(&data->desc[0],
+	    scsi_2btoul(data->range_descriptor_length) /
+	    sizeof(struct scsi_range_desc)) != 0) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
+		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		goto done;
+	}
+
+	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
+	list->service_action = cdb->service_action;
+	list->init_port = ctsio->io_hdr.nexus.targ_port;
+	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+	list->list_id = scsi_4btoul(cdb->list_identifier);
+	list->flags = data->flags;
+	list->ctsio = ctsio;
+	list->lun = lun;
+	mtx_lock(&lun->lun_lock);
+	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
+	if (tlist != NULL && !tlist->completed) {
+		mtx_unlock(&lun->lun_lock);
+		free(list, M_CTL);
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		goto done;
+	}
+	if (tlist != NULL) {
+		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
+		free(tlist, M_CTL);
+	}
+	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
+	mtx_unlock(&lun->lun_lock);
+
+	token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
+	token->lun = lun->lun;
+	token->blocksize = lun->be_lun->blocksize;
+	token->params = ctsio->kern_data_ptr;
+	token->range = &data->desc[0];
+	token->nrange = scsi_2btoul(data->range_descriptor_length) /
+	    sizeof(struct scsi_range_desc);
+	list->cursectors = tpc_ranges_length(token->range, token->nrange);
+	list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
+	tpc_create_token(lun, port, list->curbytes,
+	    (struct scsi_token *)token->token);
+	token->active = 0;
+	token->last_active = time_uptime;
+	token->timeout = scsi_4btoul(data->inactivity_timeout);
+	if (token->timeout == 0)
+		token->timeout = TPC_DFL_TOKEN_TIMEOUT;
+	else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
+		token->timeout = TPC_MIN_TOKEN_TIMEOUT;
+	memcpy(list->res_token, token->token, sizeof(list->res_token));
+	list->res_token_valid = 1;
+	list->curseg = 0;
+	list->completed = 1;
+	list->last_active = time_uptime;
+	mtx_lock(&softc->tpc_lock);
+	TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
+	mtx_unlock(&softc->tpc_lock);
+	ctl_set_success(ctsio);
+	ctl_done((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+
+done:
+	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
+		free(ctsio->kern_data_ptr, M_CTL);
+		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
+	}
+	ctl_done((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_write_using_token(struct ctl_scsiio *ctsio)
+{
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_write_using_token *cdb;
+	struct scsi_write_using_token_data *data;
+	struct tpc_list *list, *tlist;
+	struct tpc_token *token;
+	uint64_t lba;
+	int len, lendata, lendesc;
+
+	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));
+
+	cdb = (struct scsi_write_using_token *)ctsio->cdb;
+	len = scsi_4btoul(cdb->length);
+
+	if (len < sizeof(struct scsi_write_using_token_data) ||
+	    len > sizeof(struct scsi_write_using_token_data) +
+	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
+		goto done;
+	}
+
+	/*
+	 * If we've got a kernel request that hasn't been malloced yet,
+	 * malloc it and tell the caller the data buffer is here.
+	 */
+	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+		ctsio->kern_data_len = len;
+		ctsio->kern_total_len = len;
+		ctsio->kern_rel_offset = 0;
+		ctsio->kern_sg_entries = 0;
+		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+		ctsio->be_move_done = ctl_config_move_done;
+		ctl_datamove((union ctl_io *)ctsio);
+
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
+	lendata = scsi_2btoul(data->length);
+	if (lendata < sizeof(struct scsi_write_using_token_data) - 2 +
+	    sizeof(struct scsi_range_desc)) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
+		goto done;
+	}
+	lendesc = scsi_2btoul(data->range_descriptor_length);
+	if (lendesc < sizeof(struct scsi_range_desc) ||
+	    len < sizeof(struct scsi_write_using_token_data) + lendesc ||
+	    lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+		    /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0);
+		goto done;
+	}
+/*
+	printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
+	    scsi_4btoul(cdb->list_identifier),
+	    data->flags, scsi_8btou64(data->offset_into_rod),
+	    scsi_2btoul(data->range_descriptor_length));
+*/
+
+	/* Validate list of ranges */
+	if (tpc_check_ranges_l(&data->desc[0],
+	    scsi_2btoul(data->range_descriptor_length) /
+	    sizeof(struct scsi_range_desc),
+	    lun->be_lun->maxlba, &lba) != 0) {
+		ctl_set_lba_out_of_range(ctsio, lba);
+		goto done;
+	}
+	if (tpc_check_ranges_x(&data->desc[0],
+	    scsi_2btoul(data->range_descriptor_length) /
+	    sizeof(struct scsi_range_desc)) != 0) {
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
+		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		goto done;
+	}
+
+	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
+	list->service_action = cdb->service_action;
+	list->init_port = ctsio->io_hdr.nexus.targ_port;
+	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+	list->list_id = scsi_4btoul(cdb->list_identifier);
+	list->flags = data->flags;
+	list->params = ctsio->kern_data_ptr;
+	list->range = &data->desc[0];
+	list->nrange = scsi_2btoul(data->range_descriptor_length) /
+	    sizeof(struct scsi_range_desc);
+	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
+	list->ctsio = ctsio;
+	list->lun = lun;
+	mtx_lock(&lun->lun_lock);
+	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
+	if (tlist != NULL && !tlist->completed) {
+		mtx_unlock(&lun->lun_lock);
+		free(list, M_CTL);
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		goto done;
+	}
+	if (tlist != NULL) {
+		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
+		free(tlist, M_CTL);
+	}
+	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
+	mtx_unlock(&lun->lun_lock);
+
+	/* Block device zero ROD token -> no token. */
+	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
+		tpc_process(list);
+		return (CTL_RETVAL_COMPLETE);
+	}
+
+	mtx_lock(&softc->tpc_lock);
+	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
+		if (memcmp(token->token, data->rod_token,
+		    sizeof(data->rod_token)) == 0)
+			break;
+	}
+	if (token != NULL) {
+		token->active++;
+		list->token = token;
+		if (data->flags & EC_WUT_DEL_TKN)
+			token->timeout = 0;
+	}
+	mtx_unlock(&softc->tpc_lock);
+	if (token == NULL) {
+		mtx_lock(&lun->lun_lock);
+		TAILQ_REMOVE(&lun->tpc_lists, list, links);
+		mtx_unlock(&lun->lun_lock);
+		free(list, M_CTL);
+		ctl_set_sense(ctsio, /*current_error*/ 1,
+		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
+		goto done;
+	}
+
+	tpc_process(list);
+	return (CTL_RETVAL_COMPLETE);
+
+done:
+	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
+		free(ctsio->kern_data_ptr, M_CTL);
+		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
+	}
+	ctl_done((union ctl_io *)ctsio);
+	return (CTL_RETVAL_COMPLETE);
+}
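+
+/*
+ * Token resolution above, in short: a token whose leading type field
+ * is ROD_TYPE_BLOCK_ZERO needs no lookup and zeroes the ranges via
+ * tpc_process_zero_wut(); any other token must match a live entry in
+ * softc->tpc_tokens byte for byte, which then has its reference count
+ * bumped (and its timeout cleared when EC_WUT_DEL_TKN requests
+ * deletion on completion); otherwise the command fails with ILLEGAL
+ * REQUEST sense, ASC/ASCQ 0x23/0x04.
+ */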
+
+int
+ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
+{
+	struct ctl_lun *lun = CTL_LUN(ctsio);
+	struct scsi_receive_rod_token_information *cdb;
+	struct scsi_receive_copy_status_lid4_data *data;
+	struct tpc_list *list;
+	struct tpc_list list_copy;
+	uint8_t *ptr;
+	int retval;
+	int alloc_len, total_len, token_len;
+	uint32_t list_id;
+
+	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
+
+	cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
+	retval = CTL_RETVAL_COMPLETE;
+
+	list_id = scsi_4btoul(cdb->list_identifier);
+	mtx_lock(&lun->lun_lock);
+	list = tpc_find_list(lun, list_id,
+	    ctl_get_initindex(&ctsio->io_hdr.nexus));
+	if (list == NULL) {
+		mtx_unlock(&lun->lun_lock);
+		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
+		    /*bit*/ 0);
+		ctl_done((union ctl_io *)ctsio);
+		return (retval);
+	}
+	list_copy = *list;
+	if (list->completed) {
+		TAILQ_REMOVE(&lun->tpc_lists, list, links);
+		free(list, M_CTL);
+	}
+	mtx_unlock(&lun->lun_lock);
+
+	token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
+	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
+	alloc_len = scsi_4btoul(cdb->length);
+
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
+	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
+	    4 + token_len, data->available_data);
+	data->response_to_service_action = list_copy.service_action;
+	if (list_copy.completed) {
+		if (list_copy.error)
+			data->copy_command_status = RCS_CCS_ERROR;
+		else if (list_copy.abort)
+			data->copy_command_status = RCS_CCS_ABORTED;
+		else
+			data->copy_command_status = RCS_CCS_COMPLETED;
+	} else
+		data->copy_command_status = RCS_CCS_INPROG_FG;
+	scsi_ulto2b(list_copy.curops, data->operation_counter);
+	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
+	data->transfer_count_units = RCS_TC_LBAS;
+	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
+	scsi_ulto2b(list_copy.curseg, data->segments_processed);
+	data->length_of_the_sense_data_field = list_copy.sense_len;
+	data->sense_data_length = list_copy.sense_len;
+	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
+
+	ptr = &data->sense_data[data->length_of_the_sense_data_field];
+	scsi_ulto4b(token_len, &ptr[0]);
+	if (list_copy.res_token_valid) {
+		scsi_ulto2b(0, &ptr[4]);
+		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
+	}
+/*
+	printf("RRTI(list=%u) valid=%d\n",
+	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
+*/
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (retval);
+}
+
+int
+ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
+{
+	struct ctl_softc *softc = CTL_SOFTC(ctsio);
+	struct scsi_report_all_rod_tokens *cdb;
+	struct scsi_report_all_rod_tokens_data *data;
+	struct tpc_token *token;
+	int retval;
+	int alloc_len, total_len, tokens, i;
+
+	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
+
+	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
+	retval = CTL_RETVAL_COMPLETE;
+
+	tokens = 0;
+	mtx_lock(&softc->tpc_lock);
+	TAILQ_FOREACH(token, &softc->tpc_tokens, links)
+		tokens++;
+	mtx_unlock(&softc->tpc_lock);
+	if (tokens > 512)
+		tokens = 512;
+
+	total_len = sizeof(*data) + tokens * 96;
+	alloc_len = scsi_4btoul(cdb->length);
+
+	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+	ctsio->kern_sg_entries = 0;
+	ctsio->kern_rel_offset = 0;
+	ctsio->kern_data_len = min(total_len, alloc_len);
+	ctsio->kern_total_len = ctsio->kern_data_len;
+
+	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
+	i = 0;
+	mtx_lock(&softc->tpc_lock);
+	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
+		if (i >= tokens)
+			break;
+		memcpy(&data->rod_management_token_list[i * 96],
+		    token->token, 96);
+		i++;
+	}
+	mtx_unlock(&softc->tpc_lock);
+	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
+/*
+	printf("RART tokens=%d\n", i);
+*/
+	ctl_set_success(ctsio);
+	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+	ctsio->be_move_done = ctl_config_move_done;
+	ctl_datamove((union ctl_io *)ctsio);
+	return (retval);
+}
+


Property changes on: trunk/sys/cam/ctl/ctl_tpc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/cam/ctl/ctl_tpc.h
===================================================================
--- trunk/sys/cam/ctl/ctl_tpc.h	                        (rev 0)
+++ trunk/sys/cam/ctl/ctl_tpc.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -0,0 +1,40 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Alexander Motin <mav at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_tpc.h 279004 2015-02-19 14:36:03Z mav $
+ */
+
+#ifndef	_CTL_TPC_H
+#define	_CTL_TPC_H 1
+
+void tpc_done(union ctl_io *io);
+
+uint64_t tpcl_resolve(struct ctl_softc *softc, int init_port,
+    struct scsi_ec_cscd *cscd, uint32_t *ss, uint32_t *ps, uint32_t *pso);
+union ctl_io * tpcl_alloc_io(void);
+int tpcl_queue(union ctl_io *io, uint64_t lun);
+
+#endif	/* _CTL_TPC_H */


Property changes on: trunk/sys/cam/ctl/ctl_tpc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/cam/ctl/ctl_tpc_local.c
===================================================================
--- trunk/sys/cam/ctl/ctl_tpc_local.c	                        (rev 0)
+++ trunk/sys/cam/ctl/ctl_tpc_local.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -0,0 +1,333 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Alexander Motin <mav at FreeBSD.org>
+ * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_tpc_local.c 313369 2017-02-07 01:56:26Z mav $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_tpc.h>
+#include <cam/ctl/ctl_error.h>
+
+struct tpcl_softc {
+	struct ctl_port port;
+	int cur_tag_num;
+};
+
+static struct tpcl_softc tpcl_softc;
+
+static int tpcl_init(void);
+static int tpcl_shutdown(void);
+static void tpcl_datamove(union ctl_io *io);
+static void tpcl_done(union ctl_io *io);
+
+
+static struct ctl_frontend tpcl_frontend =
+{
+	.name = "tpc",
+	.init = tpcl_init,
+	.shutdown = tpcl_shutdown,
+};
+CTL_FRONTEND_DECLARE(ctltpc, tpcl_frontend);
+
+static int
+tpcl_init(void)
+{
+	struct tpcl_softc *tsoftc = &tpcl_softc;
+	struct ctl_port *port;
+	struct scsi_transportid_spi *tid;
+	int error, len;
+
+	memset(tsoftc, 0, sizeof(*tsoftc));
+
+	port = &tsoftc->port;
+	port->frontend = &tpcl_frontend;
+	port->port_type = CTL_PORT_INTERNAL;
+	port->num_requested_ctl_io = 100;
+	port->port_name = "tpc";
+	port->fe_datamove = tpcl_datamove;
+	port->fe_done = tpcl_done;
+	port->max_targets = 1;
+	port->max_target_id = 0;
+	port->targ_port = -1;
+	port->max_initiators = 1;
+
+	if ((error = ctl_port_register(port)) != 0) {
+		printf("%s: tpc port registration failed\n", __func__);
+		return (error);
+	}
+
+	len = sizeof(struct scsi_transportid_spi);
+	port->init_devid = malloc(sizeof(struct ctl_devid) + len,
+	    M_CTL, M_WAITOK | M_ZERO);
+	port->init_devid->len = len;
+	tid = (struct scsi_transportid_spi *)port->init_devid->data;
+	tid->format_protocol = SCSI_TRN_SPI_FORMAT_DEFAULT | SCSI_PROTO_SPI;
+	scsi_ulto2b(0, tid->scsi_addr);
+	scsi_ulto2b(port->targ_port, tid->rel_trgt_port_id);
+
+	ctl_port_online(port);
+	return (0);
+}
+
+static int
+tpcl_shutdown(void)
+{
+	struct tpcl_softc *tsoftc = &tpcl_softc;
+	struct ctl_port *port = &tsoftc->port;
+	int error;
+
+	ctl_port_offline(port);
+	if ((error = ctl_port_deregister(port)) != 0)
+		printf("%s: tpc port deregistration failed\n", __func__);
+	return (error);
+}
+
+static void
+tpcl_datamove(union ctl_io *io)
+{
+	struct ctl_sg_entry *ext_sglist, *kern_sglist;
+	struct ctl_sg_entry ext_entry, kern_entry;
+	int ext_sg_entries, kern_sg_entries;
+	int ext_sg_start, ext_offset;
+	int len_to_copy;
+	int kern_watermark, ext_watermark;
+	struct ctl_scsiio *ctsio;
+	int i, j;
+
+	CTL_DEBUG_PRINT(("%s\n", __func__));
+
+	ctsio = &io->scsiio;
+
+	/*
+	 * If there is no external data pointer, we're probably doing a
+	 * BBR read and don't actually need to transfer the data; bail
+	 * out and effectively bit-bucket it.
+	 */
+	if (ctsio->ext_data_ptr == NULL)
+		goto bailout;
+
+	/*
+	 * To simplify things here, if we have a single buffer, stick it in
+	 * an S/G entry and just make it a single-entry S/G list.
+	 */
+	if (ctsio->ext_sg_entries > 0) {
+		int len_seen;
+
+		ext_sglist = (struct ctl_sg_entry *)ctsio->ext_data_ptr;
+		ext_sg_entries = ctsio->ext_sg_entries;
+		ext_sg_start = 0;
+		ext_offset = 0;
+		len_seen = 0;
+		for (i = 0; i < ext_sg_entries; i++) {
+			if ((len_seen + ext_sglist[i].len) >=
+			     ctsio->ext_data_filled) {
+				ext_sg_start = i;
+				ext_offset = ctsio->ext_data_filled - len_seen;
+				break;
+			}
+			len_seen += ext_sglist[i].len;
+		}
+	} else {
+		ext_sglist = &ext_entry;
+		ext_sglist->addr = ctsio->ext_data_ptr;
+		ext_sglist->len = ctsio->ext_data_len;
+		ext_sg_entries = 1;
+		ext_sg_start = 0;
+		ext_offset = ctsio->ext_data_filled;
+	}
+
+	if (ctsio->kern_sg_entries > 0) {
+		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
+		kern_sg_entries = ctsio->kern_sg_entries;
+	} else {
+		kern_sglist = &kern_entry;
+		kern_sglist->addr = ctsio->kern_data_ptr;
+		kern_sglist->len = ctsio->kern_data_len;
+		kern_sg_entries = 1;
+	}
+
+	kern_watermark = 0;
+	ext_watermark = ext_offset;
+	for (i = ext_sg_start, j = 0;
+	     i < ext_sg_entries && j < kern_sg_entries;) {
+		uint8_t *ext_ptr, *kern_ptr;
+
+		len_to_copy = min(ext_sglist[i].len - ext_watermark,
+				  kern_sglist[j].len - kern_watermark);
+
+		ext_ptr = (uint8_t *)ext_sglist[i].addr;
+		ext_ptr = ext_ptr + ext_watermark;
+		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
+			/*
+			 * XXX KDM fix this!
+			 */
+			panic("need to implement bus address support");
+#if 0
+			kern_ptr = bus_to_virt(kern_sglist[j].addr);
+#endif
+		} else
+			kern_ptr = (uint8_t *)kern_sglist[j].addr;
+		kern_ptr = kern_ptr + kern_watermark;
+
+		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+		     CTL_FLAG_DATA_IN) {
+			CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
+					 __func__, len_to_copy));
+			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
+					 kern_ptr, ext_ptr));
+			memcpy(ext_ptr, kern_ptr, len_to_copy);
+		} else {
+			CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
+					 __func__, len_to_copy));
+			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
+					 ext_ptr, kern_ptr));
+			memcpy(kern_ptr, ext_ptr, len_to_copy);
+		}
+
+		ctsio->ext_data_filled += len_to_copy;
+		ctsio->kern_data_resid -= len_to_copy;
+
+		ext_watermark += len_to_copy;
+		if (ext_sglist[i].len == ext_watermark) {
+			i++;
+			ext_watermark = 0;
+		}
+
+		kern_watermark += len_to_copy;
+		if (kern_sglist[j].len == kern_watermark) {
+			j++;
+			kern_watermark = 0;
+		}
+	}
+
+	CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
+			 __func__, ext_sg_entries, kern_sg_entries));
+	CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
+			 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
+
+bailout:
+	io->scsiio.be_move_done(io);
+}
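+
+/*
+ * The loop above is a two-cursor scatter/gather walk: ext_watermark
+ * and kern_watermark track progress within the current external and
+ * kernel entries, every step copies min(remaining external, remaining
+ * kernel) bytes, and whichever entry drains first advances its index
+ * and resets its watermark.  Copying a 4096-byte kernel entry into
+ * external entries of 1024 and 3072 bytes, say, takes two steps:
+ * 1024 bytes (draining ext[0]), then 3072 bytes (draining both).
+ */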
+
+static void
+tpcl_done(union ctl_io *io)
+{
+
+	tpc_done(io);
+}
+
+uint64_t
+tpcl_resolve(struct ctl_softc *softc, int init_port,
+    struct scsi_ec_cscd *cscd, uint32_t *ss, uint32_t *ps, uint32_t *pso)
+{
+	struct scsi_ec_cscd_id *cscdid;
+	struct ctl_port *port;
+	struct ctl_lun *lun;
+	uint64_t lunid = UINT64_MAX;
+
+	if (cscd->type_code != EC_CSCD_ID ||
+	    (cscd->luidt_pdt & EC_LUIDT_MASK) != EC_LUIDT_LUN ||
+	    (cscd->luidt_pdt & EC_NUL) != 0)
+		return (lunid);
+
+	cscdid = (struct scsi_ec_cscd_id *)cscd;
+	mtx_lock(&softc->ctl_lock);
+	if (init_port >= 0)
+		port = softc->ctl_ports[init_port];
+	else
+		port = NULL;
+	STAILQ_FOREACH(lun, &softc->lun_list, links) {
+		if (port != NULL &&
+		    ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
+			continue;
+		if (lun->lun_devid == NULL)
+			continue;
+		if (scsi_devid_match(lun->lun_devid->data,
+		    lun->lun_devid->len, &cscdid->codeset,
+		    cscdid->length + 4) == 0) {
+			lunid = lun->lun;
+			if (ss && lun->be_lun)
+				*ss = lun->be_lun->blocksize;
+			if (ps && lun->be_lun)
+				*ps = lun->be_lun->blocksize <<
+				    lun->be_lun->pblockexp;
+			if (pso && lun->be_lun)
+				*pso = lun->be_lun->blocksize *
+				    lun->be_lun->pblockoff;
+			break;
+		}
+	}
+	mtx_unlock(&softc->ctl_lock);
+	return (lunid);
+}
+
+union ctl_io *
+tpcl_alloc_io(void)
+{
+	struct tpcl_softc *tsoftc = &tpcl_softc;
+
+	return (ctl_alloc_io(tsoftc->port.ctl_pool_ref));
+}
+
+int
+tpcl_queue(union ctl_io *io, uint64_t lun)
+{
+	struct tpcl_softc *tsoftc = &tpcl_softc;
+
+	io->io_hdr.nexus.initid = 0;
+	io->io_hdr.nexus.targ_port = tsoftc->port.targ_port;
+	io->io_hdr.nexus.targ_lun = lun;
+	io->scsiio.tag_num = atomic_fetchadd_int(&tsoftc->cur_tag_num, 1);
+	io->scsiio.ext_data_filled = 0;
+	return (ctl_queue(io));
+}
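+
+/*
+ * All copy-engine I/O is injected through the internal "tpc" port set
+ * up in tpcl_init(): the nexus is pinned to initiator 0 on that port,
+ * only the target LUN varies, and tag numbers come from a per-port
+ * atomic counter so that tagged-queueing rules still see unique tags.
+ */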


Property changes on: trunk/sys/cam/ctl/ctl_tpc_local.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Modified: trunk/sys/cam/ctl/ctl_util.c
===================================================================
--- trunk/sys/cam/ctl/ctl_util.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_util.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
  * All rights reserved.
@@ -27,7 +28,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_util.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_util.c#2 $
  */
 /*
  * CAM Target Layer SCSI library
@@ -36,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_util.c 312847 2017-01-26 21:08:58Z mav $");
 
 #ifdef _KERNEL
 #include <sys/param.h>
@@ -84,11 +85,15 @@
 	{CTL_TASK_ABORT_TASK_SET, "Abort Task Set"},
 	{CTL_TASK_CLEAR_ACA, "Clear ACA"},
 	{CTL_TASK_CLEAR_TASK_SET, "Clear Task Set"},
+	{CTL_TASK_I_T_NEXUS_RESET, "I_T Nexus Reset"},
 	{CTL_TASK_LUN_RESET, "LUN Reset"},
 	{CTL_TASK_TARGET_RESET, "Target Reset"},
 	{CTL_TASK_BUS_RESET, "Bus Reset"},
 	{CTL_TASK_PORT_LOGIN, "Port Login"},
-	{CTL_TASK_PORT_LOGOUT, "Port Logout"}
+	{CTL_TASK_PORT_LOGOUT, "Port Logout"},
+	{CTL_TASK_QUERY_TASK, "Query Task"},
+	{CTL_TASK_QUERY_TASK_SET, "Query Task Set"},
+	{CTL_TASK_QUERY_ASYNC_EVENT, "Query Async Event"}
 };
 
 void
@@ -344,6 +349,37 @@
 }
 
 void
+ctl_scsi_write_same(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
+		    uint8_t byte2, uint64_t lba, uint32_t num_blocks,
+		    ctl_tag_type tag_type, uint8_t control)
+{
+	struct ctl_scsiio *ctsio;
+	struct scsi_write_same_16 *cdb;
+
+	ctl_scsi_zero_io(io);
+
+	io->io_hdr.io_type = CTL_IO_SCSI;
+	ctsio = &io->scsiio;
+	ctsio->cdb_len = sizeof(*cdb);
+	cdb = (struct scsi_write_same_16 *)ctsio->cdb;
+	cdb->opcode = WRITE_SAME_16;
+	cdb->byte2 = byte2;
+	scsi_u64to8b(lba, cdb->addr);
+	scsi_ulto4b(num_blocks, cdb->length);
+	cdb->group = 0;
+	cdb->control = control;
+
+	io->io_hdr.io_type = CTL_IO_SCSI;
+	io->io_hdr.flags = CTL_FLAG_DATA_OUT;
+	ctsio->tag_type = tag_type;
+	ctsio->ext_data_ptr = data_ptr;
+	ctsio->ext_data_len = data_len;
+	ctsio->ext_sg_entries = 0;
+	ctsio->ext_data_filled = 0;
+	ctsio->sense_len = SSD_FULL_SIZE;
+}
+
+void
 ctl_scsi_read_capacity(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
 		       uint32_t addr, int reladr, int pmi,
 		       ctl_tag_type tag_type, uint8_t control)
@@ -454,8 +490,7 @@
 
 void
 ctl_scsi_start_stop(union ctl_io *io, int start, int load_eject, int immediate,
-		    int power_conditions, int onoffline __unused,
-		    ctl_tag_type tag_type, uint8_t control)
+    int power_conditions, ctl_tag_type tag_type, uint8_t control)
 {
 	struct scsi_start_stop_unit *cdb;
 
@@ -466,10 +501,6 @@
 	cdb->opcode = START_STOP_UNIT;
 	if (immediate)
 		cdb->byte2 |= SSS_IMMED;
-#ifdef NEEDTOPORT
-	if (onoffline)
-		cdb->byte2 |= SSS_ONOFFLINE;
-#endif
 	cdb->how = power_conditions;
 	if (load_eject)
 		cdb->how |= SSS_LOEJ;
@@ -647,7 +678,7 @@
 
 #ifndef _KERNEL
 union ctl_io *
-ctl_scsi_alloc_io(struct ctl_id initid)
+ctl_scsi_alloc_io(uint32_t initid)
 {
 	union ctl_io *io;
 
@@ -667,7 +698,6 @@
 	free(io);
 }
 
-#endif /* !_KERNEL */
 void
 ctl_scsi_zero_io(union ctl_io *io)
 {
@@ -677,11 +707,10 @@
 		return;
 
 	pool_ref = io->io_hdr.pool;
-
 	memset(io, 0, sizeof(*io));
-
 	io->io_hdr.pool = pool_ref;
 }
+#endif /* !_KERNEL */
 
 const char *
 ctl_scsi_task_string(struct ctl_taskio *taskio)
@@ -699,70 +728,65 @@
 }
 
 void
-ctl_io_error_sbuf(union ctl_io *io, struct scsi_inquiry_data *inq_data,
-		  struct sbuf *sb)
+ctl_io_sbuf(union ctl_io *io, struct sbuf *sb)
 {
-	struct ctl_status_desc *status_desc;
+	const char *task_desc;
 	char path_str[64];
-	unsigned int i;
 
-	status_desc = NULL;
-
-	for (i = 0; i < (sizeof(ctl_status_table)/sizeof(ctl_status_table[0]));
-	     i++) {
-		if ((io->io_hdr.status & CTL_STATUS_MASK) ==
-		     ctl_status_table[i].status) {
-			status_desc = &ctl_status_table[i];
-			break;
-		}
-	}
-
 	ctl_scsi_path_string(io, path_str, sizeof(path_str));
 
 	switch (io->io_hdr.io_type) {
 	case CTL_IO_SCSI:
 		sbuf_cat(sb, path_str);
-
 		ctl_scsi_command_string(&io->scsiio, NULL, sb);
-
-		sbuf_printf(sb, "\n");
-
-		sbuf_printf(sb, "%sTag: 0x%04x, Type: %d\n", path_str,
+		sbuf_printf(sb, " Tag: %#x/%d\n",
 			    io->scsiio.tag_num, io->scsiio.tag_type);
 		break;
-	case CTL_IO_TASK: {
-		const char *task_desc;
-
+	case CTL_IO_TASK:
 		sbuf_cat(sb, path_str);
-
 		task_desc = ctl_scsi_task_string(&io->taskio);
-
 		if (task_desc == NULL)
 			sbuf_printf(sb, "Unknown Task Action %d (%#x)",
-				    io->taskio.task_action,
-				    io->taskio.task_action);
+			    io->taskio.task_action, io->taskio.task_action);
 		else
 			sbuf_printf(sb, "Task Action: %s", task_desc);
-
-		sbuf_printf(sb, "\n");
-
 		switch (io->taskio.task_action) {
 		case CTL_TASK_ABORT_TASK:
-		case CTL_TASK_ABORT_TASK_SET:
-		case CTL_TASK_CLEAR_TASK_SET:
-			sbuf_printf(sb, "%sTag: 0x%04x, Type: %d\n", path_str,
-				    io->taskio.tag_num,
-				    io->taskio.tag_type);
+			sbuf_printf(sb, " Tag: %#x/%d\n",
+			    io->taskio.tag_num, io->taskio.tag_type);
 			break;
 		default:
+			sbuf_printf(sb, "\n");
 			break;
 		}
 		break;
-	}
 	default:
 		break;
 	}
+}
 
+void
+ctl_io_error_sbuf(union ctl_io *io, struct scsi_inquiry_data *inq_data,
+		  struct sbuf *sb)
+{
+	struct ctl_status_desc *status_desc;
+	char path_str[64];
+	unsigned int i;
+
+	ctl_io_sbuf(io, sb);
+
+	status_desc = NULL;
+	for (i = 0; i < (sizeof(ctl_status_table)/sizeof(ctl_status_table[0]));
+	     i++) {
+		if ((io->io_hdr.status & CTL_STATUS_MASK) ==
+		     ctl_status_table[i].status) {
+			status_desc = &ctl_status_table[i];
+			break;
+		}
+	}
+
+	ctl_scsi_path_string(io, path_str, sizeof(path_str));
+
 	sbuf_cat(sb, path_str);
 	if (status_desc == NULL)
 		sbuf_printf(sb, "CTL Status: Unknown status %#x\n",
@@ -783,6 +807,17 @@
 }
 
 char *
+ctl_io_string(union ctl_io *io, char *str, int str_len)
+{
+	struct sbuf sb;
+
+	sbuf_new(&sb, str, str_len, SBUF_FIXEDLEN);
+	ctl_io_sbuf(io, &sb);
+	sbuf_finish(&sb);
+	return (sbuf_data(&sb));
+}
+
+char *
 ctl_io_error_string(union ctl_io *io, struct scsi_inquiry_data *inq_data,
 		    char *str, int str_len)
 {
@@ -789,11 +824,8 @@
 	struct sbuf sb;
 
 	sbuf_new(&sb, str, str_len, SBUF_FIXEDLEN);
-
 	ctl_io_error_sbuf(io, inq_data, &sb);
-
 	sbuf_finish(&sb);
-
 	return (sbuf_data(&sb));
 }
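
Both string helpers format through an sbuf bound to a caller-supplied buffer
with SBUF_FIXEDLEN, so output is truncated rather than heap-allocated -- safe
in error paths.  A usage sketch (passing NULL inquiry data, which in-kernel
callers appear to do, is an assumption here):

    char msg[256];

    printf("%s", ctl_io_error_string(io, /* inq_data */ NULL,
        msg, sizeof(msg)));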
 
@@ -800,30 +832,53 @@
 #ifdef _KERNEL
 
 void
+ctl_io_print(union ctl_io *io)
+{
+	char str[512];
+
+	printf("%s", ctl_io_string(io, str, sizeof(str)));
+}
+
+void
 ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data)
 {
 	char str[512];
-#ifdef NEEDTOPORT
-	char *message;
-	char *line;
 
-	message = io_error_string(io, inq_data, str, sizeof(str));
-
-	for (line = strsep(&message, "\n"); line != NULL;
-	     line = strsep(&message, "\n")) {
-		csevent_log(CSC_CTL | CSC_SHELF_SW | CTL_ERROR_REPORT,
-                            csevent_LogType_Trace,
-                            csevent_Severity_Information,
-                            csevent_AlertLevel_Green,
-                            csevent_FRU_Firmware,
-                            csevent_FRU_Unknown, "%s", line);
-	}
-#else
 	printf("%s", ctl_io_error_string(io, inq_data, str, sizeof(str)));
-#endif
 
 }
 
+void
+ctl_data_print(union ctl_io *io)
+{
+	char str[128];
+	char path_str[64];
+	struct sbuf sb;
+	int i, j, len;
+
+	if (io->io_hdr.io_type != CTL_IO_SCSI)
+		return;
+	if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
+		return;
+	if (io->scsiio.ext_sg_entries > 0)	/* XXX: Implement */
+		return;
+	ctl_scsi_path_string(io, path_str, sizeof(path_str));
+	len = min(io->scsiio.kern_data_len, 4096);
+	for (i = 0; i < len; ) {
+		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
+		sbuf_cat(&sb, path_str);
+		sbuf_printf(&sb, " %#6x:%04x:", io->scsiio.tag_num, i);
+		for (j = 0; j < 16 && i < len; i++, j++) {
+			if (j == 8)
+				sbuf_cat(&sb, " ");
+			sbuf_printf(&sb, " %02x", io->scsiio.kern_data_ptr[i]);
+		}
+		sbuf_cat(&sb, "\n");
+		sbuf_finish(&sb);
+		printf("%s", sbuf_data(&sb));
+	}
+}
+
 #else /* _KERNEL */
 
 void
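
The new ctl_data_print() above dumps at most the first 4096 bytes of a SCSI
I/O's kernel data buffer, sixteen bytes per row with an extra gap after the
eighth byte, each row prefixed by the nexus path string, the tag number and
the row offset.  A hypothetical row (path, tag and bytes invented purely for
illustration):

    (2:3:0:0): 0x1234:0010: 00 01 02 03 04 05 06 07  08 09 0a 0b 0c 0d 0e 0f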

Modified: trunk/sys/cam/ctl/ctl_util.h
===================================================================
--- trunk/sys/cam/ctl/ctl_util.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/ctl_util.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2003 Silicon Graphics International Corp.
  * All rights reserved.
@@ -27,8 +28,8 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: ctl_util.h,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
- * $MidnightBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_util.h#2 $
+ * $FreeBSD: stable/10/sys/cam/ctl/ctl_util.h 312847 2017-01-26 21:08:58Z mav $
  */
 /*
  * CAM Target Layer SCSI library interface
@@ -61,6 +62,10 @@
 			 int minimum_cdb_size, uint64_t lba,
 			 uint32_t num_blocks, ctl_tag_type tag_type,
 			 uint8_t control);
+void ctl_scsi_write_same(union ctl_io *io, uint8_t *data_ptr,
+			 uint32_t data_len, uint8_t byte2,
+			 uint64_t lba, uint32_t num_blocks,
+			 ctl_tag_type tag_type, uint8_t control);
 void ctl_scsi_read_capacity(union ctl_io *io, uint8_t *data_ptr,
 			    uint32_t data_len, uint32_t addr, int reladr,
 			    int pmi, ctl_tag_type tag_type, uint8_t control);
@@ -73,7 +78,7 @@
 			 int minimum_cdb_size, ctl_tag_type tag_type,
 			 uint8_t control);
 void ctl_scsi_start_stop(union ctl_io *io, int start, int load_eject,
-			 int immediate, int power_conditions, int onoffline,
+			 int immediate, int power_conditions,
 			 ctl_tag_type tag_type, uint8_t control);
 void ctl_scsi_sync_cache(union ctl_io *io, int immed, int reladr,
 			 int minimum_cdb_size, uint64_t starting_lba,
@@ -90,24 +95,27 @@
 			     uint32_t data_len, uint8_t action, 
 			     ctl_tag_type tag_type, uint8_t control);
 #ifndef _KERNEL
-union ctl_io *ctl_scsi_alloc_io(struct ctl_id initid);
+union ctl_io *ctl_scsi_alloc_io(uint32_t initid);
 void ctl_scsi_free_io(union ctl_io *io);
+void ctl_scsi_zero_io(union ctl_io *io);
+#else
+#define	ctl_scsi_zero_io(io)	ctl_zero_io(io)
 #endif /* !_KERNEL */
-void ctl_scsi_zero_io(union ctl_io *io);
 const char *ctl_scsi_task_string(struct ctl_taskio *taskio);
+void ctl_io_sbuf(union ctl_io *io, struct sbuf *sb);
 void ctl_io_error_sbuf(union ctl_io *io,
 		       struct scsi_inquiry_data *inq_data, struct sbuf *sb);
+char *ctl_io_string(union ctl_io *io, char *str, int str_len);
 char *ctl_io_error_string(union ctl_io *io,
 			  struct scsi_inquiry_data *inq_data, char *str,
 			  int str_len);
 #ifdef _KERNEL
-
+void ctl_io_print(union ctl_io *io);
 void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data);
+void ctl_data_print(union ctl_io *io);
 #else /* _KERNEL */
-void
-ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data,
+void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data,
 		   FILE *ofile);
-
 #endif /* _KERNEL */
 
 __END_DECLS

Modified: trunk/sys/cam/ctl/scsi_ctl.c
===================================================================
--- trunk/sys/cam/ctl/scsi_ctl.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/ctl/scsi_ctl.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,5 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2015 Alexander Motin <mav at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,7 +29,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: scsi_ctl.c,v 1.2 2012-11-23 06:04:01 laffer1 Exp $
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
  */
 /*
  * Peripheral driver interface between CAM and CTL (CAM Target Layer).
@@ -36,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/scsi_ctl.c 315939 2017-03-25 11:45:19Z mav $");
 
 #include <sys/param.h>
 #include <sys/queue.h>
@@ -52,6 +54,7 @@
 #include <sys/sysctl.h>
 #include <sys/types.h>
 #include <sys/systm.h>
+#include <sys/taskqueue.h>
 #include <machine/bus.h>
 
 #include <cam/cam.h>
@@ -72,16 +75,15 @@
 #include <cam/ctl/ctl_util.h>
 #include <cam/ctl/ctl_error.h>
 
-typedef enum {
-	CTLFE_CCB_DEFAULT	= 0x00,
-	CTLFE_CCB_WAITING 	= 0x01
-} ctlfe_ccb_types;
-
 struct ctlfe_softc {
-	struct ctl_frontend fe;
-	path_id_t path_id;
+	struct ctl_port	port;
+	path_id_t	path_id;
+	target_id_t	target_id;
+	uint32_t	hba_misc;
+	u_int		maxio;
 	struct cam_sim *sim;
-	char port_name[DEV_IDLEN];
+	char		port_name[DEV_IDLEN];
+	struct mtx	lun_softc_mtx;
 	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
 	STAILQ_ENTRY(ctlfe_softc) links;
 };
@@ -89,11 +91,6 @@
 STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
 struct mtx ctlfe_list_mtx;
 static char ctlfe_mtx_desc[] = "ctlfelist";
-static int ctlfe_dma_enabled = 1;
-#ifdef CTLFE_INIT_ENABLE
-static int ctlfe_max_targets = 1;
-static int ctlfe_num_targets = 0;
-#endif
 
 typedef enum {
 	CTLFE_LUN_NONE		= 0x00,
@@ -104,17 +101,14 @@
 	struct ctlfe_softc *parent_softc;
 	struct cam_periph *periph;
 	ctlfe_lun_flags flags;
-	struct callout dma_callout;
-	uint64_t ccbs_alloced;
-	uint64_t ccbs_freed;
-	uint64_t ctios_sent;
-	uint64_t ctios_returned;
-	uint64_t atios_sent;
-	uint64_t atios_returned;
-	uint64_t inots_sent;
-	uint64_t inots_returned;
-	/* bus_dma_tag_t dma_tag; */
-	TAILQ_HEAD(, ccb_hdr) work_queue;
+	int	 ctios_sent;		/* Number of active CTIOs */
+	int	 refcount;		/* Number of active xpt_action() */
+	int	 atios_alloced;		/* Number of ATIOs not freed */
+	int	 inots_alloced;		/* Number of INOTs not freed */
+	struct task	refdrain_task;
+	STAILQ_HEAD(, ccb_hdr) work_queue;
+	LIST_HEAD(, ccb_hdr) atio_list;	/* List of ATIOs queued to SIM. */
+	LIST_HEAD(, ccb_hdr) inot_list;	/* List of INOTs queued to SIM. */
 	STAILQ_ENTRY(ctlfe_lun_softc) links;
 };
 
@@ -123,12 +117,9 @@
 	CTLFE_CMD_PIECEWISE	= 0x01
 } ctlfe_cmd_flags;
 
-/*
- * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h.
- * Currently that is 600 bytes.
- */
-struct ctlfe_lun_cmd_info {
+struct ctlfe_cmd_info {
 	int cur_transfer_index;
+	size_t cur_transfer_off;
 	ctlfe_cmd_flags flags;
 	/*
 	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
@@ -135,7 +126,8 @@
 	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
 	 * i386 and 512 bytes on amd64.
 	 */
-	bus_dma_segment_t cam_sglist[32];
+#define CTLFE_MAX_SEGS	32
+	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];
 };
 
 /*
@@ -159,12 +151,9 @@
 #define	CTLFE_IN_PER_LUN	1024
 
 /*
- * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
- * status to the initiator.  The SIM is expected to have its own timeouts,
- * so we're not putting this timeout around the CCB execution time.  The
- * SIM should timeout and let us know if it has an issue.
+ * Timeout (in seconds) on a CTIO CCB doing DMA or sending status.
  */
-#define	CTLFE_DMA_TIMEOUT	60
+#define	CTLFE_TIMEOUT	5
 
 /*
  * Turn this on to enable extra debugging prints.
@@ -173,29 +162,19 @@
 #define	CTLFE_DEBUG
 #endif
 
-/*
- * Use randomly assigned WWNN/WWPN values.  This is to work around an issue
- * in the FreeBSD initiator that makes it unable to rescan the target if
- * the target gets rebooted and the WWNN/WWPN stay the same.
- */
-#if 0
-#define	RANDOM_WWNN
-#endif
-
-SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW,
-	   &ctlfe_dma_enabled, 0, "DMA enabled");
 MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
 
-#define	ccb_type	ppriv_field0
-/* This is only used in the ATIO */
-#define	io_ptr		ppriv_ptr1
+#define	io_ptr		ppriv_ptr0
 
 /* This is only used in the CTIO */
 #define	ccb_atio	ppriv_ptr1
 
-int			ctlfeinitialize(void);
-void			ctlfeshutdown(void);
-static periph_init_t	ctlfeinit;
+#define PRIV_CCB(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0])
+#define PRIV_INFO(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1])
+
+static int		ctlfeinitialize(void);
+static int		ctlfeshutdown(void);
+static periph_init_t	ctlfeperiphinit;
 static void		ctlfeasync(void *callback_arg, uint32_t code,
 				   struct cam_path *path, void *arg);
 static periph_ctor_t	ctlferegister;
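
With the old ccb_type flag gone, io_ptr moves to ppriv_ptr0, and the two
CTL_PRIV_FRONTEND slots give the frontend a two-way linkage: slot 0 holds the
CCB behind a ctl_io, slot 1 the per-command S/G bookkeeping.  The round trip,
using names from this file:

    union ctl_io *io = done_ccb->ccb_h.io_ptr;	/* CCB -> ctl_io */
    union ccb *ccb = PRIV_CCB(io);		/* ctl_io -> CCB */
    struct ctlfe_cmd_info *ci = PRIV_INFO(io);	/* S/G state */
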
@@ -208,68 +187,59 @@
 static void 		ctlfe_onoffline(void *arg, int online);
 static void 		ctlfe_online(void *arg);
 static void 		ctlfe_offline(void *arg);
-static int 		ctlfe_targ_enable(void *arg, struct ctl_id targ_id);
-static int 		ctlfe_targ_disable(void *arg, struct ctl_id targ_id);
-static int 		ctlfe_lun_enable(void *arg, struct ctl_id targ_id,
-					 int lun_id);
-static int 		ctlfe_lun_disable(void *arg, struct ctl_id targ_id,
-					  int lun_id);
+static int 		ctlfe_lun_enable(void *arg, int lun_id);
+static int 		ctlfe_lun_disable(void *arg, int lun_id);
 static void		ctlfe_dump_sim(struct cam_sim *sim);
 static void		ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
-static void		ctlfe_dma_timeout(void *arg);
-static void 		ctlfe_datamove_done(union ctl_io *io);
+static void 		ctlfe_datamove(union ctl_io *io);
+static void 		ctlfe_done(union ctl_io *io);
 static void 		ctlfe_dump(void);
+static void		ctlfe_free_ccb(struct cam_periph *periph,
+			    union ccb *ccb);
+static void		ctlfe_requeue_ccb(struct cam_periph *periph,
+			    union ccb *ccb, int unlock);
 
 static struct periph_driver ctlfe_driver =
 {
-	ctlfeinit, "ctl",
-	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0
+	ctlfeperiphinit, "ctl",
+	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
+	CAM_PERIPH_DRV_EARLY
 };
 
-static int ctlfe_module_event_handler(module_t, int /*modeventtype_t*/, void *);
-
-/*
- * We're not using PERIPHDRIVER_DECLARE(), because it runs at SI_SUB_DRIVERS,
- * and that happens before CTL gets initialised.
- */
-static moduledata_t ctlfe_moduledata = {
-	"ctlfe",
-	ctlfe_module_event_handler,
-	NULL
+static struct ctl_frontend ctlfe_frontend =
+{
+	.name = "camtgt",
+	.init = ctlfeinitialize,
+	.fe_dump = ctlfe_dump,
+	.shutdown = ctlfeshutdown,
 };
+CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);
 
-DECLARE_MODULE(ctlfe, ctlfe_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
-MODULE_VERSION(ctlfe, 1);
-MODULE_DEPEND(ctlfe, ctl, 1, 1, 1);
-MODULE_DEPEND(ctlfe, cam, 1, 1, 1);
-
-extern struct ctl_softc *control_softc;
-extern int ctl_disable;
-
-void
+static int
 ctlfeshutdown(void)
 {
-	return;
+
+	/* CAM does not support periph driver unregister now. */
+	return (EBUSY);
 }
 
-void
-ctlfeinit(void)
+static int
+ctlfeinitialize(void)
 {
-	cam_status status;
 
-	/* Don't initialize if we're disabled */
-	if (ctl_disable != 0)
-		return;
-
 	STAILQ_INIT(&ctlfe_softc_list);
-
 	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
+	periphdriver_register(&ctlfe_driver);
+	return (0);
+}
 
-	KASSERT(control_softc != NULL, ("CTL is not initialized!"));
+static void
+ctlfeperiphinit(void)
+{
+	cam_status status;
 
 	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
 				    AC_CONTRACT, ctlfeasync, NULL, NULL);
-
 	if (status != CAM_REQ_CMP) {
 		printf("ctl: Failed to attach async callback due to CAM "
 		       "status 0x%x!\n", status);
@@ -276,29 +246,22 @@
 	}
 }
 
-static int
-ctlfe_module_event_handler(module_t mod, int what, void *arg)
-{
-
-	switch (what) {
-	case MOD_LOAD:
-		periphdriver_register(&ctlfe_driver);
-		return (0);
-	case MOD_UNLOAD:
-		return (EBUSY);
-	default:
-		return (EOPNOTSUPP);
-	}
-}
-
 static void
 ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
 {
+	struct ctlfe_softc *softc;
 
 #ifdef CTLFEDEBUG
 	printf("%s: entered\n", __func__);
 #endif
 
+	mtx_lock(&ctlfe_list_mtx);
+	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
+		if (softc->path_id == xpt_path_path_id(path))
+			break;
+	}
+	mtx_unlock(&ctlfe_list_mtx);
+
 	/*
 	 * When a new path gets registered, and it is capable of target
 	 * mode, go ahead and attach.  Later on, we may need to be more
@@ -306,8 +269,7 @@
  	 */
 	switch (code) {
 	case AC_PATH_REGISTERED: {
-		struct ctl_frontend *fe;
-		struct ctlfe_softc *bus_softc;
+		struct ctl_port *port;
 		struct ccb_pathinq *cpi;
 		int retval;
 
@@ -322,129 +284,87 @@
 			break;
 		}
 
-#ifdef CTLFE_INIT_ENABLE
-		if (ctlfe_num_targets >= ctlfe_max_targets) {
-			union ccb *ccb;
-			struct cam_sim *sim;
-
-			ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
-						  M_NOWAIT | M_ZERO);
-			if (ccb == NULL) {
-				printf("%s: unable to malloc CCB!\n", __func__);
-				return;
-			}
-			xpt_setup_ccb(&ccb->ccb_h, cpi->ccb_h.path,
-				      CAM_PRIORITY_NONE);
-
-			sim = xpt_path_sim(cpi->ccb_h.path);
-
-			ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
-			ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
-			ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;
-
-			/* We should hold the SIM lock here */
-			mtx_assert(sim->mtx, MA_OWNED);
-
-			xpt_action(ccb);
-
-			if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
-			     CAM_REQ_CMP) {
-				printf("%s: SIM %s%d (path id %d) initiator "
-				       "enable failed with status %#x\n",
-				       __func__, cpi->dev_name,
-				       cpi->unit_number, cpi->ccb_h.path_id,
-				       ccb->ccb_h.status);
-			} else {
-				printf("%s: SIM %s%d (path id %d) initiator "
-				       "enable succeeded\n",
-				       __func__, cpi->dev_name,
-				       cpi->unit_number, cpi->ccb_h.path_id);
-			}
-
-			free(ccb, M_TEMP);
-
+		if (softc != NULL) {
+#ifdef CTLFEDEBUG
+			printf("%s: CTL port for CAM path %u already exists\n",
+			       __func__, xpt_path_path_id(path));
+#endif
 			break;
-		} else {
-			ctlfe_num_targets++;
 		}
 
-		printf("%s: ctlfe_num_targets = %d\n", __func__,
-		       ctlfe_num_targets);
-#endif /* CTLFE_INIT_ENABLE */
-
 		/*
 		 * We're in an interrupt context here, so we have to
 		 * use M_NOWAIT.  Of course this means trouble if we
 		 * can't allocate memory.
 		 */
-		bus_softc = malloc(sizeof(*bus_softc), M_CTLFE,
-				   M_NOWAIT | M_ZERO);
-		if (bus_softc == NULL) {
+		softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO);
+		if (softc == NULL) {
 			printf("%s: unable to malloc %zd bytes for softc\n",
-			       __func__, sizeof(*bus_softc));
+			       __func__, sizeof(*softc));
 			return;
 		}
 
-		bus_softc->path_id = cpi->ccb_h.path_id;
-		bus_softc->sim = xpt_path_sim(cpi->ccb_h.path);
-		STAILQ_INIT(&bus_softc->lun_softc_list);
+		softc->path_id = cpi->ccb_h.path_id;
+		softc->target_id = cpi->initiator_id;
+		softc->sim = xpt_path_sim(path);
+		softc->hba_misc = cpi->hba_misc;
+		if (cpi->maxio != 0)
+			softc->maxio = cpi->maxio;
+		else
+			softc->maxio = DFLTPHYS;
+		mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF);
+		STAILQ_INIT(&softc->lun_softc_list);
 
-		fe = &bus_softc->fe;
+		port = &softc->port;
+		port->frontend = &ctlfe_frontend;
 
 		/*
 		 * XXX KDM should we be more accurate here ?
 		 */
 		if (cpi->transport == XPORT_FC)
-			fe->port_type = CTL_PORT_FC;
+			port->port_type = CTL_PORT_FC;
+		else if (cpi->transport == XPORT_SAS)
+			port->port_type = CTL_PORT_SAS;
 		else
-			fe->port_type = CTL_PORT_SCSI;
+			port->port_type = CTL_PORT_SCSI;
 
 		/* XXX KDM what should the real number be here? */
-		fe->num_requested_ctl_io = 4096;
-		snprintf(bus_softc->port_name, sizeof(bus_softc->port_name),
+		port->num_requested_ctl_io = CTLFE_REQ_CTL_IO;
+		snprintf(softc->port_name, sizeof(softc->port_name),
 			 "%s%d", cpi->dev_name, cpi->unit_number);
 		/*
 		 * XXX KDM it would be nice to allocate storage in the
 		 * frontend structure itself.
 	 	 */
-		fe->port_name = bus_softc->port_name;
-		fe->physical_port = cpi->unit_number;
-		fe->virtual_port = cpi->bus_id;
-		fe->port_online = ctlfe_online;
-		fe->port_offline = ctlfe_offline;
-		fe->onoff_arg = bus_softc;
-		fe->targ_enable = ctlfe_targ_enable;
-		fe->targ_disable = ctlfe_targ_disable;
-		fe->lun_enable = ctlfe_lun_enable;
-		fe->lun_disable = ctlfe_lun_disable;
-		fe->targ_lun_arg = bus_softc;
-		fe->fe_datamove = ctlfe_datamove_done;
-		fe->fe_done = ctlfe_datamove_done;
-		fe->fe_dump = ctlfe_dump;
+		port->port_name = softc->port_name;
+		port->physical_port = cpi->bus_id;
+		port->virtual_port = 0;
+		port->port_online = ctlfe_online;
+		port->port_offline = ctlfe_offline;
+		port->onoff_arg = softc;
+		port->lun_enable = ctlfe_lun_enable;
+		port->lun_disable = ctlfe_lun_disable;
+		port->targ_lun_arg = softc;
+		port->fe_datamove = ctlfe_datamove;
+		port->fe_done = ctlfe_done;
 		/*
 		 * XXX KDM the path inquiry doesn't give us the maximum
 		 * number of targets supported.
 		 */
-		fe->max_targets = cpi->max_target;
-		fe->max_target_id = cpi->max_target;
-		
-		/*
-		 * XXX KDM need to figure out whether we're the master or
-		 * slave.
-		 */
-#ifdef CTLFEDEBUG
-		printf("%s: calling ctl_frontend_register() for %s%d\n",
-		       __func__, cpi->dev_name, cpi->unit_number);
-#endif
-		retval = ctl_frontend_register(fe, /*master_SC*/ 1);
+		port->max_targets = cpi->max_target;
+		port->max_target_id = cpi->max_target;
+		port->targ_port = -1;
+
+		retval = ctl_port_register(port);
 		if (retval != 0) {
-			printf("%s: ctl_frontend_register() failed with "
+			printf("%s: ctl_port_register() failed with "
 			       "error %d!\n", __func__, retval);
-			free(bus_softc, M_CTLFE);
+			mtx_destroy(&softc->lun_softc_mtx);
+			free(softc, M_CTLFE);
 			break;
 		} else {
 			mtx_lock(&ctlfe_list_mtx);
-			STAILQ_INSERT_TAIL(&ctlfe_softc_list, bus_softc, links);
+			STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links);
 			mtx_unlock(&ctlfe_list_mtx);
 		}
 
@@ -451,24 +371,18 @@
 		break;
 	}
 	case AC_PATH_DEREGISTERED: {
-		struct ctlfe_softc *softc = NULL;
 
-		mtx_lock(&ctlfe_list_mtx);
-		STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
-			if (softc->path_id == xpt_path_path_id(path)) {
-				STAILQ_REMOVE(&ctlfe_softc_list, softc,
-						ctlfe_softc, links);
-				break;
-			}
-		}
-		mtx_unlock(&ctlfe_list_mtx);
-
 		if (softc != NULL) {
 			/*
 			 * XXX KDM are we certain at this point that there
 			 * are no outstanding commands for this frontend?
 			 */
-			ctl_frontend_deregister(&softc->fe);
+			mtx_lock(&ctlfe_list_mtx);
+			STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc,
+			    links);
+			mtx_unlock(&ctlfe_list_mtx);
+			ctl_port_deregister(&softc->port);
+			mtx_destroy(&softc->lun_softc_mtx);
 			free(softc, M_CTLFE);
 		}
 		break;
@@ -481,8 +395,7 @@
 		switch (ac->contract_number) {
 		case AC_CONTRACT_DEV_CHG: {
 			struct ac_device_changed *dev_chg;
-			struct ctlfe_softc *softc;
-			int retval, found;
+			int retval;
 
 			dev_chg = (struct ac_device_changed *)ac->contract_data;
 
@@ -491,18 +404,7 @@
 			       xpt_path_path_id(path), dev_chg->target,
 			       (dev_chg->arrived == 0) ?  "left" : "arrived");
 
-			found = 0;
-
-			mtx_lock(&ctlfe_list_mtx);
-			STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
-				if (softc->path_id == xpt_path_path_id(path)) {
-					found = 1;
-					break;
-				}
-			}
-			mtx_unlock(&ctlfe_list_mtx);
-
-			if (found == 0) {
+			if (softc == NULL) {
 				printf("%s: CTL port for CAM path %u not "
 				       "found!\n", __func__,
 				       xpt_path_path_id(path));
@@ -509,18 +411,18 @@
 				break;
 			}
 			if (dev_chg->arrived != 0) {
-				retval = ctl_add_initiator(dev_chg->wwpn,
-					softc->fe.targ_port, dev_chg->target);
+				retval = ctl_add_initiator(&softc->port,
+				    dev_chg->target, dev_chg->wwpn, NULL);
 			} else {
-				retval = ctl_remove_initiator(
-					softc->fe.targ_port, dev_chg->target);
+				retval = ctl_remove_initiator(&softc->port,
+				    dev_chg->target);
 			}
 
-			if (retval != 0) {
+			if (retval < 0) {
 				printf("%s: could not %s port %d iid %u "
 				       "WWPN %#jx!\n", __func__,
 				       (dev_chg->arrived != 0) ? "add" :
-				       "remove", softc->fe.targ_port,
+				       "remove", softc->port.targ_port,
 				       dev_chg->target,
 				       (uintmax_t)dev_chg->wwpn);
 			}
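
ctl_add_initiator()/ctl_remove_initiator() now take the ctl_port plus the
initiator index, WWPN and an optional name, and signal failure with a negative
return -- hence the retval < 0 test above.  A sketch of the add side, assuming
(not confirmed by this diff) that a negative iid asks CTL to pick a free slot:

    int iid = ctl_add_initiator(&softc->port, /* iid */ -1,
        /* wwpn */ 0x5000c50012345678ULL, /* name */ NULL);
    if (iid < 0)
    	printf("ctl_add_initiator failed: %d\n", iid);
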
@@ -543,31 +445,39 @@
 {
 	struct ctlfe_softc *bus_softc;
 	struct ctlfe_lun_softc *softc;
-	struct cam_sim *sim;
-	union ccb en_lun_ccb;
+	union ccb ccb;
 	cam_status status;
 	int i;
 
 	softc = (struct ctlfe_lun_softc *)arg;
 	bus_softc = softc->parent_softc;
-	sim = xpt_path_sim(periph->path);
 	
-	TAILQ_INIT(&softc->work_queue);
+	STAILQ_INIT(&softc->work_queue);
+	LIST_INIT(&softc->atio_list);
+	LIST_INIT(&softc->inot_list);
 	softc->periph = periph;
-
-	callout_init_mtx(&softc->dma_callout, sim->mtx, /*flags*/ 0);
 	periph->softc = softc;
 
-	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
-	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
-	en_lun_ccb.cel.grp6_len = 0;
-	en_lun_ccb.cel.grp7_len = 0;
-	en_lun_ccb.cel.enable = 1;
-	xpt_action(&en_lun_ccb);
-	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
+	/* Increase device openings to maximum for the SIM. */
+	if (bus_softc->sim->max_tagged_dev_openings >
+	    bus_softc->sim->max_dev_openings) {
+		cam_release_devq(periph->path,
+		    /*relsim_flags*/RELSIM_ADJUST_OPENINGS,
+		    /*openings*/bus_softc->sim->max_tagged_dev_openings,
+		    /*timeout*/0,
+		    /*getcount_only*/1);
+	}
+
+	xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
+	ccb.ccb_h.func_code = XPT_EN_LUN;
+	ccb.cel.grp6_len = 0;
+	ccb.cel.grp7_len = 0;
+	ccb.cel.enable = 1;
+	xpt_action(&ccb);
+	status = (ccb.ccb_h.status & CAM_STATUS_MASK);
 	if (status != CAM_REQ_CMP) {
 		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", 
-			  __func__, en_lun_ccb.ccb_h.status);
+			  __func__, ccb.ccb_h.status);
 		return (status);
 	}
 
@@ -575,6 +485,8 @@
 
 	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
 		union ccb *new_ccb;
+		union ctl_io *new_io;
+		struct ctlfe_cmd_info *cmd_info;
 
 		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
 					      M_ZERO|M_NOWAIT);
@@ -582,13 +494,34 @@
 			status = CAM_RESRC_UNAVAIL;
 			break;
 		}
+		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
+		if (new_io == NULL) {
+			free(new_ccb, M_CTLFE);
+			status = CAM_RESRC_UNAVAIL;
+			break;
+		}
+		cmd_info = malloc(sizeof(*cmd_info), M_CTLFE,
+		    M_ZERO | M_NOWAIT);
+		if (cmd_info == NULL) {
+			ctl_free_io(new_io);
+			free(new_ccb, M_CTLFE);
+			status = CAM_RESRC_UNAVAIL;
+			break;
+		}
+		PRIV_INFO(new_io) = cmd_info;
+		softc->atios_alloced++;
+		new_ccb->ccb_h.io_ptr = new_io;
+		LIST_INSERT_HEAD(&softc->atio_list, &new_ccb->ccb_h, periph_links.le);
+
 		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
 		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
 		new_ccb->ccb_h.cbfcnp = ctlfedone;
+		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
 		xpt_action(new_ccb);
-		softc->atios_sent++;
 		status = new_ccb->ccb_h.status;
 		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
+			free(cmd_info, M_CTLFE);
+			ctl_free_io(new_io);
 			free(new_ccb, M_CTLFE);
 			break;
 		}
@@ -609,6 +542,7 @@
 
 	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
 		union ccb *new_ccb;
+		union ctl_io *new_io;
 
 		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
 					      M_ZERO|M_NOWAIT);
@@ -616,12 +550,21 @@
 			status = CAM_RESRC_UNAVAIL;
 			break;
 		}
+		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
+		if (new_io == NULL) {
+			free(new_ccb, M_CTLFE);
+			status = CAM_RESRC_UNAVAIL;
+			break;
+		}
+		softc->inots_alloced++;
+		new_ccb->ccb_h.io_ptr = new_io;
+		LIST_INSERT_HEAD(&softc->inot_list, &new_ccb->ccb_h, periph_links.le);
 
 		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
 		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
 		new_ccb->ccb_h.cbfcnp = ctlfedone;
+		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
 		xpt_action(new_ccb);
-		softc->inots_sent++;
 		status = new_ccb->ccb_h.status;
 		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
 			/*
@@ -643,6 +586,9 @@
 			  "notify CCBs, status 0x%x\n", __func__, status);
 		return (CAM_REQ_CMP_ERR);
 	}
+	mtx_lock(&bus_softc->lun_softc_mtx);
+	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
+	mtx_unlock(&bus_softc->lun_softc_mtx);
 	return (CAM_REQ_CMP);
 }
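
ctlferegister() now pairs every preallocated ATIO with a ctl_io and a
ctlfe_cmd_info (and every INOT with a ctl_io) before posting it to the SIM, so
the completion path never has to allocate; the atio_list/inot_list heads let
invalidation abort everything still queued.  When every posting succeeded, the
counters satisfy an invariant one could assert (a sketch, not code from this
commit -- partial preallocation is tolerated):

    KASSERT(softc->atios_alloced == CTLFE_ATIO_PER_LUN &&
        softc->inots_alloced == CTLFE_IN_PER_LUN,
        ("ctlfe: preallocation incomplete"));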
 
@@ -649,30 +595,43 @@
 static void
 ctlfeoninvalidate(struct cam_periph *periph)
 {
-	union ccb en_lun_ccb;
+	struct ctlfe_lun_softc *softc = (struct ctlfe_lun_softc *)periph->softc;
+	struct ctlfe_softc *bus_softc;
+	union ccb ccb;
+	struct ccb_hdr *hdr;
 	cam_status status;
-	struct ctlfe_lun_softc *softc;
 
-	softc = (struct ctlfe_lun_softc *)periph->softc;
+	/* Abort all ATIOs and INOTs queued to SIM. */
+	xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
+	ccb.ccb_h.func_code = XPT_ABORT;
+	LIST_FOREACH(hdr, &softc->atio_list, periph_links.le) {
+		ccb.cab.abort_ccb = (union ccb *)hdr;
+		xpt_action(&ccb);
+	}
+	LIST_FOREACH(hdr, &softc->inot_list, periph_links.le) {
+		ccb.cab.abort_ccb = (union ccb *)hdr;
+		xpt_action(&ccb);
+	}
 
-	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
-	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
-	en_lun_ccb.cel.grp6_len = 0;
-	en_lun_ccb.cel.grp7_len = 0;
-	en_lun_ccb.cel.enable = 0;
-	xpt_action(&en_lun_ccb);
-	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
+	/* Disable the LUN in SIM. */
+	ccb.ccb_h.func_code = XPT_EN_LUN;
+	ccb.cel.grp6_len = 0;
+	ccb.cel.grp7_len = 0;
+	ccb.cel.enable = 0;
+	xpt_action(&ccb);
+	status = (ccb.ccb_h.status & CAM_STATUS_MASK);
 	if (status != CAM_REQ_CMP) {
 		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
-			  __func__, en_lun_ccb.ccb_h.status);
+			  __func__, ccb.ccb_h.status);
 		/*
 		 * XXX KDM what do we do now?
 		 */
 	}
-	xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju "
-		  "INOTs outstanding, %d refs\n", softc->atios_sent -
-		  softc->atios_returned, softc->inots_sent -
-		  softc->inots_returned, periph->refcount);
+
+	bus_softc = softc->parent_softc;
+	mtx_lock(&bus_softc->lun_softc_mtx);
+	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
+	mtx_unlock(&bus_softc->lun_softc_mtx);
 }
 
 static void
@@ -679,358 +638,282 @@
 ctlfecleanup(struct cam_periph *periph)
 {
 	struct ctlfe_lun_softc *softc;
-	struct ctlfe_softc *bus_softc;
 
-	xpt_print(periph->path, "%s: Called\n", __func__);
-
 	softc = (struct ctlfe_lun_softc *)periph->softc;
-	bus_softc = softc->parent_softc;
 
-	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
-	
-	/*
-	 * XXX KDM is there anything else that needs to be done here?
-	 */
+	KASSERT(softc->ctios_sent == 0, ("%s: ctios_sent %d != 0",
+	    __func__, softc->ctios_sent));
+	KASSERT(softc->refcount == 0, ("%s: refcount %d != 0",
+	    __func__, softc->refcount));
+	KASSERT(softc->atios_alloced == 0, ("%s: atios_alloced %d != 0",
+	    __func__, softc->atios_alloced));
+	KASSERT(softc->inots_alloced == 0, ("%s: inots_alloced %d != 0",
+	    __func__, softc->inots_alloced));
 
-	callout_stop(&softc->dma_callout);
-
 	free(softc, M_CTLFE);
 }
 
 static void
-ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
+ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
+    ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
+    u_int16_t *sglist_cnt)
 {
-	struct ctlfe_lun_softc *softc;
-	struct ccb_hdr *ccb_h;
+	struct ctlfe_softc *bus_softc;
+	struct ctlfe_cmd_info *cmd_info;
+	struct ctl_sg_entry *ctl_sglist;
+	bus_dma_segment_t *cam_sglist;
+	size_t off;
+	int i, idx;
 
-	softc = (struct ctlfe_lun_softc *)periph->softc;
+	cmd_info = PRIV_INFO(io);
+	bus_softc = softc->parent_softc;
 
-	softc->ccbs_alloced++;
+	/*
+	 * Set the direction, relative to the initiator.
+	 */
+	*flags &= ~CAM_DIR_MASK;
+	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
+		*flags |= CAM_DIR_IN;
+	else
+		*flags |= CAM_DIR_OUT;
 
-	start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT;
+	*flags &= ~CAM_DATA_MASK;
+	idx = cmd_info->cur_transfer_index;
+	off = cmd_info->cur_transfer_off;
+	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
+	if (io->scsiio.kern_sg_entries == 0) {	/* No S/G list. */
 
-	ccb_h = TAILQ_FIRST(&softc->work_queue);
-	if (periph->immediate_priority <= periph->pinfo.priority) {
-		panic("shouldn't get to the CCB waiting case!");
-		start_ccb->ccb_h.ccb_type = CTLFE_CCB_WAITING;
-		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-				  periph_links.sle);
-		periph->immediate_priority = CAM_PRIORITY_NONE;
-		wakeup(&periph->ccb_list);
-	} else if (ccb_h == NULL) {
-		softc->ccbs_freed++;
-		xpt_release_ccb(start_ccb);
-	} else {
-		struct ccb_accept_tio *atio;
-		struct ccb_scsiio *csio;
-		uint8_t *data_ptr;
-		uint32_t dxfer_len;
-		ccb_flags flags;
-		union ctl_io *io;
-		uint8_t scsi_status;
+		/* One time shift for SRR offset. */
+		off += io->scsiio.ext_data_filled;
+		io->scsiio.ext_data_filled = 0;
 
-		/* Take the ATIO off the work queue */
-		TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
-		atio = (struct ccb_accept_tio *)ccb_h;
-		io = (union ctl_io *)ccb_h->io_ptr;
-		csio = &start_ccb->csio;
+		*data_ptr = io->scsiio.kern_data_ptr + off;
+		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
+			*dxfer_len = io->scsiio.kern_data_len - off;
+		} else {
+			*dxfer_len = bus_softc->maxio;
+			cmd_info->cur_transfer_off += bus_softc->maxio;
+			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
+		}
+		*sglist_cnt = 0;
 
-		flags = atio->ccb_h.flags &
-			(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
+		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
+			*flags |= CAM_DATA_PADDR;
+		else
+			*flags |= CAM_DATA_VADDR;
+	} else {	/* S/G list with physical or virtual pointers. */
+		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
 
-		if ((io == NULL)
-		 || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
-			/*
-			 * We're done, send status back.
-			 */
-			flags |= CAM_SEND_STATUS;
-			if (io == NULL) {
-				scsi_status = SCSI_STATUS_BUSY;
-				csio->sense_len = 0;
-			} else if ((io->io_hdr.status & CTL_STATUS_MASK) ==
-				   CTL_CMD_ABORTED) {
-				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
+		/* One time shift for SRR offset. */
+		while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
+			io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
+			idx++;
+			off = 0;
+		}
+		off += io->scsiio.ext_data_filled;
+		io->scsiio.ext_data_filled = 0;
 
-				/*
-				 * If this command was aborted, we don't
-				 * need to send status back to the SIM.
-				 * Just free the CTIO and ctl_io, and
-				 * recycle the ATIO back to the SIM.
-				 */
-				xpt_print(periph->path, "%s: aborted "
-					  "command 0x%04x discarded\n",
-					  __func__, io->scsiio.tag_num);
-				ctl_free_io(io);
-				/*
-				 * For a wildcard attachment, commands can
-				 * come in with a specific target/lun.  Reset
-				 * the target and LUN fields back to the
-				 * wildcard values before we send them back
-				 * down to the SIM.  The SIM has a wildcard
-				 * LUN enabled, not whatever target/lun 
-				 * these happened to be.
-				 */
-				if (softc->flags & CTLFE_LUN_WILDCARD) {
-					atio->ccb_h.target_id =
-						CAM_TARGET_WILDCARD;
-					atio->ccb_h.target_lun =
-						CAM_LUN_WILDCARD;
-				}
-
-				if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
-					cam_release_devq(periph->path,
-							 /*relsim_flags*/0,
-							 /*reduction*/0,
- 							 /*timeout*/0,
-							 /*getcount_only*/0);
-					atio->ccb_h.status &= ~CAM_DEV_QFRZN;
-				}
-
-				ccb_h = TAILQ_FIRST(&softc->work_queue);
-
-				if (atio->ccb_h.func_code != 
-				    XPT_ACCEPT_TARGET_IO) {
-					xpt_print(periph->path, "%s: func_code "
-						  "is %#x\n", __func__,
-						  atio->ccb_h.func_code);
-				}
-				start_ccb->ccb_h.func_code = XPT_ABORT;
-				start_ccb->cab.abort_ccb = (union ccb *)atio;
-				start_ccb->ccb_h.cbfcnp = ctlfedone;
-
-				/* Tell the SIM that we've aborted this ATIO */
-				xpt_action(start_ccb);
-				softc->ccbs_freed++;
-				xpt_release_ccb(start_ccb);
-
-				/*
-				 * Send the ATIO back down to the SIM.
-				 */
-				xpt_action((union ccb *)atio);
-				softc->atios_sent++;
-
-				/*
-				 * If we still have work to do, ask for
-				 * another CCB.  Otherwise, deactivate our
-				 * callout.
-				 */
-				if (ccb_h != NULL)
-					xpt_schedule(periph, /*priority*/ 1);
-				else
-					callout_stop(&softc->dma_callout);
-
-				return;
+		cam_sglist = cmd_info->cam_sglist;
+		*dxfer_len = 0;
+		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
+			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
+			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
+				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
+				*dxfer_len += cam_sglist[i].ds_len;
 			} else {
-				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
-				scsi_status = io->scsiio.scsi_status;
-				csio->sense_len = io->scsiio.sense_len;
+				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
+				cmd_info->cur_transfer_index = idx + i;
+				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
+				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
+				*dxfer_len += cam_sglist[i].ds_len;
+				if (ctl_sglist[i].len != 0)
+					i++;
+				break;
 			}
-			data_ptr = NULL;
-			dxfer_len = 0;
-			if (io == NULL) {
-				printf("%s: tag %04x io is NULL\n", __func__,
-				       atio->tag_id);
-			} else {
-#ifdef CTLFEDEBUG
-				printf("%s: tag %04x status %x\n", __func__,
-				       atio->tag_id, io->io_hdr.status);
-#endif
+			if (i == (CTLFE_MAX_SEGS - 1) &&
+			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
+				cmd_info->cur_transfer_index = idx + i + 1;
+				cmd_info->cur_transfer_off = 0;
+				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
+				i++;
+				break;
 			}
-			csio->sglist_cnt = 0;
-			if (csio->sense_len != 0) {
-				csio->sense_data = io->scsiio.sense_data;
-				flags |= CAM_SEND_SENSE;
-			} else if (scsi_status == SCSI_STATUS_CHECK_COND) {
-				xpt_print(periph->path, "%s: check condition "
-					  "with no sense\n", __func__);
-			}
-		} else {
-			struct ctlfe_lun_cmd_info *cmd_info;
+			off = 0;
+		}
+		*sglist_cnt = i;
+		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
+			*flags |= CAM_DATA_SG_PADDR;
+		else
+			*flags |= CAM_DATA_SG;
+		*data_ptr = (uint8_t *)cam_sglist;
+	}
+}
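
ctlfedata() above is now the single place where a datamove is translated into
CCB fields: transfers larger than the SIM's maxio are split, with
cur_transfer_index/cur_transfer_off recording the resume point and
CTLFE_CMD_PIECEWISE flagging that more CTIOs must follow.  A worked example
under assumed numbers -- maxio = 131072 and a flat 307200-byte buffer yields
three CTIOs:

    /* CTIO 1: off = 0,      dxfer_len = 131072, PIECEWISE set   */
    /* CTIO 2: off = 131072, dxfer_len = 131072, PIECEWISE set   */
    /* CTIO 3: off = 262144, dxfer_len = 45056,  flag clear, done */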
 
-			/*
-			 * Datamove call, we need to setup the S/G list. 
-			 */
+static void
+ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
+{
+	struct ctlfe_lun_softc *softc;
+	struct ctlfe_cmd_info *cmd_info;
+	struct ccb_hdr *ccb_h;
+	struct ccb_accept_tio *atio;
+	struct ccb_scsiio *csio;
+	uint8_t *data_ptr;
+	uint32_t dxfer_len;
+	ccb_flags flags;
+	union ctl_io *io;
+	uint8_t scsi_status;
 
-			cmd_info = (struct ctlfe_lun_cmd_info *)
-				io->io_hdr.port_priv;
+	softc = (struct ctlfe_lun_softc *)periph->softc;
 
-			KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE,
-				("%s: sizeof(struct ctlfe_lun_cmd_info) %zd < "
-				"CTL_PORT_PRIV_SIZE %d", __func__,
-				sizeof(*cmd_info), CTL_PORT_PRIV_SIZE));
-			io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED;
+next:
+	/* Take the ATIO off the work queue */
+	ccb_h = STAILQ_FIRST(&softc->work_queue);
+	if (ccb_h == NULL) {
+		xpt_release_ccb(start_ccb);
+		return;
+	}
+	STAILQ_REMOVE_HEAD(&softc->work_queue, periph_links.stqe);
+	atio = (struct ccb_accept_tio *)ccb_h;
+	io = (union ctl_io *)ccb_h->io_ptr;
+	csio = &start_ccb->csio;
 
-			/*
-			 * Need to zero this, in case it has been used for
-			 * a previous datamove for this particular I/O.
-			 */
-			bzero(cmd_info, sizeof(*cmd_info));
-			scsi_status = 0;
+	flags = atio->ccb_h.flags &
+		(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
+	cmd_info = PRIV_INFO(io);
+	cmd_info->cur_transfer_index = 0;
+	cmd_info->cur_transfer_off = 0;
+	cmd_info->flags = 0;
 
-			/*
-			 * Set the direction, relative to the initiator.
-			 */
-			flags &= ~CAM_DIR_MASK;
-			if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
-			     CTL_FLAG_DATA_IN)
-				flags |= CAM_DIR_IN;
-			else
-				flags |= CAM_DIR_OUT;
-			
-			csio->cdb_len = atio->cdb_len;
+	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
+		/*
+		 * Datamove call, we need to setup the S/G list.
+		 */
+		ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
+		    &csio->sglist_cnt);
+	} else {
+		/*
+		 * We're done, send status back.
+		 */
+		if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
+		    (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
+			io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
 
-			if (io->scsiio.kern_sg_entries == 0) {
-				/* No S/G list */
-				data_ptr = io->scsiio.kern_data_ptr;
-				dxfer_len = io->scsiio.kern_data_len;
-				csio->sglist_cnt = 0;
+			/* Tell the SIM that we've aborted this ATIO */
+#ifdef CTLFEDEBUG
+			printf("%s: tag %04x abort\n", __func__, atio->tag_id);
+#endif
+			KASSERT(atio->ccb_h.func_code == XPT_ACCEPT_TARGET_IO,
+			    ("func_code %#x is not ATIO", atio->ccb_h.func_code));
+			start_ccb->ccb_h.func_code = XPT_ABORT;
+			start_ccb->cab.abort_ccb = (union ccb *)atio;
+			xpt_action(start_ccb);
 
-				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
-					flags |= CAM_DATA_PHYS;
-			} else if (io->scsiio.kern_sg_entries <=
-				   (sizeof(cmd_info->cam_sglist)/
-				   sizeof(cmd_info->cam_sglist[0]))) {
-				/*
-				 * S/G list with physical or virtual pointers.
-				 * Just populate the CAM S/G list with the
-				 * pointers.
-				 */
-				int i;
-				struct ctl_sg_entry *ctl_sglist;
-				bus_dma_segment_t *cam_sglist;
+			ctlfe_requeue_ccb(periph, (union ccb *)atio,
+			    /* unlock */0);
 
-				ctl_sglist = (struct ctl_sg_entry *)
-					io->scsiio.kern_data_ptr;
-				cam_sglist = cmd_info->cam_sglist;
-
-				for (i = 0; i < io->scsiio.kern_sg_entries;i++){
-					cam_sglist[i].ds_addr =
-						(bus_addr_t)ctl_sglist[i].addr;
-					cam_sglist[i].ds_len =
-						ctl_sglist[i].len;
-				}
-				csio->sglist_cnt = io->scsiio.kern_sg_entries;
-				flags |= CAM_SCATTER_VALID;
-				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
-					flags |= CAM_SG_LIST_PHYS;
-				else
-					flags &= ~CAM_SG_LIST_PHYS;
-				data_ptr = (uint8_t *)cam_sglist;
-				dxfer_len = io->scsiio.kern_data_len;
-			} else {
-				/* S/G list with virtual pointers */
-				struct ctl_sg_entry *sglist;
-				int *ti;
-
-				/*
-				 * If we have more S/G list pointers than
-				 * will fit in the available storage in the
-				 * cmd_info structure inside the ctl_io header,
-				 * then we need to send down the pointers
-				 * one element at a time.
-				 */
-
-				sglist = (struct ctl_sg_entry *)
-					io->scsiio.kern_data_ptr;
-				ti = &cmd_info->cur_transfer_index;
-				data_ptr = sglist[*ti].addr;
-				dxfer_len = sglist[*ti].len;
-				csio->sglist_cnt = 0;
-				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
-				(*ti)++;
-			}
-
-			io->scsiio.ext_data_filled += dxfer_len;
-
-			if (io->scsiio.ext_data_filled >
-			    io->scsiio.kern_total_len) {
-				xpt_print(periph->path, "%s: tag 0x%04x "
-					  "fill len %u > total %u\n",
-					  __func__, io->scsiio.tag_num,
-					  io->scsiio.ext_data_filled,
-					  io->scsiio.kern_total_len);
-			}
+			/* XPT_ABORT is not queued, so we can take next I/O. */
+			goto next;
 		}
+		data_ptr = NULL;
+		dxfer_len = 0;
+		csio->sglist_cnt = 0;
+	}
+	scsi_status = 0;
+	if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
+	    (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
+	    ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
+	     io->io_hdr.status == CTL_SUCCESS)) {
+		flags |= CAM_SEND_STATUS;
+		scsi_status = io->scsiio.scsi_status;
+		csio->sense_len = io->scsiio.sense_len;
+#ifdef CTLFEDEBUG
+		printf("%s: tag %04x status %x\n", __func__,
+		       atio->tag_id, io->io_hdr.status);
+#endif
+		if (csio->sense_len != 0) {
+			csio->sense_data = io->scsiio.sense_data;
+			flags |= CAM_SEND_SENSE;
+		}
+	}
 
 #ifdef CTLFEDEBUG
-		printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
-		       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
-		       atio->tag_id, flags, data_ptr, dxfer_len);
+	printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
+	       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
+	       atio->tag_id, flags, data_ptr, dxfer_len);
 #endif
 
-		/*
-		 * Valid combinations:
-		 *  - CAM_SEND_STATUS, SCATTER_VALID = 0, dxfer_len = 0,
-		 *    sglist_cnt = 0
-		 *  - CAM_SEND_STATUS = 0, SCATTER_VALID = 0, dxfer_len != 0,
-		 *    sglist_cnt = 0 
-		 *  - CAM_SEND_STATUS = 0, SCATTER_VALID, dxfer_len != 0,
-		 *    sglist_cnt != 0
-		 */
+	/*
+	 * Valid combinations:
+	 *  - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
+	 *    sglist_cnt = 0
+	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
+	 *    sglist_cnt = 0
+	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
+	 *    sglist_cnt != 0
+	 */
 #ifdef CTLFEDEBUG
-		if (((flags & CAM_SEND_STATUS)
-		  && (((flags & CAM_SCATTER_VALID) != 0)
-		   || (dxfer_len != 0)
-		   || (csio->sglist_cnt != 0)))
-		 || (((flags & CAM_SEND_STATUS) == 0)
-		  && (dxfer_len == 0))
-		 || ((flags & CAM_SCATTER_VALID)
-		  && (csio->sglist_cnt == 0))
-		 || (((flags & CAM_SCATTER_VALID) == 0)
-		  && (csio->sglist_cnt != 0))) {
-			printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
-			       "%d sg %u\n", __func__, atio->tag_id,
-			       atio->cdb_io.cdb_bytes[0], flags, dxfer_len,
-			       csio->sglist_cnt);
-			if (io != NULL) {
-				printf("%s: tag %04x io status %#x\n", __func__,
-				       atio->tag_id, io->io_hdr.status);
-			} else {
-				printf("%s: tag %04x no associated io\n",
-				       __func__, atio->tag_id);
-			}
-		}
+	if (((flags & CAM_SEND_STATUS)
+	  && (((flags & CAM_DATA_SG) != 0)
+	   || (dxfer_len != 0)
+	   || (csio->sglist_cnt != 0)))
+	 || (((flags & CAM_SEND_STATUS) == 0)
+	  && (dxfer_len == 0))
+	 || ((flags & CAM_DATA_SG)
+	  && (csio->sglist_cnt == 0))
+	 || (((flags & CAM_DATA_SG) == 0)
+	  && (csio->sglist_cnt != 0))) {
+		printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
+		       "%d sg %u\n", __func__, atio->tag_id,
+		       atio_cdb_ptr(atio)[0], flags, dxfer_len,
+		       csio->sglist_cnt);
+		printf("%s: tag %04x io status %#x\n", __func__,
+		       atio->tag_id, io->io_hdr.status);
+	}
 #endif
-		cam_fill_ctio(csio,
-			      /*retries*/ 2,
-			      ctlfedone,
-			      flags,
-			      (flags & CAM_TAG_ACTION_VALID) ?
-			       MSG_SIMPLE_Q_TAG : 0,
-			      atio->tag_id,
-			      atio->init_id,
-			      scsi_status,
-			      /*data_ptr*/ data_ptr,
-			      /*dxfer_len*/ dxfer_len,
-			      /*timeout*/ 5 * 1000);
-		start_ccb->ccb_h.ccb_atio = atio;
-		if (((flags & CAM_SEND_STATUS) == 0)
-		 && (io != NULL))
-			io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
+	cam_fill_ctio(csio,
+		      /*retries*/ 2,
+		      ctlfedone,
+		      flags,
+		      (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0,
+		      atio->tag_id,
+		      atio->init_id,
+		      scsi_status,
+		      /*data_ptr*/ data_ptr,
+		      /*dxfer_len*/ dxfer_len,
+		      /*timeout*/ CTLFE_TIMEOUT * 1000);
+	start_ccb->ccb_h.flags |= CAM_UNLOCKED;
+	start_ccb->ccb_h.ccb_atio = atio;
+	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
+		io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
+	io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED);
 
-		softc->ctios_sent++;
+	softc->ctios_sent++;
+	softc->refcount++;
+	cam_periph_unlock(periph);
+	xpt_action(start_ccb);
+	cam_periph_lock(periph);
+	softc->refcount--;
 
-		xpt_action(start_ccb);
+	/*
+	 * If we still have work to do, ask for another CCB.
+	 */
+	if (!STAILQ_EMPTY(&softc->work_queue))
+		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
+}
 
-		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
-			cam_release_devq(periph->path,
-					 /*relsim_flags*/0,
-					 /*reduction*/0,
- 					 /*timeout*/0,
-					 /*getcount_only*/0);
-			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
-		}
+static void
+ctlfe_drain(void *context, int pending)
+{
+	struct cam_periph *periph = context;
+	struct ctlfe_lun_softc *softc = periph->softc;
 
-		ccb_h = TAILQ_FIRST(&softc->work_queue);
+	cam_periph_lock(periph);
+	while (softc->refcount != 0) {
+		cam_periph_sleep(periph, &softc->refcount, PRIBIO,
+		    "ctlfe_drain", 1);
 	}
-	/*
-	 * If we still have work to do, ask for another CCB.  Otherwise,
-	 * deactivate our callout.
-	 */
-	if (ccb_h != NULL)
-		xpt_schedule(periph, /*priority*/ 1);
-	else
-		callout_stop(&softc->dma_callout);
+	cam_periph_unlock(periph);
+	cam_periph_release(periph);
 }
 
 static void
@@ -1037,29 +920,33 @@
 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
 {
 	struct ctlfe_lun_softc *softc;
+	union ctl_io *io;
+	struct ctlfe_cmd_info *cmd_info;
 
 	softc = (struct ctlfe_lun_softc *)periph->softc;
+	io = ccb->ccb_h.io_ptr;
 
 	switch (ccb->ccb_h.func_code) {
 	case XPT_ACCEPT_TARGET_IO:
-		softc->atios_returned++;
+		softc->atios_alloced--;
+		cmd_info = PRIV_INFO(io);
+		free(cmd_info, M_CTLFE);
 		break;
 	case XPT_IMMEDIATE_NOTIFY:
 	case XPT_NOTIFY_ACKNOWLEDGE:
-		softc->inots_returned++;
+		softc->inots_alloced--;
 		break;
 	default:
 		break;
 	}
 
+	ctl_free_io(io);
 	free(ccb, M_CTLFE);
 
-	KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: "
-		"atios_returned %ju > atios_sent %ju", __func__,
-		softc->atios_returned, softc->atios_sent));
-	KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: "
-		"inots_returned %ju > inots_sent %ju", __func__,
-		softc->inots_returned, softc->inots_sent));
+	KASSERT(softc->atios_alloced >= 0, ("%s: atios_alloced %d < 0",
+	    __func__, softc->atios_alloced));
+	KASSERT(softc->inots_alloced >= 0, ("%s: inots_alloced %d < 0",
+	    __func__, softc->inots_alloced));
 
 	/*
 	 * If we have received all of our CCBs, we can release our
@@ -1066,19 +953,60 @@
 	 * reference on the peripheral driver.  It will probably go away
 	 * now.
 	 */
-	if ((softc->atios_returned == softc->atios_sent)
-	 && (softc->inots_returned == softc->inots_sent)) {
-		cam_periph_release_locked(periph);
+	if (softc->atios_alloced == 0 && softc->inots_alloced == 0) {
+		if (softc->refcount == 0) {
+			cam_periph_release_locked(periph);
+		} else {
+			TASK_INIT(&softc->refdrain_task, 0, ctlfe_drain, periph);
+			taskqueue_enqueue(taskqueue_thread,
+			    &softc->refdrain_task);
+		}
 	}
 }
 
+/*
+ * Send the ATIO/INOT back to the SIM, or free it if periph was invalidated.
+ */
+static void
+ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock)
+{
+	struct ctlfe_lun_softc *softc;
+	struct mtx *mtx;
+
+	if (periph->flags & CAM_PERIPH_INVALID) {
+		mtx = cam_periph_mtx(periph);
+		ctlfe_free_ccb(periph, ccb);
+		if (unlock)
+			mtx_unlock(mtx);
+		return;
+	}
+	softc = (struct ctlfe_lun_softc *)periph->softc;
+	if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
+		LIST_INSERT_HEAD(&softc->atio_list, &ccb->ccb_h, periph_links.le);
+	else
+		LIST_INSERT_HEAD(&softc->inot_list, &ccb->ccb_h, periph_links.le);
+	if (unlock)
+		cam_periph_unlock(periph);
+
+	/*
+	 * For a wildcard attachment, commands can come in with a specific
+	 * target/lun.  Reset the target and LUN fields back to the wildcard
+	 * values before we send them back down to the SIM.
+	 */
+	if (softc->flags & CTLFE_LUN_WILDCARD) {
+		ccb->ccb_h.target_id = CAM_TARGET_WILDCARD;
+		ccb->ccb_h.target_lun = CAM_LUN_WILDCARD;
+	}
+
+	xpt_action(ccb);
+}
+
 static int
 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
 {
 	uint64_t lba;
 	uint32_t num_blocks, nbc;
-	uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)?
-	    atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes;
+	uint8_t *cmdbyt = atio_cdb_ptr(atio);
 
 	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */
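
ctlfe_adjust_cdb() rewrites the CDB so a resubmitted command starts at the SRR
offset: the byte offset becomes a block count under the hard-coded 512-byte
assumption above, then the LBA is advanced and the length shrunk.  For a
READ(10)/WRITE(10) the arithmetic would look like this (a sketch; the real
function switches on the opcode):

    uint32_t nbc = offset >> 9;		/* bytes -> 512-byte blocks */

    lba += nbc;				/* skip what already moved */
    num_blocks -= nbc;
    scsi_ulto4b(lba, &cmdbyt[2]);	/* 10-byte CDB: 4-byte LBA */
    scsi_ulto2b(num_blocks, &cmdbyt[7]);	/* 2-byte transfer length */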
 
@@ -1145,79 +1073,61 @@
 {
 	struct ctlfe_lun_softc *softc;
 	struct ctlfe_softc *bus_softc;
+	struct ctlfe_cmd_info *cmd_info;
 	struct ccb_accept_tio *atio = NULL;
 	union ctl_io *io = NULL;
+	struct mtx *mtx;
+	cam_status status;
 
+	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
+	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
 #ifdef CTLFE_DEBUG
-	printf("%s: entered, func_code = %#x, type = %#lx\n", __func__,
-	       done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type);
+	printf("%s: entered, func_code = %#x\n", __func__,
+	       done_ccb->ccb_h.func_code);
 #endif
 
+	/*
+	 * At this point CTL has no known use case for device queue freezes.
+	 * If some SIM thinks otherwise, drop its freeze right here.
+	 */
+	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+		cam_release_devq(periph->path,
+				 /*relsim_flags*/0,
+				 /*reduction*/0,
+				 /*timeout*/0,
+				 /*getcount_only*/0);
+		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
+	}
+
 	softc = (struct ctlfe_lun_softc *)periph->softc;
 	bus_softc = softc->parent_softc;
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
 
-	if (done_ccb->ccb_h.ccb_type == CTLFE_CCB_WAITING) {
-		panic("shouldn't get to the CCB waiting case!");
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
-	}
-
-	/*
-	 * If the peripheral is invalid, ATIOs and immediate notify CCBs
-	 * need to be freed.  Most of the ATIOs and INOTs that come back
-	 * will be CCBs that are being returned from the SIM as a result of
-	 * our disabling the LUN.
-	 *
-	 * Other CCB types are handled in their respective cases below.
-	 */
-	if (periph->flags & CAM_PERIPH_INVALID) {
-		switch (done_ccb->ccb_h.func_code) {
-		case XPT_ACCEPT_TARGET_IO:
-		case XPT_IMMEDIATE_NOTIFY:
-		case XPT_NOTIFY_ACKNOWLEDGE:
-			ctlfe_free_ccb(periph, done_ccb);
-			return;
-		default:
-			break;
-		}
-
-	}
 	switch (done_ccb->ccb_h.func_code) {
 	case XPT_ACCEPT_TARGET_IO: {
 
+		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
 		atio = &done_ccb->atio;
+		status = atio->ccb_h.status & CAM_STATUS_MASK;
+		if (status != CAM_CDB_RECVD) {
+			ctlfe_free_ccb(periph, done_ccb);
+			goto out;
+		}
 
-		softc->atios_returned++;
-
  resubmit:
 		/*
 		 * Allocate a ctl_io, pass it to CTL, and wait for the
 		 * datamove or done.
 		 */
-		io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
-		if (io == NULL) {
-			atio->ccb_h.flags &= ~CAM_DIR_MASK;
-			atio->ccb_h.flags |= CAM_DIR_NONE;
-
-			printf("%s: ctl_alloc_io failed!\n", __func__);
-
-			/*
-			 * XXX KDM need to set SCSI_STATUS_BUSY, but there
-			 * is no field in the ATIO structure to do that,
-			 * and we aren't able to allocate a ctl_io here.
-			 * What to do?
-			 */
-			atio->sense_len = 0;
-			done_ccb->ccb_h.io_ptr = NULL;
-			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
-					  periph_links.tqe);
-			xpt_schedule(periph, /*priority*/ 1);
-			break;
-		}
+		mtx_unlock(mtx);
+		io = done_ccb->ccb_h.io_ptr;
+		cmd_info = PRIV_INFO(io);
 		ctl_zero_io(io);
 
 		/* Save pointers on both sides */
-		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
+		PRIV_CCB(io) = done_ccb;
+		PRIV_INFO(io) = cmd_info;
 		done_ccb->ccb_h.io_ptr = io;
 
 		/*
@@ -1225,10 +1135,14 @@
 		 * down the immediate notify path below.
 		 */
 		io->io_hdr.io_type = CTL_IO_SCSI;
-		io->io_hdr.nexus.initid.id = atio->init_id;
-		io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
-		io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
-		io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
+		io->io_hdr.nexus.initid = atio->init_id;
+		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
+		if (bus_softc->hba_misc & PIM_EXTLUNS) {
+			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
+			    CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun));
+		} else {
+			io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
+		}
 		io->scsiio.tag_num = atio->tag_id;
 		switch (atio->tag_action) {
 		case CAM_TAG_ACTION_NONE:
@@ -1257,20 +1171,18 @@
 			       __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
 		}
 		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
-		bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
-		      io->scsiio.cdb_len);
+		bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len);
 
 #ifdef CTLFEDEBUG
-		printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__,
-		        (uintmax_t)io->io_hdr.nexus.initid.id,
+		printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
+		        io->io_hdr.nexus.initid,
 		        io->io_hdr.nexus.targ_port,
-		        (uintmax_t)io->io_hdr.nexus.targ_target.id,
 		        io->io_hdr.nexus.targ_lun,
 			io->scsiio.tag_num, io->scsiio.cdb[0]);
 #endif
 
 		ctl_queue(io);
-		break;
+		return;
 	}
 	case XPT_CONT_TARGET_IO: {
 		int srr = 0;
@@ -1279,7 +1191,7 @@
 		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
 		io = (union ctl_io *)atio->ccb_h.io_ptr;
 
-		softc->ctios_returned++;
+		softc->ctios_sent--;
 #ifdef CTLFEDEBUG
 		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
 		       __func__, atio->tag_id, done_ccb->ccb_h.flags);
@@ -1300,16 +1212,36 @@
 			    | (done_ccb->csio.msg_ptr[6]);
 		}
 
-		if (srr && (done_ccb->ccb_h.flags & CAM_SEND_STATUS)) {
-			/*
-			 * If status was being sent, the back end data is now
-			 * history. Hack it up and resubmit a new command with
-			 * the CDB adjusted. If the SIM does the right thing,
-			 * all of the resid math should work.
-			 */
-			softc->ccbs_freed++;
+		/*
+		 * If we have an SRR and we're still sending data, we
+		 * should be able to adjust offsets and cycle again.
+		 * That is possible only if the offset falls within this datamove.
+		 */
+		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) &&
+		    srr_off >= io->scsiio.kern_rel_offset &&
+		    srr_off < io->scsiio.kern_rel_offset +
+		     io->scsiio.kern_data_len) {
+			io->scsiio.kern_data_resid =
+			    io->scsiio.kern_rel_offset +
+			    io->scsiio.kern_data_len - srr_off;
+			io->scsiio.ext_data_filled = srr_off;
+			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
+			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
 			xpt_release_ccb(done_ccb);
-			ctl_free_io(io);
+			STAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
+					  periph_links.stqe);
+			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
+			break;
+		}
+
+		/*
+		 * If status was being sent, the back end data is now history.
+		 * Hack it up and resubmit a new command with the CDB adjusted.
+		 * If the SIM does the right thing, all of the resid math
+		 * should work.
+		 */
+		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
+			xpt_release_ccb(done_ccb);
 			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
 				done_ccb = (union ccb *)atio;
 				goto resubmit;
@@ -1317,72 +1249,53 @@
 			/*
 			 * Fall through to doom....
 			 */
-		} else if (srr) {
-			/*
-			 * If we have an srr and we're still sending data, we
-			 * should be able to adjust offsets and cycle again.
-			 */
-			io->scsiio.kern_rel_offset =
-			    io->scsiio.ext_data_filled = srr_off;
-			io->scsiio.ext_data_len = io->scsiio.kern_total_len -
-			    io->scsiio.kern_rel_offset;
-			softc->ccbs_freed++;
-			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
-			xpt_release_ccb(done_ccb);
-			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
-					  periph_links.tqe);
-			xpt_schedule(periph, /*priority*/ 1);
-			return;
 		}
 
+		if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
+		    (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
+			io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
+
 		/*
 		 * If we were sending status back to the initiator, free up
 		 * resources.  If we were doing a datamove, call the
 		 * datamove done routine.
 		 */
-		if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) {
-			softc->ccbs_freed++;
-			xpt_release_ccb(done_ccb);
-			ctl_free_io(io);
+		if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
 			/*
-			 * For a wildcard attachment, commands can come in
-			 * with a specific target/lun.  Reset the target
-			 * and LUN fields back to the wildcard values before
-			 * we send them back down to the SIM.  The SIM has
-			 * a wildcard LUN enabled, not whatever target/lun
-			 * these happened to be.
+			 * If we asked to send sense data but it wasn't sent,
+			 * queue the I/O back to CTL for later REQUEST SENSE.
 			 */
-			if (softc->flags & CTLFE_LUN_WILDCARD) {
-				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
-				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
+			if ((done_ccb->ccb_h.flags & CAM_SEND_SENSE) != 0 &&
+			    (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
+			    (done_ccb->ccb_h.status & CAM_SENT_SENSE) == 0 &&
+			    (io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref)) != NULL) {
+				PRIV_INFO(io) = PRIV_INFO(
+				    (union ctl_io *)atio->ccb_h.io_ptr);
+				ctl_queue_sense(atio->ccb_h.io_ptr);
+				atio->ccb_h.io_ptr = io;
 			}
-			if (periph->flags & CAM_PERIPH_INVALID) {
-				ctlfe_free_ccb(periph, (union ccb *)atio);
-				return;
-			} else {
-				xpt_action((union ccb *)atio);
-				softc->atios_sent++;
+
+			/* Abort ATIO if CTIO sending status has failed. */
+			if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) !=
+			    CAM_REQ_CMP) {
+				done_ccb->ccb_h.func_code = XPT_ABORT;
+				done_ccb->cab.abort_ccb = (union ccb *)atio;
+				xpt_action(done_ccb);
 			}
+
+			xpt_release_ccb(done_ccb);
+			ctlfe_requeue_ccb(periph, (union ccb *)atio,
+			    /* unlock */1);
+			return;
 		} else {
-			struct ctlfe_lun_cmd_info *cmd_info;
+			struct ctlfe_cmd_info *cmd_info;
 			struct ccb_scsiio *csio;
 
 			csio = &done_ccb->csio;
-			cmd_info = (struct ctlfe_lun_cmd_info *)
-				io->io_hdr.port_priv;
+			cmd_info = PRIV_INFO(io);
 
 			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
 
-			io->scsiio.ext_data_len += csio->dxfer_len;
-			if (io->scsiio.ext_data_len >
-			    io->scsiio.kern_total_len) {
-				xpt_print(periph->path, "%s: tag 0x%04x "
-					  "done len %u > total %u sent %u\n",
-					  __func__, io->scsiio.tag_num,
-					  io->scsiio.ext_data_len,
-					  io->scsiio.kern_total_len,
-					  io->scsiio.ext_data_filled);
-			}
 			/*
 			 * Translate CAM status to CTL status.  Success
 			 * does not change the overall ctl_io status.  In
@@ -1392,6 +1305,8 @@
 			 */
 			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
 			case CAM_REQ_CMP:
+				io->scsiio.kern_data_resid -=
+				    csio->dxfer_len - csio->resid;
 				io->io_hdr.port_status = 0;
 				break;
 			default:
@@ -1418,47 +1333,25 @@
 			 * pieces, figure out where we are in the list, and
 			 * continue sending pieces if necessary.
 			 */
-			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
-			 && (io->io_hdr.port_status == 0)
-			 && (cmd_info->cur_transfer_index <
-			     io->scsiio.kern_sg_entries)) {
-				struct ctl_sg_entry *sglist;
+			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) &&
+			    io->io_hdr.port_status == 0 && csio->resid == 0) {
 				ccb_flags flags;
-				uint8_t scsi_status;
 				uint8_t *data_ptr;
 				uint32_t dxfer_len;
-				int *ti;
 
-				sglist = (struct ctl_sg_entry *)
-					io->scsiio.kern_data_ptr;
-				ti = &cmd_info->cur_transfer_index;
 				flags = atio->ccb_h.flags &
 					(CAM_DIS_DISCONNECT|
-					 CAM_TAG_ACTION_VALID|
-					 CAM_DIR_MASK);
-				
-				/*
-				 * Set the direction, relative to the initiator.
-				 */
-				flags &= ~CAM_DIR_MASK;
-				if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
-				     CTL_FLAG_DATA_IN)
-					flags |= CAM_DIR_IN;
-				else
-					flags |= CAM_DIR_OUT;
+					 CAM_TAG_ACTION_VALID);
 
-				data_ptr = sglist[*ti].addr;
-				dxfer_len = sglist[*ti].len;
-				(*ti)++;
+				ctlfedata(softc, io, &flags, &data_ptr,
+				    &dxfer_len, &csio->sglist_cnt);
 
-				scsi_status = 0;
-
 				if (((flags & CAM_SEND_STATUS) == 0)
 				 && (dxfer_len == 0)) {
 					printf("%s: tag %04x no status or "
 					       "len cdb = %02x\n", __func__,
 					       atio->tag_id,
-					atio->cdb_io.cdb_bytes[0]);
+					       atio_cdb_ptr(atio)[0]);
 					printf("%s: tag %04x io status %#x\n",
 					       __func__, atio->tag_id,
 					       io->io_hdr.status);
@@ -1472,15 +1365,17 @@
 					       MSG_SIMPLE_Q_TAG : 0,
 					      atio->tag_id,
 					      atio->init_id,
-					      scsi_status,
+					      0,
 					      /*data_ptr*/ data_ptr,
 					      /*dxfer_len*/ dxfer_len,
-					      /*timeout*/ 5 * 1000);
+					      CTLFE_TIMEOUT * 1000);
 
+				csio->ccb_h.flags |= CAM_UNLOCKED;
 				csio->resid = 0;
 				csio->ccb_h.ccb_atio = atio;
 				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
 				softc->ctios_sent++;
+				mtx_unlock(mtx);
 				xpt_action((union ccb *)csio);
 			} else {
 				/*
@@ -1487,12 +1382,13 @@
 				 * Release the CTIO.  The ATIO will be sent back
 				 * down to the SIM once we send status.
 				 */
-				softc->ccbs_freed++;
 				xpt_release_ccb(done_ccb);
+				mtx_unlock(mtx);
 
 				/* Call the backend move done callback */
 				io->scsiio.be_move_done(io);
 			}
+			return;
 		}
 		break;
 	}
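
The reworked SRR handling above only restarts the datamove when the
retransmission offset lands inside the transfer currently in flight;
otherwise the back-end data is gone and the command has to be resubmitted
with an adjusted CDB.  The window arithmetic is easiest to see in isolation
(a standalone sketch with hypothetical values, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
    	/* Current datamove covers [rel_off, rel_off + data_len). */
    	uint32_t rel_off = 4096, data_len = 8192;
    	uint32_t srr_off = 6144;	/* initiator asks to restart here */

    	if (srr_off >= rel_off && srr_off < rel_off + data_len) {
    		/* Still inside this datamove: rewind and resend. */
    		uint32_t resid = rel_off + data_len - srr_off;
    		printf("retransmit %u bytes from offset %u\n", resid, srr_off);
    	} else {
    		printf("window gone; resubmit with adjusted CDB\n");
    	}
    	return (0);
    }
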
@@ -1499,163 +1395,108 @@
 	case XPT_IMMEDIATE_NOTIFY: {
 		union ctl_io *io;
 		struct ccb_immediate_notify *inot;
-		cam_status status;
-		int frozen;
+		int send_ctl_io;
 
+		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
 		inot = &done_ccb->cin1;
+		io = done_ccb->ccb_h.io_ptr;
+		ctl_zero_io(io);
 
-		softc->inots_returned++;
+		send_ctl_io = 1;
 
-		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
-
-		printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x "
-		       "seq %#x\n", __func__, inot->ccb_h.status,
-		       inot->tag_id, inot->seq_id);
-
-		io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
-		if (io != NULL) {
-			int send_ctl_io;
-
-			send_ctl_io = 1;
-
-			ctl_zero_io(io);		
-			io->io_hdr.io_type = CTL_IO_TASK;
-			io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb;
-			inot->ccb_h.io_ptr = io;
-			io->io_hdr.nexus.initid.id = inot->initiator_id;
-			io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
-			io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
+		io->io_hdr.io_type = CTL_IO_TASK;
+		PRIV_CCB(io) = done_ccb;
+		inot->ccb_h.io_ptr = io;
+		io->io_hdr.nexus.initid = inot->initiator_id;
+		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
+		if (bus_softc->hba_misc & PIM_EXTLUNS) {
+			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
+			    CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun));
+		} else {
 			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
-			/* XXX KDM should this be the tag_id? */
-			io->taskio.tag_num = inot->seq_id;
+		}
+		/* XXX KDM should this be the tag_id? */
+		io->taskio.tag_num = inot->seq_id;
 
-			status = inot->ccb_h.status & CAM_STATUS_MASK;
-			switch (status) {
-			case CAM_SCSI_BUS_RESET:
-				io->taskio.task_action = CTL_TASK_BUS_RESET;
+		status = inot->ccb_h.status & CAM_STATUS_MASK;
+		switch (status) {
+		case CAM_SCSI_BUS_RESET:
+			io->taskio.task_action = CTL_TASK_BUS_RESET;
+			break;
+		case CAM_BDR_SENT:
+			io->taskio.task_action = CTL_TASK_TARGET_RESET;
+			break;
+		case CAM_MESSAGE_RECV:
+			switch (inot->arg) {
+			case MSG_ABORT_TASK_SET:
+				io->taskio.task_action =
+				    CTL_TASK_ABORT_TASK_SET;
 				break;
-			case CAM_BDR_SENT:
+			case MSG_TARGET_RESET:
 				io->taskio.task_action = CTL_TASK_TARGET_RESET;
 				break;
-			case CAM_MESSAGE_RECV:
-				switch (inot->arg) {
-				case MSG_ABORT_TASK_SET:
-					/*
-					 * XXX KDM this isn't currently
-					 * supported by CTL.  It ends up
-					 * being a no-op.
-					 */
-					io->taskio.task_action =
-						CTL_TASK_ABORT_TASK_SET;
-					break;
-				case MSG_TARGET_RESET:
-					io->taskio.task_action =
-						CTL_TASK_TARGET_RESET;
-					break;
-				case MSG_ABORT_TASK:
-					io->taskio.task_action =
-						CTL_TASK_ABORT_TASK;
-					break;
-				case MSG_LOGICAL_UNIT_RESET:
-					io->taskio.task_action =
-						CTL_TASK_LUN_RESET;
-					break;
-				case MSG_CLEAR_TASK_SET:
-					/*
-					 * XXX KDM this isn't currently
-					 * supported by CTL.  It ends up
-					 * being a no-op.
-					 */
-					io->taskio.task_action =
-						CTL_TASK_CLEAR_TASK_SET;
-					break;
-				case MSG_CLEAR_ACA:
-					io->taskio.task_action = 
-						CTL_TASK_CLEAR_ACA;
-					break;
-				case MSG_NOOP:
-					send_ctl_io = 0;
-					break;
-				default:
-					xpt_print(periph->path, "%s: "
-						  "unsupported message 0x%x\n", 
-						  __func__, inot->arg);
-					send_ctl_io = 0;
-					break;
-				}
+			case MSG_ABORT_TASK:
+				io->taskio.task_action = CTL_TASK_ABORT_TASK;
 				break;
-			case CAM_REQ_ABORTED:
-				/*
-				 * This request was sent back by the driver.
-				 * XXX KDM what do we do here?
-				 */
+			case MSG_LOGICAL_UNIT_RESET:
+				io->taskio.task_action = CTL_TASK_LUN_RESET;
+				break;
+			case MSG_CLEAR_TASK_SET:
+				io->taskio.task_action =
+				    CTL_TASK_CLEAR_TASK_SET;
+				break;
+			case MSG_CLEAR_ACA:
+				io->taskio.task_action = CTL_TASK_CLEAR_ACA;
+				break;
+			case MSG_QUERY_TASK:
+				io->taskio.task_action = CTL_TASK_QUERY_TASK;
+				break;
+			case MSG_QUERY_TASK_SET:
+				io->taskio.task_action =
+				    CTL_TASK_QUERY_TASK_SET;
+				break;
+			case MSG_QUERY_ASYNC_EVENT:
+				io->taskio.task_action =
+				    CTL_TASK_QUERY_ASYNC_EVENT;
+				break;
+			case MSG_NOOP:
 				send_ctl_io = 0;
 				break;
-			case CAM_REQ_INVALID:
-			case CAM_PROVIDE_FAIL:
 			default:
-				/*
-				 * We should only get here if we're talking
-				 * to a talking to a SIM that is target
-				 * capable but supports the old API.  In
-				 * that case, we need to just free the CCB.
-				 * If we actually send a notify acknowledge,
-				 * it will send that back with an error as
-				 * well.
-				 */
-
-				if ((status != CAM_REQ_INVALID)
-				 && (status != CAM_PROVIDE_FAIL))
-					xpt_print(periph->path, "%s: "
-						  "unsupported CAM status "
-						  "0x%x\n", __func__, status);
-
-				ctl_free_io(io);
-				ctlfe_free_ccb(periph, done_ccb);
-
-				return;
+				xpt_print(periph->path,
+				    "%s: unsupported INOT message 0x%x\n",
+				    __func__, inot->arg);
+				send_ctl_io = 0;
+				break;
 			}
-			if (send_ctl_io != 0) {
-				ctl_queue(io);
-			} else {
-				ctl_free_io(io);
-				done_ccb->ccb_h.status = CAM_REQ_INPROG;
-				done_ccb->ccb_h.func_code =
-					XPT_NOTIFY_ACKNOWLEDGE;
-				xpt_action(done_ccb);
-			}
+			break;
+		default:
+			xpt_print(periph->path,
+			    "%s: unsupported INOT status 0x%x\n",
+			    __func__, status);
+			/* FALLTHROUGH */
+		case CAM_REQ_ABORTED:
+		case CAM_REQ_INVALID:
+		case CAM_DEV_NOT_THERE:
+		case CAM_PROVIDE_FAIL:
+			ctlfe_free_ccb(periph, done_ccb);
+			goto out;
+		}
+		if (send_ctl_io != 0) {
+			ctl_queue(io);
 		} else {
-			xpt_print(periph->path, "%s: could not allocate "
-				  "ctl_io for immediate notify!\n", __func__);
-			/* requeue this to the adapter */
 			done_ccb->ccb_h.status = CAM_REQ_INPROG;
 			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
 			xpt_action(done_ccb);
 		}
-
-		if (frozen != 0) {
-			cam_release_devq(periph->path,
-					 /*relsim_flags*/ 0,
-					 /*opening reduction*/ 0,
-					 /*timeout*/ 0,
-					 /*getcount_only*/ 0);
-		}
 		break;
 	}
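
Both the ATIO and INOT paths now pass the LUN through
CAM_EXTLUN_BYTE_SWIZZLE() and ctl_decode_lun() when the SIM advertises
PIM_EXTLUNS, turning the 64-bit SAM-style LUN carried in the CCB into CTL's
flat LUN index.  For the simplest case (peripheral device addressing) the
decode is just a shift; a sketch of that case only, since the real
ctl_decode_lun()/ctl_encode_lun() also handle the flat-space and extended
formats:

    #include <stdint.h>

    /*
     * Sketch for SAM "peripheral device" addressing: the first LUN level
     * sits in the top 16 bits, with the LUN number in its low byte.
     */
    static inline uint64_t
    example_encode_lun(uint32_t lun)
    {
    	return ((uint64_t)(lun & 0xff) << 48);
    }

    static inline uint32_t
    example_decode_lun(uint64_t encoded)
    {
    	return ((encoded >> 48) & 0xff);
    }
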
 	case XPT_NOTIFY_ACKNOWLEDGE:
-		/*
-		 * Queue this back down to the SIM as an immediate notify.
-		 */
+		/* Queue this back down to the SIM as an immediate notify. */
+		done_ccb->ccb_h.status = CAM_REQ_INPROG;
 		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
-		xpt_action(done_ccb);
-		softc->inots_sent++;
-		break;
-	case XPT_ABORT:
-		/*
-		 * XPT_ABORT is an immediate CCB, we shouldn't get here.
-		 */
-		panic("%s: XPT_ABORT CCB returned!", __func__);
-		break;
+		ctlfe_requeue_ccb(periph, done_ccb, /* unlock */1);
+		return;
 	case XPT_SET_SIM_KNOB:
 	case XPT_GET_SIM_KNOB:
 		break;
@@ -1664,66 +1505,34 @@
 		      done_ccb->ccb_h.func_code);
 		break;
 	}
+
+out:
+	mtx_unlock(mtx);
 }
 
 static void
 ctlfe_onoffline(void *arg, int online)
 {
-	struct ctlfe_softc *bus_softc;
+	struct ctlfe_softc *bus_softc = arg;
 	union ccb *ccb;
 	cam_status status;
 	struct cam_path *path;
-	struct cam_sim *sim;
-	int set_wwnn;
+	int set_wwnn = 0;
 
-	bus_softc = (struct ctlfe_softc *)arg;
-
-	set_wwnn = 0;
-
-	sim = bus_softc->sim;
-
-	mtx_assert(sim->mtx, MA_OWNED);
-
 	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
-		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 	if (status != CAM_REQ_CMP) {
 		printf("%s: unable to create path!\n", __func__);
 		return;
 	}
-	ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO);
-	if (ccb == NULL) {
-		printf("%s: unable to malloc CCB!\n", __func__);
-		xpt_free_path(path);
-		return;
-	}
+	ccb = xpt_alloc_ccb();
 	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
+	ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
+	xpt_action(ccb);
 
-	/*
-	 * Copan WWN format:
-	 *
-	 * Bits 63-60:	0x5		NAA, IEEE registered name
-	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
-	 * Bits 35-12:			Copan SSN (Sequential Serial Number)
-	 * Bits 11-8:			Type of port:
-	 *					1 == N-Port
-	 *					2 == F-Port
-	 *					3 == NL-Port
-	 * Bits 7-0:			0 == Node Name, >0 == Port Number
-	 */
-
+	/* Check whether we should change WWNs. */
 	if (online != 0) {
-
-		ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
-
-
-		xpt_action(ccb);
-
-
 		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){
-#ifdef RANDOM_WWNN
-			uint64_t random_bits;
-#endif
-
 			printf("%s: %s current WWNN %#jx\n", __func__,
 			       bus_softc->port_name,
 			       ccb->knob.xport_specific.fc.wwnn);
@@ -1731,109 +1540,88 @@
 			       bus_softc->port_name,
 			       ccb->knob.xport_specific.fc.wwpn);
 
-#ifdef RANDOM_WWNN
-			arc4rand(&random_bits, sizeof(random_bits), 0);
-#endif
-
 			/*
-			 * XXX KDM this is a bit of a kludge for now.  We
-			 * take the current WWNN/WWPN from the card, and
-			 * replace the company identifier and the NL-Port
-			 * indicator and the port number (for the WWPN).
-			 * This should be replaced later with ddb_GetWWNN,
-			 * or possibly a more centralized scheme.  (It
-			 * would be nice to have the WWNN/WWPN for each
-			 * port stored in the ctl_frontend structure.)
-			 */
-#ifdef RANDOM_WWNN
-			ccb->knob.xport_specific.fc.wwnn = 
-				(random_bits &
-				0x0000000fffffff00ULL) |
-				/* Company ID */ 0x5000ED5000000000ULL |
-				/* NL-Port */    0x0300;
-			ccb->knob.xport_specific.fc.wwpn = 
-				(random_bits &
-				0x0000000fffffff00ULL) |
-				/* Company ID */ 0x5000ED5000000000ULL |
-				/* NL-Port */    0x3000 |
-				/* Port Num */ (bus_softc->fe.targ_port & 0xff);
-
-			/*
-			 * This is a bit of an API break/reversal, but if
-			 * we're doing the random WWNN that's a little
-			 * different anyway.  So record what we're actually
-			 * using with the frontend code so it's reported
-			 * accurately.
-			 */
-			bus_softc->fe.wwnn = 
-				ccb->knob.xport_specific.fc.wwnn;
-			bus_softc->fe.wwpn = 
-				ccb->knob.xport_specific.fc.wwpn;
-			set_wwnn = 1;
-#else /* RANDOM_WWNN */
-			/*
 			 * If the user has specified a WWNN/WWPN, send them
 			 * down to the SIM.  Otherwise, record what the SIM
 			 * has reported.
 			 */
-			if ((bus_softc->fe.wwnn != 0)
-			 && (bus_softc->fe.wwpn != 0)) {
+			if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn
+			    != ccb->knob.xport_specific.fc.wwnn) {
 				ccb->knob.xport_specific.fc.wwnn =
-					bus_softc->fe.wwnn;
+				    bus_softc->port.wwnn;
+				set_wwnn = 1;
+			} else {
+				ctl_port_set_wwns(&bus_softc->port,
+				    true, ccb->knob.xport_specific.fc.wwnn,
+				    false, 0);
+			}
+			if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn
+			     != ccb->knob.xport_specific.fc.wwpn) {
 				ccb->knob.xport_specific.fc.wwpn =
-					bus_softc->fe.wwpn;
+				    bus_softc->port.wwpn;
 				set_wwnn = 1;
 			} else {
-				bus_softc->fe.wwnn =
-					ccb->knob.xport_specific.fc.wwnn;
-				bus_softc->fe.wwpn =
-					ccb->knob.xport_specific.fc.wwpn;
+				ctl_port_set_wwns(&bus_softc->port,
+				    false, 0,
+				    true, ccb->knob.xport_specific.fc.wwpn);
 			}
-#endif /* RANDOM_WWNN */
-
-
-			if (set_wwnn != 0) {
-				printf("%s: %s new WWNN %#jx\n", __func__,
-				       bus_softc->port_name,
-				ccb->knob.xport_specific.fc.wwnn);
-				printf("%s: %s new WWPN %#jx\n", __func__,
-				       bus_softc->port_name,
-				       ccb->knob.xport_specific.fc.wwpn);
-			}
 		} else {
 			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
 			       bus_softc->port_name);
+			if (bus_softc->port.wwnn != 0) {
+				ccb->knob.xport_specific.fc.wwnn =
+				    bus_softc->port.wwnn;
+				set_wwnn = 1;
+			}
+			if (bus_softc->port.wwpn != 0) {
+				ccb->knob.xport_specific.fc.wwpn =
+				    bus_softc->port.wwpn;
+				set_wwnn = 1;
+			}
 		}
 	}
-	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
-	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
-	if (set_wwnn != 0)
-		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;
+	if (set_wwnn) {
+		ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
+		ccb->knob.xport_specific.valid = KNOB_VALID_ADDRESS;
+		xpt_action(ccb);
+		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+			printf("%s: %s (path id %d) failed set WWNs: %#x\n",
+			    __func__, bus_softc->port_name, bus_softc->path_id,
+			    ccb->ccb_h.status);
+		} else {
+			printf("%s: %s new WWNN %#jx\n", __func__,
+			       bus_softc->port_name,
+			       ccb->knob.xport_specific.fc.wwnn);
+			printf("%s: %s new WWPN %#jx\n", __func__,
+			       bus_softc->port_name,
+			       ccb->knob.xport_specific.fc.wwpn);
+		}
+	}
 
-	if (online != 0)
-		ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET;
-	else
-		ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE;
-
-	xpt_action(ccb);
-
-	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
-		printf("%s: SIM %s (path id %d) target %s failed with "
-		       "status %#x\n",
-		       __func__, bus_softc->port_name, bus_softc->path_id,
-		       (online != 0) ? "enable" : "disable",
-		       ccb->ccb_h.status);
-	} else {
-		printf("%s: SIM %s (path id %d) target %s succeeded\n",
-		       __func__, bus_softc->port_name, bus_softc->path_id,
-		       (online != 0) ? "enable" : "disable");
+	/* Check whether we should change role. */
+	if ((ccb->knob.xport_specific.valid & KNOB_VALID_ROLE) == 0 ||
+	    ((online != 0) ^
+	    ((ccb->knob.xport_specific.fc.role & KNOB_ROLE_TARGET) != 0)) != 0) {
+		ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
+		ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
+		if (online)
+			ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET;
+		else
+			ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET;
+		xpt_action(ccb);
+		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+			printf("%s: %s (path id %d) failed %s target role: %#x\n",
+			    __func__, bus_softc->port_name, bus_softc->path_id,
+			    online ? "enable" : "disable", ccb->ccb_h.status);
+		} else {
+			printf("%s: %s (path id %d) target role %s succeeded\n",
+			    __func__, bus_softc->port_name, bus_softc->path_id,
+			    online ? "enable" : "disable");
+		}
 	}
 
 	xpt_free_path(path);
-
-	free(ccb, M_TEMP);
-
-	return;
+	xpt_free_ccb(ccb);
 }
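
The role update above fires only when the wanted and reported states
disagree: the (online != 0) ^ ((fc.role & KNOB_ROLE_TARGET) != 0) test is
an XOR of two booleans.  A standalone sketch (KNOB_ROLE_TARGET's value here
is assumed for illustration):

    #include <stdio.h>

    #define KNOB_ROLE_TARGET 0x02	/* assumed value for illustration */

    static int
    role_change_needed(int online, int role)
    {
    	/* XOR of two booleans: true exactly when they differ. */
    	return ((online != 0) ^ ((role & KNOB_ROLE_TARGET) != 0));
    }

    int
    main(void)
    {
    	printf("%d\n", role_change_needed(1, 0));		  /* 1: enable */
    	printf("%d\n", role_change_needed(1, KNOB_ROLE_TARGET));  /* 0: no-op */
    	printf("%d\n", role_change_needed(0, KNOB_ROLE_TARGET));  /* 1: disable */
    	return (0);
    }
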
 
 static void
@@ -1843,13 +1631,10 @@
 	struct cam_path *path;
 	cam_status status;
 	struct ctlfe_lun_softc *lun_softc;
-	struct cam_sim *sim;
+	struct cam_periph *periph;
 
 	bus_softc = (struct ctlfe_softc *)arg;
-	sim = bus_softc->sim;
 
-	CAM_SIM_LOCK(sim);
-
 	/*
 	 * Create the wildcard LUN before bringing the port online.
 	 */
@@ -1859,26 +1644,23 @@
 	if (status != CAM_REQ_CMP) {
 		printf("%s: unable to create path for wildcard periph\n",
 				__func__);
-		CAM_SIM_UNLOCK(sim);
 		return;
 	}
 
-	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE,
-			M_NOWAIT | M_ZERO);
-	if (lun_softc == NULL) {
-		xpt_print(path, "%s: unable to allocate softc for "
-				"wildcard periph\n", __func__);
+	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO);
+
+	xpt_path_lock(path);
+	periph = cam_periph_find(path, "ctl");
+	if (periph != NULL) {
+		/* We've already got a periph, no need to alloc a new one. */
+		xpt_path_unlock(path);
 		xpt_free_path(path);
-		CAM_SIM_UNLOCK(sim);
+		free(lun_softc, M_CTLFE);
 		return;
 	}
-
 	lun_softc->parent_softc = bus_softc;
 	lun_softc->flags |= CTLFE_LUN_WILDCARD;
 
-	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links);
-
-
 	status = cam_periph_alloc(ctlferegister,
 				  ctlfeoninvalidate,
 				  ctlfecleanup,
@@ -1894,17 +1676,15 @@
 		const struct cam_status_entry *entry;
 
 		entry = cam_fetch_status_entry(status);
-
 		printf("%s: CAM error %s (%#x) returned from "
 		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
 		       entry->status_text : "Unknown", status);
+		free(lun_softc, M_CTLFE);
 	}
 
+	xpt_path_unlock(path);
+	ctlfe_onoffline(arg, /*online*/ 1);
 	xpt_free_path(path);
-
-	ctlfe_onoffline(arg, /*online*/ 1);
-
-	CAM_SIM_UNLOCK(sim);
 }
 
 static void
@@ -1914,13 +1694,9 @@
 	struct cam_path *path;
 	cam_status status;
 	struct cam_periph *periph;
-	struct cam_sim *sim;
 
 	bus_softc = (struct ctlfe_softc *)arg;
-	sim = bus_softc->sim;
 
-	CAM_SIM_LOCK(sim);
-
 	ctlfe_onoffline(arg, /*online*/ 0);
 
 	/*
@@ -1931,53 +1707,36 @@
 				 bus_softc->path_id, CAM_TARGET_WILDCARD,
 				 CAM_LUN_WILDCARD);
 	if (status != CAM_REQ_CMP) {
-		CAM_SIM_UNLOCK(sim);
 		printf("%s: unable to create path for wildcard periph\n",
 		       __func__);
 		return;
 	}
-
-
+	xpt_path_lock(path);
 	if ((periph = cam_periph_find(path, "ctl")) != NULL)
 		cam_periph_invalidate(periph);
-
+	xpt_path_unlock(path);
 	xpt_free_path(path);
-
-	CAM_SIM_UNLOCK(sim);
 }
 
-static int
-ctlfe_targ_enable(void *arg, struct ctl_id targ_id)
-{
-	return (0);
-}
-
-static int
-ctlfe_targ_disable(void *arg, struct ctl_id targ_id)
-{
-	return (0);
-}
-
 /*
  * This will get called to enable a LUN on every bus that is attached to
  * CTL.  So we only need to create a path/periph for this particular bus.
  */
 static int
-ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
+ctlfe_lun_enable(void *arg, int lun_id)
 {
 	struct ctlfe_softc *bus_softc;
 	struct ctlfe_lun_softc *softc;
 	struct cam_path *path;
 	struct cam_periph *periph;
-	struct cam_sim *sim;
 	cam_status status;
 
 	bus_softc = (struct ctlfe_softc *)arg;
-	sim = bus_softc->sim;
+	if (bus_softc->hba_misc & PIM_EXTLUNS)
+		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
 
-	status = xpt_create_path_unlocked(&path, /*periph*/ NULL,
-					  bus_softc->path_id,
-					  targ_id.id, lun_id);
+	status = xpt_create_path(&path, /*periph*/ NULL,
+	    bus_softc->path_id, bus_softc->target_id, lun_id);
 	/* XXX KDM need some way to return status to CTL here? */
 	if (status != CAM_REQ_CMP) {
 		printf("%s: could not create path, status %#x\n", __func__,
@@ -1986,18 +1745,16 @@
 	}
 
 	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
-	CAM_SIM_LOCK(sim);
+	xpt_path_lock(path);
 	periph = cam_periph_find(path, "ctl");
 	if (periph != NULL) {
 		/* We've already got a periph, no need to alloc a new one. */
+		xpt_path_unlock(path);
 		xpt_free_path(path);
 		free(softc, M_CTLFE);
-		CAM_SIM_UNLOCK(sim);
 		return (0);
 	}
-
 	softc->parent_softc = bus_softc;
-	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
 
 	status = cam_periph_alloc(ctlferegister,
 				  ctlfeoninvalidate,
@@ -2010,10 +1767,18 @@
 				  0,
 				  softc);
 
-	xpt_free_path(path);
+	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		const struct cam_status_entry *entry;
 
-	CAM_SIM_UNLOCK(sim);
+		entry = cam_fetch_status_entry(status);
+		printf("%s: CAM error %s (%#x) returned from "
+		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
+		       entry->status_text : "Unknown", status);
+		free(softc, M_CTLFE);
+	}
 
+	xpt_path_unlock(path);
+	xpt_free_path(path);
 	return (0);
 }
 
@@ -2022,37 +1787,38 @@
  * on every bus that is attached to CTL.  
  */
 static int
-ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
+ctlfe_lun_disable(void *arg, int lun_id)
 {
 	struct ctlfe_softc *softc;
 	struct ctlfe_lun_softc *lun_softc;
-	struct cam_sim *sim;
 
 	softc = (struct ctlfe_softc *)arg;
-	sim = softc->sim;
+	if (softc->hba_misc & PIM_EXTLUNS)
+		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
 
-	CAM_SIM_LOCK(sim);
+	mtx_lock(&softc->lun_softc_mtx);
 	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
 		struct cam_path *path;
 
 		path = lun_softc->periph->path;
 
-		if ((xpt_path_target_id(path) == targ_id.id)
+		if ((xpt_path_target_id(path) == softc->target_id)
 		 && (xpt_path_lun_id(path) == lun_id)) {
 			break;
 		}
 	}
 	if (lun_softc == NULL) {
-		CAM_SIM_UNLOCK(sim);
-		printf("%s: can't find target %d lun %d\n", __func__,
-		       targ_id.id, lun_id);
+		mtx_unlock(&softc->lun_softc_mtx);
+		printf("%s: can't find lun %d\n", __func__, lun_id);
 		return (1);
 	}
+	cam_periph_acquire(lun_softc->periph);
+	mtx_unlock(&softc->lun_softc_mtx);
 
+	cam_periph_lock(lun_softc->periph);
 	cam_periph_invalidate(lun_softc->periph);
-
-	CAM_SIM_UNLOCK(sim);
-
+	cam_periph_unlock(lun_softc->periph);
+	cam_periph_release(lun_softc->periph);
 	return (0);
 }
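
Note the ordering in ctlfe_lun_disable() above: the periph reference is
taken while lun_softc_mtx is still held, so the periph cannot disappear
between dropping the list mutex and taking the periph lock.  The general
shape of the pattern, reduced to a toy refcount (hypothetical helpers, not
the CAM API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
    	atomic_int refs;
    };

    static void
    obj_acquire(struct obj *o)
    {
    	atomic_fetch_add(&o->refs, 1);
    }

    static void
    obj_release(struct obj *o)
    {
    	/* fetch_sub returns the old value; 1 means we held the last ref. */
    	if (atomic_fetch_sub(&o->refs, 1) == 1) {
    		printf("last ref dropped; freeing\n");
    		free(o);
    	}
    }

    int
    main(void)
    {
    	struct obj *o = calloc(1, sizeof(*o));

    	obj_acquire(o);		/* pin before dropping the list lock */
    	/* ... drop list mutex, lock object, invalidate, unlock ... */
    	obj_release(o);		/* may free the object */
    	return (0);
    }
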
 
@@ -2059,26 +1825,10 @@
 static void
 ctlfe_dump_sim(struct cam_sim *sim)
 {
-	int i;
 
-	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
-	       sim->sim_name, sim->unit_number,
-	       sim->max_tagged_dev_openings, sim->max_dev_openings);
-	printf("%s%d: max_ccbs: %u, ccb_count: %u\n", 
-	       sim->sim_name, sim->unit_number,
-	       sim->max_ccbs, sim->ccb_count);
-	printf("%s%d: ccb_freeq is %sempty\n",
-	       sim->sim_name, sim->unit_number,
-	       (SLIST_FIRST(&sim->ccb_freeq) == NULL) ? "" : "NOT ");
-	printf("%s%d: alloc_queue.entries %d, alloc_openings %d\n",
-	       sim->sim_name, sim->unit_number,
-	       sim->devq->alloc_queue.entries, sim->devq->alloc_openings);
-	printf("%s%d: qfrozen_cnt:", sim->sim_name, sim->unit_number);
-	for (i = 0; i < CAM_RL_VALUES; i++) {
-		printf("%s%u", (i != 0) ? ":" : "",
-		sim->devq->alloc_queue.qfrozen_cnt[i]);
-	}
-	printf("\n");
+	printf("%s%d: max dev openings: %d, max tagged dev openings: %d\n",
+	    sim->sim_name, sim->unit_number, sim->max_dev_openings,
+	    sim->max_tagged_dev_openings);
 }
 
 /*
@@ -2087,35 +1837,29 @@
 static void
 ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
 {
+	struct cam_periph *periph = softc->periph;
 	struct ccb_hdr *hdr;
-	struct cam_periph *periph;
+	struct ccb_getdevstats cgds;
 	int num_items;
 
-	periph = softc->periph;
+	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
+	cgds.ccb_h.func_code = XPT_GDEV_STATS;
+	xpt_action((union ccb *)&cgds);
+	if ((cgds.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+		xpt_print(periph->path, "devq: openings %d, active %d, "
+		    "allocated %d, queued %d, held %d\n",
+		    cgds.dev_openings, cgds.dev_active, cgds.allocated,
+		    cgds.queued, cgds.held);
+	}
+
 	num_items = 0;
 
-	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
-		union ctl_io *io;
+	STAILQ_FOREACH(hdr, &softc->work_queue, periph_links.stqe) {
+		union ctl_io *io = hdr->io_ptr;
 
-		io = hdr->io_ptr;
-
 		num_items++;
 
 		/*
-		 * This can happen when we get an ATIO but can't allocate
-		 * a ctl_io.  See the XPT_ACCEPT_TARGET_IO case in ctlfedone().
-		 */
-		if (io == NULL) {
-			struct ccb_scsiio *csio;
-
-			csio = (struct ccb_scsiio *)hdr;
-
-			xpt_print(periph->path, "CCB %#x ctl_io allocation "
-				  "failed\n", csio->tag_id);
-			continue;
-		}
-
-		/*
 		 * Only regular SCSI I/O is put on the work
 		 * queue, so we can print sense here.  There may be no
 		 * sense if it's on the queue for a DMA, but this serves to
@@ -2127,143 +1871,103 @@
 		ctl_io_error_print(io, NULL);
 
 		/*
-		 * We're sending status back to the
-		 * initiator, so we're on the queue waiting
-		 * for a CTIO to do that.
+		 * Print DMA status if we are DMA_QUEUED.
 		 */
-		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
-			continue;
-
-		/*
-		 * Otherwise, we're on the queue waiting to
-		 * do a data transfer.
-		 */
-		xpt_print(periph->path, "Total %u, Current %u, Resid %u\n",
-			  io->scsiio.kern_total_len, io->scsiio.kern_data_len,
-			  io->scsiio.kern_data_resid);
+		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
+			xpt_print(periph->path,
+			    "Total %u, Current %u, Resid %u\n",
+			    io->scsiio.kern_total_len,
+			    io->scsiio.kern_data_len,
+			    io->scsiio.kern_data_resid);
+		}
 	}
 
-	xpt_print(periph->path, "%d requests total waiting for CCBs\n",
-		  num_items);
-	xpt_print(periph->path, "%ju CCBs oustanding (%ju allocated, %ju "
-		  "freed)\n", (uintmax_t)(softc->ccbs_alloced -
-		  softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
-		  (uintmax_t)softc->ccbs_freed);
-	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
-		  "returned\n", (uintmax_t)(softc->ctios_sent -
-		  softc->ctios_returned), softc->ctios_sent,
-		  softc->ctios_returned);
+	xpt_print(periph->path, "%d requests waiting for CCBs\n", num_items);
+	xpt_print(periph->path, "%d CTIOs outstanding\n", softc->ctios_sent);
 }
 
 /*
- * This function is called when we fail to get a CCB for a DMA or status return
- * to the initiator within the specified time period.
- *
- * The callout code should insure that we hold the sim mutex here.
+ * Datamove/done routine called by CTL.  Put ourselves on the queue to
+ * receive a CCB from CAM so we can queue the continue I/O request down
+ * to the adapter.
  */
 static void
-ctlfe_dma_timeout(void *arg)
+ctlfe_datamove(union ctl_io *io)
 {
+	union ccb *ccb;
+	struct cam_periph *periph;
 	struct ctlfe_lun_softc *softc;
-	struct cam_periph *periph;
-	struct cam_sim *sim;
-	int num_queued;
 
-	softc = (struct ctlfe_lun_softc *)arg;
-	periph = softc->periph;
-	sim = xpt_path_sim(periph->path);
-	num_queued = 0;
+	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
+	    ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));
 
-	/*
-	 * Nothing to do...
-	 */
-	if (TAILQ_FIRST(&softc->work_queue) == NULL) {
-		xpt_print(periph->path, "TIMEOUT triggered after %d "
-			  "seconds, but nothing on work queue??\n",
-			  CTLFE_DMA_TIMEOUT);
-		return;
-	}
-
-	xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to "
-		  "start\n", CTLFE_DMA_TIMEOUT);
-
-	ctlfe_dump_queue(softc);
-
-	ctlfe_dump_sim(sim);
-
-	xpt_print(periph->path, "calling xpt_schedule() to attempt to "
-		  "unstick our queue\n");
-
-	xpt_schedule(periph, /*priority*/ 1);
-
-	xpt_print(periph->path, "xpt_schedule() call complete\n");
+	io->scsiio.ext_data_filled = 0;
+	ccb = PRIV_CCB(io);
+	periph = xpt_path_periph(ccb->ccb_h.path);
+	cam_periph_lock(periph);
+	softc = (struct ctlfe_lun_softc *)periph->softc;
+	io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
+	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
+		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
+	STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
+			  periph_links.stqe);
+	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
+	cam_periph_unlock(periph);
 }
 
-/*
- * Datamove/done routine called by CTL.  Put ourselves on the queue to
- * receive a CCB from CAM so we can queue the continue I/O request down
- * to the adapter.
- */
 static void
-ctlfe_datamove_done(union ctl_io *io)
+ctlfe_done(union ctl_io *io)
 {
 	union ccb *ccb;
-	struct cam_sim *sim;
 	struct cam_periph *periph;
 	struct ctlfe_lun_softc *softc;
 
-	ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
-
-	sim = xpt_path_sim(ccb->ccb_h.path);
-
-	CAM_SIM_LOCK(sim);
-
+	ccb = PRIV_CCB(io);
 	periph = xpt_path_periph(ccb->ccb_h.path);
-
+	cam_periph_lock(periph);
 	softc = (struct ctlfe_lun_softc *)periph->softc;
 
 	if (io->io_hdr.io_type == CTL_IO_TASK) {
 		/*
-		 * Task management commands don't require any further
-		 * communication back to the adapter.  Requeue the CCB
-		 * to the adapter, and free the CTL I/O.
-		 */
-		xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
-			  "tag %#x seq %#x\n", __func__,
-			  ccb->cin1.tag_id, ccb->cin1.seq_id);
-		/*
 		 * Send the notify acknowledge down to the SIM, to let it
 		 * know we processed the task management command.
 		 */
 		ccb->ccb_h.status = CAM_REQ_INPROG;
 		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
+		switch (io->taskio.task_status) {
+		case CTL_TASK_FUNCTION_COMPLETE:
+			ccb->cna2.arg = CAM_RSP_TMF_COMPLETE;
+			break;
+		case CTL_TASK_FUNCTION_SUCCEEDED:
+			ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED;
+			ccb->ccb_h.flags |= CAM_SEND_STATUS;
+			break;
+		case CTL_TASK_FUNCTION_REJECTED:
+			ccb->cna2.arg = CAM_RSP_TMF_REJECTED;
+			ccb->ccb_h.flags |= CAM_SEND_STATUS;
+			break;
+		case CTL_TASK_LUN_DOES_NOT_EXIST:
+			ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN;
+			ccb->ccb_h.flags |= CAM_SEND_STATUS;
+			break;
+		case CTL_TASK_FUNCTION_NOT_SUPPORTED:
+			ccb->cna2.arg = CAM_RSP_TMF_FAILED;
+			ccb->ccb_h.flags |= CAM_SEND_STATUS;
+			break;
+		}
+		ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8;
 		xpt_action(ccb);
-		ctl_free_io(io);
+	} else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) {
+		ctlfe_requeue_ccb(periph, ccb, /* unlock */1);
+		return;
 	} else {
-		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
-			io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
-		else
-			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
-
-		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
-				  periph_links.tqe);
-
-		/*
-		 * Reset the timeout for our latest active DMA.
-		 */
-		callout_reset(&softc->dma_callout,
-			      CTLFE_DMA_TIMEOUT * hz,
-			      ctlfe_dma_timeout, softc);
-		/*
-		 * Ask for the CAM transport layer to send us a CCB to do
-		 * the DMA or send status, unless ctlfe_dma_enabled is set
-		 * to 0.
-		 */
-		if (ctlfe_dma_enabled != 0)
-			xpt_schedule(periph, /*priority*/ 1);
+		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
+		STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
+				  periph_links.stqe);
+		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 	}
 
-	CAM_SIM_UNLOCK(sim);
+	cam_periph_unlock(periph);
 }
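
In the TMF completion path above, cna2.arg carries the response code in its
low byte with the 3-byte task_resp field shifted in above it.
scsi_3btoul() is simply a big-endian 3-byte load; an equivalent sketch (not
the header's exact text):

    #include <stdint.h>

    static inline uint32_t
    example_3btoul(const uint8_t *b)
    {
    	/* Big-endian 3-byte field -> host uint32_t. */
    	return (((uint32_t)b[0] << 16) | ((uint32_t)b[1] << 8) | b[2]);
    }
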
 
 static void
@@ -2270,14 +1974,11 @@
 ctlfe_dump(void)
 {
 	struct ctlfe_softc *bus_softc;
+	struct ctlfe_lun_softc *lun_softc;
 
 	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
-		struct ctlfe_lun_softc *lun_softc;
-
 		ctlfe_dump_sim(bus_softc->sim);
-
-		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) {
+		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links)
 			ctlfe_dump_queue(lun_softc);
-		}
 	}
 }

Modified: trunk/sys/cam/scsi/scsi_all.c
===================================================================
--- trunk/sys/cam/scsi/scsi_all.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_all.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Implementation of Utility functions for all SCSI device types.
  *
@@ -28,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_all.c 323987 2017-09-25 18:26:31Z jkim $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -44,11 +45,13 @@
 #include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/sysctl.h>
+#include <sys/ctype.h>
 #else
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <ctype.h>
 #endif
 
 #include <cam/cam.h>
@@ -173,7 +176,7 @@
 	 *
 	 * SCSI Operation Codes
 	 * Numeric Sorted Listing
-	 * as of  3/11/08
+	 * as of  5/26/15
 	 *
 	 *     D - DIRECT ACCESS DEVICE (SBC-2)                device column key
 	 *     .T - SEQUENTIAL ACCESS DEVICE (SSC-2)           -----------------
@@ -465,13 +468,10 @@
 	{ 0x86,	ALL & ~(L | R | F), "ACCESS CONTROL IN" },
 	/* 87  OO OO OOOOOOO   ACCESS CONTROL OUT */
 	{ 0x87,	ALL & ~(L | R | F), "ACCESS CONTROL OUT" },
-	/*
-	 * XXX READ(16)/WRITE(16) were not listed for CD/DVE in op-num.txt
-	 * but we had it since r1.40.  Do we really want them?
-	 */
 	/* 88  MM  O O   O     READ(16) */
 	{ 0x88,	D | T | W | O | B, "READ(16)" },
-	/* 89 */
+	/* 89  O               COMPARE AND WRITE */
+	{ 0x89,	D, "COMPARE AND WRITE" },
 	/* 8A  OM  O O   O     WRITE(16) */
 	{ 0x8A,	D | T | W | O | B, "WRITE(16)" },
 	/* 8B  O               ORWRITE */
@@ -498,16 +498,22 @@
 	{ 0x93,	D, "WRITE SAME(16)" },
 	/* 93   M              ERASE(16) */
 	{ 0x93,	T, "ERASE(16)" },
-	/* 94 [usage proposed by SCSI Socket Services project] */
-	/* 95 [usage proposed by SCSI Socket Services project] */
-	/* 96 [usage proposed by SCSI Socket Services project] */
-	/* 97 [usage proposed by SCSI Socket Services project] */
+	/* 94  O               ZBC OUT */
+	{ 0x94,	D, "ZBC OUT" },
+	/* 95  O               ZBC IN */
+	{ 0x95,	D, "ZBC IN" },
+	/* 96 */
+	/* 97 */
 	/* 98 */
 	/* 99 */
-	/* 9A */
-	/* 9B */
-	/* 9C */
-	/* 9D */
+	/* 9A  O               WRITE STREAM(16) */
+	{ 0x9A,	D, "WRITE STREAM(16)" },
+	/* 9B  OOOOOOOOOO OOO  READ BUFFER(16) */
+	{ 0x9B,	ALL & ~(B) , "READ BUFFER(16)" },
+	/* 9C  O              WRITE ATOMIC(16) */
+	{ 0x9C, D, "WRITE ATOMIC(16)" },
+	/* 9D                  SERVICE ACTION BIDIRECTIONAL */
+	{ 0x9D, ALL, "SERVICE ACTION BIDIRECTIONAL" },
 	/* XXX KDM ALL for this?  op-num.txt defines it for none.. */
 	/* 9E                  SERVICE ACTION IN(16) */
 	{ 0x9E, ALL, "SERVICE ACTION IN(16)" },
@@ -665,6 +671,10 @@
 	if (pd_type == T_RBC)
 		pd_type = T_DIRECT;
 
+	/* Map NODEVICE to Direct Access Device to handle REPORT LUNS, etc. */
+	if (pd_type == T_NODEVICE)
+		pd_type = T_DIRECT;
+
 	opmask = 1 << pd_type;
 
 	for (j = 0; j < num_tables; j++) {
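
The device-type columns in the opcode table (D, T, W, ...) are stored as a
bitmask per entry, and the lookup tests membership with
opmask = 1 << pd_type, which is why T_RBC and T_NODEVICE are remapped to
T_DIRECT before the shift.  A standalone sketch of the match, with
hypothetical mask values:

    #include <stdio.h>

    #define D   0x001	/* direct access - hypothetical bit assignments */
    #define T   0x002	/* sequential access */

    struct op_entry {
    	unsigned char	opcode;
    	unsigned short	opmask;
    	const char	*desc;
    };

    static const struct op_entry table[] = {
    	{ 0x88, D | T, "READ(16)" },
    	{ 0x89, D,     "COMPARE AND WRITE" },
    };

    int
    main(void)
    {
    	int pd_type = 1;			/* sequential access device */
    	unsigned short opmask = 1 << pd_type;	/* -> T */

    	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
    		if (table[i].opcode == 0x89 && (table[i].opmask & opmask))
    			printf("%s\n", table[i].desc);
    	/* Prints nothing: COMPARE AND WRITE is not defined for tape. */
    	return (0);
    }
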
@@ -737,6 +747,172 @@
 	     "Logical unit not ready, cause not reportable") }
 };
 
+static struct asc_table_entry hgst_entries[] = {
+	{ SST(0x04, 0xF0, SS_RDEF,
+	    "Vendor Unique - Logical Unit Not Ready") },
+	{ SST(0x0A, 0x01, SS_RDEF,
+	    "Unrecovered Super Certification Log Write Error") },
+	{ SST(0x0A, 0x02, SS_RDEF,
+	    "Unrecovered Super Certification Log Read Error") },
+	{ SST(0x15, 0x03, SS_RDEF,
+	    "Unrecovered Sector Error") },
+	{ SST(0x3E, 0x04, SS_RDEF,
+	    "Unrecovered Self-Test Hard-Cache Test Fail") },
+	{ SST(0x3E, 0x05, SS_RDEF,
+	    "Unrecovered Self-Test OTF-Cache Fail") },
+	{ SST(0x40, 0x00, SS_RDEF,
+	    "Unrecovered SAT No Buffer Overflow Error") },
+	{ SST(0x40, 0x01, SS_RDEF,
+	    "Unrecovered SAT Buffer Overflow Error") },
+	{ SST(0x40, 0x02, SS_RDEF,
+	    "Unrecovered SAT No Buffer Overflow With ECS Fault") },
+	{ SST(0x40, 0x03, SS_RDEF,
+	    "Unrecovered SAT Buffer Overflow With ECS Fault") },
+	{ SST(0x40, 0x81, SS_RDEF,
+	    "DRAM Failure") },
+	{ SST(0x44, 0x0B, SS_RDEF,
+	    "Vendor Unique - Internal Target Failure") },
+	{ SST(0x44, 0xF2, SS_RDEF,
+	    "Vendor Unique - Internal Target Failure") },
+	{ SST(0x44, 0xF6, SS_RDEF,
+	    "Vendor Unique - Internal Target Failure") },
+	{ SST(0x44, 0xF9, SS_RDEF,
+	    "Vendor Unique - Internal Target Failure") },
+	{ SST(0x44, 0xFA, SS_RDEF,
+	    "Vendor Unique - Internal Target Failure") },
+	{ SST(0x5D, 0x22, SS_RDEF,
+	    "Extreme Over-Temperature Warning") },
+	{ SST(0x5D, 0x50, SS_RDEF,
+	    "Load/Unload cycle Count Warning") },
+	{ SST(0x81, 0x00, SS_RDEF,
+	    "Vendor Unique - Internal Logic Error") },
+	{ SST(0x85, 0x00, SS_RDEF,
+	    "Vendor Unique - Internal Key Seed Error") },
+};
+
+static struct asc_table_entry seagate_entries[] = {
+	{ SST(0x04, 0xF0, SS_RDEF,
+	    "Logical Unit Not Ready, super certify in Progress") },
+	{ SST(0x08, 0x86, SS_RDEF,
+	    "Write Fault Data Corruption") },
+	{ SST(0x09, 0x0D, SS_RDEF,
+	    "Tracking Failure") },
+	{ SST(0x09, 0x0E, SS_RDEF,
+	    "ETF Failure") },
+	{ SST(0x0B, 0x5D, SS_RDEF,
+	    "Pre-SMART Warning") },
+	{ SST(0x0B, 0x85, SS_RDEF,
+	    "5V Voltage Warning") },
+	{ SST(0x0B, 0x8C, SS_RDEF,
+	    "12V Voltage Warning") },
+	{ SST(0x0C, 0xFF, SS_RDEF,
+	    "Write Error - Too many error recovery revs") },
+	{ SST(0x11, 0xFF, SS_RDEF,
+	    "Unrecovered Read Error - Too many error recovery revs") },
+	{ SST(0x19, 0x0E, SS_RDEF,
+	    "Fewer than 1/2 defect list copies") },
+	{ SST(0x20, 0xF3, SS_RDEF,
+	    "Illegal CDB linked to skip mask cmd") },
+	{ SST(0x24, 0xF0, SS_RDEF,
+	    "Illegal byte in CDB, LBA not matching") },
+	{ SST(0x24, 0xF1, SS_RDEF,
+	    "Illegal byte in CDB, LEN not matching") },
+	{ SST(0x24, 0xF2, SS_RDEF,
+	    "Mask not matching transfer length") },
+	{ SST(0x24, 0xF3, SS_RDEF,
+	    "Drive formatted without plist") },
+	{ SST(0x26, 0x95, SS_RDEF,
+	    "Invalid Field Parameter - CAP File") },
+	{ SST(0x26, 0x96, SS_RDEF,
+	    "Invalid Field Parameter - RAP File") },
+	{ SST(0x26, 0x97, SS_RDEF,
+	    "Invalid Field Parameter - TMS Firmware Tag") },
+	{ SST(0x26, 0x98, SS_RDEF,
+	    "Invalid Field Parameter - Check Sum") },
+	{ SST(0x26, 0x99, SS_RDEF,
+	    "Invalid Field Parameter - Firmware Tag") },
+	{ SST(0x29, 0x08, SS_RDEF,
+	    "Write Log Dump data") },
+	{ SST(0x29, 0x09, SS_RDEF,
+	    "Write Log Dump data") },
+	{ SST(0x29, 0x0A, SS_RDEF,
+	    "Reserved disk space") },
+	{ SST(0x29, 0x0B, SS_RDEF,
+	    "SDBP") },
+	{ SST(0x29, 0x0C, SS_RDEF,
+	    "SDBP") },
+	{ SST(0x31, 0x91, SS_RDEF,
+	    "Format Corrupted World Wide Name (WWN) is Invalid") },
+	{ SST(0x32, 0x03, SS_RDEF,
+	    "Defect List - Length exceeds Command Allocated Length") },
+	{ SST(0x33, 0x00, SS_RDEF,
+	    "Flash not ready for access") },
+	{ SST(0x3F, 0x70, SS_RDEF,
+	    "Invalid RAP block") },
+	{ SST(0x3F, 0x71, SS_RDEF,
+	    "RAP/ETF mismatch") },
+	{ SST(0x3F, 0x90, SS_RDEF,
+	    "Invalid CAP block") },
+	{ SST(0x3F, 0x91, SS_RDEF,
+	    "World Wide Name (WWN) Mismatch") },
+	{ SST(0x40, 0x01, SS_RDEF,
+	    "DRAM Parity Error") },
+	{ SST(0x40, 0x02, SS_RDEF,
+	    "DRAM Parity Error") },
+	{ SST(0x42, 0x0A, SS_RDEF,
+	    "Loopback Test") },
+	{ SST(0x42, 0x0B, SS_RDEF,
+	    "Loopback Test") },
+	{ SST(0x44, 0xF2, SS_RDEF,
+	    "Compare error during data integrity check") },
+	{ SST(0x44, 0xF6, SS_RDEF,
+	    "Unrecoverable error during data integrity check") },
+	{ SST(0x47, 0x80, SS_RDEF,
+	    "Fibre Channel Sequence Error") },
+	{ SST(0x4E, 0x01, SS_RDEF,
+	    "Information Unit Too Short") },
+	{ SST(0x80, 0x00, SS_RDEF,
+	    "General Firmware Error / Command Timeout") },
+	{ SST(0x80, 0x01, SS_RDEF,
+	    "Command Timeout") },
+	{ SST(0x80, 0x02, SS_RDEF,
+	    "Command Timeout") },
+	{ SST(0x80, 0x80, SS_RDEF,
+	    "FC FIFO Error During Read Transfer") },
+	{ SST(0x80, 0x81, SS_RDEF,
+	    "FC FIFO Error During Write Transfer") },
+	{ SST(0x80, 0x82, SS_RDEF,
+	    "DISC FIFO Error During Read Transfer") },
+	{ SST(0x80, 0x83, SS_RDEF,
+	    "DISC FIFO Error During Write Transfer") },
+	{ SST(0x80, 0x84, SS_RDEF,
+	    "LBA Seeded LRC Error on Read") },
+	{ SST(0x80, 0x85, SS_RDEF,
+	    "LBA Seeded LRC Error on Write") },
+	{ SST(0x80, 0x86, SS_RDEF,
+	    "IOEDC Error on Read") },
+	{ SST(0x80, 0x87, SS_RDEF,
+	    "IOEDC Error on Write") },
+	{ SST(0x80, 0x88, SS_RDEF,
+	    "Host Parity Check Failed") },
+	{ SST(0x80, 0x89, SS_RDEF,
+	    "IOEDC error on read detected by formatter") },
+	{ SST(0x80, 0x8A, SS_RDEF,
+	    "Host Parity Errors / Host FIFO Initialization Failed") },
+	{ SST(0x80, 0x8B, SS_RDEF,
+	    "Host Parity Errors") },
+	{ SST(0x80, 0x8C, SS_RDEF,
+	    "Host Parity Errors") },
+	{ SST(0x80, 0x8D, SS_RDEF,
+	    "Host Parity Errors") },
+	{ SST(0x81, 0x00, SS_RDEF,
+	    "LA Check Failed") },
+	{ SST(0x82, 0x00, SS_RDEF,
+	    "Internal client detected insufficient buffer") },
+	{ SST(0x84, 0x00, SS_RDEF,
+	    "Scheduled Diagnostic And Repair") },
+};
+
 static struct scsi_sense_quirk_entry sense_quirk_table[] = {
 	{
 		/*
@@ -759,6 +935,26 @@
 		sizeof(sony_mo_entries)/sizeof(struct asc_table_entry),
 		/*sense key entries*/NULL,
 		sony_mo_entries
+	},
+	{
+		/*
+		 * HGST vendor-specific error codes
+		 */
+		{T_DIRECT, SIP_MEDIA_FIXED, "HGST", "*", "*"},
+		/*num_sense_keys*/0,
+		sizeof(hgst_entries)/sizeof(struct asc_table_entry),
+		/*sense key entries*/NULL,
+		hgst_entries
+	},
+	{
+		/*
+		 * SEAGATE vendor-specific error codes
+		 */
+		{T_DIRECT, SIP_MEDIA_FIXED, "SEAGATE", "*", "*"},
+		/*num_sense_keys*/0,
+		sizeof(seagate_entries)/sizeof(struct asc_table_entry),
+		/*sense key entries*/NULL,
+		seagate_entries
 	}
 };
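
Each SST() entry pairs an ASC/ASCQ with a recovery action and an errno:
SS_RDEF defers to the sense-key default, SS_NOP | SSQ_PRINT_SENSE only
logs, and entries such as SS_FATAL | ENXIO tell error recovery to fail the
command immediately with that errno.  A sketch of how such an action word
might be consumed (the bit layout below is hypothetical; the real masks
live in scsi_all.h):

    #include <errno.h>
    #include <stdio.h>

    #define SS_NOP		0x00000000	/* hypothetical layout */
    #define SS_FATAL	0x01000000
    #define SS_MASK		0xff000000
    #define SSQ_PRINT_SENSE	0x00010000

    static void
    handle_sense_action(unsigned int action)
    {
    	int error = action & 0xffff;	/* low bits carry the errno */

    	if (action & SSQ_PRINT_SENSE)
    		printf("logging sense data\n");
    	if ((action & SS_MASK) == SS_FATAL)
    		printf("failing command with errno %d\n", error);
    }

    int
    main(void)
    {
    	/* e.g. the new 0x25/0x00 entry above: SS_FATAL | ENXIO. */
    	handle_sense_action(SS_FATAL | ENXIO);
    	return (0);
    }
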
 
@@ -775,7 +971,7 @@
 	 *
 	 * SCSI ASC/ASCQ Assignments
 	 * Numeric Sorted Listing
-	 * as of  5/20/12
+	 * as of  8/12/15
 	 *
 	 * D - DIRECT ACCESS DEVICE (SBC-2)                   device column key
 	 * .T - SEQUENTIAL ACCESS DEVICE (SSC)               -------------------
@@ -856,7 +1052,7 @@
 	{ SST(0x00, 0x1C, SS_RDEF,	/* XXX TBD */
 	    "Verify operation in progress") },
 	/* DT        B    */
-	{ SST(0x00, 0x1D, SS_RDEF,	/* XXX TBD */
+	{ SST(0x00, 0x1D, SS_NOP,
 	    "ATA pass through information available") },
 	/* DT   R MAEBKV  */
 	{ SST(0x00, 0x1E, SS_RDEF,	/* XXX TBD */
@@ -865,8 +1061,11 @@
 	{ SST(0x00, 0x1F, SS_RDEF,	/* XXX TBD */
 	    "Logical unit transitioning to another power condition") },
 	/* DT P      B    */
-	{ SST(0x00, 0x20, SS_RDEF,	/* XXX TBD */
+	{ SST(0x00, 0x20, SS_NOP,
 	    "Extended copy information available") },
+	/* D              */
+	{ SST(0x00, 0x21, SS_RDEF,	/* XXX TBD */
+	    "Atomic command aborted due to ACA") },
 	/* D   W O   BK   */
 	{ SST(0x01, 0x00, SS_RDEF,
 	    "No index/sector signal") },
@@ -886,7 +1085,7 @@
 	{ SST(0x04, 0x00, SS_RDEF,
 	    "Logical unit not ready, cause not reportable") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x04, 0x01, SS_TUR | SSQ_MANY | SSQ_DECREMENT_COUNT | EBUSY,
+	{ SST(0x04, 0x01, SS_WAIT | EBUSY,
 	    "Logical unit is in process of becoming ready") },
 	/* DTLPWROMAEBKVF */
 	{ SST(0x04, 0x02, SS_START | SSQ_DECREMENT_COUNT | ENXIO,
@@ -913,22 +1112,25 @@
 	{ SST(0x04, 0x09, SS_RDEF,	/* XXX TBD */
 	    "Logical unit not ready, self-test in progress") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x04, 0x0A, SS_RDEF,	/* XXX TBD */
+	{ SST(0x04, 0x0A, SS_WAIT | ENXIO,
 	    "Logical unit not accessible, asymmetric access state transition")},
 	/* DTLPWROMAEBKVF */
-	{ SST(0x04, 0x0B, SS_RDEF,	/* XXX TBD */
+	{ SST(0x04, 0x0B, SS_FATAL | ENXIO,
 	    "Logical unit not accessible, target port in standby state") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x04, 0x0C, SS_RDEF,	/* XXX TBD */
+	{ SST(0x04, 0x0C, SS_FATAL | ENXIO,
 	    "Logical unit not accessible, target port in unavailable state") },
 	/*              F */
 	{ SST(0x04, 0x0D, SS_RDEF,	/* XXX TBD */
 	    "Logical unit not ready, structure check required") },
+	/* DTL WR MAEBKVF */
+	{ SST(0x04, 0x0E, SS_RDEF,	/* XXX TBD */
+	    "Logical unit not ready, security session in progress") },
 	/* DT  WROM  B    */
 	{ SST(0x04, 0x10, SS_RDEF,	/* XXX TBD */
 	    "Logical unit not ready, auxiliary memory not accessible") },
 	/* DT  WRO AEB VF */
-	{ SST(0x04, 0x11, SS_RDEF,	/* XXX TBD */
+	{ SST(0x04, 0x11, SS_WAIT | EBUSY,
 	    "Logical unit not ready, notify (enable spinup) required") },
 	/*        M    V  */
 	{ SST(0x04, 0x12, SS_RDEF,	/* XXX TBD */
@@ -963,6 +1165,24 @@
 	/* DT     MAEB    */
 	{ SST(0x04, 0x1C, SS_RDEF,	/* XXX TBD */
 	    "Logical unit not ready, additional power use not yet granted") },
+	/* D              */
+	{ SST(0x04, 0x1D, SS_RDEF,	/* XXX TBD */
+	    "Logical unit not ready, configuration in progress") },
+	/* D              */
+	{ SST(0x04, 0x1E, SS_FATAL | ENXIO,
+	    "Logical unit not ready, microcode activation required") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x04, 0x1F, SS_FATAL | ENXIO,
+	    "Logical unit not ready, microcode download required") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x04, 0x20, SS_RDEF,	/* XXX TBD */
+	    "Logical unit not ready, logical unit reset required") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x04, 0x21, SS_RDEF,	/* XXX TBD */
+	    "Logical unit not ready, hard reset required") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x04, 0x22, SS_RDEF,	/* XXX TBD */
+	    "Logical unit not ready, power cycle required") },
 	/* DTL WROMAEBKVF */
 	{ SST(0x05, 0x00, SS_RDEF,
 	    "Logical unit does not respond to selection") },
@@ -1002,39 +1222,66 @@
 	/* DT  WRO   B    */
 	{ SST(0x09, 0x04, SS_RDEF,
 	    "Head select fault") },
+	/* DT   RO   B    */
+	{ SST(0x09, 0x05, SS_RDEF,
+	    "Vibration induced tracking error") },
 	/* DTLPWROMAEBKVF */
 	{ SST(0x0A, 0x00, SS_FATAL | ENOSPC,
 	    "Error log overflow") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x0B, 0x00, SS_RDEF,
+	{ SST(0x0B, 0x00, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x0B, 0x01, SS_RDEF,
+	{ SST(0x0B, 0x01, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning - specified temperature exceeded") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x0B, 0x02, SS_RDEF,
+	{ SST(0x0B, 0x02, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning - enclosure degraded") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x0B, 0x03, SS_RDEF,	/* XXX TBD */
+	{ SST(0x0B, 0x03, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning - background self-test failed") },
 	/* DTLPWRO AEBKVF */
-	{ SST(0x0B, 0x04, SS_RDEF,	/* XXX TBD */
+	{ SST(0x0B, 0x04, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning - background pre-scan detected medium error") },
 	/* DTLPWRO AEBKVF */
-	{ SST(0x0B, 0x05, SS_RDEF,	/* XXX TBD */
+	{ SST(0x0B, 0x05, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning - background medium scan detected medium error") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x0B, 0x06, SS_RDEF,	/* XXX TBD */
+	{ SST(0x0B, 0x06, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning - non-volatile cache now volatile") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x0B, 0x07, SS_RDEF,	/* XXX TBD */
+	{ SST(0x0B, 0x07, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning - degraded power to non-volatile cache") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x0B, 0x08, SS_RDEF,	/* XXX TBD */
+	{ SST(0x0B, 0x08, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning - power loss expected") },
 	/* D              */
-	{ SST(0x0B, 0x09, SS_RDEF,	/* XXX TBD */
+	{ SST(0x0B, 0x09, SS_NOP | SSQ_PRINT_SENSE,
 	    "Warning - device statistics notification available") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x0B, 0x0A, SS_NOP | SSQ_PRINT_SENSE,
+	    "Warning - High critical temperature limit exceeded") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x0B, 0x0B, SS_NOP | SSQ_PRINT_SENSE,
+	    "Warning - Low critical temperature limit exceeded") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x0B, 0x0C, SS_NOP | SSQ_PRINT_SENSE,
+	    "Warning - High operating temperature limit exceeded") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x0B, 0x0D, SS_NOP | SSQ_PRINT_SENSE,
+	    "Warning - Low operating temperature limit exceeded") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x0B, 0x0E, SS_NOP | SSQ_PRINT_SENSE,
+	    "Warning - High citical humidity limit exceeded") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x0B, 0x0F, SS_NOP | SSQ_PRINT_SENSE,
+	    "Warning - Low citical humidity limit exceeded") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x0B, 0x10, SS_NOP | SSQ_PRINT_SENSE,
+	    "Warning - High operating humidity limit exceeded") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x0B, 0x11, SS_NOP | SSQ_PRINT_SENSE,
+	    "Warning - Low operating humidity limit exceeded") },
 	/*  T   R         */
 	{ SST(0x0C, 0x00, SS_RDEF,
 	    "Write error") },
@@ -1083,6 +1330,15 @@
 	/*      R         */
 	{ SST(0x0C, 0x0F, SS_RDEF,	/* XXX TBD */
 	    "Defects in error window") },
+	/* D              */
+	{ SST(0x0C, 0x10, SS_RDEF,	/* XXX TBD */
+	    "Incomplete multiple atomic write operations") },
+	/* D              */
+	{ SST(0x0C, 0x11, SS_RDEF,	/* XXX TBD */
+	    "Write error - recovery scan needed") },
+	/* D              */
+	{ SST(0x0C, 0x12, SS_RDEF,	/* XXX TBD */
+	    "Write error - insufficient zone resources") },
 	/* DTLPWRO A  K   */
 	{ SST(0x0D, 0x00, SS_RDEF,	/* XXX TBD */
 	    "Error detected by third party temporary initiator") },
@@ -1111,7 +1367,7 @@
 	{ SST(0x0E, 0x02, SS_RDEF,	/* XXX TBD */
 	    "Information unit too long") },
 	/* DT P R MAEBK F */
-	{ SST(0x0E, 0x03, SS_RDEF,	/* XXX TBD */
+	{ SST(0x0E, 0x03, SS_FATAL | EINVAL,
 	    "Invalid field in command information unit") },
 	/* D   W O   BK   */
 	{ SST(0x10, 0x00, SS_RDEF,
@@ -1194,6 +1450,9 @@
 	/* D              */
 	{ SST(0x11, 0x14, SS_RDEF,	/* XXX TBD */
 	    "Read error - LBA marked bad by application client") },
+	/* D              */
+	{ SST(0x11, 0x15, SS_RDEF,	/* XXX TBD */
+	    "Write after sanitize required") },
 	/* D   W O   BK   */
 	{ SST(0x12, 0x00, SS_RDEF,
 	    "Address mark not found for ID field") },
@@ -1351,7 +1610,7 @@
 	{ SST(0x20, 0x01, SS_RDEF,	/* XXX TBD */
 	    "Access denied - initiator pending-enrolled") },
 	/* DT PWROMAEBK   */
-	{ SST(0x20, 0x02, SS_RDEF,	/* XXX TBD */
+	{ SST(0x20, 0x02, SS_FATAL | EPERM,
 	    "Access denied - no access rights") },
 	/* DT PWROMAEBK   */
 	{ SST(0x20, 0x03, SS_RDEF,	/* XXX TBD */
@@ -1396,40 +1655,52 @@
 	{ SST(0x21, 0x03, SS_RDEF,	/* XXX TBD */
 	    "Invalid write crossing layer jump") },
 	/* D              */
+	{ SST(0x21, 0x04, SS_RDEF,	/* XXX TBD */
+	    "Unaligned write command") },
+	/* D              */
+	{ SST(0x21, 0x05, SS_RDEF,	/* XXX TBD */
+	    "Write boundary violation") },
+	/* D              */
+	{ SST(0x21, 0x06, SS_RDEF,	/* XXX TBD */
+	    "Attempt to read invalid data") },
+	/* D              */
+	{ SST(0x21, 0x07, SS_RDEF,	/* XXX TBD */
+	    "Read boundary violation") },
+	/* D              */
 	{ SST(0x22, 0x00, SS_FATAL | EINVAL,
 	    "Illegal function (use 20 00, 24 00, or 26 00)") },
 	/* DT P      B    */
-	{ SST(0x23, 0x00, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x00, SS_FATAL | EINVAL,
 	    "Invalid token operation, cause not reportable") },
 	/* DT P      B    */
-	{ SST(0x23, 0x01, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x01, SS_FATAL | EINVAL,
 	    "Invalid token operation, unsupported token type") },
 	/* DT P      B    */
-	{ SST(0x23, 0x02, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x02, SS_FATAL | EINVAL,
 	    "Invalid token operation, remote token usage not supported") },
 	/* DT P      B    */
-	{ SST(0x23, 0x03, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x03, SS_FATAL | EINVAL,
 	    "Invalid token operation, remote ROD token creation not supported") },
 	/* DT P      B    */
-	{ SST(0x23, 0x04, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x04, SS_FATAL | EINVAL,
 	    "Invalid token operation, token unknown") },
 	/* DT P      B    */
-	{ SST(0x23, 0x05, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x05, SS_FATAL | EINVAL,
 	    "Invalid token operation, token corrupt") },
 	/* DT P      B    */
-	{ SST(0x23, 0x06, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x06, SS_FATAL | EINVAL,
 	    "Invalid token operation, token revoked") },
 	/* DT P      B    */
-	{ SST(0x23, 0x07, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x07, SS_FATAL | EINVAL,
 	    "Invalid token operation, token expired") },
 	/* DT P      B    */
-	{ SST(0x23, 0x08, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x08, SS_FATAL | EINVAL,
 	    "Invalid token operation, token cancelled") },
 	/* DT P      B    */
-	{ SST(0x23, 0x09, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x09, SS_FATAL | EINVAL,
 	    "Invalid token operation, token deleted") },
 	/* DT P      B    */
-	{ SST(0x23, 0x0A, SS_RDEF,	/* XXX TBD */
+	{ SST(0x23, 0x0A, SS_FATAL | EINVAL,
 	    "Invalid token operation, invalid token length") },
 	/* DTLPWROMAEBKVF */
 	{ SST(0x24, 0x00, SS_FATAL | EINVAL,
@@ -1459,7 +1730,7 @@
 	{ SST(0x24, 0x08, SS_RDEF,	/* XXX TBD */
 	    "Invalid XCDB") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x25, 0x00, SS_FATAL | ENXIO,
+	{ SST(0x25, 0x00, SS_FATAL | ENXIO | SSQ_LOST,
 	    "Logical unit not supported") },
 	/* DTLPWROMAEBKVF */
 	{ SST(0x26, 0x00, SS_FATAL | EINVAL,
@@ -1480,28 +1751,28 @@
 	{ SST(0x26, 0x05, SS_RDEF,	/* XXX TBD */
 	    "Data decryption error") },
 	/* DTLPWRO    K   */
-	{ SST(0x26, 0x06, SS_RDEF,	/* XXX TBD */
+	{ SST(0x26, 0x06, SS_FATAL | EINVAL,
 	    "Too many target descriptors") },
 	/* DTLPWRO    K   */
-	{ SST(0x26, 0x07, SS_RDEF,	/* XXX TBD */
+	{ SST(0x26, 0x07, SS_FATAL | EINVAL,
 	    "Unsupported target descriptor type code") },
 	/* DTLPWRO    K   */
-	{ SST(0x26, 0x08, SS_RDEF,	/* XXX TBD */
+	{ SST(0x26, 0x08, SS_FATAL | EINVAL,
 	    "Too many segment descriptors") },
 	/* DTLPWRO    K   */
-	{ SST(0x26, 0x09, SS_RDEF,	/* XXX TBD */
+	{ SST(0x26, 0x09, SS_FATAL | EINVAL,
 	    "Unsupported segment descriptor type code") },
 	/* DTLPWRO    K   */
-	{ SST(0x26, 0x0A, SS_RDEF,	/* XXX TBD */
+	{ SST(0x26, 0x0A, SS_FATAL | EINVAL,
 	    "Unexpected inexact segment") },
 	/* DTLPWRO    K   */
-	{ SST(0x26, 0x0B, SS_RDEF,	/* XXX TBD */
+	{ SST(0x26, 0x0B, SS_FATAL | EINVAL,
 	    "Inline data length exceeded") },
 	/* DTLPWRO    K   */
-	{ SST(0x26, 0x0C, SS_RDEF,	/* XXX TBD */
+	{ SST(0x26, 0x0C, SS_FATAL | EINVAL,
 	    "Invalid operation for copy source or destination") },
 	/* DTLPWRO    K   */
-	{ SST(0x26, 0x0D, SS_RDEF,	/* XXX TBD */
+	{ SST(0x26, 0x0D, SS_FATAL | EINVAL,
 	    "Copy segment granularity violation") },
 	/* DT PWROMAEBK   */
 	{ SST(0x26, 0x0E, SS_RDEF,	/* XXX TBD */
@@ -1518,6 +1789,9 @@
 	/*  T             */
 	{ SST(0x26, 0x12, SS_RDEF,	/* XXX TBD */
 	    "Vendor specific key reference not found") },
+	/* D              */
+	{ SST(0x26, 0x13, SS_RDEF,	/* XXX TBD */
+	    "Application tag mode page is invalid") },
 	/* DT  WRO   BK   */
 	{ SST(0x27, 0x00, SS_FATAL | EACCES,
 	    "Write protected") },
@@ -1540,8 +1814,11 @@
 	{ SST(0x27, 0x06, SS_RDEF,	/* XXX TBD */
 	    "Conditional write protect") },
 	/* D         B    */
-	{ SST(0x27, 0x07, SS_RDEF,	/* XXX TBD */
+	{ SST(0x27, 0x07, SS_FATAL | ENOSPC,
 	    "Space allocation failed write protect") },
+	/* D              */
+	{ SST(0x27, 0x08, SS_FATAL | EACCES,
+	    "Zone is read only") },
 	/* DTLPWROMAEBKVF */
 	{ SST(0x28, 0x00, SS_FATAL | ENXIO,
 	    "Not ready to ready change, medium may have changed") },
@@ -1685,6 +1962,18 @@
 	/* D              */
 	{ SST(0x2C, 0x0C, SS_RDEF,	/* XXX TBD */
 	    "ORWRITE generation does not match") },
+	/* D              */
+	{ SST(0x2C, 0x0D, SS_RDEF,	/* XXX TBD */
+	    "Reset write pointer not allowed") },
+	/* D              */
+	{ SST(0x2C, 0x0E, SS_RDEF,	/* XXX TBD */
+	    "Zone is offline") },
+	/* D              */
+	{ SST(0x2C, 0x0F, SS_RDEF,	/* XXX TBD */
+	    "Stream not open") },
+	/* D              */
+	{ SST(0x2C, 0x10, SS_RDEF,	/* XXX TBD */
+	    "Unwritten data in zone") },
 	/*  T             */
 	{ SST(0x2D, 0x00, SS_RDEF,
 	    "Overwrite error on update in place") },
@@ -1691,6 +1980,15 @@
 	/*      R         */
 	{ SST(0x2E, 0x00, SS_RDEF,	/* XXX TBD */
 	    "Insufficient time for operation") },
+	/* D              */
+	{ SST(0x2E, 0x01, SS_RDEF,	/* XXX TBD */
+	    "Command timeout before processing") },
+	/* D              */
+	{ SST(0x2E, 0x02, SS_RDEF,	/* XXX TBD */
+	    "Command timeout during processing") },
+	/* D              */
+	{ SST(0x2E, 0x03, SS_RDEF,	/* XXX TBD */
+	    "Command timeout during processing due to error recovery") },
 	/* DTLPWROMAEBKVF */
 	{ SST(0x2F, 0x00, SS_RDEF,
 	    "Commands cleared by another initiator") },
@@ -1700,6 +1998,9 @@
 	/* DTLPWROMAEBKVF */
 	{ SST(0x2F, 0x02, SS_RDEF,	/* XXX TBD */
 	    "Commands cleared by device server") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x2F, 0x03, SS_RDEF,	/* XXX TBD */
+	    "Some commands cleared by queuing layer event") },
 	/* DT  WROM  BK   */
 	{ SST(0x30, 0x00, SS_RDEF,
 	    "Incompatible medium installed") },
@@ -1977,7 +2278,7 @@
 	{ SST(0x3F, 0x0D, SS_RDEF,
 	    "Volume set reassigned") },
 	/* DTLPWROMAE     */
-	{ SST(0x3F, 0x0E, SS_RDEF,	/* XXX TBD */
+	{ SST(0x3F, 0x0E, SS_RDEF | SSQ_RESCAN,
 	    "Reported LUNs data has changed") },
 	/* DTLPWROMAEBKVF */
 	{ SST(0x3F, 0x0F, SS_RDEF,	/* XXX TBD */
@@ -1997,7 +2298,16 @@
 	/* DTLPWR MAEBK F */
 	{ SST(0x3F, 0x14, SS_RDEF,	/* XXX TBD */
 	    "iSCSI IP address changed") },
+	/* DTLPWR MAEBK   */
+	{ SST(0x3F, 0x15, SS_RDEF,	/* XXX TBD */
+	    "Inspect referrals sense descriptors") },
+	/* DTLPWROMAEBKVF */
+	{ SST(0x3F, 0x16, SS_RDEF,	/* XXX TBD */
+	    "Microcode has been changed without reset") },
 	/* D              */
+	{ SST(0x3F, 0x17, SS_RDEF,	/* XXX TBD */
+	    "Zone transition to full") },
+	/* D              */
 	{ SST(0x40, 0x00, SS_RDEF,
 	    "RAM failure") },		/* deprecated - use 40 NN instead */
 	/* DTLPWROMAEBKVF */
@@ -2106,6 +2416,30 @@
 	/* DT PWROMAEBK F */
 	{ SST(0x4B, 0x0D, SS_RDEF,	/* XXX TBD */
 	    "Data-out buffer error") },
+	/* DT PWROMAEBK F */
+	{ SST(0x4B, 0x0E, SS_RDEF,	/* XXX TBD */
+	    "PCIe fabric error") },
+	/* DT PWROMAEBK F */
+	{ SST(0x4B, 0x0F, SS_RDEF,	/* XXX TBD */
+	    "PCIe completion timeout") },
+	/* DT PWROMAEBK F */
+	{ SST(0x4B, 0x10, SS_RDEF,	/* XXX TBD */
+	    "PCIe completer abort") },
+	/* DT PWROMAEBK F */
+	{ SST(0x4B, 0x11, SS_RDEF,	/* XXX TBD */
+	    "PCIe poisoned TLP received") },
+	/* DT PWROMAEBK F */
+	{ SST(0x4B, 0x12, SS_RDEF,	/* XXX TBD */
+	    "PCIe ECRC check failed") },
+	/* DT PWROMAEBK F */
+	{ SST(0x4B, 0x13, SS_RDEF,	/* XXX TBD */
+	    "PCIe unsupported request") },
+	/* DT PWROMAEBK F */
+	{ SST(0x4B, 0x14, SS_RDEF,	/* XXX TBD */
+	    "PCIe ACS violation") },
+	/* DT PWROMAEBK F */
+	{ SST(0x4B, 0x15, SS_RDEF,	/* XXX TBD */
+	    "PCIe TLP prefix blocket") },
 	/* DTLPWROMAEBKVF */
 	{ SST(0x4C, 0x00, SS_RDEF,
 	    "Logical unit failed self-configuration") },
@@ -2163,6 +2497,21 @@
 	/*        M       */
 	{ SST(0x53, 0x08, SS_RDEF,	/* XXX TBD */
 	    "Element status unknown") },
+	/*        M       */
+	{ SST(0x53, 0x09, SS_RDEF,	/* XXX TBD */
+	    "Data transfer device error - load failed") },
+	/*        M       */
+	{ SST(0x53, 0x0A, SS_RDEF,	/* XXX TBD */
+	    "Data transfer device error - unload failed") },
+	/*        M       */
+	{ SST(0x53, 0x0B, SS_RDEF,	/* XXX TBD */
+	    "Data transfer device error - unload missing") },
+	/*        M       */
+	{ SST(0x53, 0x0C, SS_RDEF,	/* XXX TBD */
+	    "Data transfer device error - eject failed") },
+	/*        M       */
+	{ SST(0x53, 0x0D, SS_RDEF,	/* XXX TBD */
+	    "Data transfer device error - library communication failed") },
 	/*    P           */
 	{ SST(0x54, 0x00, SS_RDEF,
 	    "SCSI to host system interface failure") },
@@ -2208,6 +2557,15 @@
 	/* DT P      B    */
 	{ SST(0x55, 0x0D, SS_RDEF,	/* XXX TBD */
 	    "Insufficient resources to create ROD token") },
+	/* D              */
+	{ SST(0x55, 0x0E, SS_RDEF,	/* XXX TBD */
+	    "Insufficient zone resources") },
+	/* D              */
+	{ SST(0x55, 0x0F, SS_RDEF,	/* XXX TBD */
+	    "Insufficient zone resources to complete write") },
+	/* D              */
+	{ SST(0x55, 0x10, SS_RDEF,	/* XXX TBD */
+	    "Maximum number of streams open") },
 	/*      R         */
 	{ SST(0x57, 0x00, SS_RDEF,
 	    "Unable to recover table-of-contents") },
@@ -2251,253 +2609,259 @@
 	{ SST(0x5C, 0x02, SS_RDEF,
 	    "Spindles not synchronized") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x5D, 0x00, SS_RDEF,
+	{ SST(0x5D, 0x00, SS_NOP | SSQ_PRINT_SENSE,
 	    "Failure prediction threshold exceeded") },
 	/*      R    B    */
-	{ SST(0x5D, 0x01, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x01, SS_NOP | SSQ_PRINT_SENSE,
 	    "Media failure prediction threshold exceeded") },
 	/*      R         */
-	{ SST(0x5D, 0x02, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x02, SS_NOP | SSQ_PRINT_SENSE,
 	    "Logical unit failure prediction threshold exceeded") },
 	/*      R         */
-	{ SST(0x5D, 0x03, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x03, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spare area exhaustion prediction threshold exceeded") },
 	/* D         B    */
-	{ SST(0x5D, 0x10, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x10, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure general hard drive failure") },
 	/* D         B    */
-	{ SST(0x5D, 0x11, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x11, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure drive error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x12, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x12, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure data error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x13, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x13, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure seek error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x14, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x14, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure too many block reassigns") },
 	/* D         B    */
-	{ SST(0x5D, 0x15, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x15, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure access times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x16, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x16, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure start unit times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x17, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x17, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure channel parametrics") },
 	/* D         B    */
-	{ SST(0x5D, 0x18, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x18, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure controller detected") },
 	/* D         B    */
-	{ SST(0x5D, 0x19, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x19, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure throughput performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x1A, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x1A, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure seek time performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x1B, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x1B, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure spin-up retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x1C, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x1C, SS_NOP | SSQ_PRINT_SENSE,
 	    "Hardware impending failure drive calibration retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x20, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x1D, SS_NOP | SSQ_PRINT_SENSE,
+	    "Hardware impending failure power loss protection circuit") },
+	/* D         B    */
+	{ SST(0x5D, 0x20, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure general hard drive failure") },
 	/* D         B    */
-	{ SST(0x5D, 0x21, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x21, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure drive error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x22, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x22, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure data error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x23, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x23, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure seek error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x24, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x24, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure too many block reassigns") },
 	/* D         B    */
-	{ SST(0x5D, 0x25, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x25, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure access times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x26, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x26, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure start unit times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x27, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x27, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure channel parametrics") },
 	/* D         B    */
-	{ SST(0x5D, 0x28, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x28, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure controller detected") },
 	/* D         B    */
-	{ SST(0x5D, 0x29, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x29, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure throughput performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x2A, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x2A, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure seek time performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x2B, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x2B, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure spin-up retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x2C, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x2C, SS_NOP | SSQ_PRINT_SENSE,
 	    "Controller impending failure drive calibration retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x30, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x30, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure general hard drive failure") },
 	/* D         B    */
-	{ SST(0x5D, 0x31, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x31, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure drive error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x32, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x32, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure data error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x33, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x33, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure seek error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x34, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x34, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure too many block reassigns") },
 	/* D         B    */
-	{ SST(0x5D, 0x35, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x35, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure access times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x36, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x36, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure start unit times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x37, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x37, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure channel parametrics") },
 	/* D         B    */
-	{ SST(0x5D, 0x38, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x38, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure controller detected") },
 	/* D         B    */
-	{ SST(0x5D, 0x39, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x39, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure throughput performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x3A, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x3A, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure seek time performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x3B, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x3B, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure spin-up retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x3C, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x3C, SS_NOP | SSQ_PRINT_SENSE,
 	    "Data channel impending failure drive calibration retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x40, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x40, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure general hard drive failure") },
 	/* D         B    */
-	{ SST(0x5D, 0x41, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x41, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure drive error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x42, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x42, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure data error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x43, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x43, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure seek error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x44, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x44, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure too many block reassigns") },
 	/* D         B    */
-	{ SST(0x5D, 0x45, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x45, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure access times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x46, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x46, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure start unit times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x47, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x47, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure channel parametrics") },
 	/* D         B    */
-	{ SST(0x5D, 0x48, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x48, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure controller detected") },
 	/* D         B    */
-	{ SST(0x5D, 0x49, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x49, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure throughput performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x4A, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x4A, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure seek time performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x4B, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x4B, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure spin-up retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x4C, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x4C, SS_NOP | SSQ_PRINT_SENSE,
 	    "Servo impending failure drive calibration retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x50, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x50, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure general hard drive failure") },
 	/* D         B    */
-	{ SST(0x5D, 0x51, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x51, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure drive error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x52, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x52, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure data error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x53, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x53, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure seek error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x54, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x54, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure too many block reassigns") },
 	/* D         B    */
-	{ SST(0x5D, 0x55, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x55, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure access times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x56, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x56, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure start unit times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x57, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x57, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure channel parametrics") },
 	/* D         B    */
-	{ SST(0x5D, 0x58, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x58, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure controller detected") },
 	/* D         B    */
-	{ SST(0x5D, 0x59, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x59, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure throughput performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x5A, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x5A, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure seek time performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x5B, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x5B, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure spin-up retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x5C, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x5C, SS_NOP | SSQ_PRINT_SENSE,
 	    "Spindle impending failure drive calibration retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x60, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x60, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure general hard drive failure") },
 	/* D         B    */
-	{ SST(0x5D, 0x61, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x61, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure drive error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x62, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x62, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure data error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x63, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x63, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure seek error rate too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x64, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x64, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure too many block reassigns") },
 	/* D         B    */
-	{ SST(0x5D, 0x65, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x65, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure access times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x66, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x66, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure start unit times too high") },
 	/* D         B    */
-	{ SST(0x5D, 0x67, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x67, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure channel parametrics") },
 	/* D         B    */
-	{ SST(0x5D, 0x68, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x68, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure controller detected") },
 	/* D         B    */
-	{ SST(0x5D, 0x69, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x69, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure throughput performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x6A, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x6A, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure seek time performance") },
 	/* D         B    */
-	{ SST(0x5D, 0x6B, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x6B, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure spin-up retry count") },
 	/* D         B    */
-	{ SST(0x5D, 0x6C, SS_RDEF,	/* XXX TBD */
+	{ SST(0x5D, 0x6C, SS_NOP | SSQ_PRINT_SENSE,
 	    "Firmware impending failure drive calibration retry count") },
+	/* D         B    */
+	{ SST(0x5D, 0x73, SS_NOP | SSQ_PRINT_SENSE,
+	    "Media impending failure endurance limit met") },
 	/* DTLPWROMAEBKVF */
-	{ SST(0x5D, 0xFF, SS_RDEF,
+	{ SST(0x5D, 0xFF, SS_NOP | SSQ_PRINT_SENSE,
 	    "Failure prediction threshold exceeded (false)") },
 	/* DTLPWRO A  K   */
 	{ SST(0x5E, 0x00, SS_RDEF,
@@ -2628,6 +2992,9 @@
 	/*         A      */
 	{ SST(0x68, 0x00, SS_RDEF,
 	    "Logical unit not configured") },
+	/* D              */
+	{ SST(0x68, 0x01, SS_RDEF,
+	    "Subsidiary logical unit not configured") },
 	/*         A      */
 	{ SST(0x69, 0x00, SS_RDEF,
 	    "Data loss on logical unit") },
@@ -2827,10 +3194,10 @@
 	{ SST(0x74, 0x6F, SS_RDEF,	/* XXX TBD */
 	    "External data encryption control error") },
 	/* DT   R M E  V  */
-	{ SST(0x74, 0x71, SS_RDEF,	/* XXX TBD */
+	{ SST(0x74, 0x71, SS_FATAL | EACCES,
 	    "Logical unit access not authorized") },
 	/* D              */
-	{ SST(0x74, 0x79, SS_RDEF,	/* XXX TBD */
+	{ SST(0x74, 0x79, SS_FATAL | EACCES,
 	    "Security conflict in translated device") }
 };
 
@@ -3077,6 +3444,7 @@
 				action |= SS_RETRY|SSQ_DECREMENT_COUNT|
 					  SSQ_PRINT_SENSE;
 			}
+			action |= SSQ_UA;
 		}
 	}
 	if ((action & SS_MASK) >= SS_START &&
@@ -3099,15 +3467,33 @@
 char *
 scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string, size_t len)
 {
+	struct sbuf sb;
+	int error;
+
+	if (len == 0)
+		return ("");
+
+	sbuf_new(&sb, cdb_string, len, SBUF_FIXEDLEN);
+
+	scsi_cdb_sbuf(cdb_ptr, &sb);
+
+	/* ENOMEM just means that the fixed buffer is full, OK to ignore */
+	error = sbuf_finish(&sb);
+	if (error != 0 && error != ENOMEM)
+		return ("");
+
+	return(sbuf_data(&sb));
+}
+
+void
+scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb)
+{
 	u_int8_t cdb_len;
 	int i;
 
 	if (cdb_ptr == NULL)
-		return("");
+		return;
 
-	/* Silence warnings */
-	cdb_len = 0;
-
 	/*
 	 * This is taken from the SCSI-3 draft spec.
 	 * (T10/1157D revision 0.3)
@@ -3143,12 +3529,11 @@
 			cdb_len = 12;
 			break;
 	}
-	*cdb_string = '\0';
+
 	for (i = 0; i < cdb_len; i++)
-		snprintf(cdb_string + strlen(cdb_string),
-			 len - strlen(cdb_string), "%02hhx ", cdb_ptr[i]);
+		sbuf_printf(sb, "%02hhx ", cdb_ptr[i]);
 
-	return(cdb_string);
+	return;
 }
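
A minimal caller sketch (hypothetical, not part of this change): the pair
above leaves scsi_cdb_string() as a thin wrapper that renders into a
caller-supplied buffer through a fixed-length sbuf, while scsi_cdb_sbuf()
does the actual hex formatting.  Assumes <cam/scsi/scsi_all.h> and a
valid csio.

	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];

	/* Render the CDB bytes as "xx xx xx ..." into cdb_str. */
	printf("CDB: %s\n",
	    scsi_cdb_string(scsiio_cdb_ptr(csio), cdb_str, sizeof(cdb_str)));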
 
 const char *
@@ -3197,7 +3582,6 @@
 #endif /* _KERNEL/!_KERNEL */
 {
 	struct scsi_inquiry_data *inq_data;
-	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 #ifdef _KERNEL
 	struct	  ccb_getdev *cgd;
 #endif /* _KERNEL */
@@ -3229,17 +3613,9 @@
 
 #endif /* _KERNEL/!_KERNEL */
 
-	if ((csio->ccb_h.flags & CAM_CDB_POINTER) != 0) {
-		sbuf_printf(sb, "%s. CDB: %s", 
-			    scsi_op_desc(csio->cdb_io.cdb_ptr[0], inq_data),
-			    scsi_cdb_string(csio->cdb_io.cdb_ptr, cdb_str,
-					    sizeof(cdb_str)));
-	} else {
-		sbuf_printf(sb, "%s. CDB: %s",
-			    scsi_op_desc(csio->cdb_io.cdb_bytes[0], inq_data),
-			    scsi_cdb_string(csio->cdb_io.cdb_bytes, cdb_str,
-					    sizeof(cdb_str)));
-	}
+	sbuf_printf(sb, "%s. CDB: ",
+		    scsi_op_desc(scsiio_cdb_ptr(csio)[0], inq_data));
+	scsi_cdb_sbuf(scsiio_cdb_ptr(csio), sb);
 
 #ifdef _KERNEL
 	xpt_free_ccb((union ccb *)cgd);
@@ -3270,7 +3646,7 @@
 
 	/*
 	 * The length of data actually returned may be different than the
-	 * extra_len recorded in the sturcture.
+	 * extra_len recorded in the structure.
 	 */
 	desc_len = sense_len -offsetof(struct scsi_sense_data_desc, sense_desc);
 
@@ -3356,336 +3732,303 @@
 }
 
 /*
- * Fill in SCSI sense data with the specified parameters.  This routine can
- * fill in either fixed or descriptor type sense data.
+ * Fill in SCSI descriptor sense data with the specified parameters.
  */
-void
-scsi_set_sense_data_va(struct scsi_sense_data *sense_data,
-		      scsi_sense_data_type sense_format, int current_error,
-		      int sense_key, int asc, int ascq, va_list ap) 
+static void
+scsi_set_sense_data_desc_va(struct scsi_sense_data *sense_data,
+    u_int *sense_len, scsi_sense_data_type sense_format, int current_error,
+    int sense_key, int asc, int ascq, va_list ap)
 {
-	int descriptor_sense;
+	struct scsi_sense_data_desc *sense;
 	scsi_sense_elem_type elem_type;
+	int space, len;
+	uint8_t *desc, *data;
 
-	/*
-	 * Determine whether to return fixed or descriptor format sense
-	 * data.  If the user specifies SSD_TYPE_NONE for some reason,
-	 * they'll just get fixed sense data.
-	 */
-	if (sense_format == SSD_TYPE_DESC)
-		descriptor_sense = 1;
+	memset(sense_data, 0, sizeof(*sense_data));
+	sense = (struct scsi_sense_data_desc *)sense_data;
+	if (current_error != 0)
+		sense->error_code = SSD_DESC_CURRENT_ERROR;
 	else
-		descriptor_sense = 0;
+		sense->error_code = SSD_DESC_DEFERRED_ERROR;
+	sense->sense_key = sense_key;
+	sense->add_sense_code = asc;
+	sense->add_sense_code_qual = ascq;
+	sense->flags = 0;
 
-	/*
-	 * Zero the sense data, so that we don't pass back any garbage data
-	 * to the user.
-	 */
-	memset(sense_data, 0, sizeof(*sense_data));
+	desc = &sense->sense_desc[0];
+	space = *sense_len - offsetof(struct scsi_sense_data_desc, sense_desc);
+	while ((elem_type = va_arg(ap, scsi_sense_elem_type)) !=
+	    SSD_ELEM_NONE) {
+		if (elem_type >= SSD_ELEM_MAX) {
+			printf("%s: invalid sense type %d\n", __func__,
+			       elem_type);
+			break;
+		}
+		len = va_arg(ap, int);
+		data = va_arg(ap, uint8_t *);
 
-	if (descriptor_sense != 0) {
-		struct scsi_sense_data_desc *sense;
-
-		sense = (struct scsi_sense_data_desc *)sense_data;
-		/*
-		 * The descriptor sense format eliminates the use of the
-		 * valid bit.
-		 */
-		if (current_error != 0)
-			sense->error_code = SSD_DESC_CURRENT_ERROR;
-		else
-			sense->error_code = SSD_DESC_DEFERRED_ERROR;
-		sense->sense_key = sense_key;
-		sense->add_sense_code = asc;
-		sense->add_sense_code_qual = ascq;
-		/*
-		 * Start off with no extra length, since the above data
-		 * fits in the standard descriptor sense information.
-		 */
-		sense->extra_len = 0;
-		while ((elem_type = (scsi_sense_elem_type)va_arg(ap,
-			scsi_sense_elem_type)) != SSD_ELEM_NONE) {
-			int sense_len, len_to_copy;
-			uint8_t *data;
-
-			if (elem_type >= SSD_ELEM_MAX) {
-				printf("%s: invalid sense type %d\n", __func__,
-				       elem_type);
+		switch (elem_type) {
+		case SSD_ELEM_SKIP:
+			break;
+		case SSD_ELEM_DESC:
+			if (space < len) {
+				sense->flags |= SSDD_SDAT_OVFL;
 				break;
 			}
+			bcopy(data, desc, len);
+			desc += len;
+			space -= len;
+			break;
+		case SSD_ELEM_SKS: {
+			struct scsi_sense_sks *sks = (void *)desc;
 
-			sense_len = (int)va_arg(ap, int);
-			len_to_copy = MIN(sense_len, SSD_EXTRA_MAX -
-					  sense->extra_len);
-			data = (uint8_t *)va_arg(ap, uint8_t *);
-
-			/*
-			 * We've already consumed the arguments for this one.
-			 */
-			if (elem_type == SSD_ELEM_SKIP)
-				continue;
-
-			switch (elem_type) {
-			case SSD_ELEM_DESC: {
-
-				/*
-				 * This is a straight descriptor.  All we
-				 * need to do is copy the data in.
-				 */
-				bcopy(data, &sense->sense_desc[
-				      sense->extra_len], len_to_copy);
-				sense->extra_len += len_to_copy;
+			if (len > sizeof(sks->sense_key_spec))
 				break;
+			if (space < sizeof(*sks)) {
+				sense->flags |= SSDD_SDAT_OVFL;
+				break;
 			}
-			case SSD_ELEM_SKS: {
-				struct scsi_sense_sks sks;
+			sks->desc_type = SSD_DESC_SKS;
+			sks->length = sizeof(*sks) -
+			    (offsetof(struct scsi_sense_sks, length) + 1);
+			bcopy(data, &sks->sense_key_spec, len);
+			desc += sizeof(*sks);
+			space -= sizeof(*sks);
+			break;
+		}
+		case SSD_ELEM_COMMAND: {
+			struct scsi_sense_command *cmd = (void *)desc;
 
-				bzero(&sks, sizeof(sks));
-
-				/*
-				 * This is already-formatted sense key
-				 * specific data.  We just need to fill out
-				 * the header and copy everything in.
-				 */
-				bcopy(data, &sks.sense_key_spec,
-				      MIN(len_to_copy,
-				          sizeof(sks.sense_key_spec)));
-
-				sks.desc_type = SSD_DESC_SKS;
-				sks.length = sizeof(sks) -
-				    offsetof(struct scsi_sense_sks, reserved1);
-				bcopy(&sks,&sense->sense_desc[sense->extra_len],
-				      sizeof(sks));
-				sense->extra_len += sizeof(sks);
+			if (len > sizeof(cmd->command_info))
 				break;
+			if (space < sizeof(*cmd)) {
+				sense->flags |= SSDD_SDAT_OVFL;
+				break;
 			}
-			case SSD_ELEM_INFO:
-			case SSD_ELEM_COMMAND: {
-				struct scsi_sense_command cmd;
-				struct scsi_sense_info info;
-				uint8_t *data_dest;
-				uint8_t *descriptor;
-				int descriptor_size, i, copy_len;
+			cmd->desc_type = SSD_DESC_COMMAND;
+			cmd->length = sizeof(*cmd) -
+			    (offsetof(struct scsi_sense_command, length) + 1);
+			bcopy(data, &cmd->command_info[
+			    sizeof(cmd->command_info) - len], len);
+			desc += sizeof(*cmd);
+			space -= sizeof(*cmd);
+			break;
+		}
+		case SSD_ELEM_INFO: {
+			struct scsi_sense_info *info = (void *)desc;
 
-				bzero(&cmd, sizeof(cmd));
-				bzero(&info, sizeof(info));
-
-				/*
-				 * Command or information data.  The
-				 * operate in pretty much the same way.
-				 */
-				if (elem_type == SSD_ELEM_COMMAND) {
-					len_to_copy = MIN(len_to_copy,
-					    sizeof(cmd.command_info));
-					descriptor = (uint8_t *)&cmd;
-					descriptor_size  = sizeof(cmd);
-					data_dest =(uint8_t *)&cmd.command_info;
-					cmd.desc_type = SSD_DESC_COMMAND;
-					cmd.length = sizeof(cmd) -
-					    offsetof(struct scsi_sense_command,
-						     reserved);
-				} else {
-					len_to_copy = MIN(len_to_copy,
-					    sizeof(info.info));
-					descriptor = (uint8_t *)&info;
-					descriptor_size = sizeof(cmd);
-					data_dest = (uint8_t *)&info.info;
-					info.desc_type = SSD_DESC_INFO;
-					info.byte2 = SSD_INFO_VALID;
-					info.length = sizeof(info) -
-					    offsetof(struct scsi_sense_info,
-						     byte2);
-				}
-
-				/*
-				 * Copy this in reverse because the spec
-				 * (SPC-4) says that when 4 byte quantities
-				 * are stored in this 8 byte field, the
-				 * first four bytes shall be 0.
-				 *
-				 * So we fill the bytes in from the end, and
-				 * if we have less than 8 bytes to copy,
-				 * the initial, most significant bytes will
-				 * be 0.
-				 */
-				for (i = sense_len - 1; i >= 0 &&
-				     len_to_copy > 0; i--, len_to_copy--)
-					data_dest[len_to_copy - 1] = data[i];
-
-				/*
-				 * This calculation looks much like the
-				 * initial len_to_copy calculation, but
-				 * we have to do it again here, because
-				 * we're looking at a larger amount that
-				 * may or may not fit.  It's not only the
-				 * data the user passed in, but also the
-				 * rest of the descriptor.
-				 */
-				copy_len = MIN(descriptor_size,
-				    SSD_EXTRA_MAX - sense->extra_len);
-				bcopy(descriptor, &sense->sense_desc[
-				      sense->extra_len], copy_len);
-				sense->extra_len += copy_len;
+			if (len > sizeof(info->info))
 				break;
+			if (space < sizeof(*info)) {
+				sense->flags |= SSDD_SDAT_OVFL;
+				break;
 			}
-			case SSD_ELEM_FRU: {
-				struct scsi_sense_fru fru;
-				int copy_len;
+			info->desc_type = SSD_DESC_INFO;
+			info->length = sizeof(*info) -
+			    (offsetof(struct scsi_sense_info, length) + 1);
+			info->byte2 = SSD_INFO_VALID;
+			bcopy(data, &info->info[sizeof(info->info) - len], len);
+			desc += sizeof(*info);
+			space -= sizeof(*info);
+			break;
+		}
+		case SSD_ELEM_FRU: {
+			struct scsi_sense_fru *fru = (void *)desc;
 
-				bzero(&fru, sizeof(fru));
-
-				fru.desc_type = SSD_DESC_FRU;
-				fru.length = sizeof(fru) -
-				    offsetof(struct scsi_sense_fru, reserved);
-				fru.fru = *data;
-
-				copy_len = MIN(sizeof(fru), SSD_EXTRA_MAX -
-					       sense->extra_len);
-				bcopy(&fru, &sense->sense_desc[
-				      sense->extra_len], copy_len);
-				sense->extra_len += copy_len;
+			if (len > sizeof(fru->fru))
 				break;
+			if (space < sizeof(*fru)) {
+				sense->flags |= SSDD_SDAT_OVFL;
+				break;
 			}
-			case SSD_ELEM_STREAM: {
-				struct scsi_sense_stream stream_sense;
-				int copy_len;
+			fru->desc_type = SSD_DESC_FRU;
+			fru->length = sizeof(*fru) -
+			    (offsetof(struct scsi_sense_fru, length) + 1);
+			fru->fru = *data;
+			desc += sizeof(*fru);
+			space -= sizeof(*fru);
+			break;
+		}
+		case SSD_ELEM_STREAM: {
+			struct scsi_sense_stream *stream = (void *)desc;
 
-				bzero(&stream_sense, sizeof(stream_sense));
-				stream_sense.desc_type = SSD_DESC_STREAM;
-				stream_sense.length = sizeof(stream_sense) -
-				   offsetof(struct scsi_sense_stream, reserved);
-				stream_sense.byte3 = *data;
-
-				copy_len = MIN(sizeof(stream_sense),
-				    SSD_EXTRA_MAX - sense->extra_len);
-				bcopy(&stream_sense, &sense->sense_desc[
-				      sense->extra_len], copy_len);
-				sense->extra_len += copy_len;
+			if (len > sizeof(stream->byte3))
 				break;
-			}
-			default:
-				/*
-				 * We shouldn't get here, but if we do, do
-				 * nothing.  We've already consumed the
-				 * arguments above.
-				 */
+			if (space < sizeof(*stream)) {
+				sense->flags |= SSDD_SDAT_OVFL;
 				break;
 			}
+			stream->desc_type = SSD_DESC_STREAM;
+			stream->length = sizeof(*stream) -
+			    (offsetof(struct scsi_sense_stream, length) + 1);
+			stream->byte3 = *data;
+			desc += sizeof(*stream);
+			space -= sizeof(*stream);
+			break;
 		}
-	} else {
-		struct scsi_sense_data_fixed *sense;
+		default:
+			/*
+			 * We shouldn't get here, but if we do, do nothing.
+			 * We've already consumed the arguments above.
+			 */
+			break;
+		}
+	}
+	sense->extra_len = desc - &sense->sense_desc[0];
+	*sense_len = offsetof(struct scsi_sense_data_desc, extra_len) + 1 +
+	    sense->extra_len;
+}
 
-		sense = (struct scsi_sense_data_fixed *)sense_data;
+/*
+ * Fill in SCSI fixed sense data with the specified parameters.
+ */
+static void
+scsi_set_sense_data_fixed_va(struct scsi_sense_data *sense_data,
+    u_int *sense_len, scsi_sense_data_type sense_format, int current_error,
+    int sense_key, int asc, int ascq, va_list ap)
+{
+	struct scsi_sense_data_fixed *sense;
+	scsi_sense_elem_type elem_type;
+	uint8_t *data;
+	int len;
 
-		if (current_error != 0)
-			sense->error_code = SSD_CURRENT_ERROR;
-		else
-			sense->error_code = SSD_DEFERRED_ERROR;
-
-		sense->flags = sense_key;
+	memset(sense_data, 0, sizeof(*sense_data));
+	sense = (struct scsi_sense_data_fixed *)sense_data;
+	if (current_error != 0)
+		sense->error_code = SSD_CURRENT_ERROR;
+	else
+		sense->error_code = SSD_DEFERRED_ERROR;
+	sense->flags = sense_key & SSD_KEY;
+	sense->extra_len = 0;
+	if (*sense_len >= 13) {
 		sense->add_sense_code = asc;
+		sense->extra_len = MAX(sense->extra_len, 5);
+	} else
+		sense->flags |= SSD_SDAT_OVFL;
+	if (*sense_len >= 14) {
 		sense->add_sense_code_qual = ascq;
-		/*
-		 * We've set the ASC and ASCQ, so we have 6 more bytes of
-		 * valid data.  If we wind up setting any of the other
-		 * fields, we'll bump this to 10 extra bytes.
-		 */
-		sense->extra_len = 6;
+		sense->extra_len = MAX(sense->extra_len, 6);
+	} else
+		sense->flags |= SSD_SDAT_OVFL;
 
-		while ((elem_type = (scsi_sense_elem_type)va_arg(ap,
-			scsi_sense_elem_type)) != SSD_ELEM_NONE) {
-			int sense_len, len_to_copy;
-			uint8_t *data;
+	while ((elem_type = va_arg(ap, scsi_sense_elem_type)) !=
+	    SSD_ELEM_NONE) {
+		if (elem_type >= SSD_ELEM_MAX) {
+			printf("%s: invalid sense type %d\n", __func__,
+			       elem_type);
+			break;
+		}
+		len = va_arg(ap, int);
+		data = va_arg(ap, uint8_t *);
 
-			if (elem_type >= SSD_ELEM_MAX) {
-				printf("%s: invalid sense type %d\n", __func__,
-				       elem_type);
+		switch (elem_type) {
+		case SSD_ELEM_SKIP:
+			break;
+		case SSD_ELEM_SKS:
+			if (len > sizeof(sense->sense_key_spec))
 				break;
+			if (*sense_len < 18) {
+				sense->flags |= SSD_SDAT_OVFL;
+				break;
 			}
-			/*
-			 * If we get in here, just bump the extra length to
-			 * 10 bytes.  That will encompass anything we're
-			 * going to set here.
-			 */
-			sense->extra_len = 10;
-			sense_len = (int)va_arg(ap, int);
-			len_to_copy = MIN(sense_len, SSD_EXTRA_MAX -
-					  sense->extra_len);
-			data = (uint8_t *)va_arg(ap, uint8_t *);
-
-			switch (elem_type) {
-			case SSD_ELEM_SKS:
-				/*
-				 * The user passed in pre-formatted sense
-				 * key specific data.
-				 */
-				bcopy(data, &sense->sense_key_spec[0],
-				      MIN(sizeof(sense->sense_key_spec),
-				      sense_len));
+			bcopy(data, &sense->sense_key_spec[0], len);
+			sense->extra_len = MAX(sense->extra_len, 10);
+			break;
+		case SSD_ELEM_COMMAND:
+			if (*sense_len < 12) {
+				sense->flags |= SSD_SDAT_OVFL;
 				break;
-			case SSD_ELEM_INFO:
-			case SSD_ELEM_COMMAND: {
-				uint8_t *data_dest;
-				int i;
-
-				if (elem_type == SSD_ELEM_COMMAND)
-					data_dest = &sense->cmd_spec_info[0];
-				else {
-					data_dest = &sense->info[0];
-					/*
-					 * We're setting the info field, so
-					 * set the valid bit.
-					 */
-					sense->error_code |= SSD_ERRCODE_VALID;
-				}
-
-				/*
-			 	 * Copy this in reverse so that if we have
-				 * less than 4 bytes to fill, the least
-				 * significant bytes will be at the end.
-				 * If we have more than 4 bytes, only the
-				 * least significant bytes will be included.
-				 */
-				for (i = sense_len - 1; i >= 0 &&
-				     len_to_copy > 0; i--, len_to_copy--)
-					data_dest[len_to_copy - 1] = data[i];
-
+			}
+			if (len > sizeof(sense->cmd_spec_info)) {
+				data += len - sizeof(sense->cmd_spec_info);
+				len -= len - sizeof(sense->cmd_spec_info);
+			}
+			bcopy(data, &sense->cmd_spec_info[
+			    sizeof(sense->cmd_spec_info) - len], len);
+			sense->extra_len = MAX(sense->extra_len, 4);
+			break;
+		case SSD_ELEM_INFO:
+			/* Set VALID bit only if no overflow. */
+			sense->error_code |= SSD_ERRCODE_VALID;
+			while (len > sizeof(sense->info)) {
+				if (data[0] != 0)
+					sense->error_code &= ~SSD_ERRCODE_VALID;
+				data++;
+				len--;
+			}
+			bcopy(data, &sense->info[sizeof(sense->info) - len], len);
+			break;
+		case SSD_ELEM_FRU:
+			if (*sense_len < 15) {
+				sense->flags |= SSD_SDAT_OVFL;
 				break;
 			}
-			case SSD_ELEM_FRU:
-				sense->fru = *data;
-				break;
-			case SSD_ELEM_STREAM:
-				sense->flags |= *data;
-				break;
-			case SSD_ELEM_DESC:
-			default:
+			sense->fru = *data;
+			sense->extra_len = MAX(sense->extra_len, 7);
+			break;
+		case SSD_ELEM_STREAM:
+			sense->flags |= *data &
+			    (SSD_ILI | SSD_EOM | SSD_FILEMARK);
+			break;
+		default:
 
-				/*
-				 * If the user passes in descriptor sense,
-				 * we can't handle that in fixed format.
-				 * So just skip it, and any unknown argument
-				 * types.
-				 */
-				break;
-			}
+			/*
+			 * We can't handle that in fixed format.  Skip it.
+			 */
+			break;
 		}
 	}
+	*sense_len = offsetof(struct scsi_sense_data_fixed, extra_len) + 1 +
+	    sense->extra_len;
 }
 
+/*
+ * Fill in SCSI sense data with the specified parameters.  This routine can
+ * fill in either fixed or descriptor type sense data.
+ */
 void
-scsi_set_sense_data(struct scsi_sense_data *sense_data, 
+scsi_set_sense_data_va(struct scsi_sense_data *sense_data, u_int *sense_len,
+		      scsi_sense_data_type sense_format, int current_error,
+		      int sense_key, int asc, int ascq, va_list ap)
+{
+
+	if (*sense_len > SSD_FULL_SIZE)
+		*sense_len = SSD_FULL_SIZE;
+	if (sense_format == SSD_TYPE_DESC)
+		scsi_set_sense_data_desc_va(sense_data, sense_len,
+		    sense_format, current_error, sense_key, asc, ascq, ap);
+	else
+		scsi_set_sense_data_fixed_va(sense_data, sense_len,
+		    sense_format, current_error, sense_key, asc, ascq, ap);
+}
+
+void
+scsi_set_sense_data(struct scsi_sense_data *sense_data,
 		    scsi_sense_data_type sense_format, int current_error,
-		    int sense_key, int asc, int ascq, ...) 
+		    int sense_key, int asc, int ascq, ...)
 {
 	va_list ap;
+	u_int	sense_len = SSD_FULL_SIZE;
 
 	va_start(ap, ascq);
-	scsi_set_sense_data_va(sense_data, sense_format, current_error,
-			       sense_key, asc, ascq, ap);
+	scsi_set_sense_data_va(sense_data, &sense_len, sense_format,
+	    current_error, sense_key, asc, ascq, ap);
 	va_end(ap);
 }
 
+void
+scsi_set_sense_data_len(struct scsi_sense_data *sense_data, u_int *sense_len,
+		    scsi_sense_data_type sense_format, int current_error,
+		    int sense_key, int asc, int ascq, ...)
+{
+	va_list ap;
+
+	va_start(ap, ascq);
+	scsi_set_sense_data_va(sense_data, sense_len, sense_format,
+	    current_error, sense_key, asc, ascq, ap);
+	va_end(ap);
+}
+
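
A hedged usage sketch (hypothetical caller, not part of this change): the
rewritten fill routines take the sense buffer length by reference, trim
elements that do not fit, and flag truncation via the SDAT_OVFL bit
instead of silently dropping data.  The element list is (type, length,
pointer) triples terminated by SSD_ELEM_NONE; "lba" is an assumed
failing LBA.

	struct scsi_sense_data sense;
	u_int sense_len = SSD_FULL_SIZE;
	uint8_t info[8];

	scsi_u64to8b(lba, info);	/* INFO is copied right-justified */
	scsi_set_sense_data_len(&sense, &sense_len, SSD_TYPE_FIXED,
	    /*current_error*/ 1, SSD_KEY_MEDIUM_ERROR,
	    /*asc*/ 0x11, /*ascq*/ 0x00,
	    SSD_ELEM_INFO, sizeof(info), info,
	    SSD_ELEM_NONE);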
 /*
  * Get sense information for three similar sense data types.
  */
@@ -4269,6 +4612,73 @@
 	scsi_progress_sbuf(sb, progress_val);
 }
 
+void
+scsi_sense_ata_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
+			 u_int sense_len, uint8_t *cdb, int cdb_len,
+			 struct scsi_inquiry_data *inq_data,
+			 struct scsi_sense_desc_header *header)
+{
+	struct scsi_sense_ata_ret_desc *res;
+
+	res = (struct scsi_sense_ata_ret_desc *)header;
+
+	sbuf_printf(sb, "ATA status: %02x (%s%s%s%s%s%s%s%s), ",
+	    res->status,
+	    (res->status & 0x80) ? "BSY " : "",
+	    (res->status & 0x40) ? "DRDY " : "",
+	    (res->status & 0x20) ? "DF " : "",
+	    (res->status & 0x10) ? "SERV " : "",
+	    (res->status & 0x08) ? "DRQ " : "",
+	    (res->status & 0x04) ? "CORR " : "",
+	    (res->status & 0x02) ? "IDX " : "",
+	    (res->status & 0x01) ? "ERR" : "");
+	if (res->status & 1) {
+	    sbuf_printf(sb, "error: %02x (%s%s%s%s%s%s%s%s), ",
+		res->error,
+		(res->error & 0x80) ? "ICRC " : "",
+		(res->error & 0x40) ? "UNC " : "",
+		(res->error & 0x20) ? "MC " : "",
+		(res->error & 0x10) ? "IDNF " : "",
+		(res->error & 0x08) ? "MCR " : "",
+		(res->error & 0x04) ? "ABRT " : "",
+		(res->error & 0x02) ? "NM " : "",
+		(res->error & 0x01) ? "ILI" : "");
+	}
+
+	if (res->flags & SSD_DESC_ATA_FLAG_EXTEND) {
+		sbuf_printf(sb, "count: %02x%02x, ",
+		    res->count_15_8, res->count_7_0);
+		sbuf_printf(sb, "LBA: %02x%02x%02x%02x%02x%02x, ",
+		    res->lba_47_40, res->lba_39_32, res->lba_31_24,
+		    res->lba_23_16, res->lba_15_8, res->lba_7_0);
+	} else {
+		sbuf_printf(sb, "count: %02x, ", res->count_7_0);
+		sbuf_printf(sb, "LBA: %02x%02x%02x, ",
+		    res->lba_23_16, res->lba_15_8, res->lba_7_0);
+	}
+	sbuf_printf(sb, "device: %02x, ", res->device);
+}
+
+void
+scsi_sense_forwarded_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
+			 u_int sense_len, uint8_t *cdb, int cdb_len,
+			 struct scsi_inquiry_data *inq_data,
+			 struct scsi_sense_desc_header *header)
+{
+	struct scsi_sense_forwarded *forwarded;
+	const char *sense_key_desc;
+	const char *asc_desc;
+	int error_code, sense_key, asc, ascq;
+
+	forwarded = (struct scsi_sense_forwarded *)header;
+	scsi_extract_sense_len((struct scsi_sense_data *)forwarded->sense_data,
+	    forwarded->length - 2, &error_code, &sense_key, &asc, &ascq, 1);
+	scsi_sense_desc(sense_key, asc, ascq, NULL, &sense_key_desc, &asc_desc);
+
+	sbuf_printf(sb, "Forwarded sense: %s asc:%x,%x (%s): ",
+	    sense_key_desc, asc, ascq, asc_desc);
+}
+
 /*
  * Generic sense descriptor printing routine.  This is used when we have
  * not yet implemented a specific printing routine for this descriptor.
@@ -4315,7 +4725,9 @@
 	{SSD_DESC_FRU, scsi_sense_fru_sbuf},
 	{SSD_DESC_STREAM, scsi_sense_stream_sbuf},
 	{SSD_DESC_BLOCK, scsi_sense_block_sbuf},
-	{SSD_DESC_PROGRESS, scsi_sense_progress_sbuf}
+	{SSD_DESC_ATA, scsi_sense_ata_sbuf},
+	{SSD_DESC_PROGRESS, scsi_sense_progress_sbuf},
+	{SSD_DESC_FORWARDED, scsi_sense_forwarded_sbuf}
 };
 
 void
@@ -4608,7 +5020,6 @@
 	struct	  ccb_getdev *cgd;
 #endif /* _KERNEL */
 	char	  path_str[64];
-	uint8_t	  *cdb;
 
 #ifndef _KERNEL
 	if (device == NULL)
@@ -4706,14 +5117,9 @@
 			sense = &csio->sense_data;
 	}
 
-	if (csio->ccb_h.flags & CAM_CDB_POINTER)
-		cdb = csio->cdb_io.cdb_ptr;
-	else
-		cdb = csio->cdb_io.cdb_bytes;
+	scsi_sense_only_sbuf(sense, csio->sense_len - csio->sense_resid, sb,
+	    path_str, inq_data, scsiio_cdb_ptr(csio), csio->cdb_len);
 
-	scsi_sense_only_sbuf(sense, csio->sense_len - csio->sense_resid, sb,
-			     path_str, inq_data, cdb, csio->cdb_len);
-			 
 #ifdef _KERNEL
 	xpt_free_ccb((union ccb*)cgd);
 #endif /* _KERNEL/!_KERNEL */
@@ -4955,7 +5361,7 @@
 {
 	u_int8_t type;
 	char *dtype, *qtype;
-	char vendor[16], product[48], revision[16], rstr[4];
+	char vendor[16], product[48], revision[16], rstr[12];
 
 	type = SID_TYPE(inq_data);
 
@@ -4963,7 +5369,7 @@
 	 * Figure out basic device type and qualifier.
 	 */
 	if (SID_QUAL_IS_VENDOR_UNIQUE(inq_data)) {
-		qtype = "(vendor-unique qualifier)";
+		qtype = " (vendor-unique qualifier)";
 	} else {
 		switch (SID_QUAL(inq_data)) {
 		case SID_QUAL_LU_CONNECTED:
@@ -4971,15 +5377,15 @@
 			break;
 
 		case SID_QUAL_LU_OFFLINE:
-			qtype = "(offline)";
+			qtype = " (offline)";
 			break;
 
 		case SID_QUAL_RSVD:
-			qtype = "(reserved qualifier)";
+			qtype = " (reserved qualifier)";
 			break;
 		default:
 		case SID_QUAL_BAD_LU:
-			qtype = "(LUN not supported)";
+			qtype = " (LUN not supported)";
 			break;
 		}
 	}
@@ -5048,16 +5454,36 @@
 	cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
 		   sizeof(revision));
 
-	if (SID_ANSI_REV(inq_data) == SCSI_REV_CCS)
-		bcopy("CCS", rstr, 4);
-	else
-		snprintf(rstr, sizeof (rstr), "%d", SID_ANSI_REV(inq_data));
-	printf("<%s %s %s> %s %s SCSI-%s device %s\n",
+	if (SID_ANSI_REV(inq_data) == SCSI_REV_0)
+		snprintf(rstr, sizeof(rstr), "SCSI");
+	else if (SID_ANSI_REV(inq_data) <= SCSI_REV_SPC) {
+		snprintf(rstr, sizeof(rstr), "SCSI-%d",
+		    SID_ANSI_REV(inq_data));
+	} else {
+		snprintf(rstr, sizeof(rstr), "SPC-%d SCSI",
+		    SID_ANSI_REV(inq_data) - 2);
+	}
+	printf("<%s %s %s> %s %s %s device%s\n",
 	       vendor, product, revision,
 	       SID_IS_REMOVABLE(inq_data) ? "Removable" : "Fixed",
 	       dtype, rstr, qtype);
 }
 
+void
+scsi_print_inquiry_short(struct scsi_inquiry_data *inq_data)
+{
+	char vendor[16], product[48], revision[16];
+
+	cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
+		   sizeof(vendor));
+	cam_strvis(product, inq_data->product, sizeof(inq_data->product),
+		   sizeof(product));
+	cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
+		   sizeof(revision));
+
+	printf("<%s %s %s>", vendor, product, revision);
+}
+
 /*
  * Table of syncrates that don't follow the "divisible by 4"
  * rule. This table will be expanded in future SCSI specs.
@@ -5173,34 +5599,1936 @@
 	return 1;
 }
 
-uint8_t *
-scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t page_len,
+int
+scsi_devid_is_lun_eui64(uint8_t *bufp)
+{
+	struct scsi_vpd_id_descriptor *descr;
+
+	descr = (struct scsi_vpd_id_descriptor *)bufp;
+	if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN)
+		return 0;
+	if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_EUI64)
+		return 0;
+	return 1;
+}
+
+int
+scsi_devid_is_lun_naa(uint8_t *bufp)
+{
+	struct scsi_vpd_id_descriptor *descr;
+
+	descr = (struct scsi_vpd_id_descriptor *)bufp;
+	if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN)
+		return 0;
+	if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA)
+		return 0;
+	return 1;
+}
+
+int
+scsi_devid_is_lun_t10(uint8_t *bufp)
+{
+	struct scsi_vpd_id_descriptor *descr;
+
+	descr = (struct scsi_vpd_id_descriptor *)bufp;
+	if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN)
+		return 0;
+	if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_T10)
+		return 0;
+	return 1;
+}
+
+int
+scsi_devid_is_lun_name(uint8_t *bufp)
+{
+	struct scsi_vpd_id_descriptor *descr;
+
+	descr = (struct scsi_vpd_id_descriptor *)bufp;
+	if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN)
+		return 0;
+	if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_SCSI_NAME)
+		return 0;
+	return 1;
+}
+
+int
+scsi_devid_is_lun_md5(uint8_t *bufp)
+{
+	struct scsi_vpd_id_descriptor *descr;
+
+	descr = (struct scsi_vpd_id_descriptor *)bufp;
+	if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN)
+		return 0;
+	if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_MD5_LUN_ID)
+		return 0;
+	return 1;
+}
+
+int
+scsi_devid_is_lun_uuid(uint8_t *bufp)
+{
+	struct scsi_vpd_id_descriptor *descr;
+
+	descr = (struct scsi_vpd_id_descriptor *)bufp;
+	if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN)
+		return 0;
+	if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_UUID)
+		return 0;
+	return 1;
+}
+
+int
+scsi_devid_is_port_naa(uint8_t *bufp)
+{
+	struct scsi_vpd_id_descriptor *descr;
+
+	descr = (struct scsi_vpd_id_descriptor *)bufp;
+	if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_PORT)
+		return 0;
+	if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA)
+		return 0;
+	return 1;
+}
+
+struct scsi_vpd_id_descriptor *
+scsi_get_devid_desc(struct scsi_vpd_id_descriptor *desc, uint32_t len,
     scsi_devid_checkfn_t ck_fn)
 {
-	struct scsi_vpd_id_descriptor *desc;
-	uint8_t *page_end;
 	uint8_t *desc_buf_end;
 
-	page_end = (uint8_t *)id + page_len;
-	if (page_end < id->desc_list)
-		return (NULL);
+	desc_buf_end = (uint8_t *)desc + len;
 
-	desc_buf_end = MIN(id->desc_list + scsi_2btoul(id->length), page_end);
-
-	for (desc = (struct scsi_vpd_id_descriptor *)id->desc_list;
-	     desc->identifier <= desc_buf_end
-	  && desc->identifier + desc->length <= desc_buf_end;
-	     desc = (struct scsi_vpd_id_descriptor *)(desc->identifier
+	for (; desc->identifier <= desc_buf_end &&
+	    desc->identifier + desc->length <= desc_buf_end;
+	    desc = (struct scsi_vpd_id_descriptor *)(desc->identifier
 						    + desc->length)) {
 
 		if (ck_fn == NULL || ck_fn((uint8_t *)desc) != 0)
-			return (desc->identifier);
+			return (desc);
 	}
+	return (NULL);
+}
 
+struct scsi_vpd_id_descriptor *
+scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t page_len,
+    scsi_devid_checkfn_t ck_fn)
+{
+	uint32_t len;
+
+	if (page_len < sizeof(*id))
+		return (NULL);
+	len = MIN(scsi_2btoul(id->length), page_len - sizeof(*id));
+	return (scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
+	    id->desc_list, len, ck_fn));
+}
+
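
A minimal sketch (hypothetical, not part of this change):
scsi_get_devid() now returns the whole descriptor rather than just the
identifier bytes, and the scsi_devid_is_*() predicates above slot in as
check functions.  Assumes "id" and "page_len" describe a previously
fetched Device Identification VPD page.

	struct scsi_vpd_id_descriptor *desc;

	/* First NAA identifier with LUN association, or NULL. */
	desc = scsi_get_devid(id, page_len, scsi_devid_is_lun_naa);
	if (desc != NULL)
		printf("NAA ID, %u bytes\n", desc->length);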
+int
+scsi_transportid_sbuf(struct sbuf *sb, struct scsi_transportid_header *hdr,
+		      uint32_t valid_len)
+{
+	switch (hdr->format_protocol & SCSI_TRN_PROTO_MASK) {
+	case SCSI_PROTO_FC: {
+		struct scsi_transportid_fcp *fcp;
+		uint64_t n_port_name;
+
+		fcp = (struct scsi_transportid_fcp *)hdr;
+
+		n_port_name = scsi_8btou64(fcp->n_port_name);
+
+		sbuf_printf(sb, "FCP address: 0x%.16jx",(uintmax_t)n_port_name);
+		break;
+	}
+	case SCSI_PROTO_SPI: {
+		struct scsi_transportid_spi *spi;
+
+		spi = (struct scsi_transportid_spi *)hdr;
+
+		sbuf_printf(sb, "SPI address: %u,%u",
+			    scsi_2btoul(spi->scsi_addr),
+			    scsi_2btoul(spi->rel_trgt_port_id));
+		break;
+	}
+	case SCSI_PROTO_SSA:
+		/*
+		 * XXX KDM there is no transport ID defined in SPC-4 for
+		 * SSA.
+		 */
+		break;
+	case SCSI_PROTO_1394: {
+		struct scsi_transportid_1394 *sbp;
+		uint64_t eui64;
+
+		sbp = (struct scsi_transportid_1394 *)hdr;
+
+		eui64 = scsi_8btou64(sbp->eui64);
+		sbuf_printf(sb, "SBP address: 0x%.16jx", (uintmax_t)eui64);
+		break;
+	}
+	case SCSI_PROTO_RDMA: {
+		struct scsi_transportid_rdma *rdma;
+		unsigned int i;
+
+		rdma = (struct scsi_transportid_rdma *)hdr;
+
+		sbuf_printf(sb, "RDMA address: 0x");
+		for (i = 0; i < sizeof(rdma->initiator_port_id); i++)
+			sbuf_printf(sb, "%02x", rdma->initiator_port_id[i]);
+		break;
+	}
+	case SCSI_PROTO_ISCSI: {
+		uint32_t add_len, i;
+		uint8_t *iscsi_name = NULL;
+		int nul_found = 0;
+
+		sbuf_printf(sb, "iSCSI address: ");
+		if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) == 
+		    SCSI_TRN_ISCSI_FORMAT_DEVICE) {
+			struct scsi_transportid_iscsi_device *dev;
+
+			dev = (struct scsi_transportid_iscsi_device *)hdr;
+
+			/*
+			 * Verify how much additional data we really have.
+			 */
+			add_len = scsi_2btoul(dev->additional_length);
+			add_len = MIN(add_len, valid_len -
+				__offsetof(struct scsi_transportid_iscsi_device,
+					   iscsi_name));
+			iscsi_name = &dev->iscsi_name[0];
+
+		} else if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) ==
+			    SCSI_TRN_ISCSI_FORMAT_PORT) {
+			struct scsi_transportid_iscsi_port *port;
+
+			port = (struct scsi_transportid_iscsi_port *)hdr;
+			
+			add_len = scsi_2btoul(port->additional_length);
+			add_len = MIN(add_len, valid_len -
+				__offsetof(struct scsi_transportid_iscsi_port,
+					   iscsi_name));
+			iscsi_name = &port->iscsi_name[0];
+		} else {
+			sbuf_printf(sb, "unknown format %x",
+				    (hdr->format_protocol &
+				     SCSI_TRN_FORMAT_MASK) >>
+				     SCSI_TRN_FORMAT_SHIFT);
+			break;
+		}
+		if (add_len == 0) {
+			sbuf_printf(sb, "not enough data");
+			break;
+		}
+		/*
+		 * This is supposed to be a NUL-terminated ASCII 
+		 * string, but you never know.  So we're going to
+		 * check.  We need to do this because there is no
+		 * sbuf equivalent of strncat().
+		 */
+		for (i = 0; i < add_len; i++) {
+			if (iscsi_name[i] == '\0') {
+				nul_found = 1;
+				break;
+			}
+		}
+		/*
+		 * If there is a NUL in the name, we can just use
+		 * sbuf_cat().  Otherwise we need to use sbuf_bcat().
+		 */
+		if (nul_found != 0)
+			sbuf_cat(sb, iscsi_name);
+		else
+			sbuf_bcat(sb, iscsi_name, add_len);
+		break;
+	}
+	case SCSI_PROTO_SAS: {
+		struct scsi_transportid_sas *sas;
+		uint64_t sas_addr;
+
+		sas = (struct scsi_transportid_sas *)hdr;
+
+		sas_addr = scsi_8btou64(sas->sas_address);
+		sbuf_printf(sb, "SAS address: 0x%.16jx", (uintmax_t)sas_addr);
+		break;
+	}
+	case SCSI_PROTO_ADITP:
+	case SCSI_PROTO_ATA:
+	case SCSI_PROTO_UAS:
+		/*
+		 * No Transport ID format for ADI, ATA or USB is defined in
+		 * SPC-4.
+		 */
+		sbuf_printf(sb, "No known Transport ID format for protocol "
+			    "%#x", hdr->format_protocol & SCSI_TRN_PROTO_MASK);
+		break;
+	case SCSI_PROTO_SOP: {
+		struct scsi_transportid_sop *sop;
+		struct scsi_sop_routing_id_norm *rid;
+
+		sop = (struct scsi_transportid_sop *)hdr;
+		rid = (struct scsi_sop_routing_id_norm *)sop->routing_id;
+
+		/*
+		 * Note that there is no alternate format specified in SPC-4
+		 * for the PCIe routing ID, so we don't really have a way
+		 * to know whether the second byte of the routing ID is
+		 * a device and function or just a function.  So we just
+		 * assume bus,device,function.
+		 */
+		sbuf_printf(sb, "SOP Routing ID: %u,%u,%u",
+			    rid->bus, rid->devfunc >> SCSI_TRN_SOP_DEV_SHIFT,
+			    rid->devfunc & SCSI_TRN_SOP_FUNC_NORM_MAX);
+		break;
+	}
+	case SCSI_PROTO_NONE:
+	default:
+		sbuf_printf(sb, "Unknown protocol %#x",
+			    hdr->format_protocol & SCSI_TRN_PROTO_MASK);
+		break;
+	}
+
+	return (0);
+}
+
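
A hedged sketch (hypothetical, not part of this change):
scsi_transportid_sbuf() follows the same fixed-length sbuf pattern as
scsi_cdb_string() above.  "hdr" and "valid_len" are assumed to describe
a TransportID already fetched from the device.

	struct sbuf sb;
	char buf[128];

	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	scsi_transportid_sbuf(&sb, hdr, valid_len);
	sbuf_finish(&sb);
	printf("%s\n", sbuf_data(&sb));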
+struct scsi_nv scsi_proto_map[] = {
+	{ "fcp", SCSI_PROTO_FC },
+	{ "spi", SCSI_PROTO_SPI },
+	{ "ssa", SCSI_PROTO_SSA },
+	{ "sbp", SCSI_PROTO_1394 },
+	{ "1394", SCSI_PROTO_1394 },
+	{ "srp", SCSI_PROTO_RDMA },
+	{ "rdma", SCSI_PROTO_RDMA },
+	{ "iscsi", SCSI_PROTO_ISCSI },
+	{ "iqn", SCSI_PROTO_ISCSI },
+	{ "sas", SCSI_PROTO_SAS },
+	{ "aditp", SCSI_PROTO_ADITP },
+	{ "ata", SCSI_PROTO_ATA },
+	{ "uas", SCSI_PROTO_UAS },
+	{ "usb", SCSI_PROTO_UAS },
+	{ "sop", SCSI_PROTO_SOP }
+};
+
+const char *
+scsi_nv_to_str(struct scsi_nv *table, int num_table_entries, uint64_t value)
+{
+	int i;
+
+	for (i = 0; i < num_table_entries; i++) {
+		if (table[i].value == value)
+			return (table[i].name);
+	}
+
 	return (NULL);
 }
 
+/*
+ * Given a name/value table, find a value matching the given name.
+ * Return values:
+ *	SCSI_NV_FOUND - match found
+ *	SCSI_NV_AMBIGUOUS - more than one match, none of them exact
+ *	SCSI_NV_NOT_FOUND - no match found
+ */
+scsi_nv_status
+scsi_get_nv(struct scsi_nv *table, int num_table_entries,
+	    char *name, int *table_entry, scsi_nv_flags flags)
+{
+	int i, num_matches = 0;
+
+	for (i = 0; i < num_table_entries; i++) {
+		size_t table_len, name_len;
+
+		table_len = strlen(table[i].name);
+		name_len = strlen(name);
+
+		if ((((flags & SCSI_NV_FLAG_IG_CASE) != 0)
+		  && (strncasecmp(table[i].name, name, name_len) == 0))
+		|| (((flags & SCSI_NV_FLAG_IG_CASE) == 0)
+		 && (strncmp(table[i].name, name, name_len) == 0))) {
+			*table_entry = i;
+
+			/*
+			 * Check for an exact match.  If we have the same
+			 * number of characters in the table as the argument,
+			 * and we already know they're the same, we have
+			 * an exact match.
+		 	 */
+			if (table_len == name_len)
+				return (SCSI_NV_FOUND);
+
+			/*
+			 * Otherwise, bump up the number of matches.  We'll
+			 * see later how many we have.
+			 */
+			num_matches++;
+		}
+	}
+
+	if (num_matches > 1)
+		return (SCSI_NV_AMBIGUOUS);
+	else if (num_matches == 1)
+		return (SCSI_NV_FOUND);
+	else
+		return (SCSI_NV_NOT_FOUND);
+}
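
For reference, a minimal userland sketch (illustrative only, not part of
this commit) of how scsi_get_nv() is meant to be driven.  A private table
keeps the example self-contained; the include path is an assumption about
a stock FreeBSD userland.

	/*
	 * Hypothetical usage sketch for scsi_get_nv().
	 */
	#include <cam/scsi/scsi_all.h>

	static struct scsi_nv speed_map[] = {
		{ "slow", 0 },
		{ "fast", 1 },
		{ "faster", 2 }
	};

	static int
	lookup_speed(char *name)
	{
		scsi_nv_status status;
		int entry;

		status = scsi_get_nv(speed_map,
		    sizeof(speed_map) / sizeof(speed_map[0]), name, &entry,
		    SCSI_NV_FLAG_IG_CASE);
		/*
		 * "fast" is an exact match even though it also prefixes
		 * "faster"; "fa" would return SCSI_NV_AMBIGUOUS.
		 */
		if (status != SCSI_NV_FOUND)
			return (-1);
		return ((int)speed_map[entry].value);
	}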
+
+/*
+ * Parse transport IDs for Fibre Channel, 1394 and SAS.  Since these are
+ * all 64-bit numbers, the code is similar.
+ */
+int
+scsi_parse_transportid_64bit(int proto_id, char *id_str,
+			     struct scsi_transportid_header **hdr,
+			     unsigned int *alloc_len,
+#ifdef _KERNEL
+			     struct malloc_type *type, int flags,
+#endif
+			     char *error_str, int error_str_len)
+{
+	uint64_t value;
+	char *endptr;
+	int retval;
+	size_t alloc_size;
+
+	retval = 0;
+
+	value = strtouq(id_str, &endptr, 0); 
+	if (*endptr != '\0') {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: error "
+				 "parsing ID %s, 64-bit number required",
+				 __func__, id_str);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	switch (proto_id) {
+	case SCSI_PROTO_FC:
+		alloc_size = sizeof(struct scsi_transportid_fcp);
+		break;
+	case SCSI_PROTO_1394:
+		alloc_size = sizeof(struct scsi_transportid_1394);
+		break;
+	case SCSI_PROTO_SAS:
+		alloc_size = sizeof(struct scsi_transportid_sas);
+		break;
+	default:
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: unsupoprted "
+				 "protocol %d", __func__, proto_id);
+		}
+		retval = 1;
+		goto bailout;
+		break; /* NOTREACHED */
+	}
+#ifdef _KERNEL
+	*hdr = malloc(alloc_size, type, flags);
+#else /* _KERNEL */
+	*hdr = malloc(alloc_size);
+#endif /*_KERNEL */
+	if (*hdr == NULL) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: unable to "
+				 "allocate %zu bytes", __func__, alloc_size);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	*alloc_len = alloc_size;
+
+	bzero(*hdr, alloc_size);
+
+	switch (proto_id) {
+	case SCSI_PROTO_FC: {
+		struct scsi_transportid_fcp *fcp;
+
+		fcp = (struct scsi_transportid_fcp *)(*hdr);
+		fcp->format_protocol = SCSI_PROTO_FC |
+				       SCSI_TRN_FCP_FORMAT_DEFAULT;
+		scsi_u64to8b(value, fcp->n_port_name);
+		break;
+	}
+	case SCSI_PROTO_1394: {
+		struct scsi_transportid_1394 *sbp;
+
+		sbp = (struct scsi_transportid_1394 *)(*hdr);
+		sbp->format_protocol = SCSI_PROTO_1394 |
+				       SCSI_TRN_1394_FORMAT_DEFAULT;
+		scsi_u64to8b(value, sbp->eui64);
+		break;
+	}
+	case SCSI_PROTO_SAS: {
+		struct scsi_transportid_sas *sas;
+
+		sas = (struct scsi_transportid_sas *)(*hdr);
+		sas->format_protocol = SCSI_PROTO_SAS |
+				       SCSI_TRN_SAS_FORMAT_DEFAULT;
+		scsi_u64to8b(value, sas->sas_address);
+		break;
+	}
+	default:
+		break;
+	}
+bailout:
+	return (retval);
+}
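
A hedged userland sketch (non-_KERNEL calling convention) of the 64-bit
parser above; the SAS address is fabricated for illustration.

	#include <stdio.h>
	#include <stdlib.h>
	#include <cam/scsi/scsi_all.h>

	static void
	parse_sas_example(void)
	{
		struct scsi_transportid_header *hdr = NULL;
		unsigned int alloc_len = 0;
		char err[256];
		char id[] = "0x5000c50000000001";	/* made-up address */

		if (scsi_parse_transportid_64bit(SCSI_PROTO_SAS, id, &hdr,
		    &alloc_len, err, sizeof(err)) != 0) {
			fprintf(stderr, "%s\n", err);
			return;
		}
		printf("built %u-byte SAS Transport ID\n", alloc_len);
		free(hdr);
	}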
+
+/*
+ * Parse a SPI (Parallel SCSI) address of the form: id,rel_tgt_port
+ */
+int
+scsi_parse_transportid_spi(char *id_str, struct scsi_transportid_header **hdr,
+			   unsigned int *alloc_len,
+#ifdef _KERNEL
+			   struct malloc_type *type, int flags,
+#endif
+			   char *error_str, int error_str_len)
+{
+	unsigned long scsi_addr, target_port;
+	struct scsi_transportid_spi *spi;
+	char *tmpstr, *endptr;
+	int retval;
+
+	retval = 0;
+
+	tmpstr = strsep(&id_str, ",");
+	if (tmpstr == NULL) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len,
+				 "%s: no ID found", __func__);
+		}
+		retval = 1;
+		goto bailout;
+	}
+	scsi_addr = strtoul(tmpstr, &endptr, 0);
+	if (*endptr != '\0') {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: error "
+				 "parsing SCSI ID %s, number required",
+				 __func__, tmpstr);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	if (id_str == NULL) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: no relative "
+				 "target port found", __func__);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	target_port = strtoul(id_str, &endptr, 0);
+	if (*endptr != '\0') {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: error "
+				 "parsing relative target port %s, number "
+				 "required", __func__, id_str);
+		}
+		retval = 1;
+		goto bailout;
+	}
+#ifdef _KERNEL
+	spi = malloc(sizeof(*spi), type, flags);
+#else
+	spi = malloc(sizeof(*spi));
+#endif
+	if (spi == NULL) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: unable to "
+				 "allocate %zu bytes", __func__,
+				 sizeof(*spi));
+		}
+		retval = 1;
+		goto bailout;
+	}
+	*alloc_len = sizeof(*spi);
+	bzero(spi, sizeof(*spi));
+
+	spi->format_protocol = SCSI_PROTO_SPI | SCSI_TRN_SPI_FORMAT_DEFAULT;
+	scsi_ulto2b(scsi_addr, spi->scsi_addr);
+	scsi_ulto2b(target_port, spi->rel_trgt_port_id);
+
+	*hdr = (struct scsi_transportid_header *)spi;
+bailout:
+	return (retval);
+}
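
A short sketch of the "id,rel_tgt_port" form; note that strsep() consumes
its argument, so the ID must live in writable storage (same userland
assumptions as the sketches above).

	#include <stdlib.h>
	#include <cam/scsi/scsi_all.h>

	static void
	parse_spi_example(void)
	{
		struct scsi_transportid_header *hdr = NULL;
		unsigned int alloc_len = 0;
		char err[256];
		char id[] = "6,1";	/* SCSI ID 6, relative target port 1 */

		if (scsi_parse_transportid_spi(id, &hdr, &alloc_len, err,
		    sizeof(err)) == 0)
			free(hdr);
	}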
+
+/*
+ * Parse an RDMA/SRP Initiator Port ID string.  This is 32 hexadecimal digits,
+ * optionally prefixed by "0x" or "0X".
+ */
+int
+scsi_parse_transportid_rdma(char *id_str, struct scsi_transportid_header **hdr,
+			    unsigned int *alloc_len,
+#ifdef _KERNEL
+			    struct malloc_type *type, int flags,
+#endif
+			    char *error_str, int error_str_len)
+{
+	struct scsi_transportid_rdma *rdma;
+	int retval;
+	size_t id_len, rdma_id_size;
+	uint8_t rdma_id[SCSI_TRN_RDMA_PORT_LEN];
+	char *tmpstr;
+	unsigned int i, j;
+
+	retval = 0;
+	id_len = strlen(id_str);
+	rdma_id_size = SCSI_TRN_RDMA_PORT_LEN;
+
+	/*
+	 * Check the size.  It needs to be either 32 or 34 characters long.
+	 */
+	if ((id_len != (rdma_id_size * 2))
+	 && (id_len != ((rdma_id_size * 2) + 2))) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: RDMA ID "
+				 "must be 32 hex digits (0x prefix "
+				 "optional), only %zu seen", __func__, id_len);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	tmpstr = id_str;
+	/*
+	 * If the user gave us 34 characters, the string needs to start
+	 * with '0x'.
+	 */
+	if (id_len == ((rdma_id_size * 2) + 2)) {
+	 	if ((tmpstr[0] == '0')
+		 && ((tmpstr[1] == 'x') || (tmpstr[1] == 'X'))) {
+			tmpstr += 2;
+		} else {
+			if (error_str != NULL) {
+				snprintf(error_str, error_str_len, "%s: RDMA "
+					 "ID prefix, if used, must be \"0x\", "
+					 "got %s", __func__, tmpstr);
+			}
+			retval = 1;
+			goto bailout;
+		}
+	}
+	bzero(rdma_id, sizeof(rdma_id));
+
+	/*
+	 * Convert ASCII hex into binary bytes.  There is no standard
+	 * 128-bit integer type, and so no strtou128() routine to convert
+	 * from hex into a large integer.  In the end, we're converting to
+	 * a byte array rather than to an integer, so that and the fact
+	 * that we require the user to give us 32 hex digits simplifies the
+	 * logic.
+	 */
+	for (i = 0; i < (rdma_id_size * 2); i++) {
+		int cur_shift;
+		unsigned char c;
+
+		/* Increment the byte array one for every 2 hex digits */
+		j = i >> 1;
+
+		/*
+		 * The first digit in every pair is the most significant
+		 * 4 bits.  The second is the least significant 4 bits.
+		 */
+		if ((i % 2) == 0)
+			cur_shift = 4;
+		else 
+			cur_shift = 0;
+
+		c = tmpstr[i];
+		/* Convert the ASCII hex character into a number */
+		if (isdigit(c))
+			c -= '0';
+		else if (isalpha(c))
+			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
+		else {
+			if (error_str != NULL) {
+				snprintf(error_str, error_str_len, "%s: "
+					 "RDMA ID must be hex digits, got "
+					 "invalid character %c", __func__,
+					 tmpstr[i]);
+			}
+			retval = 1;
+			goto bailout;
+		}
+		/*
+		 * The converted number can't be less than 0; the type is
+		 * unsigned, and the subtraction logic will not give us 
+		 * a negative number.  So we only need to make sure that
+		 * the value is not greater than 0xf.  (i.e. make sure the
+		 * user didn't give us a value like "0x12jklmno").
+		 */
+		if (c > 0xf) {
+			if (error_str != NULL) {
+				snprintf(error_str, error_str_len, "%s: "
+					 "RDMA ID must be hex digits, got "
+					 "invalid character %c", __func__,
+					 tmpstr[i]);
+			}
+			retval = 1;
+			goto bailout;
+		}
+		
+		rdma_id[j] |= c << cur_shift;
+	}
+
+#ifdef _KERNEL
+	rdma = malloc(sizeof(*rdma), type, flags);
+#else
+	rdma = malloc(sizeof(*rdma));
+#endif
+	if (rdma == NULL) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: unable to "
+				 "allocate %zu bytes", __func__,
+				 sizeof(*rdma));
+		}
+		retval = 1;
+		goto bailout;
+	}
+	*alloc_len = sizeof(*rdma);
+	bzero(rdma, *alloc_len);
+
+	rdma->format_protocol = SCSI_PROTO_RDMA | SCSI_TRN_RDMA_FORMAT_DEFAULT;
+	bcopy(rdma_id, rdma->initiator_port_id, SCSI_TRN_RDMA_PORT_LEN);
+
+	*hdr = (struct scsi_transportid_header *)rdma;
+
+bailout:
+	return (retval);
+}
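
A sketch of the RDMA form: exactly 32 hex digits (16 bytes), with the
optional "0x" prefix; the ID value is fabricated.

	#include <stdlib.h>
	#include <cam/scsi/scsi_all.h>

	static void
	parse_rdma_example(void)
	{
		struct scsi_transportid_header *hdr = NULL;
		unsigned int alloc_len = 0;
		char err[256];
		char id[] = "0x000102030405060708090a0b0c0d0e0f";

		if (scsi_parse_transportid_rdma(id, &hdr, &alloc_len, err,
		    sizeof(err)) == 0)
			free(hdr);
	}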
+
+/*
+ * Parse an iSCSI name.  The format is either just the name:
+ *
+ *	iqn.2012-06.com.example:target0
+ * or the name, separator and initiator session ID:
+ *
+ *	iqn.2012-06.com.example:target0,i,0x123
+ *
+ * The separator format is exact.
+ */
+int
+scsi_parse_transportid_iscsi(char *id_str, struct scsi_transportid_header **hdr,
+			     unsigned int *alloc_len,
+#ifdef _KERNEL
+			     struct malloc_type *type, int flags,
+#endif
+			     char *error_str, int error_str_len)
+{
+	size_t id_len, sep_len, id_size, name_len;
+	int retval;
+	unsigned int i, sep_pos, sep_found;
+	const char *sep_template = ",i,0x";
+	const char *iqn_prefix = "iqn.";
+	struct scsi_transportid_iscsi_device *iscsi;
+
+	retval = 0;
+	sep_found = 0;
+
+	id_len = strlen(id_str);
+	sep_len = strlen(sep_template);
+
+	/*
+	 * The separator is defined as exactly ',i,0x'.  Any other commas,
+	 * or any other form, is an error.  So look for a comma, and once
+	 * we find that, the next few characters must match the separator
+	 * exactly.  Once we get through the separator, there should be at
+	 * least one character.
+	 */
+	for (i = 0, sep_pos = 0; i < id_len; i++) {
+		if (sep_pos == 0) {
+		 	if (id_str[i] == sep_template[sep_pos])
+				sep_pos++;
+
+			continue;
+		}
+		if (sep_pos < sep_len) {
+			if (id_str[i] == sep_template[sep_pos]) {
+				sep_pos++;
+				continue;
+			} 
+			if (error_str != NULL) {
+				snprintf(error_str, error_str_len, "%s: "
+					 "invalid separator in iSCSI name "
+					 "\"%s\"",
+					 __func__, id_str);
+			}
+			retval = 1;
+			goto bailout;
+		} else {
+			sep_found = 1;
+			break;
+		}
+	}
+
+	/*
+	 * Check to see whether we have a separator but no digits after it.
+	 */
+	if ((sep_pos != 0)
+	 && (sep_found == 0)) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: no digits "
+				 "found after separator in iSCSI name \"%s\"",
+				 __func__, id_str);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	/*
+	 * The incoming ID string has the "iqn." prefix stripped off.  We
+	 * need enough space for the base structure (the structures are the
+	 * same for the two iSCSI forms), the prefix, the ID string and a
+	 * terminating NUL.
+	 */
+	id_size = sizeof(*iscsi) + strlen(iqn_prefix) + id_len + 1;
+
+#ifdef _KERNEL
+	iscsi = malloc(id_size, type, flags);
+#else
+	iscsi = malloc(id_size);
+#endif
+	if (iscsi == NULL) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: unable to "
+				 "allocate %zu bytes", __func__, id_size);
+		}
+		retval = 1;
+		goto bailout;
+	}
+	*alloc_len = id_size;
+	bzero(iscsi, id_size);
+
+	iscsi->format_protocol = SCSI_PROTO_ISCSI;
+	if (sep_found == 0)
+		iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_DEVICE;
+	else
+		iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_PORT;
+	name_len = id_size - sizeof(*iscsi);
+	scsi_ulto2b(name_len, iscsi->additional_length);
+	snprintf(iscsi->iscsi_name, name_len, "%s%s", iqn_prefix, id_str);
+
+	*hdr = (struct scsi_transportid_header *)iscsi;
+
+bailout:
+	return (retval);
+}
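
A sketch of the iSCSI form.  As the comment above notes, the caller has
already stripped the "iqn." prefix (scsi_parse_transportid() below splits
on '.'), and the ",i,0x" suffix selects the port form.

	#include <stdlib.h>
	#include <cam/scsi/scsi_all.h>

	static void
	parse_iscsi_example(void)
	{
		struct scsi_transportid_header *hdr = NULL;
		unsigned int alloc_len = 0;
		char err[256];
		char id[] = "2012-06.com.example:target0,i,0x123";

		if (scsi_parse_transportid_iscsi(id, &hdr, &alloc_len, err,
		    sizeof(err)) == 0)
			free(hdr);
	}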
+
+/*
+ * Parse a SCSI over PCIe (SOP) identifier.  The Routing ID can either be
+ * of the form 'bus,device,function' or 'bus,function'.
+ */
+int
+scsi_parse_transportid_sop(char *id_str, struct scsi_transportid_header **hdr,
+			   unsigned int *alloc_len,
+#ifdef _KERNEL
+			   struct malloc_type *type, int flags,
+#endif
+			   char *error_str, int error_str_len)
+{
+	struct scsi_transportid_sop *sop;
+	unsigned long bus, device, function;
+	char *tmpstr, *endptr;
+	int retval, device_spec;
+
+	retval = 0;
+	device_spec = 0;
+	device = 0;
+
+	tmpstr = strsep(&id_str, ",");
+	if ((tmpstr == NULL)
+	 || (*tmpstr == '\0')) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: no ID found",
+				 __func__);
+		}
+		retval = 1;
+		goto bailout;
+	}
+	bus = strtoul(tmpstr, &endptr, 0);
+	if (*endptr != '\0') {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: error "
+				 "parsing PCIe bus %s, number required",
+				 __func__, tmpstr);
+		}
+		retval = 1;
+		goto bailout;
+	}
+	if ((id_str == NULL) 
+	 || (*id_str == '\0')) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: no PCIe "
+				 "device or function found", __func__);
+		}
+		retval = 1;
+		goto bailout;
+	}
+	tmpstr = strsep(&id_str, ",");
+	function = strtoul(tmpstr, &endptr, 0);
+	if (*endptr != '\0') {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: error "
+				 "parsing PCIe device/function %s, number "
+				 "required", __func__, tmpstr);
+		}
+		retval = 1;
+		goto bailout;
+	}
+	/*
+	 * Check to see whether the user specified a third value.  If so,
+	 * the second is the device.
+	 */
+	if (id_str != NULL) {
+		if (*id_str == '\0') {
+			if (error_str != NULL) {
+				snprintf(error_str, error_str_len, "%s: "
+					 "no PCIe function found", __func__);
+			}
+			retval = 1;
+			goto bailout;
+		}
+		device = function;
+		device_spec = 1;
+		function = strtoul(id_str, &endptr, 0);
+		if (*endptr != '\0') {
+			if (error_str != NULL) {
+				snprintf(error_str, error_str_len, "%s: "
+					 "error parsing PCIe function %s, "
+					 "number required", __func__, id_str);
+			}
+			retval = 1;
+			goto bailout;
+		}
+	}
+	if (bus > SCSI_TRN_SOP_BUS_MAX) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: bus value "
+				 "%lu greater than maximum %u", __func__,
+				 bus, SCSI_TRN_SOP_BUS_MAX);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	if ((device_spec != 0)
+	 && (device > SCSI_TRN_SOP_DEV_MASK)) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: device value "
+				 "%lu greater than maximum %u", __func__,
+				 device, SCSI_TRN_SOP_DEV_MAX);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	if (((device_spec != 0)
+	  && (function > SCSI_TRN_SOP_FUNC_NORM_MAX))
+	 || ((device_spec == 0)
+	  && (function > SCSI_TRN_SOP_FUNC_ALT_MAX))) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: function value "
+				 "%lu greater than maximum %u", __func__,
+				 function, (device_spec == 0) ?
+				 SCSI_TRN_SOP_FUNC_ALT_MAX : 
+				 SCSI_TRN_SOP_FUNC_NORM_MAX);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+#ifdef _KERNEL
+	sop = malloc(sizeof(*sop), type, flags);
+#else
+	sop = malloc(sizeof(*sop));
+#endif
+	if (sop == NULL) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: unable to "
+				 "allocate %zu bytes", __func__, sizeof(*sop));
+		}
+		retval = 1;
+		goto bailout;
+	}
+	*alloc_len = sizeof(*sop);
+	bzero(sop, sizeof(*sop));
+	sop->format_protocol = SCSI_PROTO_SOP | SCSI_TRN_SOP_FORMAT_DEFAULT;
+	if (device_spec != 0) {
+		struct scsi_sop_routing_id_norm rid;
+
+		rid.bus = bus;
+		rid.devfunc = (device << SCSI_TRN_SOP_DEV_SHIFT) | function;
+		bcopy(&rid, sop->routing_id, MIN(sizeof(rid),
+		      sizeof(sop->routing_id)));
+	} else {
+		struct scsi_sop_routing_id_alt rid;
+
+		rid.bus = bus;
+		rid.function = function;
+		bcopy(&rid, sop->routing_id, MIN(sizeof(rid),
+		      sizeof(sop->routing_id)));
+	}
+
+	*hdr = (struct scsi_transportid_header *)sop;
+bailout:
+	return (retval);
+}
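
A sketch of the SOP form: three comma-separated values select the
bus,device,function Routing ID; two select the alternate bus,function
form.

	#include <stdlib.h>
	#include <cam/scsi/scsi_all.h>

	static void
	parse_sop_example(void)
	{
		struct scsi_transportid_header *hdr = NULL;
		unsigned int alloc_len = 0;
		char err[256];
		char id[] = "0,3,1";	/* bus 0, device 3, function 1 */

		if (scsi_parse_transportid_sop(id, &hdr, &alloc_len, err,
		    sizeof(err)) == 0)
			free(hdr);
	}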
+
+/*
+ * transportid_str: NUL-terminated string with format: protocol,id
+ *		    The ID is protocol specific.
+ * hdr:		    Storage will be allocated for the transport ID.
+ * alloc_len:	    The amount of memory allocated is returned here.
+ * type:	    Malloc bucket (kernel only).
+ * flags:	    Malloc flags (kernel only).
+ * error_str:	    If non-NULL, it will contain error information (without
+ * 		    a terminating newline) if an error is returned.
+ * error_str_len:   Allocated length of the error string.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+scsi_parse_transportid(char *transportid_str,
+		       struct scsi_transportid_header **hdr,
+		       unsigned int *alloc_len,
+#ifdef _KERNEL
+		       struct malloc_type *type, int flags,
+#endif
+		       char *error_str, int error_str_len)
+{
+	char *tmpstr;
+	scsi_nv_status status;
+	int retval, num_proto_entries, table_entry;
+
+	retval = 0;
+	table_entry = 0;
+
+	/*
+	 * We do allow a period as well as a comma to separate the protocol
+	 * from the ID string.  This is to accommodate iSCSI names, which
+	 * start with "iqn.".
+	 */
+	tmpstr = strsep(&transportid_str, ",.");
+	if (tmpstr == NULL) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len,
+				 "%s: transportid_str is NULL", __func__);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	num_proto_entries = sizeof(scsi_proto_map) /
+			    sizeof(scsi_proto_map[0]);
+	status = scsi_get_nv(scsi_proto_map, num_proto_entries, tmpstr,
+			     &table_entry, SCSI_NV_FLAG_IG_CASE);
+	if (status != SCSI_NV_FOUND) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: %s protocol "
+				 "name %s", __func__,
+				 (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" :
+				 "invalid", tmpstr);
+		}
+		retval = 1;
+		goto bailout;
+	}
+	switch (scsi_proto_map[table_entry].value) {
+	case SCSI_PROTO_FC:
+	case SCSI_PROTO_1394:
+	case SCSI_PROTO_SAS:
+		retval = scsi_parse_transportid_64bit(
+		    scsi_proto_map[table_entry].value, transportid_str, hdr,
+		    alloc_len,
+#ifdef _KERNEL
+		    type, flags,
+#endif
+		    error_str, error_str_len);
+		break;
+	case SCSI_PROTO_SPI:
+		retval = scsi_parse_transportid_spi(transportid_str, hdr,
+		    alloc_len,
+#ifdef _KERNEL
+		    type, flags,
+#endif
+		    error_str, error_str_len);
+		break;
+	case SCSI_PROTO_RDMA:
+		retval = scsi_parse_transportid_rdma(transportid_str, hdr,
+		    alloc_len,
+#ifdef _KERNEL
+		    type, flags,
+#endif
+		    error_str, error_str_len);
+		break;
+	case SCSI_PROTO_ISCSI:
+		retval = scsi_parse_transportid_iscsi(transportid_str, hdr,
+		    alloc_len,
+#ifdef _KERNEL
+		    type, flags,
+#endif
+		    error_str, error_str_len);
+		break;
+	case SCSI_PROTO_SOP:
+		retval = scsi_parse_transportid_sop(transportid_str, hdr,
+		    alloc_len,
+#ifdef _KERNEL
+		    type, flags,
+#endif
+		    error_str, error_str_len);
+		break;
+	case SCSI_PROTO_SSA:
+	case SCSI_PROTO_ADITP:
+	case SCSI_PROTO_ATA:
+	case SCSI_PROTO_UAS:
+	case SCSI_PROTO_NONE:
+	default:
+		/*
+		 * There is no format defined for a Transport ID for these
+		 * protocols.  So even if the user gives us something, we
+		 * have no way to turn it into a standard SCSI Transport ID.
+		 */
+		retval = 1;
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "%s: no Transport "
+				 "ID format exists for protocol %s",
+				 __func__, tmpstr);
+		}
+		goto bailout;
+		break;	/* NOTREACHED */
+	}
+bailout:
+	return (retval);
+}
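
Tying the above together, a hedged sketch of the top-level entry point:
the string is consumed in place, and a name like "iqn.2012-06..." parses
because '.' may terminate the protocol token as well as ','.

	#include <stdio.h>
	#include <stdlib.h>
	#include <cam/scsi/scsi_all.h>

	static void
	parse_transportid_example(void)
	{
		struct scsi_transportid_header *hdr = NULL;
		unsigned int alloc_len = 0;
		char err[256];
		char str[] = "sas,0x5000c50000000001";	/* made-up address */

		if (scsi_parse_transportid(str, &hdr, &alloc_len, err,
		    sizeof(err)) != 0) {
			fprintf(stderr, "%s\n", err);
			return;
		}
		free(hdr);
	}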
+
+struct scsi_attrib_table_entry scsi_mam_attr_table[] = {
+	{ SMA_ATTR_REM_CAP_PARTITION, SCSI_ATTR_FLAG_NONE,
+	  "Remaining Capacity in Partition",
+	  /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf,/*parse_str*/ NULL },
+	{ SMA_ATTR_MAX_CAP_PARTITION, SCSI_ATTR_FLAG_NONE,
+	  "Maximum Capacity in Partition",
+	  /*suffix*/"MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL },
+	{ SMA_ATTR_TAPEALERT_FLAGS, SCSI_ATTR_FLAG_HEX,
+	  "TapeAlert Flags",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL },
+	{ SMA_ATTR_LOAD_COUNT, SCSI_ATTR_FLAG_NONE,
+	  "Load Count",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL },
+	{ SMA_ATTR_MAM_SPACE_REMAINING, SCSI_ATTR_FLAG_NONE,
+	  "MAM Space Remaining",
+	  /*suffix*/"bytes", /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_DEV_ASSIGNING_ORG, SCSI_ATTR_FLAG_NONE,
+	  "Assigning Organization",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_FORMAT_DENSITY_CODE, SCSI_ATTR_FLAG_HEX,
+	  "Format Density Code",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL },
+	{ SMA_ATTR_INITIALIZATION_COUNT, SCSI_ATTR_FLAG_NONE,
+	  "Initialization Count",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL },
+	{ SMA_ATTR_VOLUME_ID, SCSI_ATTR_FLAG_NONE,
+	  "Volume Identifier",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_VOLUME_CHANGE_REF, SCSI_ATTR_FLAG_HEX,
+	  "Volume Change Reference",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_DEV_SERIAL_LAST_LOAD, SCSI_ATTR_FLAG_NONE,
+	  "Device Vendor/Serial at Last Load",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_DEV_SERIAL_LAST_LOAD_1, SCSI_ATTR_FLAG_NONE,
+	  "Device Vendor/Serial at Last Load - 1",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_DEV_SERIAL_LAST_LOAD_2, SCSI_ATTR_FLAG_NONE,
+	  "Device Vendor/Serial at Last Load - 2",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_DEV_SERIAL_LAST_LOAD_3, SCSI_ATTR_FLAG_NONE,
+	  "Device Vendor/Serial at Last Load - 3",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_TOTAL_MB_WRITTEN_LT, SCSI_ATTR_FLAG_NONE,
+	  "Total MB Written in Medium Life",
+	  /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_TOTAL_MB_READ_LT, SCSI_ATTR_FLAG_NONE,
+	  "Total MB Read in Medium Life",
+	  /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_TOTAL_MB_WRITTEN_CUR, SCSI_ATTR_FLAG_NONE,
+	  "Total MB Written in Current/Last Load",
+	  /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_TOTAL_MB_READ_CUR, SCSI_ATTR_FLAG_NONE,
+	  "Total MB Read in Current/Last Load",
+	  /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_FIRST_ENC_BLOCK, SCSI_ATTR_FLAG_NONE,
+	  "Logical Position of First Encrypted Block",
+	  /*suffix*/ NULL, /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_NEXT_UNENC_BLOCK, SCSI_ATTR_FLAG_NONE,
+	  "Logical Position of First Unencrypted Block after First "
+	  "Encrypted Block",
+	  /*suffix*/ NULL, /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MEDIUM_USAGE_HIST, SCSI_ATTR_FLAG_NONE,
+	  "Medium Usage History",
+	  /*suffix*/ NULL, /*to_str*/ NULL,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_PART_USAGE_HIST, SCSI_ATTR_FLAG_NONE,
+	  "Partition Usage History",
+	  /*suffix*/ NULL, /*to_str*/ NULL,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_MANUF, SCSI_ATTR_FLAG_NONE,
+	  "Medium Manufacturer",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_SERIAL, SCSI_ATTR_FLAG_NONE,
+	  "Medium Serial Number",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_LENGTH, SCSI_ATTR_FLAG_NONE,
+	  "Medium Length",
+	  /*suffix*/"m", /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_WIDTH, SCSI_ATTR_FLAG_FP | SCSI_ATTR_FLAG_DIV_10 |
+	  SCSI_ATTR_FLAG_FP_1DIGIT,
+	  "Medium Width",
+	  /*suffix*/"mm", /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_ASSIGNING_ORG, SCSI_ATTR_FLAG_NONE,
+	  "Assigning Organization",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_DENSITY_CODE, SCSI_ATTR_FLAG_HEX,
+	  "Medium Density Code",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_MANUF_DATE, SCSI_ATTR_FLAG_NONE,
+	  "Medium Manufacture Date",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MAM_CAPACITY, SCSI_ATTR_FLAG_NONE,
+	  "MAM Capacity",
+	  /*suffix*/"bytes", /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_TYPE, SCSI_ATTR_FLAG_HEX,
+	  "Medium Type",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_TYPE_INFO, SCSI_ATTR_FLAG_HEX,
+	  "Medium Type Information",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MED_SERIAL_NUM, SCSI_ATTR_FLAG_NONE,
+	  "Medium Serial Number",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_APP_VENDOR, SCSI_ATTR_FLAG_NONE,
+	  "Application Vendor",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_APP_NAME, SCSI_ATTR_FLAG_NONE,
+	  "Application Name",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_APP_VERSION, SCSI_ATTR_FLAG_NONE,
+	  "Application Version",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_USER_MED_TEXT_LABEL, SCSI_ATTR_FLAG_NONE,
+	  "User Medium Text Label",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_text_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_LAST_WRITTEN_TIME, SCSI_ATTR_FLAG_NONE,
+	  "Date and Time Last Written",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_TEXT_LOCAL_ID, SCSI_ATTR_FLAG_HEX,
+	  "Text Localization Identifier",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_BARCODE, SCSI_ATTR_FLAG_NONE,
+	  "Barcode",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_HOST_OWNER_NAME, SCSI_ATTR_FLAG_NONE,
+	  "Owning Host Textual Name",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_text_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_MEDIA_POOL, SCSI_ATTR_FLAG_NONE,
+	  "Media Pool",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_text_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_PART_USER_LABEL, SCSI_ATTR_FLAG_NONE,
+	  "Partition User Text Label",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_LOAD_UNLOAD_AT_PART, SCSI_ATTR_FLAG_NONE,
+	  "Load/Unload at Partition",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_APP_FORMAT_VERSION, SCSI_ATTR_FLAG_NONE,
+	  "Application Format Version",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf,
+	  /*parse_str*/ NULL },
+	{ SMA_ATTR_VOL_COHERENCY_INFO, SCSI_ATTR_FLAG_NONE,
+	  "Volume Coherency Information",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_volcoh_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x0ff1, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM Creation",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x0ff2, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM C3",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x0ff3, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM RW",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x0ff4, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM SDC List",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x0ff7, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM Post Scan",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x0ffe, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM Checksum",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x17f1, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM Creation",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x17f2, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM C3",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x17f3, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM RW",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x17f4, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM SDC List",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x17f7, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM Post Scan",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+	{ 0x17ff, SCSI_ATTR_FLAG_NONE,
+	  "Spectra MLM Checksum",
+	  /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf,
+	  /*parse_str*/ NULL },
+};
+
+/*
+ * Print out Volume Coherency Information (Attribute 0x080c).
+ * This field has two variable length members, including one at the
+ * beginning, so it isn't practical to have a fixed structure definition.
+ * This is current as of SSC4r03 (see section 4.2.21.3), dated March 25,
+ * 2013.
+ */
+int
+scsi_attrib_volcoh_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+			 uint32_t valid_len, uint32_t flags,
+			 uint32_t output_flags, char *error_str,
+			 int error_str_len)
+{
+	size_t avail_len;
+	uint32_t field_size;
+	uint64_t tmp_val;
+	uint8_t *cur_ptr;
+	int retval;
+	int vcr_len, as_len;
+
+	retval = 0;
+	tmp_val = 0;
+
+	field_size = scsi_2btoul(hdr->length);
+	avail_len = valid_len - sizeof(*hdr);
+	if (field_size > avail_len) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "Available "
+				 "length of attribute ID 0x%.4x %zu < field "
+				 "length %u", scsi_2btoul(hdr->id), avail_len,
+				 field_size);
+		}
+		retval = 1;
+		goto bailout;
+	} else if (field_size == 0) {
+		/*
+		 * It isn't clear from the spec whether a field length of
+		 * 0 is invalid here.  It probably is, but be lenient here
+		 * to avoid inconveniencing the user.
+		 */
+		goto bailout;
+	}
+	cur_ptr = hdr->attribute;
+	vcr_len = *cur_ptr;
+	cur_ptr++;
+
+	sbuf_printf(sb, "\n\tVolume Change Reference Value:");
+
+	switch (vcr_len) {
+	case 0:
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "Volume Change "
+				 "Reference value has length of 0");
+		}
+		retval = 1;
+		goto bailout;
+		break; /*NOTREACHED*/
+	case 1:
+		tmp_val = *cur_ptr;
+		break;
+	case 2:
+		tmp_val = scsi_2btoul(cur_ptr);
+		break;
+	case 3:
+		tmp_val = scsi_3btoul(cur_ptr);
+		break;
+	case 4:
+		tmp_val = scsi_4btoul(cur_ptr);
+		break;
+	case 8:
+		tmp_val = scsi_8btou64(cur_ptr);
+		break;
+	default:
+		sbuf_printf(sb, "\n");
+		sbuf_hexdump(sb, cur_ptr, vcr_len, NULL, 0);
+		break;
+	}
+	if (vcr_len <= 8)
+		sbuf_printf(sb, " 0x%jx\n", (uintmax_t)tmp_val);
+
+	cur_ptr += vcr_len;
+	tmp_val = scsi_8btou64(cur_ptr);
+	sbuf_printf(sb, "\tVolume Coherency Count: %ju\n", (uintmax_t)tmp_val);
+
+	cur_ptr += sizeof(tmp_val);
+	tmp_val = scsi_8btou64(cur_ptr);
+	sbuf_printf(sb, "\tVolume Coherency Set Identifier: 0x%jx\n",
+		    (uintmax_t)tmp_val);
+
+	/*
+	 * Figure out how long the Application Client Specific Information
+	 * is and produce a hexdump.
+	 */
+	cur_ptr += sizeof(tmp_val);
+	as_len = scsi_2btoul(cur_ptr);
+	cur_ptr += sizeof(uint16_t);
+	sbuf_printf(sb, "\tApplication Client Specific Information: ");
+	if (((as_len == SCSI_LTFS_VER0_LEN)
+	  || (as_len == SCSI_LTFS_VER1_LEN))
+	 && (strncmp(cur_ptr, SCSI_LTFS_STR_NAME, SCSI_LTFS_STR_LEN) == 0)) {
+		sbuf_printf(sb, "LTFS\n");
+		cur_ptr += SCSI_LTFS_STR_LEN + 1;
+		if (cur_ptr[SCSI_LTFS_UUID_LEN] != '\0')
+			cur_ptr[SCSI_LTFS_UUID_LEN] = '\0';
+		sbuf_printf(sb, "\tLTFS UUID: %s\n", cur_ptr);
+		cur_ptr += SCSI_LTFS_UUID_LEN + 1;
+		/* XXX KDM check the length */
+		sbuf_printf(sb, "\tLTFS Version: %d\n", *cur_ptr);
+	} else {
+		sbuf_printf(sb, "Unknown\n");
+		sbuf_hexdump(sb, cur_ptr, as_len, NULL, 0);
+	}
+
+bailout:
+	return (retval);
+}
+
+int
+scsi_attrib_vendser_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+			 uint32_t valid_len, uint32_t flags, 
+			 uint32_t output_flags, char *error_str,
+			 int error_str_len)
+{
+	size_t avail_len;
+	uint32_t field_size;
+	struct scsi_attrib_vendser *vendser;
+	cam_strvis_flags strvis_flags;
+	int retval = 0;
+
+	field_size = scsi_2btoul(hdr->length);
+	avail_len = valid_len - sizeof(*hdr);
+	if (field_size > avail_len) {
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "Available "
+				 "length of attribute ID 0x%.4x %zu < field "
+				 "length %u", scsi_2btoul(hdr->id), avail_len,
+				 field_size);
+		}
+		retval = 1;
+		goto bailout;
+	} else if (field_size == 0) {
+		/*
+		 * A field size of 0 doesn't make sense here.  The device
+		 * can at least give you the vendor ID, even if it can't
+		 * give you the serial number.
+		 */
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "The length of "
+				 "attribute ID 0x%.4x is 0",
+				 scsi_2btoul(hdr->id));
+		}
+		retval = 1;
+		goto bailout;
+	}
+	vendser = (struct scsi_attrib_vendser *)hdr->attribute;
+
+	switch (output_flags & SCSI_ATTR_OUTPUT_NONASCII_MASK) {
+	case SCSI_ATTR_OUTPUT_NONASCII_TRIM:
+		strvis_flags = CAM_STRVIS_FLAG_NONASCII_TRIM;
+		break;
+	case SCSI_ATTR_OUTPUT_NONASCII_RAW:
+		strvis_flags = CAM_STRVIS_FLAG_NONASCII_RAW;
+		break;
+	case SCSI_ATTR_OUTPUT_NONASCII_ESC:
+	default:
+		strvis_flags = CAM_STRVIS_FLAG_NONASCII_ESC;
+		break;
+	}
+	cam_strvis_sbuf(sb, vendser->vendor, sizeof(vendser->vendor),
+	    strvis_flags);
+	sbuf_putc(sb, ' ');
+	cam_strvis_sbuf(sb, vendser->serial_num, sizeof(vendser->serial_num),
+	    strvis_flags);
+bailout:
+	return (retval);
+}
+
+int
+scsi_attrib_hexdump_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+			 uint32_t valid_len, uint32_t flags,
+			 uint32_t output_flags, char *error_str,
+			 int error_str_len)
+{
+	uint32_t field_size;
+	ssize_t avail_len;
+	uint32_t print_len;
+	uint8_t *num_ptr;
+	int retval = 0;
+
+	field_size = scsi_2btoul(hdr->length);
+	avail_len = valid_len - sizeof(*hdr);
+	print_len = MIN(avail_len, field_size);
+	num_ptr = hdr->attribute;
+
+	if (print_len > 0) {
+		sbuf_printf(sb, "\n");
+		sbuf_hexdump(sb, num_ptr, print_len, NULL, 0);
+	}
+
+	return (retval);
+}
+
+int
+scsi_attrib_int_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+		     uint32_t valid_len, uint32_t flags,
+		     uint32_t output_flags, char *error_str,
+		     int error_str_len)
+{
+	uint64_t print_number;
+	size_t avail_len;
+	uint32_t number_size;
+	int retval = 0;
+
+	number_size = scsi_2btoul(hdr->length);
+
+	avail_len = valid_len - sizeof(*hdr);
+	if (avail_len < number_size) { 
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "Available "
+				 "length of attribute ID 0x%.4x %zu < field "
+				 "length %u", scsi_2btoul(hdr->id), avail_len,
+				 number_size);
+		}
+		retval = 1;
+		goto bailout;
+	}
+
+	switch (number_size) {
+	case 0:
+		/*
+		 * We don't treat this as an error, since there may be
+		 * scenarios where a device reports a field but then gives
+		 * a length of 0.  See the note in scsi_attrib_ascii_sbuf().
+		 */
+		goto bailout;
+		break; /*NOTREACHED*/
+	case 1:
+		print_number = hdr->attribute[0];
+		break;
+	case 2:
+		print_number = scsi_2btoul(hdr->attribute);
+		break;
+	case 3:
+		print_number = scsi_3btoul(hdr->attribute);
+		break;
+	case 4:
+		print_number = scsi_4btoul(hdr->attribute);
+		break;
+	case 8:
+		print_number = scsi_8btou64(hdr->attribute);
+		break;
+	default:
+		/*
+		 * If we wind up here, the number is too big to print
+		 * normally, so just do a hexdump.
+		 */
+		retval = scsi_attrib_hexdump_sbuf(sb, hdr, valid_len,
+						  flags, output_flags,
+						  error_str, error_str_len);
+		goto bailout;
+		break;
+	}
+
+	if (flags & SCSI_ATTR_FLAG_FP) {
+#ifndef _KERNEL
+		long double num_float;
+
+		num_float = (long double)print_number;
+
+		if (flags & SCSI_ATTR_FLAG_DIV_10)
+			num_float /= 10;
+
+		sbuf_printf(sb, "%.*Lf", (flags & SCSI_ATTR_FLAG_FP_1DIGIT) ?
+			    1 : 0, num_float);
+#else /* _KERNEL */
+		sbuf_printf(sb, "%ju", (flags & SCSI_ATTR_FLAG_DIV_10) ?
+			    (print_number / 10) : print_number);
+#endif /* _KERNEL */
+	} else if (flags & SCSI_ATTR_FLAG_HEX) {
+		sbuf_printf(sb, "0x%jx", (uintmax_t)print_number);
+	} else
+		sbuf_printf(sb, "%ju", (uintmax_t)print_number);
+
+bailout:
+	return (retval);
+}
+
+int
+scsi_attrib_ascii_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+		       uint32_t valid_len, uint32_t flags,
+		       uint32_t output_flags, char *error_str,
+		       int error_str_len)
+{
+	size_t avail_len;
+	uint32_t field_size, print_size;
+	int retval = 0;
+
+	avail_len = valid_len - sizeof(*hdr);
+	field_size = scsi_2btoul(hdr->length);
+	print_size = MIN(avail_len, field_size);
+
+	if (print_size > 0) {
+		cam_strvis_flags strvis_flags;
+
+		switch (output_flags & SCSI_ATTR_OUTPUT_NONASCII_MASK) {
+		case SCSI_ATTR_OUTPUT_NONASCII_TRIM:
+			strvis_flags = CAM_STRVIS_FLAG_NONASCII_TRIM;
+			break;
+		case SCSI_ATTR_OUTPUT_NONASCII_RAW:
+			strvis_flags = CAM_STRVIS_FLAG_NONASCII_RAW;
+			break;
+		case SCSI_ATTR_OUTPUT_NONASCII_ESC:
+		default:
+			strvis_flags = CAM_STRVIS_FLAG_NONASCII_ESC;
+			break;
+		}
+		cam_strvis_sbuf(sb, hdr->attribute, print_size, strvis_flags);
+	} else if (avail_len < field_size) {
+		/*
+		 * We only report an error if the user didn't allocate
+		 * enough space to hold the full value of this field.  If
+		 * the field length is 0, that is allowed by the spec.
+		 * e.g. in SPC-4r37, section 7.4.2.2.5, VOLUME IDENTIFIER
+		 * "This attribute indicates the current volume identifier
+		 * (see SMC-3) of the medium. If the device server supports
+		 * this attribute but does not have access to the volume
+		 * identifier, the device server shall report this attribute
+		 * with an attribute length value of zero."
+		 */
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "Available "
+				 "length of attribute ID 0x%.4x %zu < field "
+				 "length %u", scsi_2btoul(hdr->id), avail_len,
+				 field_size);
+		}
+		retval = 1;
+	}
+
+	return (retval);
+}
+
+int
+scsi_attrib_text_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+		      uint32_t valid_len, uint32_t flags, 
+		      uint32_t output_flags, char *error_str,
+		      int error_str_len)
+{
+	size_t avail_len;
+	uint32_t field_size, print_size;
+	int retval = 0;
+	int esc_text = 1;
+
+	avail_len = valid_len - sizeof(*hdr);
+	field_size = scsi_2btoul(hdr->length);
+	print_size = MIN(avail_len, field_size);
+
+	if ((output_flags & SCSI_ATTR_OUTPUT_TEXT_MASK) ==
+	     SCSI_ATTR_OUTPUT_TEXT_RAW)
+		esc_text = 0;
+
+	if (print_size > 0) {
+		uint32_t i;
+
+		for (i = 0; i < print_size; i++) {
+			if (hdr->attribute[i] == '\0')
+				continue;
+			else if (((unsigned char)hdr->attribute[i] < 0x80)
+			      || (esc_text == 0))
+				sbuf_putc(sb, hdr->attribute[i]);
+			else
+				sbuf_printf(sb, "%%%02x",
+				    (unsigned char)hdr->attribute[i]);
+		}
+	} else if (avail_len < field_size) {
+		/*
+		 * We only report an error if the user didn't allocate
+		 * enough space to hold the full value of this field.
+		 */
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "Available "
+				 "length of attribute ID 0x%.4x %zu < field "
+				 "length %u", scsi_2btoul(hdr->id), avail_len,
+				 field_size);
+		}
+		retval = 1;
+	}
+
+	return (retval);
+}
+
+struct scsi_attrib_table_entry *
+scsi_find_attrib_entry(struct scsi_attrib_table_entry *table,
+		       size_t num_table_entries, uint32_t id)
+{
+	uint32_t i;
+
+	for (i = 0; i < num_table_entries; i++) {
+		if (table[i].id == id)
+			return (&table[i]);
+	}
+
+	return (NULL);
+}
+
+struct scsi_attrib_table_entry *
+scsi_get_attrib_entry(uint32_t id)
+{
+	return (scsi_find_attrib_entry(scsi_mam_attr_table,
+		sizeof(scsi_mam_attr_table) / sizeof(scsi_mam_attr_table[0]),
+		id));
+}
+
+int
+scsi_attrib_value_sbuf(struct sbuf *sb, uint32_t valid_len,
+   struct scsi_mam_attribute_header *hdr, uint32_t output_flags,
+   char *error_str, size_t error_str_len)
+{
+	int retval;
+
+	switch (hdr->byte2 & SMA_FORMAT_MASK) {
+	case SMA_FORMAT_ASCII:
+		retval = scsi_attrib_ascii_sbuf(sb, hdr, valid_len,
+		    SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len);
+		break;
+	case SMA_FORMAT_BINARY:
+		if (scsi_2btoul(hdr->length) <= 8)
+			retval = scsi_attrib_int_sbuf(sb, hdr, valid_len,
+			    SCSI_ATTR_FLAG_NONE, output_flags, error_str,
+			    error_str_len);
+		else
+			retval = scsi_attrib_hexdump_sbuf(sb, hdr, valid_len,
+			    SCSI_ATTR_FLAG_NONE, output_flags, error_str,
+			    error_str_len);
+		break;
+	case SMA_FORMAT_TEXT:
+		retval = scsi_attrib_text_sbuf(sb, hdr, valid_len,
+		    SCSI_ATTR_FLAG_NONE, output_flags, error_str,
+		    error_str_len);
+		break;
+	default:
+		if (error_str != NULL) {
+			snprintf(error_str, error_str_len, "Unknown attribute "
+			    "format 0x%x", hdr->byte2 & SMA_FORMAT_MASK);
+		}
+		retval = 1;
+		goto bailout;
+		break; /*NOTREACHED*/
+	}
+
+	sbuf_trim(sb);
+
+bailout:
+
+	return (retval);
+}
+
 void
+scsi_attrib_prefix_sbuf(struct sbuf *sb, uint32_t output_flags,
+			struct scsi_mam_attribute_header *hdr,
+			uint32_t valid_len, const char *desc)
+{
+	int need_space = 0;
+	uint32_t len;
+	uint32_t id;
+
+	/*
+	 * We can't do anything if we don't have enough valid data for the
+	 * header.
+	 */
+	if (valid_len < sizeof(*hdr))
+		return;
+
+	id = scsi_2btoul(hdr->id);
+	/*
+	 * Note that we print out the value of the attribute listed in the
+	 * header, regardless of whether we actually got that many bytes
+	 * back from the device through the controller.  A truncated result
+	 * could be the result of a failure to ask for enough data; the
+	 * header indicates how many bytes are allocated for this attribute
+	 * in the MAM.
+	 */
+	len = scsi_2btoul(hdr->length);
+
+	if ((output_flags & SCSI_ATTR_OUTPUT_FIELD_MASK) ==
+	    SCSI_ATTR_OUTPUT_FIELD_NONE)
+		return;
+
+	if ((output_flags & SCSI_ATTR_OUTPUT_FIELD_DESC)
+	 && (desc != NULL)) {
+		sbuf_printf(sb, "%s", desc);
+		need_space = 1;
+	}
+
+	if (output_flags & SCSI_ATTR_OUTPUT_FIELD_NUM) {
+		sbuf_printf(sb, "%s(0x%.4x)", (need_space) ? " " : "", id);
+		need_space = 0;
+	}
+
+	if (output_flags & SCSI_ATTR_OUTPUT_FIELD_SIZE) {
+		sbuf_printf(sb, "%s[%d]", (need_space) ? " " : "", len);
+		need_space = 0;
+	}
+	if (output_flags & SCSI_ATTR_OUTPUT_FIELD_RW) {
+		sbuf_printf(sb, "%s(%s)", (need_space) ? " " : "",
+			    (hdr->byte2 & SMA_READ_ONLY) ? "RO" : "RW");
+	}
+	sbuf_printf(sb, ": ");
+}
+
+int
+scsi_attrib_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+		 uint32_t valid_len, struct scsi_attrib_table_entry *user_table,
+		 size_t num_user_entries, int prefer_user_table,
+		 uint32_t output_flags, char *error_str, int error_str_len)
+{
+	int retval;
+	struct scsi_attrib_table_entry *table1 = NULL, *table2 = NULL;
+	struct scsi_attrib_table_entry *entry = NULL;
+	size_t table1_size = 0, table2_size = 0;
+	uint32_t id;
+
+	retval = 0;
+
+	if (valid_len < sizeof(*hdr)) {
+		retval = 1;
+		goto bailout;
+	}
+
+	id = scsi_2btoul(hdr->id);
+
+	if (user_table != NULL) {
+		if (prefer_user_table != 0) {
+			table1 = user_table;
+			table1_size = num_user_entries;
+			table2 = scsi_mam_attr_table;
+			table2_size = sizeof(scsi_mam_attr_table) /
+				      sizeof(scsi_mam_attr_table[0]);
+		} else {
+			table1 = scsi_mam_attr_table;
+			table1_size = sizeof(scsi_mam_attr_table) /
+				      sizeof(scsi_mam_attr_table[0]);
+			table2 = user_table;
+			table2_size = num_user_entries;
+		}
+	} else {
+		table1 = scsi_mam_attr_table;
+		table1_size = sizeof(scsi_mam_attr_table) /
+			      sizeof(scsi_mam_attr_table[0]);
+	}
+
+	entry = scsi_find_attrib_entry(table1, table1_size, id);
+	if (entry != NULL) {
+		scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len,
+					entry->desc);
+		if (entry->to_str == NULL)
+			goto print_default;
+		retval = entry->to_str(sb, hdr, valid_len, entry->flags,
+				       output_flags, error_str, error_str_len);
+		goto bailout;
+	}
+	if (table2 != NULL) {
+		entry = scsi_find_attrib_entry(table2, table2_size, id);
+		if (entry != NULL) {
+			if (entry->to_str == NULL)
+				goto print_default;
+
+			scsi_attrib_prefix_sbuf(sb, output_flags, hdr,
+						valid_len, entry->desc);
+			retval = entry->to_str(sb, hdr, valid_len, entry->flags,
+					       output_flags, error_str,
+					       error_str_len);
+			goto bailout;
+		}
+	}
+
+	scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, NULL);
+
+print_default:
+	retval = scsi_attrib_value_sbuf(sb, valid_len, hdr, output_flags,
+	    error_str, error_str_len);
+bailout:
+	if (retval == 0) {
+	 	if ((entry != NULL)
+		 && (entry->suffix != NULL))
+			sbuf_printf(sb, " %s", entry->suffix);
+
+		sbuf_trim(sb);
+		sbuf_printf(sb, "\n");
+	}
+
+	return (retval);
+}
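
A userland sketch (link against libsbuf and libcam; the output flags are
drawn from the set referenced above) of formatting one READ ATTRIBUTE
result using only the built-in table.

	#include <sys/types.h>
	#include <sys/sbuf.h>
	#include <stdio.h>
	#include <stdint.h>
	#include <err.h>
	#include <cam/scsi/scsi_all.h>

	static void
	print_attribute(uint8_t *buf, uint32_t valid_len)
	{
		struct sbuf *sb;
		char err_str[128];

		sb = sbuf_new_auto();
		if (scsi_attrib_sbuf(sb,
		    (struct scsi_mam_attribute_header *)buf, valid_len,
		    /*user_table*/ NULL, /*num_user_entries*/ 0,
		    /*prefer_user_table*/ 0,
		    SCSI_ATTR_OUTPUT_FIELD_DESC | SCSI_ATTR_OUTPUT_FIELD_NUM |
		    SCSI_ATTR_OUTPUT_NONASCII_ESC, err_str,
		    sizeof(err_str)) != 0)
			warnx("%s", err_str);
		sbuf_finish(sb);
		printf("%s", sbuf_data(sb));
		sbuf_delete(sb);
	}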
+
+void
 scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
 		     void (*cbfcnp)(struct cam_periph *, union ccb *),
 		     u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout)
@@ -5279,25 +7607,35 @@
 }
 
 void
-scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
-		void (*cbfcnp)(struct cam_periph *, union ccb *),
-		u_int8_t tag_action, int dbd, u_int8_t page_code,
-		u_int8_t page, u_int8_t *param_buf, u_int32_t param_len,
-		u_int8_t sense_len, u_int32_t timeout)
+scsi_mode_sense(struct ccb_scsiio *csio, uint32_t retries,
+    void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action,
+    int dbd, uint8_t pc, uint8_t page, uint8_t *param_buf, uint32_t param_len,
+    uint8_t sense_len, uint32_t timeout)
 {
 
-	scsi_mode_sense_len(csio, retries, cbfcnp, tag_action, dbd,
-			    page_code, page, param_buf, param_len, 0,
-			    sense_len, timeout);
+	scsi_mode_sense_subpage(csio, retries, cbfcnp, tag_action, dbd,
+	    pc, page, 0, param_buf, param_len, 0, sense_len, timeout);
 }
 
 void
-scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
-		    void (*cbfcnp)(struct cam_periph *, union ccb *),
-		    u_int8_t tag_action, int dbd, u_int8_t page_code,
-		    u_int8_t page, u_int8_t *param_buf, u_int32_t param_len,
-		    int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout)
+scsi_mode_sense_len(struct ccb_scsiio *csio, uint32_t retries,
+    void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action,
+    int dbd, uint8_t pc, uint8_t page, uint8_t *param_buf, uint32_t param_len,
+    int minimum_cmd_size, uint8_t sense_len, uint32_t timeout)
 {
+
+	scsi_mode_sense_subpage(csio, retries, cbfcnp, tag_action, dbd,
+	    pc, page, 0, param_buf, param_len, minimum_cmd_size,
+	    sense_len, timeout);
+}
+
+void
+scsi_mode_sense_subpage(struct ccb_scsiio *csio, uint32_t retries,
+    void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action,
+    int dbd, uint8_t pc, uint8_t page, uint8_t subpage, uint8_t *param_buf,
+    uint32_t param_len, int minimum_cmd_size, uint8_t sense_len,
+    uint32_t timeout)
+{
 	u_int8_t cdb_len;
 
 	/*
@@ -5315,7 +7653,8 @@
 		scsi_cmd->opcode = MODE_SENSE_6;
 		if (dbd != 0)
 			scsi_cmd->byte2 |= SMS_DBD;
-		scsi_cmd->page = page_code | page;
+		scsi_cmd->page = pc | page;
+		scsi_cmd->subpage = subpage;
 		scsi_cmd->length = param_len;
 		cdb_len = sizeof(*scsi_cmd);
 	} else {
@@ -5329,7 +7668,8 @@
 		scsi_cmd->opcode = MODE_SENSE_10;
 		if (dbd != 0)
 			scsi_cmd->byte2 |= SMS_DBD;
-		scsi_cmd->page = page_code | page;
+		scsi_cmd->page = pc | page;
+		scsi_cmd->subpage = subpage;
 		scsi_ulto2b(param_len, scsi_cmd->length);
 		cdb_len = sizeof(*scsi_cmd);
 	}
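
For the new subpage-capable filler, a minimal camlib sketch (illustrative
only; ccb hygiene and error recovery are abbreviated, and the constants
are assumed from the stock scsi_all.h/scsi_message.h):

	#include <camlib.h>
	#include <cam/scsi/scsi_all.h>
	#include <cam/scsi/scsi_message.h>

	static int
	mode_sense_subpage(struct cam_device *dev, uint8_t page,
	    uint8_t subpage, uint8_t *buf, uint32_t len)
	{
		union ccb *ccb;
		int error = 0;

		ccb = cam_getccb(dev);
		if (ccb == NULL)
			return (-1);
		scsi_mode_sense_subpage(&ccb->csio,
		    /*retries*/ 4,
		    /*cbfcnp*/ NULL,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*dbd*/ 0,
		    /*pc*/ SMS_PAGE_CTRL_CURRENT,
		    page, subpage, buf, len,
		    /*minimum_cmd_size*/ 0,
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ 5000);
		if (cam_send_ccb(dev, ccb) < 0 ||
		    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			error = -1;
		cam_freeccb(ccb);
		return (error);
	}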
@@ -5541,8 +7881,8 @@
 scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries,
 		      void (*cbfcnp)(struct cam_periph *, union ccb *),
 		      uint8_t tag_action, uint64_t lba, int reladr, int pmi,
-		      struct scsi_read_capacity_data_long *rcap_buf,
-		      uint8_t sense_len, uint32_t timeout)
+		      uint8_t *rcap_buf, int rcap_buf_len, uint8_t sense_len,
+		      uint32_t timeout)
 {
 	struct scsi_read_capacity_16 *scsi_cmd;
 
@@ -5553,7 +7893,7 @@
 		      /*flags*/CAM_DIR_IN,
 		      tag_action,
 		      /*data_ptr*/(u_int8_t *)rcap_buf,
-		      /*dxfer_len*/sizeof(*rcap_buf),
+		      /*dxfer_len*/rcap_buf_len,
 		      sense_len,
 		      sizeof(*scsi_cmd),
 		      timeout);
@@ -5562,7 +7902,7 @@
 	scsi_cmd->opcode = SERVICE_ACTION_IN;
 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
 	scsi_u64to8b(lba, scsi_cmd->addr);
-	scsi_ulto4b(sizeof(*rcap_buf), scsi_cmd->alloc_len);
+	scsi_ulto4b(rcap_buf_len, scsi_cmd->alloc_len);
 	if (pmi)
 		reladr |= SRC16_PMI;
 	if (reladr)
@@ -5686,7 +8026,11 @@
 		u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
 		u_int32_t timeout)
 {
+	int read;
 	u_int8_t cdb_len;
+
+	read = (readop & SCSI_RW_DIRMASK) == SCSI_RW_READ;
+
 	/*
 	 * Use the smallest possible command to perform the operation
 	 * as some legacy hardware does not support the 10 byte commands.
@@ -5703,7 +8047,7 @@
 		struct scsi_rw_6 *scsi_cmd;
 
 		scsi_cmd = (struct scsi_rw_6 *)&csio->cdb_io.cdb_bytes;
-		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
+		scsi_cmd->opcode = read ? READ_6 : WRITE_6;
 		scsi_ulto3b(lba, scsi_cmd->addr);
 		scsi_cmd->length = block_count & 0xff;
 		scsi_cmd->control = 0;
@@ -5722,7 +8066,7 @@
 		struct scsi_rw_10 *scsi_cmd;
 
 		scsi_cmd = (struct scsi_rw_10 *)&csio->cdb_io.cdb_bytes;
-		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
+		scsi_cmd->opcode = read ? READ_10 : WRITE_10;
 		scsi_cmd->byte2 = byte2;
 		scsi_ulto4b(lba, scsi_cmd->addr);
 		scsi_cmd->reserved = 0;
@@ -5745,7 +8089,7 @@
 		struct scsi_rw_12 *scsi_cmd;
 
 		scsi_cmd = (struct scsi_rw_12 *)&csio->cdb_io.cdb_bytes;
-		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
+		scsi_cmd->opcode = read ? READ_12 : WRITE_12;
 		scsi_cmd->byte2 = byte2;
 		scsi_ulto4b(lba, scsi_cmd->addr);
 		scsi_cmd->reserved = 0;
@@ -5767,7 +8111,7 @@
 		struct scsi_rw_16 *scsi_cmd;
 
 		scsi_cmd = (struct scsi_rw_16 *)&csio->cdb_io.cdb_bytes;
-		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
+		scsi_cmd->opcode = read ? READ_16 : WRITE_16;
 		scsi_cmd->byte2 = byte2;
 		scsi_u64to8b(lba, scsi_cmd->addr);
 		scsi_cmd->reserved = 0;
@@ -5778,7 +8122,8 @@
 	cam_fill_csio(csio,
 		      retries,
 		      cbfcnp,
-		      /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
+		      (read ? CAM_DIR_IN : CAM_DIR_OUT) |
+		      ((readop & SCSI_RW_BIO) != 0 ? CAM_DATA_BIO : 0),
 		      tag_action,
 		      data_ptr,
 		      dxfer_len,
@@ -6145,7 +8490,230 @@
 		      timeout);
 }
 
+void
+scsi_read_attribute(struct ccb_scsiio *csio, u_int32_t retries, 
+		    void (*cbfcnp)(struct cam_periph *, union ccb *),
+		    u_int8_t tag_action, u_int8_t service_action,
+		    uint32_t element, u_int8_t elem_type, int logical_volume,
+		    int partition, u_int32_t first_attribute, int cache,
+		    u_int8_t *data_ptr, u_int32_t length, int sense_len,
+		    u_int32_t timeout)
+{
+	struct scsi_read_attribute *scsi_cmd;
 
+	scsi_cmd = (struct scsi_read_attribute *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = READ_ATTRIBUTE;
+	scsi_cmd->service_action = service_action;
+	scsi_ulto2b(element, scsi_cmd->element);
+	scsi_cmd->elem_type = elem_type;
+	scsi_cmd->logical_volume = logical_volume;
+	scsi_cmd->partition = partition;
+	scsi_ulto2b(first_attribute, scsi_cmd->first_attribute);
+	scsi_ulto4b(length, scsi_cmd->length);
+	if (cache != 0)
+		scsi_cmd->cache |= SRA_CACHE;
+	
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_IN,
+		      tag_action,
+		      /*data_ptr*/data_ptr,
+		      /*dxfer_len*/length,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
+void
+scsi_write_attribute(struct ccb_scsiio *csio, u_int32_t retries, 
+		    void (*cbfcnp)(struct cam_periph *, union ccb *),
+		    u_int8_t tag_action, uint32_t element, int logical_volume,
+		    int partition, int wtc, u_int8_t *data_ptr,
+		    u_int32_t length, int sense_len, u_int32_t timeout)
+{
+	struct scsi_write_attribute *scsi_cmd;
+
+	scsi_cmd = (struct scsi_write_attribute *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = WRITE_ATTRIBUTE;
+	if (wtc != 0)
+		scsi_cmd->byte2 = SWA_WTC;
+	scsi_ulto3b(element, scsi_cmd->element);
+	scsi_cmd->logical_volume = logical_volume;
+	scsi_cmd->partition = partition;
+	scsi_ulto4b(length, scsi_cmd->length);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_OUT,
+		      tag_action,
+		      /*data_ptr*/data_ptr,
+		      /*dxfer_len*/length,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
+void
+scsi_persistent_reserve_in(struct ccb_scsiio *csio, uint32_t retries, 
+			   void (*cbfcnp)(struct cam_periph *, union ccb *),
+			   uint8_t tag_action, int service_action,
+			   uint8_t *data_ptr, uint32_t dxfer_len, int sense_len,
+			   int timeout)
+{
+	struct scsi_per_res_in *scsi_cmd;
+
+	scsi_cmd = (struct scsi_per_res_in *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = PERSISTENT_RES_IN;
+	scsi_cmd->action = service_action;
+	scsi_ulto2b(dxfer_len, scsi_cmd->length);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_IN,
+		      tag_action,
+		      data_ptr,
+		      dxfer_len,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
+void
+scsi_persistent_reserve_out(struct ccb_scsiio *csio, uint32_t retries, 
+			    void (*cbfcnp)(struct cam_periph *, union ccb *),
+			    uint8_t tag_action, int service_action,
+			    int scope, int res_type, uint8_t *data_ptr,
+			    uint32_t dxfer_len, int sense_len, int timeout)
+{
+	struct scsi_per_res_out *scsi_cmd;
+
+	scsi_cmd = (struct scsi_per_res_out *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = PERSISTENT_RES_OUT;
+	scsi_cmd->action = service_action;
+	scsi_cmd->scope_type = scope | res_type;
+	scsi_ulto4b(dxfer_len, scsi_cmd->length);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_OUT,
+		      tag_action,
+		      /*data_ptr*/data_ptr,
+		      /*dxfer_len*/dxfer_len,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
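
A hedged sketch of driving the PERSISTENT RESERVE IN filler from userland
with the READ KEYS service action (SPRI_RK is assumed from the stock
scsi_all.h; same camlib boilerplate as the MODE SENSE sketch above):

	#include <camlib.h>
	#include <cam/scsi/scsi_all.h>
	#include <cam/scsi/scsi_message.h>

	static int
	pr_read_keys(struct cam_device *dev, uint8_t *buf, uint32_t len)
	{
		union ccb *ccb;
		int error = 0;

		ccb = cam_getccb(dev);
		if (ccb == NULL)
			return (-1);
		scsi_persistent_reserve_in(&ccb->csio,
		    /*retries*/ 1,
		    /*cbfcnp*/ NULL,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*service_action*/ SPRI_RK,
		    buf, len,
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ 5000);
		if (cam_send_ccb(dev, ccb) < 0 ||
		    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			error = -1;
		cam_freeccb(ccb);
		return (error);
	}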
+
+void
+scsi_security_protocol_in(struct ccb_scsiio *csio, uint32_t retries, 
+			  void (*cbfcnp)(struct cam_periph *, union ccb *),
+			  uint8_t tag_action, uint32_t security_protocol,
+			  uint32_t security_protocol_specific, int byte4,
+			  uint8_t *data_ptr, uint32_t dxfer_len, int sense_len,
+			  int timeout)
+{
+	struct scsi_security_protocol_in *scsi_cmd;
+
+	scsi_cmd = (struct scsi_security_protocol_in *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = SECURITY_PROTOCOL_IN;
+
+	scsi_cmd->security_protocol = security_protocol;
+	scsi_ulto2b(security_protocol_specific,
+		    scsi_cmd->security_protocol_specific); 
+	scsi_cmd->byte4 = byte4;
+	scsi_ulto4b(dxfer_len, scsi_cmd->length);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_IN,
+		      tag_action,
+		      data_ptr,
+		      dxfer_len,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
+void
+scsi_security_protocol_out(struct ccb_scsiio *csio, uint32_t retries, 
+			   void (*cbfcnp)(struct cam_periph *, union ccb *),
+			   uint8_t tag_action, uint32_t security_protocol,
+			   uint32_t security_protocol_specific, int byte4,
+			   uint8_t *data_ptr, uint32_t dxfer_len, int sense_len,
+			   int timeout)
+{
+	struct scsi_security_protocol_out *scsi_cmd;
+
+	scsi_cmd = (struct scsi_security_protocol_out *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = SECURITY_PROTOCOL_OUT;
+
+	scsi_cmd->security_protocol = security_protocol;
+	scsi_ulto2b(security_protocol_specific,
+		    scsi_cmd->security_protocol_specific); 
+	scsi_cmd->byte4 = byte4;
+	scsi_ulto4b(dxfer_len, scsi_cmd->length);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_OUT,
+		      tag_action,
+		      data_ptr,
+		      dxfer_len,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
+void
+scsi_report_supported_opcodes(struct ccb_scsiio *csio, uint32_t retries, 
+			      void (*cbfcnp)(struct cam_periph *, union ccb *),
+			      uint8_t tag_action, int options, int req_opcode,
+			      int req_service_action, uint8_t *data_ptr,
+			      uint32_t dxfer_len, int sense_len, int timeout)
+{
+	struct scsi_report_supported_opcodes *scsi_cmd;
+
+	scsi_cmd = (struct scsi_report_supported_opcodes *)
+	    &csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = MAINTENANCE_IN;
+	scsi_cmd->service_action = REPORT_SUPPORTED_OPERATION_CODES;
+	scsi_cmd->options = options;
+	scsi_cmd->requested_opcode = req_opcode;
+	scsi_ulto2b(req_service_action, scsi_cmd->requested_service_action);
+	scsi_ulto4b(dxfer_len, scsi_cmd->length);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_IN,
+		      tag_action,
+		      data_ptr,
+		      dxfer_len,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
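For the single-opcode form (RSO_OPTIONS_OC), the response is laid out as struct scsi_report_supported_opcodes_one, added to scsi_all.h further down in this commit. A hedged decode sketch, assuming buf already holds a completed response of valid_len bytes (the function name is illustrative):

#include <stdint.h>
#include <cam/scsi/scsi_all.h>

/*
 * Classify support for a single opcode from a completed
 * REPORT SUPPORTED OPERATION CODES (RSO_OPTIONS_OC) response.
 * Returns 1 if supported, 0 if not, -1 if indeterminate.
 */
static int
opcode_supported(const uint8_t *buf, uint32_t valid_len)
{
	const struct scsi_report_supported_opcodes_one *one;

	if (valid_len < sizeof(*one))
		return (-1);
	one = (const struct scsi_report_supported_opcodes_one *)buf;
	switch (one->support & RSO_ONE_SUP_MASK) {
	case RSO_ONE_SUP_AVAIL:
	case RSO_ONE_SUP_VENDOR:
		return (1);
	case RSO_ONE_SUP_NOT_SUP:
		return (0);
	default:
		return (-1);
	}
}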
 /*
  * Try to make as good a match as possible with
  * available sub drivers
@@ -6211,7 +8779,7 @@
  * \return  0 on a match, -1 otherwise.
  *
  * Treat rhs and lhs as arrays of vpd device id descriptors.  Walk lhs matching
- * agains each element in rhs until all data are exhausted or we have found
+ * against each element in rhs until all data are exhausted or we have found
  * a match.
  */
 int
@@ -6246,7 +8814,11 @@
 		while (rhs_id <= rhs_last
 		    && (rhs_id->identifier + rhs_id->length) <= rhs_end) {
 
-			if (rhs_id->length == lhs_id->length
+			if ((rhs_id->id_type &
+			     (SVPD_ID_ASSOC_MASK | SVPD_ID_TYPE_MASK)) ==
+			    (lhs_id->id_type &
+			     (SVPD_ID_ASSOC_MASK | SVPD_ID_TYPE_MASK))
+			 && rhs_id->length == lhs_id->length
 			 && memcmp(rhs_id->identifier, lhs_id->identifier,
 				   rhs_id->length) == 0)
 				return (0);

Modified: trunk/sys/cam/scsi/scsi_all.h
===================================================================
--- trunk/sys/cam/scsi/scsi_all.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_all.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Largely written by Julian Elischer (julian at tfs.com)
  * for TRW Financial Systems.
@@ -14,7 +15,7 @@
  *
  * Ported to run under 386BSD by Julian Elischer (julian at tfs.com) Sept 1992
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_all.h 317962 2017-05-08 17:21:57Z ken $
  */
 
 /*
@@ -88,6 +89,9 @@
 					    * and text.
 					    */
 	SSQ_PRINT_SENSE		= 0x0800,
+	SSQ_UA			= 0x1000,  /* Broadcast UA. */
+	SSQ_RESCAN		= 0x2000,  /* Rescan target for LUNs. */
+	SSQ_LOST		= 0x4000,  /* Destroy the LUNs. */
 	SSQ_MASK		= 0xff00
 } scsi_sense_action_qualifier;
 
@@ -100,6 +104,9 @@
 /* The retryable error action, with table specified error code */
 #define	SS_RET		SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
 
+/* Wait for transient error status to change */
+#define	SS_WAIT		SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
+
 /* Fatal error action, with table specified error code */
 #define	SS_FATAL	SS_FAIL|SSQ_PRINT_SENSE
 
@@ -222,6 +229,7 @@
 	u_int8_t opcode;
 	u_int8_t byte2;
 #define	SMS_SP	0x01
+#define	SMS_RTD	0x02
 #define	SMS_PF	0x10
 	u_int8_t unused[2];
 	u_int8_t length;
@@ -275,6 +283,7 @@
 #define	SPRI_RS	0x03
 	u_int8_t reserved[5];
 	u_int8_t length[2];
+#define	SPRI_MAX_LEN		0xffff
 	u_int8_t control;
 };
 
@@ -299,13 +308,22 @@
 {
 	uint8_t length[2];
 	uint8_t flags1;
-#define	SPRI_CRH	0x10
-#define	SPRI_SIP_C	0x08
-#define	SPRI_ATP_C	0x04
-#define	SPRI_PTPL_C	0x01
+#define	SPRI_RLR_C		0x80
+#define	SPRI_CRH		0x10
+#define	SPRI_SIP_C		0x08
+#define	SPRI_ATP_C		0x04
+#define	SPRI_PTPL_C		0x01
 	uint8_t flags2;
-#define	SPRI_TMV	0x80
-#define	SPRI_PTPL_A	0x01
+#define	SPRI_TMV		0x80
+#define	SPRI_ALLOW_CMD_MASK	0x70
+#define	SPRI_ALLOW_CMD_SHIFT	4
+#define	SPRI_ALLOW_NA		0x00
+#define	SPRI_ALLOW_1		0x10
+#define	SPRI_ALLOW_2		0x20
+#define	SPRI_ALLOW_3		0x30
+#define	SPRI_ALLOW_4		0x40
+#define	SPRI_ALLOW_5		0x50
+#define	SPRI_PTPL_A		0x01
 	uint8_t type_mask[2];
 #define	SPRI_TM_WR_EX_AR	0x8000
 #define	SPRI_TM_EX_AC_RO	0x4000
@@ -319,7 +337,7 @@
 struct scsi_per_res_in_rsrv_data
 {
 	uint8_t reservation[8];
-	uint8_t obsolete1[4];
+	uint8_t scope_addr[4];
 	uint8_t reserved;
 	uint8_t scopetype;
 #define	SPRT_WE    0x01
@@ -328,7 +346,7 @@
 #define	SPRT_EARO  0x06
 #define	SPRT_WEAR  0x07
 #define	SPRT_EAAR  0x08
-	uint8_t obsolete2[2];
+	uint8_t extent_length[2];
 };
 
 struct scsi_per_res_in_rsrv
@@ -337,6 +355,26 @@
 	struct scsi_per_res_in_rsrv_data data;
 };
 
+struct scsi_per_res_in_full_desc
+{
+	struct scsi_per_res_key res_key;
+	uint8_t reserved1[4];
+	uint8_t flags;
+#define	SPRI_FULL_ALL_TG_PT	0x02
+#define	SPRI_FULL_R_HOLDER	0x01
+	uint8_t scopetype;
+	uint8_t reserved2[4];
+	uint8_t rel_trgt_port_id[2];
+	uint8_t additional_length[4];
+	uint8_t transport_id[];
+};
+
+struct scsi_per_res_in_full
+{
+	struct scsi_per_res_in_header header;
+	struct scsi_per_res_in_full_desc desc[];
+};
+
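The READ FULL STATUS payload is a header followed by variable-length descriptors, each sizeof(struct scsi_per_res_in_full_desc) bytes plus additional_length bytes of transport ID. A decode sketch, assuming buf holds a completed response of valid_len bytes (the function name is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <cam/scsi/scsi_all.h>

/*
 * Walk the variable-length descriptors in a READ FULL STATUS
 * response.  header.length counts the descriptor bytes that follow
 * the 8-byte header.
 */
static void
walk_full_status(uint8_t *buf, uint32_t valid_len)
{
	struct scsi_per_res_in_full *full;
	struct scsi_per_res_in_full_desc *desc;
	uint32_t len, off;

	full = (struct scsi_per_res_in_full *)buf;
	len = scsi_4btoul(full->header.length);
	off = sizeof(full->header);
	while (off + sizeof(*desc) <= valid_len &&
	    off < sizeof(full->header) + len) {
		desc = (struct scsi_per_res_in_full_desc *)(buf + off);
		printf("key %#jx%s\n",
		    (uintmax_t)scsi_8btou64(desc->res_key.key),
		    (desc->flags & SPRI_FULL_R_HOLDER) ? " (holder)" : "");
		off += sizeof(*desc) +
		    scsi_4btoul(desc->additional_length);
	}
}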
 struct scsi_per_res_out
 {
 	u_int8_t opcode;
@@ -349,13 +387,20 @@
 #define	SPRO_PRE_ABO		0x05
 #define	SPRO_REG_IGNO		0x06
 #define	SPRO_REG_MOVE		0x07
+#define	SPRO_REPL_LOST_RES	0x08
 #define	SPRO_ACTION_MASK	0x1f
 	u_int8_t scope_type;
 #define	SPR_SCOPE_MASK		0xf0
+#define	SPR_SCOPE_SHIFT		4
 #define	SPR_LU_SCOPE		0x00
+#define	SPR_EXTENT_SCOPE	0x10
+#define	SPR_ELEMENT_SCOPE	0x20
 #define	SPR_TYPE_MASK		0x0f
+#define	SPR_TYPE_RD_SHARED	0x00
 #define	SPR_TYPE_WR_EX		0x01
+#define	SPR_TYPE_RD_EX		0x02
 #define	SPR_TYPE_EX_AC		0x03
+#define	SPR_TYPE_SHARED		0x04
 #define	SPR_TYPE_WR_EX_RO	0x05
 #define	SPR_TYPE_EX_AC_RO	0x06
 #define	SPR_TYPE_WR_EX_AR	0x07
@@ -369,16 +414,140 @@
 {
 	struct scsi_per_res_key res_key;
 	u_int8_t serv_act_res_key[8];
-	u_int8_t obsolete1[4];
+	u_int8_t scope_spec_address[4];
 	u_int8_t flags;
 #define	SPR_SPEC_I_PT		0x08
 #define	SPR_ALL_TG_PT		0x04
 #define	SPR_APTPL		0x01
 	u_int8_t reserved1;
-	u_int8_t obsolete2[2];
+	u_int8_t extent_length[2];
+	u_int8_t transport_id_list[];
 };
 
+struct scsi_per_res_out_trans_ids {
+	u_int8_t additional_length[4];
+	u_int8_t transport_ids[];
+};
 
+/*
+ * Used with the REGISTER AND MOVE service action of the PERSISTENT RESERVE OUT
+ * command.
+ */
+struct scsi_per_res_reg_move
+{
+	struct scsi_per_res_key res_key;
+	u_int8_t serv_act_res_key[8];
+	u_int8_t reserved;
+	u_int8_t flags;
+#define	SPR_REG_MOVE_UNREG	0x02
+#define	SPR_REG_MOVE_APTPL	0x01
+	u_int8_t rel_trgt_port_id[2];
+	u_int8_t transport_id_length[4];
+	u_int8_t transport_id[];
+};
+
+struct scsi_transportid_header
+{
+	uint8_t format_protocol;
+#define	SCSI_TRN_FORMAT_MASK		0xc0
+#define	SCSI_TRN_FORMAT_SHIFT		6
+#define	SCSI_TRN_PROTO_MASK		0x0f
+};
+
+struct scsi_transportid_fcp
+{
+	uint8_t format_protocol;
+#define	SCSI_TRN_FCP_FORMAT_DEFAULT	0x00
+	uint8_t reserved1[7];
+	uint8_t n_port_name[8];
+	uint8_t reserved2[8];
+};
+
+struct scsi_transportid_spi
+{
+	uint8_t format_protocol;
+#define	SCSI_TRN_SPI_FORMAT_DEFAULT	0x00
+	uint8_t reserved1;
+	uint8_t scsi_addr[2];
+	uint8_t obsolete[2];
+	uint8_t rel_trgt_port_id[2];
+	uint8_t reserved2[16];
+};
+
+struct scsi_transportid_1394
+{
+	uint8_t format_protocol;
+#define	SCSI_TRN_1394_FORMAT_DEFAULT	0x00
+	uint8_t reserved1[7];
+	uint8_t eui64[8];
+	uint8_t reserved2[8];
+};
+
+struct scsi_transportid_rdma
+{
+	uint8_t format_protocol;
+#define	SCSI_TRN_RDMA_FORMAT_DEFAULT	0x00
+	uint8_t reserved[7];
+#define	SCSI_TRN_RDMA_PORT_LEN		16
+	uint8_t initiator_port_id[SCSI_TRN_RDMA_PORT_LEN];
+};
+
+struct scsi_transportid_iscsi_device
+{
+	uint8_t format_protocol;
+#define	SCSI_TRN_ISCSI_FORMAT_DEVICE	0x00
+	uint8_t reserved;
+	uint8_t additional_length[2];
+	uint8_t iscsi_name[];
+};
+
+struct scsi_transportid_iscsi_port
+{
+	uint8_t format_protocol;
+#define	SCSI_TRN_ISCSI_FORMAT_PORT	0x40
+	uint8_t reserved;
+	uint8_t additional_length[2];
+	uint8_t iscsi_name[];
+	/*
+	 * Followed by a separator and iSCSI initiator session ID
+	 */
+};
+
+struct scsi_transportid_sas
+{
+	uint8_t format_protocol;
+#define	SCSI_TRN_SAS_FORMAT_DEFAULT	0x00
+	uint8_t reserved1[3];
+	uint8_t sas_address[8];
+	uint8_t reserved2[12];
+};
+
+struct scsi_sop_routing_id_norm {
+	uint8_t bus;
+	uint8_t devfunc;
+#define	SCSI_TRN_SOP_BUS_MAX		0xff
+#define	SCSI_TRN_SOP_DEV_MAX		0x1f
+#define	SCSI_TRN_SOP_DEV_MASK		0xf8
+#define	SCSI_TRN_SOP_DEV_SHIFT		3
+#define	SCSI_TRN_SOP_FUNC_NORM_MASK	0x07
+#define	SCSI_TRN_SOP_FUNC_NORM_MAX	0x07
+};
+
+struct scsi_sop_routing_id_alt {
+	uint8_t bus;
+	uint8_t function;
+#define	SCSI_TRN_SOP_FUNC_ALT_MAX	0xff
+};
+
+struct scsi_transportid_sop
+{
+	uint8_t format_protocol;
+#define	SCSI_TRN_SOP_FORMAT_DEFAULT	0x00
+	uint8_t reserved1;
+	uint8_t routing_id[2];
+	uint8_t reserved2[20];
+};
+
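These transport ID layouts are what the scsi_parse_transportid_*() helpers declared near the bottom of this header produce. As a sketch of building the iSCSI device format by hand; roundup() comes from <sys/param.h>, and per SPC-4 the name field is NUL-padded to a multiple of four bytes:

#include <sys/param.h>
#include <stdlib.h>
#include <string.h>
#include <cam/scsi/scsi_all.h>

/*
 * Build an iSCSI device-format transport ID for the given iSCSI
 * name.  additional_length covers only the (padded) name bytes that
 * follow the 4-byte header.  Caller frees the result.
 */
static struct scsi_transportid_iscsi_device *
build_iscsi_tid(const char *iscsi_name, unsigned int *tid_len)
{
	struct scsi_transportid_iscsi_device *tid;
	size_t name_len;

	name_len = strlen(iscsi_name) + 1;	/* include the NUL */
	*tid_len = roundup(sizeof(*tid) + name_len, 4);
	tid = calloc(1, *tid_len);
	if (tid == NULL)
		return (NULL);
	tid->format_protocol = SCSI_TRN_ISCSI_FORMAT_DEVICE |
	    SCSI_PROTO_ISCSI;
	scsi_ulto2b(*tid_len - sizeof(*tid), tid->additional_length);
	memcpy(tid->iscsi_name, iscsi_name, name_len);
	return (tid);
}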
 struct scsi_log_sense
 {
 	u_int8_t opcode;
@@ -387,7 +556,7 @@
 #define	SLS_PPC				0x02
 	u_int8_t page;
 #define	SLS_PAGE_CODE 			0x3F
-#define	SLS_ALL_PAGES_PAGE		0x00
+#define	SLS_SUPPORTED_PAGES_PAGE	0x00
 #define	SLS_OVERRUN_PAGE		0x01
 #define	SLS_ERROR_WRITE_PAGE		0x02
 #define	SLS_ERROR_READ_PAGE		0x03
@@ -395,7 +564,10 @@
 #define	SLS_ERROR_VERIFY_PAGE		0x05
 #define	SLS_ERROR_NONMEDIUM_PAGE	0x06
 #define	SLS_ERROR_LASTN_PAGE		0x07
+#define	SLS_LOGICAL_BLOCK_PROVISIONING	0x0c
 #define	SLS_SELF_TEST_PAGE		0x10
+#define	SLS_SOLID_STATE_MEDIA		0x11
+#define	SLS_STAT_AND_PERF		0x19
 #define	SLS_IE_PAGE			0x2f
 #define	SLS_PAGE_CTRL_MASK		0xC0
 #define	SLS_PAGE_CTRL_THRESHOLD		0x00
@@ -402,7 +574,9 @@
 #define	SLS_PAGE_CTRL_CUMULATIVE	0x40
 #define	SLS_PAGE_CTRL_THRESH_DEFAULT	0x80
 #define	SLS_PAGE_CTRL_CUMUL_DEFAULT	0xC0
-	u_int8_t reserved[2];
+	u_int8_t subpage;
+#define	SLS_SUPPORTED_SUBPAGES_SUBPAGE	0xff
+	u_int8_t reserved;
 	u_int8_t paramptr[2];
 	u_int8_t length[2];
 	u_int8_t control;
@@ -428,7 +602,10 @@
 struct scsi_log_header
 {
 	u_int8_t page;
-	u_int8_t reserved;
+#define	SL_PAGE_CODE			0x3F
+#define	SL_SPF				0x40
+#define	SL_DS				0x80
+	u_int8_t subpage;
 	u_int8_t datalen[2];
 };
 
@@ -449,6 +626,60 @@
 	u_int8_t param_len;
 };
 
+struct scsi_log_media_pct_used {
+	struct scsi_log_param_header hdr;
+#define	SLP_SS_MEDIA_PCT_USED		0x0001
+	uint8_t reserved[3];
+	uint8_t pct_used;
+};
+
+struct scsi_log_stat_and_perf {
+	struct scsi_log_param_header hdr;
+#define	SLP_SAP				0x0001
+	uint8_t	read_num[8];
+	uint8_t	write_num[8];
+	uint8_t	received_lba[8];
+	uint8_t	transmitted_lba[8];
+	uint8_t	read_int[8];
+	uint8_t	write_int[8];
+	uint8_t	weighted_num[8];
+	uint8_t	weighted_int[8];
+};
+
+struct scsi_log_idle_time {
+	struct scsi_log_param_header hdr;
+#define	SLP_IT				0x0002
+	uint8_t	idle_int[8];
+};
+
+struct scsi_log_time_interval {
+	struct scsi_log_param_header hdr;
+#define	SLP_TI				0x0003
+	uint8_t	exponent[4];
+	uint8_t	integer[4];
+};
+
+struct scsi_log_fua_stat_and_perf {
+	struct scsi_log_param_header hdr;
+#define	SLP_FUA_SAP			0x0004
+	uint8_t	fua_read_num[8];
+	uint8_t	fua_write_num[8];
+	uint8_t	fuanv_read_num[8];
+	uint8_t	fuanv_write_num[8];
+	uint8_t	fua_read_int[8];
+	uint8_t	fua_write_int[8];
+	uint8_t	fuanv_read_int[8];
+	uint8_t	fuanv_write_int[8];
+};
+
+struct scsi_log_informational_exceptions {
+	struct scsi_log_param_header hdr;
+#define	SLP_IE_GEN			0x0000
+	uint8_t	ie_asc;
+	uint8_t	ie_ascq;
+	uint8_t	temperature;
+};
+
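These parameter layouts follow the usual LOG SENSE shape: a 4-byte scsi_log_header, then parameters, each a 4-byte scsi_log_param_header plus param_len bytes of data. A sketch that digs the percentage-used endurance indicator out of the solid state media page (the helper name is illustrative):

#include <stdint.h>
#include <cam/scsi/scsi_all.h>

/*
 * Find the percentage-used parameter (SLP_SS_MEDIA_PCT_USED) in a
 * completed LOG SENSE for the SLS_SOLID_STATE_MEDIA page.
 * Returns the percentage, or -1 if the parameter is not present.
 */
static int
ssd_pct_used(uint8_t *buf, uint32_t valid_len)
{
	struct scsi_log_header *hdr;
	struct scsi_log_media_pct_used *pct;
	uint32_t off, page_len;

	hdr = (struct scsi_log_header *)buf;
	page_len = scsi_2btoul(hdr->datalen);
	for (off = sizeof(*hdr);
	    off + sizeof(struct scsi_log_param_header) <= valid_len &&
	    off < sizeof(*hdr) + page_len;) {
		pct = (struct scsi_log_media_pct_used *)(buf + off);
		if (scsi_2btoul(pct->hdr.param_code) ==
		    SLP_SS_MEDIA_PCT_USED &&
		    off + sizeof(*pct) <= valid_len)
			return (pct->pct_used);
		off += sizeof(struct scsi_log_param_header) +
		    pct->hdr.param_len;
	}
	return (-1);
}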
 struct scsi_control_page {
 	u_int8_t page_code;
 	u_int8_t page_length;
@@ -467,17 +698,39 @@
 #define	SCP_QUEUE_ALG_MASK		0xF0
 #define	SCP_QUEUE_ALG_RESTRICTED	0x00
 #define	SCP_QUEUE_ALG_UNRESTRICTED	0x10
+#define	SCP_NUAR			0x08	/*No UA on release*/
 #define	SCP_QUEUE_ERR			0x02	/*Queued I/O aborted for CACs*/
 #define	SCP_QUEUE_DQUE			0x01	/*Queued I/O disabled*/
 	u_int8_t eca_and_aen;
 #define	SCP_EECA			0x80	/*Enable Extended CA*/
+#define	SCP_RAC				0x40	/*Report a check*/
+#define	SCP_SWP				0x08	/*Software Write Protect*/
 #define	SCP_RAENP			0x04	/*Ready AEN Permission*/
 #define	SCP_UAAENP			0x02	/*UA AEN Permission*/
 #define	SCP_EAENP			0x01	/*Error AEN Permission*/
-	u_int8_t reserved;
+	u_int8_t flags4;
+#define	SCP_ATO				0x80	/*Application tag owner*/
+#define	SCP_TAS				0x40	/*Task aborted status*/
+#define	SCP_ATMPE			0x20	/*Application tag mode page*/
+#define	SCP_RWWP			0x10	/*Reject write without prot*/
 	u_int8_t aen_holdoff_period[2];
+	u_int8_t busy_timeout_period[2];
+	u_int8_t extended_selftest_completion_time[2];
 };
 
+struct scsi_control_ext_page {
+	uint8_t page_code;
+	uint8_t subpage_code;
+	uint8_t page_length[2];
+	uint8_t flags;
+#define	SCEP_TCMOS			0x04	/* Timestamp Changeable by Methods Outside Std. */
+#define	SCEP_SCSIP			0x02	/* SCSI Precedence (clock) */
+#define	SCEP_IALUAE			0x01	/* Implicit ALUA Enabled */
+	uint8_t prio;
+	uint8_t max_sense;
+	uint8_t reserve[25];
+};
+
 struct scsi_cache_page {
 	u_int8_t page_code;
 #define	SCHP_PAGE_SAVABLE		0x80	/* Page is savable */
@@ -528,55 +781,6 @@
 	uint8_t non_cache_seg_size[3];
 };
 
-/*
- * XXX KDM move this off to a vendor shim.
- */
-struct copan_power_subpage {
-	uint8_t page_code;
-#define	PWR_PAGE_CODE		0x00
-	uint8_t subpage;
-#define	PWR_SUBPAGE_CODE	0x02
-	uint8_t page_length[2];
-	uint8_t page_version;
-#define	PWR_VERSION		    0x01
-	uint8_t total_luns;
-	uint8_t max_active_luns;
-#define	PWR_DFLT_MAX_LUNS	    0x07
-	uint8_t reserved[25];
-};
-
-/*
- * XXX KDM move this off to a vendor shim.
- */
-struct copan_aps_subpage {
-	uint8_t page_code;
-#define	APS_PAGE_CODE		0x00
-	uint8_t subpage;
-#define	APS_SUBPAGE_CODE	0x03
-	uint8_t page_length[2];
-	uint8_t page_version;
-#define	APS_VERSION		    0x00
-	uint8_t lock_active;
-#define	APS_LOCK_ACTIVE	    0x01
-#define	APS_LOCK_INACTIVE	0x00
-	uint8_t reserved[26];
-};
-
-/*
- * XXX KDM move this off to a vendor shim.
- */
-struct copan_debugconf_subpage {
-	uint8_t page_code;
-#define DBGCNF_PAGE_CODE		0x00
-	uint8_t subpage;
-#define DBGCNF_SUBPAGE_CODE	0xF0
-	uint8_t page_length[2];
-	uint8_t page_version;
-#define DBGCNF_VERSION			0x00
-	uint8_t ctl_time_io_secs[2];
-};
-
-
 struct scsi_info_exceptions_page {
 	u_int8_t page_code;
 #define	SIEP_PAGE_SAVABLE		0x80	/* Page is savable */
@@ -590,25 +794,73 @@
 #define	SIEP_FLAGS_EBACKERR		0x02
 #define	SIEP_FLAGS_LOGERR		0x01
 	u_int8_t mrie;
+#define	SIEP_MRIE_NO		0x00
+#define	SIEP_MRIE_UA		0x02
+#define	SIEP_MRIE_REC_COND	0x03
+#define	SIEP_MRIE_REC_UNCOND	0x04
+#define	SIEP_MRIE_NO_SENSE	0x05
+#define	SIEP_MRIE_ON_REQ	0x06
 	u_int8_t interval_timer[4];
 	u_int8_t report_count[4];
 };
 
+struct scsi_logical_block_provisioning_page_descr {
+	uint8_t flags;
+#define	SLBPPD_ENABLED		0x80
+#define	SLBPPD_TYPE_MASK	0x38
+#define	SLBPPD_ARMING_MASK	0x07
+#define	SLBPPD_ARMING_DEC	0x02
+#define	SLBPPD_ARMING_INC	0x01
+	uint8_t resource;
+	uint8_t reserved[2];
+	uint8_t count[4];
+};
+
+struct scsi_logical_block_provisioning_page {
+	uint8_t page_code;
+	uint8_t subpage_code;
+	uint8_t page_length[2];
+	uint8_t flags;
+#define	SLBPP_SITUA		0x01
+	uint8_t reserved[11];
+	struct scsi_logical_block_provisioning_page_descr descr[0];
+};
+
+/*
+ * SCSI protocol identifier values, current as of SPC4r36l.
+ */
+#define	SCSI_PROTO_FC		0x00	/* Fibre Channel */
+#define	SCSI_PROTO_SPI		0x01	/* Parallel SCSI */
+#define	SCSI_PROTO_SSA		0x02	/* Serial Storage Arch. */
+#define	SCSI_PROTO_1394		0x03	/* IEEE 1394 (Firewire) */
+#define	SCSI_PROTO_RDMA		0x04	/* SCSI RDMA Protocol */
+#define	SCSI_PROTO_ISCSI	0x05	/* Internet SCSI */
+#define	SCSI_PROTO_iSCSI	0x05	/* Internet SCSI */
+#define	SCSI_PROTO_SAS		0x06	/* SAS Serial SCSI Protocol */
+#define	SCSI_PROTO_ADT		0x07	/* Automation/Drive Int. Trans. Prot.*/
+#define	SCSI_PROTO_ADITP	0x07	/* Automation/Drive Int. Trans. Prot.*/
+#define	SCSI_PROTO_ATA		0x08	/* AT Attachment Interface */
+#define	SCSI_PROTO_UAS		0x09	/* USB Attached SCSI */
+#define	SCSI_PROTO_SOP		0x0a	/* SCSI over PCI Express */
+#define	SCSI_PROTO_NONE		0x0f	/* No specific protocol */
+
 struct scsi_proto_specific_page {
 	u_int8_t page_code;
 #define	SPSP_PAGE_SAVABLE		0x80	/* Page is savable */
 	u_int8_t page_length;
 	u_int8_t protocol;
-#define	SPSP_PROTO_FC			0x00
-#define	SPSP_PROTO_SPI			0x01
-#define	SPSP_PROTO_SSA			0x02
-#define	SPSP_PROTO_1394			0x03
-#define	SPSP_PROTO_RDMA			0x04
-#define	SPSP_PROTO_ISCSI		0x05
-#define	SPSP_PROTO_SAS			0x06
-#define	SPSP_PROTO_ADT			0x07
-#define	SPSP_PROTO_ATA			0x08
-#define	SPSP_PROTO_NONE			0x0f
+#define	SPSP_PROTO_FC			SCSI_PROTO_FC
+#define	SPSP_PROTO_SPI			SCSI_PROTO_SPI
+#define	SPSP_PROTO_SSA			SCSI_PROTO_SSA
+#define	SPSP_PROTO_1394			SCSI_PROTO_1394
+#define	SPSP_PROTO_RDMA			SCSI_PROTO_RDMA
+#define	SPSP_PROTO_ISCSI		SCSI_PROTO_ISCSI
+#define	SPSP_PROTO_SAS			SCSI_PROTO_SAS
+#define	SPSP_PROTO_ADT			SCSI_PROTO_ADITP
+#define	SPSP_PROTO_ATA			SCSI_PROTO_ATA
+#define	SPSP_PROTO_UAS			SCSI_PROTO_UAS
+#define	SPSP_PROTO_SOP			SCSI_PROTO_SOP
+#define	SPSP_PROTO_NONE			SCSI_PROTO_NONE
 };
 
 struct scsi_reserve
@@ -743,12 +995,16 @@
 {
 	u_int8_t opcode;
 	u_int8_t byte2;
-#define	RWB_MODE		0x07
+#define	RWB_MODE		0x1F
 #define	RWB_MODE_HDR_DATA	0x00
 #define	RWB_MODE_VENDOR		0x01
 #define	RWB_MODE_DATA		0x02
+#define	RWB_MODE_DESCR		0x03
 #define	RWB_MODE_DOWNLOAD	0x04
 #define	RWB_MODE_DOWNLOAD_SAVE	0x05
+#define	RWB_MODE_ECHO		0x0A
+#define	RWB_MODE_ECHO_DESCR	0x0B
+#define	RWB_MODE_ERROR_HISTORY	0x1C
         u_int8_t buffer_id;
         u_int8_t offset[3];
         u_int8_t length[3];
@@ -755,6 +1011,16 @@
         u_int8_t control;
 };
 
+struct scsi_read_buffer_16
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t offset[8];
+	uint8_t length[4];
+	uint8_t buffer_id;
+	uint8_t control;
+};
+
 struct scsi_write_buffer
 {
 	u_int8_t opcode;
@@ -765,6 +1031,216 @@
 	u_int8_t control;
 };
 
+struct scsi_read_attribute
+{
+	u_int8_t opcode;
+	u_int8_t service_action;
+#define	SRA_SA_ATTR_VALUES		0x00
+#define	SRA_SA_ATTR_LIST		0x01
+#define	SRA_SA_LOG_VOL_LIST		0x02
+#define	SRA_SA_PART_LIST		0x03
+#define	SRA_SA_RESTRICTED		0x04
+#define	SRA_SA_SUPPORTED_ATTRS		0x05
+#define	SRA_SA_MASK			0x1f
+	u_int8_t element[2];
+	u_int8_t elem_type;
+	u_int8_t logical_volume;
+	u_int8_t reserved1;
+	u_int8_t partition;
+	u_int8_t first_attribute[2];
+	u_int8_t length[4];
+	u_int8_t cache;
+#define	SRA_CACHE			0x01
+	u_int8_t control;
+};
+
+struct scsi_write_attribute
+{
+	u_int8_t opcode;
+	u_int8_t byte2;
+#define	SWA_WTC				0x01
+	u_int8_t element[3];
+	u_int8_t logical_volume;
+	u_int8_t reserved1;
+	u_int8_t partition;
+	u_int8_t reserved2[2];
+	u_int8_t length[4];
+	u_int8_t reserved3;
+	u_int8_t control;
+};
+
+
+struct scsi_read_attribute_values
+{
+	u_int8_t length[4];
+	u_int8_t attribute_0[0];
+};
+
+struct scsi_mam_attribute_header
+{
+	u_int8_t id[2];
+	/*
+	 * Attributes obtained from SPC-4r36g (section 7.4.2.2) and
+	 * SSC-4r03 (section 4.2.21). 
+	 */
+#define	SMA_ATTR_ID_DEVICE_MIN		0x0000
+
+#define	SMA_ATTR_REM_CAP_PARTITION	0x0000
+#define	SMA_ATTR_MAX_CAP_PARTITION	0x0001
+#define	SMA_ATTR_TAPEALERT_FLAGS	0x0002
+#define	SMA_ATTR_LOAD_COUNT		0x0003
+#define	SMA_ATTR_MAM_SPACE_REMAINING	0x0004
+
+#define	SMA_ATTR_DEV_ASSIGNING_ORG	0x0005
+#define	SMA_ATTR_FORMAT_DENSITY_CODE	0x0006
+#define	SMA_ATTR_INITIALIZATION_COUNT	0x0007
+#define	SMA_ATTR_VOLUME_ID		0x0008
+#define	SMA_ATTR_VOLUME_CHANGE_REF	0x0009
+
+#define	SMA_ATTR_DEV_SERIAL_LAST_LOAD	0x020a
+#define	SMA_ATTR_DEV_SERIAL_LAST_LOAD_1	0x020b
+#define	SMA_ATTR_DEV_SERIAL_LAST_LOAD_2	0x020c
+#define	SMA_ATTR_DEV_SERIAL_LAST_LOAD_3	0x020d
+
+#define	SMA_ATTR_TOTAL_MB_WRITTEN_LT	0x0220
+#define	SMA_ATTR_TOTAL_MB_READ_LT	0x0221
+#define	SMA_ATTR_TOTAL_MB_WRITTEN_CUR	0x0222
+#define	SMA_ATTR_TOTAL_MB_READ_CUR	0x0223
+#define	SMA_ATTR_FIRST_ENC_BLOCK	0x0224
+#define	SMA_ATTR_NEXT_UNENC_BLOCK	0x0225
+
+#define	SMA_ATTR_MEDIUM_USAGE_HIST	0x0340
+#define	SMA_ATTR_PART_USAGE_HIST	0x0341
+
+#define	SMA_ATTR_ID_DEVICE_MAX		0x03ff
+
+#define	SMA_ATTR_ID_MEDIUM_MIN		0x0400
+
+#define	SMA_ATTR_MED_MANUF		0x0400
+#define	SMA_ATTR_MED_SERIAL		0x0401
+
+#define	SMA_ATTR_MED_LENGTH		0x0402
+#define	SMA_ATTR_MED_WIDTH		0x0403
+#define	SMA_ATTR_MED_ASSIGNING_ORG	0x0404
+#define	SMA_ATTR_MED_DENSITY_CODE	0x0405
+
+#define	SMA_ATTR_MED_MANUF_DATE		0x0406
+#define	SMA_ATTR_MAM_CAPACITY		0x0407
+#define	SMA_ATTR_MED_TYPE		0x0408
+#define	SMA_ATTR_MED_TYPE_INFO		0x0409
+#define	SMA_ATTR_MED_SERIAL_NUM		0x040a
+
+#define	SMA_ATTR_ID_MEDIUM_MAX		0x07ff
+
+#define	SMA_ATTR_ID_HOST_MIN		0x0800
+
+#define	SMA_ATTR_APP_VENDOR		0x0800
+#define	SMA_ATTR_APP_NAME		0x0801
+#define	SMA_ATTR_APP_VERSION		0x0802
+#define	SMA_ATTR_USER_MED_TEXT_LABEL	0x0803
+#define	SMA_ATTR_LAST_WRITTEN_TIME	0x0804
+#define	SMA_ATTR_TEXT_LOCAL_ID		0x0805
+#define	SMA_ATTR_BARCODE		0x0806
+#define	SMA_ATTR_HOST_OWNER_NAME	0x0807
+#define	SMA_ATTR_MEDIA_POOL		0x0808
+#define	SMA_ATTR_PART_USER_LABEL	0x0809
+#define	SMA_ATTR_LOAD_UNLOAD_AT_PART	0x080a
+#define	SMA_ATTR_APP_FORMAT_VERSION	0x080b
+#define	SMA_ATTR_VOL_COHERENCY_INFO	0x080c
+
+#define	SMA_ATTR_ID_HOST_MAX		0x0bff
+
+#define	SMA_ATTR_VENDOR_DEVICE_MIN	0x0c00
+#define	SMA_ATTR_VENDOR_DEVICE_MAX	0x0fff
+#define	SMA_ATTR_VENDOR_MEDIUM_MIN	0x1000
+#define	SMA_ATTR_VENDOR_MEDIUM_MAX	0x13ff
+#define	SMA_ATTR_VENDOR_HOST_MIN	0x1400
+#define	SMA_ATTR_VENDOR_HOST_MAX	0x17ff
+	u_int8_t byte2;
+#define	SMA_FORMAT_BINARY	0x00
+#define	SMA_FORMAT_ASCII	0x01
+#define	SMA_FORMAT_TEXT		0x02
+#define	SMA_FORMAT_MASK		0x03
+#define	SMA_READ_ONLY		0x80
+	u_int8_t length[2];
+	u_int8_t attribute[0];
+};
+
+struct scsi_attrib_list_header {
+	u_int8_t length[4];
+	u_int8_t first_attr_0[0];
+};
+
+struct scsi_attrib_lv_list {
+	u_int8_t length[2];
+	u_int8_t first_lv_number;
+	u_int8_t num_logical_volumes;
+};
+
+struct scsi_attrib_vendser {
+	uint8_t vendor[8];
+	uint8_t serial_num[32];
+};
+
+/*
+ * These values are used to decode the Volume Coherency Information
+ * Attribute (0x080c) for LTFS-format coherency information.
+ * Although the Application Client Specific lengths are different for
+ * Version 0 and Version 1, the data is in fact the same.  The length
+ * difference was due to a code bug.
+ */
+#define	SCSI_LTFS_VER0_LEN	42
+#define	SCSI_LTFS_VER1_LEN	43
+#define	SCSI_LTFS_UUID_LEN	36
+#define	SCSI_LTFS_STR_NAME	"LTFS"
+#define	SCSI_LTFS_STR_LEN	4
+
+typedef enum {
+	SCSI_ATTR_FLAG_NONE		= 0x00,
+	SCSI_ATTR_FLAG_HEX		= 0x01,
+	SCSI_ATTR_FLAG_FP		= 0x02,
+	SCSI_ATTR_FLAG_DIV_10		= 0x04,
+	SCSI_ATTR_FLAG_FP_1DIGIT	= 0x08
+} scsi_attrib_flags;
+
+typedef enum {
+	SCSI_ATTR_OUTPUT_NONE		= 0x00,
+	SCSI_ATTR_OUTPUT_TEXT_MASK	= 0x03,
+	SCSI_ATTR_OUTPUT_TEXT_RAW	= 0x00,
+	SCSI_ATTR_OUTPUT_TEXT_ESC	= 0x01,
+	SCSI_ATTR_OUTPUT_TEXT_RSV1	= 0x02,
+	SCSI_ATTR_OUTPUT_TEXT_RSV2	= 0x03,
+	SCSI_ATTR_OUTPUT_NONASCII_MASK	= 0x0c,
+	SCSI_ATTR_OUTPUT_NONASCII_TRIM	= 0x00,
+	SCSI_ATTR_OUTPUT_NONASCII_ESC	= 0x04,
+	SCSI_ATTR_OUTPUT_NONASCII_RAW	= 0x08,
+	SCSI_ATTR_OUTPUT_NONASCII_RSV1	= 0x0c,
+	SCSI_ATTR_OUTPUT_FIELD_MASK	= 0xf0,
+	SCSI_ATTR_OUTPUT_FIELD_ALL	= 0xf0,
+	SCSI_ATTR_OUTPUT_FIELD_NONE	= 0x00,
+	SCSI_ATTR_OUTPUT_FIELD_DESC	= 0x10,
+	SCSI_ATTR_OUTPUT_FIELD_NUM	= 0x20,
+	SCSI_ATTR_OUTPUT_FIELD_SIZE	= 0x40,
+	SCSI_ATTR_OUTPUT_FIELD_RW	= 0x80
+} scsi_attrib_output_flags;
+
+struct sbuf;
+
+struct scsi_attrib_table_entry
+{
+	u_int32_t id;
+	u_int32_t flags;
+	const char *desc;
+	const char *suffix;
+	int (*to_str)(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+		      uint32_t valid_len, uint32_t flags,
+		      uint32_t output_flags, char *error_str,
+		      int error_str_len);
+	int (*parse_str)(char *str, struct scsi_mam_attribute_header *hdr,
+			 uint32_t alloc_len, uint32_t flags, char *error_str,
+			 int error_str_len);
+};
+
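A READ ATTRIBUTE response for the attribute-values service action is a 4-byte length followed by packed attributes, each a 5-byte scsi_mam_attribute_header plus length bytes of value. A walking sketch, assuming buf holds valid_len bytes of completed response:

#include <stdio.h>
#include <stdint.h>
#include <cam/scsi/scsi_all.h>

/*
 * Walk the MAM attributes in a completed READ ATTRIBUTE
 * (SRA_SA_ATTR_VALUES) response and print each header.
 */
static void
walk_attributes(uint8_t *buf, uint32_t valid_len)
{
	struct scsi_read_attribute_values *vals;
	struct scsi_mam_attribute_header *hdr;
	uint32_t avail, off;

	vals = (struct scsi_read_attribute_values *)buf;
	avail = scsi_4btoul(vals->length);
	for (off = sizeof(vals->length);
	    off + sizeof(*hdr) <= valid_len &&
	    off < sizeof(vals->length) + avail;) {
		hdr = (struct scsi_mam_attribute_header *)(buf + off);
		printf("attribute %#x, format %u, %u bytes\n",
		    (unsigned)scsi_2btoul(hdr->id),
		    (unsigned)(hdr->byte2 & SMA_FORMAT_MASK),
		    (unsigned)scsi_2btoul(hdr->length));
		off += sizeof(*hdr) + scsi_2btoul(hdr->length);
	}
}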
 struct scsi_rw_6
 {
 	u_int8_t opcode;
@@ -816,6 +1292,17 @@
 	u_int8_t control;
 };
 
+struct scsi_write_atomic_16
+{
+	uint8_t	opcode;
+	uint8_t	byte2;
+	uint8_t	addr[8];
+	uint8_t	boundary[2];
+	uint8_t	length[2];
+	uint8_t	group;
+	uint8_t	control;
+};
+
 struct scsi_write_same_10
 {
 	uint8_t	opcode;
@@ -834,6 +1321,7 @@
 {
 	uint8_t	opcode;
 	uint8_t	byte2;
+#define	SWS_NDOB	0x01
 	uint8_t	addr[8];
 	uint8_t	length[4];
 	uint8_t	group;
@@ -851,6 +1339,20 @@
 	uint8_t	control;
 };
 
+struct scsi_unmap_header
+{
+	uint8_t	length[2];
+	uint8_t	desc_length[2];
+	uint8_t	reserved[4];
+};
+
+struct scsi_unmap_desc
+{
+	uint8_t	lba[8];
+	uint8_t	length[4];
+	uint8_t	reserved[4];
+};
+
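The UNMAP parameter list these describe is a header followed by 16-byte LBA range descriptors; both length fields count only the bytes after themselves. A sketch building a single-range list (the helper name is illustrative):

#include <stdint.h>
#include <string.h>
#include <cam/scsi/scsi_all.h>

/*
 * Fill an UNMAP parameter list covering one LBA range.  'buf' must
 * hold at least sizeof(header) + sizeof(desc) bytes; the returned
 * total is what belongs in the UNMAP CDB's parameter list length.
 */
static uint32_t
build_unmap_list(uint8_t *buf, uint64_t lba, uint32_t nblocks)
{
	struct scsi_unmap_header *hdr;
	struct scsi_unmap_desc *desc;
	uint32_t total;

	hdr = (struct scsi_unmap_header *)buf;
	desc = (struct scsi_unmap_desc *)(hdr + 1);
	total = sizeof(*hdr) + sizeof(*desc);
	memset(buf, 0, total);

	/* Lengths exclude the two bytes of the length field itself. */
	scsi_ulto2b(total - 2, hdr->length);
	scsi_ulto2b(sizeof(*desc), hdr->desc_length);
	scsi_u64to8b(lba, desc->lba);
	scsi_ulto4b(nblocks, desc->length);
	return (total);
}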
 struct scsi_write_verify_10
 {
 	uint8_t	opcode;
@@ -917,10 +1419,10 @@
 #define	AP_PROTO_DMA_QUEUED	(0x07 << 1)
 #define	AP_PROTO_DEVICE_DIAG	(0x08 << 1)
 #define	AP_PROTO_DEVICE_RESET	(0x09 << 1)
-#define	AP_PROTO_UDMA_IN	(0x10 << 1)
-#define	AP_PROTO_UDMA_OUT	(0x11 << 1)
-#define	AP_PROTO_FPDMA		(0x12 << 1)
-#define	AP_PROTO_RESP_INFO	(0x15 << 1)
+#define	AP_PROTO_UDMA_IN	(0x0a << 1)
+#define	AP_PROTO_UDMA_OUT	(0x0b << 1)
+#define	AP_PROTO_FPDMA		(0x0c << 1)
+#define	AP_PROTO_RESP_INFO	(0x0f << 1)
 #define	AP_MULTI	0xe0
 	u_int8_t flags;
 #define	AP_T_LEN	0x03
@@ -951,6 +1453,526 @@
 	uint8_t  control;
 };
 
+struct scsi_report_supported_opcodes
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+	uint8_t  options;
+#define RSO_RCTD		0x80
+#define RSO_OPTIONS_MASK	0x07
+#define RSO_OPTIONS_ALL		0x00
+#define RSO_OPTIONS_OC		0x01
+#define RSO_OPTIONS_OC_SA	0x02
+#define RSO_OPTIONS_OC_ASA	0x03
+	uint8_t  requested_opcode;
+	uint8_t  requested_service_action[2];
+	uint8_t  length[4];
+	uint8_t  reserved1;
+	uint8_t  control;
+};
+
+struct scsi_report_supported_opcodes_timeout
+{
+	uint8_t  length[2];
+	uint8_t  reserved;
+	uint8_t  cmd_specific;
+	uint8_t  nominal_time[4];
+	uint8_t  recommended_time[4];
+};
+
+struct scsi_report_supported_opcodes_descr
+{
+	uint8_t  opcode;
+	uint8_t  reserved;
+	uint8_t  service_action[2];
+	uint8_t  reserved2;
+	uint8_t  flags;
+#define RSO_SERVACTV		0x01
+#define RSO_CTDP		0x02
+#define RSO_CDLP_MASK		0x0c
+#define RSO_CDLP_NO		0x00
+#define RSO_CDLP_A		0x04
+#define RSO_CDLP_B		0x08
+	uint8_t  cdb_length[2];
+	struct scsi_report_supported_opcodes_timeout timeout[0];
+};
+
+struct scsi_report_supported_opcodes_all
+{
+	uint8_t  length[4];
+	struct scsi_report_supported_opcodes_descr descr[0];
+};
+
+struct scsi_report_supported_opcodes_one
+{
+	uint8_t  reserved;
+	uint8_t  support;
+#define RSO_ONE_CTDP		0x80
+#define RSO_ONE_CDLP_MASK	0x18
+#define RSO_ONE_CDLP_NO		0x00
+#define RSO_ONE_CDLP_A		0x08
+#define RSO_ONE_CDLP_B		0x10
+#define RSO_ONE_SUP_MASK	0x07
+#define RSO_ONE_SUP_UNAVAIL	0x00
+#define RSO_ONE_SUP_NOT_SUP	0x01
+#define RSO_ONE_SUP_AVAIL	0x03
+#define RSO_ONE_SUP_VENDOR	0x05
+	uint8_t  cdb_length[2];
+	uint8_t  cdb_usage[];
+};
+
+struct scsi_report_supported_tmf
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+	uint8_t  options;
+#define RST_REPD		0x80
+	uint8_t  reserved[3];
+	uint8_t  length[4];
+	uint8_t  reserved1;
+	uint8_t  control;
+};
+
+struct scsi_report_supported_tmf_data
+{
+	uint8_t  byte1;
+#define RST_WAKES		0x01
+#define RST_TRS			0x02
+#define RST_QTS			0x04
+#define RST_LURS		0x08
+#define RST_CTSS		0x10
+#define RST_CACAS		0x20
+#define RST_ATSS		0x40
+#define RST_ATS			0x80
+	uint8_t  byte2;
+#define RST_ITNRS		0x01
+#define RST_QTSS		0x02
+#define RST_QAES		0x04
+	uint8_t  reserved;
+	uint8_t  length;
+};
+
+struct scsi_report_supported_tmf_ext_data
+{
+	uint8_t  byte1;
+	uint8_t  byte2;
+	uint8_t  reserved;
+	uint8_t  length;
+	uint8_t  byte5;
+#define RST_TMFTMOV		0x01
+	uint8_t  reserved2;
+	uint8_t  byte7;
+#define RST_WAKETS		0x01
+#define RST_TRTS		0x02
+#define RST_QTTS		0x04
+#define RST_LURTS		0x08
+#define RST_CTSTS		0x10
+#define RST_CACATS		0x20
+#define RST_ATSTS		0x40
+#define RST_ATTS		0x80
+	uint8_t  byte8;
+#define RST_ITNRTS		0x01
+#define RST_QTSTS		0x02
+#define RST_QAETS		0x04
+	uint8_t  long_timeout[4];
+	uint8_t  short_timeout[4];
+};
+
+struct scsi_report_timestamp
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+	uint8_t  reserved[4];
+	uint8_t  length[4];
+	uint8_t  reserved1;
+	uint8_t  control;
+};
+
+struct scsi_report_timestamp_data
+{
+	uint8_t  length[2];
+	uint8_t  origin;
+#define RTS_ORIG_MASK		0x07
+#define RTS_ORIG_ZERO		0x00
+#define RTS_ORIG_SET		0x02
+#define RTS_ORIG_OUTSIDE	0x03
+	uint8_t  reserved;
+	uint8_t  timestamp[6];
+	uint8_t  reserve2[2];
+};
+
+struct scsi_receive_copy_status_lid1
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define RCS_RCS_LID1		0x00
+	uint8_t  list_identifier;
+	uint8_t  reserved[7];
+	uint8_t  length[4];
+	uint8_t  reserved1;
+	uint8_t  control;
+};
+
+struct scsi_receive_copy_status_lid1_data
+{
+	uint8_t  available_data[4];
+	uint8_t  copy_command_status;
+#define RCS_CCS_INPROG		0x00
+#define RCS_CCS_COMPLETED	0x01
+#define RCS_CCS_ERROR		0x02
+	uint8_t  segments_processed[2];
+	uint8_t  transfer_count_units;
+#define RCS_TC_BYTES		0x00
+#define RCS_TC_KBYTES		0x01
+#define RCS_TC_MBYTES		0x02
+#define RCS_TC_GBYTES		0x03
+#define RCS_TC_TBYTES		0x04
+#define RCS_TC_PBYTES		0x05
+#define RCS_TC_EBYTES		0x06
+#define RCS_TC_LBAS		0xf1
+	uint8_t  transfer_count[4];
+};
+
+struct scsi_receive_copy_failure_details
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define RCS_RCFD		0x04
+	uint8_t  list_identifier;
+	uint8_t  reserved[7];
+	uint8_t  length[4];
+	uint8_t  reserved1;
+	uint8_t  control;
+};
+
+struct scsi_receive_copy_failure_details_data
+{
+	uint8_t  available_data[4];
+	uint8_t  reserved[52];
+	uint8_t  copy_command_status;
+	uint8_t  reserved2;
+	uint8_t  sense_data_length[2];
+	uint8_t  sense_data[];
+};
+
+struct scsi_receive_copy_status_lid4
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define RCS_RCS_LID4		0x05
+	uint8_t  list_identifier[4];
+	uint8_t  reserved[4];
+	uint8_t  length[4];
+	uint8_t  reserved1;
+	uint8_t  control;
+};
+
+struct scsi_receive_copy_status_lid4_data
+{
+	uint8_t  available_data[4];
+	uint8_t  response_to_service_action;
+	uint8_t  copy_command_status;
+#define RCS_CCS_COMPLETED_PROD	0x03
+#define RCS_CCS_COMPLETED_RESID	0x04
+#define RCS_CCS_INPROG_FGBG	0x10
+#define RCS_CCS_INPROG_FG	0x11
+#define RCS_CCS_INPROG_BG	0x12
+#define RCS_CCS_ABORTED		0x60
+	uint8_t  operation_counter[2];
+	uint8_t  estimated_status_update_delay[4];
+	uint8_t  extended_copy_completion_status;
+	uint8_t  length_of_the_sense_data_field;
+	uint8_t  sense_data_length;
+	uint8_t  transfer_count_units;
+	uint8_t  transfer_count[8];
+	uint8_t  segments_processed[2];
+	uint8_t  reserved[6];
+	uint8_t  sense_data[];
+};
+
+struct scsi_receive_copy_operating_parameters
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define RCS_RCOP		0x03
+	uint8_t  reserved[8];
+	uint8_t  length[4];
+	uint8_t  reserved1;
+	uint8_t  control;
+};
+
+struct scsi_receive_copy_operating_parameters_data
+{
+	uint8_t  length[4];
+	uint8_t  snlid;
+#define RCOP_SNLID		0x01
+	uint8_t  reserved[3];
+	uint8_t  maximum_cscd_descriptor_count[2];
+	uint8_t  maximum_segment_descriptor_count[2];
+	uint8_t  maximum_descriptor_list_length[4];
+	uint8_t  maximum_segment_length[4];
+	uint8_t  maximum_inline_data_length[4];
+	uint8_t  held_data_limit[4];
+	uint8_t  maximum_stream_device_transfer_size[4];
+	uint8_t  reserved2[2];
+	uint8_t  total_concurrent_copies[2];
+	uint8_t  maximum_concurrent_copies;
+	uint8_t  data_segment_granularity;
+	uint8_t  inline_data_granularity;
+	uint8_t  held_data_granularity;
+	uint8_t  reserved3[3];
+	uint8_t  implemented_descriptor_list_length;
+	uint8_t  list_of_implemented_descriptor_type_codes[0];
+};
+
+struct scsi_extended_copy
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define EC_EC_LID1		0x00
+#define EC_EC_LID4		0x01
+	uint8_t  reserved[8];
+	uint8_t  length[4];
+	uint8_t  reserved1;
+	uint8_t  control;
+};
+
+struct scsi_ec_cscd_dtsp
+{
+	uint8_t  flags;
+#define EC_CSCD_FIXED		0x01
+#define EC_CSCD_PAD		0x04
+	uint8_t  block_length[3];
+};
+
+struct scsi_ec_cscd
+{
+	uint8_t  type_code;
+#define EC_CSCD_EXT		0xff
+	uint8_t  luidt_pdt;
+#define EC_NUL			0x20
+#define EC_LUIDT_MASK		0xc0
+#define EC_LUIDT_LUN		0x00
+#define EC_LUIDT_PROXY_TOKEN	0x40
+	uint8_t  relative_initiator_port[2];
+	uint8_t  cscd_params[24];
+	struct scsi_ec_cscd_dtsp dtsp;
+};
+
+struct scsi_ec_cscd_id
+{
+	uint8_t  type_code;
+#define EC_CSCD_ID		0xe4
+	uint8_t  luidt_pdt;
+	uint8_t  relative_initiator_port[2];
+	uint8_t  codeset;
+	uint8_t  id_type;
+	uint8_t  reserved;
+	uint8_t  length;
+	uint8_t  designator[20];
+	struct scsi_ec_cscd_dtsp dtsp;
+};
+
+struct scsi_ec_segment
+{
+	uint8_t  type_code;
+	uint8_t  flags;
+#define EC_SEG_DC		0x02
+#define EC_SEG_CAT		0x01
+	uint8_t  descr_length[2];
+	uint8_t  params[];
+};
+
+struct scsi_ec_segment_b2b
+{
+	uint8_t  type_code;
+#define EC_SEG_B2B		0x02
+	uint8_t  flags;
+	uint8_t  descr_length[2];
+	uint8_t  src_cscd[2];
+	uint8_t  dst_cscd[2];
+	uint8_t  reserved[2];
+	uint8_t  number_of_blocks[2];
+	uint8_t  src_lba[8];
+	uint8_t  dst_lba[8];
+};
+
+struct scsi_ec_segment_verify
+{
+	uint8_t  type_code;
+#define EC_SEG_VERIFY		0x07
+	uint8_t  reserved;
+	uint8_t  descr_length[2];
+	uint8_t  src_cscd[2];
+	uint8_t  reserved2[2];
+	uint8_t  tur;
+	uint8_t  reserved3[3];
+};
+
+struct scsi_ec_segment_register_key
+{
+	uint8_t  type_code;
+#define EC_SEG_REGISTER_KEY	0x14
+	uint8_t  reserved;
+	uint8_t  descr_length[2];
+	uint8_t  reserved2[2];
+	uint8_t  dst_cscd[2];
+	uint8_t  res_key[8];
+	uint8_t  sa_res_key[8];
+	uint8_t  reserved3[4];
+};
+
+struct scsi_extended_copy_lid1_data
+{
+	uint8_t  list_identifier;
+	uint8_t  flags;
+#define EC_PRIORITY		0x07
+#define EC_LIST_ID_USAGE_MASK	0x18
+#define EC_LIST_ID_USAGE_FULL	0x08
+#define EC_LIST_ID_USAGE_NOHOLD	0x10
+#define EC_LIST_ID_USAGE_NONE	0x18
+#define EC_STR			0x20
+	uint8_t  cscd_list_length[2];
+	uint8_t  reserved[4];
+	uint8_t  segment_list_length[4];
+	uint8_t  inline_data_length[4];
+	uint8_t  data[];
+};
+
+struct scsi_extended_copy_lid4_data
+{
+	uint8_t  list_format;
+#define EC_LIST_FORMAT		0x01
+	uint8_t  flags;
+	uint8_t  header_cscd_list_length[2];
+	uint8_t  reserved[11];
+	uint8_t  flags2;
+#define EC_IMMED		0x01
+#define EC_G_SENSE		0x02
+	uint8_t  header_cscd_type_code;
+	uint8_t  reserved2[3];
+	uint8_t  list_identifier[4];
+	uint8_t  reserved3[18];
+	uint8_t  cscd_list_length[2];
+	uint8_t  segment_list_length[2];
+	uint8_t  inline_data_length[2];
+	uint8_t  data[];
+};
+
+struct scsi_copy_operation_abort
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define EC_COA			0x1c
+	uint8_t  list_identifier[4];
+	uint8_t  reserved[9];
+	uint8_t  control;
+};
+
+struct scsi_populate_token
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define EC_PT			0x10
+	uint8_t  reserved[4];
+	uint8_t  list_identifier[4];
+	uint8_t  length[4];
+	uint8_t  group_number;
+	uint8_t  control;
+};
+
+struct scsi_range_desc
+{
+	uint8_t	lba[8];
+	uint8_t	length[4];
+	uint8_t	reserved[4];
+};
+
+struct scsi_populate_token_data
+{
+	uint8_t  length[2];
+	uint8_t  flags;
+#define EC_PT_IMMED			0x01
+#define EC_PT_RTV			0x02
+	uint8_t  reserved;
+	uint8_t  inactivity_timeout[4];
+	uint8_t  rod_type[4];
+	uint8_t  reserved2[2];
+	uint8_t  range_descriptor_length[2];
+	struct scsi_range_desc desc[];
+};
+
+struct scsi_write_using_token
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define EC_WUT			0x11
+	uint8_t  reserved[4];
+	uint8_t  list_identifier[4];
+	uint8_t  length[4];
+	uint8_t  group_number;
+	uint8_t  control;
+};
+
+struct scsi_write_using_token_data
+{
+	uint8_t  length[2];
+	uint8_t  flags;
+#define EC_WUT_IMMED			0x01
+#define EC_WUT_DEL_TKN			0x02
+	uint8_t  reserved[5];
+	uint8_t  offset_into_rod[8];
+	uint8_t  rod_token[512];
+	uint8_t  reserved2[6];
+	uint8_t  range_descriptor_length[2];
+	struct scsi_range_desc desc[];
+};
+
+struct scsi_receive_rod_token_information
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define RCS_RRTI		0x07
+	uint8_t  list_identifier[4];
+	uint8_t  reserved[4];
+	uint8_t  length[4];
+	uint8_t  reserved2;
+	uint8_t  control;
+};
+
+struct scsi_token
+{
+	uint8_t  type[4];
+#define ROD_TYPE_INTERNAL	0x00000000
+#define ROD_TYPE_AUR		0x00010000
+#define ROD_TYPE_PIT_DEF	0x00800000
+#define ROD_TYPE_PIT_VULN	0x00800001
+#define ROD_TYPE_PIT_PERS	0x00800002
+#define ROD_TYPE_PIT_ANY	0x0080FFFF
+#define ROD_TYPE_BLOCK_ZERO	0xFFFF0001
+	uint8_t  reserved[2];
+	uint8_t  length[2];
+	uint8_t  body[0];
+};
+
+struct scsi_report_all_rod_tokens
+{
+	uint8_t  opcode;
+	uint8_t  service_action;
+#define RCS_RART		0x08
+	uint8_t  reserved[8];
+	uint8_t  length[4];
+	uint8_t  reserved2;
+	uint8_t  control;
+};
+
+struct scsi_report_all_rod_tokens_data
+{
+	uint8_t  available_data[4];
+	uint8_t  reserved[4];
+	uint8_t  rod_management_token_list[];
+};
+
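Most of these RECEIVE COPY results decode mechanically. As one small example, classifying a completed RECEIVE COPY STATUS (LID1) response (the helper name is illustrative):

#include <stdint.h>
#include <cam/scsi/scsi_all.h>

/*
 * Summarize a completed RECEIVE COPY STATUS (LID1) response.
 * Returns 0 while the copy is in progress, 1 when completed,
 * -1 on an error status or a short response.
 */
static int
copy_status(const uint8_t *buf, uint32_t valid_len)
{
	const struct scsi_receive_copy_status_lid1_data *data;

	data = (const struct scsi_receive_copy_status_lid1_data *)buf;
	if (valid_len < sizeof(*data))
		return (-1);
	switch (data->copy_command_status) {
	case RCS_CCS_INPROG:
		return (0);
	case RCS_CCS_COMPLETED:
		return (1);
	default:
		return (-1);
	}
}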
 struct ata_pass_16 {
 	u_int8_t opcode;
 	u_int8_t protocol;
@@ -1022,15 +2044,24 @@
 #define	MODE_SENSE_10		0x5A
 #define	PERSISTENT_RES_IN	0x5E
 #define	PERSISTENT_RES_OUT	0x5F
+#define	EXTENDED_COPY		0x83
+#define	RECEIVE_COPY_STATUS	0x84
 #define	ATA_PASS_16		0x85
 #define	READ_16			0x88
+#define	COMPARE_AND_WRITE	0x89
 #define	WRITE_16		0x8A
+#define	READ_ATTRIBUTE		0x8C
+#define	WRITE_ATTRIBUTE		0x8D
 #define	WRITE_VERIFY_16		0x8E
+#define	VERIFY_16		0x8F
 #define	SYNCHRONIZE_CACHE_16	0x91
 #define	WRITE_SAME_16		0x93
+#define	READ_BUFFER_16		0x9B
+#define	WRITE_ATOMIC_16		0x9C
 #define	SERVICE_ACTION_IN	0x9E
 #define	REPORT_LUNS		0xA0
 #define	ATA_PASS_12		0xA1
+#define	SECURITY_PROTOCOL_IN	0xA2
 #define	MAINTENANCE_IN		0xA3
 #define	MAINTENANCE_OUT		0xA4
 #define	MOVE_MEDIUM     	0xA5
@@ -1037,6 +2068,8 @@
 #define	READ_12			0xA8
 #define	WRITE_12		0xAA
 #define	WRITE_VERIFY_12		0xAE
+#define	VERIFY_12		0xAF
+#define	SECURITY_PROTOCOL_OUT	0xB5
 #define	READ_ELEMENT_STATUS	0xB8
 #define	READ_CD			0xBE
 
@@ -1129,10 +2162,12 @@
 					 * reserved for this peripheral
 					 * qualifier.
 					 */
-#define	SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x08) != 0)
+#define	SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x04) != 0)
 	u_int8_t dev_qual2;
 #define	SID_QUAL2	0x7F
-#define	SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & 0x80) != 0)
+#define	SID_LU_CONG	0x40
+#define	SID_RMB		0x80
+#define	SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & SID_RMB) != 0)
 	u_int8_t version;
 #define	SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07)
 #define		SCSI_REV_0		0
@@ -1142,6 +2177,7 @@
 #define		SCSI_REV_SPC2		4
 #define		SCSI_REV_SPC3		5
 #define		SCSI_REV_SPC4		6
+#define		SCSI_REV_SPC5		7
 
 #define	SID_ECMA	0x38
 #define	SID_ISO		0xC0
@@ -1280,18 +2316,13 @@
 struct scsi_vpd_id_descriptor
 {
 	u_int8_t	proto_codeset;
-#define	SCSI_PROTO_FC		0x00
-#define	SCSI_PROTO_SPI		0x01
-#define	SCSI_PROTO_SSA		0x02
-#define	SCSI_PROTO_1394		0x03
-#define	SCSI_PROTO_RDMA		0x04
-#define SCSI_PROTO_iSCSI	0x05
-#define	SCSI_PROTO_SAS		0x06
-#define	SCSI_PROTO_ADT		0x07
-#define	SCSI_PROTO_ATA		0x08
+	/*
+	 * See the SCSI_PROTO definitions above for the protocols.
+	 */
 #define	SVPD_ID_PROTO_SHIFT	4
 #define	SVPD_ID_CODESET_BINARY	0x01
 #define	SVPD_ID_CODESET_ASCII	0x02
+#define	SVPD_ID_CODESET_UTF8	0x03
 #define	SVPD_ID_CODESET_MASK	0x0f
 	u_int8_t	id_type;
 #define	SVPD_ID_PIV		0x80
@@ -1308,6 +2339,8 @@
 #define	SVPD_ID_TYPE_LUNGRP	0x06
 #define	SVPD_ID_TYPE_MD5_LUN_ID	0x07
 #define	SVPD_ID_TYPE_SCSI_NAME	0x08
+#define	SVPD_ID_TYPE_PROTO	0x09
+#define	SVPD_ID_TYPE_UUID	0x0a
 #define	SVPD_ID_TYPE_MASK	0x0f
 	u_int8_t	reserved;
 	u_int8_t	length;
@@ -1422,6 +2455,103 @@
 	uint8_t control;
 };
 
+struct scsi_vpd_extended_inquiry_data
+{
+	uint8_t device;
+	uint8_t page_code;
+#define	SVPD_EXTENDED_INQUIRY_DATA	0x86
+	uint8_t page_length[2];
+	uint8_t flags1;
+
+	/* These values are for direct access devices */
+#define	SVPD_EID_AM_MASK	0xC0
+#define	SVPD_EID_AM_DEFER	0x80
+#define	SVPD_EID_AM_IMMED	0x40
+#define	SVPD_EID_AM_UNDEFINED	0x00
+#define	SVPD_EID_AM_RESERVED	0xc0
+#define	SVPD_EID_SPT		0x38
+#define	SVPD_EID_SPT_1		0x00
+#define	SVPD_EID_SPT_12		0x08
+#define	SVPD_EID_SPT_2		0x10
+#define	SVPD_EID_SPT_13		0x18
+#define	SVPD_EID_SPT_3		0x20
+#define	SVPD_EID_SPT_23		0x28
+#define	SVPD_EID_SPT_123	0x38
+
+	/* These values are for sequential access devices */
+#define	SVPD_EID_SA_SPT_LBP	0x08
+
+#define	SVPD_EID_GRD_CHK	0x04
+#define	SVPD_EID_APP_CHK	0x02
+#define	SVPD_EID_REF_CHK	0x01
+
+	uint8_t flags2;
+#define	SVPD_EID_UASK_SUP	0x20
+#define	SVPD_EID_GROUP_SUP	0x10
+#define	SVPD_EID_PRIOR_SUP	0x08
+#define	SVPD_EID_HEADSUP	0x04
+#define	SVPD_EID_ORDSUP		0x02
+#define	SVPD_EID_SIMPSUP	0x01
+	uint8_t flags3;
+#define	SVPD_EID_WU_SUP		0x08
+#define	SVPD_EID_CRD_SUP	0x04
+#define	SVPD_EID_NV_SUP		0x02
+#define	SVPD_EID_V_SUP		0x01
+	uint8_t flags4;
+#define	SVPD_EID_NO_PI_CHK	0x20
+#define	SVPD_EID_P_I_I_SUP	0x10
+#define	SVPD_EID_LUICLR		0x01
+	uint8_t flags5;
+#define	SVPD_EID_LUCT_MASK	0xe0
+#define	SVPD_EID_LUCT_NOT_REP	0x00
+#define	SVPD_EID_LUCT_CONGL	0x20
+#define	SVPD_EID_LUCT_GROUP	0x40
+#define	SVPD_EID_R_SUP		0x10
+#define	SVPD_EID_RTD_SUP	0x08
+#define	SVPD_EID_HSSRELEF	0x02
+#define	SVPD_EID_CBCS		0x01
+	uint8_t flags6;
+#define	SVPD_EID_MULTI_I_T_FW	0x0F
+#define	SVPD_EID_MC_VENDOR_SPEC	0x00
+#define	SVPD_EID_MC_MODE_1	0x01
+#define	SVPD_EID_MC_MODE_2	0x02
+#define	SVPD_EID_MC_MODE_3	0x03
+	uint8_t est[2];
+	uint8_t flags7;
+#define	SVPD_EID_POA_SUP	0x80
+#define	SVPD_EID_HRA_SUP	0x40
+#define	SVPD_EID_VSA_SUP	0x20
+	uint8_t max_sense_length;
+	uint8_t bind_flags;
+#define	SVPD_EID_IBS		0x80
+#define	SVPD_EID_IAS		0x40
+#define	SVPD_EID_SAC		0x04
+#define	SVPD_EID_NRD1		0x02
+#define	SVPD_EID_NRD0		0x01
+	uint8_t reserved2[49];
+};
+
+struct scsi_vpd_mode_page_policy_descr
+{
+	uint8_t page_code;
+	uint8_t subpage_code;
+	uint8_t policy;
+#define	SVPD_MPP_SHARED		0x00
+#define	SVPD_MPP_PORT		0x01
+#define	SVPD_MPP_I_T		0x03
+#define	SVPD_MPP_MLUS		0x80
+	uint8_t reserved;
+};
+
+struct scsi_vpd_mode_page_policy
+{
+	uint8_t device;
+	uint8_t page_code;
+#define	SVPD_MODE_PAGE_POLICY	0x87
+	uint8_t page_length[2];
+	struct scsi_vpd_mode_page_policy_descr descr[0];
+};
+
 struct scsi_diag_page {
 	uint8_t page_code;
 	uint8_t page_specific_flags;
@@ -1429,6 +2559,31 @@
 	uint8_t params[0];
 };
 
+struct scsi_vpd_port_designation
+{
+	uint8_t reserved[2];
+	uint8_t relative_port_id[2];
+	uint8_t reserved2[2];
+	uint8_t initiator_transportid_length[2];
+	uint8_t initiator_transportid[0];
+};
+
+struct scsi_vpd_port_designation_cont
+{
+	uint8_t reserved[2];
+	uint8_t target_port_descriptors_length[2];
+	struct scsi_vpd_id_descriptor target_port_descriptors[0];
+};
+
+struct scsi_vpd_scsi_ports
+{
+	u_int8_t device;
+	u_int8_t page_code;
+#define	SVPD_SCSI_PORTS		0x88
+	u_int8_t page_length[2];
+	struct scsi_vpd_port_designation design[];
+};
+
 /*
  * ATA Information VPD Page based on
  * T10/2126-D Revision 04
@@ -1435,6 +2590,148 @@
  */
 #define SVPD_ATA_INFORMATION		0x89
 
+
+struct scsi_vpd_tpc_descriptor
+{
+	uint8_t desc_type[2];
+	uint8_t desc_length[2];
+	uint8_t parameters[];
+};
+
+struct scsi_vpd_tpc_descriptor_bdrl
+{
+	uint8_t desc_type[2];
+#define	SVPD_TPC_BDRL			0x0000
+	uint8_t desc_length[2];
+	uint8_t vendor_specific[6];
+	uint8_t maximum_ranges[2];
+	uint8_t maximum_inactivity_timeout[4];
+	uint8_t default_inactivity_timeout[4];
+	uint8_t maximum_token_transfer_size[8];
+	uint8_t optimal_transfer_count[8];
+};
+
+struct scsi_vpd_tpc_descriptor_sc_descr
+{
+	uint8_t opcode;
+	uint8_t sa_length;
+	uint8_t supported_service_actions[0];
+};
+
+struct scsi_vpd_tpc_descriptor_sc
+{
+	uint8_t desc_type[2];
+#define	SVPD_TPC_SC			0x0001
+	uint8_t desc_length[2];
+	uint8_t list_length;
+	struct scsi_vpd_tpc_descriptor_sc_descr descr[];
+};
+
+struct scsi_vpd_tpc_descriptor_pd
+{
+	uint8_t desc_type[2];
+#define	SVPD_TPC_PD			0x0004
+	uint8_t desc_length[2];
+	uint8_t reserved[4];
+	uint8_t maximum_cscd_descriptor_count[2];
+	uint8_t maximum_segment_descriptor_count[2];
+	uint8_t maximum_descriptor_list_length[4];
+	uint8_t maximum_inline_data_length[4];
+	uint8_t reserved2[12];
+};
+
+struct scsi_vpd_tpc_descriptor_sd
+{
+	uint8_t desc_type[2];
+#define	SVPD_TPC_SD			0x0008
+	uint8_t desc_length[2];
+	uint8_t list_length;
+	uint8_t supported_descriptor_codes[];
+};
+
+struct scsi_vpd_tpc_descriptor_sdid
+{
+	uint8_t desc_type[2];
+#define	SVPD_TPC_SDID			0x000C
+	uint8_t desc_length[2];
+	uint8_t list_length[2];
+	uint8_t supported_descriptor_ids[];
+};
+
+struct scsi_vpd_tpc_descriptor_rtf_block
+{
+	uint8_t type_format;
+#define	SVPD_TPC_RTF_BLOCK			0x00
+	uint8_t reserved;
+	uint8_t desc_length[2];
+	uint8_t reserved2[2];
+	uint8_t optimal_length_granularity[2];
+	uint8_t maximum_bytes[8];
+	uint8_t optimal_bytes[8];
+	uint8_t optimal_bytes_to_token_per_segment[8];
+	uint8_t optimal_bytes_from_token_per_segment[8];
+	uint8_t reserved3[8];
+};
+
+struct scsi_vpd_tpc_descriptor_rtf
+{
+	uint8_t desc_type[2];
+#define	SVPD_TPC_RTF			0x0106
+	uint8_t desc_length[2];
+	uint8_t remote_tokens;
+	uint8_t reserved[11];
+	uint8_t minimum_token_lifetime[4];
+	uint8_t maximum_token_lifetime[4];
+	uint8_t maximum_token_inactivity_timeout[4];
+	uint8_t reserved2[18];
+	uint8_t type_specific_features_length[2];
+	uint8_t type_specific_features[0];
+};
+
+struct scsi_vpd_tpc_descriptor_srtd
+{
+	uint8_t rod_type[4];
+	uint8_t flags;
+#define	SVPD_TPC_SRTD_TOUT		0x01
+#define	SVPD_TPC_SRTD_TIN		0x02
+#define	SVPD_TPC_SRTD_ECPY		0x80
+	uint8_t reserved;
+	uint8_t preference_indicator[2];
+	uint8_t reserved2[56];
+};
+
+struct scsi_vpd_tpc_descriptor_srt
+{
+	uint8_t desc_type[2];
+#define	SVPD_TPC_SRT			0x0108
+	uint8_t desc_length[2];
+	uint8_t reserved[2];
+	uint8_t rod_type_descriptors_length[2];
+	uint8_t rod_type_descriptors[0];
+};
+
+struct scsi_vpd_tpc_descriptor_gco
+{
+	uint8_t desc_type[2];
+#define	SVPD_TPC_GCO			0x8001
+	uint8_t desc_length[2];
+	uint8_t total_concurrent_copies[4];
+	uint8_t maximum_identified_concurrent_copies[4];
+	uint8_t maximum_segment_length[4];
+	uint8_t data_segment_granularity;
+	uint8_t inline_data_granularity;
+	uint8_t reserved[18];
+};
+
+struct scsi_vpd_tpc
+{
+	uint8_t device;
+	uint8_t page_code;
+#define	SVPD_SCSI_TPC			0x8F
+	uint8_t page_length[2];
+	struct scsi_vpd_tpc_descriptor descr[];
+};
+
 /*
  * Block Device Characteristics VPD Page based on
  * T10/1799-D Revision 31
@@ -1447,7 +2744,7 @@
 	u_int8_t page_length[2];
 	u_int8_t medium_rotation_rate[2];
 #define SVPD_BDC_RATE_NOT_REPORTED	0x00
-#define SVPD_BDC_RATE_NONE_ROTATING	0x01
+#define SVPD_BDC_RATE_NON_ROTATING	0x01
 	u_int8_t reserved1;
 	u_int8_t nominal_form_factor;
 #define SVPD_BDC_FORM_NOT_REPORTED	0x00
@@ -1460,6 +2757,27 @@
 };
 
 /*
+ * Block Device Characteristics VPD Page
+ */
+struct scsi_vpd_block_device_characteristics
+{
+	uint8_t device;
+	uint8_t page_code;
+#define	SVPD_BDC		0xB1
+	uint8_t page_length[2];
+	uint8_t medium_rotation_rate[2];
+#define	SVPD_NOT_REPORTED	0x0000
+#define	SVPD_NON_ROTATING	0x0001
+	uint8_t product_type;
+	uint8_t wab_wac_ff;
+	uint8_t flags;
+#define	SVPD_VBULS		0x01
+#define	SVPD_FUAB		0x02
+#define	SVPD_HAW_ZBC		0x10
+	uint8_t reserved[55];
+};
+
+/*
  * Logical Block Provisioning VPD Page based on
  * T10/1799-D Revision 31
  */
@@ -1489,8 +2807,7 @@
 };
 
 /*
- * Block Limits VDP Page based on
- * T10/1799-D Revision 31
+ * Block Limits VPD Page based on SBC-4 Revision 2
  */
 struct scsi_vpd_block_limits
 {
@@ -1511,7 +2828,11 @@
 	u_int8_t opt_unmap_grain[4];
 	u_int8_t unmap_grain_align[4];
 	u_int8_t max_write_same_length[8];
-	u_int8_t reserved2[20];
+	u_int8_t max_atomic_transfer_length[4];
+	u_int8_t atomic_alignment[4];
+	u_int8_t atomic_transfer_length_granularity[4];
+	u_int8_t max_atomic_transfer_length_with_atomic_boundary[4];
+	u_int8_t max_atomic_boundary_size[4];
 };
 
 struct scsi_read_capacity
@@ -1570,8 +2891,35 @@
 #define	SRC16_LBPRZ_A		0x4000
 #define	SRC16_LBPME_A		0x8000
 	uint8_t lalba_lbp[2];
+	uint8_t	reserved[16];
 };
 
+struct scsi_get_lba_status
+{
+	uint8_t opcode;
+#define	SGLS_SERVICE_ACTION	0x12
+	uint8_t service_action;
+	uint8_t addr[8];
+	uint8_t alloc_len[4];
+	uint8_t reserved;
+	uint8_t control;
+};
+
+struct scsi_get_lba_status_data_descr
+{
+	uint8_t addr[8];
+	uint8_t length[4];
+	uint8_t status;
+	uint8_t reserved[3];
+};
+
+struct scsi_get_lba_status_data
+{
+	uint8_t length[4];
+	uint8_t reserved[4];
+	struct scsi_get_lba_status_data_descr descr[];
+};
+
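In the GET LBA STATUS response the 4-byte length counts everything after itself, including the 4 reserved bytes, so the 16-byte descriptors begin at offset 8. A walking sketch (helper name illustrative; the low nibble of status is the provisioning status):

#include <stdio.h>
#include <stdint.h>
#include <cam/scsi/scsi_all.h>

/*
 * Print each LBA status descriptor in a completed GET LBA STATUS
 * response of valid_len bytes.
 */
static void
walk_lba_status(uint8_t *buf, uint32_t valid_len)
{
	struct scsi_get_lba_status_data *data;
	uint32_t i, ndescr;

	data = (struct scsi_get_lba_status_data *)buf;
	if (valid_len < sizeof(*data))
		return;
	ndescr = (scsi_4btoul(data->length) - sizeof(data->reserved)) /
	    sizeof(data->descr[0]);
	for (i = 0; i < ndescr &&
	    sizeof(*data) + (i + 1) * sizeof(data->descr[0]) <= valid_len;
	    i++)
		printf("lba %ju, %u blocks, status %u\n",
		    (uintmax_t)scsi_8btou64(data->descr[i].addr),
		    (unsigned)scsi_4btoul(data->descr[i].length),
		    (unsigned)(data->descr[i].status & 0x0f));
}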
 struct scsi_report_luns
 {
 	uint8_t opcode;
@@ -1579,6 +2927,9 @@
 #define	RPL_REPORT_DEFAULT	0x00
 #define	RPL_REPORT_WELLKNOWN	0x01
 #define	RPL_REPORT_ALL		0x02
+#define	RPL_REPORT_ADMIN	0x10
+#define	RPL_REPORT_NONSUBSID	0x11
+#define	RPL_REPORT_CONGLOM	0x12
 	uint8_t select_report;
 	uint8_t reserved2[3];
 	uint8_t length[4];
@@ -1618,8 +2969,9 @@
 {
 	uint8_t opcode;
 	uint8_t service_action;
+#define	STG_PDF_MASK		0xe0
 #define	STG_PDF_LENGTH		0x00
-#define	RPL_PDF_EXTENDED	0x20
+#define	STG_PDF_EXTENDED	0x20
 	uint8_t reserved1[4];
 	uint8_t length[4];
 	uint8_t reserved2;
@@ -1669,13 +3021,48 @@
 
 struct scsi_target_group_data_extended {
 	uint8_t length[4];	/* length of returned data, in bytes */
-	uint8_t format_type;	/* STG_PDF_LENGTH or RPL_PDF_EXTENDED */
+	uint8_t format_type;	/* STG_PDF_LENGTH or STG_PDF_EXTENDED */
 	uint8_t	implicit_transition_time;
 	uint8_t reserved[2];
 	struct scsi_target_port_group_descriptor groups[];
 };
 
+struct scsi_security_protocol_in
+{
+	uint8_t opcode;
+	uint8_t security_protocol;
+#define	SPI_PROT_INFORMATION		0x00
+#define	SPI_PROT_CBCS			0x07
+#define	SPI_PROT_TAPE_DATA_ENC		0x20
+#define	SPI_PROT_DATA_ENC_CONFIG	0x21
+#define	SPI_PROT_SA_CREATE_CAP		0x40
+#define	SPI_PROT_IKEV2_SCSI		0x41
+#define	SPI_PROT_JEDEC_UFS		0xEC
+#define	SPI_PROT_SDCARD_TFSSS		0xED
+#define	SPI_PROT_AUTH_HOST_TRANSIENT	0xEE
+#define	SPI_PROT_ATA_DEVICE_PASSWORD	0xEF
+	uint8_t security_protocol_specific[2];
+	uint8_t byte4;
+#define	SPI_INC_512	0x80
+	uint8_t reserved1;
+	uint8_t length[4];
+	uint8_t reserved2;
+	uint8_t control;
+};
 
+struct scsi_security_protocol_out
+{
+	uint8_t opcode;
+	uint8_t security_protocol;
+	uint8_t security_protocol_specific[2];
+	uint8_t byte4;
+#define	SPO_INC_512	0x80
+	uint8_t reserved1;
+	uint8_t length[4];
+	uint8_t reserved2;
+	uint8_t control;
+};
+
 typedef enum {
 	SSD_TYPE_NONE,
 	SSD_TYPE_FIXED,
@@ -1738,11 +3125,12 @@
 #define		SSD_KEY_BLANK_CHECK	0x08
 #define		SSD_KEY_Vendor_Specific	0x09
 #define		SSD_KEY_COPY_ABORTED	0x0a
-#define		SSD_KEY_ABORTED_COMMAND	0x0b		
+#define		SSD_KEY_ABORTED_COMMAND	0x0b
 #define		SSD_KEY_EQUAL		0x0c
 #define		SSD_KEY_VOLUME_OVERFLOW	0x0d
 #define		SSD_KEY_MISCOMPARE	0x0e
-#define		SSD_KEY_COMPLETED	0x0f			
+#define		SSD_KEY_COMPLETED	0x0f
+#define	SSD_SDAT_OVFL	0x10
 #define	SSD_ILI		0x20
 #define	SSD_EOM		0x40
 #define	SSD_FILEMARK	0x80
@@ -1780,7 +3168,9 @@
 	uint8_t sense_key;
 	uint8_t	add_sense_code;
 	uint8_t	add_sense_code_qual;
-	uint8_t	reserved[3];
+	uint8_t	flags;
+#define	SSDD_SDAT_OVFL		0x80
+	uint8_t	reserved[2];
 	/*
 	 * Note that SPC-4, section 4.5.2.1 says that the extra_len field
 	 * must be less than or equal to 244.
@@ -1820,7 +3210,7 @@
 
 /*
  * Command-specific information depends on the command for which the
- * reported condition occured.
+ * reported condition occurred.
  *
  * Note that any changes to the field names or positions in this structure,
  * even reserved fields, should be accompanied by an examination of the
@@ -2019,6 +3409,29 @@
 };
 
 /*
+ * ATA Return descriptor, used for the SCSI ATA PASS-THROUGH(12), (16) and
+ * (32) commands.  Described in SAT-4r05.
+ */
+struct scsi_sense_ata_ret_desc
+{
+	uint8_t desc_type;
+#define	SSD_DESC_ATA		0x09
+	uint8_t length;
+	uint8_t flags;
+#define	SSD_DESC_ATA_FLAG_EXTEND	0x01
+	uint8_t error;
+	uint8_t count_15_8;
+	uint8_t count_7_0;
+	uint8_t lba_31_24;
+	uint8_t lba_7_0;
+	uint8_t lba_39_32;
+	uint8_t lba_15_8;
+	uint8_t lba_47_40;
+	uint8_t lba_23_16;
+	uint8_t device;
+	uint8_t status;
+};
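+/*
+ * The descriptor can be located with scsi_find_desc(), declared
+ * later in this header.  A sketch pulling the ATA status and error
+ * registers out of descriptor-format sense:
+ *
+ *	static int
+ *	ata_status_from_sense(struct scsi_sense_data_desc *sense,
+ *	    u_int sense_len, uint8_t *status, uint8_t *error)
+ *	{
+ *		struct scsi_sense_ata_ret_desc *ata;
+ *
+ *		ata = (struct scsi_sense_ata_ret_desc *)scsi_find_desc(
+ *		    sense, sense_len, SSD_DESC_ATA);
+ *		if (ata == NULL)
+ *			return (-1);
+ *		*status = ata->status;
+ *		*error = ata->error;
+ *		return (0);
+ *	}
+ */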
+/*
  * Used with Sense keys No Sense (0x00) and Not Ready (0x02).
  *
  * Maximum descriptors allowed: 32 (as of SPC-4)
@@ -2051,11 +3464,13 @@
 #define	SSD_FORWARDED_SDS_UNK	0x00
 #define	SSD_FORWARDED_SDS_EXSRC	0x01
 #define	SSD_FORWARDED_SDS_EXDST	0x02
+	uint8_t	status;
+	uint8_t	sense_data[];
 };
 
 /*
  * Vendor-specific sense descriptor.  The desc_type field will be in the
- * range bewteen MIN and MAX inclusive.
+ * range between MIN and MAX inclusive.
  */
 struct scsi_sense_vendor
 {
@@ -2183,6 +3598,22 @@
 	SSS_FLAG_PRINT_COMMAND	= 0x01
 } scsi_sense_string_flags;
 
+struct scsi_nv {
+	const char *name;
+	uint64_t value;
+};
+
+typedef enum {
+	SCSI_NV_FOUND,
+	SCSI_NV_AMBIGUOUS,
+	SCSI_NV_NOT_FOUND
+} scsi_nv_status;
+
+typedef enum {
+	SCSI_NV_FLAG_NONE	= 0x00,
+	SCSI_NV_FLAG_IG_CASE	= 0x01	/* Case insensitive comparison */
+} scsi_nv_flags;
+
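scsi_nv pairs drive simple name-to-value parsing through scsi_get_nv(), declared below. A usage sketch mapping task-attribute names to tag actions (the table contents and function name are illustrative):

#include <stdint.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

/* A small name/value table and a case-insensitive lookup. */
static struct scsi_nv task_attrs[] = {
	{ "simple", MSG_SIMPLE_Q_TAG },
	{ "head", MSG_HEAD_OF_Q_TAG },
	{ "ordered", MSG_ORDERED_Q_TAG }
};

static int
parse_task_attr(char *name, uint8_t *tag_action)
{
	scsi_nv_status status;
	int entry;

	status = scsi_get_nv(task_attrs,
	    sizeof(task_attrs) / sizeof(task_attrs[0]), name,
	    &entry, SCSI_NV_FLAG_IG_CASE);
	if (status != SCSI_NV_FOUND)
		return (-1);
	*tag_action = task_attrs[entry].value;
	return (0);
}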
 struct ccb_scsiio;
 struct cam_periph;
 union  ccb;
@@ -2192,8 +3623,6 @@
 
 extern const char *scsi_sense_key_text[];
 
-struct sbuf;
-
 __BEGIN_DECLS
 void scsi_sense_desc(int sense_key, int asc, int ascq,
 		     struct scsi_inquiry_data *inq_data,
@@ -2209,13 +3638,15 @@
 					void *), void *arg);
 uint8_t *scsi_find_desc(struct scsi_sense_data_desc *sense, u_int sense_len,
 			uint8_t desc_type);
-void scsi_set_sense_data(struct scsi_sense_data *sense_data, 
+void scsi_set_sense_data(struct scsi_sense_data *sense_data,
 			 scsi_sense_data_type sense_format, int current_error,
 			 int sense_key, int asc, int ascq, ...) ;
+void scsi_set_sense_data_len(struct scsi_sense_data *sense_data,
+    u_int *sense_len, scsi_sense_data_type sense_format, int current_error,
+    int sense_key, int asc, int ascq, ...) ;
 void scsi_set_sense_data_va(struct scsi_sense_data *sense_data,
-			    scsi_sense_data_type sense_format,
-			    int current_error, int sense_key, int asc,
-			    int ascq, va_list ap);
+    u_int *sense_len, scsi_sense_data_type sense_format,
+    int current_error, int sense_key, int asc, int ascq, va_list ap);
 int scsi_get_sense_info(struct scsi_sense_data *sense_data, u_int sense_len,
 			uint8_t info_type, uint64_t *info,
 			int64_t *signed_info);
@@ -2265,6 +3696,14 @@
 			      u_int sense_len, uint8_t *cdb, int cdb_len,
 			      struct scsi_inquiry_data *inq_data,
 			      struct scsi_sense_desc_header *header);
+void scsi_sense_ata_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
+			 u_int sense_len, uint8_t *cdb, int cdb_len,
+			 struct scsi_inquiry_data *inq_data,
+			 struct scsi_sense_desc_header *header);
+void scsi_sense_forwarded_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
+			      u_int sense_len, uint8_t *cdb, int cdb_len,
+			      struct scsi_inquiry_data *inq_data,
+			      struct scsi_sense_desc_header *header);
 void scsi_sense_generic_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
 			     u_int sense_len, uint8_t *cdb, int cdb_len,
 			     struct scsi_inquiry_data *inq_data,
@@ -2306,8 +3745,10 @@
 			     struct scsi_inquiry_data *inq_data);
 char *		scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string,
 				size_t len);
+void		scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb);
 
 void		scsi_print_inquiry(struct scsi_inquiry_data *inq_data);
+void		scsi_print_inquiry_short(struct scsi_inquiry_data *inq_data);
 
 u_int		scsi_calc_syncsrate(u_int period_factor);
 u_int		scsi_calc_syncparam(u_int period);
@@ -2315,9 +3756,135 @@
 typedef int	(*scsi_devid_checkfn_t)(uint8_t *);
 int		scsi_devid_is_naa_ieee_reg(uint8_t *bufp);
 int		scsi_devid_is_sas_target(uint8_t *bufp);
-uint8_t *	scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t len,
+int		scsi_devid_is_lun_eui64(uint8_t *bufp);
+int		scsi_devid_is_lun_naa(uint8_t *bufp);
+int		scsi_devid_is_lun_name(uint8_t *bufp);
+int		scsi_devid_is_lun_t10(uint8_t *bufp);
+int		scsi_devid_is_lun_md5(uint8_t *bufp);
+int		scsi_devid_is_lun_uuid(uint8_t *bufp);
+int		scsi_devid_is_port_naa(uint8_t *bufp);
+struct scsi_vpd_id_descriptor *
+		scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t len,
 			       scsi_devid_checkfn_t ck_fn);
+struct scsi_vpd_id_descriptor *
+		scsi_get_devid_desc(struct scsi_vpd_id_descriptor *desc, uint32_t len,
+			       scsi_devid_checkfn_t ck_fn);
 
+int		scsi_transportid_sbuf(struct sbuf *sb,
+				      struct scsi_transportid_header *hdr,
+				      uint32_t valid_len);
+
+const char *	scsi_nv_to_str(struct scsi_nv *table, int num_table_entries,
+			       uint64_t value);
+
+scsi_nv_status	scsi_get_nv(struct scsi_nv *table, int num_table_entries,
+			    char *name, int *table_entry, scsi_nv_flags flags);
+
+int	scsi_parse_transportid_64bit(int proto_id, char *id_str,
+				     struct scsi_transportid_header **hdr,
+				     unsigned int *alloc_len,
+#ifdef _KERNEL
+				     struct malloc_type *type, int flags,
+#endif
+				     char *error_str, int error_str_len);
+
+int	scsi_parse_transportid_spi(char *id_str,
+				   struct scsi_transportid_header **hdr,
+				   unsigned int *alloc_len,
+#ifdef _KERNEL
+				   struct malloc_type *type, int flags,
+#endif
+				   char *error_str, int error_str_len);
+
+int	scsi_parse_transportid_rdma(char *id_str,
+				    struct scsi_transportid_header **hdr,
+				    unsigned int *alloc_len,
+#ifdef _KERNEL
+				    struct malloc_type *type, int flags,
+#endif
+				    char *error_str, int error_str_len);
+
+int	scsi_parse_transportid_iscsi(char *id_str,
+				     struct scsi_transportid_header **hdr,
+				     unsigned int *alloc_len,
+#ifdef _KERNEL
+				     struct malloc_type *type, int flags,
+#endif
+				     char *error_str, int error_str_len);
+
+int	scsi_parse_transportid_sop(char *id_str,
+				   struct scsi_transportid_header **hdr,
+				   unsigned int *alloc_len,
+#ifdef _KERNEL
+				   struct malloc_type *type, int flags,
+#endif
+				   char *error_str, int error_str_len);
+
+int	scsi_parse_transportid(char *transportid_str,
+			       struct scsi_transportid_header **hdr,
+			       unsigned int *alloc_len,
+#ifdef _KERNEL
+			       struct malloc_type *type, int flags,
+#endif
+			       char *error_str, int error_str_len);
+
+
+int scsi_attrib_volcoh_sbuf(struct sbuf *sb,
+			    struct scsi_mam_attribute_header *hdr,
+			    uint32_t valid_len, uint32_t flags,
+			    uint32_t output_flags, char *error_str,
+			    int error_str_len);
+
+int scsi_attrib_vendser_sbuf(struct sbuf *sb,
+			     struct scsi_mam_attribute_header *hdr,
+			     uint32_t valid_len, uint32_t flags,
+			     uint32_t output_flags, char *error_str,
+			     int error_str_len);
+
+int scsi_attrib_hexdump_sbuf(struct sbuf *sb,
+			     struct scsi_mam_attribute_header *hdr,
+			     uint32_t valid_len, uint32_t flags,
+			     uint32_t output_flags, char *error_str,
+			     int error_str_len);
+
+int scsi_attrib_int_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+			 uint32_t valid_len, uint32_t flags,
+			 uint32_t output_flags, char *error_str,
+			 int error_str_len);
+
+int scsi_attrib_ascii_sbuf(struct sbuf *sb,
+			   struct scsi_mam_attribute_header *hdr,
+			   uint32_t valid_len, uint32_t flags,
+			   uint32_t output_flags, char *error_str,
+			   int error_str_len);
+
+int scsi_attrib_text_sbuf(struct sbuf *sb,
+			  struct scsi_mam_attribute_header *hdr,
+			  uint32_t valid_len, uint32_t flags,
+			  uint32_t output_flags, char *error_str,
+			  int error_str_len);
+
+struct scsi_attrib_table_entry *scsi_find_attrib_entry(
+			struct scsi_attrib_table_entry *table,
+			size_t num_table_entries, uint32_t id);
+
+struct scsi_attrib_table_entry *scsi_get_attrib_entry(uint32_t id);
+
+int scsi_attrib_value_sbuf(struct sbuf *sb, uint32_t valid_len,
+			   struct scsi_mam_attribute_header *hdr,
+			   uint32_t output_flags, char *error_str,
+			   size_t error_str_len);
+
+void scsi_attrib_prefix_sbuf(struct sbuf *sb, uint32_t output_flags,
+			     struct scsi_mam_attribute_header *hdr,
+			     uint32_t valid_len, const char *desc);
+
+int scsi_attrib_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr,
+		     uint32_t valid_len,
+		     struct scsi_attrib_table_entry *user_table,
+		     size_t num_user_entries, int prefer_user_table,
+		     uint32_t output_flags, char *error_str, int error_str_len);
+
 void		scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
 				     void (*cbfcnp)(struct cam_periph *, 
 						    union ccb *),
@@ -2338,22 +3905,25 @@
 			     u_int8_t sense_len, u_int32_t timeout);
 
 void		scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
-				void (*cbfcnp)(struct cam_periph *,
-					       union ccb *),
-				u_int8_t tag_action, int dbd,
-				u_int8_t page_code, u_int8_t page,
-				u_int8_t *param_buf, u_int32_t param_len,
-				u_int8_t sense_len, u_int32_t timeout);
+		    void (*cbfcnp)(struct cam_periph *, union ccb *),
+		    uint8_t tag_action, int dbd, uint8_t pc, uint8_t page,
+		    uint8_t *param_buf, uint32_t param_len,
+		    uint8_t sense_len, uint32_t timeout);
 
 void		scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
-				    void (*cbfcnp)(struct cam_periph *,
-						   union ccb *),
-				    u_int8_t tag_action, int dbd,
-				    u_int8_t page_code, u_int8_t page,
-				    u_int8_t *param_buf, u_int32_t param_len,
-				    int minimum_cmd_size, u_int8_t sense_len,
-				    u_int32_t timeout);
+		    void (*cbfcnp)(struct cam_periph *, union ccb *),
+		    uint8_t tag_action, int dbd, uint8_t pc, uint8_t page,
+		    uint8_t *param_buf, uint32_t param_len,
+		    int minimum_cmd_size, uint8_t sense_len, uint32_t timeout);
 
+void		scsi_mode_sense_subpage(struct ccb_scsiio *csio,
+		    uint32_t retries,
+		    void (*cbfcnp)(struct cam_periph *, union ccb *),
+		    uint8_t tag_action, int dbd, uint8_t pc,
+		    uint8_t page, uint8_t subpage,
+		    uint8_t *param_buf, uint32_t param_len,
+		    int minimum_cmd_size, uint8_t sense_len, uint32_t timeout);
+
 void		scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries,
 				 void (*cbfcnp)(struct cam_periph *,
 						union ccb *),
@@ -2400,9 +3970,8 @@
 				      void (*cbfcnp)(struct cam_periph *,
 				      union ccb *), uint8_t tag_action,
 				      uint64_t lba, int reladr, int pmi,
-				      struct scsi_read_capacity_data_long
-				      *rcap_buf, uint8_t sense_len,
-				      uint32_t timeout);
+				      uint8_t *rcap_buf, int rcap_buf_len,
+				      uint8_t sense_len, uint32_t timeout);
 
 void		scsi_report_luns(struct ccb_scsiio *csio, u_int32_t retries,
 				 void (*cbfcnp)(struct cam_periph *, 
@@ -2463,6 +4032,10 @@
 			uint8_t *data_ptr, uint32_t param_list_length,
 			uint8_t sense_len, uint32_t timeout);
 
+#define	SCSI_RW_READ	0x0001
+#define	SCSI_RW_WRITE	0x0002
+#define	SCSI_RW_DIRMASK	0x0003
+#define	SCSI_RW_BIO	0x1000
 void scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries,
 		     void (*cbfcnp)(struct cam_periph *, union ccb *),
 		     u_int8_t tag_action, int readop, u_int8_t byte2, 
@@ -2509,7 +4082,55 @@
 		     void (*cbfcnp)(struct cam_periph *, union ccb *),
 		     u_int8_t tag_action, int start, int load_eject,
 		     int immediate, u_int8_t sense_len, u_int32_t timeout);
+void scsi_read_attribute(struct ccb_scsiio *csio, u_int32_t retries, 
+			 void (*cbfcnp)(struct cam_periph *, union ccb *),
+			 u_int8_t tag_action, u_int8_t service_action,
+			 uint32_t element, u_int8_t elem_type,
+			 int logical_volume, int partition,
+			 u_int32_t first_attribute, int cache, u_int8_t *data_ptr,
+			 u_int32_t length, int sense_len, u_int32_t timeout);
+void scsi_write_attribute(struct ccb_scsiio *csio, u_int32_t retries, 
+			  void (*cbfcnp)(struct cam_periph *, union ccb *),
+			  u_int8_t tag_action, uint32_t element,
+			  int logical_volume, int partition, int wtc, u_int8_t *data_ptr,
+			  u_int32_t length, int sense_len, u_int32_t timeout);
 
+void scsi_security_protocol_in(struct ccb_scsiio *csio, uint32_t retries, 
+			       void (*cbfcnp)(struct cam_periph *, union ccb *),
+			       uint8_t tag_action, uint32_t security_protocol,
+			       uint32_t security_protocol_specific, int byte4,
+			       uint8_t *data_ptr, uint32_t dxfer_len,
+			       int sense_len, int timeout);
+
+void scsi_security_protocol_out(struct ccb_scsiio *csio, uint32_t retries, 
+				void (*cbfcnp)(struct cam_periph *,union ccb *),
+				uint8_t tag_action, uint32_t security_protocol,
+				uint32_t security_protocol_specific, int byte4,
+				uint8_t *data_ptr, uint32_t dxfer_len,
+				int sense_len, int timeout);
+
+void scsi_persistent_reserve_in(struct ccb_scsiio *csio, uint32_t retries, 
+				void (*cbfcnp)(struct cam_periph *,union ccb *),
+				uint8_t tag_action, int service_action,
+				uint8_t *data_ptr, uint32_t dxfer_len,
+				int sense_len, int timeout);
+
+void scsi_persistent_reserve_out(struct ccb_scsiio *csio, uint32_t retries, 
+				 void (*cbfcnp)(struct cam_periph *,
+				       union ccb *),
+				 uint8_t tag_action, int service_action,
+				 int scope, int res_type, uint8_t *data_ptr,
+				 uint32_t dxfer_len, int sense_len,
+				 int timeout);
+
+void scsi_report_supported_opcodes(struct ccb_scsiio *csio, uint32_t retries, 
+				   void (*cbfcnp)(struct cam_periph *,
+						  union ccb *),
+				   uint8_t tag_action, int options,
+				   int req_opcode, int req_service_action,
+				   uint8_t *data_ptr, uint32_t dxfer_len,
+				   int sense_len, int timeout);
+
 int		scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry);
 int		scsi_static_inquiry_match(caddr_t inqbuffer,
 					  caddr_t table_entry);

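The scsi_get_devid() rework above is more than a prototype change: callers now
get back the whole VPD designator, with its own type and length fields, instead
of a bare identifier pointer, and the new scsi_devid_is_*() predicates plug in
as the check callback.  A minimal sketch of the new calling convention,
assuming only the FreeBSD 10 declarations above (cd_print_lun_naa is a
hypothetical name):

#include <sys/param.h>
#include <sys/systm.h>
#include <cam/scsi/scsi_all.h>

/*
 * Sketch, not part of this commit: dump the first NAA designator with
 * LUN association from a DEVICE IDENTIFICATION VPD page.
 */
static void
cd_print_lun_naa(struct scsi_vpd_device_id *page, uint32_t page_len)
{
	struct scsi_vpd_id_descriptor *desc;
	int i;

	/* scsi_get_devid() walks the page and applies the predicate. */
	desc = scsi_get_devid(page, page_len, scsi_devid_is_lun_naa);
	if (desc == NULL)
		return;

	printf("NAA LUN designator (%u bytes):", desc->length);
	for (i = 0; i < desc->length; i++)
		printf(" %02x", desc->identifier[i]);
	printf("\n");
}

scsi_get_devid_desc() offers the same walk starting from a raw descriptor
rather than the page header.
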
Modified: trunk/sys/cam/scsi/scsi_cd.c
===================================================================
--- trunk/sys/cam/scsi/scsi_cd.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_cd.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 1997 Justin T. Gibbs.
  * Copyright (c) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Kenneth D. Merry.
@@ -46,7 +47,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_cd.c 291497 2015-11-30 21:23:30Z mav $");
 
 #include "opt_cd.h"
 
@@ -87,9 +88,8 @@
 	CD_Q_NONE		= 0x00,
 	CD_Q_NO_TOUCH		= 0x01,
 	CD_Q_BCD_TRACKS		= 0x02,
-	CD_Q_NO_CHANGER		= 0x04,
-	CD_Q_CHANGER		= 0x08,
-	CD_Q_10_BYTE_ONLY	= 0x10
+	CD_Q_10_BYTE_ONLY	= 0x10,
+	CD_Q_RETRY_BUSY		= 0x40
 } cd_quirks;
 
 #define CD_Q_BIT_STRING		\
@@ -96,9 +96,8 @@
 	"\020"			\
 	"\001NO_TOUCH"		\
 	"\002BCD_TRACKS"	\
-	"\003NO_CHANGER"	\
-	"\004CHANGER"		\
-	"\00510_BYTE_ONLY"
+	"\00510_BYTE_ONLY"	\
+	"\007RETRY_BUSY"
 
 typedef enum {
 	CD_FLAG_INVALID		= 0x0001,
@@ -106,7 +105,6 @@
 	CD_FLAG_DISC_LOCKED	= 0x0004,
 	CD_FLAG_DISC_REMOVABLE	= 0x0008,
 	CD_FLAG_SAW_MEDIA	= 0x0010,
-	CD_FLAG_CHANGER		= 0x0040,
 	CD_FLAG_ACTIVE		= 0x0080,
 	CD_FLAG_SCHED_ON_COMP	= 0x0100,
 	CD_FLAG_RETRY_UA	= 0x0200,
@@ -118,19 +116,11 @@
 typedef enum {
 	CD_CCB_PROBE		= 0x01,
 	CD_CCB_BUFFER_IO	= 0x02,
-	CD_CCB_WAITING		= 0x03,
 	CD_CCB_TUR		= 0x04,
 	CD_CCB_TYPE_MASK	= 0x0F,
 	CD_CCB_RETRY_UA		= 0x10
 } cd_ccb_state;
 
-typedef enum {
-	CHANGER_TIMEOUT_SCHED		= 0x01,
-	CHANGER_SHORT_TMOUT_SCHED	= 0x02,
-	CHANGER_MANUAL_CALL		= 0x04,
-	CHANGER_NEED_TIMEOUT		= 0x08
-} cd_changer_flags;
-
 #define ccb_state ppriv_field0
 #define ccb_bp ppriv_ptr1
 
@@ -158,9 +148,6 @@
 	struct cd_params	params;
 	union ccb		saved_ccb;
 	cd_quirks		quirks;
-	STAILQ_ENTRY(cd_softc)	changer_links;
-	struct cdchanger	*changer;
-	int			bufs_left;
 	struct cam_periph	*periph;
 	int			minimum_command_size;
 	int			outstanding_cmds;
@@ -190,13 +177,6 @@
 };
 
 /*
- * The changer quirk entries aren't strictly necessary.  Basically, what
- * they do is tell cdregister() up front that a device is a changer.
- * Otherwise, it will figure that fact out once it sees a LUN on the device
- * that is greater than 0.  If it is known up front that a device is a changer,
- * all I/O to the device will go through the changer scheduling routines, as
- * opposed to the "normal" CD code.
- *
  * NOTE ON 10_BYTE_ONLY quirks:  Any 10_BYTE_ONLY quirks MUST be because
  * your device hangs when it gets a 10 byte command.  Adding a quirk just
  * to get rid of the informative diagnostic message is not acceptable.  All
@@ -210,20 +190,16 @@
 static struct cd_quirk_entry cd_quirk_table[] =
 {
 	{
-		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NRC", "MBR-7", "*"},
-		 /*quirks*/ CD_Q_CHANGER
+		{ T_CDROM, SIP_MEDIA_REMOVABLE, "CHINON", "CD-ROM CDS-535","*"},
+		/* quirks */ CD_Q_BCD_TRACKS
 	},
 	{
-		{ T_CDROM, SIP_MEDIA_REMOVABLE, "PIONEER", "CD-ROM DRM*",
-		  "*"}, /* quirks */ CD_Q_CHANGER
-	},
-	{
-		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NAKAMICH", "MJ-*", "*"},
-		 /* quirks */ CD_Q_CHANGER
-	},
-	{
-		{ T_CDROM, SIP_MEDIA_REMOVABLE, "CHINON", "CD-ROM CDS-535","*"},
-		/* quirks */ CD_Q_BCD_TRACKS
+		/*
+		 * VMware returns BUSY status when storage has transient
+		 * connectivity problems, so it is better to wait.
+		 */
+		{T_CDROM, SIP_MEDIA_REMOVABLE, "NECVMWar", "VMware IDE CDR10", "*"},
+		/*quirks*/ CD_Q_RETRY_BUSY
 	}
 };
 
@@ -240,17 +216,11 @@
 static	void		cdasync(void *callback_arg, u_int32_t code,
 				struct cam_path *path, void *arg);
 static	int		cdcmdsizesysctl(SYSCTL_HANDLER_ARGS);
-static	void		cdshorttimeout(void *arg);
-static	void		cdschedule(struct cam_periph *periph, int priority);
-static	void		cdrunchangerqueue(void *arg);
-static	void		cdchangerschedule(struct cd_softc *softc);
 static	int		cdrunccb(union ccb *ccb,
 				 int (*error_routine)(union ccb *ccb,
 						      u_int32_t cam_flags,
 						      u_int32_t sense_flags),
 				 u_int32_t cam_flags, u_int32_t sense_flags);
-static	union ccb 	*cdgetccb(struct cam_periph *periph,
-				  u_int32_t priority);
 static	void		cddone(struct cam_periph *periph,
 			       union ccb *start_ccb);
 static	union cd_pages	*cdgetpage(struct cd_mode_params *mode_params);
@@ -312,22 +282,12 @@
 #ifndef	CD_DEFAULT_TIMEOUT
 #define	CD_DEFAULT_TIMEOUT	30000
 #endif
-#ifndef CHANGER_MIN_BUSY_SECONDS
-#define CHANGER_MIN_BUSY_SECONDS	5
-#endif
-#ifndef CHANGER_MAX_BUSY_SECONDS
-#define CHANGER_MAX_BUSY_SECONDS	15
-#endif
 
 static int cd_poll_period = CD_DEFAULT_POLL_PERIOD;
 static int cd_retry_count = CD_DEFAULT_RETRY;
 static int cd_timeout = CD_DEFAULT_TIMEOUT;
-static int changer_min_busy_seconds = CHANGER_MIN_BUSY_SECONDS;
-static int changer_max_busy_seconds = CHANGER_MAX_BUSY_SECONDS;
 
 static SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver");
-static SYSCTL_NODE(_kern_cam_cd, OID_AUTO, changer, CTLFLAG_RD, 0,
-    "CD Changer");
 SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RW,
            &cd_poll_period, 0, "Media polling period in seconds");
 TUNABLE_INT("kern.cam.cd.poll_period", &cd_poll_period);
@@ -337,31 +297,7 @@
 SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RW,
 	   &cd_timeout, 0, "Timeout, in us, for read operations");
 TUNABLE_INT("kern.cam.cd.timeout", &cd_timeout);
-SYSCTL_INT(_kern_cam_cd_changer, OID_AUTO, min_busy_seconds, CTLFLAG_RW,
-	   &changer_min_busy_seconds, 0, "Minimum changer scheduling quantum");
-TUNABLE_INT("kern.cam.cd.changer.min_busy_seconds", &changer_min_busy_seconds);
-SYSCTL_INT(_kern_cam_cd_changer, OID_AUTO, max_busy_seconds, CTLFLAG_RW,
-	   &changer_max_busy_seconds, 0, "Maximum changer scheduling quantum");
-TUNABLE_INT("kern.cam.cd.changer.max_busy_seconds", &changer_max_busy_seconds);
 
-struct cdchanger {
-	path_id_t			 path_id;
-	target_id_t			 target_id;
-	int				 num_devices;
-	struct camq			 devq;
-	struct timeval			 start_time;
-	struct cd_softc			 *cur_device;
-	struct callout			 short_handle;
-	struct callout			 long_handle;
-	volatile cd_changer_flags	 flags;
-	STAILQ_ENTRY(cdchanger)		 changer_links;
-	STAILQ_HEAD(chdevlist, cd_softc) chluns;
-};
-
-static struct mtx changerq_mtx;
-static STAILQ_HEAD(changerlist, cdchanger) changerq;
-static int num_changers;
-
 static MALLOC_DEFINE(M_SCSICD, "scsi_cd", "scsi_cd buffers");
 
 static void
@@ -369,9 +305,6 @@
 {
 	cam_status status;
 
-	mtx_init(&changerq_mtx, "cdchangerq", "SCSI CD Changer List", MTX_DEF);
-	STAILQ_INIT(&changerq);
-
 	/*
 	 * Install a global async callback.  This callback will
 	 * receive async callbacks like "new device found".
@@ -418,17 +351,7 @@
 	 */
 	bioq_flush(&softc->bio_queue, NULL, ENXIO);
 
-	/*
-	 * If this device is part of a changer, and it was scheduled
-	 * to run, remove it from the run queue since we just nuked
-	 * all of its scheduled I/O.
-	 */
-	if ((softc->flags & CD_FLAG_CHANGER)
-	 && (softc->pinfo.index != CAM_UNQUEUED_INDEX))
-		camq_remove(&softc->changer->devq, softc->pinfo.index);
-
 	disk_gone(softc->disk);
-	xpt_print(periph->path, "lost device, %d refs\n", periph->refcount);
 }
 
 static void
@@ -438,75 +361,6 @@
 
 	softc = (struct cd_softc *)periph->softc;
 
-	xpt_print(periph->path, "removing device entry\n");
-
-	/*
-	 * In the queued, non-active case, the device in question
-	 * has already been removed from the changer run queue.  Since this
-	 * device is active, we need to de-activate it, and schedule
-	 * another device to run.  (if there is another one to run)
-	 */
-	if ((softc->flags & CD_FLAG_CHANGER)
-	 && (softc->flags & CD_FLAG_ACTIVE)) {
-
-		/*
-		 * The purpose of the short timeout is soley to determine
-		 * whether the current device has finished or not.  Well,
-		 * since we're removing the active device, we know that it
-		 * is finished.  So, get rid of the short timeout.
-		 * Otherwise, if we're in the time period before the short
-		 * timeout fires, and there are no other devices in the
-		 * queue to run, there won't be any other device put in the
-		 * active slot.  i.e., when we call cdrunchangerqueue()
-		 * below, it won't do anything.  Then, when the short
-		 * timeout fires, it'll look at the "current device", which
-		 * we are free below, and possibly panic the kernel on a
-		 * bogus pointer reference.
-		 *
-		 * The long timeout doesn't really matter, since we
-		 * decrement the qfrozen_cnt to indicate that there is
-		 * nothing in the active slot now.  Therefore, there won't
-		 * be any bogus pointer references there.
-		 */
-		if (softc->changer->flags & CHANGER_SHORT_TMOUT_SCHED) {
-			callout_stop(&softc->changer->short_handle);
-			softc->changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED;
-		}
-		softc->changer->devq.qfrozen_cnt[0]--;
-		softc->changer->flags |= CHANGER_MANUAL_CALL;
-		cdrunchangerqueue(softc->changer);
-	}
-
-	/*
-	 * If we're removing the last device on the changer, go ahead and
-	 * remove the changer device structure.
-	 */
-	if ((softc->flags & CD_FLAG_CHANGER)
-	 && (--softc->changer->num_devices == 0)) {
-
-		/*
-		 * Theoretically, there shouldn't be any timeouts left, but
-		 * I'm not completely sure that that will be the case.  So,
-		 * it won't hurt to check and see if there are any left.
-		 */
-		if (softc->changer->flags & CHANGER_TIMEOUT_SCHED) {
-			callout_stop(&softc->changer->long_handle);
-			softc->changer->flags &= ~CHANGER_TIMEOUT_SCHED;
-		}
-
-		if (softc->changer->flags & CHANGER_SHORT_TMOUT_SCHED) {
-			callout_stop(&softc->changer->short_handle);
-			softc->changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED;
-		}
-
-		mtx_lock(&changerq_mtx);
-		STAILQ_REMOVE(&changerq, softc->changer, cdchanger,
-			      changer_links);
-		num_changers--;
-		mtx_unlock(&changerq_mtx);
-		xpt_print(periph->path, "removing changer entry\n");
-		free(softc->changer, M_DEVBUF);
-	}
 	cam_periph_unlock(periph);
 	if ((softc->flags & CD_FLAG_SCTX_INIT) != 0
 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
@@ -539,7 +393,8 @@
 
 		if (cgd->protocol != PROTO_SCSI)
 			break;
-
+		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
+			break;
 		if (SID_TYPE(&cgd->inq_data) != T_CDROM
 		    && SID_TYPE(&cgd->inq_data) != T_WORM)
 			break;
@@ -552,7 +407,7 @@
 		status = cam_periph_alloc(cdregister, cdoninvalidate,
 					  cdcleanup, cdstart,
 					  "cd", CAM_PERIPH_BIO,
-					  cgd->ccb_h.path, cdasync,
+					  path, cdasync,
 					  AC_FOUND_DEVICE, cgd);
 
 		if (status != CAM_REQ_CMP
@@ -838,237 +693,16 @@
 	    AC_SCSI_AEN | AC_UNIT_ATTENTION, cdasync, periph, periph->path);
 
 	/*
-	 * If the target lun is greater than 0, we most likely have a CD
-	 * changer device.  Check the quirk entries as well, though, just
-	 * in case someone has a CD tower with one lun per drive or
-	 * something like that.  Also, if we know up front that a
-	 * particular device is a changer, we can mark it as such starting
-	 * with lun 0, instead of lun 1.  It shouldn't be necessary to have
-	 * a quirk entry to define something as a changer, however.
-	 */
-	if (((cgd->ccb_h.target_lun > 0)
-	  && ((softc->quirks & CD_Q_NO_CHANGER) == 0))
-	 || ((softc->quirks & CD_Q_CHANGER) != 0)) {
-		struct cdchanger *nchanger;
-		struct cam_periph *nperiph;
-		struct cam_path *path;
-		cam_status status;
-		int found;
-
-		/* Set the changer flag in the current device's softc */
-		softc->flags |= CD_FLAG_CHANGER;
-
-		/*
-		 * Now, look around for an existing changer device with the
-		 * same path and target ID as the current device.
-		 */
-		mtx_lock(&changerq_mtx);
-		for (found = 0,
-		     nchanger = (struct cdchanger *)STAILQ_FIRST(&changerq);
-		     nchanger != NULL;
-		     nchanger = STAILQ_NEXT(nchanger, changer_links)){
-			if ((nchanger->path_id == cgd->ccb_h.path_id) 
-			 && (nchanger->target_id == cgd->ccb_h.target_id)) {
-				found = 1;
-				break;
-			}
-		}
-		mtx_unlock(&changerq_mtx);
-
-		/*
-		 * If we found a matching entry, just add this device to
-		 * the list of devices on this changer.
-		 */
-		if (found == 1) {
-			struct chdevlist *chlunhead;
-
-			chlunhead = &nchanger->chluns;
-
-			/*
-			 * XXX KDM look at consolidating this code with the
-			 * code below in a separate function.
-			 */
-
-			/*
-			 * Create a path with lun id 0, and see if we can
-			 * find a matching device
-			 */
-			status = xpt_create_path(&path, /*periph*/ periph,
-						 cgd->ccb_h.path_id,
-						 cgd->ccb_h.target_id, 0);
-
-			if ((status == CAM_REQ_CMP)
-			 && ((nperiph = cam_periph_find(path, "cd")) != NULL)){
-				struct cd_softc *nsoftc;
-
-				nsoftc = (struct cd_softc *)nperiph->softc;
-
-				if ((nsoftc->flags & CD_FLAG_CHANGER) == 0){
-					nsoftc->flags |= CD_FLAG_CHANGER;
-					nchanger->num_devices++;
-					if (camq_resize(&nchanger->devq,
-					   nchanger->num_devices)!=CAM_REQ_CMP){
-						printf("cdregister: "
-						       "camq_resize "
-						       "failed, changer "
-						       "support may "
-						       "be messed up\n");
-					}
-					nsoftc->changer = nchanger;
-					nsoftc->pinfo.index =CAM_UNQUEUED_INDEX;
-
-					STAILQ_INSERT_TAIL(&nchanger->chluns,
-							  nsoftc,changer_links);
-				}
-				xpt_free_path(path);
-			} else if (status == CAM_REQ_CMP)
-				xpt_free_path(path);
-			else {
-				printf("cdregister: unable to allocate path\n"
-				       "cdregister: changer support may be "
-				       "broken\n");
-			}
-
-			nchanger->num_devices++;
-
-			softc->changer = nchanger;
-			softc->pinfo.index = CAM_UNQUEUED_INDEX;
-
-			if (camq_resize(&nchanger->devq,
-			    nchanger->num_devices) != CAM_REQ_CMP) {
-				printf("cdregister: camq_resize "
-				       "failed, changer support may "
-				       "be messed up\n");
-			}
-
-			STAILQ_INSERT_TAIL(chlunhead, softc, changer_links);
-		}
-		/*
-		 * In this case, we don't already have an entry for this
-		 * particular changer, so we need to create one, add it to
-		 * the queue, and queue this device on the list for this
-		 * changer.  Before we queue this device, however, we need
-		 * to search for lun id 0 on this target, and add it to the
-		 * queue first, if it exists.  (and if it hasn't already
-		 * been marked as part of the changer.)
-		 */
-		else {
-			nchanger = malloc(sizeof(struct cdchanger),
-				M_DEVBUF, M_NOWAIT | M_ZERO);
-			if (nchanger == NULL) {
-				softc->flags &= ~CD_FLAG_CHANGER;
-				printf("cdregister: unable to malloc "
-				       "changer structure\ncdregister: "
-				       "changer support disabled\n");
-
-				/*
-				 * Yes, gotos can be gross but in this case
-				 * I think it's justified..
-				 */
-				goto cdregisterexit;
-			}
-			if (camq_init(&nchanger->devq, 1) != 0) {
-				softc->flags &= ~CD_FLAG_CHANGER;
-				printf("cdregister: changer support "
-				       "disabled\n");
-				goto cdregisterexit;
-			}
-
-			nchanger->path_id = cgd->ccb_h.path_id;
-			nchanger->target_id = cgd->ccb_h.target_id;
-
-			/* this is superfluous, but it makes things clearer */
-			nchanger->num_devices = 0;
-
-			STAILQ_INIT(&nchanger->chluns);
-
-			callout_init_mtx(&nchanger->long_handle,
-			    periph->sim->mtx, 0);
-			callout_init_mtx(&nchanger->short_handle,
-			    periph->sim->mtx, 0);
-
-			mtx_lock(&changerq_mtx);
-			num_changers++;
-			STAILQ_INSERT_TAIL(&changerq, nchanger,
-					   changer_links);
-			mtx_unlock(&changerq_mtx);
-			
-			/*
-			 * Create a path with lun id 0, and see if we can
-			 * find a matching device
-			 */
-			status = xpt_create_path(&path, /*periph*/ periph,
-						 cgd->ccb_h.path_id,
-						 cgd->ccb_h.target_id, 0);
-
-			/*
-			 * If we were able to allocate the path, and if we
-			 * find a matching device and it isn't already
-			 * marked as part of a changer, then we add it to
-			 * the current changer.
-			 */
-			if ((status == CAM_REQ_CMP)
-			 && ((nperiph = cam_periph_find(path, "cd")) != NULL)
-			 && ((((struct cd_softc *)periph->softc)->flags &
-			       CD_FLAG_CHANGER) == 0)) {
-				struct cd_softc *nsoftc;
-
-				nsoftc = (struct cd_softc *)nperiph->softc;
-
-				nsoftc->flags |= CD_FLAG_CHANGER;
-				nchanger->num_devices++;
-				if (camq_resize(&nchanger->devq,
-				    nchanger->num_devices) != CAM_REQ_CMP) {
-					printf("cdregister: camq_resize "
-					       "failed, changer support may "
-					       "be messed up\n");
-				}
-				nsoftc->changer = nchanger;
-				nsoftc->pinfo.index = CAM_UNQUEUED_INDEX;
-
-				STAILQ_INSERT_TAIL(&nchanger->chluns,
-						   nsoftc, changer_links);
-				xpt_free_path(path);
-			} else if (status == CAM_REQ_CMP)
-				xpt_free_path(path);
-			else {
-				printf("cdregister: unable to allocate path\n"
-				       "cdregister: changer support may be "
-				       "broken\n");
-			}
-
-			softc->changer = nchanger;
-			softc->pinfo.index = CAM_UNQUEUED_INDEX;
-			nchanger->num_devices++;
-			if (camq_resize(&nchanger->devq,
-			    nchanger->num_devices) != CAM_REQ_CMP) {
-				printf("cdregister: camq_resize "
-				       "failed, changer support may "
-				       "be messed up\n");
-			}
-			STAILQ_INSERT_TAIL(&nchanger->chluns, softc,
-					   changer_links);
-		}
-	}
-
-	/*
 	 * Schedule a periodic media polling events.
 	 */
-	callout_init_mtx(&softc->mediapoll_c, periph->sim->mtx, 0);
+	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
 	if ((softc->flags & CD_FLAG_DISC_REMOVABLE) &&
-	    (softc->flags & CD_FLAG_CHANGER) == 0 &&
 	    (cgd->inq_flags & SID_AEN) == 0 &&
 	    cd_poll_period != 0)
 		callout_reset(&softc->mediapoll_c, cd_poll_period * hz,
 		    cdmediapoll, periph);
 
-cdregisterexit:
-
-	if ((softc->flags & CD_FLAG_CHANGER) == 0)
-		xpt_schedule(periph, CAM_PRIORITY_DEV);
-	else
-		cdschedule(periph, CAM_PRIORITY_DEV);
-
+	xpt_schedule(periph, CAM_PRIORITY_DEV);
 	return(CAM_REQ_CMP);
 }
 
@@ -1157,251 +791,6 @@
 	return (0);
 }
 
-static void
-cdshorttimeout(void *arg)
-{
-	struct cdchanger *changer;
-
-	changer = (struct cdchanger *)arg;
-
-	/* Always clear the short timeout flag, since that's what we're in */
-	changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED;
-
-	/*
-	 * Check to see if there is any more pending or outstanding I/O for
-	 * this device.  If not, move it out of the active slot.
-	 */
-	if ((bioq_first(&changer->cur_device->bio_queue) == NULL)
-	 && (changer->cur_device->outstanding_cmds == 0)) {
-		changer->flags |= CHANGER_MANUAL_CALL;
-		cdrunchangerqueue(changer);
-	}
-}
-
-/*
- * This is a wrapper for xpt_schedule.  It only applies to changers.
- */
-static void
-cdschedule(struct cam_periph *periph, int priority)
-{
-	struct cd_softc *softc;
-
-	softc = (struct cd_softc *)periph->softc;
-
-	/*
-	 * If this device isn't currently queued, and if it isn't
-	 * the active device, then we queue this device and run the
-	 * changer queue if there is no timeout scheduled to do it.
-	 * If this device is the active device, just schedule it
-	 * to run again.  If this device is queued, there should be
-	 * a timeout in place already that will make sure it runs.
-	 */
-	if ((softc->pinfo.index == CAM_UNQUEUED_INDEX) 
-	 && ((softc->flags & CD_FLAG_ACTIVE) == 0)) {
-		/*
-		 * We don't do anything with the priority here.
-		 * This is strictly a fifo queue.
-		 */
-		softc->pinfo.priority = CAM_PRIORITY_NORMAL;
-		softc->pinfo.generation = ++softc->changer->devq.generation;
-		camq_insert(&softc->changer->devq, (cam_pinfo *)softc);
-
-		/*
-		 * Since we just put a device in the changer queue,
-		 * check and see if there is a timeout scheduled for
-		 * this changer.  If so, let the timeout handle
-		 * switching this device into the active slot.  If
-		 * not, manually call the timeout routine to
-		 * bootstrap things.
-		 */
-		if (((softc->changer->flags & CHANGER_TIMEOUT_SCHED)==0)
-		 && ((softc->changer->flags & CHANGER_NEED_TIMEOUT)==0)
-		 && ((softc->changer->flags & CHANGER_SHORT_TMOUT_SCHED)==0)){
-			softc->changer->flags |= CHANGER_MANUAL_CALL;
-			cdrunchangerqueue(softc->changer);
-		}
-	} else if ((softc->flags & CD_FLAG_ACTIVE)
-		&& ((softc->flags & CD_FLAG_SCHED_ON_COMP) == 0))
-		xpt_schedule(periph, priority);
-}
-
-static void
-cdrunchangerqueue(void *arg)
-{
-	struct cd_softc *softc;
-	struct cdchanger *changer;
-	int called_from_timeout;
-
-	changer = (struct cdchanger *)arg;
-
-	/*
-	 * If we have NOT been called from cdstrategy() or cddone(), and
-	 * instead from a timeout routine, go ahead and clear the
-	 * timeout flag.
-	 */
-	if ((changer->flags & CHANGER_MANUAL_CALL) == 0) {
-		changer->flags &= ~CHANGER_TIMEOUT_SCHED;
-		called_from_timeout = 1;
-	} else
-		called_from_timeout = 0;
-
-	/* Always clear the manual call flag */
-	changer->flags &= ~CHANGER_MANUAL_CALL;
-
-	/* nothing to do if the queue is empty */
-	if (changer->devq.entries <= 0) {
-		return;
-	}
-
-	/*
-	 * If the changer queue is frozen, that means we have an active
-	 * device.
-	 */
-	if (changer->devq.qfrozen_cnt[0] > 0) {
-
-		/*
-		 * We always need to reset the frozen count and clear the
-		 * active flag.
-		 */
-		changer->devq.qfrozen_cnt[0]--;
-		changer->cur_device->flags &= ~CD_FLAG_ACTIVE;
-		changer->cur_device->flags &= ~CD_FLAG_SCHED_ON_COMP;
-
-		if (changer->cur_device->outstanding_cmds > 0) {
-			changer->cur_device->flags |= CD_FLAG_SCHED_ON_COMP;
-			changer->cur_device->bufs_left = 
-				changer->cur_device->outstanding_cmds;
-			if (called_from_timeout) {
-				callout_reset(&changer->long_handle,
-			            changer_max_busy_seconds * hz,
-				    cdrunchangerqueue, changer);
-				changer->flags |= CHANGER_TIMEOUT_SCHED;
-			}
-			return;
-		}
-
-		/*
-		 * Check to see whether the current device has any I/O left
-		 * to do.  If so, requeue it at the end of the queue.  If
-		 * not, there is no need to requeue it.
-		 */
-		if (bioq_first(&changer->cur_device->bio_queue) != NULL) {
-
-			changer->cur_device->pinfo.generation =
-				++changer->devq.generation;
-			camq_insert(&changer->devq,
-				(cam_pinfo *)changer->cur_device);
-		} 
-	}
-
-	softc = (struct cd_softc *)camq_remove(&changer->devq, CAMQ_HEAD);
-
-	changer->cur_device = softc;
-
-	changer->devq.qfrozen_cnt[0]++;
-	softc->flags |= CD_FLAG_ACTIVE;
-
-	/* Just in case this device is waiting */
-	wakeup(&softc->changer);
-	xpt_schedule(softc->periph, CAM_PRIORITY_NORMAL);
-
-	/*
-	 * Get rid of any pending timeouts, and set a flag to schedule new
-	 * ones so this device gets its full time quantum.
-	 */
-	if (changer->flags & CHANGER_TIMEOUT_SCHED) {
-		callout_stop(&changer->long_handle);
-		changer->flags &= ~CHANGER_TIMEOUT_SCHED;
-	}
-
-	if (changer->flags & CHANGER_SHORT_TMOUT_SCHED) {
-		callout_stop(&changer->short_handle);
-		changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED;
-	}
-
-	/*
-	 * We need to schedule timeouts, but we only do this after the
-	 * first transaction has completed.  This eliminates the changer
-	 * switch time.
-	 */
-	changer->flags |= CHANGER_NEED_TIMEOUT;
-}
-
-static void
-cdchangerschedule(struct cd_softc *softc)
-{
-	struct cdchanger *changer;
-
-	changer = softc->changer;
-
-	/*
-	 * If this is a changer, and this is the current device,
-	 * and this device has at least the minimum time quantum to
-	 * run, see if we can switch it out.
-	 */
-	if ((softc->flags & CD_FLAG_ACTIVE) 
-	 && ((changer->flags & CHANGER_SHORT_TMOUT_SCHED) == 0)
-	 && ((changer->flags & CHANGER_NEED_TIMEOUT) == 0)) {
-		/*
-		 * We try three things here.  The first is that we
-		 * check to see whether the schedule on completion
-		 * flag is set.  If it is, we decrement the number
-		 * of buffers left, and if it's zero, we reschedule.
-		 * Next, we check to see whether the pending buffer
-		 * queue is empty and whether there are no
-		 * outstanding transactions.  If so, we reschedule.
-		 * Next, we see if the pending buffer queue is empty.
-		 * If it is, we set the number of buffers left to
-		 * the current active buffer count and set the
-		 * schedule on complete flag.
-		 */
-		if (softc->flags & CD_FLAG_SCHED_ON_COMP) {
-		 	if (--softc->bufs_left == 0) {
-				softc->changer->flags |=
-					CHANGER_MANUAL_CALL;
-				softc->flags &= ~CD_FLAG_SCHED_ON_COMP;
-				cdrunchangerqueue(softc->changer);
-			}
-		} else if ((bioq_first(&softc->bio_queue) == NULL)
-		        && (softc->outstanding_cmds == 0)) {
-			softc->changer->flags |= CHANGER_MANUAL_CALL;
-			cdrunchangerqueue(softc->changer);
-		}
-	} else if ((softc->changer->flags & CHANGER_NEED_TIMEOUT) 
-		&& (softc->flags & CD_FLAG_ACTIVE)) {
-
-		/*
-		 * Now that the first transaction to this
-		 * particular device has completed, we can go ahead
-		 * and schedule our timeouts.
-		 */
-		if ((changer->flags & CHANGER_TIMEOUT_SCHED) == 0) {
-			callout_reset(&changer->long_handle,
-			    changer_max_busy_seconds * hz,
-			    cdrunchangerqueue, changer);
-			changer->flags |= CHANGER_TIMEOUT_SCHED;
-		} else
-			printf("cdchangerschedule: already have a long"
-			       " timeout!\n");
-
-		if ((changer->flags & CHANGER_SHORT_TMOUT_SCHED) == 0) {
-			callout_reset(&changer->short_handle,
-			    changer_min_busy_seconds * hz,
-			    cdshorttimeout, changer);
-			changer->flags |= CHANGER_SHORT_TMOUT_SCHED;
-		} else
-			printf("cdchangerschedule: already have a short "
-			       "timeout!\n");
-
-		/*
-		 * We just scheduled timeouts, no need to schedule
-		 * more.
-		 */
-		changer->flags &= ~CHANGER_NEED_TIMEOUT;
-
-	}
-}
-
 static int
 cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb,
 					      u_int32_t cam_flags,
@@ -1418,50 +807,9 @@
 	error = cam_periph_runccb(ccb, error_routine, cam_flags, sense_flags,
 				  softc->disk->d_devstat);
 
-	if (softc->flags & CD_FLAG_CHANGER)
-		cdchangerschedule(softc);
-
 	return(error);
 }
 
-static union ccb *
-cdgetccb(struct cam_periph *periph, u_int32_t priority)
-{
-	struct cd_softc *softc;
-
-	softc = (struct cd_softc *)periph->softc;
-
-	if (softc->flags & CD_FLAG_CHANGER) {
-		/*
-		 * This should work the first time this device is woken up,
-		 * but just in case it doesn't, we use a while loop.
-		 */
-		while ((softc->flags & CD_FLAG_ACTIVE) == 0) {
-			/*
-			 * If this changer isn't already queued, queue it up.
-			 */
-			if (softc->pinfo.index == CAM_UNQUEUED_INDEX) {
-				softc->pinfo.priority = CAM_PRIORITY_NORMAL;
-				softc->pinfo.generation =
-					++softc->changer->devq.generation;
-				camq_insert(&softc->changer->devq,
-					    (cam_pinfo *)softc);
-			}
-			if (((softc->changer->flags & CHANGER_TIMEOUT_SCHED)==0)
-			 && ((softc->changer->flags & CHANGER_NEED_TIMEOUT)==0)
-			 && ((softc->changer->flags
-			      & CHANGER_SHORT_TMOUT_SCHED)==0)) {
-				softc->changer->flags |= CHANGER_MANUAL_CALL;
-				cdrunchangerqueue(softc->changer);
-			} else
-				cam_periph_sleep(periph, &softc->changer,
-				    PRIBIO, "cgticb", 0);
-		}
-	}
-	return(cam_periph_getccb(periph, priority));
-}
-
-
 /*
  * Actually translate the requested transfer into one the physical driver
  * can understand.  The transfer is described by a buf and will include
@@ -1509,14 +857,7 @@
 	 */
 	bioq_disksort(&softc->bio_queue, bp);
 
-	/*
-	 * Schedule ourselves for performing the work.  We do things
-	 * differently for changers.
-	 */
-	if ((softc->flags & CD_FLAG_CHANGER) == 0)
-		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
-	else
-		cdschedule(periph, CAM_PRIORITY_NORMAL);
+	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 
 	cam_periph_unlock(periph);
 	return;
@@ -1538,14 +879,7 @@
 	case CD_STATE_NORMAL:
 	{
 		bp = bioq_first(&softc->bio_queue);
-		if (periph->immediate_priority <= periph->pinfo.priority) {
-			start_ccb->ccb_h.ccb_state = CD_CCB_WAITING;
-
-			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-					  periph_links.sle);
-			periph->immediate_priority = CAM_PRIORITY_NONE;
-			wakeup(&periph->ccb_list);
-		} else if (bp == NULL) {
+		if (bp == NULL) {
 			if (softc->tur) {
 				softc->tur = 0;
 				csio = &start_ccb->csio;
@@ -1571,7 +905,8 @@
 					/*retries*/ cd_retry_count,
 					/* cbfcnp */ cddone,
 					MSG_SIMPLE_Q_TAG,
-					/* read */bp->bio_cmd == BIO_READ,
+					/* read */bp->bio_cmd == BIO_READ ?
+					SCSI_RW_READ : SCSI_RW_WRITE,
 					/* byte2 */ 0,
 					/* minimum_cmd_size */ 10,
 					/* lba */ bp->bio_offset /
@@ -1608,11 +943,9 @@
 
 			xpt_action(start_ccb);
 		}
-		if (bp != NULL || softc->tur ||
-		    periph->immediate_priority != CAM_PRIORITY_NONE) {
+		if (bp != NULL || softc->tur) {
 			/* Have more work to do, so ensure we stay scheduled */
-			xpt_schedule(periph, min(CAM_PRIORITY_NORMAL,
-			    periph->immediate_priority));
+			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 		}
 		break;
 	}
@@ -1711,9 +1044,6 @@
 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
 		softc->outstanding_cmds--;
 
-		if (softc->flags & CD_FLAG_CHANGER)
-			cdchangerschedule(softc);
-
 		biofinish(bp, NULL, 0);
 		break;
 	}
@@ -1755,11 +1085,11 @@
 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP ||
 		    (error = cderror(done_ccb, CAM_RETRY_SELTO,
 				SF_RETRY_UA | SF_NO_PRINT)) == 0) {
-
 			snprintf(announce_buf, sizeof(announce_buf),
-				"cd present [%lu x %lu byte records]",
-				cdp->disksize, (u_long)cdp->blksize);
-
+			    "%juMB (%ju %u byte sectors)",
+			    ((uintmax_t)cdp->disksize * cdp->blksize) /
+			     (1024 * 1024),
+			    (uintmax_t)cdp->disksize, cdp->blksize);
 		} else {
 			if (error == ERESTART) {
 				/*
@@ -1876,8 +1206,6 @@
 			xpt_announce_periph(periph, announce_buf);
 			xpt_announce_quirks(periph, softc->quirks,
 			    CD_Q_BIT_STRING);
-			if (softc->flags & CD_FLAG_CHANGER)
-				cdchangerschedule(softc);
 			/*
 			 * Create our sysctl variables, now that we know
 			 * we have successfully attached.
@@ -1897,15 +1225,6 @@
 		cam_periph_unhold(periph);
 		return;
 	}
-	case CD_CCB_WAITING:
-	{
-		/* Caller will release the CCB */
-		CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 
-			  ("trying to wakeup ccbwait\n"));
-
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
-	}
 	case CD_CCB_TUR:
 	{
 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
@@ -2862,7 +2181,7 @@
 		return;
 	}
 	    
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	scsi_prevent(&ccb->csio, 
 		     /*retries*/ cd_retry_count,
@@ -3042,7 +2361,7 @@
 
 	softc = (struct cd_softc *)periph->softc;
              
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	/* XXX Should be M_WAITOK */
 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_data), 
@@ -3277,6 +2596,9 @@
 	 * don't treat UAs as errors.
 	 */
 	sense_flags |= SF_RETRY_UA;
+
+	if (softc->quirks & CD_Q_RETRY_BUSY)
+		sense_flags |= SF_RETRY_BUSY;
 	return (cam_periph_error(ccb, cam_flags, sense_flags, 
 				 &softc->saved_ccb));
 }
@@ -3287,9 +2609,6 @@
 	struct cam_periph *periph = arg;
 	struct cd_softc *softc = periph->softc;
 
-	if (softc->flags & CD_FLAG_CHANGER)
-		return;
-
 	if (softc->state == CD_STATE_NORMAL && !softc->tur &&
 	    softc->outstanding_cmds == 0) {
 		if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
@@ -3318,7 +2637,7 @@
 	ntoc = len;
 	error = 0;
 
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	csio = &ccb->csio;
 
@@ -3365,7 +2684,7 @@
 
 	error = 0;
 
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	csio = &ccb->csio;
 
@@ -3417,7 +2736,7 @@
 
 	softc = (struct cd_softc *)periph->softc;
 
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	csio = &ccb->csio;
 
@@ -3516,7 +2835,7 @@
 
 	softc = (struct cd_softc *)periph->softc;
 
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	csio = &ccb->csio;
 
@@ -3608,7 +2927,7 @@
 	u_int8_t cdb_len;
 
 	error = 0;
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 	csio = &ccb->csio;
 	/*
 	 * Use the smallest possible command to perform the operation.
@@ -3665,7 +2984,7 @@
 
 	error = 0;
 
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	csio = &ccb->csio;
 
@@ -3711,7 +3030,7 @@
 
 	error = 0;
 
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	csio = &ccb->csio;
 
@@ -3753,7 +3072,7 @@
 
 	error = 0;
 
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	csio = &ccb->csio;
 
@@ -3790,7 +3109,7 @@
 
 	error = 0;
 
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	scsi_start_stop(&ccb->csio,
 			/* retries */ cd_retry_count,
@@ -3818,7 +3137,7 @@
 
 	error = 0;
 
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	scsi_start_stop(&ccb->csio,
 			/* retries */ cd_retry_count,
@@ -3847,7 +3166,7 @@
 	int error;
 
 	error = 0;
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 	csio = &ccb->csio;
 
 	/* Preserve old behavior: units in multiples of CDROM speed */
@@ -3929,7 +3248,7 @@
 		databuf = NULL;
 
 	cam_periph_lock(periph);
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	scsi_report_key(&ccb->csio,
 			/* retries */ cd_retry_count,
@@ -4107,7 +3426,7 @@
 	}
 
 	cam_periph_lock(periph);
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	scsi_send_key(&ccb->csio,
 		      /* retries */ cd_retry_count,
@@ -4211,7 +3530,7 @@
 		databuf = NULL;
 
 	cam_periph_lock(periph);
-	ccb = cdgetccb(periph, CAM_PRIORITY_NORMAL);
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
 	scsi_read_dvd_structure(&ccb->csio,
 				/* retries */ cd_retry_count,

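With the changer scheduling machinery gone, scsi_cd.c leans entirely on the
generic periph error recovery, and the new CD_Q_RETRY_BUSY quirk simply widens
the sense flags handed to cam_periph_error().  A condensed sketch of that
path, mirroring the cderror() hunk above (cd_error_hint is a hypothetical
name; CD_Q_RETRY_BUSY itself is private to scsi_cd.c, so its 0x40 value is
spelled out):

#include <sys/param.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>

static int
cd_error_hint(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags,
    u_int32_t quirks, union ccb *saved_ccb)
{
	/* Removable media: unit attentions are expected, not fatal. */
	sense_flags |= SF_RETRY_UA;

	/*
	 * Quirked devices (e.g. the VMware virtual CD-ROM entry above)
	 * return BUSY on transient storage problems; retry those too.
	 */
	if (quirks & 0x40)	/* CD_Q_RETRY_BUSY */
		sense_flags |= SF_RETRY_BUSY;

	return (cam_periph_error(ccb, cam_flags, sense_flags, saved_ccb));
}

The same sync also turns the boolean readop argument of scsi_read_write()
into the SCSI_RW_READ/SCSI_RW_WRITE direction flags, as the cdstart() hunk
above shows.
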
Modified: trunk/sys/cam/scsi/scsi_cd.h
===================================================================
--- trunk/sys/cam/scsi/scsi_cd.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_cd.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2000, 2002 Kenneth D. Merry
  * All rights reserved.
@@ -41,7 +42,7 @@
  * Ported to run under 386BSD by Julian Elischer (julian at tfs.com) Sept 1992
  *
  *	from: scsi_cd.h,v 1.10 1997/02/22 09:44:28 peter Exp $
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_cd.h 288812 2015-10-05 11:38:51Z mav $
  */
 #ifndef	_SCSI_SCSI_CD_H
 #define _SCSI_SCSI_CD_H 1
@@ -56,6 +57,83 @@
  * SCSI command format
  */
 
+struct scsi_get_config
+{
+	uint8_t opcode;
+	uint8_t rt;
+#define	SGC_RT_ALL		0x00
+#define	SGC_RT_CURRENT		0x01
+#define	SGC_RT_SPECIFIC		0x02
+#define	SGC_RT_MASK		0x03
+	uint8_t starting_feature[2];
+	uint8_t reserved[3];
+	uint8_t length[2];
+	uint8_t control;
+};
+
+struct scsi_get_config_header
+{
+	uint8_t data_length[4];
+	uint8_t reserved[2];
+	uint8_t current_profile[2];
+};
+
+struct scsi_get_config_feature
+{
+	uint8_t feature_code[2];
+	uint8_t flags;
+#define	SGC_F_CURRENT		0x01
+#define	SGC_F_PERSISTENT	0x02
+#define	SGC_F_VERSION_MASK	0x2C
+#define	SGC_F_VERSION_SHIFT	2
+	uint8_t add_length;
+	uint8_t feature_data[];
+};
+
+struct scsi_get_event_status
+{
+	uint8_t opcode;
+	uint8_t byte2;
+#define	SGESN_POLLED		1
+	uint8_t reserved[2];
+	uint8_t notif_class;
+	uint8_t reserved2[2];
+	uint8_t length[2];
+	uint8_t control;
+};
+
+struct scsi_get_event_status_header
+{
+	uint8_t descr_length[4];
+	uint8_t nea_class;
+#define	SGESN_NEA		0x80
+	uint8_t supported_class;
+};
+
+struct scsi_get_event_status_descr
+{
+	uint8_t event_code;
+	uint8_t event_info[];
+};
+
+struct scsi_mechanism_status
+{
+	uint8_t opcode;
+	uint8_t reserved[7];
+	uint8_t length[2];
+	uint8_t reserved2;
+	uint8_t control;
+};
+
+struct scsi_mechanism_status_header
+{
+	uint8_t state1;
+	uint8_t state2;
+	uint8_t lba[3];
+	uint8_t slots_num;
+	uint8_t slots_length[2];
+};
+
 struct scsi_pause
 {
 	u_int8_t op_code;
@@ -151,12 +229,29 @@
 {
 	u_int8_t op_code;
 	u_int8_t byte2;
-	u_int8_t unused[4];
+	u_int8_t format;
+	u_int8_t unused[3];
 	u_int8_t from_track;
 	u_int8_t data_len[2];
 	u_int8_t control;
 };
 
+struct scsi_read_toc_hdr
+{
+	uint8_t data_length[2];
+	uint8_t first;
+	uint8_t last;
+};
+
+struct scsi_read_toc_type01_descr
+{
+	uint8_t reserved;
+	uint8_t addr_ctl;
+	uint8_t track_number;
+	uint8_t reserved2;
+	uint8_t track_start[4];
+};
+
 struct scsi_read_cd_capacity
 {
 	u_int8_t op_code;
@@ -252,9 +347,11 @@
 #define READ_TOC		0x43	/* cdrom read TOC */
 #define READ_HEADER		0x44	/* cdrom read header */
 #define PLAY_10			0x45	/* cdrom play  'play audio' mode */
+#define GET_CONFIGURATION	0x46	/* Get device configuration */
 #define PLAY_MSF		0x47	/* cdrom play Min,Sec,Frames mode */
 #define PLAY_TRACK		0x48	/* cdrom play track/index mode */
 #define PLAY_TRACK_REL		0x49	/* cdrom play track/index mode */
+#define GET_EVENT_STATUS	0x4a	/* Get event status notification */
 #define PAUSE			0x4b	/* cdrom pause in 'play audio' mode */
 #define SEND_KEY		0xa3	/* dvd send key command */
 #define REPORT_KEY		0xa4	/* dvd report key command */
@@ -262,6 +359,7 @@
 #define PLAY_TRACK_REL_BIG	0xa9	/* cdrom play track/index mode */
 #define READ_DVD_STRUCTURE	0xad	/* read dvd structure */
 #define SET_CD_SPEED		0xbb	/* set c/dvd speed */
+#define MECHANISM_STATUS	0xbd	/* get status of c/dvd mechanics */
 
 struct scsi_report_key_data_header
 {
@@ -686,6 +784,37 @@
 #define	RIGHT_PORT		1
 };
 
+struct scsi_cddvd_capabilities_page_sd {
+	uint8_t reserved;
+	uint8_t rotation_control;
+	uint8_t write_speed_supported[2];
+};
+
+struct scsi_cddvd_capabilities_page {
+	uint8_t page_code;
+#define	SMS_CDDVD_CAPS_PAGE		0x2a
+	uint8_t page_length;
+	uint8_t caps1;
+	uint8_t caps2;
+	uint8_t caps3;
+	uint8_t caps4;
+	uint8_t caps5;
+	uint8_t caps6;
+	uint8_t obsolete[2];
+	uint8_t nvol_levels[2];
+	uint8_t buffer_size[2];
+	uint8_t obsolete2[2];
+	uint8_t reserved;
+	uint8_t digital;
+	uint8_t obsolete3;
+	uint8_t copy_management;
+	uint8_t reserved2;
+	uint8_t rotation_control;
+	uint8_t cur_write_speed;
+	uint8_t num_speed_descr;
+	struct scsi_cddvd_capabilities_page_sd speed_descr[];
+};
+
 union cd_pages
 {
 	struct cd_audio_page audio;

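The new GET CONFIGURATION structures are consumed as a fixed header followed
by variable-length feature descriptors.  A minimal decoding sketch, assuming
the declarations above plus the existing scsi_2btoul()/scsi_4btoul() helpers
(walk_get_config is a hypothetical name):

#include <sys/param.h>
#include <sys/systm.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>

static void
walk_get_config(uint8_t *buf, uint32_t buf_len)
{
	struct scsi_get_config_header *hdr;
	struct scsi_get_config_feature *f;
	uint32_t avail, off;

	if (buf_len < sizeof(*hdr))
		return;
	hdr = (struct scsi_get_config_header *)buf;

	/* data_length counts the bytes that follow the field itself. */
	avail = MIN(buf_len, scsi_4btoul(hdr->data_length) + 4);
	printf("current profile 0x%04x\n",
	    scsi_2btoul(hdr->current_profile));

	for (off = sizeof(*hdr); off + sizeof(*f) <= avail;
	    off += sizeof(*f) + f->add_length) {
		f = (struct scsi_get_config_feature *)(buf + off);
		printf("feature 0x%04x%s\n", scsi_2btoul(f->feature_code),
		    (f->flags & SGC_F_CURRENT) ? " (current)" : "");
	}
}
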
Modified: trunk/sys/cam/scsi/scsi_ch.c
===================================================================
--- trunk/sys/cam/scsi/scsi_ch.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_ch.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 1997 Justin T. Gibbs.
  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
@@ -68,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_ch.c 316791 2017-04-13 20:36:40Z bdrewery $");
 
 #include <sys/param.h>
 #include <sys/queue.h>
@@ -99,10 +100,10 @@
  */
 
 static const u_int32_t	CH_TIMEOUT_MODE_SENSE                = 6000;
-static const u_int32_t	CH_TIMEOUT_MOVE_MEDIUM               = 100000;
-static const u_int32_t	CH_TIMEOUT_EXCHANGE_MEDIUM           = 100000;
-static const u_int32_t	CH_TIMEOUT_POSITION_TO_ELEMENT       = 100000;
-static const u_int32_t	CH_TIMEOUT_READ_ELEMENT_STATUS       = 10000;
+static const u_int32_t	CH_TIMEOUT_MOVE_MEDIUM               = 15 * 60 * 1000;
+static const u_int32_t	CH_TIMEOUT_EXCHANGE_MEDIUM           = 15 * 60 * 1000;
+static const u_int32_t	CH_TIMEOUT_POSITION_TO_ELEMENT       = 15 * 60 * 1000;
+static const u_int32_t	CH_TIMEOUT_READ_ELEMENT_STATUS       = 5 * 60 * 1000;
 static const u_int32_t	CH_TIMEOUT_SEND_VOLTAG		     = 10000;
 static const u_int32_t	CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS = 500000;
 
@@ -116,18 +117,19 @@
 } ch_state;
 
 typedef enum {
-	CH_CCB_PROBE,
-	CH_CCB_WAITING
+	CH_CCB_PROBE
 } ch_ccb_types;
 
 typedef enum {
 	CH_Q_NONE	= 0x00,
-	CH_Q_NO_DBD	= 0x01
+	CH_Q_NO_DBD	= 0x01,
+	CH_Q_NO_DVCID	= 0x02
 } ch_quirks;
 
 #define CH_Q_BIT_STRING	\
 	"\020"		\
-	"\001NO_DBD"
+	"\001NO_DBD"	\
+	"\002NO_DVCID"
 
 #define ccb_state	ppriv_field0
 #define ccb_bp		ppriv_ptr1
@@ -198,6 +200,7 @@
 static	int		chposition(struct cam_periph *periph,
 				   struct changer_position *cp);
 static	int		chgetelemstatus(struct cam_periph *periph,
+				int scsi_version, u_long cmd,
 				struct changer_element_status_request *csr);
 static	int		chsetvoltag(struct cam_periph *periph,
 				    struct changer_set_voltag_request *csvr);
@@ -204,6 +207,7 @@
 static	int		chielem(struct cam_periph *periph, 
 				unsigned int timeout);
 static	int		chgetparams(struct cam_periph *periph);
+static	int		chscsiversion(struct cam_periph *periph);
 
 static struct periph_driver chdriver =
 {
@@ -244,20 +248,19 @@
 static void
 chdevgonecb(void *arg)
 {
-	struct cam_sim	  *sim;
 	struct ch_softc   *softc;
 	struct cam_periph *periph;
+	struct mtx *mtx;
 	int i;
 
 	periph = (struct cam_periph *)arg;
-	sim = periph->sim;
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
+
 	softc = (struct ch_softc *)periph->softc;
-
 	KASSERT(softc->open_count >= 0, ("Negative open count %d",
 		softc->open_count));
 
-	mtx_lock(sim->mtx);
-
 	/*
 	 * When we get this callback, we will get no more close calls from
 	 * devfs.  So if we have any dangling opens, we need to release the
@@ -274,13 +277,13 @@
 	cam_periph_release_locked(periph);
 
 	/*
-	 * We reference the SIM lock directly here, instead of using
+	 * We reference the lock directly here, instead of using
 	 * cam_periph_unlock().  The reason is that the final call to
 	 * cam_periph_release_locked() above could result in the periph
 	 * getting freed.  If that is the case, dereferencing the periph
 	 * with a cam_periph_unlock() call would cause a page fault.
 	 */
-	mtx_unlock(sim->mtx);
+	mtx_unlock(mtx);
 }
 
 static void
@@ -302,9 +305,6 @@
 	 * when it has cleaned up its state.
 	 */
 	destroy_dev_sched_cb(softc->dev, chdevgonecb, periph);
-
-	xpt_print(periph->path, "lost device\n");
-
 }
 
 static void
@@ -314,8 +314,6 @@
 
 	softc = (struct ch_softc *)periph->softc;
 
-	xpt_print(periph->path, "removing device entry\n");
-
 	devstat_remove_entry(softc->device_stats);
 
 	free(softc, M_DEVBUF);
@@ -340,7 +338,8 @@
 
 		if (cgd->protocol != PROTO_SCSI)
 			break;
-
+		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
+			break;
 		if (SID_TYPE(&cgd->inq_data)!= T_CHANGER)
 			break;
 
@@ -351,7 +350,7 @@
 		 */
 		status = cam_periph_alloc(chregister, choninvalidate,
 					  chcleanup, chstart, "ch",
-					  CAM_PERIPH_BIO, cgd->ccb_h.path,
+					  CAM_PERIPH_BIO, path,
 					  chasync, AC_FOUND_DEVICE, cgd);
 
 		if (status != CAM_REQ_CMP
@@ -374,6 +373,8 @@
 	struct ch_softc *softc;
 	struct ccb_getdev *cgd;
 	struct ccb_pathinq cpi;
+	struct make_dev_args args;
+	int error;
 
 	cgd = (struct ccb_getdev *)arg;
 	if (cgd == NULL) {
@@ -394,6 +395,14 @@
 	periph->softc = softc;
 	softc->quirks = CH_Q_NONE;
 
+	/*
+	 * The DVCID and CURDATA bits were not introduced until the SMC
+	 * spec.  If this device claims SCSI-2 or earlier support, then it
+	 * very likely does not support these bits.
+	 */
+	if (cgd->inq_data.version <= SCSI_REV_2)
+		softc->quirks |= CH_Q_NO_DVCID;
+
 	bzero(&cpi, sizeof(cpi));
 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 	cpi.ccb_h.func_code = XPT_PATH_INQ;
@@ -425,11 +434,20 @@
 
 
 	/* Register the device */
-	softc->dev = make_dev(&ch_cdevsw, periph->unit_number, UID_ROOT,
-			      GID_OPERATOR, 0600, "%s%d", periph->periph_name,
-			      periph->unit_number);
+	make_dev_args_init(&args);
+	args.mda_devsw = &ch_cdevsw;
+	args.mda_unit = periph->unit_number;
+	args.mda_uid = UID_ROOT;
+	args.mda_gid = GID_OPERATOR;
+	args.mda_mode = 0600;
+	args.mda_si_drv1 = periph;
+	error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name,
+	    periph->unit_number);
 	cam_periph_lock(periph);
-	softc->dev->si_drv1 = periph;
+	if (error != 0) {
+		cam_periph_release_locked(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
 
 	/*
 	 * Add an async callback so that we get
@@ -478,6 +496,7 @@
 	 * Load information about this changer device into the softc.
 	 */
 	if ((error = chgetparams(periph)) != 0) {
+		cam_periph_unhold(periph);
 		cam_periph_release_locked(periph);
 		cam_periph_unlock(periph);
 		return(error);
@@ -495,25 +514,21 @@
 static int
 chclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 {
-	struct	cam_sim *sim;
 	struct	cam_periph *periph;
 	struct  ch_softc *softc;
+	struct mtx *mtx;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return(ENXIO);
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
 
-	sim = periph->sim;
 	softc = (struct ch_softc *)periph->softc;
-
-	mtx_lock(sim->mtx);
-
 	softc->open_count--;
 
 	cam_periph_release_locked(periph);
 
 	/*
-	 * We reference the SIM lock directly here, instead of using
+	 * We reference the lock directly here, instead of using
 	 * cam_periph_unlock().  The reason is that the call to
 	 * cam_periph_release_locked() above could result in the periph
 	 * getting freed.  If that is the case, dereferencing the periph
@@ -524,7 +539,7 @@
 	 * protect the open count and avoid another lock acquisition and
 	 * release.
 	 */
-	mtx_unlock(sim->mtx);
+	mtx_unlock(mtx);
 
 	return(0);
 }
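
Both hunks above replace direct periph->sim->mtx references with the
cam_periph_mtx() accessor, and both are careful to unlock through a saved
pointer because the final cam_periph_release_locked() may free the periph.
The idiom in isolation (a sketch; xx_lastclose is a hypothetical name):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <cam/cam.h>
#include <cam/cam_periph.h>

static void
xx_lastclose(struct cam_periph *periph)
{
	struct mtx *mtx;

	/* Cache the mutex pointer while the periph is still valid. */
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	/* ...drop softc open counts while still holding the lock... */

	cam_periph_release_locked(periph);

	/*
	 * The release above may have freed the periph, so unlock through
	 * the cached pointer instead of cam_periph_unlock(periph).
	 */
	mtx_unlock(mtx);
}
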
@@ -539,14 +554,7 @@
 	switch (softc->state) {
 	case CH_STATE_NORMAL:
 	{
-		if (periph->immediate_priority <= periph->pinfo.priority){
-			start_ccb->ccb_h.ccb_state = CH_CCB_WAITING;
-
-			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-					  periph_links.sle);
-			periph->immediate_priority = CAM_PRIORITY_NONE;
-			wakeup(&periph->ccb_list);
-		}
+		xpt_release_ccb(start_ccb);
 		break;
 	}
 	case CH_STATE_PROBE:
@@ -579,7 +587,7 @@
 				/* tag_action */ MSG_SIMPLE_Q_TAG,
 				/* dbd */ (softc->quirks & CH_Q_NO_DBD) ?
 					FALSE : TRUE,
-				/* page_code */ SMS_PAGE_CTRL_CURRENT,
+				/* pc */ SMS_PAGE_CTRL_CURRENT,
 				/* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE,
 				/* param_buf */ (u_int8_t *)mode_buffer,
 				/* param_len */ mode_buffer_len,
@@ -641,6 +649,11 @@
 		    		softc->sc_counts[CHET_IE],
 				PLURAL(softc->sc_counts[CHET_IE]));
 #undef PLURAL
+			if (announce_buf[0] != '\0') {
+				xpt_announce_periph(periph, announce_buf);
+				xpt_announce_quirks(periph, softc->quirks,
+				    CH_Q_BIT_STRING);
+			}
 		} else {
 			int error;
 
@@ -657,11 +670,13 @@
 				 */
 				return;
 			} else if (error != 0) {
-				int retry_scheduled;
 				struct scsi_mode_sense_6 *sms;
+				int frozen, retry_scheduled;
 
 				sms = (struct scsi_mode_sense_6 *)
 					done_ccb->csio.cdb_io.cdb_bytes;
+				frozen = (done_ccb->ccb_h.status &
+				    CAM_DEV_QFRZN) != 0;
 
 				/*
 				 * Check to see if block descriptors were
@@ -672,7 +687,8 @@
 				 * block descriptors were disabled, enable
 				 * them and re-send the command.
 				 */
-				if (sms->byte2 & SMS_DBD) {
+				if ((sms->byte2 & SMS_DBD) != 0 &&
+				    (periph->flags & CAM_PERIPH_INVALID) == 0) {
 					sms->byte2 &= ~SMS_DBD;
 					xpt_action(done_ccb);
 					softc->quirks |= CH_Q_NO_DBD;
@@ -681,7 +697,7 @@
 					retry_scheduled = 0;
 
 				/* Don't wedge this device's queue */
-				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+				if (frozen)
 					cam_release_devq(done_ccb->ccb_h.path,
 						 /*relsim_flags*/0,
 						 /*reduction*/0,
@@ -704,14 +720,8 @@
 
 				cam_periph_invalidate(periph);
 
-				announce_buf[0] = '\0';
 			}
 		}
-		if (announce_buf[0] != '\0') {
-			xpt_announce_periph(periph, announce_buf);
-			xpt_announce_quirks(periph, softc->quirks,
-			    CH_Q_BIT_STRING);
-		}
 		softc->state = CH_STATE_NORMAL;
 		free(mode_header, M_SCSICH);
 		/*
@@ -726,12 +736,6 @@
 		cam_periph_unhold(periph);
 		return;
 	}
-	case CH_CCB_WAITING:
-	{
-		/* Caller will release the CCB */
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
-	}
 	default:
 		break;
 	}
@@ -759,9 +763,6 @@
 	int error;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return(ENXIO);
-
 	cam_periph_lock(periph);
 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering chioctl\n"));
 
@@ -779,6 +780,7 @@
 	switch (cmd) {
 	case CHIOGPICKER:
 	case CHIOGPARAMS:
+	case OCHIOGSTATUS:
 	case CHIOGSTATUS:
 		break;
 
@@ -831,10 +833,26 @@
 		error = chielem(periph, *(unsigned int *)addr);
 		break;
 
+	case OCHIOGSTATUS:
+	{
+		error = chgetelemstatus(periph, SCSI_REV_2, cmd,
+		    (struct changer_element_status_request *)addr);
+		break;
+	}
+
 	case CHIOGSTATUS:
 	{
-		error = chgetelemstatus(periph,
-			       (struct changer_element_status_request *) addr);
+		int scsi_version;
+
+		scsi_version = chscsiversion(periph);
+		if (scsi_version >= SCSI_REV_0) {
+			error = chgetelemstatus(periph, scsi_version, cmd,
+			    (struct changer_element_status_request *)addr);
+		} else { /* unable to determine the SCSI version */
+			cam_periph_unlock(periph);
+			return (ENXIO);
+		}
 		break;
 	}
 
@@ -1041,18 +1059,20 @@
 }
 
 /*
- * Copy an an element status descriptor to a user-mode
+ * Copy an element status descriptor to a user-mode
  * changer_element_status structure.
  */
-
-static	void
+static void
 copy_element_status(struct ch_softc *softc,
 		    u_int16_t flags,
 		    struct read_element_status_descriptor *desc,
-		    struct changer_element_status *ces)
+		    struct changer_element_status *ces,
+		    int scsi_version)
 {
 	u_int16_t eaddr = scsi_2btoul(desc->eaddr);
 	u_int16_t et;
+	struct volume_tag *pvol_tag = NULL, *avol_tag = NULL;
+	struct read_element_status_device_id *devid = NULL;
 
 	ces->ces_int_addr = eaddr;
 	/* set up logical address in element status */
@@ -1083,7 +1103,7 @@
 			if ((softc->sc_firsts[et] <= eaddr)
 			    && ((softc->sc_firsts[et] + softc->sc_counts[et])
 				> eaddr)) {
-				ces->ces_source_addr = 
+				ces->ces_source_addr =
 					eaddr - softc->sc_firsts[et];
 				ces->ces_source_type = et;
 				ces->ces_flags |= CES_SOURCE_VALID;
@@ -1096,27 +1116,88 @@
 			       "address %ud to a valid element type\n",
 			       eaddr);
 	}
-			
 
+	/*
+	 * pvoltag and avoltag are common between SCSI-2 and later versions
+	 */
 	if (flags & READ_ELEMENT_STATUS_PVOLTAG)
-		copy_voltag(&(ces->ces_pvoltag), &(desc->pvoltag));
+		pvol_tag = &desc->voltag_devid.pvoltag;
 	if (flags & READ_ELEMENT_STATUS_AVOLTAG)
-		copy_voltag(&(ces->ces_avoltag), &(desc->avoltag));
+		avol_tag = (flags & READ_ELEMENT_STATUS_PVOLTAG) ?
+		    &desc->voltag_devid.voltag[1] : &desc->voltag_devid.pvoltag;
+	/*
+	 * For SCSI-3 and later, element status can carry designator and
+	 * other information.
+	 */
+	if (scsi_version >= SCSI_REV_SPC) {
+		if (((flags & READ_ELEMENT_STATUS_PVOLTAG) != 0) ^
+		    ((flags & READ_ELEMENT_STATUS_AVOLTAG) != 0))
+			devid = &desc->voltag_devid.pvol_and_devid.devid;
+		else if (!(flags & READ_ELEMENT_STATUS_PVOLTAG) &&
+			 !(flags & READ_ELEMENT_STATUS_AVOLTAG))
+			devid = &desc->voltag_devid.devid;
+		else /* Have both PVOLTAG and AVOLTAG */
+			devid = &desc->voltag_devid.vol_tags_and_devid.devid;
+	}
 
-	if (desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_IDVALID) {
-		ces->ces_flags |= CES_SCSIID_VALID;
-		ces->ces_scsi_id = desc->dt_scsi_addr;
+	if (pvol_tag)
+		copy_voltag(&(ces->ces_pvoltag), pvol_tag);
+	if (avol_tag)
+		copy_voltag(&(ces->ces_avoltag), avol_tag);
+	if (devid != NULL) {
+		if (devid->designator_length > 0) {
+			bcopy((void *)devid->designator,
+			      (void *)ces->ces_designator,
+			      devid->designator_length);
+			ces->ces_designator_length = devid->designator_length;
+			/*
+			 * Make sure we are always NUL terminated.  This
+			 * won't matter for the binary code set, since the
+			 * user will only pay attention to the length
+			 * field.
+			 */
+			ces->ces_designator[devid->designator_length] = '\0';
+		}
+		if (devid->piv_assoc_designator_type &
+		    READ_ELEMENT_STATUS_PIV_SET) {
+			ces->ces_flags |= CES_PIV;
+			ces->ces_protocol_id =
+			    READ_ELEMENT_STATUS_PROTOCOL_ID(
+			    devid->prot_code_set);
+		}
+		ces->ces_code_set =
+		    READ_ELEMENT_STATUS_CODE_SET(devid->prot_code_set);
+		ces->ces_assoc = READ_ELEMENT_STATUS_ASSOCIATION(
+		    devid->piv_assoc_designator_type);
+		ces->ces_designator_type = READ_ELEMENT_STATUS_DESIGNATOR_TYPE(
+		    devid->piv_assoc_designator_type);
+	} else if (scsi_version > SCSI_REV_2) {
+		/* SPC or newer, but no devid: no designator */
+		ces->ces_designator_length = 0;
+		ces->ces_designator[0] = '\0';
+		ces->ces_protocol_id = CES_PROTOCOL_ID_FCP_4;
 	}
 
-	if (desc->dt_scsi_addr & READ_ELEMENT_STATUS_DT_LUVALID) {
-		ces->ces_flags |= CES_LUN_VALID;
-		ces->ces_scsi_lun = 
-			desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUNMASK;
+	if (scsi_version <= SCSI_REV_2) {
+		if (desc->dt_or_obsolete.scsi_2.dt_scsi_flags &
+		    READ_ELEMENT_STATUS_DT_IDVALID) {
+			ces->ces_flags |= CES_SCSIID_VALID;
+			ces->ces_scsi_id =
+			    desc->dt_or_obsolete.scsi_2.dt_scsi_addr;
+		}
+
+		if (desc->dt_or_obsolete.scsi_2.dt_scsi_addr &
+		    READ_ELEMENT_STATUS_DT_LUVALID) {
+			ces->ces_flags |= CES_LUN_VALID;
+			ces->ces_scsi_lun =
+			    desc->dt_or_obsolete.scsi_2.dt_scsi_flags &
+			    READ_ELEMENT_STATUS_DT_LUNMASK;
+		}
 	}
 }
 
 static int
-chgetelemstatus(struct cam_periph *periph, 
+chgetelemstatus(struct cam_periph *periph, int scsi_version, u_long cmd,
 		struct changer_element_status_request *cesr)
 {
 	struct read_element_status_header *st_hdr;
@@ -1125,6 +1206,8 @@
 	caddr_t data = NULL;
 	size_t size, desclen;
 	int avail, i, error = 0;
+	int curdata, dvcid, sense_flags;
+	int try_no_dvcid = 0;
 	struct changer_element_status *user_data = NULL;
 	struct ch_softc *softc;
 	union ccb *ccb;
@@ -1156,6 +1239,23 @@
 	cam_periph_lock(periph);
 	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 
+	sense_flags = SF_RETRY_UA;
+	if (softc->quirks & CH_Q_NO_DVCID) {
+		dvcid = 0;
+		curdata = 0;
+	} else {
+		dvcid = 1;
+		curdata = 1;
+		/*
+		 * Don't print anything for an Illegal Request, because
+		 * these flags can cause some changers to complain.  We'll
+		 * retry without them if we get an error.
+		 */
+		sense_flags |= SF_QUIET_IR;
+	}
+
+retry_einval:
+
 	scsi_read_element_status(&ccb->csio,
 				 /* retries */ 1,
 				 /* cbfcnp */ chdone,
@@ -1162,6 +1262,8 @@
 				 /* tag_action */ MSG_SIMPLE_Q_TAG,
 				 /* voltag */ want_voltags,
 				 /* sea */ softc->sc_firsts[chet],
+				 /* curdata */ curdata,
+				 /* dvcid */ dvcid,
 				 /* count */ 1,
 				 /* data_ptr */ data,
 				 /* dxfer_len */ 1024,
@@ -1169,9 +1271,38 @@
 				 /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS);
 
 	error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO,
-				  /*sense_flags*/ SF_RETRY_UA,
+				  /*sense_flags*/ sense_flags,
 				  softc->device_stats);
 
+	/*
+	 * An Illegal Request sense key (only used if there is no asc/ascq)
+	 * or 0x24,0x00 for an ASC/ASCQ both map to EINVAL.  If dvcid or
+	 * curdata are set (we set both or neither), try turning them off
+	 * and see if the command is successful.
+	 */
+	if ((error == EINVAL)
+	 && (dvcid || curdata)) {
+		dvcid = 0;
+		curdata = 0;
+		error = 0;
+		/* At this point we want to report any Illegal Request */
+		sense_flags &= ~SF_QUIET_IR;
+		try_no_dvcid = 1;
+		goto retry_einval;
+	}
+
+	/*
+	 * In this case, we tried a read element status with dvcid and
+	 * curdata set, and it failed.  We retried without those bits, and
+	 * it succeeded.  Suggest to the user that he set a quirk, so we
+	 * don't go through the retry process the first time in the future.
+	 * This should only happen on changers that claim SCSI-3 or higher,
+	 * but don't support these bits.
+	 */
+	if ((try_no_dvcid != 0)
+	 && (error == 0))
+		softc->quirks |= CH_Q_NO_DVCID;
+
 	if (error)
 		goto done;
 	cam_periph_unlock(periph);
@@ -1184,7 +1315,6 @@
 	size = sizeof(struct read_element_status_header) +
 	       sizeof(struct read_element_status_page_header) +
 	       (desclen * cesr->cesr_element_count);
-
 	/*
 	 * Reallocate storage for descriptors and get them from the
 	 * device.
@@ -1200,12 +1330,14 @@
 				 /* voltag */ want_voltags,
 				 /* sea */ softc->sc_firsts[chet]
 				 + cesr->cesr_element_base,
+				 /* curdata */ curdata,
+				 /* dvcid */ dvcid,
 				 /* count */ cesr->cesr_element_count,
 				 /* data_ptr */ data,
 				 /* dxfer_len */ size,
 				 /* sense_len */ SSD_FULL_SIZE,
 				 /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS);
-	
+
 	error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO,
 				  /*sense_flags*/ SF_RETRY_UA,
 				  softc->device_stats);
@@ -1238,18 +1370,41 @@
 	 * Set up the individual element status structures
 	 */
 	for (i = 0; i < avail; ++i) {
-		struct changer_element_status *ces = &(user_data[i]);
+		struct changer_element_status *ces;
 
-		copy_element_status(softc, pg_hdr->flags, desc, ces);
+		/*
+		 * In the changer_element_status structure, fields from
+		 * the beginning to the field of ces_scsi_lun are common
+		 * between SCSI-2 and SCSI-3, while all the rest are new
+		 * from SCSI-3. In order to maintain backward compatibility
+		 * of the chio command, the ces pointer, below, is computed
+		 * such that it lines up with the structure boundary
+		 * corresponding to the SCSI version.
+		 */
+		ces = cmd == OCHIOGSTATUS ?
+		    (struct changer_element_status *)
+		    ((unsigned char *)user_data + i *
+		    (offsetof(struct changer_element_status, ces_scsi_lun) + 1)) :
+		    &user_data[i];
 
+		copy_element_status(softc, pg_hdr->flags, desc,
+				    ces, scsi_version);
+
 		desc = (struct read_element_status_descriptor *)
-		       ((uintptr_t)desc + desclen);
+		       ((unsigned char *)desc + desclen);
 	}
 
 	/* Copy element status structures out to userspace. */
-	error = copyout(user_data,
-			cesr->cesr_element_status,
-			avail * sizeof(struct changer_element_status));
+	if (cmd == OCHIOGSTATUS)
+		error = copyout(user_data,
+				cesr->cesr_element_status,
+				avail * (offsetof(struct changer_element_status,
+				ces_scsi_lun) + 1));
+	else
+		error = copyout(user_data,
+				cesr->cesr_element_status,
+				avail * sizeof(struct changer_element_status));
+
 	cam_periph_lock(periph);
 
  done:
@@ -1415,6 +1570,7 @@
 
 	if (mode_buffer == NULL) {
 		printf("chgetparams: couldn't malloc mode sense data\n");
+		xpt_release_ccb(ccb);
 		return(ENOSPC);
 	}
 
@@ -1433,7 +1589,7 @@
 			/* cbfcnp */ chdone,
 			/* tag_action */ MSG_SIMPLE_Q_TAG,
 			/* dbd */ dbd,
-			/* page_code */ SMS_PAGE_CTRL_CURRENT,
+			/* pc */ SMS_PAGE_CTRL_CURRENT,
 			/* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE,
 			/* param_buf */ (u_int8_t *)mode_buffer,
 			/* param_len */ mode_buffer_len,
@@ -1496,7 +1652,7 @@
 			/* cbfcnp */ chdone,
 			/* tag_action */ MSG_SIMPLE_Q_TAG,
 			/* dbd */ dbd,
-			/* page_code */ SMS_PAGE_CTRL_CURRENT,
+			/* pc */ SMS_PAGE_CTRL_CURRENT,
 			/* page */ CH_DEVICE_CAP_PAGE,
 			/* param_buf */ (u_int8_t *)mode_buffer,
 			/* param_len */ mode_buffer_len,
@@ -1556,6 +1712,37 @@
 	return(error);
 }
 
+static int
+chscsiversion(struct cam_periph *periph)
+{
+	struct scsi_inquiry_data *inq_data;
+	struct ccb_getdev *cgd;
+	int dev_scsi_version;
+
+	cam_periph_assert(periph, MA_OWNED);
+	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) == NULL)
+		return (-1);
+	/*
+	 * Get the device information.
+	 */
+	xpt_setup_ccb(&cgd->ccb_h,
+		      periph->path,
+		      CAM_PRIORITY_NORMAL);
+	cgd->ccb_h.func_code = XPT_GDEV_TYPE;
+	xpt_action((union ccb *)cgd);
+
+	if (cgd->ccb_h.status != CAM_REQ_CMP) {
+		xpt_free_ccb((union ccb *)cgd);
+		return (-1);
+	}
+
+	inq_data = &cgd->inq_data;
+	dev_scsi_version = inq_data->version;
+	xpt_free_ccb((union ccb *)cgd);
+
+	return (dev_scsi_version);
+}
+
 void
 scsi_move_medium(struct ccb_scsiio *csio, u_int32_t retries,
 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
@@ -1661,6 +1848,7 @@
 scsi_read_element_status(struct ccb_scsiio *csio, u_int32_t retries,
 			 void (*cbfcnp)(struct cam_periph *, union ccb *),
 			 u_int8_t tag_action, int voltag, u_int32_t sea,
+			 int curdata, int dvcid,
 			 u_int32_t count, u_int8_t *data_ptr,
 			 u_int32_t dxfer_len, u_int8_t sense_len,
 			 u_int32_t timeout)
@@ -1675,6 +1863,10 @@
 	scsi_ulto2b(sea, scsi_cmd->sea);
 	scsi_ulto2b(count, scsi_cmd->count);
 	scsi_ulto3b(dxfer_len, scsi_cmd->len);
+	if (dvcid)
+		scsi_cmd->flags |= READ_ELEMENT_STATUS_DVCID;
+	if (curdata)
+		scsi_cmd->flags |= READ_ELEMENT_STATUS_CURDATA;
 
 	if (voltag)
 		scsi_cmd->byte2 |= READ_ELEMENT_STATUS_VOLTAG;
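The chgetelemstatus() rework above introduces a fallback: READ ELEMENT
STATUS is first issued with the optional DVCID and CURDATA bits set, and
if the changer rejects that with Illegal Request (mapped to EINVAL), it
is retried without them and CH_Q_NO_DVCID is latched so future calls skip
the failing variant.  A minimal user-space sketch of that retry-and-quirk
pattern follows, assuming a hypothetical fake_changer model in place of
the real CCB machinery:

	#include <errno.h>
	#include <stdio.h>

	#define QUIRK_NO_DVCID	0x01

	struct fake_changer {
		int quirks;
		int reject_optional_bits;	/* emulates a SCSI-2-only changer */
	};

	/* Stand-in for scsi_read_element_status() + cam_periph_runccb(). */
	static int
	read_element_status(struct fake_changer *ch, int dvcid, int curdata)
	{
		if ((dvcid || curdata) && ch->reject_optional_bits)
			return (EINVAL);	/* Illegal Request maps to EINVAL */
		return (0);
	}

	static int
	get_element_status(struct fake_changer *ch)
	{
		int dvcid, curdata, try_no_dvcid, error;

		/* Honor a previously latched quirk. */
		dvcid = curdata = (ch->quirks & QUIRK_NO_DVCID) ? 0 : 1;
		try_no_dvcid = 0;
	retry_einval:
		error = read_element_status(ch, dvcid, curdata);
		if (error == EINVAL && (dvcid || curdata)) {
			dvcid = curdata = 0;	/* retry without the optional bits */
			try_no_dvcid = 1;
			goto retry_einval;
		}
		/* The fallback worked: remember it so we skip the retry next time. */
		if (try_no_dvcid && error == 0)
			ch->quirks |= QUIRK_NO_DVCID;
		return (error);
	}

	int
	main(void)
	{
		struct fake_changer ch = { 0, 1 };

		printf("first call:  %d, quirks now 0x%x\n",
		    get_element_status(&ch), ch.quirks);
		printf("second call: %d (no fallback needed)\n",
		    get_element_status(&ch));
		return (0);
	}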

Modified: trunk/sys/cam/scsi/scsi_ch.h
===================================================================
--- trunk/sys/cam/scsi/scsi_ch.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_ch.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,4 +1,5 @@
 /* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/cam/scsi/scsi_ch.h 311402 2017-01-05 11:20:31Z mav $ */
 /*	$NetBSD: scsi_changer.h,v 1.11 1998/02/13 08:28:32 enami Exp $	*/
 
 /*-
@@ -136,11 +137,14 @@
 struct scsi_read_element_status {
 	u_int8_t	opcode;
 	u_int8_t	byte2;
-#define READ_ELEMENT_STATUS_VOLTAG	0x10	/* report volume tag info */
+#define	READ_ELEMENT_STATUS_VOLTAG	0x10	/* report volume tag info */
 	/* ...next 4 bits are an element type code... */
 	u_int8_t	sea[2];	/* starting element address */
 	u_int8_t	count[2]; /* number of elements */
-	u_int8_t	reserved0;
+	u_int8_t	flags;
+#define	READ_ELEMENT_STATUS_DVCID	0x01 /* report device serial number */
+#define	READ_ELEMENT_STATUS_CURDATA	0x02 /* allow motion during command */
+
 	u_int8_t	len[3];	/* length of data buffer */
 	u_int8_t	reserved1;
 	u_int8_t	control;
@@ -149,7 +153,7 @@
 struct scsi_request_volume_element_address {
 	u_int8_t	opcode;
 	u_int8_t	byte2;
-#define REQUEST_VOLUME_ELEMENT_ADDRESS_VOLTAG	0x10
+#define	REQUEST_VOLUME_ELEMENT_ADDRESS_VOLTAG	0x10
 	/* ...next 4 bits are an element type code... */
 	u_int8_t	eaddr[2];	/* element address */
 	u_int8_t	count[2];	/* number of elements */
@@ -182,8 +186,8 @@
 struct read_element_status_page_header {
 	u_int8_t	type;	/* element type code; see type codes below */
 	u_int8_t	flags;
-#define READ_ELEMENT_STATUS_AVOLTAG	0x40
-#define READ_ELEMENT_STATUS_PVOLTAG	0x80
+#define	READ_ELEMENT_STATUS_AVOLTAG	0x40
+#define	READ_ELEMENT_STATUS_PVOLTAG	0x80
 	u_int8_t	edl[2];	/* element descriptor length */
 	u_int8_t	reserved;
 	u_int8_t	nbytes[3]; /* byte count of all descriptors */
@@ -199,50 +203,79 @@
 	u_int8_t	vsn[2];		/* volume sequence number */
 };
 
+struct read_element_status_device_id {
+	u_int8_t	prot_code_set;
+#define	READ_ELEMENT_STATUS_CODE_SET(p) ((p) & 0x0F)
+#define	READ_ELEMENT_STATUS_PROTOCOL_ID(p) ((p) >> 4)
+
+	u_int8_t	piv_assoc_designator_type;
+#define	READ_ELEMENT_STATUS_PIV_SET 0x80
+#define	READ_ELEMENT_STATUS_ASSOCIATION(p) ((p) >> 4)
+#define	READ_ELEMENT_STATUS_DESIGNATOR_TYPE(p) ((p) & 0x0F)
+
+	u_int8_t	reserved2;
+	u_int8_t	designator_length;
+	u_int8_t	designator[256]; /* Allocate max length */
+};
+
 struct read_element_status_descriptor {
 	u_int8_t	eaddr[2];	/* element address */
 	u_int8_t	flags1;
 
-#define READ_ELEMENT_STATUS_FULL	0x01
-#define READ_ELEMENT_STATUS_IMPEXP	0x02
-#define READ_ELEMENT_STATUS_EXCEPT	0x04
-#define READ_ELEMENT_STATUS_ACCESS	0x08
-#define READ_ELEMENT_STATUS_EXENAB	0x10
-#define READ_ELEMENT_STATUS_INENAB	0x20
+#define	READ_ELEMENT_STATUS_FULL	0x01
+#define	READ_ELEMENT_STATUS_IMPEXP	0x02
+#define	READ_ELEMENT_STATUS_EXCEPT	0x04
+#define	READ_ELEMENT_STATUS_ACCESS	0x08
+#define	READ_ELEMENT_STATUS_EXENAB	0x10
+#define	READ_ELEMENT_STATUS_INENAB	0x20
 
-#define READ_ELEMENT_STATUS_MT_MASK1	0x05
-#define READ_ELEMENT_STATUS_ST_MASK1	0x0c
-#define READ_ELEMENT_STATUS_IE_MASK1	0x3f
-#define READ_ELEMENT_STATUS_DT_MASK1	0x0c
+#define	READ_ELEMENT_STATUS_MT_MASK1	0x05
+#define	READ_ELEMENT_STATUS_ST_MASK1	0x0c
+#define	READ_ELEMENT_STATUS_IE_MASK1	0x3f
+#define	READ_ELEMENT_STATUS_DT_MASK1	0x0c
 
 	u_int8_t	reserved0;
 	u_int8_t	sense_code;
 	u_int8_t	sense_qual;
 
-	/*
-	 * dt_scsi_flags and dt_scsi_addr are valid only on data transport
-	 * elements.  These bytes are undefined for all other element types.
-	 */
-	u_int8_t	dt_scsi_flags;
+	union {
+		struct {
+			u_int8_t	dt_scsi_flags;
 
-#define READ_ELEMENT_STATUS_DT_LUNMASK	0x07
-#define READ_ELEMENT_STATUS_DT_LUVALID	0x10
-#define READ_ELEMENT_STATUS_DT_IDVALID	0x20
-#define READ_ELEMENT_STATUS_DT_NOTBUS	0x80
+#define	READ_ELEMENT_STATUS_DT_LUNMASK	0x07
+#define	READ_ELEMENT_STATUS_DT_LUVALID	0x10
+#define	READ_ELEMENT_STATUS_DT_IDVALID	0x20
+#define	READ_ELEMENT_STATUS_DT_NOTBUS	0x80
 
-	u_int8_t	dt_scsi_addr;
+			u_int8_t	dt_scsi_addr;
+			u_int8_t	reserved1;
+		} scsi_2;
 
-	u_int8_t	reserved1;
+		/* reserved and obsolete (as of SCSI-3) fields */
+		u_int8_t	reserved_or_obsolete[3];
+	} dt_or_obsolete;
 
 	u_int8_t	flags2;
-#define READ_ELEMENT_STATUS_INVERT	0x40
-#define READ_ELEMENT_STATUS_SVALID	0x80
+#define	READ_ELEMENT_STATUS_INVERT		0x40
+#define	READ_ELEMENT_STATUS_SVALID		0x80
+#define	READ_ELEMENT_STATUS_ED			0x80
+#define	READ_ELEMENT_STATUS_MEDIA_TYPE_MASK	0x07
+
 	u_int8_t	ssea[2];	/* source storage element address */
 
-	struct volume_tag pvoltag;	/* omitted if PVOLTAG == 0 */
-	struct volume_tag avoltag;	/* omitted if AVOLTAG == 0 */
-
-	/* Other data may follow */
+	union {
+		struct volume_tag			pvoltag;
+		struct volume_tag 			voltag[2];
+		struct read_element_status_device_id	devid;
+		struct {
+			struct volume_tag			pvoltag;
+			struct read_element_status_device_id	devid;
+		} pvol_and_devid;
+		struct {
+			struct volume_tag			voltag[2];
+			struct read_element_status_device_id	devid;
+		} vol_tags_and_devid;
+	} voltag_devid;
 };
 
 /* XXX add data returned by REQUEST VOLUME ELEMENT ADDRESS */
@@ -301,7 +334,7 @@
 /*
  * Device capabilities page.
  *
- * This page defines characteristics of the elemenet types in the
+ * This page defines characteristics of the element types in the
  * medium changer device.
  *
  * Note in the definitions below, the following abbreviations are
@@ -309,7 +342,7 @@
  *		MT	Medium transport element (picker)
  *		ST	Storage transport element (slot)
  *		IE	Import/export element (portal)
- *		DT	Data tranfer element (tape/disk drive)
+ *		DT	Data transfer element (tape/disk drive)
  */
 struct page_device_capabilities {
 	u_int8_t	pg_code;	/* page code (0x1f) */
@@ -457,6 +490,7 @@
 void scsi_read_element_status(struct ccb_scsiio *csio, u_int32_t retries,
 			      void (*cbfcnp)(struct cam_periph *, union ccb *),
 			      u_int8_t tag_action, int voltag, u_int32_t sea,
+			      int curdata, int dvcid,
 			      u_int32_t count, u_int8_t *data_ptr,
 			      u_int32_t dxfer_len, u_int8_t sense_len,
 			      u_int32_t timeout);
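The OCHIOGSTATUS compatibility path in chgetelemstatus() depends on the
SCSI-2-era changer_element_status layout being a strict prefix of the
extended SCSI-3 one: an old-format element is exactly
offsetof(struct changer_element_status, ces_scsi_lun) + 1 bytes, so
results can be packed at that stride and truncated on copyout.  A
self-contained sketch of the idea, using cut-down stand-in structures
rather than the real chio.h definitions:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* What pre-SCSI-3 binaries expect: ends at ces_scsi_lun. */
	struct old_element_status {
		unsigned short	ces_addr;
		unsigned char	ces_flags;
		unsigned char	ces_scsi_lun;
	};

	/* The extended layout: same prefix, new fields appended. */
	struct new_element_status {
		unsigned short	ces_addr;
		unsigned char	ces_flags;
		unsigned char	ces_scsi_lun;
		unsigned char	ces_designator_length;
		unsigned char	ces_designator[8];
	};

	int
	main(void)
	{
		struct new_element_status ns[2];
		unsigned char outbuf[2 * sizeof(struct old_element_status)];
		size_t oldsz, i;

		memset(ns, 0, sizeof(ns));
		ns[0].ces_addr = 0x100;
		ns[1].ces_addr = 0x101;

		/* An old-format element: everything up to ces_scsi_lun. */
		oldsz = offsetof(struct new_element_status, ces_scsi_lun) + 1;

		/* Pack elements back to back at the old stride for copyout. */
		for (i = 0; i < 2; i++)
			memcpy(outbuf + i * oldsz, &ns[i], oldsz);

		printf("old element size: %zu bytes; truncated total: %zu\n",
		    oldsz, 2 * oldsz);
		return (0);
	}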

Modified: trunk/sys/cam/scsi/scsi_da.c
===================================================================
--- trunk/sys/cam/scsi/scsi_da.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_da.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Implementation of SCSI Direct Access Peripheral driver for CAM.
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_da.c 329319 2018-02-15 16:31:35Z avg $");
 
 #include <sys/param.h>
 
@@ -45,6 +46,7 @@
 #include <sys/malloc.h>
 #include <sys/cons.h>
 #include <sys/endian.h>
+#include <sys/proc.h>
 #include <geom/geom.h>
 #include <geom/geom_disk.h>
 #endif /* _KERNEL */
@@ -68,6 +70,7 @@
 
 #ifdef _KERNEL
 typedef enum {
+	DA_STATE_PROBE_WP,
 	DA_STATE_PROBE_RC,
 	DA_STATE_PROBE_RC16,
 	DA_STATE_PROBE_LBP,
@@ -82,14 +85,15 @@
 	DA_FLAG_NEW_PACK	= 0x002,
 	DA_FLAG_PACK_LOCKED	= 0x004,
 	DA_FLAG_PACK_REMOVABLE	= 0x008,
-	DA_FLAG_SAW_MEDIA	= 0x010,
 	DA_FLAG_NEED_OTAG	= 0x020,
-	DA_FLAG_WENT_IDLE	= 0x040,
+	DA_FLAG_WAS_OTAG	= 0x040,
 	DA_FLAG_RETRY_UA	= 0x080,
 	DA_FLAG_OPEN		= 0x100,
 	DA_FLAG_SCTX_INIT	= 0x200,
 	DA_FLAG_CAN_RC16	= 0x400,
-	DA_FLAG_PROBED		= 0x800		
+	DA_FLAG_PROBED		= 0x800,
+	DA_FLAG_DIRTY		= 0x1000,
+	DA_FLAG_ANNOUNCED	= 0x2000
 } da_flags;
 
 typedef enum {
@@ -97,7 +101,10 @@
 	DA_Q_NO_SYNC_CACHE	= 0x01,
 	DA_Q_NO_6_BYTE		= 0x02,
 	DA_Q_NO_PREVENT		= 0x04,
-	DA_Q_4K			= 0x08
+	DA_Q_4K			= 0x08,
+	DA_Q_NO_RC16		= 0x10,
+	DA_Q_NO_UNMAP		= 0x20,
+	DA_Q_RETRY_BUSY		= 0x40
 } da_quirks;
 
 #define DA_Q_BIT_STRING		\
@@ -105,7 +112,10 @@
 	"\001NO_SYNC_CACHE"	\
 	"\002NO_6_BYTE"		\
 	"\003NO_PREVENT"	\
-	"\0044K"
+	"\0044K"		\
+	"\005NO_RC16"		\
+	"\006NO_UNMAP"		\
+	"\007RETRY_BUSY"
 
 typedef enum {
 	DA_CCB_PROBE_RC		= 0x01,
@@ -115,12 +125,12 @@
 	DA_CCB_PROBE_BDC	= 0x05,
 	DA_CCB_PROBE_ATA	= 0x06,
 	DA_CCB_BUFFER_IO	= 0x07,
-	DA_CCB_WAITING		= 0x08,
 	DA_CCB_DUMP		= 0x0A,
 	DA_CCB_DELETE		= 0x0B,
  	DA_CCB_TUR		= 0x0C,
-	DA_CCB_TYPE_MASK	= 0x0F,
-	DA_CCB_RETRY_UA		= 0x10
+	DA_CCB_PROBE_WP		= 0x12,
+	DA_CCB_TYPE_MASK	= 0x1F,
+	DA_CCB_RETRY_UA		= 0x20
 } da_ccb_state;
 
 /*
@@ -144,6 +154,22 @@
 	DA_DELETE_MAX = DA_DELETE_ZERO
 } da_delete_methods;
 
+typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
+			      struct bio *bp);
+static da_delete_func_t da_delete_trim;
+static da_delete_func_t da_delete_unmap;
+static da_delete_func_t da_delete_ws;
+
+static const void * da_delete_functions[] = {
+	NULL,
+	NULL,
+	da_delete_trim,
+	da_delete_unmap,
+	da_delete_ws,
+	da_delete_ws,
+	da_delete_ws
+};
+
 static const char *da_delete_method_names[] =
     { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
 static const char *da_delete_method_desc[] =
@@ -180,8 +206,9 @@
 	struct	 bio_queue_head bio_queue;
 	struct	 bio_queue_head delete_queue;
 	struct	 bio_queue_head delete_run_queue;
-	SLIST_ENTRY(da_softc) links;
 	LIST_HEAD(, ccb_hdr) pending_ccbs;
+	int	 tur;			/* TEST UNIT READY should be sent */
+	int	 refcount;		/* Active xpt_action() calls */
 	da_state state;
 	da_flags flags;	
 	da_quirks quirks;
@@ -188,16 +215,16 @@
 	int	 sort_io_queue;
 	int	 minimum_cmd_size;
 	int	 error_inject;
-	int	 ordered_tag_count;
-	int	 outstanding_cmds;
 	int	 trim_max_ranges;
 	int	 delete_running;
-	int	 tur;
 	int	 delete_available;	/* Delete methods possibly available */
+	u_int	 maxio;
 	uint32_t		unmap_max_ranges;
-	uint32_t		unmap_max_lba;
+	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
 	uint64_t		ws_max_blks;
+	da_delete_methods	delete_method_pref;
 	da_delete_methods	delete_method;
+	da_delete_func_t	*delete_func;
 	struct	 disk_params params;
 	struct	 disk *disk;
 	union	 ccb saved_ccb;
@@ -332,6 +359,21 @@
 		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
 		/*quirks*/ DA_Q_NO_SYNC_CACHE
 	},
+	{
+		/*
+		 * The STEC SSDs sometimes hang on UNMAP.
+		 */
+		{T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
+		/*quirks*/ DA_Q_NO_UNMAP
+	},
+	{
+		/*
+		 * VMware returns BUSY status when storage has transient
+		 * VMware returns BUSY status when storage has transient
+		 * connectivity problems, so it is better to wait.
+		{T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
+		/*quirks*/ DA_Q_RETRY_BUSY
+	},
 	/* USB mass storage devices supported by umass(4) */
 	{
 		/*
@@ -520,6 +562,13 @@
 	},
 	{
 		/*
+		 * PNY USB 3.0 Flash Drives
+		 */
+		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
+		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
+	},
+	{
+		/*
 		 * PNY USB Flash keys
 		 * PR: usb/75578, usb/72344, usb/65436 
 		 */
@@ -662,6 +711,19 @@
 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
 		 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
 	},
+	{
+		/* At least several Transcend USB sticks lie on RC16. */
+		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
+		 "*"}, /*quirks*/ DA_Q_NO_RC16
+	},
+	{
+		/*
+		 * I-O Data USB Flash Disk
+		 * PR: usb/211716
+		 */
+		{T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
+		 "*"}, /*quirks*/ DA_Q_NO_RC16
+	},
 	/* ATA/SATA devices over SAS/USB/... */
 	{
 		/* Hitachi Advanced Format (4k) drives */
@@ -925,12 +987,20 @@
 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
 		/*quirks*/DA_Q_4K
 	},
+        {
+		/*
+		 * Corsair Neutron GTX SSDs
+		 * 4k optimised & trim only works in 4k requests + 4k aligned
+		 */
+		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
+		/*quirks*/DA_Q_4K
+	},
 	{
 		/*
-		 * Corsair Force GT SSDs
+		 * Corsair Force GT & GS SSDs
 		 * 4k optimised & trim only works in 4k requests + 4k aligned
 		 */
-		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force GT*", "*" },
+		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
 		/*quirks*/DA_Q_4K
 	},
 	{
@@ -983,6 +1053,14 @@
 	},
 	{
 		/*
+		 * Intel X25-M Series SSDs
+		 * 4k optimised & trim only works in 4k requests + 4k aligned
+		 */
+		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
+		/*quirks*/DA_Q_4K
+	},
+	{
+		/*
 		 * Kingston E100 Series SSDs
 		 * 4k optimised & trim only works in 4k requests + 4k aligned
 		 */
@@ -999,6 +1077,22 @@
 	},
 	{
 		/*
+		 * Marvell SSDs (entry taken from OpenSolaris)
+		 * 4k optimised & trim only works in 4k requests + 4k aligned
+		 */
+		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
+		/*quirks*/DA_Q_4K
+	},
+	{
+		/*
+		 * OCZ Agility 2 SSDs
+		 * 4k optimised & trim only works in 4k requests + 4k aligned
+		 */
+		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
+		/*quirks*/DA_Q_4K
+	},
+	{
+		/*
 		 * OCZ Agility 3 SSDs
 		 * 4k optimised & trim only works in 4k requests + 4k aligned
 		 */
@@ -1031,6 +1125,14 @@
 	},
 	{
 		/*
+		 * OCZ Vertex 4 SSDs
+		 * 4k optimised & trim only works in 4k requests + 4k aligned
+		 */
+		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
+		/*quirks*/DA_Q_4K
+	},
+	{
+		/*
 		 * Samsung 830 Series SSDs
 		 * 4k optimised & trim only works in 4k requests + 4k aligned
 		 */
@@ -1039,6 +1141,33 @@
 	},
 	{
 		/*
+		 * Samsung 840 SSDs
+		 * 4k optimised & trim only works in 4k requests + 4k aligned
+		 */
+		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
+		/*quirks*/DA_Q_4K
+	},
+	{
+		/*
+		 * Samsung 850 SSDs
+		 * 4k optimised & trim only works in 4k requests + 4k aligned
+		 */
+		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
+		/*quirks*/DA_Q_4K
+	},
+	{
+		/*
+		 * Samsung 843T Series SSDs (MZ7WD*)
+		 * Samsung PM851 Series SSDs (MZ7TE*)
+		 * Samsung PM853T Series SSDs (MZ7GE*)
+		 * Samsung SM863 Series SSDs (MZ7KM*)
+		 * 4k optimised
+		 */
+		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
+		/*quirks*/DA_Q_4K
+	},
+	{
+		/*
 		 * SuperTalent TeraDrive CT SSDs
 		 * 4k optimised & trim only works in 4k requests + 4k aligned
 		 */
@@ -1053,6 +1182,20 @@
 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
 		/*quirks*/DA_Q_4K
 	},
+	{
+		/*
+		 * Hama Innostor USB-Stick
+		 */
+		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
+		/*quirks*/DA_Q_NO_RC16
+	},
+	{
+		/*
+		 * MX-ES USB Drive by Mach Xtreme
+		 */
+		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
+		/*quirks*/DA_Q_NO_RC16
+	},
 };
 
 static	disk_strategy_t	dastrategy;
@@ -1063,8 +1206,11 @@
 static	void		dasysctlinit(void *context, int pending);
 static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
 static	int		dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
+static	int		dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
 static	void		dadeletemethodset(struct da_softc *softc,
 					  da_delete_methods delete_method);
+static	off_t		dadeletemaxsize(struct da_softc *softc,
+					da_delete_methods delete_method);
 static	void		dadeletemethodchoose(struct da_softc *softc,
 					     da_delete_methods default_method);
 static	void		daprobedone(struct cam_periph *periph, union ccb *ccb);
@@ -1157,7 +1303,6 @@
 {
 	struct cam_periph *periph;
 	struct da_softc *softc;
-	int unit;
 	int error;
 
 	periph = (struct cam_periph *)dp->d_drv1;
@@ -1172,18 +1317,10 @@
 		return (error);
 	}
 
-	unit = periph->unit_number;
-	softc = (struct da_softc *)periph->softc;
-	softc->flags |= DA_FLAG_OPEN;
-
 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
 	    ("daopen\n"));
 
-	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
-		/* Invalidate our pack information. */
-		softc->flags &= ~DA_FLAG_PACK_INVALID;
-	}
-
+	softc = (struct da_softc *)periph->softc;
 	dareprobe(periph);
 
 	/* Wait for the disk size update.  */
@@ -1190,11 +1327,9 @@
 	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
 	    "dareprobe", 0);
 	if (error != 0)
-		xpt_print(periph->path, "unable to retrieve capacity data");
+		xpt_print(periph->path, "unable to retrieve capacity data\n");
 
-	if (periph->flags & CAM_PERIPH_INVALID ||
-	    softc->disk->d_sectorsize == 0 ||
-	    softc->disk->d_mediasize == 0)
+	if (periph->flags & CAM_PERIPH_INVALID)
 		error = ENXIO;
 
 	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
@@ -1201,16 +1336,16 @@
 	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
 		daprevent(periph, PR_PREVENT);
 
-	if (error == 0)
-		softc->flags |= DA_FLAG_SAW_MEDIA;
+	if (error == 0) {
+		softc->flags &= ~DA_FLAG_PACK_INVALID;
+		softc->flags |= DA_FLAG_OPEN;
+	}
 
 	cam_periph_unhold(periph);
 	cam_periph_unlock(periph);
 
-	if (error != 0) {
-		softc->flags &= ~DA_FLAG_OPEN;
+	if (error != 0)
 		cam_periph_release(periph);
-	}
 
 	return (error);
 }
@@ -1220,58 +1355,55 @@
 {
 	struct	cam_periph *periph;
 	struct	da_softc *softc;
+	union	ccb *ccb;
+	int error;
 
 	periph = (struct cam_periph *)dp->d_drv1;
+	softc = (struct da_softc *)periph->softc;
 	cam_periph_lock(periph);
-	if (cam_periph_hold(periph, PRIBIO) != 0) {
-		cam_periph_unlock(periph);
-		cam_periph_release(periph);
-		return (0);
-	}
-
-	softc = (struct da_softc *)periph->softc;
-
 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
 	    ("daclose\n"));
 
-	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0
-	 && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
-		union	ccb *ccb;
+	if (cam_periph_hold(periph, PRIBIO) == 0) {
 
-		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
+		/* Flush disk cache. */
+		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
+		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
+		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
+			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
+			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
+			    /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG,
+			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
+			    5 * 60 * 1000);
+			error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
+			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
+			    softc->disk->d_devstat);
+			softc->flags &= ~DA_FLAG_DIRTY;
+			xpt_release_ccb(ccb);
+		}
 
-		scsi_synchronize_cache(&ccb->csio,
-				       /*retries*/1,
-				       /*cbfcnp*/dadone,
-				       MSG_SIMPLE_Q_TAG,
-				       /*begin_lba*/0,/* Cover the whole disk */
-				       /*lb_count*/0,
-				       SSD_FULL_SIZE,
-				       5 * 60 * 1000);
+		/* Allow medium removal. */
+		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
+		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
+			daprevent(periph, PR_ALLOW);
 
-		cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
-				  /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
-				  softc->disk->d_devstat);
-		xpt_release_ccb(ccb);
-
+		cam_periph_unhold(periph);
 	}
 
-	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
-		if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
-			daprevent(periph, PR_ALLOW);
-		/*
-		 * If we've got removeable media, mark the blocksize as
-		 * unavailable, since it could change when new media is
-		 * inserted.
-		 */
+	/*
+	 * If we've got removable media, mark the blocksize as
+	 * unavailable, since it could change when new media is
+	 * inserted.
+	 */
+	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
 		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;
-	}
 
 	softc->flags &= ~DA_FLAG_OPEN;
-	cam_periph_unhold(periph);
+	while (softc->refcount != 0)
+		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
 	cam_periph_unlock(periph);
 	cam_periph_release(periph);
-	return (0);	
+	return (0);
 }
 
 static void
@@ -1278,24 +1410,16 @@
 daschedule(struct cam_periph *periph)
 {
 	struct da_softc *softc = (struct da_softc *)periph->softc;
-	uint32_t prio;
 
 	if (softc->state != DA_STATE_NORMAL)
 		return;
 
-	/* Check if cam_periph_getccb() was called. */
-	prio = periph->immediate_priority;
-
 	/* Check if we have more work to do. */
 	if (bioq_first(&softc->bio_queue) ||
 	    (!softc->delete_running && bioq_first(&softc->delete_queue)) ||
 	    softc->tur) {
-		prio = CAM_PRIORITY_NORMAL;
+		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 	}
-
-	/* Schedule CCB if any of above is true. */
-	if (prio != CAM_PRIORITY_NONE)
-		xpt_schedule(periph, prio);
 }
 
 /*
@@ -1329,12 +1453,7 @@
 	 * Place it in the queue of disk activities for this disk
 	 */
 	if (bp->bio_cmd == BIO_DELETE) {
-		if (bp->bio_bcount == 0)
-			biodone(bp);
-		else if (DA_SIO)
-			bioq_disksort(&softc->delete_queue, bp);
-		else
-			bioq_insert_tail(&softc->delete_queue, bp);
+		bioq_disksort(&softc->delete_queue, bp);
 	} else if (DA_SIO) {
 		bioq_disksort(&softc->bio_queue, bp);
 	} else {
@@ -1378,7 +1497,7 @@
 				/*retries*/0,
 				dadone,
 				MSG_ORDERED_Q_TAG,
-				/*read*/FALSE,
+				/*read*/SCSI_RW_WRITE,
 				/*byte2*/0,
 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
 				offset / secsize,
@@ -1508,9 +1627,6 @@
 	 * done cleaning up its resources.
 	 */
 	disk_gone(softc->disk);
-
-	xpt_print(periph->path, "lost device - %d outstanding, %d refs\n",
-		  softc->outstanding_cmds, periph->refcount);
 }
 
 static void
@@ -1520,7 +1636,6 @@
 
 	softc = (struct da_softc *)periph->softc;
 
-	xpt_print(periph->path, "removing device entry\n");
 	cam_periph_unlock(periph);
 
 	/*
@@ -1558,7 +1673,8 @@
 
 		if (cgd->protocol != PROTO_SCSI)
 			break;
-
+		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
+			break;
 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
 		    && SID_TYPE(&cgd->inq_data) != T_RBC
 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
@@ -1572,7 +1688,7 @@
 		status = cam_periph_alloc(daregister, daoninvalidate,
 					  dacleanup, dastart,
 					  "da", CAM_PERIPH_BIO,
-					  cgd->ccb_h.path, daasync,
+					  path, daasync,
 					  AC_FOUND_DEVICE, cgd);
 
 		if (status != CAM_REQ_CMP
@@ -1612,10 +1728,18 @@
 		     &error_code, &sense_key, &asc, &ascq)) {
 			if (asc == 0x2A && ascq == 0x09) {
 				xpt_print(ccb->ccb_h.path,
-				    "capacity data has changed\n");
+				    "Capacity data has changed\n");
+				softc->flags &= ~DA_FLAG_PROBED;
 				dareprobe(periph);
-			} else if (asc == 0x28 && ascq == 0x00)
+			} else if (asc == 0x28 && ascq == 0x00) {
+				softc->flags &= ~DA_FLAG_PROBED;
 				disk_media_changed(softc->disk, M_NOWAIT);
+			} else if (asc == 0x3F && ascq == 0x03) {
+				xpt_print(ccb->ccb_h.path,
+				    "INQUIRY data has changed\n");
+				softc->flags &= ~DA_FLAG_PROBED;
+				dareprobe(periph);
+			}
 		}
 		cam_periph_async(periph, code, path, arg);
 		break;
@@ -1644,6 +1768,11 @@
 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
 		break;
 	}
+	case AC_INQ_CHANGED:
+		softc = (struct da_softc *)periph->softc;
+		softc->flags &= ~DA_FLAG_PROBED;
+		dareprobe(periph);
+		break;
 	default:
 		break;
 	}
@@ -1687,10 +1816,14 @@
 	 * the fly.
 	 */
 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
-		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW,
+		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
 		softc, 0, dadeletemethodsysctl, "A",
 		"BIO_DELETE execution method");
 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+		OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
+		softc, 0, dadeletemaxsysctl, "Q",
+		"Maximum BIO_DELETE size");
+	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
 		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
 		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
 		"Minimum CDB size");
@@ -1736,6 +1869,29 @@
 }
 
 static int
+dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
+{
+	int error;
+	uint64_t value;
+	struct da_softc *softc;
+
+	softc = (struct da_softc *)arg1;
+
+	value = softc->disk->d_delmaxsize;
+	error = sysctl_handle_64(oidp, &value, 0, req);
+	if ((error != 0) || (req->newptr == NULL))
+		return (error);
+
+	/* only accept values no larger than the calculated value */
+	if (value > dadeletemaxsize(softc, softc->delete_method)) {
+		return (EINVAL);
+	}
+	softc->disk->d_delmaxsize = value;
+
+	return (0);
+}
+
+static int
 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
 {
 	int error, value;
@@ -1771,8 +1927,9 @@
 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
 {
 
-
 	softc->delete_method = delete_method;
+	softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
+	softc->delete_func = da_delete_functions[delete_method];
 
 	if (softc->delete_method > DA_DELETE_DISABLE)
 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
@@ -1780,6 +1937,33 @@
 		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
 }
 
+static off_t
+dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
+{
+	off_t sectors;
+
+	switch (delete_method) {
+	case DA_DELETE_UNMAP:
+		sectors = (off_t)softc->unmap_max_lba;
+		break;
+	case DA_DELETE_ATA_TRIM:
+		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
+		break;
+	case DA_DELETE_WS16:
+		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
+		break;
+	case DA_DELETE_ZERO:
+	case DA_DELETE_WS10:
+		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
+		break;
+	default:
+		return (0);
+	}
+
+	return (off_t)softc->params.secsize *
+	    omin(sectors, softc->params.sectors);
+}
+
 static void
 daprobedone(struct cam_periph *periph, union ccb *ccb)
 {
@@ -1789,32 +1973,24 @@
 
 	dadeletemethodchoose(softc, DA_DELETE_NONE);
 
-	if (bootverbose && (softc->flags & DA_FLAG_PROBED) == 0) {
+	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
 		char buf[80];
 		int i, sep;
 
 		snprintf(buf, sizeof(buf), "Delete methods: <");
 		sep = 0;
-		for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
-			if (softc->delete_available & (1 << i)) {
-				if (sep) {
-					strlcat(buf, ",", sizeof(buf));
-				} else {
-				    sep = 1;
-				}
-				strlcat(buf, da_delete_method_names[i],
-				    sizeof(buf));
-				if (i == softc->delete_method) {
-					strlcat(buf, "(*)", sizeof(buf));
-				}
-			}
+		for (i = 0; i <= DA_DELETE_MAX; i++) {
+			if ((softc->delete_available & (1 << i)) == 0 &&
+			    i != softc->delete_method)
+				continue;
+			if (sep)
+				strlcat(buf, ",", sizeof(buf));
+			strlcat(buf, da_delete_method_names[i],
+			    sizeof(buf));
+			if (i == softc->delete_method)
+				strlcat(buf, "(*)", sizeof(buf));
+			sep = 1;
 		}
-		if (sep == 0) {
-			if (softc->delete_method == DA_DELETE_NONE) 
-				strlcat(buf, "NONE(*)", sizeof(buf));
-			else
-				strlcat(buf, "DISABLED(*)", sizeof(buf));
-		}
 		strlcat(buf, ">", sizeof(buf));
 		printf("%s%d: %s\n", periph->periph_name,
 		    periph->unit_number, buf);
@@ -1830,10 +2006,11 @@
 	 */
 	xpt_release_ccb(ccb);
 	softc->state = DA_STATE_NORMAL;
+	softc->flags |= DA_FLAG_PROBED;
 	daschedule(periph);
 	wakeup(&softc->disk->d_mediasize);
-	if ((softc->flags & DA_FLAG_PROBED) == 0) {
-		softc->flags |= DA_FLAG_PROBED;
+	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
+		softc->flags |= DA_FLAG_ANNOUNCED;
 		cam_periph_unhold(periph);
 	} else
 		cam_periph_release_locked(periph);
@@ -1842,21 +2019,28 @@
 static void
 dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
 {
-	int i, delete_method;
+	int i, methods;
 
-	delete_method = default_method;
+	/* If available, prefer the method requested by user. */
+	i = softc->delete_method_pref;
+	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
+	if (methods & (1 << i)) {
+		dadeletemethodset(softc, i);
+		return;
+	}
 
-	/*
-	 * Use the pre-defined order to choose the best
-	 * performing delete.
-	 */
+	/* Use the pre-defined order to choose the best performing delete. */
 	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
+		if (i == DA_DELETE_ZERO)
+			continue;
 		if (softc->delete_available & (1 << i)) {
 			dadeletemethodset(softc, i);
 			return;
 		}
 	}
-	dadeletemethodset(softc, delete_method);
+
+	/* Fallback to default. */
+	dadeletemethodset(softc, default_method);
 }
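The dadeletemethodchoose()/dadeletemethodsysctl() changes above split
method selection into two steps: honor the user's preference
(delete_method_pref) whenever the device advertises it, with DISABLE
always permitted, and otherwise fall back to the predefined performance
order, skipping ZERO.  A stand-alone sketch of that selection logic,
with constants mirroring the da_delete_methods enum and a plain bitmask
standing in for delete_available:

	#include <stdio.h>

	enum method { M_NONE, M_DISABLE, M_ATA_TRIM, M_UNMAP,
		      M_WS16, M_WS10, M_ZERO };
	#define M_MIN	M_ATA_TRIM
	#define M_MAX	M_ZERO

	static const char *names[] =
	    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };

	static enum method
	choose(int available, enum method pref, enum method deflt)
	{
		int methods, i;

		/* If available, prefer the method requested by the user. */
		methods = available | (1 << M_DISABLE);
		if (methods & (1 << pref))
			return (pref);

		/* Use the predefined order for the best performing delete. */
		for (i = M_MIN; i <= M_MAX; i++) {
			if (i == M_ZERO)
				continue;
			if (available & (1 << i))
				return (i);
		}
		return (deflt);
	}

	int
	main(void)
	{
		int avail = (1 << M_UNMAP) | (1 << M_WS16);

		printf("pref WS16 -> %s\n", names[choose(avail, M_WS16, M_NONE)]);
		printf("pref TRIM -> %s\n", names[choose(avail, M_ATA_TRIM, M_NONE)]);
		printf("nothing   -> %s\n", names[choose(0, M_ATA_TRIM, M_NONE)]);
		return (0);
	}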
 
 static int
@@ -1865,7 +2049,7 @@
 	char buf[16];
 	const char *p;
 	struct da_softc *softc;
-	int i, error, value;
+	int i, error, methods, value;
 
 	softc = (struct da_softc *)arg1;
 
@@ -1878,14 +2062,16 @@
 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
 	if (error != 0 || req->newptr == NULL)
 		return (error);
+	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
 	for (i = 0; i <= DA_DELETE_MAX; i++) {
-		if (!(softc->delete_available & (1 << i)) ||
-		    strcmp(buf, da_delete_method_names[i]) != 0)
-			continue;
-		dadeletemethodset(softc, i);
-		return (0);
+		if (strcmp(buf, da_delete_method_names[i]) == 0)
+			break;
 	}
-	return (EINVAL);
+	if (i > DA_DELETE_MAX)
+		return (EINVAL);
+	softc->delete_method_pref = i;
+	dadeletemethodchoose(softc, DA_DELETE_NONE);
+	return (0);
 }
 
 static cam_status
@@ -1913,7 +2099,7 @@
 	}
 
 	LIST_INIT(&softc->pending_ccbs);
-	softc->state = DA_STATE_PROBE_RC;
+	softc->state = DA_STATE_PROBE_WP;
 	bioq_init(&softc->bio_queue);
 	bioq_init(&softc->delete_queue);
 	bioq_init(&softc->delete_run_queue);
@@ -1961,7 +2147,7 @@
 	 * Schedule a periodic event to occasionally send an
 	 * ordered tag to a device.
 	 */
-	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
+	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
 	callout_reset(&softc->sendordered_c,
 	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
 	    dasendorderedtag, softc);
@@ -1997,9 +2183,9 @@
 		softc->minimum_cmd_size = 16;
 
 	/* Predict whether device may support READ CAPACITY(16). */
-	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3) {
+	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
+	    (softc->quirks & DA_Q_NO_RC16) == 0) {
 		softc->flags |= DA_FLAG_CAN_RC16;
-		softc->state = DA_STATE_PROBE_RC16;
 	}
 
 	/*
@@ -2021,15 +2207,18 @@
 	softc->disk->d_name = "da";
 	softc->disk->d_drv1 = periph;
 	if (cpi.maxio == 0)
-		softc->disk->d_maxsize = DFLTPHYS;	/* traditional default */
+		softc->maxio = DFLTPHYS;	/* traditional default */
 	else if (cpi.maxio > MAXPHYS)
-		softc->disk->d_maxsize = MAXPHYS;	/* for safety */
+		softc->maxio = MAXPHYS;		/* for safety */
 	else
-		softc->disk->d_maxsize = cpi.maxio;
+		softc->maxio = cpi.maxio;
+	softc->disk->d_maxsize = softc->maxio;
 	softc->disk->d_unit = periph->unit_number;
-	softc->disk->d_flags = 0;
+	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION;
 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
 		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
+	if ((cpi.hba_misc & PIM_UNMAPPED) != 0)
+		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
 	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
 	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
 	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
@@ -2064,8 +2253,8 @@
 	 * would be to not attach the device on failure.
 	 */
 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
-	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION,
-	    daasync, periph, periph->path);
+	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
+	    AC_INQ_CHANGED, daasync, periph, periph->path);
 
 	/*
 	 * Emit an attribute changed notification just in case 
@@ -2078,7 +2267,7 @@
 	/*
 	 * Schedule a periodic media polling events.
 	 */
-	callout_init_mtx(&softc->mediapoll_c, periph->sim->mtx, 0);
+	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
 	    (cgd->inq_flags & SID_AEN) == 0 &&
 	    da_poll_period != 0)
@@ -2103,252 +2292,19 @@
 	switch (softc->state) {
 	case DA_STATE_NORMAL:
 	{
-		struct bio *bp, *bp1;
+		struct bio *bp;
 		uint8_t tag_code;
 
-		/* Execute immediate CCB if waiting. */
-		if (periph->immediate_priority <= periph->pinfo.priority) {
-			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
-					("queuing for immediate ccb\n"));
-			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
-			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-					  periph_links.sle);
-			periph->immediate_priority = CAM_PRIORITY_NONE;
-			wakeup(&periph->ccb_list);
-			/* May have more work to do, so ensure we stay scheduled */
-			daschedule(periph);
-			break;
-		}
-
 		/* Run BIO_DELETE if not running yet. */
 		if (!softc->delete_running &&
 		    (bp = bioq_first(&softc->delete_queue)) != NULL) {
-		    uint64_t lba;
-		    uint64_t count; /* forward compat with WS32 */
-
-		    /*
-		     * In each of the methods below, while its the caller's
-		     * responsibility to ensure the request will fit into a
-		     * single device request, we might have changed the delete
-		     * method due to the device incorrectly advertising either
-		     * its supported methods or limits.
-		     * 
-		     * To prevent this causing further issues we validate the
-		     * against the methods limits, and warn which would
-		     * otherwise be unnecessary.
-		     */
-
-		    if (softc->delete_method == DA_DELETE_UNMAP) {
-			uint8_t *buf = softc->unmap_buf;
-			uint64_t lastlba = (uint64_t)-1;
-			uint32_t lastcount = 0, c;
-			uint64_t totalcount = 0;
-			uint32_t off, ranges = 0;
-
-			/*
-			 * Currently this doesn't take the UNMAP
-			 * Granularity and Granularity Alignment
-			 * fields into account.
-			 *
-			 * This could result in both unoptimal unmap
-			 * requests as as well as UNMAP calls unmapping
-			 * fewer LBA's than requested.
-			 */
-
-			softc->delete_running = 1;
-			bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
-			bp1 = bp;
-			do {
-				bioq_remove(&softc->delete_queue, bp1);
-				if (bp1 != bp)
-					bioq_insert_tail(&softc->delete_run_queue, bp1);
-				lba = bp1->bio_pblkno;
-				count = bp1->bio_bcount / softc->params.secsize;
-
-				/* Try to extend the previous range. */
-				if (lba == lastlba) {
-					c = min(count, softc->unmap_max_lba -
-						lastcount);
-					lastcount += c;
-					off = ((ranges - 1) * UNMAP_RANGE_SIZE) +
-					      UNMAP_HEAD_SIZE;
-					scsi_ulto4b(lastcount, &buf[off + 8]);
-					count -= c;
-					lba +=c;
-					totalcount += c;
-				}
-
-				while (count > 0) {
-					c = min(count, softc->unmap_max_lba);
-					if (totalcount + c > softc->unmap_max_lba ||
-					    ranges >= softc->unmap_max_ranges) {
-						xpt_print(periph->path,
-						  "%s issuing short delete %ld > %ld"
-						  "|| %d >= %d",
-						  da_delete_method_desc[softc->delete_method],
-						  totalcount + c, softc->unmap_max_lba,
-						  ranges, softc->unmap_max_ranges);
-						break;
-					}
-					off = (ranges * UNMAP_RANGE_SIZE) +
-					      UNMAP_HEAD_SIZE;
-					scsi_u64to8b(lba, &buf[off + 0]);
-					scsi_ulto4b(c, &buf[off + 8]);
-					lba += c;
-					totalcount += c;
-					ranges++;
-					count -= c;
-					lastcount = c;
-				}
-				lastlba = lba;
-				bp1 = bioq_first(&softc->delete_queue);
-				if (bp1 == NULL ||
-				    ranges >= softc->unmap_max_ranges ||
-				    totalcount + bp1->bio_bcount /
-				     softc->params.secsize > softc->unmap_max_lba)
-					break;
-			} while (1);
-			scsi_ulto2b(ranges * 16 + 6, &buf[0]);
-			scsi_ulto2b(ranges * 16, &buf[2]);
-
-			scsi_unmap(&start_ccb->csio,
-					/*retries*/da_retry_count,
-					/*cbfcnp*/dadone,
-					/*tag_action*/MSG_SIMPLE_Q_TAG,
-					/*byte2*/0,
-					/*data_ptr*/ buf,
-					/*dxfer_len*/ ranges * 16 + 8,
-					/*sense_len*/SSD_FULL_SIZE,
-					da_default_timeout * 1000);
-			start_ccb->ccb_h.ccb_state = DA_CCB_DELETE;
-			goto out;
-		    } else if (softc->delete_method == DA_DELETE_ATA_TRIM) {
-				uint8_t *buf = softc->unmap_buf;
-				uint64_t lastlba = (uint64_t)-1;
-				uint32_t lastcount = 0, c, requestcount;
-				int ranges = 0, off, block_count;
-
-				softc->delete_running = 1;
-				bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
-				bp1 = bp;
-				do {
-					bioq_remove(&softc->delete_queue, bp1);
-					if (bp1 != bp)
-						bioq_insert_tail(&softc->delete_run_queue, bp1);
-					lba = bp1->bio_pblkno;
-					count = bp1->bio_bcount / softc->params.secsize;
-					requestcount = count;
-
-					/* Try to extend the previous range. */
-					if (lba == lastlba) {
-						c = min(count, ATA_DSM_RANGE_MAX - lastcount);
-						lastcount += c;
-						off = (ranges - 1) * 8;
-						buf[off + 6] = lastcount & 0xff;
-						buf[off + 7] = (lastcount >> 8) & 0xff;
-						count -= c;
-						lba += c;
-					}
-
-					while (count > 0) {
-						c = min(count, ATA_DSM_RANGE_MAX);
-						off = ranges * 8;
-
-						buf[off + 0] = lba & 0xff;
-						buf[off + 1] = (lba >> 8) & 0xff;
-						buf[off + 2] = (lba >> 16) & 0xff;
-						buf[off + 3] = (lba >> 24) & 0xff;
-						buf[off + 4] = (lba >> 32) & 0xff;
-						buf[off + 5] = (lba >> 40) & 0xff;
-						buf[off + 6] = c & 0xff;
-						buf[off + 7] = (c >> 8) & 0xff;
-						lba += c;
-						ranges++;
-						count -= c;
-						lastcount = c;
-						if (count != 0 && ranges == softc->trim_max_ranges) {
-							xpt_print(periph->path,
-							  "%s issuing short delete %ld > %ld",
-							  da_delete_method_desc[softc->delete_method],
-							  requestcount,
-							  (softc->trim_max_ranges - ranges) *
-							  ATA_DSM_RANGE_MAX);
-							break;
-						}
-					}
-					lastlba = lba;
-					bp1 = bioq_first(&softc->delete_queue);
-					if (bp1 == NULL ||
-					    bp1->bio_bcount / softc->params.secsize >
-					    (softc->trim_max_ranges - ranges) *
-						    ATA_DSM_RANGE_MAX)
-						break;
-				} while (1);
-
-				block_count = (ranges + ATA_DSM_BLK_RANGES - 1) /
-					      ATA_DSM_BLK_RANGES;
-				scsi_ata_trim(&start_ccb->csio,
-						/*retries*/da_retry_count,
-						/*cbfcnp*/dadone,
-						/*tag_action*/MSG_SIMPLE_Q_TAG,
-						block_count,
-						/*data_ptr*/buf,
-						/*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
-						/*sense_len*/SSD_FULL_SIZE,
-						da_default_timeout * 1000);
-				start_ccb->ccb_h.ccb_state = DA_CCB_DELETE;
+			if (softc->delete_func != NULL) {
+				softc->delete_func(periph, start_ccb, bp);
 				goto out;
-		    } else if (softc->delete_method == DA_DELETE_ZERO ||
-			       softc->delete_method == DA_DELETE_WS10 ||
-			       softc->delete_method == DA_DELETE_WS16) {
-			uint64_t ws_max_blks;
-			ws_max_blks = softc->ws_max_blks / softc->params.secsize;
-			softc->delete_running = 1;
-			lba = bp->bio_pblkno;
-			count = 0;
-			bp1 = bp;
-			do {
-				bioq_remove(&softc->delete_queue, bp1);
-				if (bp1 != bp)
-					bioq_insert_tail(&softc->delete_run_queue, bp1);
-				count += bp1->bio_bcount / softc->params.secsize;
-				if (count > ws_max_blks) {
-					count = min(count, ws_max_blks);
-					xpt_print(periph->path,
-					  "%s issuing short delete %ld > %ld",
-					  da_delete_method_desc[softc->delete_method],
-					  count, ws_max_blks);
-					break;
-				}
-				bp1 = bioq_first(&softc->delete_queue);
-				if (bp1 == NULL ||
-				    lba + count != bp1->bio_pblkno ||
-				    count + bp1->bio_bcount /
-				     softc->params.secsize > ws_max_blks)
-					break;
-			} while (1);
-
-			scsi_write_same(&start_ccb->csio,
-					/*retries*/da_retry_count,
-					/*cbfcnp*/dadone,
-					/*tag_action*/MSG_SIMPLE_Q_TAG,
-					/*byte2*/softc->delete_method ==
-					    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
-					softc->delete_method ==
-					    DA_DELETE_WS16 ? 16 : 10,
-					/*lba*/lba,
-					/*block_count*/count,
-					/*data_ptr*/ __DECONST(void *,
-					    zero_region),
-					/*dxfer_len*/ softc->params.secsize,
-					/*sense_len*/SSD_FULL_SIZE,
-					da_default_timeout * 1000);
-			start_ccb->ccb_h.ccb_state = DA_CCB_DELETE;
-			goto out;
-		    } else {
-			bioq_flush(&softc->delete_queue, NULL, 0);
-			/* FALLTHROUGH */
-		    }
+			} else {
+				bioq_flush(&softc->delete_queue, NULL, 0);
+				/* FALLTHROUGH */
+			}
 		}
 
 		/* Run regular command. */
@@ -2377,7 +2333,7 @@
 		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
 		    (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
 			softc->flags &= ~DA_FLAG_NEED_OTAG;
-			softc->ordered_tag_count++;
+			softc->flags |= DA_FLAG_WAS_OTAG;
 			tag_code = MSG_ORDERED_Q_TAG;
 		} else {
 			tag_code = MSG_SIMPLE_Q_TAG;
@@ -2384,24 +2340,41 @@
 		}
 
 		switch (bp->bio_cmd) {
+		case BIO_WRITE:
 		case BIO_READ:
-		case BIO_WRITE:
+		{
+			void *data_ptr;
+			int rw_op;
+
+			if (bp->bio_cmd == BIO_WRITE) {
+				softc->flags |= DA_FLAG_DIRTY;
+				rw_op = SCSI_RW_WRITE;
+			} else {
+				rw_op = SCSI_RW_READ;
+			}
+
+			data_ptr = bp->bio_data;
+			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
+				rw_op |= SCSI_RW_BIO;
+				data_ptr = bp;
+			}
+
 			scsi_read_write(&start_ccb->csio,
 					/*retries*/da_retry_count,
 					/*cbfcnp*/dadone,
 					/*tag_action*/tag_code,
-					/*read_op*/bp->bio_cmd
-						== BIO_READ,
+					rw_op,
 					/*byte2*/0,
 					softc->minimum_cmd_size,
 					/*lba*/bp->bio_pblkno,
 					/*block_count*/bp->bio_bcount /
 					softc->params.secsize,
-					/*data_ptr*/ bp->bio_data,
+					data_ptr,
 					/*dxfer_len*/ bp->bio_bcount,
 					/*sense_len*/SSD_FULL_SIZE,
 					da_default_timeout * 1000);
 			break;
+		}
 		case BIO_FLUSH:
 			/*
 			 * BIO_FLUSH doesn't currently communicate
@@ -2421,15 +2394,11 @@
 			break;
 		}
 		start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
+		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
 
 out:
-		/*
-		 * Block out any asyncronous callbacks
-		 * while we touch the pending ccb list.
-		 */
 		LIST_INSERT_HEAD(&softc->pending_ccbs,
 				 &start_ccb->ccb_h, periph_links.le);
-		softc->outstanding_cmds++;
 
 		/* We expect a unit attention from this device */
 		if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
@@ -2438,12 +2407,46 @@
 		}
 
 		start_ccb->ccb_h.ccb_bp = bp;
+		softc->refcount++;
+		cam_periph_unlock(periph);
 		xpt_action(start_ccb);
+		cam_periph_lock(periph);
+		softc->refcount--;
 
 		/* May have more work to do, so ensure we stay scheduled */
 		daschedule(periph);
 		break;
 	}
+	case DA_STATE_PROBE_WP:
+	{
+		void  *mode_buf;
+		int    mode_buf_len;
+
+		mode_buf_len = 192;
+		mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT);
+		if (mode_buf == NULL) {
+			xpt_print(periph->path, "Unable to send mode sense - "
+			    "malloc failure\n");
+			softc->state = DA_STATE_PROBE_RC;
+			goto skipstate;
+		}
+		scsi_mode_sense_len(&start_ccb->csio,
+				    /*retries*/ da_retry_count,
+				    /*cbfcnp*/ dadone,
+				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
+				    /*dbd*/ FALSE,
+				    /*pc*/ SMS_PAGE_CTRL_CURRENT,
+				    /*page*/ SMS_ALL_PAGES_PAGE,
+				    /*param_buf*/ mode_buf,
+				    /*param_len*/ mode_buf_len,
+				    /*minimum_cmd_size*/ softc->minimum_cmd_size,
+				    /*sense_len*/ SSD_FULL_SIZE,
+				    /*timeout*/ da_default_timeout * 1000);
+		start_ccb->ccb_h.ccb_bp = NULL;
+		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP;
+		xpt_action(start_ccb);
+		break;
+	}
 	case DA_STATE_PROBE_RC:
 	{
 		struct scsi_read_capacity_data *rcap;
@@ -2485,7 +2488,8 @@
 				      /*lba*/ 0,
 				      /*reladr*/ 0,
 				      /*pmi*/ 0,
-				      rcaplong,
+				      /*rcap_buf*/ (uint8_t *)rcaplong,
+				      /*rcap_buf_len*/ sizeof(*rcaplong),
 				      /*sense_len*/ SSD_FULL_SIZE,
 				      /*timeout*/ da_default_timeout * 1000);
 		start_ccb->ccb_h.ccb_bp = NULL;
@@ -2636,6 +2640,243 @@
 	}
 }
 
+/*
+ * In each of the methods below, while it is the caller's
+ * responsibility to ensure the request will fit into a
+ * single device request, we might have changed the delete
+ * method due to the device incorrectly advertising either
+ * its supported methods or limits.
+ *
+ * To prevent this causing further issues, we validate
+ * against the method's limits and emit a warning, which
+ * would otherwise be unnecessary.
+ */
+static void
+da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
+{
+	struct da_softc *softc = (struct da_softc *)periph->softc;
+	struct bio *bp1;
+	uint8_t *buf = softc->unmap_buf;
+	uint64_t lba, lastlba = (uint64_t)-1;
+	uint64_t totalcount = 0;
+	uint64_t count;
+	uint32_t lastcount = 0, c;
+	uint32_t off, ranges = 0;
+
+	/*
+	 * Currently this doesn't take the UNMAP
+	 * Granularity and Granularity Alignment
+	 * fields into account.
+	 *
+	 * This could result in both suboptimal unmap
+	 * requests as well as UNMAP calls unmapping
+	 * fewer LBAs than requested.
+	 */
+
+	softc->delete_running = 1;
+	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
+	bp1 = bp;
+	do {
+		bioq_remove(&softc->delete_queue, bp1);
+		if (bp1 != bp)
+			bioq_insert_tail(&softc->delete_run_queue, bp1);
+		lba = bp1->bio_pblkno;
+		count = bp1->bio_bcount / softc->params.secsize;
+
+		/* Try to extend the previous range. */
+		if (lba == lastlba) {
+			c = omin(count, UNMAP_RANGE_MAX - lastcount);
+			lastcount += c;
+			off = ((ranges - 1) * UNMAP_RANGE_SIZE) +
+			      UNMAP_HEAD_SIZE;
+			scsi_ulto4b(lastcount, &buf[off + 8]);
+			count -= c;
+			lba += c;
+			totalcount += c;
+		}
+
+		while (count > 0) {
+			c = omin(count, UNMAP_RANGE_MAX);
+			if (totalcount + c > softc->unmap_max_lba ||
+			    ranges >= softc->unmap_max_ranges) {
+				xpt_print(periph->path,
+				    "%s issuing short delete %ld > %ld "
+				    "|| %d >= %d\n",
+				    da_delete_method_desc[softc->delete_method],
+				    totalcount + c, softc->unmap_max_lba,
+				    ranges, softc->unmap_max_ranges);
+				break;
+			}
+			off = (ranges * UNMAP_RANGE_SIZE) + UNMAP_HEAD_SIZE;
+			scsi_u64to8b(lba, &buf[off + 0]);
+			scsi_ulto4b(c, &buf[off + 8]);
+			lba += c;
+			totalcount += c;
+			ranges++;
+			count -= c;
+			lastcount = c;
+		}
+		lastlba = lba;
+		bp1 = bioq_first(&softc->delete_queue);
+		if (bp1 == NULL || ranges >= softc->unmap_max_ranges ||
+		    totalcount + bp1->bio_bcount /
+		    softc->params.secsize > softc->unmap_max_lba)
+			break;
+	} while (1);
+	scsi_ulto2b(ranges * 16 + 6, &buf[0]);
+	scsi_ulto2b(ranges * 16, &buf[2]);
+
+	scsi_unmap(&ccb->csio,
+		   /*retries*/da_retry_count,
+		   /*cbfcnp*/dadone,
+		   /*tag_action*/MSG_SIMPLE_Q_TAG,
+		   /*byte2*/0,
+		   /*data_ptr*/ buf,
+		   /*dxfer_len*/ ranges * 16 + 8,
+		   /*sense_len*/SSD_FULL_SIZE,
+		   da_default_timeout * 1000);
+	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
+	ccb->ccb_h.flags |= CAM_UNLOCKED;
+}
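For reference, the UNMAP parameter list assembled above is an 8-byte header
followed by 16-byte range descriptors: the header carries two big-endian
lengths (the list length excluding its own 2-byte field, hence the +6, and
the descriptor bytes), and each descriptor is a 64-bit starting LBA plus a
32-bit block count.  A minimal userland sketch of the same encoding, where
the putNb helpers stand in for scsi_ulto2b()/scsi_ulto4b()/scsi_u64to8b()
and the buffer size is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UNMAP_HEAD_SIZE		8
#define UNMAP_RANGE_SIZE	16

static void put2b(uint32_t v, uint8_t *p) { p[0] = v >> 8; p[1] = v; }
static void put4b(uint32_t v, uint8_t *p) { p[0] = v >> 24; p[1] = v >> 16;
					    p[2] = v >> 8; p[3] = v; }
static void put8b(uint64_t v, uint8_t *p) { put4b(v >> 32, p); put4b(v, p + 4); }

int
main(void)
{
	uint8_t buf[UNMAP_HEAD_SIZE + 2 * UNMAP_RANGE_SIZE];
	uint32_t ranges = 0, off;

	memset(buf, 0, sizeof(buf));

	/* One descriptor: deallocate 256 blocks starting at LBA 4096. */
	off = ranges * UNMAP_RANGE_SIZE + UNMAP_HEAD_SIZE;
	put8b(4096, &buf[off + 0]);		/* UNMAP LBA */
	put4b(256, &buf[off + 8]);		/* number of blocks */
	ranges++;

	/* Header lengths exclude the 2-byte length field itself (+6). */
	put2b(ranges * 16 + 6, &buf[0]);	/* UNMAP data length */
	put2b(ranges * 16, &buf[2]);		/* block descriptor data length */

	printf("dxfer_len would be %u bytes\n", ranges * 16 + UNMAP_HEAD_SIZE);
	return (0);
}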
+
+static void
+da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
+{
+	struct da_softc *softc = (struct da_softc *)periph->softc;
+	struct bio *bp1;
+	uint8_t *buf = softc->unmap_buf;
+	uint64_t lastlba = (uint64_t)-1;
+	uint64_t count;
+	uint64_t lba;
+	uint32_t lastcount = 0, c, requestcount;
+	int ranges = 0, off, block_count;
+
+	softc->delete_running = 1;
+	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
+	bp1 = bp;
+	do {
+		bioq_remove(&softc->delete_queue, bp1);
+		if (bp1 != bp)
+			bioq_insert_tail(&softc->delete_run_queue, bp1);
+		lba = bp1->bio_pblkno;
+		count = bp1->bio_bcount / softc->params.secsize;
+		requestcount = count;
+
+		/* Try to extend the previous range. */
+		if (lba == lastlba) {
+			c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
+			lastcount += c;
+			off = (ranges - 1) * 8;
+			buf[off + 6] = lastcount & 0xff;
+			buf[off + 7] = (lastcount >> 8) & 0xff;
+			count -= c;
+			lba += c;
+		}
+
+		while (count > 0) {
+			c = omin(count, ATA_DSM_RANGE_MAX);
+			off = ranges * 8;
+
+			buf[off + 0] = lba & 0xff;
+			buf[off + 1] = (lba >> 8) & 0xff;
+			buf[off + 2] = (lba >> 16) & 0xff;
+			buf[off + 3] = (lba >> 24) & 0xff;
+			buf[off + 4] = (lba >> 32) & 0xff;
+			buf[off + 5] = (lba >> 40) & 0xff;
+			buf[off + 6] = c & 0xff;
+			buf[off + 7] = (c >> 8) & 0xff;
+			lba += c;
+			ranges++;
+			count -= c;
+			lastcount = c;
+			if (count != 0 && ranges == softc->trim_max_ranges) {
+				xpt_print(periph->path,
+				    "%s issuing short delete %ld > %ld\n",
+				    da_delete_method_desc[softc->delete_method],
+				    requestcount,
+				    (softc->trim_max_ranges - ranges) *
+				    ATA_DSM_RANGE_MAX);
+				break;
+			}
+		}
+		lastlba = lba;
+		bp1 = bioq_first(&softc->delete_queue);
+		if (bp1 == NULL || bp1->bio_bcount / softc->params.secsize >
+		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX)
+			break;
+	} while (1);
+
+	block_count = (ranges + ATA_DSM_BLK_RANGES - 1) / ATA_DSM_BLK_RANGES;
+	scsi_ata_trim(&ccb->csio,
+		      /*retries*/da_retry_count,
+		      /*cbfcnp*/dadone,
+		      /*tag_action*/MSG_SIMPLE_Q_TAG,
+		      block_count,
+		      /*data_ptr*/buf,
+		      /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
+		      /*sense_len*/SSD_FULL_SIZE,
+		      da_default_timeout * 1000);
+	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
+	ccb->ccb_h.flags |= CAM_UNLOCKED;
+}
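The TRIM buffer built above packs each range into eight little-endian bytes:
a 48-bit starting LBA and a 16-bit block count, so one entry covers at most
ATA_DSM_RANGE_MAX (0xffff) blocks, and the buffer is rounded up to whole
ATA_DSM_BLK_SIZE blocks of ATA_DSM_BLK_RANGES entries each.  A standalone
sketch of a single entry, assuming that layout:

#include <stdint.h>
#include <stdio.h>

/* Pack one DSM TRIM range entry: 48-bit LBA + 16-bit count, little-endian. */
static void
dsm_pack_range(uint8_t entry[8], uint64_t lba, uint16_t nblocks)
{
	entry[0] = lba & 0xff;
	entry[1] = (lba >> 8) & 0xff;
	entry[2] = (lba >> 16) & 0xff;
	entry[3] = (lba >> 24) & 0xff;
	entry[4] = (lba >> 32) & 0xff;
	entry[5] = (lba >> 40) & 0xff;
	entry[6] = nblocks & 0xff;
	entry[7] = (nblocks >> 8) & 0xff;
}

int
main(void)
{
	uint8_t entry[8];
	int i;

	dsm_pack_range(entry, 0x0000123456789aULL, 0xffff);
	for (i = 0; i < 8; i++)
		printf("%02x ", entry[i]);
	printf("\n");
	return (0);
}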
+
+/*
+ * We calculate ws_max_blks here based off d_delmaxsize instead
+ * of using softc->ws_max_blks as it is absolute max for the
+ * device not the protocol max which may well be lower.
+ */
+static void
+da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
+{
+	struct da_softc *softc;
+	struct bio *bp1;
+	uint64_t ws_max_blks;
+	uint64_t lba;
+	uint64_t count; /* forward compat with WS32 */
+
+	softc = (struct da_softc *)periph->softc;
+	ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
+	softc->delete_running = 1;
+	lba = bp->bio_pblkno;
+	count = 0;
+	bp1 = bp;
+	do {
+		bioq_remove(&softc->delete_queue, bp1);
+		if (bp1 != bp)
+			bioq_insert_tail(&softc->delete_run_queue, bp1);
+		count += bp1->bio_bcount / softc->params.secsize;
+		if (count > ws_max_blks) {
+			xpt_print(periph->path,
+			    "%s issuing short delete %ld > %ld\n",
+			    da_delete_method_desc[softc->delete_method],
+			    count, ws_max_blks);
+			count = omin(count, ws_max_blks);
+			break;
+		}
+		bp1 = bioq_first(&softc->delete_queue);
+		if (bp1 == NULL || lba + count != bp1->bio_pblkno ||
+		    count + bp1->bio_bcount /
+		    softc->params.secsize > ws_max_blks)
+			break;
+	} while (1);
+
+	scsi_write_same(&ccb->csio,
+			/*retries*/da_retry_count,
+			/*cbfcnp*/dadone,
+			/*tag_action*/MSG_SIMPLE_Q_TAG,
+			/*byte2*/softc->delete_method ==
+			    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
+			softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
+			/*lba*/lba,
+			/*block_count*/count,
+			/*data_ptr*/ __DECONST(void *, zero_region),
+			/*dxfer_len*/ softc->params.secsize,
+			/*sense_len*/SSD_FULL_SIZE,
+			da_default_timeout * 1000);
+	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
+	ccb->ccb_h.flags |= CAM_UNLOCKED;
+}
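WRITE SAME deletion works differently from UNMAP/TRIM: a single contiguous
LBA range is described in the CDB itself and the one-sector zero_region is
the whole payload, which is why the loop above only merges requests that are
strictly contiguous.  A sketch of the WRITE SAME(16) CDB this ultimately
builds via scsi_write_same(), assuming the SBC-3 layout (opcode 0x93, UNMAP
bit 0x08 in byte 1):

#include <stdint.h>
#include <string.h>

struct ws16_cdb {
	uint8_t	opcode;		/* 0x93, WRITE SAME(16) */
	uint8_t	byte2;		/* 0x08 = UNMAP (SWS_UNMAP) */
	uint8_t	lba[8];		/* big-endian starting LBA */
	uint8_t	length[4];	/* big-endian block count */
	uint8_t	group;
	uint8_t	control;
};

static void
ws16_build(struct ws16_cdb *cdb, uint64_t lba, uint32_t nblocks, int unmap)
{
	int i;

	memset(cdb, 0, sizeof(*cdb));
	cdb->opcode = 0x93;
	cdb->byte2 = unmap ? 0x08 : 0;
	for (i = 0; i < 8; i++)
		cdb->lba[i] = lba >> (56 - 8 * i);
	for (i = 0; i < 4; i++)
		cdb->length[i] = nblocks >> (24 - 8 * i);
}

int
main(void)
{
	struct ws16_cdb cdb;

	ws16_build(&cdb, 1048576, 2048, 1);	/* deallocate 2048 blocks */
	return (0);
}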
+
 static int
 cmd6workaround(union ccb *ccb)
 {
@@ -2659,7 +2900,7 @@
 		 *
 		 * While we will attempt to choose an alternative delete method
 		 * this may result in short deletes if the existing delete
-		 * requests from geom are big for the new method choosen.
+		 * requests from geom are too big for the new method chosen.
 		 *
 		 * This method assumes that the error which triggered this
 		 * will not retry the io otherwise a panic will occur
@@ -2676,21 +2917,37 @@
 				  da_delete_method_desc[old_method],
 				  da_delete_method_desc[softc->delete_method]);
 
-		if (DA_SIO) {
-			while ((bp = bioq_takefirst(&softc->delete_run_queue))
-			    != NULL)
-				bioq_disksort(&softc->delete_queue, bp);
-		} else {
-			while ((bp = bioq_takefirst(&softc->delete_run_queue))
-			    != NULL)
-				bioq_insert_tail(&softc->delete_queue, bp);
-		}
-		bioq_insert_tail(&softc->delete_queue,
+		while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
+			bioq_disksort(&softc->delete_queue, bp);
+		bioq_disksort(&softc->delete_queue,
 		    (struct bio *)ccb->ccb_h.ccb_bp);
 		ccb->ccb_h.ccb_bp = NULL;
 		return (0);
 	}
 
+	/* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
+	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
+	    (*cdb == PREVENT_ALLOW) &&
+	    (softc->quirks & DA_Q_NO_PREVENT) == 0) {
+		if (bootverbose)
+			xpt_print(ccb->ccb_h.path,
+			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
+		softc->quirks |= DA_Q_NO_PREVENT;
+		return (0);
+	}
+
+	/* Detect unsupported SYNCHRONIZE CACHE(10). */
+	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
+	    (*cdb == SYNCHRONIZE_CACHE) &&
+	    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
+		if (bootverbose)
+			xpt_print(ccb->ccb_h.path,
+			    "SYNCHRONIZE CACHE(10) not supported.\n");
+		softc->quirks |= DA_Q_NO_SYNC_CACHE;
+		softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
+		return (0);
+	}
+
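Both new checks follow the same run-time quirk-learning pattern: the first
rejection of an optional command sets a quirk bit so the command is never
issued to that device again, and the failed request is simply completed.  A
toy illustration of the idea, with all names hypothetical:

#include <stdint.h>
#include <stdio.h>

#define Q_NO_PREVENT	0x01
#define Q_NO_SYNC_CACHE	0x02

static uint32_t quirks;

/* Pretend the device rejects every optional command. */
static int
issue_optional_cmd(uint8_t opcode)
{
	(void)opcode;
	return (-1);
}

static void
learn_quirk(uint8_t opcode)
{
	if (opcode == 0x1e && !(quirks & Q_NO_PREVENT)) {
		quirks |= Q_NO_PREVENT;		/* PREVENT ALLOW MEDIUM REMOVAL */
		printf("learned: no PREVENT ALLOW\n");
	} else if (opcode == 0x35 && !(quirks & Q_NO_SYNC_CACHE)) {
		quirks |= Q_NO_SYNC_CACHE;	/* SYNCHRONIZE CACHE(10) */
		printf("learned: no SYNCHRONIZE CACHE(10)\n");
	}
}

int
main(void)
{
	if (issue_optional_cmd(0x1e) != 0)
		learn_quirk(0x1e);
	if (issue_optional_cmd(0x35) != 0)
		learn_quirk(0x35);
	return (0);
}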
 	/* Translation only possible if CDB is an array and cmd is R/W6 */
 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
 	    (*cdb != READ_6 && *cdb != WRITE_6))
@@ -2745,6 +3002,7 @@
 	{
 		struct bio *bp, *bp1;
 
+		cam_periph_lock(periph);
 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 			int error;
@@ -2761,6 +3019,7 @@
 				 * A retry was scheduled, so
 				 * just return.
 				 */
+				cam_periph_unlock(periph);
 				return;
 			}
 			bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
@@ -2797,7 +3056,10 @@
 					bp->bio_flags |= BIO_ERROR;
 				}
 			} else if (bp != NULL) {
-				bp->bio_resid = csio->resid;
+				if (state == DA_CCB_DELETE)
+					bp->bio_resid = 0;
+				else
+					bp->bio_resid = csio->resid;
 				bp->bio_error = 0;
 				if (bp->bio_resid != 0)
 					bp->bio_flags |= BIO_ERROR;
@@ -2811,7 +3073,10 @@
 		} else if (bp != NULL) {
 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 				panic("REQ_CMP with QFRZN");
-			bp->bio_resid = csio->resid;
+			if (state == DA_CCB_DELETE)
+				bp->bio_resid = 0;
+			else
+				bp->bio_resid = csio->resid;
 			if (csio->resid > 0)
 				bp->bio_flags |= BIO_ERROR;
 			if (softc->error_inject != 0) {
@@ -2820,40 +3085,94 @@
 				bp->bio_flags |= BIO_ERROR;
 				softc->error_inject = 0;
 			}
-
 		}
 
-		/*
-		 * Block out any asyncronous callbacks
-		 * while we touch the pending ccb list.
-		 */
 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
-		softc->outstanding_cmds--;
-		if (softc->outstanding_cmds == 0)
-			softc->flags |= DA_FLAG_WENT_IDLE;
+		if (LIST_EMPTY(&softc->pending_ccbs))
+			softc->flags |= DA_FLAG_WAS_OTAG;
 
-		if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
-			xpt_print(periph->path, "oustanding %d\n",
-				  softc->outstanding_cmds);
-		}
+		xpt_release_ccb(done_ccb);
+		if (state == DA_CCB_DELETE) {
+			TAILQ_HEAD(, bio) queue;
 
-		if (state == DA_CCB_DELETE) {
-			while ((bp1 = bioq_takefirst(&softc->delete_run_queue))
-			    != NULL) {
-				bp1->bio_resid = bp->bio_resid;
+			TAILQ_INIT(&queue);
+			TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
+			softc->delete_run_queue.insert_point = NULL;
+			/*
+			 * Normally, the xpt_release_ccb() above would make sure
+			 * that when we have more work to do, that work would
+			 * get kicked off. However, we keep delete_running set
+			 * across that call so no new delete is started from
+			 * that path, which lets other I/O make progress when
+			 * many BIO_DELETE requests are pushed down. We then
+			 * clear delete_running and call daschedule again so
+			 * that we don't stall if there are no other I/Os
+			 * pending apart from BIO_DELETEs.
+			 */
+			softc->delete_running = 0;
+			daschedule(periph);
+			cam_periph_unlock(periph);
+			while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
+				TAILQ_REMOVE(&queue, bp1, bio_queue);
 				bp1->bio_error = bp->bio_error;
-				if (bp->bio_flags & BIO_ERROR)
+				if (bp->bio_flags & BIO_ERROR) {
 					bp1->bio_flags |= BIO_ERROR;
+					bp1->bio_resid = bp1->bio_bcount;
+				} else
+					bp1->bio_resid = 0;
 				biodone(bp1);
 			}
-			softc->delete_running = 0;
-			if (bp != NULL)
-				biodone(bp);
-			daschedule(periph);
-		} else if (bp != NULL)
+		} else
+			cam_periph_unlock(periph);
+		if (bp != NULL)
 			biodone(bp);
-		break;
+		return;
 	}
+	case DA_CCB_PROBE_WP:
+	{
+		struct scsi_mode_header_6 *mode_hdr6;
+		struct scsi_mode_header_10 *mode_hdr10;
+		uint8_t dev_spec;
+
+		if (softc->minimum_cmd_size > 6) {
+			mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr;
+			dev_spec = mode_hdr10->dev_spec;
+		} else {
+			mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr;
+			dev_spec = mode_hdr6->dev_spec;
+		}
+		if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
+			if ((dev_spec & 0x80) != 0)
+				softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
+			else
+				softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;
+		} else {
+			int error;
+
+			error = daerror(done_ccb, CAM_RETRY_SELTO,
+					SF_RETRY_UA|SF_NO_PRINT);
+			if (error == ERESTART)
+				return;
+			else if (error != 0) {
+				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+					/* Don't wedge this device's queue */
+					cam_release_devq(done_ccb->ccb_h.path,
+							 /*relsim_flags*/0,
+							 /*reduction*/0,
+							 /*timeout*/0,
+							 /*getcount_only*/0);
+				}
+			}
+		}
+
+		free(csio->data_ptr, M_SCSIDA);
+		xpt_release_ccb(done_ccb);
+		if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
+			softc->state = DA_STATE_PROBE_RC16;
+		else
+			softc->state = DA_STATE_PROBE_RC;
+		xpt_schedule(periph, priority);
+		return;
+	}
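The write-protect probe only needs the mode parameter header: for
direct-access devices, bit 7 (0x80) of the device-specific parameter byte is
the WP flag, in both the 6- and 10-byte header forms checked above.  Minimal
userland illustration:

#include <stdint.h>
#include <stdio.h>

/* SPC-4 mode parameter header(6); only dev_spec matters here. */
struct mode_header_6 {
	uint8_t	data_length;
	uint8_t	medium_type;
	uint8_t	dev_spec;	/* bit 7 = WP for direct-access devices */
	uint8_t	blk_desc_len;
};

int
main(void)
{
	struct mode_header_6 hdr = { 0, 0, 0x80, 0 };

	printf("write protected: %s\n", (hdr.dev_spec & 0x80) ? "yes" : "no");
	return (0);
}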
 	case DA_CCB_PROBE_RC:
 	case DA_CCB_PROBE_RC16:
 	{
@@ -2911,10 +3230,12 @@
 			 * give them an 'illegal' value we'll avoid that
 			 * here.
 			 */
-			if (block_size == 0 && maxsector == 0) {
-				snprintf(announce_buf, sizeof(announce_buf),
-				        "0MB (no media?)");
-			} else if (block_size >= MAXPHYS || block_size == 0) {
+			if (block_size == 0) {
+				block_size = 512;
+				if (maxsector == 0)
+					maxsector = -1;
+			}
+			if (block_size >= MAXPHYS) {
 				xpt_print(periph->path,
 				    "unsupportable block size %ju\n",
 				    (uintmax_t) block_size);
@@ -2931,13 +3252,10 @@
 				lbp = (lalba & SRC16_LBPME_A);
 				dp = &softc->params;
 				snprintf(announce_buf, sizeof(announce_buf),
-				        "%juMB (%ju %u byte sectors: %dH %dS/T "
-                                        "%dC)", (uintmax_t)
-	                                (((uintmax_t)dp->secsize *
-				        dp->sectors) / (1024*1024)),
-			                (uintmax_t)dp->sectors,
-				        dp->secsize, dp->heads,
-                                        dp->secs_per_track, dp->cylinders);
+				    "%juMB (%ju %u byte sectors)",
+				    ((uintmax_t)dp->secsize * dp->sectors) /
+				     (1024 * 1024),
+				    (uintmax_t)dp->sectors, dp->secsize);
 			}
 		} else {
 			int	error;
@@ -3014,6 +3332,7 @@
 					const char *sense_key_desc;
 					const char *asc_desc;
 
+					dasetgeom(periph, 512, -1, NULL, 0);
 					scsi_sense_desc(sense_key, asc, ascq,
 							&cgd.inq_data,
 							&sense_key_desc,
@@ -3045,7 +3364,8 @@
 			}
 		}
 		free(csio->data_ptr, M_SCSIDA);
-		if (announce_buf[0] != '\0' && ((softc->flags & DA_FLAG_PROBED) == 0)) {
+		if (announce_buf[0] != '\0' &&
+		    ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
 			/*
 			 * Create our sysctl variables, now that we know
 			 * we have successfully attached.
@@ -3063,14 +3383,21 @@
 			}
 		}
 
+		/* We already probed the device. */
+		if (softc->flags & DA_FLAG_PROBED) {
+			daprobedone(periph, done_ccb);
+			return;
+		}
+
 		/* Ensure re-probe doesn't see old delete. */
 		softc->delete_available = 0;
-		if (lbp) {
+		dadeleteflag(softc, DA_DELETE_ZERO, 1);
+		if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
 			/*
 			 * Based on older SBC-3 spec revisions
 			 * any of the UNMAP methods "may" be
 			 * available via LBP given this flag so
-			 * we flag all of them as availble and
+			 * we flag all of them as available and
 			 * then remove those which further
 			 * probes confirm aren't available
 			 * later.
@@ -3081,7 +3408,6 @@
 			 */
 			dadeleteflag(softc, DA_DELETE_WS16, 1);
 			dadeleteflag(softc, DA_DELETE_WS10, 1);
-			dadeleteflag(softc, DA_DELETE_ZERO, 1);
 			dadeleteflag(softc, DA_DELETE_UNMAP, 1);
 
 			xpt_release_ccb(done_ccb);
@@ -3110,18 +3436,8 @@
 				     (lbp->flags & SVPD_LBP_WS16));
 			dadeleteflag(softc, DA_DELETE_WS10,
 				     (lbp->flags & SVPD_LBP_WS10));
-			dadeleteflag(softc, DA_DELETE_ZERO,
-				     (lbp->flags & SVPD_LBP_WS10));
 			dadeleteflag(softc, DA_DELETE_UNMAP,
 				     (lbp->flags & SVPD_LBP_UNMAP));
-
-			if (lbp->flags & SVPD_LBP_UNMAP) {
-				free(lbp, M_SCSIDA);
-				xpt_release_ccb(done_ccb);
-				softc->state = DA_STATE_PROBE_BLK_LIMITS;
-				xpt_schedule(periph, priority);
-				return;
-			}
 		} else {
 			int error;
 			error = daerror(done_ccb, CAM_RETRY_SELTO,
@@ -3147,7 +3463,7 @@
 
 		free(lbp, M_SCSIDA);
 		xpt_release_ccb(done_ccb);
-		softc->state = DA_STATE_PROBE_BDC;
+		softc->state = DA_STATE_PROBE_BLK_LIMITS;
 		xpt_schedule(periph, priority);
 		return;
 	}
@@ -3158,6 +3474,8 @@
 		block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;
 
 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+			uint32_t max_txfer_len = scsi_4btoul(
+				block_limits->max_txfer_len);
 			uint32_t max_unmap_lba_cnt = scsi_4btoul(
 				block_limits->max_unmap_lba_cnt);
 			uint32_t max_unmap_blk_cnt = scsi_4btoul(
@@ -3164,6 +3482,12 @@
 				block_limits->max_unmap_blk_cnt);
 			uint64_t ws_max_blks = scsi_8btou64(
 				block_limits->max_write_same_length);
+
+			if (max_txfer_len != 0) {
+				softc->disk->d_maxsize = MIN(softc->maxio,
+				    (off_t)max_txfer_len * softc->params.secsize);
+			}
+
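MAXIMUM TRANSFER LENGTH from the Block Limits VPD page is expressed in
logical blocks, so it is scaled by the sector size and bounded by the
controller's own limit before becoming d_maxsize.  The same clamp in
isolation, with example values:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t maxio = 1024 * 1024;		/* controller limit, bytes */
	uint32_t max_txfer_len = 65535;		/* VPD value, in blocks */
	uint32_t secsize = 512;

	printf("d_maxsize = %ju\n", (uintmax_t)
	    MIN(maxio, (uint64_t)max_txfer_len * secsize));
	return (0);
}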
 			/*
 			 * We should already support UNMAP but we check lba
 			 * and block count to be sure
@@ -3225,9 +3549,18 @@
 			 * Disable queue sorting for non-rotational media
 			 * by default.
 			 */
-			if (scsi_2btoul(bdc->medium_rotation_rate) ==
-			    SVPD_BDC_RATE_NONE_ROTATING)
+			u_int16_t old_rate = softc->disk->d_rotation_rate;
+
+			softc->disk->d_rotation_rate =
+				scsi_2btoul(bdc->medium_rotation_rate);
+			if (softc->disk->d_rotation_rate ==
+			    SVPD_BDC_RATE_NON_ROTATING) {
 				softc->sort_io_queue = 0;
+			}
+			if (softc->disk->d_rotation_rate != old_rate) {
+				disk_attr_changed(softc->disk,
+				    "GEOM::rotation_rate", M_NOWAIT);
+			}
 		} else {
 			int error;
 			error = daerror(done_ccb, CAM_RETRY_SELTO,
@@ -3262,9 +3595,12 @@
 		ptr = (uint16_t *)ata_params;
 
 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+			uint16_t old_rate;
+
 			for (i = 0; i < sizeof(*ata_params) / 2; i++)
 				ptr[i] = le16toh(ptr[i]);
-			if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM) {
+			if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
+			    (softc->quirks & DA_Q_NO_UNMAP) == 0) {
 				dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
 				if (ata_params->max_dsm_blocks != 0)
 					softc->trim_max_ranges = min(
@@ -3276,8 +3612,18 @@
 			 * Disable queue sorting for non-rotational media
 			 * by default.
 			 */
-			if (ata_params->media_rotation_rate == 1)
+			old_rate = softc->disk->d_rotation_rate;
+			softc->disk->d_rotation_rate =
+			    ata_params->media_rotation_rate;
+			if (softc->disk->d_rotation_rate ==
+			    ATA_RATE_NON_ROTATING) {
 				softc->sort_io_queue = 0;
+			}
+
+			if (softc->disk->d_rotation_rate != old_rate) {
+				disk_attr_changed(softc->disk,
+				    "GEOM::rotation_rate", M_NOWAIT);
+			}
 		} else {
 			int error;
 			error = daerror(done_ccb, CAM_RETRY_SELTO,
@@ -3300,12 +3646,6 @@
 		daprobedone(periph, done_ccb);
 		return;
 	}
-	case DA_CCB_WAITING:
-	{
-		/* Caller will release the CCB */
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
-	}
 	case DA_CCB_DUMP:
 		/* No-op.  We're polling */
 		return;
@@ -3350,11 +3690,7 @@
 	KASSERT(status == CAM_REQ_CMP,
 	    ("dareprobe: cam_periph_acquire failed"));
 
-	if (softc->flags & DA_FLAG_CAN_RC16)
-		softc->state = DA_STATE_PROBE_RC16;
-	else
-		softc->state = DA_STATE_PROBE_RC;
-
+	softc->state = DA_STATE_PROBE_WP;
 	xpt_schedule(periph, CAM_PRIORITY_DEV);
 }
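With this change a (re)probe always starts at the write-protect check and,
as wired up elsewhere in this diff, walks roughly the chain

    DA_STATE_PROBE_WP -> DA_STATE_PROBE_RC16 (or DA_STATE_PROBE_RC, per
    DA_FLAG_CAN_RC16) -> DA_STATE_PROBE_LBP -> DA_STATE_PROBE_BLK_LIMITS
    -> DA_STATE_PROBE_BDC -> DA_STATE_PROBE_ATA -> daprobedone()

so a unit attention that forces a reprobe refreshes the write-protect state
and the geometry together.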
 
@@ -3385,15 +3721,23 @@
 		 */
 		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
 		    asc == 0x2A && ascq == 0x09) {
-			xpt_print(periph->path, "capacity data has changed\n");
+			xpt_print(periph->path, "Capacity data has changed\n");
+			softc->flags &= ~DA_FLAG_PROBED;
 			dareprobe(periph);
 			sense_flags |= SF_NO_PRINT;
 		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
-		    asc == 0x28 && ascq == 0x00)
+		    asc == 0x28 && ascq == 0x00) {
+			softc->flags &= ~DA_FLAG_PROBED;
 			disk_media_changed(softc->disk, M_NOWAIT);
-		else if (sense_key == SSD_KEY_NOT_READY &&
-		    asc == 0x3a && (softc->flags & DA_FLAG_SAW_MEDIA)) {
-			softc->flags &= ~DA_FLAG_SAW_MEDIA;
+		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
+		    asc == 0x3F && ascq == 0x03) {
+			xpt_print(periph->path, "INQUIRY data has changed\n");
+			softc->flags &= ~DA_FLAG_PROBED;
+			dareprobe(periph);
+			sense_flags |= SF_NO_PRINT;
+		} else if (sense_key == SSD_KEY_NOT_READY &&
+		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
+			softc->flags |= DA_FLAG_PACK_INVALID;
 			disk_media_gone(softc->disk, M_NOWAIT);
 		}
 	}
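The sense handling above dispatches on (sense key, ASC, ASCQ) triples and
clears DA_FLAG_PROBED so the next access re-runs the probe chain.  The unit
attention triples handled here, in table form as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

struct ua_action {
	uint8_t asc, ascq;
	const char *action;
};

static const struct ua_action ua_table[] = {
	{ 0x2a, 0x09, "capacity data has changed: reprobe" },
	{ 0x28, 0x00, "medium may have changed: notify disk(9)" },
	{ 0x3f, 0x03, "INQUIRY data has changed: reprobe" },
};

int
main(void)
{
	uint8_t asc = 0x2a, ascq = 0x09;
	size_t i;

	for (i = 0; i < sizeof(ua_table) / sizeof(ua_table[0]); i++)
		if (ua_table[i].asc == asc && ua_table[i].ascq == ascq)
			printf("%s\n", ua_table[i].action);
	return (0);
}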
@@ -3406,6 +3750,9 @@
 	 * don't treat UAs as errors.
 	 */
 	sense_flags |= SF_RETRY_UA;
+
+	if (softc->quirks & DA_Q_RETRY_BUSY)
+		sense_flags |= SF_RETRY_BUSY;
 	return(cam_periph_error(ccb, cam_flags, sense_flags,
 				&softc->saved_ccb));
 }
@@ -3416,7 +3763,7 @@
 	struct cam_periph *periph = arg;
 	struct da_softc *softc = periph->softc;
 
-	if (!softc->tur && softc->outstanding_cmds == 0) {
+	if (!softc->tur && LIST_EMPTY(&softc->pending_ccbs)) {
 		if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
 			softc->tur = 1;
 			daschedule(periph);
@@ -3454,7 +3801,7 @@
 		     5000);
 
 	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
-	    SF_RETRY_UA | SF_QUIET_IR, softc->disk->d_devstat);
+	    SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
 
 	if (error == 0) {
 		if (action == PR_ALLOW)
@@ -3474,6 +3821,7 @@
 	struct da_softc *softc;
 	struct disk_params *dp;
 	u_int lbppbe, lalba;
+	int error;
 
 	softc = (struct da_softc *)periph->softc;
 
@@ -3548,7 +3896,7 @@
 		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 		cdai.buftype = CDAI_TYPE_RCAPLONG;
-		cdai.flags |= CDAI_FLAG_STORE;
+		cdai.flags = CDAI_FLAG_STORE;
 		cdai.bufsiz = rcap_len;
 		cdai.buf = (uint8_t *)rcaplong;
 		xpt_action((union ccb *)&cdai);
@@ -3575,6 +3923,10 @@
 	softc->disk->d_fwheads = softc->params.heads;
 	softc->disk->d_devstat->block_size = softc->params.secsize;
 	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
+
+	error = disk_resize(softc->disk, M_NOWAIT);
+	if (error != 0)
+		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
 }
 
 static void
@@ -3583,14 +3935,11 @@
 	struct da_softc *softc = arg;
 
 	if (da_send_ordered) {
-		if ((softc->ordered_tag_count == 0) 
-		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
-			softc->flags |= DA_FLAG_NEED_OTAG;
+		if (!LIST_EMPTY(&softc->pending_ccbs)) {
+			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
+				softc->flags |= DA_FLAG_NEED_OTAG;
+			softc->flags &= ~DA_FLAG_WAS_OTAG;
 		}
-		if (softc->outstanding_cmds > 0)
-			softc->flags &= ~DA_FLAG_WENT_IDLE;
-
-		softc->ordered_tag_count = 0;
 	}
 	/* Queue us up again */
 	callout_reset(&softc->sendordered_c,
@@ -3611,8 +3960,16 @@
 	int error;
 
 	CAM_PERIPH_FOREACH(periph, &dadriver) {
+		softc = (struct da_softc *)periph->softc;
+		if (SCHEDULER_STOPPED()) {
+			/* If we panicked with the lock held, do not recurse. */
+			if (!cam_periph_owned(periph) &&
+			    (softc->flags & DA_FLAG_OPEN)) {
+				dadump(softc->disk, NULL, 0, 0, 0);
+			}
+			continue;
+		}
 		cam_periph_lock(periph);
-		softc = (struct da_softc *)periph->softc;
 
 		/*
 		 * We only sync the cache if the drive is still open, and
@@ -3647,9 +4004,9 @@
 #else /* !_KERNEL */
 
 /*
- * XXX This is only left out of the kernel build to silence warnings.  If,
- * for some reason this function is used in the kernel, the ifdefs should
- * be moved so it is included both in the kernel and userland.
+ * XXX These are only left out of the kernel build to silence warnings.  If,
+ * for some reason these functions are used in the kernel, the ifdefs should
+ * be moved so they are included both in the kernel and userland.
  */
 void
 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
@@ -3677,4 +4034,84 @@
 		      timeout);
 }
 
+void
+scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
+		  void (*cbfcnp)(struct cam_periph *, union ccb *),
+		  uint8_t tag_action, uint8_t list_format,
+		  uint32_t addr_desc_index, uint8_t *data_ptr,
+		  uint32_t dxfer_len, int minimum_cmd_size, 
+		  uint8_t sense_len, uint32_t timeout)
+{
+	uint8_t cdb_len;
+
+	/*
+	 * These conditions allow using the 10 byte command.  Otherwise we
+	 * need to use the 12 byte command.
+	 */
+	if ((minimum_cmd_size <= 10)
+	 && (addr_desc_index == 0) 
+	 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
+		struct scsi_read_defect_data_10 *cdb10;
+
+		cdb10 = (struct scsi_read_defect_data_10 *)
+			&csio->cdb_io.cdb_bytes;
+
+		cdb_len = sizeof(*cdb10);
+		bzero(cdb10, cdb_len);
+		cdb10->opcode = READ_DEFECT_DATA_10;
+		cdb10->format = list_format;
+		scsi_ulto2b(dxfer_len, cdb10->alloc_length);
+	} else {
+		struct scsi_read_defect_data_12 *cdb12;
+
+		cdb12 = (struct scsi_read_defect_data_12 *)
+			&csio->cdb_io.cdb_bytes;
+
+		cdb_len = sizeof(*cdb12);
+		bzero(cdb12, cdb_len);
+		cdb12->opcode = READ_DEFECT_DATA_12;
+		cdb12->format = list_format;
+		scsi_ulto4b(dxfer_len, cdb12->alloc_length);
+		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
+	}
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/ CAM_DIR_IN,
+		      tag_action,
+		      data_ptr,
+		      dxfer_len,
+		      sense_len,
+		      cdb_len,
+		      timeout);
+}
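A hypothetical kernel-context caller (the buffer, its length and the
completion callback are placeholders): requesting the grown defect list in
physical-sector format with minimum_cmd_size 10, addr_desc_index 0 and a
length within SRDD10_MAX_LENGTH selects the 10-byte CDB path above.

	scsi_read_defects(&ccb->csio,
			  /*retries*/ 1,
			  /*cbfcnp*/ probedone,	/* hypothetical callback */
			  /*tag_action*/ MSG_SIMPLE_Q_TAG,
			  /*list_format*/ SRDD10_GLIST |
			      SRDD10_PHYSICAL_SECTOR_FORMAT,
			  /*addr_desc_index*/ 0,
			  /*data_ptr*/ buf,	/* caller-allocated buffer */
			  /*dxfer_len*/ buflen,	/* <= SRDD10_MAX_LENGTH */
			  /*minimum_cmd_size*/ 10,
			  /*sense_len*/ SSD_FULL_SIZE,
			  /*timeout*/ 60 * 1000);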
+
+void
+scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
+	      void (*cbfcnp)(struct cam_periph *, union ccb *),
+	      u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
+	      u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
+	      u_int32_t timeout)
+{
+	struct scsi_sanitize *scsi_cmd;
+
+	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
+	scsi_cmd->opcode = SANITIZE;
+	scsi_cmd->byte2 = byte2;
+	scsi_cmd->control = control;
+	scsi_ulto2b(dxfer_len, scsi_cmd->length);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
+		      tag_action,
+		      data_ptr,
+		      dxfer_len,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
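A hypothetical caller starting an immediate cryptographic erase: CRYPTO
ERASE takes no parameter list, so dxfer_len is 0 and the transfer direction
collapses to CAM_DIR_NONE, as the flags expression above shows.

	scsi_sanitize(&ccb->csio,
		      /*retries*/ 1,
		      /*cbfcnp*/ NULL,
		      /*tag_action*/ MSG_SIMPLE_Q_TAG,
		      /*byte2*/ SSZ_SERVICE_ACTION_CRYPTO_ERASE | SSZ_IMMED,
		      /*control*/ 0,
		      /*data_ptr*/ NULL,
		      /*dxfer_len*/ 0,
		      /*sense_len*/ SSD_FULL_SIZE,
		      /*timeout*/ 60 * 60 * 1000);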
 #endif /* _KERNEL */

Modified: trunk/sys/cam/scsi/scsi_da.h
===================================================================
--- trunk/sys/cam/scsi/scsi_da.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_da.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Structures and definitions for SCSI commands to Direct Access Devices
  */
@@ -46,7 +47,7 @@
  *
  * Ported to run under 386BSD by Julian Elischer (julian at tfs.com) Sept 1992
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_da.h 311403 2017-01-05 11:23:55Z mav $
  */
 
 #ifndef	_SCSI_SCSI_DA_H
@@ -92,55 +93,65 @@
 
 struct scsi_read_defect_data_10
 {
-	u_int8_t opcode;
-
-	/* 
-	 * The most significant 3 bits are the LUN, the other 5 are
-	 * reserved.
-	 */
-#define SRDD10_LUN_MASK 0xE0
-	u_int8_t byte2;
+	uint8_t opcode;
+	uint8_t byte2;
 #define SRDD10_GLIST 0x08
 #define SRDD10_PLIST 0x10
 #define SRDD10_DLIST_FORMAT_MASK 0x07
 #define SRDD10_BLOCK_FORMAT            0x00
+#define SRDD10_EXT_BFI_FORMAT 	       0x01
+#define SRDD10_EXT_PHYS_FORMAT 	       0x02
+#define SRDD10_LONG_BLOCK_FORMAT       0x03
 #define SRDD10_BYTES_FROM_INDEX_FORMAT 0x04
 #define SRDD10_PHYSICAL_SECTOR_FORMAT  0x05
-	u_int8_t format;
-
-	u_int8_t reserved[4];
-
-	u_int8_t alloc_length[2];
+#define SRDD10_VENDOR_FORMAT	       0x06
+	uint8_t format;
+	uint8_t reserved[4];
+	uint8_t alloc_length[2];
 #define	SRDD10_MAX_LENGTH		0xffff
+	uint8_t control;
+};
 
+struct scsi_sanitize
+{
+	u_int8_t opcode;
+	u_int8_t byte2;
+#define SSZ_SERVICE_ACTION_OVERWRITE         0x01
+#define SSZ_SERVICE_ACTION_BLOCK_ERASE       0x02
+#define SSZ_SERVICE_ACTION_CRYPTO_ERASE      0x03
+#define SSZ_SERVICE_ACTION_EXIT_MODE_FAILURE 0x1F
+#define SSZ_UNRESTRICTED_EXIT                0x20
+#define SSZ_IMMED                            0x80
+	u_int8_t reserved[5];
+	u_int8_t length[2];
 	u_int8_t control;
 };
 
+struct scsi_sanitize_parameter_list
+{
+	u_int8_t byte1;
+#define SSZPL_INVERT 0x80
+	u_int8_t reserved;
+	u_int8_t length[2];
+	/* Variable length initialization pattern. */
+#define SSZPL_MAX_PATTERN_LENGTH 65535
+};
+
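For the OVERWRITE service action the data-out buffer is this parameter list:
a flags byte, a reserved byte, a big-endian pattern length, then the pattern
itself.  A userland sketch of building one, with an arbitrary pattern:

#include <stdint.h>
#include <string.h>

int
main(void)
{
	uint8_t plist[4 + 4];	/* 4-byte header + 4-byte pattern */
	static const uint8_t pattern[4] = { 0xde, 0xad, 0xbe, 0xef };

	memset(plist, 0, sizeof(plist));
	plist[0] = 0;				/* flags: no SSZPL_INVERT */
	/* bytes 2-3: big-endian initialization pattern length */
	plist[2] = 0;
	plist[3] = sizeof(pattern);
	memcpy(&plist[4], pattern, sizeof(pattern));
	return (0);
}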
 struct scsi_read_defect_data_12
 {
-	u_int8_t opcode;
-
-	/* 
-	 * The most significant 3 bits are the LUN, the other 5 are
-	 * reserved.
-	 */
-#define SRDD12_LUN_MASK 0xE0
-	u_int8_t byte2;
-
+	uint8_t opcode;
 #define SRDD12_GLIST 0x08
 #define SRDD12_PLIST 0x10
 #define SRDD12_DLIST_FORMAT_MASK 0x07
-#define SRDD12_BLOCK_FORMAT            0x00
-#define SRDD12_BYTES_FROM_INDEX_FORMAT 0x04
-#define SRDD12_PHYSICAL_SECTOR_FORMAT  0x05
-	u_int8_t format;
-
-	u_int8_t reserved[4];
-
-	u_int8_t alloc_length[4];
-
-	u_int8_t control;
-	
+#define SRDD12_BLOCK_FORMAT            SRDD10_BLOCK_FORMAT
+#define SRDD12_BYTES_FROM_INDEX_FORMAT SRDD10_BYTES_FROM_INDEX_FORMAT
+#define SRDD12_PHYSICAL_SECTOR_FORMAT  SRDD10_PHYSICAL_SECTOR_FORMAT
+	uint8_t format;
+	uint8_t address_descriptor_index[4];
+	uint8_t alloc_length[4];
+#define	SRDD12_MAX_LENGTH		0xffffffff
+	uint8_t reserved;
+	uint8_t control;
 };
 
 
@@ -156,6 +167,7 @@
 #define	WRITE_AND_VERIFY	0x2e
 #define	VERIFY			0x2f
 #define READ_DEFECT_DATA_10	0x37
+#define SANITIZE		0x48
 #define READ_DEFECT_DATA_12	0xb7
 
 struct format_defect_list_header
@@ -196,20 +208,51 @@
 	uint8_t	reserved1[3];
 };
 
-struct scsi_verify
+struct scsi_verify_10
 {
-	uint8_t	opcode;		/* VERIFY */
+	uint8_t	opcode;		/* VERIFY(10) */
 	uint8_t	byte2;
 #define	SVFY_LUN_MASK	0xE0
 #define	SVFY_RELADR	0x01
-#define	SVFY_BYTECHK	0x02
+#define	SVFY_BYTCHK	0x02
 #define	SVFY_DPO	0x10
 	uint8_t	addr[4];	/* LBA to begin verification at */
-	uint8_t	reserved0[1];
-	uint8_t	len[2];		/* number of blocks to verify */
-	uint8_t	reserved1[3];
+	uint8_t	group;
+	uint8_t	length[2];		/* number of blocks to verify */
+	uint8_t	control;
 };
 
+struct scsi_verify_12
+{
+	uint8_t	opcode;		/* VERIFY(12) */
+	uint8_t	byte2;
+	uint8_t	addr[4];	/* LBA to begin verification at */
+	uint8_t	length[4];		/* number of blocks to verify */
+	uint8_t	group;
+	uint8_t	control;
+};
+
+struct scsi_verify_16
+{
+	uint8_t	opcode;		/* VERIFY(16) */
+	uint8_t	byte2;
+	uint8_t	addr[8];	/* LBA to begin verification at */
+	uint8_t	length[4];		/* number of blocks to verify */
+	uint8_t	group;
+	uint8_t	control;
+};
+
+struct scsi_compare_and_write
+{
+	uint8_t	opcode;		/* COMPARE AND WRITE */
+	uint8_t	byte2;
+	uint8_t	addr[8];	/* LBA to begin verification at */
+	uint8_t	reserved[3];
+	uint8_t	length;		/* number of blocks */
+	uint8_t	group;
+	uint8_t	control;
+};
+
 struct scsi_write_and_verify
 {
 	uint8_t	opcode;		/* WRITE_AND_VERIFY */
@@ -288,6 +331,8 @@
 #define SRDDH10_PHYSICAL_SECTOR_FORMAT  0x05
 	u_int8_t format;
 	u_int8_t length[2];
+#define	SRDDH10_MAX_LENGTH	(SRDD10_MAX_LENGTH -			     \
+				sizeof(struct scsi_read_defect_data_hdr_10))
 };
 
 struct scsi_defect_desc_block
@@ -295,10 +340,18 @@
 	u_int8_t address[4];
 };
 
+struct scsi_defect_desc_long_block
+{
+	u_int8_t address[8];
+};
+
 struct scsi_defect_desc_bytes_from_index
 {
 	u_int8_t cylinder[3];
 	u_int8_t head;
+#define	SDD_EXT_BFI_MADS		0x80000000
+#define	SDD_EXT_BFI_FLAG_MASK		0xf0000000
+#define	SDD_EXT_BFI_ENTIRE_TRACK	0x0fffffff
 	u_int8_t bytes_from_index[4];
 };
 
@@ -306,6 +359,9 @@
 {
 	u_int8_t cylinder[3];
 	u_int8_t head;
+#define	SDD_EXT_PHYS_MADS		0x80000000
+#define	SDD_EXT_PHYS_FLAG_MASK		0xf0000000
+#define	SDD_EXT_PHYS_ENTIRE_TRACK	0x0fffffff
 	u_int8_t sector[4];
 };
 
@@ -319,7 +375,10 @@
 #define SRDDH12_BYTES_FROM_INDEX_FORMAT 0x04
 #define SRDDH12_PHYSICAL_SECTOR_FORMAT  0x05
 	u_int8_t format;
+	u_int8_t generation[2];
 	u_int8_t length[4];
+#define	SRDDH12_MAX_LENGTH	(SRDD12_MAX_LENGTH -			    \
+				sizeof(struct scsi_read_defect_data_hdr_12))
 };
 
 union	disk_pages /* this is the structure copied from osf */
@@ -489,17 +548,32 @@
 	u_int8_t correction_span;
 	u_int8_t head_offset_count;
 	u_int8_t data_strobe_offset_cnt;
-	u_int8_t reserved;
+	u_int8_t byte8;
+#define SMS_RWER_LBPERE			0x80
 	u_int8_t write_retry_count;
 	u_int8_t reserved2;
 	u_int8_t recovery_time_limit[2];
 };
 
+struct scsi_da_verify_recovery_page {
+	u_int8_t page_code;
+#define SMS_VERIFY_ERROR_RECOVERY_PAGE	0x07
+	u_int8_t page_length;
+	u_int8_t byte3;
+#define SMS_VER_EER			0x08
+#define SMS_VER_PER			0x04
+#define SMS_VER_DTE			0x02
+#define SMS_VER_DCR			0x01
+	u_int8_t read_retry_count;
+	u_int8_t reserved[6];
+	u_int8_t recovery_time_limit[2];
+};
+
 __BEGIN_DECLS
 /*
- * XXX This is only left out of the kernel build to silence warnings.  If,
- * for some reason this function is used in the kernel, the ifdefs should
- * be moved so it is included both in the kernel and userland.
+ * XXX These are only left out of the kernel build to silence warnings.  If,
+ * for some reason these functions are used in the kernel, the ifdefs should
+ * be moved so they are included both in the kernel and userland.
  */
 #ifndef _KERNEL
 void scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
@@ -508,6 +582,19 @@
 		      u_int8_t *data_ptr, u_int32_t dxfer_len,
 		      u_int8_t sense_len, u_int32_t timeout);
 
+void scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
+		       void (*cbfcnp)(struct cam_periph *, union ccb *),
+		       uint8_t tag_action, uint8_t list_format,
+		       uint32_t addr_desc_index, uint8_t *data_ptr,
+		       uint32_t dxfer_len, int minimum_cmd_size, 
+		       uint8_t sense_len, uint32_t timeout);
+
+void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
+		   void (*cbfcnp)(struct cam_periph *, union ccb *),
+		   u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
+		   u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
+		   u_int32_t timeout);
+
 #endif /* !_KERNEL */
 __END_DECLS
 

Modified: trunk/sys/cam/scsi/scsi_dvcfg.h
===================================================================
--- trunk/sys/cam/scsi/scsi_dvcfg.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_dvcfg.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,4 +1,5 @@
-/*	$MidnightBSD$	*/
+/* $MidnightBSD$ */
+/*	$FreeBSD: stable/10/sys/cam/scsi/scsi_dvcfg.h 139743 2005-01-05 22:34:37Z imp $	*/
 /*	$NecBSD: scsi_dvcfg.h,v 1.4 1998/03/14 07:05:06 kmatsuda Exp $	*/
 /*	$NetBSD$	*/
 

Modified: trunk/sys/cam/scsi/scsi_enc.c
===================================================================
--- trunk/sys/cam/scsi/scsi_enc.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_enc.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2000 Matthew Jacob
  * All rights reserved.
@@ -25,8 +26,10 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_enc.c 331633 2018-03-27 17:43:03Z brooks $");
 
+#include "opt_compat.h"
+
 #include <sys/param.h>
 
 #include <sys/conf.h>
@@ -37,8 +40,10 @@
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
+#include <sys/proc.h>
 #include <sys/queue.h>
 #include <sys/sx.h>
+#include <sys/sysent.h>
 #include <sys/systm.h>
 #include <sys/sysctl.h>
 #include <sys/types.h>
@@ -56,6 +61,8 @@
 #include <cam/scsi/scsi_enc.h>
 #include <cam/scsi/scsi_enc_internal.h>
 
+#include "opt_ses.h"
+
 MALLOC_DEFINE(M_SCSIENC, "SCSI ENC", "SCSI ENC buffers");
 
 /* Enclosure type independent driver */
@@ -67,7 +74,6 @@
 static  periph_ctor_t	enc_ctor;
 static	periph_oninv_t	enc_oninvalidate;
 static  periph_dtor_t   enc_dtor;
-static  periph_start_t  enc_start;
 
 static void enc_async(void *, uint32_t, struct cam_path *, void *);
 static enctyp enc_type(struct ccb_getdev *);
@@ -111,17 +117,16 @@
 static void
 enc_devgonecb(void *arg)
 {
-	struct cam_sim    *sim;
 	struct cam_periph *periph;
 	struct enc_softc  *enc;
+	struct mtx *mtx;
 	int i;
 
 	periph = (struct cam_periph *)arg;
-	sim = periph->sim;
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
 	enc = (struct enc_softc *)periph->softc;
 
-	mtx_lock(sim->mtx);
-
 	/*
 	 * When we get this callback, we will get no more close calls from
 	 * devfs.  So if we have any dangling opens, we need to release the
@@ -138,13 +143,13 @@
 	cam_periph_release_locked(periph);
 
 	/*
-	 * We reference the SIM lock directly here, instead of using
+	 * We reference the lock directly here, instead of using
 	 * cam_periph_unlock().  The reason is that the final call to
 	 * cam_periph_release_locked() above could result in the periph
 	 * getting freed.  If that is the case, dereferencing the periph
 	 * with a cam_periph_unlock() call would cause a page fault.
 	 */
-	mtx_unlock(sim->mtx);
+	mtx_unlock(mtx);
 }
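The conversion above is the standard fix for a teardown race: the mutex
pointer is cached via cam_periph_mtx() before the final reference drop,
because cam_periph_unlock() would have to dereference a periph that
cam_periph_release_locked() may already have freed.  The shape of the
pattern, as a sketch only:

	struct mtx *mtx;

	mtx = cam_periph_mtx(periph);	/* cache before periph can go away */
	mtx_lock(mtx);
	/* ... final teardown work ... */
	cam_periph_release_locked(periph);	/* may free periph */
	mtx_unlock(mtx);			/* safe: uses cached pointer */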
 
 static void
@@ -176,8 +181,6 @@
 	callout_drain(&enc->status_updater);
 
 	destroy_dev_sched_cb(enc->enc_dev, enc_devgonecb, periph);
-
-	xpt_print(periph->path, "lost device\n");
 }
 
 static void
@@ -187,9 +190,6 @@
 
 	enc = periph->softc;
 
-	xpt_print(periph->path, "removing device entry\n");
-
-
 	/* If the sub-driver has a cleanup routine, call it */
 	if (enc->enc_vec.softc_cleanup != NULL)
 		enc->enc_vec.softc_cleanup(enc);
@@ -246,8 +246,8 @@
 		}
 
 		status = cam_periph_alloc(enc_ctor, enc_oninvalidate,
-		    enc_dtor, enc_start, "ses", CAM_PERIPH_BIO,
-		    cgd->ccb_h.path, enc_async, AC_FOUND_DEVICE, cgd);
+		    enc_dtor, NULL, "ses", CAM_PERIPH_BIO,
+		    path, enc_async, AC_FOUND_DEVICE, cgd);
 
 		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
 			printf("enc_async: Unable to probe new device due to "
@@ -269,10 +269,6 @@
 	int error = 0;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL) {
-		return (ENXIO);
-	}
-
 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 		return (ENXIO);
 
@@ -302,25 +298,21 @@
 static int
 enc_close(struct cdev *dev, int flag, int fmt, struct thread *td)
 {
-	struct cam_sim    *sim;
 	struct cam_periph *periph;
 	struct enc_softc  *enc;
+	struct mtx *mtx;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return (ENXIO);
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
 
-	sim = periph->sim;
 	enc = periph->softc;
-
-	mtx_lock(sim->mtx);
-
 	enc->open_count--;
 
 	cam_periph_release_locked(periph);
 
 	/*
-	 * We reference the SIM lock directly here, instead of using
+	 * We reference the lock directly here, instead of using
 	 * cam_periph_unlock().  The reason is that the call to
 	 * cam_periph_release_locked() above could result in the periph
 	 * getting freed.  If that is the case, dereferencing the periph
@@ -331,34 +323,11 @@
 	 * protect the open count and avoid another lock acquisition and
 	 * release.
 	 */
-	mtx_unlock(sim->mtx);
+	mtx_unlock(mtx);
 
 	return (0);
 }
 
-static void
-enc_start(struct cam_periph *p, union ccb *sccb)
-{
-	struct enc_softc *enc;
-
-	enc = p->softc;
-	ENC_DLOG(enc, "%s enter imm=%d prio=%d\n",
-	    __func__, p->immediate_priority, p->pinfo.priority);
-	if (p->immediate_priority <= p->pinfo.priority) {
-		SLIST_INSERT_HEAD(&p->ccb_list, &sccb->ccb_h, periph_links.sle);
-		p->immediate_priority = CAM_PRIORITY_NONE;
-		wakeup(&p->ccb_list);
-	} else
-		xpt_release_ccb(sccb);
-	ENC_DLOG(enc, "%s exit\n", __func__);
-}
-
-void
-enc_done(struct cam_periph *periph, union ccb *dccb)
-{
-	wakeup(&dccb->ccb_h.cbfcnp);
-}
-
 int
 enc_error(union ccb *ccb, uint32_t cflags, uint32_t sflags)
 {
@@ -387,6 +356,10 @@
 	void *addr;
 	int error, i;
 
+#ifdef	COMPAT_FREEBSD32
+	if (SV_PROC_FLAG(td->td_proc, SV_ILP32))
+		return (ENOTTY);
+#endif
 
 	if (arg_addr)
 		addr = *((caddr_t *) arg_addr);
@@ -394,9 +367,6 @@
 		addr = NULL;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return (ENXIO);
-
 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering encioctl\n"));
 
 	cam_periph_lock(periph);
@@ -437,6 +407,8 @@
 	case ENCIOC_GETELMSTAT:
 	case ENCIOC_GETELMDESC:
 	case ENCIOC_GETELMDEVNAMES:
+	case ENCIOC_GETENCNAME:
+	case ENCIOC_GETENCID:
 		break;
 	default:
 		if ((flag & FWRITE) == 0) {
@@ -491,6 +463,8 @@
 
 	case ENCIOC_GETSTRING:
 	case ENCIOC_SETSTRING:
+	case ENCIOC_GETENCNAME:
+	case ENCIOC_GETENCID:
 		if (enc->enc_vec.handle_string == NULL) {
 			error = EINVAL;
 			break;
@@ -617,7 +591,7 @@
 	if (enc->enc_type == ENC_SEMB_SES || enc->enc_type == ENC_SEMB_SAFT) {
 		tdlen = min(dlen, 1020);
 		tdlen = (tdlen + 3) & ~3;
-		cam_fill_ataio(&ccb->ataio, 0, enc_done, ddf, 0, dptr, tdlen,
+		cam_fill_ataio(&ccb->ataio, 0, NULL, ddf, 0, dptr, tdlen,
 		    30 * 1000);
 		if (cdb[0] == RECEIVE_DIAGNOSTIC)
 			ata_28bit_cmd(&ccb->ataio,
@@ -635,7 +609,7 @@
 			    0x80, tdlen / 4);
 	} else {
 		tdlen = dlen;
-		cam_fill_csio(&ccb->csio, 0, enc_done, ddf, MSG_SIMPLE_Q_TAG,
+		cam_fill_csio(&ccb->csio, 0, NULL, ddf, MSG_SIMPLE_Q_TAG,
 		    dptr, dlen, sizeof (struct scsi_sense_data), cdbl,
 		    60 * 1000);
 		bcopy(cdb, ccb->csio.cdb_io.cdb_bytes, cdbl);
@@ -681,7 +655,7 @@
 /*
  * Is this a device that supports enclosure services?
  *
- * It's a a pretty simple ruleset- if it is device type
+ * It's a pretty simple ruleset- if it is device type
  * 0x0D (13), it's an ENCLOSURE device.
  */
 
@@ -719,12 +693,12 @@
 		return (ENC_NONE);
 	}
 
-#ifdef	ENC_ENABLE_PASSTHROUGH
+#ifdef	SES_ENABLE_PASSTHROUGH
 	if ((iqd[6] & 0x40) && (iqd[2] & 0x7) >= 2) {
 		/*
 		 * PassThrough Device.
 		 */
-		return (ENC_ENC_PASSTHROUGH);
+		return (ENC_SES_PASSTHROUGH);
 	}
 #endif
 
@@ -889,7 +863,7 @@
 {
 	int result;
 
-	callout_init_mtx(&enc->status_updater, enc->periph->sim->mtx, 0);
+	callout_init_mtx(&enc->status_updater, cam_periph_mtx(enc->periph), 0);
 
 	if (cam_periph_acquire(enc->periph) != CAM_REQ_CMP)
 		return (ENXIO);
@@ -931,6 +905,7 @@
 	enc_softc_t *enc;
 	struct ccb_getdev *cgd;
 	char *tname;
+	struct make_dev_args args;
 
 	cgd = (struct ccb_getdev *)arg;
 	if (cgd == NULL) {
@@ -1013,12 +988,20 @@
 		return (CAM_REQ_CMP_ERR);
 	}
 
-	enc->enc_dev = make_dev(&enc_cdevsw, periph->unit_number,
-	    UID_ROOT, GID_OPERATOR, 0600, "%s%d",
-	    periph->periph_name, periph->unit_number);
-
+	make_dev_args_init(&args);
+	args.mda_devsw = &enc_cdevsw;
+	args.mda_unit = periph->unit_number;
+	args.mda_uid = UID_ROOT;
+	args.mda_gid = GID_OPERATOR;
+	args.mda_mode = 0600;
+	args.mda_si_drv1 = periph;
+	err = make_dev_s(&args, &enc->enc_dev, "%s%d", periph->periph_name,
+	    periph->unit_number);
 	cam_periph_lock(periph);
-	enc->enc_dev->si_drv1 = periph;
+	if (err != 0) {
+		cam_periph_release_locked(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
 
 	enc->enc_flags |= ENC_FLAG_INITIALIZED;
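The make_dev_s() conversion also explains the NULL si_drv1 guards removed
from enc_open(), enc_close() and encioctl() earlier in this diff: mda_si_drv1
is installed before the device node becomes visible to userland, so, barring
a failed make_dev_s() (handled above), si_drv1 can no longer be observed as
NULL through a live cdev.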
 

Modified: trunk/sys/cam/scsi/scsi_enc.h
===================================================================
--- trunk/sys/cam/scsi/scsi_enc.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_enc.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,4 +1,5 @@
-/* $FreeBSD$ */
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/cam/scsi/scsi_enc.h 291429 2015-11-28 17:26:46Z mav $ */
 /*-
  * Copyright (c) 2000 by Matthew Jacob
  * All rights reserved.
@@ -46,6 +47,8 @@
 #define	ENCIOC_GETELMDEVNAMES	_IO(ENCIOC, 10)
 #define	ENCIOC_GETSTRING	_IO(ENCIOC, 11)
 #define	ENCIOC_SETSTRING	_IO(ENCIOC, 12)
+#define	ENCIOC_GETENCNAME	_IO(ENCIOC, 13)
+#define	ENCIOC_GETENCID		_IO(ENCIOC, 14)
 
 /*
  * Platform Independent Definitions for enclosure devices.

Modified: trunk/sys/cam/scsi/scsi_enc_internal.h
===================================================================
--- trunk/sys/cam/scsi/scsi_enc_internal.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_enc_internal.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2000 Matthew Jacob
  * All rights reserved.
@@ -23,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_enc_internal.h 260387 2014-01-07 01:51:48Z scottl $
  */
 
 /*
@@ -192,7 +193,6 @@
 /* Enclosure core interface for sub-drivers */
 int  enc_runcmd(struct enc_softc *, char *, int, char *, int *);
 void enc_log(struct enc_softc *, const char *, ...);
-void enc_done(struct cam_periph *, union ccb *);
 int  enc_error(union ccb *, uint32_t, uint32_t);
 void enc_update_request(enc_softc_t *, uint32_t);
 

Modified: trunk/sys/cam/scsi/scsi_enc_safte.c
===================================================================
--- trunk/sys/cam/scsi/scsi_enc_safte.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_enc_safte.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2000 Matthew Jacob
  * All rights reserved.
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_enc_safte.c 300589 2016-05-24 07:21:23Z mav $");
 
 #include <sys/param.h>
 
@@ -243,12 +244,12 @@
 
 	if (enc->enc_type == ENC_SEMB_SAFT) {
 		semb_read_buffer(&ccb->ataio, /*retries*/5,
-				enc_done, MSG_SIMPLE_Q_TAG,
+				NULL, MSG_SIMPLE_Q_TAG,
 				state->page_code, buf, state->buf_size,
 				state->timeout);
 	} else {
 		scsi_read_buffer(&ccb->csio, /*retries*/5,
-				enc_done, MSG_SIMPLE_Q_TAG, 1,
+				NULL, MSG_SIMPLE_Q_TAG, 1,
 				state->page_code, 0, buf, state->buf_size,
 				SSD_FULL_SIZE, state->timeout);
 	}
@@ -292,11 +293,8 @@
 	    cfg->DoorLock + cfg->Ntherm + cfg->Nspkrs + cfg->Ntstats + 1;
 	ENC_FREE_AND_NULL(enc->enc_cache.elm_map);
 	enc->enc_cache.elm_map =
-	    ENC_MALLOCZ(enc->enc_cache.nelms * sizeof(enc_element_t));
-	if (enc->enc_cache.elm_map == NULL) {
-		enc->enc_cache.nelms = 0;
-		return (ENOMEM);
-	}
+	    malloc(enc->enc_cache.nelms * sizeof(enc_element_t),
+	    M_SCSIENC, M_WAITOK|M_ZERO);
 
 	r = 0;
 	/*
@@ -942,11 +940,11 @@
 
 	if (enc->enc_type == ENC_SEMB_SAFT) {
 		semb_write_buffer(&ccb->ataio, /*retries*/5,
-				enc_done, MSG_SIMPLE_Q_TAG,
+				NULL, MSG_SIMPLE_Q_TAG,
 				buf, xfer_len, state->timeout);
 	} else {
 		scsi_write_buffer(&ccb->csio, /*retries*/5,
-				enc_done, MSG_SIMPLE_Q_TAG, 1,
+				NULL, MSG_SIMPLE_Q_TAG, 1,
 				0, 0, buf, xfer_len,
 				SSD_FULL_SIZE, state->timeout);
 	}

Modified: trunk/sys/cam/scsi/scsi_enc_ses.c
===================================================================
--- trunk/sys/cam/scsi/scsi_enc_ses.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_enc_ses.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2000 Matthew Jacob
  * Copyright (c) 2010 Spectra Logic Corporation
@@ -32,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_enc_ses.c 320474 2017-06-29 17:29:07Z markj $");
 
 #include <sys/param.h>
 
@@ -345,8 +346,9 @@
 	const struct ses_cfg_page		*cfg_page;
 
 	/* References into the config page. */
+	int					 ses_nsubencs;
 	const struct ses_enc_desc * const	*subencs;
-	uint8_t					 ses_ntypes;
+	int					 ses_ntypes;
 	const ses_type_t			*ses_types;
 
 	/* Source for all the status pointers */
@@ -549,6 +551,7 @@
 static int ses_putstatus(enc_softc_t *, int, struct ses_comstat *);
 #endif
 
+static void ses_poll_status(enc_softc_t *);
 static void ses_print_addl_data(enc_softc_t *, enc_element_t *);
 
 /*=========================== SES cleanup routines ===========================*/
@@ -567,8 +570,8 @@
 		return;
 
 	for (cur_elm = cache->elm_map,
-	     last_elm = &cache->elm_map[cache->nelms - 1];
-	     cur_elm <= last_elm; cur_elm++) {
+	     last_elm = &cache->elm_map[cache->nelms];
+	     cur_elm != last_elm; cur_elm++) {
 		ses_element_t *elmpriv;
 
 		elmpriv = cur_elm->elm_private;
@@ -598,8 +601,8 @@
 		return;
 
 	for (cur_elm = cache->elm_map,
-	     last_elm = &cache->elm_map[cache->nelms - 1];
-	     cur_elm <= last_elm; cur_elm++) {
+	     last_elm = &cache->elm_map[cache->nelms];
+	     cur_elm != last_elm; cur_elm++) {
 		ses_element_t *elmpriv;
 
 		elmpriv = cur_elm->elm_private;
@@ -644,8 +647,8 @@
 	ses_cache_free_elm_descs(enc, cache);
 	ses_cache_free_elm_addlstatus(enc, cache);
 	for (cur_elm = cache->elm_map,
-	     last_elm = &cache->elm_map[cache->nelms - 1];
-	     cur_elm <= last_elm; cur_elm++) {
+	     last_elm = &cache->elm_map[cache->nelms];
+	     cur_elm != last_elm; cur_elm++) {
 
 		ENC_FREE_AND_NULL(cur_elm->elm_private);
 	}
@@ -714,13 +717,15 @@
 	 * The element map is independent even though it starts out
 	 * pointing to the same constant page data.
 	 */
-	dst->elm_map = ENC_MALLOCZ(dst->nelms * sizeof(enc_element_t));
+	dst->elm_map = malloc(dst->nelms * sizeof(enc_element_t),
+	    M_SCSIENC, M_WAITOK);
 	memcpy(dst->elm_map, src->elm_map, dst->nelms * sizeof(enc_element_t));
 	for (dst_elm = dst->elm_map, src_elm = src->elm_map,
-	     last_elm = &src->elm_map[src->nelms - 1];
-	     src_elm <= last_elm; src_elm++, dst_elm++) {
+	     last_elm = &src->elm_map[src->nelms];
+	     src_elm != last_elm; src_elm++, dst_elm++) {
 
-		dst_elm->elm_private = ENC_MALLOCZ(sizeof(ses_element_t));
+		dst_elm->elm_private = malloc(sizeof(ses_element_t),
+		    M_SCSIENC, M_WAITOK);
 		memcpy(dst_elm->elm_private, src_elm->elm_private,
 		       sizeof(ses_element_t));
 	}
@@ -888,7 +893,6 @@
 	struct device_match_result  *device_match;
 	struct device_match_pattern *device_pattern;
 	ses_path_iter_args_t	    *args;
-	struct cam_sim		    *sim;
 
 	args = (ses_path_iter_args_t *)arg;
 	match_pattern.type = DEV_MATCH_DEVICE;
@@ -901,10 +905,10 @@
 	       device_pattern->data.devid_pat.id_len);
 
 	memset(&cdm, 0, sizeof(cdm));
-	if (xpt_create_path_unlocked(&cdm.ccb_h.path, /*periph*/NULL,
-				     CAM_XPT_PATH_ID,
-				     CAM_TARGET_WILDCARD,
-				     CAM_LUN_WILDCARD) != CAM_REQ_CMP)
+	if (xpt_create_path(&cdm.ccb_h.path, /*periph*/NULL,
+			     CAM_XPT_PATH_ID,
+			     CAM_TARGET_WILDCARD,
+			     CAM_LUN_WILDCARD) != CAM_REQ_CMP)
 		return;
 
 	cdm.ccb_h.func_code = XPT_DEV_MATCH;
@@ -914,11 +918,8 @@
 	cdm.match_buf_len   = sizeof(match_result);
 	cdm.matches         = &match_result;
 
-	sim = xpt_path_sim(cdm.ccb_h.path);
-	CAM_SIM_LOCK(sim);
 	xpt_action((union ccb *)&cdm);
 	xpt_free_path(cdm.ccb_h.path);
-	CAM_SIM_UNLOCK(sim);
 
 	if ((cdm.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
 	 || (cdm.status != CAM_DEV_MATCH_LAST
@@ -927,18 +928,15 @@
 		return;
 
 	device_match = &match_result.result.device_result;
-	if (xpt_create_path_unlocked(&cdm.ccb_h.path, /*periph*/NULL,
-				     device_match->path_id,
-				     device_match->target_id,
-				     device_match->target_lun) != CAM_REQ_CMP)
+	if (xpt_create_path(&cdm.ccb_h.path, /*periph*/NULL,
+			     device_match->path_id,
+			     device_match->target_id,
+			     device_match->target_lun) != CAM_REQ_CMP)
 		return;
 
 	args->callback(enc, elem, cdm.ccb_h.path, args->callback_arg);
 
-	sim = xpt_path_sim(cdm.ccb_h.path);
-	CAM_SIM_LOCK(sim);
 	xpt_free_path(cdm.ccb_h.path);
-	CAM_SIM_UNLOCK(sim);
 }
 
 /**
@@ -1014,7 +1012,7 @@
 	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 	cdai.buftype = CDAI_TYPE_PHYS_PATH;
-	cdai.flags = 0;
+	cdai.flags = CDAI_FLAG_NONE;
 	cdai.bufsiz = MAXPATHLEN;
 	cdai.buf = old_physpath;
 	xpt_action((union ccb *)&cdai);
@@ -1026,7 +1024,7 @@
 		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 		cdai.buftype = CDAI_TYPE_PHYS_PATH;
-		cdai.flags |= CDAI_FLAG_STORE;
+		cdai.flags = CDAI_FLAG_STORE;
 		cdai.bufsiz = sbuf_len(args->physpath);
 		cdai.buf = sbuf_data(args->physpath);
 		xpt_action((union ccb *)&cdai);
@@ -1056,7 +1054,8 @@
 	ses_setphyspath_callback_args_t args;
 	int i, ret;
 	struct sbuf sb;
-	uint8_t *devid, *elmaddr;
+	struct scsi_vpd_id_descriptor *idd;
+	uint8_t *devid;
 	ses_element_t *elmpriv;
 	const char *c;
 
@@ -1069,13 +1068,10 @@
 	 */
 	xpt_setup_ccb(&cdai.ccb_h, enc->periph->path, CAM_PRIORITY_NORMAL);
 	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
+	cdai.flags = CDAI_FLAG_NONE;
 	cdai.buftype = CDAI_TYPE_SCSI_DEVID;
 	cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
-	cdai.buf = devid = ENC_MALLOCZ(cdai.bufsiz);
-	if (devid == NULL) {
-		ret = ENOMEM;
-		goto out;
-	}
+	cdai.buf = devid = malloc(cdai.bufsiz, M_SCSIENC, M_WAITOK|M_ZERO);
 	cam_periph_lock(enc->periph);
 	xpt_action((union ccb *)&cdai);
 	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
@@ -1084,9 +1080,9 @@
 	if (cdai.ccb_h.status != CAM_REQ_CMP)
 		goto out;
 
-	elmaddr = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
+	idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 	    cdai.provsiz, scsi_devid_is_naa_ieee_reg);
-	if (elmaddr == NULL)
+	if (idd == NULL)
 		goto out;
 
 	if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) {
@@ -1095,7 +1091,7 @@
 	}
 	/* Next, generate the physical path string */
 	sbuf_printf(&sb, "id1,enc at n%jx/type@%x/slot@%x",
-	    scsi_8btou64(elmaddr), iter->type_index,
+	    scsi_8btou64(idd->identifier), iter->type_index,
 	    iter->type_element_index);
 	/* Append the element descriptor if one exists */
 	elmpriv = elm->elm_private;
@@ -1185,7 +1181,7 @@
 	if (mode_buf == NULL)
 		goto out;
 
-	scsi_mode_sense(&ccb->csio, /*retries*/4, enc_done, MSG_SIMPLE_Q_TAG,
+	scsi_mode_sense(&ccb->csio, /*retries*/4, NULL, MSG_SIMPLE_Q_TAG,
 	    /*dbd*/FALSE, SMS_PAGE_CTRL_CURRENT, SES_MGMT_MODE_PAGE_CODE,
 	    mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60 * 1000);
 
@@ -1213,7 +1209,7 @@
 	/* SES2r20: a completion time of zero means as long as possible */
 	bzero(&mgmt->max_comp_time, sizeof(mgmt->max_comp_time));
 
-	scsi_mode_select(&ccb->csio, 5, enc_done, MSG_SIMPLE_Q_TAG,
+	scsi_mode_select(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG,
 	    /*page_fmt*/FALSE, /*save_pages*/TRUE, mode_buf, mode_buf_len,
 	    SSD_FULL_SIZE, /*timeout*/60 * 1000);
 
@@ -1332,7 +1328,7 @@
 	enc_cache = &enc->enc_daemon_cache;
 	ses_cache = enc_cache->private;
 	buf = *bufp;
-	err = -1;;
+	err = -1;
 
 	if (error != 0) {
 		err = error;
@@ -1376,12 +1372,8 @@
 	 * Now waltz through all the subenclosures summing the number of
 	 * types available in each.
 	 */
-	subencs = ENC_MALLOCZ(ses_cfg_page_get_num_subenc(cfg_page)
-			    * sizeof(*subencs));
-	if (subencs == NULL) {
-		err = ENOMEM;
-		goto out;
-	}
+	subencs = malloc(ses_cfg_page_get_num_subenc(cfg_page)
+	    * sizeof(*subencs), M_SCSIENC, M_WAITOK|M_ZERO);
 	/*
 	 * Sub-enclosure data is const after construction (i.e. when
 	 * accessed via our cache object.
@@ -1389,11 +1381,12 @@
 	 * The cast here is not required in C++ but C99 is not so
 	 * sophisticated (see C99 6.5.16.1(1)).
 	 */
+	ses_cache->ses_nsubencs = ses_cfg_page_get_num_subenc(cfg_page);
 	ses_cache->subencs = subencs;
 
 	buf_subenc = cfg_page->subencs;
 	cur_subenc = subencs;
-	last_subenc = &subencs[ses_cfg_page_get_num_subenc(cfg_page) - 1];
+	last_subenc = &subencs[ses_cache->ses_nsubencs - 1];
 	ntype = 0;
 	while (cur_subenc <= last_subenc) {
 
@@ -1418,15 +1411,13 @@
 	}
 
 	/* Process the type headers. */
-	ses_types = ENC_MALLOCZ(ntype * sizeof(*ses_types));
-	if (ses_types == NULL) {
-		err = ENOMEM;
-		goto out;
-	}
+	ses_types = malloc(ntype * sizeof(*ses_types),
+	    M_SCSIENC, M_WAITOK|M_ZERO);
 	/*
 	 * Type data is const after construction (i.e. when accessed via
 	 * our cache object.
 	 */
+	ses_cache->ses_ntypes = ntype;
 	ses_cache->ses_types = ses_types;
 
 	cur_buf_type = (const struct ses_elm_type_desc *)
@@ -1458,12 +1449,8 @@
 	}
 
 	/* Create the object map. */
-	enc_cache->elm_map = ENC_MALLOCZ(nelm * sizeof(enc_element_t));
-	if (enc_cache->elm_map == NULL) {
-		err = ENOMEM;
-		goto out;
-	}
-	ses_cache->ses_ntypes = (uint8_t)ntype;
+	enc_cache->elm_map = malloc(nelm * sizeof(enc_element_t),
+	    M_SCSIENC, M_WAITOK|M_ZERO);
 	enc_cache->nelms = nelm;
 
 	ses_iter_init(enc, enc_cache, &iter);
@@ -1477,11 +1464,8 @@
 		element->subenclosure = thdr->etype_subenc;
 		element->enctype = thdr->etype_elm_type;
 		element->overall_status_elem = iter.type_element_index == 0;
-		element->elm_private = ENC_MALLOCZ(sizeof(ses_element_t));
-		if (element->elm_private == NULL) {
-			err = ENOMEM;
-			goto out;
-		}
+		element->elm_private = malloc(sizeof(ses_element_t),
+		    M_SCSIENC, M_WAITOK|M_ZERO);
 		ENC_DLOG(enc, "%s: creating elmpriv %d(%d,%d) subenc %d "
 		    "type 0x%x\n", __func__, iter.global_element_index,
 		    iter.type_index, iter.type_element_index,
@@ -1494,11 +1478,7 @@
 	if (err)
 		ses_cache_free(enc, enc_cache);
 	else {
-		enc_update_request(enc, SES_UPDATE_GETSTATUS);
-		if (ses->ses_flags & SES_FLAG_DESC)
-			enc_update_request(enc, SES_UPDATE_GETELMDESCS);
-		if (ses->ses_flags & SES_FLAG_ADDLSTATUS)
-			enc_update_request(enc, SES_UPDATE_GETELMADDLSTATUS);
+		ses_poll_status(enc);
 		enc_update_request(enc, SES_PUBLISH_CACHE);
 	}
 	ENC_DLOG(enc, "%s: exiting with err %d\n", __func__, err);
@@ -1554,6 +1534,18 @@
 		ENC_VLOG(enc, "Enclosure Status Page Too Long\n");
 		goto out;
 	}
+
+	/* Check for simple enclosure reporting short enclosure status. */
+	if (length >= 4 && page->hdr.page_code == SesShortStatus) {
+		ENC_DLOG(enc, "Got Short Enclosure Status page\n");
+		ses->ses_flags &= ~(SES_FLAG_ADDLSTATUS | SES_FLAG_DESC);
+		ses_cache_free(enc, enc_cache);
+		enc_cache->enc_status = page->hdr.page_specific_flags;
+		enc_update_request(enc, SES_PUBLISH_CACHE);
+		err = 0;
+		goto out;
+	}
+
 	/* Make sure the length contains at least one header and status */
 	if (length < (sizeof(*page) + sizeof(*page->elements))) {
 		ENC_VLOG(enc, "Enclosure Status Page Too Short\n");
@@ -1766,14 +1758,20 @@
 		eip = ses_elm_addlstatus_eip(elm_hdr);
 		if (eip && !ignore_index) {
 			struct ses_elm_addlstatus_eip_hdr *eip_hdr;
-			int expected_index;
+			int expected_index, index;
+			ses_elem_index_type_t index_type;
 
 			eip_hdr = (struct ses_elm_addlstatus_eip_hdr *)elm_hdr;
-			expected_index = iter.individual_element_index;
+			if (eip_hdr->byte2 & SES_ADDL_EIP_EIIOE) {
+				index_type = SES_ELEM_INDEX_GLOBAL;
+				expected_index = iter.global_element_index;
+			} else {
+				index_type = SES_ELEM_INDEX_INDIVIDUAL;
+				expected_index = iter.individual_element_index;
+			}
 			titer = iter;
 			telement = ses_iter_seek_to(&titer,
-						   eip_hdr->element_index,
-						   SES_ELEM_INDEX_INDIVIDUAL);
+			    eip_hdr->element_index, index_type);
 			if (telement != NULL &&
 			    (ses_typehasaddlstatus(enc, titer.type_index) !=
 			     TYPE_ADDLSTATUS_NONE ||
@@ -1783,13 +1781,18 @@
 			} else
 				ignore_index = 1;
 
-			if (iter.individual_element_index > expected_index
+			if (eip_hdr->byte2 & SES_ADDL_EIP_EIIOE)
+				index = iter.global_element_index;
+			else
+				index = iter.individual_element_index;
+			if (index > expected_index
 			 && status_type == TYPE_ADDLSTATUS_MANDATORY) {
-				ENC_VLOG(enc, "%s: provided element "
+				ENC_VLOG(enc, "%s: provided %s element "
+					"index %d skips mandatory status "
+					"element at index %d\n",
-					__func__, eip_hdr->element_index,
-					expected_index);
+					__func__, (eip_hdr->byte2 &
+					SES_ADDL_EIP_EIIOE) ? "global " : "",
+					index, expected_index);
 			}
 		}
 		elmpriv = element->elm_private;
@@ -1865,7 +1868,7 @@
 	 *  o Some SCSI status error.
 	 */
 	ses_terminate_control_requests(&ses->ses_pending_requests, error);
-	enc_update_request(enc, SES_UPDATE_GETSTATUS);
+	ses_poll_status(enc);
 	return (0);
 }
 
@@ -2017,12 +2020,12 @@
 
 	if (enc->enc_type == ENC_SEMB_SES) {
 		semb_receive_diagnostic_results(&ccb->ataio, /*retries*/5,
-					enc_done, MSG_SIMPLE_Q_TAG, /*pcv*/1,
+					NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1,
 					state->page_code, buf, state->buf_size,
 					state->timeout);
 	} else {
 		scsi_receive_diagnostic_results(&ccb->csio, /*retries*/5,
-					enc_done, MSG_SIMPLE_Q_TAG, /*pcv*/1,
+					NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1,
 					state->page_code, buf, state->buf_size,
 					SSD_FULL_SIZE, state->timeout);
 	}
@@ -2140,12 +2143,12 @@
 
 	/* Fill out the ccb */
 	if (enc->enc_type == ENC_SEMB_SES) {
-		semb_send_diagnostic(&ccb->ataio, /*retries*/5, enc_done,
+		semb_send_diagnostic(&ccb->ataio, /*retries*/5, NULL,
 			     MSG_SIMPLE_Q_TAG,
 			     buf, ses_page_length(&ses_cache->status_page->hdr),
 			     state->timeout);
 	} else {
-		scsi_send_diagnostic(&ccb->csio, /*retries*/5, enc_done,
+		scsi_send_diagnostic(&ccb->csio, /*retries*/5, NULL,
 			     MSG_SIMPLE_Q_TAG, /*unit_offline*/0,
 			     /*device_offline*/0, /*self_test*/0,
 			     /*page_format*/1, /*self_test_code*/0,
@@ -2682,13 +2685,14 @@
 	if (len < 0)
 		return (EINVAL);
 
-	sbuf_new(&sb, elmdn->elm_devnames, len, 0);
-
 	cam_periph_unlock(enc->periph);
+	sbuf_new(&sb, NULL, len, SBUF_FIXEDLEN);
 	ses_paths_iter(enc, &enc->enc_cache.elm_map[elmdn->elm_idx],
-		       ses_elmdevname_callback, &sb);
+	    ses_elmdevname_callback, &sb);
 	sbuf_finish(&sb);
 	elmdn->elm_names_len = sbuf_len(&sb);
+	copyout(sbuf_data(&sb), elmdn->elm_devnames, elmdn->elm_names_len + 1);
+	sbuf_delete(&sb);
 	cam_periph_lock(enc->periph);
 	return (elmdn->elm_names_len > 0 ? 0 : ENODEV);
 }
@@ -2706,10 +2710,23 @@
 static int
 ses_handle_string(enc_softc_t *enc, encioc_string_t *sstr, int ioc)
 {
+	ses_softc_t *ses;
+	enc_cache_t *enc_cache;
+	ses_cache_t *ses_cache;
+	const struct ses_enc_desc *enc_desc;
 	int amt, payload, ret;
 	char cdb[6];
+	char str[32];
+	char vendor[9];
+	char product[17];
+	char rev[5];
 	uint8_t *buf;
+	size_t size, rsize;
 
+	ses = enc->enc_private;
+	enc_cache = &enc->enc_daemon_cache;
+	ses_cache = enc_cache->private;
+
 	/* Implement SES2r20 6.1.6 */
 	if (sstr->bufsiz > 0xffff)
 		return (EINVAL); /* buffer size too large */
@@ -2733,6 +2750,40 @@
 		amt = payload;
 		ses_page_cdb(cdb, payload, SesStringIn, CAM_DIR_IN);
 		buf = sstr->buf;
+	} else if (ioc == ENCIOC_GETENCNAME) {
+		if (ses_cache->ses_nsubencs < 1)
+			return (ENODEV);
+		enc_desc = ses_cache->subencs[0];
+		cam_strvis(vendor, enc_desc->vendor_id,
+		    sizeof(enc_desc->vendor_id), sizeof(vendor));
+		cam_strvis(product, enc_desc->product_id,
+		    sizeof(enc_desc->product_id), sizeof(product));
+		cam_strvis(rev, enc_desc->product_rev,
+		    sizeof(enc_desc->product_rev), sizeof(rev));
+		rsize = snprintf(str, sizeof(str), "%s %s %s",
+		    vendor, product, rev) + 1;
+		if (rsize > sizeof(str))
+			rsize = sizeof(str);
+		copyout(&rsize, &sstr->bufsiz, sizeof(rsize));
+		size = rsize;
+		if (size > sstr->bufsiz)
+			size = sstr->bufsiz;
+		copyout(str, sstr->buf, size);
+		return (size == rsize ? 0 : ENOMEM);
+	} else if (ioc == ENCIOC_GETENCID) {
+		if (ses_cache->ses_nsubencs < 1)
+			return (ENODEV);
+		enc_desc = ses_cache->subencs[0];
+		rsize = snprintf(str, sizeof(str), "%16jx",
+		    scsi_8btou64(enc_desc->logical_id)) + 1;
+		if (rsize > sizeof(str))
+			rsize = sizeof(str);
+		copyout(&rsize, &sstr->bufsiz, sizeof(rsize));
+		size = rsize;
+		if (size > sstr->bufsiz)
+			size = sstr->bufsiz;
+		copyout(str, sstr->buf, size);
+		return (size == rsize ? 0 : ENOMEM);
 	} else
 		return EINVAL;
 
@@ -2752,6 +2803,8 @@
 
 	ses = enc->enc_private;
 	enc_update_request(enc, SES_UPDATE_GETSTATUS);
+	if (ses->ses_flags & SES_FLAG_DESC)
+		enc_update_request(enc, SES_UPDATE_GETELMDESCS);
 	if (ses->ses_flags & SES_FLAG_ADDLSTATUS)
 		enc_update_request(enc, SES_UPDATE_GETELMADDLSTATUS);
 }

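Note on the allocation changes above: the converted SES cache paths drop the
ENC_MALLOCZ() wrapper (M_NOWAIT plus an ENOMEM unwind) in favor of direct
malloc(9) calls with M_WAITOK|M_ZERO, which by contract cannot return NULL.
A minimal sketch of the two patterns, using a hypothetical M_EXAMPLE malloc
type and structure (the driver itself uses M_SCSIENC):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>
    #include <sys/errno.h>

    /* Hypothetical malloc type for illustration only. */
    static MALLOC_DEFINE(M_EXAMPLE, "example", "example allocations");

    struct example_elem {
            int     state;
    };

    /*
     * Old pattern (what ENC_MALLOCZ wrapped): M_NOWAIT may return NULL,
     * so every caller needs an ENOMEM unwind path.
     */
    static int
    alloc_nowait(struct example_elem **elemp)
    {

            *elemp = malloc(sizeof(**elemp), M_EXAMPLE, M_NOWAIT | M_ZERO);
            if (*elemp == NULL)
                    return (ENOMEM);
            return (0);
    }

    /*
     * New pattern: M_WAITOK sleeps until memory is available, so the
     * allocation cannot fail and the error unwinding disappears.  This
     * is only legal in contexts that are allowed to sleep.
     */
    static struct example_elem *
    alloc_waitok(void)
    {

            return (malloc(sizeof(struct example_elem), M_EXAMPLE,
                M_WAITOK | M_ZERO));
    }
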
Modified: trunk/sys/cam/scsi/scsi_iu.h
===================================================================
--- trunk/sys/cam/scsi/scsi_iu.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_iu.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * This file is in the public domain.
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_iu.h 139743 2005-01-05 22:34:37Z imp $
  */
 #ifndef	_SCSI_SCSI_IU_H
 #define _SCSI_SCSI_IU_H 1

Modified: trunk/sys/cam/scsi/scsi_low.c
===================================================================
--- trunk/sys/cam/scsi/scsi_low.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_low.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,8 +1,9 @@
+/* $MidnightBSD$ */
 /*	$NecBSD: scsi_low.c,v 1.24.10.8 2001/06/26 07:39:44 honda Exp $	*/
 /*	$NetBSD$	*/
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_low.c 315813 2017-03-23 06:41:13Z mav $");
 
 #define	SCSI_LOW_STATICS
 #define	SCSI_LOW_DEBUG
@@ -14,13 +15,7 @@
 /* #define	SCSI_LOW_QCLEAR_AFTER_CA */
 /* #define	SCSI_LOW_FLAGS_QUIRKS_OK */
 
-#ifdef __NetBSD__
-#define	SCSI_LOW_TARGET_OPEN
-#endif	/* __NetBSD__ */
-
-#ifdef	__FreeBSD__
 #define	SCSI_LOW_FLAGS_QUIRKS_OK
-#endif	/* __FreeBSD__ */
 
 /*-
  * [NetBSD for NEC PC-98 series]
@@ -71,41 +66,12 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
-
-#ifdef __FreeBSD__
-#if __FreeBSD_version >= 500001
 #include <sys/bio.h>
-#else
-#include <machine/clock.h>
-#endif
-#endif	/* __FreeBSD__ */
-
 #include <sys/buf.h>
 #include <sys/queue.h>
 #include <sys/malloc.h>
 #include <sys/errno.h>
 
-#ifdef	__NetBSD__
-#include <sys/device.h>
-#include <vm/vm.h>
-
-#include <machine/bus.h>
-#include <machine/intr.h>
-#include <machine/dvcfg.h>
-
-#include <dev/cons.h>
-
-#include <dev/scsipi/scsipi_all.h>
-#include <dev/scsipi/scsipiconf.h>
-#include <dev/scsipi/scsipi_disk.h>
-#include <dev/scsipi/scsi_all.h>
-#include <dev/scsipi/scsiconf.h>
-#include <sys/scsiio.h>
-
-#include <i386/Cbus/dev/scsi_low.h>
-#endif	/* __NetBSD__ */
-
-#ifdef __FreeBSD__
 #include <cam/cam.h>
 #include <cam/cam_ccb.h>
 #include <cam/cam_sim.h>
@@ -119,7 +85,6 @@
 #include <cam/scsi/scsi_low.h>
 
 #include <sys/cons.h>
-#endif	/* __FreeBSD__ */
 
 /**************************************************************
  * Constants
@@ -193,7 +158,7 @@
 #ifdef	SCSI_LOW_INFO_DETAIL
 #define	SCSI_LOW_INFO(slp, ti, s) scsi_low_info((slp), (ti), (s))
 #else	/* !SCSI_LOW_INFO_DETAIL */
-#define	SCSI_LOW_INFO(slp, ti, s) printf("%s: %s\n", (slp)->sl_xname, (s))
+#define	SCSI_LOW_INFO(slp, ti, s) device_printf((slp)->sl_dev, "%s\n", (s))
 #endif	/* !SCSI_LOW_INFO_DETAIL */
 
 #ifdef	SCSI_LOW_STATICS
@@ -362,15 +327,13 @@
 	if ((cb = slp->sl_Qnexus) != NULL && cb->osdep == osdep)
 		return cb;
 
-	for (cb = TAILQ_FIRST(&slp->sl_start); cb != NULL;
-	     cb = TAILQ_NEXT(cb, ccb_chain))
+	TAILQ_FOREACH(cb, &slp->sl_start, ccb_chain)
 	{
 		if (cb->osdep == osdep)
 			return cb;
 	}
 
-	for (cb = TAILQ_FIRST(&li->li_discq); cb != NULL;
-	     cb = TAILQ_NEXT(cb, ccb_chain))
+	TAILQ_FOREACH(cb, &li->li_discq, ccb_chain)
 	{
 		if (cb->osdep == osdep)
 			return cb;
@@ -392,502 +355,7 @@
 	return tp->error_code;
 }
 
-#ifdef SCSI_LOW_INTERFACE_XS
 /**************************************************************
- * SCSI INTERFACE (XS)
- **************************************************************/
-#define	SCSI_LOW_MINPHYS		0x10000
-#define	SCSI_LOW_MALLOC(size)		malloc((size), M_SCSILOW, M_NOWAIT)
-#define	SCSI_LOW_FREE(pt)		free((pt), M_SCSILOW)
-#define	SCSI_LOW_ALLOC_CCB(flags)	scsi_low_get_ccb((flags))
-#define	SCSI_LOW_XS_POLL_HZ		1000
-
-static int scsi_low_poll_xs(struct scsi_low_softc *, struct slccb *);
-static void scsi_low_scsi_minphys_xs(struct buf *);
-#ifdef	SCSI_LOW_TARGET_OPEN
-static int scsi_low_target_open(struct scsipi_link *, struct cfdata *);
-#endif	/* SCSI_LOW_TARGET_OPEN */
-static int scsi_low_scsi_cmd_xs(struct scsipi_xfer *);
-static int scsi_low_enable_xs(void *, int);
-static int scsi_low_ioctl_xs(struct scsipi_link *, u_long, caddr_t, int, struct proc *);
-
-static int scsi_low_attach_xs(struct scsi_low_softc *);
-static int scsi_low_world_start_xs(struct scsi_low_softc *);
-static int scsi_low_dettach_xs(struct scsi_low_softc *);
-static int scsi_low_ccb_setup_xs(struct scsi_low_softc *, struct slccb *);
-static int scsi_low_done_xs(struct scsi_low_softc *, struct slccb *);
-static void scsi_low_timeout_xs(struct scsi_low_softc *, int, int);
-static u_int scsi_low_translate_quirks_xs(u_int);
-static void scsi_low_setup_quirks_xs(struct targ_info *, struct lun_info *, u_int);
-
-struct scsi_low_osdep_funcs scsi_low_osdep_funcs_xs = {
-	scsi_low_attach_xs,
-	scsi_low_world_start_xs,
-	scsi_low_dettach_xs,
-	scsi_low_ccb_setup_xs,
-	scsi_low_done_xs,
-	scsi_low_timeout_xs
-};
-	
-struct scsipi_device scsi_low_dev = {
-	NULL,	/* Use default error handler */
-	NULL,	/* have a queue, served by this */
-	NULL,	/* have no async handler */
-	NULL,	/* Use default 'done' routine */
-};
-
-struct scsi_low_error_code scsi_low_error_code_xs[] = {
-	{0,		XS_NOERROR},
-	{SENSEIO,	XS_SENSE},
-	{BUSYERR,	XS_BUSY	},
-	{SELTIMEOUTIO,	XS_SELTIMEOUT},
-	{TIMEOUTIO,	XS_TIMEOUT},
-	{-1,		XS_DRIVER_STUFFUP}
-};
-
-static int
-scsi_low_ioctl_xs(link, cmd, addr, flag, p)
-	struct scsipi_link *link;
-	u_long cmd;
-	caddr_t addr;
-	int flag;
-	struct proc *p;
-{
-	struct scsi_low_softc *slp;
-	int s, error = ENOTTY;
-
-	slp = (struct scsi_low_softc *) link->adapter_softc;
-	if ((slp->sl_flags & HW_INACTIVE) != 0)
-		return ENXIO;
-
-	if (cmd == SCBUSIORESET)
-	{
-		s = SCSI_LOW_SPLSCSI();
-		scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, NULL);
-		splx(s);
-		error = 0;
-	}
-	else if (slp->sl_funcs->scsi_low_ioctl != 0)
-	{
-		error = (*slp->sl_funcs->scsi_low_ioctl)
-				(slp, cmd, addr, flag, p);
-	}
-
-	return error;
-}
-
-static int
-scsi_low_enable_xs(arg, enable)
-	void *arg;
-	int enable;
-{
-	struct scsi_low_softc *slp = arg;
-
-	if (enable != 0)
-	{
-		if ((slp->sl_flags & HW_INACTIVE) != 0)
-			return ENXIO;
-	}
-	else
-	{
-		if ((slp->sl_flags & HW_INACTIVE) != 0 ||
-		    (slp->sl_flags & HW_POWERCTRL) == 0)
-			return 0;
-
-		slp->sl_flags |= HW_POWDOWN;
-		if (slp->sl_funcs->scsi_low_power != NULL)
-		{
-			(*slp->sl_funcs->scsi_low_power)
-					(slp, SCSI_LOW_POWDOWN);
-		}
-	}
-	return 0;
-}
-
-static void
-scsi_low_scsi_minphys_xs(bp)
-	struct buf *bp;
-{
-
-	if (bp->b_bcount > SCSI_LOW_MINPHYS)
-		bp->b_bcount = SCSI_LOW_MINPHYS;
-	minphys(bp);
-}
-
-static int
-scsi_low_poll_xs(slp, cb)
-	struct scsi_low_softc *slp;
-	struct slccb *cb;
-{
-	struct scsipi_xfer *xs = cb->osdep;
-	int tcount;
-
-	cb->ccb_flags |= CCB_NOSDONE;
-	tcount = 0;
-
-	while (slp->sl_nio > 0)
-	{
-		SCSI_LOW_DELAY((1000 * 1000) / SCSI_LOW_XS_POLL_HZ);
-
-		(*slp->sl_funcs->scsi_low_poll) (slp);
-
-		if ((slp->sl_flags & (HW_INACTIVE | HW_INITIALIZING)) != 0)
-		{
-			cb->ccb_flags |= CCB_NORETRY;
-			cb->ccb_error |= FATALIO;
-			(void) scsi_low_revoke_ccb(slp, cb, 1);
-			printf("%s: hardware inactive in poll mode\n", 
-				slp->sl_xname);
-		}
-
-		if ((xs->flags & ITSDONE) != 0)
-			break;
-
-		if (tcount ++ < SCSI_LOW_XS_POLL_HZ / SCSI_LOW_TIMEOUT_HZ)
-			continue;
-
-		tcount = 0;
-		scsi_low_timeout_check(slp);
-	}
-
-	xs->flags |= ITSDONE;
-	scsipi_done(xs);
-	return COMPLETE;
-}
-
-static int
-scsi_low_scsi_cmd_xs(xs)
-	struct scsipi_xfer *xs;
-{
-	struct scsipi_link *splp = xs->sc_link;
-	struct scsi_low_softc *slp = splp->adapter_softc;
-	struct targ_info *ti;
-	struct lun_info *li;
-	struct slccb *cb;
-	int s, targ, lun, flags, rv;
-
-	if ((cb = SCSI_LOW_ALLOC_CCB(xs->flags & SCSI_NOSLEEP)) == NULL)
-		return TRY_AGAIN_LATER;
-
-	targ = splp->scsipi_scsi.target,
-	lun = splp->scsipi_scsi.lun;
-	ti = slp->sl_ti[targ];
-
-	cb->osdep = xs;
-	cb->bp = xs->bp;
-
-	if ((xs->flags & SCSI_POLL) == 0)
-		flags = CCB_AUTOSENSE;
-	else
-		flags = CCB_AUTOSENSE | CCB_POLLED;
-		
-
-	s = SCSI_LOW_SPLSCSI();
-	li = scsi_low_alloc_li(ti, lun, 1);
-	if ((u_int) splp->quirks != li->li_sloi.sloi_quirks)
-	{
-		scsi_low_setup_quirks_xs(ti, li, (u_int) splp->quirks);
-	}
-
-	if ((xs->flags & SCSI_RESET) != 0)
-	{
-		flags |= CCB_NORETRY | CCB_URGENT;
-		scsi_low_enqueue(slp, ti, li, cb, flags, SCSI_LOW_MSG_RESET);
-	}
-	else
-	{
-		if (ti->ti_setup_msg != 0)
-		{
-			scsi_low_message_enqueue(slp, ti, li, flags);
-		}
-
-		flags |= CCB_SCSIIO;
-		scsi_low_enqueue(slp, ti, li, cb, flags, 0);
-	}
-
-#ifdef	SCSI_LOW_DEBUG
-	if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_ABORT_CHECK, ti->ti_id) != 0)
-	{
-		scsi_low_test_abort(slp, ti, li);
-	}
-#endif	/* SCSI_LOW_DEBUG */
-
-	if ((cb->ccb_flags & CCB_POLLED) != 0)
-	{
-		rv = scsi_low_poll_xs(slp, cb);
-	}
-	else
-	{
-		rv = SUCCESSFULLY_QUEUED;
-	}
-	splx(s);
-	return rv;
-}
-
-static int
-scsi_low_attach_xs(slp)
-	struct scsi_low_softc *slp;
-{
-	struct scsipi_adapter *sap;
-	struct scsipi_link *splp;
-
-	strncpy(slp->sl_xname, slp->sl_dev.dv_xname, 16);
-
-	sap = SCSI_LOW_MALLOC(sizeof(*sap));
-	if (sap == NULL)
-		return ENOMEM;
-	splp = SCSI_LOW_MALLOC(sizeof(*splp));
-	if (splp == NULL)
-	{
-		SCSI_LOW_FREE(sap);
-		return ENOMEM;
-	}
-
-	SCSI_LOW_BZERO(sap, sizeof(*sap));
-	SCSI_LOW_BZERO(splp, sizeof(*splp));
-
-	sap->scsipi_cmd = scsi_low_scsi_cmd_xs;
-	sap->scsipi_minphys = scsi_low_scsi_minphys_xs;
-	sap->scsipi_enable = scsi_low_enable_xs;
-	sap->scsipi_ioctl = scsi_low_ioctl_xs;
-#ifdef	SCSI_LOW_TARGET_OPEN
-	sap->open_target_lu = scsi_low_target_open;
-#endif	/* SCSI_LOW_TARGET_OPEN */
-
-	splp->adapter_softc = slp;
-	splp->scsipi_scsi.adapter_target = slp->sl_hostid;
-	splp->scsipi_scsi.max_target = slp->sl_ntargs - 1;
-	splp->scsipi_scsi.max_lun = slp->sl_nluns - 1;
-	splp->scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
-	splp->openings = slp->sl_openings;
-	splp->type = BUS_SCSI;
-	splp->adapter_softc = slp;
-	splp->adapter = sap;
-	splp->device = &scsi_low_dev;
-
-	slp->sl_si.si_splp = splp;
-	slp->sl_show_result = SHOW_ALL_NEG;
-	return 0;
-}
-
-static int
-scsi_low_world_start_xs(slp)
-	struct scsi_low_softc *slp;
-{
-
-	return 0;
-}
-
-static int
-scsi_low_dettach_xs(slp)
-	struct scsi_low_softc *slp;
-{
-
-	/*
-	 * scsipi does not have dettach bus fucntion.
-	 *
-	scsipi_dettach_scsibus(slp->sl_si.si_splp);
-	*/
-	return 0;
-}
-
-static int
-scsi_low_ccb_setup_xs(slp, cb)
-	struct scsi_low_softc *slp;
-	struct slccb *cb;
-{
-	struct scsipi_xfer *xs = (struct scsipi_xfer *) cb->osdep;
-
-	if ((cb->ccb_flags & CCB_SCSIIO) != 0)
-	{
-		cb->ccb_scp.scp_cmd = (u_int8_t *) xs->cmd;
-		cb->ccb_scp.scp_cmdlen = xs->cmdlen;
-		cb->ccb_scp.scp_data = xs->data;
-		cb->ccb_scp.scp_datalen = xs->datalen;
-		cb->ccb_scp.scp_direction = (xs->flags & SCSI_DATA_OUT) ? 
-					SCSI_LOW_WRITE : SCSI_LOW_READ;
-		cb->ccb_tcmax = xs->timeout / 1000;
-	}
-	else
-	{
-		scsi_low_unit_ready_cmd(cb);
-	}
-	return SCSI_LOW_START_QTAG;
-}
-
-static int
-scsi_low_done_xs(slp, cb)
-	struct scsi_low_softc *slp;
-	struct slccb *cb;
-{
-	struct scsipi_xfer *xs;
-
-	xs = (struct scsipi_xfer *) cb->osdep;
-	if (cb->ccb_error == 0)
-	{
-		xs->error = XS_NOERROR;
-		xs->resid = 0;
-	}
-	else 	
-	{
-	        if (cb->ccb_rcnt >= slp->sl_max_retry)
-			cb->ccb_error |= ABORTIO;
-
-		if ((cb->ccb_flags & CCB_NORETRY) == 0 &&
-		    (cb->ccb_error & ABORTIO) == 0)
-			return EJUSTRETURN;
-
-		if ((cb->ccb_error & SENSEIO) != 0)
-		{
-			xs->sense.scsi_sense = cb->ccb_sense;
-		}
-
-		xs->error = scsi_low_translate_error_code(cb,
-				&scsi_low_error_code_xs[0]);
-	
-#ifdef	SCSI_LOW_DIAGNOSTIC
-		if ((cb->ccb_flags & CCB_SILENT) == 0 &&
-		    cb->ccb_scp.scp_cmdlen > 0 &&
-		    (scsi_low_cmd_flags[cb->ccb_scp.scp_cmd[0]] &
-		     SCSI_LOW_CMD_ABORT_WARNING) != 0)
-		{
-			printf("%s: WARNING: scsi_low IO abort\n",
-				slp->sl_xname);
-			scsi_low_print(slp, NULL);
-		}
-#endif	/* SCSI_LOW_DIAGNOSTIC */
-	}
-
-	if (cb->ccb_scp.scp_status == ST_UNKNOWN)
-		xs->status = 0;	/* XXX */
-	else
-		xs->status = cb->ccb_scp.scp_status;
-
-	xs->flags |= ITSDONE;
-	if ((cb->ccb_flags & CCB_NOSDONE) == 0)
-		scsipi_done(xs);
-
-	return 0;
-}
-
-static void
-scsi_low_timeout_xs(slp, ch, action)
-	struct scsi_low_softc *slp;
-	int ch;
-	int action;
-{
-
-	switch (ch)
-	{
-	case SCSI_LOW_TIMEOUT_CH_IO:
-		switch (action)
-		{
-		case SCSI_LOW_TIMEOUT_START:
-			timeout(scsi_low_timeout, slp,
-				hz / SCSI_LOW_TIMEOUT_HZ);
-			break;
-		case SCSI_LOW_TIMEOUT_STOP:
-			untimeout(scsi_low_timeout, slp);
-			break;
-		}
-		break;
-
-	case SCSI_LOW_TIMEOUT_CH_ENGAGE:
-		switch (action)
-		{
-		case SCSI_LOW_TIMEOUT_START:
-			timeout(scsi_low_engage, slp, 1);
-			break;
-		case SCSI_LOW_TIMEOUT_STOP:
-			untimeout(scsi_low_engage, slp);
-			break;
-		}
-		break;
-
-	case SCSI_LOW_TIMEOUT_CH_RECOVER:
-		break;
-	}
-}
-
-u_int
-scsi_low_translate_quirks_xs(quirks)
-	u_int quirks;
-{
-	u_int flags;
-	
-	flags = SCSI_LOW_DISK_LFLAGS | SCSI_LOW_DISK_TFLAGS;
-
-#ifdef	SDEV_NODISC
-	if (quirks & SDEV_NODISC)
-		flags &= ~SCSI_LOW_DISK_DISC;
-#endif	/* SDEV_NODISC */
-#ifdef	SDEV_NOPARITY
-	if (quirks & SDEV_NOPARITY)
-		flags &= ~SCSI_LOW_DISK_PARITY;
-#endif	/* SDEV_NOPARITY */
-#ifdef	SDEV_NOCMDLNK
-	if (quirks & SDEV_NOCMDLNK)
-		flags &= ~SCSI_LOW_DISK_LINK;
-#endif	/* SDEV_NOCMDLNK */
-#ifdef	SDEV_NOTAG
-	if (quirks & SDEV_NOTAG)
-		flags &= ~SCSI_LOW_DISK_QTAG;
-#endif	/* SDEV_NOTAG */
-#ifdef	SDEV_NOSYNC
-	if (quirks & SDEV_NOSYNC)
-		flags &= ~SCSI_LOW_DISK_SYNC;
-#endif	/* SDEV_NOSYNC */
-
-	return flags;
-}
-
-static void
-scsi_low_setup_quirks_xs(ti, li, flags)
-	struct targ_info *ti;
-	struct lun_info *li;
-	u_int flags;
-{
-	u_int quirks;
-
-	li->li_sloi.sloi_quirks = flags;
-	quirks = scsi_low_translate_quirks_xs(flags);
-	ti->ti_quirks = quirks & SCSI_LOW_DISK_TFLAGS;
-	li->li_quirks = quirks & SCSI_LOW_DISK_LFLAGS;
-	ti->ti_flags_valid |= SCSI_LOW_TARG_FLAGS_QUIRKS_VALID;
-	li->li_flags_valid |= SCSI_LOW_LUN_FLAGS_QUIRKS_VALID;
-	scsi_low_calcf_target(ti);
-	scsi_low_calcf_lun(li);
-	scsi_low_calcf_show(li);
-}
-
-#ifdef	SCSI_LOW_TARGET_OPEN
-static int
-scsi_low_target_open(link, cf)
-	struct scsipi_link *link;
-	struct cfdata *cf;
-{
-	u_int target = link->scsipi_scsi.target;
-	u_int lun = link->scsipi_scsi.lun;
-	struct scsi_low_softc *slp;
-	struct targ_info *ti;
-	struct lun_info *li;
-
-	slp = (struct scsi_low_softc *) link->adapter_softc;
-	ti = slp->sl_ti[target];
-	li = scsi_low_alloc_li(ti, lun, 0);
-	if (li == NULL)
-		return 0;
-
-	li->li_cfgflags = cf->cf_flags;
-	scsi_low_setup_quirks_xs(ti, li, (u_int) link->quirks);
-	return 0;
-}
-#endif	/* SCSI_LOW_TARGET_OPEN */
-
-#endif	/* SCSI_LOW_INTERFACE_XS */
-
-#ifdef SCSI_LOW_INTERFACE_CAM
-/**************************************************************
  * SCSI INTERFACE (CAM)
  **************************************************************/
 #define	SCSI_LOW_MALLOC(size)		malloc((size), M_SCSILOW, M_NOWAIT)
@@ -970,8 +438,9 @@
 #ifdef	SCSI_LOW_DEBUG
 	if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_ACTION, target) != 0)
 	{
-		printf("%s: cam_action: func code 0x%x target: %d, lun: %d\n",
-			slp->sl_xname, ccb->ccb_h.func_code, target, lun);
+		device_printf(slp->sl_dev,
+		    "cam_action: func code 0x%x target: %d, lun: %d\n",
+		    ccb->ccb_h.func_code, target, lun);
 	}
 #endif	/* SCSI_LOW_DEBUG */
 
@@ -980,7 +449,7 @@
 #ifdef	SCSI_LOW_DIAGNOSTIC
 		if (target == CAM_TARGET_WILDCARD || lun == CAM_LUN_WILDCARD)
 		{
-			printf("%s: invalid target/lun\n", slp->sl_xname);
+			device_printf(slp->sl_dev, "invalid target/lun\n");
 			ccb->ccb_h.status = CAM_REQ_INVALID;
 			xpt_done(ccb);
 			return;
@@ -1001,7 +470,7 @@
 		else
 			flags = CCB_SCSIIO;
 
-		s = SCSI_LOW_SPLSCSI();
+		s = splcam();
 		li = scsi_low_alloc_li(ti, lun, 1);
 
 		if (ti->ti_setup_msg != 0)
@@ -1033,7 +502,7 @@
 #ifdef	SCSI_LOW_DIAGNOSTIC
 		if (target == CAM_TARGET_WILDCARD || lun == CAM_LUN_WILDCARD)
 		{
-			printf("%s: invalid target/lun\n", slp->sl_xname);
+			device_printf(slp->sl_dev, "invalid target/lun\n");
 			ccb->ccb_h.status = CAM_REQ_INVALID;
 			xpt_done(ccb);
 			return;
@@ -1040,7 +509,7 @@
 		}
 #endif	/* SCSI_LOW_DIAGNOSTIC */
 
-		s = SCSI_LOW_SPLSCSI();
+		s = splcam();
 		cb = scsi_low_find_ccb(slp, target, lun, ccb->cab.abort_ccb);
 		rv = scsi_low_abort_ccb(slp, cb);
 		splx(s);
@@ -1061,7 +530,7 @@
 #ifdef	SCSI_LOW_DIAGNOSTIC
 		if (target == CAM_TARGET_WILDCARD)
 		{
-			printf("%s: invalid target\n", slp->sl_xname);
+			device_printf(slp->sl_dev, "invalid target\n");
 			ccb->ccb_h.status = CAM_REQ_INVALID;
 			xpt_done(ccb);
 			return;
@@ -1072,7 +541,7 @@
 		if (lun == CAM_LUN_WILDCARD)
 			lun = 0;
 
-		s = SCSI_LOW_SPLSCSI();
+		s = splcam();
 		scsi = &cts->proto_specific.scsi;
 		spi = &cts->xport_specific.spi;
 		if ((spi->valid & (CTS_SPI_VALID_BUS_WIDTH |
@@ -1134,7 +603,7 @@
 #ifdef	SCSI_LOW_DIAGNOSTIC
 		if (target == CAM_TARGET_WILDCARD)
 		{
-			printf("%s: invalid target\n", slp->sl_xname);
+			device_printf(slp->sl_dev, "invalid target\n");
 			ccb->ccb_h.status = CAM_REQ_INVALID;
 			xpt_done(ccb);
 			return;
@@ -1144,7 +613,7 @@
 		if (lun == CAM_LUN_WILDCARD)
 			lun = 0;
 
-		s = SCSI_LOW_SPLSCSI();
+		s = splcam();
 		li = scsi_low_alloc_li(ti, lun, 1);
 		if (li != NULL && cts->type == CTS_TYPE_CURRENT_SETTINGS) {
 			struct ccb_trans_settings_scsi *scsi =
@@ -1155,8 +624,8 @@
 			if (li->li_flags_valid != SCSI_LOW_LUN_FLAGS_ALL_VALID)
 			{
 				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
-				printf("%s: invalid GET_TRANS_CURRENT_SETTINGS call\n",
-					slp->sl_xname);
+				device_printf(slp->sl_dev,
+				    "invalid GET_TRANS_CURRENT_SETTINGS call\n");
 				goto settings_out;
 			}
 #endif	/* SCSI_LOW_DIAGNOSTIC */
@@ -1202,7 +671,7 @@
 	}
 
 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
-		s = SCSI_LOW_SPLSCSI();
+		s = splcam();
 		scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, NULL);
 		splx(s);
 		ccb->ccb_h.status = CAM_REQ_CMP;
@@ -1218,7 +687,7 @@
 #ifdef	SCSI_LOW_DIAGNOSTIC
 		if (target == CAM_TARGET_WILDCARD)
 		{
-			printf("%s: invalid target\n", slp->sl_xname);
+			device_printf(slp->sl_dev, "invalid target\n");
 			ccb->ccb_h.status = CAM_REQ_INVALID;
 			xpt_done(ccb);
 			return;
@@ -1243,7 +712,7 @@
 		else
 			flags = CCB_NORETRY | CCB_URGENT;
 
-		s = SCSI_LOW_SPLSCSI();
+		s = splcam();
 		li = scsi_low_alloc_li(ti, lun, 1);
 		scsi_low_enqueue(slp, ti, li, cb, flags, msg);
 		splx(s);
@@ -1273,9 +742,9 @@
 		cpi->transport_version = 2;
 		cpi->protocol = PROTO_SCSI;
 		cpi->protocol_version = SCSI_REV_2;
-		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
-		strncpy(cpi->hba_vid, "SCSI_LOW", HBA_IDLEN);
-		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+		strlcpy(cpi->hba_vid, "SCSI_LOW", HBA_IDLEN);
+		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
 		cpi->unit_number = cam_sim_unit(sim);
 		cpi->ccb_h.status = CAM_REQ_CMP;
 		xpt_done(ccb);
@@ -1298,9 +767,6 @@
 	struct cam_devq *devq;
 	int tagged_openings;
 
-	sprintf(slp->sl_xname, "%s%d",
-		DEVPORT_DEVNAME(slp->sl_dev), DEVPORT_DEVUNIT(slp->sl_dev));
-
 	devq = cam_simq_alloc(SCSI_LOW_NCCB);
 	if (devq == NULL)
 		return (ENOMEM);
@@ -1311,8 +777,8 @@
 	tagged_openings = min(slp->sl_openings, SCSI_LOW_MAXNEXUS);
 	slp->sl_si.sim = cam_sim_alloc(scsi_low_scsi_action_cam,
 				scsi_low_poll_cam,
-				DEVPORT_DEVNAME(slp->sl_dev), slp,
-				DEVPORT_DEVUNIT(slp->sl_dev), &Giant,
+				device_get_name(slp->sl_dev), slp,
+				device_get_unit(slp->sl_dev), &Giant,
 				slp->sl_openings, tagged_openings, devq);
 
 	if (slp->sl_si.sim == NULL) {
@@ -1421,8 +887,8 @@
 		    (scsi_low_cmd_flags[cb->ccb_scp.scp_cmd[0]] &
 		     SCSI_LOW_CMD_ABORT_WARNING) != 0)
 		{
-			printf("%s: WARNING: scsi_low IO abort\n",
-				slp->sl_xname);
+			device_printf(slp->sl_dev,
+			    "WARNING: scsi_low IO abort\n");
 			scsi_low_print(slp, NULL);
 		}
 #endif	/* SCSI_LOW_DIAGNOSTIC */
@@ -1479,8 +945,6 @@
 	}
 }
 
-#endif	/* SCSI_LOW_INTERFACE_CAM */
-
 /*=============================================================
  * END OF OS switch (All OS-dependent functions should be above)
  =============================================================*/
@@ -1504,7 +968,7 @@
 {
 	int s;
 
-	s = SCSI_LOW_SPLSCSI();
+	s = splcam();
 	slp->sl_flags |= HW_INACTIVE;
 	(*slp->sl_osdep_fp->scsi_low_osdep_timeout)
 		(slp, SCSI_LOW_TIMEOUT_CH_IO, SCSI_LOW_TIMEOUT_STOP);
@@ -1520,7 +984,7 @@
 {
 	int error, s;
 
-	s = SCSI_LOW_SPLSCSI();
+	s = splcam();
 	slp->sl_flags &= ~HW_INACTIVE;
 	if ((error = scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, NULL)) != 0)
 	{
@@ -1600,7 +1064,7 @@
 	void *arg;
 {
 	struct scsi_low_softc *slp = arg;
-	int s = SCSI_LOW_SPLSCSI();
+	int s = splcam();
 
 	switch (slp->sl_rstep)
 	{
@@ -1696,7 +1160,7 @@
 	if (li == NULL)
 		panic("no lun info mem");
 
-	SCSI_LOW_BZERO(li, ti->ti_lunsize);
+	bzero(li, ti->ti_lunsize);
 	li->li_lun = lun;
 	li->li_ti = ti;
 
@@ -1736,9 +1200,9 @@
 
 	ti = SCSI_LOW_MALLOC(slp->sl_targsize);
 	if (ti == NULL)
-		panic("%s short of memory", slp->sl_xname);
+		panic("%s short of memory", device_get_nameunit(slp->sl_dev));
 
-	SCSI_LOW_BZERO(ti, slp->sl_targsize);
+	bzero(ti, slp->sl_targsize);
 	ti->ti_id = targ;
 	ti->ti_sc = slp;
 
@@ -1812,7 +1276,7 @@
 	struct scsi_low_softc *slp = arg;
 	int s;
 
-	s = SCSI_LOW_SPLSCSI();
+	s = splcam();
 	(void) scsi_low_timeout_check(slp);
 	(*slp->sl_osdep_fp->scsi_low_osdep_timeout)
 		(slp, SCSI_LOW_TIMEOUT_CH_IO, SCSI_LOW_TIMEOUT_START);
@@ -1843,7 +1307,8 @@
 			cb->ccb_flags |= CCB_NORETRY;
 			cb->ccb_error |= SELTIMEOUTIO;
 			if (scsi_low_revoke_ccb(slp, cb, 1) != NULL)
-				panic("%s: ccb not finished", slp->sl_xname);
+				panic("%s: ccb not finished",
+				    device_get_nameunit(slp->sl_dev));
 		}
 
 		if (slp->sl_Tnexus == NULL)
@@ -1926,7 +1391,7 @@
 
 bus_reset:
 	cb->ccb_error |= TIMEOUTIO;
-	printf("%s: slccb (0x%lx) timeout!\n", slp->sl_xname, (u_long) cb);
+	device_printf(slp->sl_dev, "slccb (0x%lx) timeout!\n", (u_long) cb);
 	scsi_low_info(slp, NULL, "scsi bus hangup. try to recover.");
 	scsi_low_init(slp, SCSI_LOW_RESTART_HARD);
 	scsi_low_start(slp);
@@ -1967,7 +1432,8 @@
 	else if ((cb->ccb_flags & CCB_DISCQ) != 0)
 	{
 		if (scsi_low_revoke_ccb(slp, cb, 0) == NULL)
-			panic("%s: revoked ccb done", slp->sl_xname);
+			panic("%s: revoked ccb done",
+			    device_get_nameunit(slp->sl_dev));
 
 		cb->ccb_flags |= CCB_STARTQ;
 		TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain);
@@ -1978,7 +1444,8 @@
 	else
 	{
 		if (scsi_low_revoke_ccb(slp, cb, 1) != NULL)
-			panic("%s: revoked ccb retried", slp->sl_xname);
+			panic("%s: revoked ccb retried",
+			    device_get_nameunit(slp->sl_dev));
 	}
 	return 0;
 }
@@ -1995,12 +1462,7 @@
 	struct lun_info *li;
 	int s, i, nccb, rv;
 
-#ifdef	SCSI_LOW_INTERFACE_XS
-	slp->sl_osdep_fp = &scsi_low_osdep_funcs_xs;
-#endif	/* SCSI_LOW_INTERFACE_XS */
-#ifdef	SCSI_LOW_INTERFACE_CAM
 	slp->sl_osdep_fp = &scsi_low_osdep_funcs_cam;
-#endif	/* SCSI_LOW_INTERFACE_CAM */
 
 	if (slp->sl_osdep_fp == NULL)
 		panic("scsi_low: interface not spcified");
@@ -2042,23 +1504,23 @@
 	TAILQ_INIT(&slp->sl_start);
 
 	/* call os depend attach */
-	s = SCSI_LOW_SPLSCSI();
+	s = splcam();
 	rv = (*slp->sl_osdep_fp->scsi_low_osdep_attach) (slp);
 	if (rv != 0)
 	{
 		splx(s);
-		printf("%s: scsi_low_attach: osdep attach failed\n",
-			slp->sl_xname);
+		device_printf(slp->sl_dev,
+		    "scsi_low_attach: osdep attach failed\n");
 		return EINVAL;
 	}
 
 	/* check hardware */
-	SCSI_LOW_DELAY(1000);	/* wait for 1ms */
+	DELAY(1000);	/* wait for 1ms */
 	if (scsi_low_init(slp, SCSI_LOW_RESTART_HARD) != 0)
 	{
 		splx(s);
-		printf("%s: scsi_low_attach: initialization failed\n",
-			slp->sl_xname);
+		device_printf(slp->sl_dev,
+		    "scsi_low_attach: initialization failed\n");
 		return EINVAL;
 	}
 
@@ -2088,7 +1550,7 @@
 {
 	int s, rv;
 
-	s = SCSI_LOW_SPLSCSI();
+	s = splcam();
 	if (scsi_low_is_busy(slp) != 0)
 	{
 		splx(s);
@@ -2208,7 +1670,7 @@
 {
 
 	cb->ccb_scp.scp_cmdlen = 6;
-	SCSI_LOW_BZERO(cb->ccb_scsi_cmd, cb->ccb_scp.scp_cmdlen);
+	bzero(cb->ccb_scsi_cmd, cb->ccb_scp.scp_cmdlen);
 	cb->ccb_scsi_cmd[0] = REQUEST_SENSE;
 	cb->ccb_scsi_cmd[4] = sizeof(cb->ccb_sense);
 	cb->ccb_scp.scp_cmd = cb->ccb_scsi_cmd;
@@ -2223,7 +1685,7 @@
 	}
 	else
 	{
-		SCSI_LOW_BZERO(&cb->ccb_sense, sizeof(cb->ccb_sense));
+		bzero(&cb->ccb_sense, sizeof(cb->ccb_sense));
 #ifdef	SCSI_LOW_NEGOTIATE_BEFORE_SENSE
 		scsi_low_assert_msg(slp, ti, ti->ti_setup_msg_done, 0);
 #endif	/* SCSI_LOW_NEGOTIATE_BEFORE_SENSE */
@@ -2273,7 +1735,7 @@
 		return SCSI_LOW_START_QTAG;
 
 	default:
-		panic("%s: no setup phase", slp->sl_xname);
+		panic("%s: no setup phase", device_get_nameunit(slp->sl_dev));
 	}
 
 	return SCSI_LOW_START_NO_QTAG;
@@ -2329,7 +1791,7 @@
 	if (slp->sl_Tnexus || slp->sl_Lnexus || slp->sl_Qnexus)
 	{
 		scsi_low_info(slp, NULL, "NEXUS INCOSISTENT");
-		panic("%s: inconsistent", slp->sl_xname);
+		panic("%s: inconsistent", device_get_nameunit(slp->sl_dev));
 	}
 #endif	/* SCSI_LOW_DIAGNOSTIC */
 
@@ -2466,7 +1928,7 @@
 	if (slp->sl_disc == 0)
 	{
 #ifdef	SCSI_LOW_DIAGNOSTIC
-		printf("%s: try selection again\n", slp->sl_xname);
+		device_printf(slp->sl_dev, "try selection again\n");
 #endif	/* SCSI_LOW_DIAGNOSTIC */
 		slp->sl_retry_sel = 1;
 	}
@@ -2883,7 +2345,7 @@
 	cnputc('\b');
 	cnputc(tw_chars[tw_pos++]);
 	tw_pos %= (sizeof(tw_chars) - 1);
-	SCSI_LOW_DELAY(TWIDDLEWAIT);
+	DELAY(TWIDDLEWAIT);
 }
 
 void
@@ -2894,7 +2356,7 @@
 
 	(*slp->sl_funcs->scsi_low_bus_reset) (slp);
 
-	printf("%s: try to reset scsi bus  ", slp->sl_xname);
+	device_printf(slp->sl_dev, "try to reset scsi bus  ");
 	for (i = 0; i <= SCSI2_RESET_DELAY / TWIDDLEWAIT ; i++)
 		scsi_low_twiddle_wait();
 	cnputc('\b');
@@ -2910,7 +2372,7 @@
 	int error;
 
 	if (s != NULL)
-		printf("%s: scsi bus restart. reason: %s\n", slp->sl_xname, s);
+		device_printf(slp->sl_dev, "scsi bus restart. reason: %s\n", s);
 
 	if ((error = scsi_low_init(slp, flags)) != 0)
 		return error;
@@ -2949,8 +2411,8 @@
 #ifdef	SCSI_LOW_DEBUG
 	if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_NEXUS_CHECK, ti->ti_id) != 0)
 	{
-		printf("%s: nexus(0x%lx) abort check start\n",
-			slp->sl_xname, (u_long) cb);
+		device_printf(slp->sl_dev, "nexus(0x%lx) abort check start\n",
+		    (u_long) cb);
 		cb->ccb_flags |= (CCB_NORETRY | CCB_SILENT);
 		scsi_low_revoke_ccb(slp, cb, 1);
 		return NULL;
@@ -3052,7 +2514,7 @@
 	return ti;
 
 world_restart:
-	printf("%s: reselect(%x:unknown) %s\n", slp->sl_xname, targ, s);
+	device_printf(slp->sl_dev, "reselect(%x:unknown) %s\n", targ, s);
 	scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, 
 		         "reselect: scsi world confused");
 	return NULL;
@@ -3432,7 +2894,7 @@
 			slp->sl_Lnexus->li_cfgflags &= ~SCSI_LOW_QTAG;
 			scsi_low_calcf_lun(slp->sl_Lnexus);
 		}
-		printf("%s: scsi_low: qtag msg rejected\n", slp->sl_xname);
+		device_printf(slp->sl_dev, "scsi_low: qtag msg rejected\n");
 	}
 	return 0;
 }
@@ -3458,7 +2920,7 @@
 	slp->sl_ph_count ++;
 	if (slp->sl_ph_count > SCSI_LOW_MAX_PHCHANGES)
 	{
-		printf("%s: too many phase changes\n", slp->sl_xname);
+		device_printf(slp->sl_dev, "too many phase changes\n");
 		slp->sl_error |= FATALIO;
 		scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0);
 	}
@@ -3483,7 +2945,7 @@
 		ti->ti_msgflags |= ti->ti_omsgflags;
 		ti->ti_omsgflags = 0;
 #ifdef	SCSI_LOW_DIAGNOSTIC
-		printf("%s: scsi_low_msgout: retry msgout\n", slp->sl_xname);
+		device_printf(slp->sl_dev, "scsi_low_msgout: retry msgout\n");
 #endif	/* SCSI_LOW_DIAGNOSTIC */
 	}
 
@@ -3558,7 +3020,7 @@
 	struct targ_info *ti = slp->sl_Tnexus;
 	u_int8_t msg = ti->ti_msgin[0];
 
-	printf("%s: MSGIN: msg 0x%x rejected\n", slp->sl_xname, (u_int) msg);
+	device_printf(slp->sl_dev, "MSGIN: msg 0x%x rejected\n", (u_int) msg);
 	scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0);
 	return 0;
 }
@@ -3666,7 +3128,8 @@
 	cb->ccb_tag = SCSI_LOW_UNKTAG;
 	cb->ccb_otag = SCSI_LOW_UNKTAG;
 	if (scsi_low_done(slp, cb) == SCSI_LOW_DONE_RETRY)
-		panic("%s: linked ccb retried", slp->sl_xname);
+		panic("%s: linked ccb retried",
+		    device_get_nameunit(slp->sl_dev));
 
 	slp->sl_Qnexus = ncb;
 	slp->sl_ph_count = 0;
@@ -3758,8 +3221,8 @@
 		 */
 		ti->ti_maxsynch.period = 0;
 		ti->ti_maxsynch.offset = 0;
-		printf("%s: target brain damaged. async transfer\n",
-			slp->sl_xname);
+		device_printf(slp->sl_dev,
+		    "target brain damaged. async transfer\n");
 		return EINVAL;
 	}
 
@@ -3774,8 +3237,8 @@
 		 * for our adapter.
 		 * The adapter changes max synch and max offset.
 		 */
-		printf("%s: synch neg failed. retry synch msg neg ...\n",
-			slp->sl_xname);
+		device_printf(slp->sl_dev,
+		    "synch neg failed. retry synch msg neg ...\n");
 		return error;
 	}
 
@@ -3795,8 +3258,9 @@
 			return 0;
 #endif	/* SCSI_LOW_NEGOTIATE_BEFORE_SENSE */
 
-		printf("%s(%d:*): <%s> offset %d period %dns ",
-			slp->sl_xname, ti->ti_id, s, offset, period * 4);
+		device_printf(slp->sl_dev,
+		    "(%d:*): <%s> offset %d period %dns ",
+		    ti->ti_id, s, offset, period * 4);
 
 		if (period != 0)
 		{
@@ -3823,8 +3287,8 @@
 		 * Current width is not acceptable for our adapter.
 		 * The adapter changes max width.
 		 */
-		printf("%s: wide neg failed. retry wide msg neg ...\n",
-			slp->sl_xname);
+		device_printf(slp->sl_dev,
+		    "wide neg failed. retry wide msg neg ...\n");
 		return error;
 	}
 
@@ -3845,8 +3309,8 @@
 			return 0;
 #endif	/* SCSI_LOW_NEGOTIATE_BEFORE_SENSE */
 
-		printf("%s(%d:*): transfer width %d bits\n",
-			slp->sl_xname, ti->ti_id, 1 << (3 + ti->ti_width));
+		device_printf(slp->sl_dev, "(%d:*): transfer width %d bits\n",
+		    ti->ti_id, 1 << (3 + ti->ti_width));
 	}
 	return 0;
 }
@@ -3990,8 +3454,8 @@
 
 	if (ti->ti_emsgflags != 0)
 	{
-		printf("%s: msg flags [0x%x] rejected\n",
-		       slp->sl_xname, ti->ti_emsgflags);
+		device_printf(slp->sl_dev, "msg flags [0x%x] rejected\n",
+		    ti->ti_emsgflags);
 		msgflags = SCSI_LOW_MSG_REJECT;
 		mdp = &scsi_low_msgout_data[0];
 		for ( ; mdp->md_flags != SCSI_LOW_MSG_ALL; mdp ++)
@@ -4043,7 +3507,7 @@
 		slp->sl_ph_count ++;
 		if (slp->sl_ph_count > SCSI_LOW_MAX_PHCHANGES)
 		{
-			printf("%s: too many phase changes\n", slp->sl_xname);
+			device_printf(slp->sl_dev, "too many phase changes\n");
 			slp->sl_error |= FATALIO;
 			scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0);
 		}
@@ -4336,7 +3800,8 @@
 	if ((cb->ccb_flags & (CCB_STARTQ | CCB_DISCQ)) == 
 	    (CCB_STARTQ | CCB_DISCQ))
 	{
-		panic("%s: ccb in both queue", slp->sl_xname);
+		panic("%s: ccb in both queue",
+		    device_get_nameunit(slp->sl_dev));
 	}
 #endif	/* SCSI_LOW_DIAGNOSTIC */
 
@@ -4363,7 +3828,8 @@
 		cb->ccb_error |= FATALIO;
 		cb->ccb_flags &= ~CCB_AUTOSENSE;
 		if (scsi_low_done(slp, cb) != SCSI_LOW_DONE_COMPLETE)
-			panic("%s: done ccb retried", slp->sl_xname);
+			panic("%s: done ccb retried",
+			    device_get_nameunit(slp->sl_dev));
 		return NULL;
 	}
 	else
@@ -4530,8 +3996,9 @@
 #ifdef	SCSI_LOW_DEBUG
 	if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_CALCF, ti->ti_id) != 0)
 	{
-		printf("%s(%d:*): max period(%dns) offset(%d) width(%d)\n",
-			slp->sl_xname, ti->ti_id,
+		device_printf(slp->sl_dev,
+			"(%d:*): max period(%dns) offset(%d) width(%d)\n",
+			ti->ti_id,
 			ti->ti_maxsynch.period * 4,
 			ti->ti_maxsynch.offset,
 			ti->ti_width);
@@ -4546,8 +4013,9 @@
 	struct targ_info *ti = li->li_ti;
 	struct scsi_low_softc *slp = ti->ti_sc;
 
-	printf("%s(%d:%d): period(%d ns) offset(%d) width(%d) flags 0x%b\n",
-		slp->sl_xname, ti->ti_id, li->li_lun,
+	device_printf(slp->sl_dev,
+		"(%d:%d): period(%d ns) offset(%d) width(%d) flags 0x%b\n",
+		ti->ti_id, li->li_lun,
 		ti->ti_maxsynch.period * 4,
 		ti->ti_maxsynch.offset,
 		ti->ti_width,
@@ -4569,7 +4037,7 @@
 	struct slccb *cb;
 	int target, lun;
 
-	printf("%s: scsi_low: probing all devices ....\n", slp->sl_xname);
+	device_printf(slp->sl_dev, "scsi_low: probing all devices ....\n");
 
 	for (target = 0; target < slp->sl_ntargs; target ++)
 	{
@@ -4577,8 +4045,9 @@
 		{
 			if ((slp->sl_show_result & SHOW_PROBE_RES) != 0)
 			{
-				printf("%s: scsi_low: target %d (host card)\n",
-					slp->sl_xname, target);
+				device_printf(slp->sl_dev,
+				    "scsi_low: target %d (host card)\n",
+				    target);
 			}
 			continue;
 		}
@@ -4585,8 +4054,8 @@
 
 		if ((slp->sl_show_result & SHOW_PROBE_RES) != 0)
 		{
-			printf("%s: scsi_low: target %d lun ",
-				slp->sl_xname, target);
+			device_printf(slp->sl_dev, "scsi_low: target %d lun ",
+			    target);
 		}
 
 		ti = slp->sl_ti[target];
@@ -4632,7 +4101,7 @@
 	tcount = 0;
 	while (slp->sl_nio > 0)
 	{
-		SCSI_LOW_DELAY((1000 * 1000) / SCSI_LOW_POLL_HZ);
+		DELAY((1000 * 1000) / SCSI_LOW_POLL_HZ);
 
 		(*slp->sl_funcs->scsi_low_poll) (slp);
 		if (tcount ++ < SCSI_LOW_POLL_HZ / SCSI_LOW_TIMEOUT_HZ)
@@ -4663,8 +4132,8 @@
 		acb = TAILQ_FIRST(&li->li_discq); 
 		if (scsi_low_abort_ccb(slp, acb) == 0)
 		{
-			printf("%s: aborting ccb(0x%lx) start\n",
-				slp->sl_xname, (u_long) acb);
+			device_printf(slp->sl_dev,
+			    "aborting ccb(0x%lx) start\n", (u_long) acb);
 		}
 	}
 }
@@ -4679,7 +4148,7 @@
 	if (slp->sl_ph_count < SCSI_LOW_MAX_ATTEN_CHECK)
 		scsi_low_assert_msg(slp, ti, msg, 0);
 	else
-		printf("%s: atten check OK\n", slp->sl_xname);
+		device_printf(slp->sl_dev, "atten check OK\n");
 }
 
 static void
@@ -4714,8 +4183,7 @@
 	printf(">>>>> SCSI_LOW_INFO(0x%lx): %s\n", (u_long) slp->sl_Tnexus, s);
 	if (ti == NULL)
 	{
-		for (ti = TAILQ_FIRST(&slp->sl_titab); ti != NULL;
-		     ti = TAILQ_NEXT(ti, ti_chain))
+		TAILQ_FOREACH(ti, &slp->sl_titab, ti_chain)
 		{
 			scsi_low_print(slp, ti);
 		}
@@ -4754,15 +4222,15 @@
 	}
  	sp = &slp->sl_scp;
 
-	printf("%s: === NEXUS T(0x%lx) L(0x%lx) Q(0x%lx) NIO(%d) ===\n",
-		slp->sl_xname, (u_long) ti, (u_long) li, (u_long) cb,
-		slp->sl_nio);
+	device_printf(slp->sl_dev,
+	    "=== NEXUS T(0x%lx) L(0x%lx) Q(0x%lx) NIO(%d) ===\n",
+	    (u_long) ti, (u_long) li, (u_long) cb, slp->sl_nio);
 
 	/* target stat */
 	if (ti != NULL)
 	{
 		u_int flags = 0, maxnqio = 0, nqio = 0;
-		int lun = -1;
+		int lun = CAM_LUN_WILDCARD;
 
 		if (li != NULL)
 		{
@@ -4772,8 +4240,8 @@
 			nqio = li->li_nqio;
 		}
 
-		printf("%s(%d:%d) ph<%s> => ph<%s> DISC(%d) QIO(%d:%d)\n",
-			slp->sl_xname,
+		device_printf(slp->sl_dev,
+		       "(%d:%d) ph<%s> => ph<%s> DISC(%d) QIO(%d:%d)\n",
 		       ti->ti_id, lun, phase[(int) ti->ti_ophase], 
 		       phase[(int) ti->ti_phase], ti->ti_disc,
 		       nqio, maxnqio);

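Note on the loop conversions above: several scans in scsi_low.c move from
open-coded TAILQ_FIRST()/TAILQ_NEXT() iteration to the equivalent
TAILQ_FOREACH() macro from queue(3).  A small self-contained sketch of the
idiom with a hypothetical node type (not taken from the driver); the _SAFE
variant at the end is the form scsi_pass.c uses below when it frees entries
mid-walk:

    #include <sys/queue.h>  /* BSD queue(3); glibc's copy lacks _SAFE */
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int                     value;
            TAILQ_ENTRY(node)       link;   /* linkage within the queue */
    };

    TAILQ_HEAD(node_list, node);

    int
    main(void)
    {
            struct node_list head = TAILQ_HEAD_INITIALIZER(head);
            struct node *n, *tmp;
            int i;

            for (i = 0; i < 3; i++) {
                    n = calloc(1, sizeof(*n));
                    if (n == NULL)
                            abort();
                    n->value = i;
                    TAILQ_INSERT_TAIL(&head, n, link);
            }

            /* Old style, as removed above: explicit first/next calls. */
            for (n = TAILQ_FIRST(&head); n != NULL;
                n = TAILQ_NEXT(n, link))
                    printf("old style: %d\n", n->value);

            /* New style: the same traversal in one macro. */
            TAILQ_FOREACH(n, &head, link)
                    printf("new style: %d\n", n->value);

            /* _SAFE permits unlinking the current entry mid-walk. */
            TAILQ_FOREACH_SAFE(n, &head, link, tmp) {
                    TAILQ_REMOVE(&head, n, link);
                    free(n);
            }
            return (0);
    }
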
Modified: trunk/sys/cam/scsi/scsi_low.h
===================================================================
--- trunk/sys/cam/scsi/scsi_low.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_low.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,4 +1,5 @@
-/*	$MidnightBSD$	*/
+/* $MidnightBSD$ */
+/*	$FreeBSD: stable/10/sys/cam/scsi/scsi_low.h 311402 2017-01-05 11:20:31Z mav $	*/
 /*	$NecBSD: scsi_low.h,v 1.24.10.5 2001/06/26 07:31:46 honda Exp $	*/
 /*	$NetBSD$	*/
 
@@ -48,24 +49,9 @@
  * Scsi low OSDEP 
  * (All OS-dependent structures should be here!)
  ================================================*/
-/******** interface ******************************/
-#ifdef	__NetBSD__
-#define	SCSI_LOW_INTERFACE_XS
-#endif	/* __NetBSD__ */
-
-#ifdef	__FreeBSD__
-#define	SCSI_LOW_INTERFACE_CAM
-#define	CAM
-#endif	/* __FreeBSD__ */
-
 /******** includes *******************************/
-#ifdef	__NetBSD__
-#include <i386/Cbus/dev/scsi_dvcfg.h>
-#include <dev/isa/ccbque.h>
-#endif	/* __NetBSD__ */
 
-#ifdef	__FreeBSD__
-#include <sys/device_port.h>
+#include <sys/bus.h>
 #include <sys/kdb.h>
 #include <cam/cam.h>
 #include <cam/cam_ccb.h>
@@ -75,47 +61,16 @@
 
 #include <cam/scsi/scsi_dvcfg.h>
 #include <i386/isa/ccbque.h>
-#endif	/* __FreeBSD__ */
 
 /******** functions macro ************************/
-#ifdef	__NetBSD__
-#define	SCSI_LOW_DEBUGGER(dev)	Debugger()
-#define	SCSI_LOW_DELAY(mu)	delay((mu))
-#define	SCSI_LOW_SPLSCSI	splbio
-#define	SCSI_LOW_BZERO(pt, size)	memset((pt), 0, (size))
-#endif	/* __NetBSD__ */
 
-#ifdef	__FreeBSD__
 #undef	MSG_IDENTIFY
-#define	SCSI_LOW_DEBUGGER(dev)	kdb_enter(KDB_WHY_CAM, dev)
-#define	SCSI_LOW_DELAY(mu)	DELAY((mu))
-#define	SCSI_LOW_SPLSCSI	splcam
-#define	SCSI_LOW_BZERO(pt, size)	bzero((pt), (size))
-#endif	/* __FreeBSD__ */
 
 /******** os depend interface structures **********/
-#ifdef	__NetBSD__
-typedef	struct scsipi_sense_data scsi_low_osdep_sense_data_t;
-
-struct scsi_low_osdep_interface {
-	struct device si_dev;
-
-	struct scsipi_link *si_splp;
-};
-
-struct scsi_low_osdep_targ_interface {
-};
-
-struct scsi_low_osdep_lun_interface {
-	u_int sloi_quirks;
-};
-#endif	/* __NetBSD__ */
-
-#ifdef	__FreeBSD__
 typedef	struct scsi_sense_data scsi_low_osdep_sense_data_t;
 
 struct scsi_low_osdep_interface {
-	DEVPORT_DEVICE si_dev;
+	device_t si_dev;
 
 	struct cam_sim *sim;
 	struct cam_path *path;
@@ -129,13 +84,6 @@
 #endif
 };
 
-struct scsi_low_osdep_targ_interface {
-};
-
-struct scsi_low_osdep_lun_interface {
-};
-#endif	/* __FreeBSD__ */
-
 /******** os depend interface functions *************/
 struct slccb;
 struct scsi_low_softc;
@@ -246,7 +194,7 @@
 	 *****************************************/
 	struct sc_p ccb_scp;		/* given */
 	struct sc_p ccb_sscp;		/* saved scsi data pointer */
-	int ccb_datalen;		/* transfered data counter */
+	int ccb_datalen;		/* transferred data counter */
 
 	/*****************************************
 	 * Msgout 
@@ -265,7 +213,7 @@
 #define	CCB_STARTQ	0x0010
 #define	CCB_POLLED	0x0100	/* polling ccb */
 #define	CCB_NORETRY	0x0200	/* do NOT retry */
-#define	CCB_AUTOSENSE	0x0400	/* do a sence after CA */
+#define	CCB_AUTOSENSE	0x0400	/* do a sense after CA */
 #define	CCB_URGENT	0x0800	/* an urgent ccb */
 #define	CCB_NOSDONE	0x1000	/* do not call an os done routine */
 #define	CCB_SCSIIO	0x2000	/* a normal scsi io coming from upper layer */
@@ -299,8 +247,6 @@
 LIST_HEAD(lun_info_tab, lun_info);
 
 struct lun_info {
-	struct scsi_low_osdep_lun_interface li_sloi;
-
 	int li_lun;
 	struct targ_info *li_ti;		/* my target */
 
@@ -387,8 +333,6 @@
 };
 
 struct targ_info {
-	struct scsi_low_osdep_targ_interface ti_slti;
-
 	TAILQ_ENTRY(targ_info) ti_chain;	/* targ_info link */
 
 	struct scsi_low_softc *ti_sc;		/* our softc */
@@ -547,7 +491,6 @@
 	struct scsi_low_osdep_interface sl_si;
 #define	sl_dev	sl_si.si_dev
 	struct scsi_low_osdep_funcs *sl_osdep_fp;
-	u_char sl_xname[16];
 				
 	/* our chain */
 	LIST_ENTRY(scsi_low_softc) sl_chain;
@@ -652,10 +595,6 @@
 
 	/* targinfo size */
 	int sl_targsize;
-
-#if	defined(i386) || defined(__i386__)
-	u_int sl_irq;		/* XXX */
-#endif	/* i386 */
 };
 
 /*************************************************

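Note on the message plumbing above: with sl_xname removed from struct
scsi_low_softc, every diagnostic now goes through the device_t kept in the
osdep interface (sl_si.si_dev).  A hedged kernel-side sketch of the
convention, with a hypothetical softc (example_softc is not a real driver
structure):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/bus.h>

    struct example_softc {
            device_t        sc_dev;         /* set once at attach time */
    };

    static void
    example_report(struct example_softc *sc, int code)
    {

            /*
             * device_printf(9) prefixes the message with the device
             * name and unit ("foo0: ..."), replacing the old
             * printf("%s: ...", sc->sc_xname, ...) pattern.
             */
            device_printf(sc->sc_dev, "status code 0x%x\n", code);

            /*
             * Where a plain string is required, e.g. for panic(9),
             * device_get_nameunit(9) yields the same "foo0" form.
             */
            if (code < 0)
                    panic("%s: negative status code",
                        device_get_nameunit(sc->sc_dev));
    }
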
Modified: trunk/sys/cam/scsi/scsi_message.h
===================================================================
--- trunk/sys/cam/scsi/scsi_message.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_message.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /*-
  * This file is in the public domain.
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_message.h 290775 2015-11-13 19:23:22Z mav $
  */
 
 /* Messages (1 byte) */		     /* I/T (M)andatory or (O)ptional */
@@ -68,3 +69,9 @@
 #define	MSG_EXT_PPR_QAS_REQ	0x04
 #define	MSG_EXT_PPR_DT_REQ	0x02
 #define	MSG_EXT_PPR_IU_REQ	0x01
+
+/* Fake messages not defined for SPI, but needed for other transports */
+#define	MSG_QUERY_TASK		0x100
+#define	MSG_QUERY_TASK_SET	0x101
+#define	MSG_QUERY_ASYNC_EVENT	0x102
+

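Note on the new message codes above: the MSG_QUERY_* values are placed above
0xff on purpose, so they can never collide with a real single-byte SPI
message; any field that carries them must therefore be wider than u_int8_t.
A small illustrative check (the three values are copied from the header
above; everything else is scaffolding):

    #include <assert.h>

    #define MSG_QUERY_TASK          0x100
    #define MSG_QUERY_TASK_SET      0x101
    #define MSG_QUERY_ASYNC_EVENT   0x102

    int
    main(void)
    {
            int msg = MSG_QUERY_TASK;
            unsigned char wire;

            /* Out of range for any one-byte on-the-wire SPI message... */
            assert(msg > 0xff);

            /* ...so a u_int8_t field would silently truncate it. */
            wire = (unsigned char)msg;
            assert(wire == 0x00);   /* 0x100 truncates to 0 */

            return (0);
    }
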
Modified: trunk/sys/cam/scsi/scsi_pass.c
===================================================================
--- trunk/sys/cam/scsi/scsi_pass.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_pass.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
@@ -26,28 +27,42 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_pass.c 331632 2018-03-27 17:42:04Z brooks $");
 
+#include "opt_compat.h"
+
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
+#include <sys/conf.h>
 #include <sys/types.h>
 #include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/devicestat.h>
+#include <sys/errno.h>
+#include <sys/fcntl.h>
 #include <sys/malloc.h>
-#include <sys/fcntl.h>
-#include <sys/conf.h>
-#include <sys/errno.h>
-#include <sys/devicestat.h>
 #include <sys/proc.h>
+#include <sys/poll.h>
+#include <sys/selinfo.h>
+#include <sys/sdt.h>
+#include <sys/sysent.h>
 #include <sys/taskqueue.h>
+#include <vm/uma.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
 
+#include <machine/bus.h>
+
 #include <cam/cam.h>
 #include <cam/cam_ccb.h>
 #include <cam/cam_periph.h>
 #include <cam/cam_queue.h>
+#include <cam/cam_xpt.h>
 #include <cam/cam_xpt_periph.h>
 #include <cam/cam_debug.h>
-#include <cam/cam_sim.h>
+#include <cam/cam_compat.h>
+#include <cam/cam_xpt_periph.h>
 
 #include <cam/scsi/scsi_all.h>
 #include <cam/scsi/scsi_pass.h>
@@ -56,7 +71,11 @@
 	PASS_FLAG_OPEN			= 0x01,
 	PASS_FLAG_LOCKED		= 0x02,
 	PASS_FLAG_INVALID		= 0x04,
-	PASS_FLAG_INITIAL_PHYSPATH	= 0x08
+	PASS_FLAG_INITIAL_PHYSPATH	= 0x08,
+	PASS_FLAG_ZONE_INPROG		= 0x10,
+	PASS_FLAG_ZONE_VALID		= 0x20,
+	PASS_FLAG_UNMAPPED_CAPABLE	= 0x40,
+	PASS_FLAG_ABANDONED_REF_SET	= 0x80
 } pass_flags;
 
 typedef enum {
@@ -65,28 +84,81 @@
 
 typedef enum {
 	PASS_CCB_BUFFER_IO,
-	PASS_CCB_WAITING
+	PASS_CCB_QUEUED_IO
 } pass_ccb_types;
 
 #define ccb_type	ppriv_field0
-#define ccb_bp		ppriv_ptr1
+#define ccb_ioreq	ppriv_ptr1
 
+/*
+ * The maximum number of memory segments we preallocate.
+ */
+#define	PASS_MAX_SEGS	16
+
+typedef enum {
+	PASS_IO_NONE		= 0x00,
+	PASS_IO_USER_SEG_MALLOC	= 0x01,
+	PASS_IO_KERN_SEG_MALLOC	= 0x02,
+	PASS_IO_ABANDONED	= 0x04
+} pass_io_flags; 
+
+struct pass_io_req {
+	union ccb			 ccb;
+	union ccb			*alloced_ccb;
+	union ccb			*user_ccb_ptr;
+	camq_entry			 user_periph_links;
+	ccb_ppriv_area			 user_periph_priv;
+	struct cam_periph_map_info	 mapinfo;
+	pass_io_flags			 flags;
+	ccb_flags			 data_flags;
+	int				 num_user_segs;
+	bus_dma_segment_t		 user_segs[PASS_MAX_SEGS];
+	int				 num_kern_segs;
+	bus_dma_segment_t		 kern_segs[PASS_MAX_SEGS];
+	bus_dma_segment_t		*user_segptr;
+	bus_dma_segment_t		*kern_segptr;
+	int				 num_bufs;
+	uint32_t			 dirs[CAM_PERIPH_MAXMAPS];
+	uint32_t			 lengths[CAM_PERIPH_MAXMAPS];
+	uint8_t				*user_bufs[CAM_PERIPH_MAXMAPS];
+	uint8_t				*kern_bufs[CAM_PERIPH_MAXMAPS];
+	struct bintime			 start_time;
+	TAILQ_ENTRY(pass_io_req)	 links;
+};
+
 struct pass_softc {
-	pass_state	 state;
-	pass_flags	 flags;
-	u_int8_t	 pd_type;
-	union ccb	 saved_ccb;
-	int		 open_count;
-	struct devstat	*device_stats;
-	struct cdev	*dev;
-	struct cdev	*alias_dev;
-	struct task	 add_physpath_task;
+	pass_state		  state;
+	pass_flags		  flags;
+	u_int8_t		  pd_type;
+	union ccb		  saved_ccb;
+	int			  open_count;
+	u_int		 	  maxio;
+	struct devstat		 *device_stats;
+	struct cdev		 *dev;
+	struct cdev		 *alias_dev;
+	struct task		  add_physpath_task;
+	struct task		  shutdown_kqueue_task;
+	struct selinfo		  read_select;
+	TAILQ_HEAD(, pass_io_req) incoming_queue;
+	TAILQ_HEAD(, pass_io_req) active_queue;
+	TAILQ_HEAD(, pass_io_req) abandoned_queue;
+	TAILQ_HEAD(, pass_io_req) done_queue;
+	struct cam_periph	 *periph;
+	char			  zone_name[12];
+	char			  io_zone_name[12];
+	uma_zone_t		  pass_zone;
+	uma_zone_t		  pass_io_zone;
+	size_t			  io_zone_size;
 };
 
-
 static	d_open_t	passopen;
 static	d_close_t	passclose;
 static	d_ioctl_t	passioctl;
+static	d_ioctl_t	passdoioctl;
+static	d_poll_t	passpoll;
+static	d_kqfilter_t	passkqfilter;
+static	void		passreadfiltdetach(struct knote *kn);
+static	int		passreadfilt(struct knote *kn, long hint);
 
 static	periph_init_t	passinit;
 static	periph_ctor_t	passregister;
@@ -93,11 +165,22 @@
 static	periph_oninv_t	passoninvalidate;
 static	periph_dtor_t	passcleanup;
 static	periph_start_t	passstart;
-static void		pass_add_physpath(void *context, int pending);
+static	void		pass_shutdown_kqueue(void *context, int pending);
+static	void		pass_add_physpath(void *context, int pending);
 static	void		passasync(void *callback_arg, u_int32_t code,
 				  struct cam_path *path, void *arg);
 static	void		passdone(struct cam_periph *periph, 
 				 union ccb *done_ccb);
+static	int		passcreatezone(struct cam_periph *periph);
+static	void		passiocleanup(struct pass_softc *softc, 
+				      struct pass_io_req *io_req);
+static	int		passcopysglist(struct cam_periph *periph,
+				       struct pass_io_req *io_req,
+				       ccb_flags direction);
+static	int		passmemsetup(struct cam_periph *periph,
+				     struct pass_io_req *io_req);
+static	int		passmemdone(struct cam_periph *periph,
+				    struct pass_io_req *io_req);
 static	int		passerror(union ccb *ccb, u_int32_t cam_flags, 
 				  u_int32_t sense_flags);
 static 	int		passsendccb(struct cam_periph *periph, union ccb *ccb,
@@ -117,9 +200,19 @@
 	.d_open =	passopen,
 	.d_close =	passclose,
 	.d_ioctl =	passioctl,
+	.d_poll = 	passpoll,
+	.d_kqfilter = 	passkqfilter,
 	.d_name =	"pass",
 };
 
+static struct filterops passread_filtops = {
+	.f_isfd	=	1,
+	.f_detach =	passreadfiltdetach,
+	.f_event =	passreadfilt
+};
+
+static MALLOC_DEFINE(M_SCSIPASS, "scsi_pass", "scsi passthrough buffers");
+
 static void
 passinit(void)
 {
@@ -139,22 +232,75 @@
 }
 
 static void
+passrejectios(struct cam_periph *periph)
+{
+	struct pass_io_req *io_req, *io_req2;
+	struct pass_softc *softc;
+
+	softc = (struct pass_softc *)periph->softc;
+
+	/*
+	 * The user can no longer get status for I/O on the done queue, so
+	 * clean up all outstanding I/O on the done queue.
+	 */
+	TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
+		TAILQ_REMOVE(&softc->done_queue, io_req, links);
+		passiocleanup(softc, io_req);
+		uma_zfree(softc->pass_zone, io_req);
+	}
+
+	/*
+	 * The underlying device is gone, so we can't issue these I/Os.
+	 * The devfs node has been shut down, so we can't return status to
+	 * the user.  Free any I/O left on the incoming queue.
+	 */
+	TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) {
+		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
+		passiocleanup(softc, io_req);
+		uma_zfree(softc->pass_zone, io_req);
+	}
+
+	/*
+	 * Normally we would put I/Os on the abandoned queue and acquire a
+	 * reference when we saw the final close.  But, the device went
+	 * away and devfs may have moved everything off to deadfs by the
+	 * time the I/O done callback is called; as a result, we won't see
+	 * any more closes.  So, if we have any active I/Os, we need to put
+	 * them on the abandoned queue.  When the abandoned queue is empty,
+	 * we'll release the remaining reference (see below) to the peripheral.
+	 */
+	TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) {
+		TAILQ_REMOVE(&softc->active_queue, io_req, links);
+		io_req->flags |= PASS_IO_ABANDONED;
+		TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links);
+	}
+
+	/*
+	 * If we put any I/O on the abandoned queue, acquire a reference.
+	 */
+	if ((!TAILQ_EMPTY(&softc->abandoned_queue))
+	 && ((softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0)) {
+		cam_periph_doacquire(periph);
+		softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
+	}
+}
+
+static void
 passdevgonecb(void *arg)
 {
-	struct cam_sim    *sim;
 	struct cam_periph *periph;
+	struct mtx *mtx;
 	struct pass_softc *softc;
 	int i;
 
 	periph = (struct cam_periph *)arg;
-	sim = periph->sim;
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
+
 	softc = (struct pass_softc *)periph->softc;
-
 	KASSERT(softc->open_count >= 0, ("Negative open count %d",
 		softc->open_count));
 
-	mtx_lock(sim->mtx);
-
 	/*
 	 * When we get this callback, we will get no more close calls from
 	 * devfs.  So if we have any dangling opens, we need to release the
@@ -167,8 +313,10 @@
 
 	/*
 	 * Release the reference held for the device node, it is gone now.
+	 * Accordingly, inform all queued I/Os of their fate.
 	 */
 	cam_periph_release_locked(periph);
+	passrejectios(periph);
 
 	/*
 	 * We reference the SIM lock directly here, instead of using
@@ -177,7 +325,14 @@
 	 * getting freed.  If that is the case, dereferencing the periph
 	 * with a cam_periph_unlock() call would cause a page fault.
 	 */
-	mtx_unlock(sim->mtx);
+	mtx_unlock(mtx);
+
+	/*
+	 * We have to remove our kqueue context from a thread because it
+	 * may sleep.  It would be nice if we could get a callback from
+	 * kqueue when it is done cleaning up resources.
+	 */
+	taskqueue_enqueue(taskqueue_thread, &softc->shutdown_kqueue_task);
 }
 
 static void
@@ -199,17 +354,6 @@
 	 * when it has cleaned up its state.
 	 */
 	destroy_dev_sched_cb(softc->dev, passdevgonecb, periph);
-
-	/*
-	 * XXX Return all queued I/O with ENXIO.
-	 * XXX Handle any transactions queued to the card
-	 *     with XPT_ABORT_CCB.
-	 */
-
-	if (bootverbose) {
-		xpt_print(periph->path, "lost device\n");
-	}
-
 }
 
 static void
@@ -219,11 +363,40 @@
 
 	softc = (struct pass_softc *)periph->softc;
 
-	if (bootverbose)
-		xpt_print(periph->path, "removing device entry\n");
+	cam_periph_assert(periph, MA_OWNED);
+	KASSERT(TAILQ_EMPTY(&softc->active_queue),
+		("%s called when there are commands on the active queue!\n",
+		__func__));
+	KASSERT(TAILQ_EMPTY(&softc->abandoned_queue),
+		("%s called when there are commands on the abandoned queue!\n",
+		__func__));
+	KASSERT(TAILQ_EMPTY(&softc->incoming_queue),
+		("%s called when there are commands on the incoming queue!\n",
+		__func__));
+	KASSERT(TAILQ_EMPTY(&softc->done_queue),
+		("%s called when there are commands on the done queue!\n",
+		__func__));
+
 	devstat_remove_entry(softc->device_stats);
 
 	cam_periph_unlock(periph);
+
+	/*
+	 * We call taskqueue_drain() for the physpath task to make sure it
+	 * is complete.  We drop the lock because this can potentially
+	 * sleep.  XXX KDM that is bad.  Need a way to get a callback when
+	 * a taskqueue is drained.
+	 *
+ 	 * Note that we don't drain the kqueue shutdown task queue.  This
+	 * is because we hold a reference on the periph for kqueue, and
+	 * release that reference from the kqueue shutdown task queue.  So
+	 * we cannot come into this routine unless we've released that
+	 * reference.  Also, because that could be the last reference, we
+	 * could be called from the cam_periph_release() call in
+	 * pass_shutdown_kqueue().  In that case, the taskqueue_drain()
+	 * would deadlock.  It would be preferable if we had a way to
+	 * get a callback when a taskqueue is done.
+	 */
 	taskqueue_drain(taskqueue_thread, &softc->add_physpath_task);
 
 	cam_periph_lock(periph);
@@ -232,10 +405,29 @@
 }
 
 static void
+pass_shutdown_kqueue(void *context, int pending)
+{
+	struct cam_periph *periph;
+	struct pass_softc *softc;
+
+	periph = context;
+	softc = periph->softc;
+
+	knlist_clear(&softc->read_select.si_note, /*is_locked*/ 0);
+	knlist_destroy(&softc->read_select.si_note);
+
+	/*
+	 * Release the reference we held for kqueue.
+	 */
+	cam_periph_release(periph);
+}
+
+static void
 pass_add_physpath(void *context, int pending)
 {
 	struct cam_periph *periph;
 	struct pass_softc *softc;
+	struct mtx *mtx;
 	char *physpath;
 
 	/*
@@ -245,34 +437,38 @@
 	periph = context;
 	softc = periph->softc;
 	physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK);
-	cam_periph_lock(periph);
-	if (periph->flags & CAM_PERIPH_INVALID) {
-		cam_periph_unlock(periph);
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
+
+	if (periph->flags & CAM_PERIPH_INVALID)
 		goto out;
-	}
+
 	if (xpt_getattr(physpath, MAXPATHLEN,
 			"GEOM::physpath", periph->path) == 0
 	 && strlen(physpath) != 0) {
 
-		cam_periph_unlock(periph);
+		mtx_unlock(mtx);
 		make_dev_physpath_alias(MAKEDEV_WAITOK, &softc->alias_dev,
 					softc->dev, softc->alias_dev, physpath);
-		cam_periph_lock(periph);
+		mtx_lock(mtx);
 	}
 
+out:
 	/*
 	 * Now that we've made our alias, we no longer have to have a
 	 * reference to the device.
 	 */
-	if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0) {
+	if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0)
 		softc->flags |= PASS_FLAG_INITIAL_PHYSPATH;
-		cam_periph_unlock(periph);
-		dev_rel(softc->dev);
-	}
-	else
-		cam_periph_unlock(periph);
 
-out:
+	/*
+	 * We always acquire a reference to the periph before queueing this
+	 * task queue function, so it won't go away before we run.
+	 */
+	while (pending-- > 0)
+		cam_periph_release_locked(periph);
+	mtx_unlock(mtx);
+
 	free(physpath, M_DEVBUF);
 }
 
@@ -301,7 +497,7 @@
 		 */
 		status = cam_periph_alloc(passregister, passoninvalidate,
 					  passcleanup, passstart, "pass",
-					  CAM_PERIPH_BIO, cgd->ccb_h.path,
+					  CAM_PERIPH_BIO, path,
 					  passasync, AC_FOUND_DEVICE, cgd);
 
 		if (status != CAM_REQ_CMP
@@ -324,8 +520,19 @@
 		buftype = (uintptr_t)arg;
 		if (buftype == CDAI_TYPE_PHYS_PATH) {
 			struct pass_softc *softc;
+			cam_status status;
 
 			softc = (struct pass_softc *)periph->softc;
+			/*
+			 * Acquire a reference to the periph before we
+			 * start the taskqueue, so that we don't run into
+			 * a situation where the periph goes away before
+			 * the task queue has a chance to run.
+			 */
+			status = cam_periph_acquire(periph);
+			if (status != CAM_REQ_CMP)
+				break;
+
 			taskqueue_enqueue(taskqueue_thread,
 					  &softc->add_physpath_task);
 		}
@@ -343,7 +550,8 @@
 	struct pass_softc *softc;
 	struct ccb_getdev *cgd;
 	struct ccb_pathinq cpi;
-	int    no_tags;
+	struct make_dev_args args;
+	int error, no_tags;
 
 	cgd = (struct ccb_getdev *)arg;
 	if (cgd == NULL) {
@@ -370,6 +578,17 @@
 		softc->pd_type = T_DIRECT;
 
 	periph->softc = softc;
+	softc->periph = periph;
+	TAILQ_INIT(&softc->incoming_queue);
+	TAILQ_INIT(&softc->active_queue);
+	TAILQ_INIT(&softc->abandoned_queue);
+	TAILQ_INIT(&softc->done_queue);
+	snprintf(softc->zone_name, sizeof(softc->zone_name), "%s%d",
+		 periph->periph_name, periph->unit_number);
+	snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO",
+		 periph->periph_name, periph->unit_number);
+	softc->io_zone_size = MAXPHYS;
+	knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph));
 
 	bzero(&cpi, sizeof(cpi));
 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
@@ -376,6 +595,16 @@
 	cpi.ccb_h.func_code = XPT_PATH_INQ;
 	xpt_action((union ccb *)&cpi);
 
+	if (cpi.maxio == 0)
+		softc->maxio = DFLTPHYS;	/* traditional default */
+	else if (cpi.maxio > MAXPHYS)
+		softc->maxio = MAXPHYS;		/* for safety */
+	else
+		softc->maxio = cpi.maxio;	/* real value */
+
+	if (cpi.hba_misc & PIM_UNMAPPED)
+		softc->flags |= PASS_FLAG_UNMAPPED_CAPABLE;
+
 	/*
 	 * We pass in 0 for a blocksize, since we don't 
 	 * know what the blocksize of this device is, if 
@@ -393,6 +622,23 @@
 			  DEVSTAT_PRIORITY_PASS);
 
 	/*
+	 * Initialize the taskqueue handler for shutting down kqueue.
+	 */
+	TASK_INIT(&softc->shutdown_kqueue_task, /*priority*/ 0,
+		  pass_shutdown_kqueue, periph);
+
+	/*
+	 * Acquire a reference to the periph that we can release once we've
+	 * cleaned up the kqueue.
+	 */
+	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
+		xpt_print(periph->path, "%s: lost periph during "
+			  "registration!\n", __func__);
+		cam_periph_lock(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
+
+	/*
 	 * Acquire a reference to the periph before we create the devfs
 	 * instance for it.  We'll release this reference once the devfs
 	 * instance has been freed.
@@ -405,20 +651,33 @@
 	}
 
 	/* Register the device */
-	softc->dev = make_dev(&pass_cdevsw, periph->unit_number,
-			      UID_ROOT, GID_OPERATOR, 0600, "%s%d",
-			      periph->periph_name, periph->unit_number);
+	make_dev_args_init(&args);
+	args.mda_devsw = &pass_cdevsw;
+	args.mda_unit = periph->unit_number;
+	args.mda_uid = UID_ROOT;
+	args.mda_gid = GID_OPERATOR;
+	args.mda_mode = 0600;
+	args.mda_si_drv1 = periph;
+	error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name,
+	    periph->unit_number);
+	if (error != 0) {
+		cam_periph_lock(periph);
+		cam_periph_release_locked(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
 
 	/*
-	 * Now that we have made the devfs instance, hold a reference to it
-	 * until the task queue has run to setup the physical path alias.
-	 * That way devfs won't get rid of the device before we add our
-	 * alias.
+	 * Hold a reference to the periph before we create the physical
+	 * path alias so it can't go away.
 	 */
-	dev_ref(softc->dev);
+	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
+		xpt_print(periph->path, "%s: lost periph during "
+			  "registration!\n", __func__);
+		cam_periph_lock(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
 
 	cam_periph_lock(periph);
-	softc->dev->si_drv1 = periph;
 
 	TASK_INIT(&softc->add_physpath_task, /*priority*/0,
 		  pass_add_physpath, periph);
@@ -503,25 +762,70 @@
 static int
 passclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 {
-	struct  cam_sim    *sim;
 	struct 	cam_periph *periph;
 	struct  pass_softc *softc;
+	struct mtx *mtx;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return (ENXIO);	
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
 
-	sim = periph->sim;
 	softc = periph->softc;
+	softc->open_count--;
 
-	mtx_lock(sim->mtx);
+	if (softc->open_count == 0) {
+		struct pass_io_req *io_req, *io_req2;
+		int need_unlock;
 
-	softc->open_count--;
+		need_unlock = 0;
 
+		TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
+			TAILQ_REMOVE(&softc->done_queue, io_req, links);
+			passiocleanup(softc, io_req);
+			uma_zfree(softc->pass_zone, io_req);
+		}
+
+		TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links,
+				   io_req2) {
+			TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
+			passiocleanup(softc, io_req);
+			uma_zfree(softc->pass_zone, io_req);
+		}
+
+		/*
+		 * If there are any active I/Os, we need to forcibly acquire a
+		 * reference to the peripheral so that we don't go away
+		 * before they complete.  We'll release the reference when
+		 * the abandoned queue is empty.
+		 */
+		io_req = TAILQ_FIRST(&softc->active_queue);
+		if ((io_req != NULL)
+		 && (softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0) {
+			cam_periph_doacquire(periph);
+			softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
+		}
+
+		/*
+		 * Since the I/O in the active queue is not under our
+		 * control, just set a flag so that we can clean it up when
+		 * it completes and put it on the abandoned queue.  This
+		 * will prevent our sending spurious completions in the
+		 * event that the device is opened again before these I/Os
+		 * complete.
+		 */
+		TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links,
+				   io_req2) {
+			TAILQ_REMOVE(&softc->active_queue, io_req, links);
+			io_req->flags |= PASS_IO_ABANDONED;
+			TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req,
+					  links);
+		}
+	}
+
 	cam_periph_release_locked(periph);
 
 	/*
-	 * We reference the SIM lock directly here, instead of using
+	 * We reference the lock directly here, instead of using
 	 * cam_periph_unlock().  The reason is that the call to
 	 * cam_periph_release_locked() above could result in the periph
 	 * getting freed.  If that is the case, dereferencing the periph
@@ -532,11 +836,12 @@
 	 * protect the open count and avoid another lock acquisition and
 	 * release.
 	 */
-	mtx_unlock(sim->mtx);
+	mtx_unlock(mtx);
 
 	return (0);
 }
 
+
 static void
 passstart(struct cam_periph *periph, union ccb *start_ccb)
 {
@@ -545,14 +850,44 @@
 	softc = (struct pass_softc *)periph->softc;
 
 	switch (softc->state) {
-	case PASS_STATE_NORMAL:
-		start_ccb->ccb_h.ccb_type = PASS_CCB_WAITING;			
-		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-				  periph_links.sle);
-		periph->immediate_priority = CAM_PRIORITY_NONE;
-		wakeup(&periph->ccb_list);
+	case PASS_STATE_NORMAL: {
+		struct pass_io_req *io_req;
+
+		/*
+		 * Check for any queued I/O requests that require an
+		 * allocated slot.
+		 */
+		io_req = TAILQ_FIRST(&softc->incoming_queue);
+		if (io_req == NULL) {
+			xpt_release_ccb(start_ccb);
+			break;
+		}
+		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
+		TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);
+		/*
+		 * Merge the user's CCB into the allocated CCB.
+		 */
+		xpt_merge_ccb(start_ccb, &io_req->ccb);
+		start_ccb->ccb_h.ccb_type = PASS_CCB_QUEUED_IO;
+		start_ccb->ccb_h.ccb_ioreq = io_req;
+		start_ccb->ccb_h.cbfcnp = passdone;
+		io_req->alloced_ccb = start_ccb;
+		binuptime(&io_req->start_time);
+		devstat_start_transaction(softc->device_stats,
+					  &io_req->start_time);
+
+		xpt_action(start_ccb);
+
+		/*
+		 * If we have any more I/O waiting, schedule ourselves again.
+		 */
+		if (!TAILQ_EMPTY(&softc->incoming_queue))
+			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 		break;
 	}
+	default:
+		break;
+	}
 }
 
 static void
@@ -562,19 +897,873 @@
 	struct ccb_scsiio *csio;
 
 	softc = (struct pass_softc *)periph->softc;
+
+	cam_periph_assert(periph, MA_OWNED);
+
 	csio = &done_ccb->csio;
 	switch (csio->ccb_h.ccb_type) {
-	case PASS_CCB_WAITING:
-		/* Caller will release the CCB */
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
+	case PASS_CCB_QUEUED_IO: {
+		struct pass_io_req *io_req;
+
+		io_req = done_ccb->ccb_h.ccb_ioreq;
+#if 0
+		xpt_print(periph->path, "%s: called for user CCB %p\n",
+			  __func__, io_req->user_ccb_ptr);
+#endif
+		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
+		 && (done_ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER)
+		 && ((io_req->flags & PASS_IO_ABANDONED) == 0)) {
+			int error;
+
+			error = passerror(done_ccb, CAM_RETRY_SELTO,
+					  SF_RETRY_UA | SF_NO_PRINT);
+
+			if (error == ERESTART) {
+				/*
+				 * A retry was scheduled, so
+				 * just return.
+				 */
+				return;
+			}
+		}
+
+		/*
+		 * Copy the allocated CCB contents back to the malloced CCB
+		 * so we can give status back to the user when he requests it.
+		 */
+		bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb));
+
+		/*
+		 * Log data/transaction completion with devstat(9).
+		 */
+		switch (done_ccb->ccb_h.func_code) {
+		case XPT_SCSI_IO:
+			devstat_end_transaction(softc->device_stats,
+			    done_ccb->csio.dxfer_len - done_ccb->csio.resid,
+			    done_ccb->csio.tag_action & 0x3,
+			    ((done_ccb->ccb_h.flags & CAM_DIR_MASK) ==
+			    CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
+			    (done_ccb->ccb_h.flags & CAM_DIR_OUT) ?
+			    DEVSTAT_WRITE : DEVSTAT_READ, NULL,
+			    &io_req->start_time);
+			break;
+		case XPT_ATA_IO:
+			devstat_end_transaction(softc->device_stats,
+			    done_ccb->ataio.dxfer_len - done_ccb->ataio.resid,
+			    done_ccb->ataio.tag_action & 0x3,
+			    ((done_ccb->ccb_h.flags & CAM_DIR_MASK) ==
+			    CAM_DIR_NONE) ? DEVSTAT_NO_DATA : 
+			    (done_ccb->ccb_h.flags & CAM_DIR_OUT) ?
+			    DEVSTAT_WRITE : DEVSTAT_READ, NULL,
+			    &io_req->start_time);
+			break;
+		case XPT_SMP_IO:
+			/*
+			 * XXX KDM this isn't quite right, but there isn't
+			 * currently an easy way to represent a bidirectional 
+			 * transfer in devstat.  The only way to do it
+			 * and have the byte counts come out right would
+			 * mean that we would have to record two
+			 * transactions, one for the request and one for the
+			 * response.  For now, so that we report something,
+			 * just treat the entire thing as a read.
+			 */
+			devstat_end_transaction(softc->device_stats,
+			    done_ccb->smpio.smp_request_len +
+			    done_ccb->smpio.smp_response_len,
+			    DEVSTAT_TAG_SIMPLE, DEVSTAT_READ, NULL,
+			    &io_req->start_time);
+			break;
+		default:
+			devstat_end_transaction(softc->device_stats, 0,
+			    DEVSTAT_TAG_NONE, DEVSTAT_NO_DATA, NULL,
+			    &io_req->start_time);
+			break;
+		}
+
+		/*
+		 * In the normal case, take the completed I/O off of the
+		 * active queue and put it on the done queue.  Notify the
+		 * user that we have a completed I/O.
+		 */
+		if ((io_req->flags & PASS_IO_ABANDONED) == 0) {
+			TAILQ_REMOVE(&softc->active_queue, io_req, links);
+			TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
+			selwakeuppri(&softc->read_select, PRIBIO);
+			KNOTE_LOCKED(&softc->read_select.si_note, 0);
+		} else {
+			/*
+			 * In the case of an abandoned I/O (final close
+			 * without fetching the I/O), take it off of the
+			 * abandoned queue and free it.
+			 */
+			TAILQ_REMOVE(&softc->abandoned_queue, io_req, links);
+			passiocleanup(softc, io_req);
+			uma_zfree(softc->pass_zone, io_req);
+
+			/*
+			 * Release the done_ccb here, since we may wind up
+			 * freeing the peripheral when we decrement the
+			 * reference count below.
+			 */
+			xpt_release_ccb(done_ccb);
+
+			/*
+			 * If the abandoned queue is empty, we can release
+			 * our reference to the periph since we won't have
+			 * any more completions coming.
+			 */
+			if ((TAILQ_EMPTY(&softc->abandoned_queue))
+			 && (softc->flags & PASS_FLAG_ABANDONED_REF_SET)) {
+				softc->flags &= ~PASS_FLAG_ABANDONED_REF_SET;
+				cam_periph_release_locked(periph);
+			}
+
+			/*
+			 * We have already released the CCB, so we can
+			 * return.
+			 */
+			return;
+		}
+		break;
 	}
+	}
 	xpt_release_ccb(done_ccb);
 }
 
 static int
+passcreatezone(struct cam_periph *periph)
+{
+	struct pass_softc *softc;
+	int error;
+
+	error = 0;
+	softc = (struct pass_softc *)periph->softc;
+
+	cam_periph_assert(periph, MA_OWNED);
+	KASSERT(((softc->flags & PASS_FLAG_ZONE_VALID) == 0), 
+		("%s called when the pass(4) zone is valid!\n", __func__));
+	KASSERT((softc->pass_zone == NULL), 
+		("%s called when the pass(4) zone is allocated!\n", __func__));
+
+	if ((softc->flags & PASS_FLAG_ZONE_INPROG) == 0) {
+
+		/*
+		 * We're the first context through, so we need to create
+		 * the pass(4) UMA zone for I/O requests.
+		 */
+		softc->flags |= PASS_FLAG_ZONE_INPROG;
+
+		/*
+		 * uma_zcreate() does a blocking (M_WAITOK) allocation,
+		 * so we cannot hold a mutex while we call it.
+		 */
+		cam_periph_unlock(periph);
+
+		softc->pass_zone = uma_zcreate(softc->zone_name,
+		    sizeof(struct pass_io_req), NULL, NULL, NULL, NULL,
+		    /*align*/ 0, /*flags*/ 0);
+
+		softc->pass_io_zone = uma_zcreate(softc->io_zone_name,
+		    softc->io_zone_size, NULL, NULL, NULL, NULL,
+		    /*align*/ 0, /*flags*/ 0);
+
+		cam_periph_lock(periph);
+
+		if ((softc->pass_zone == NULL)
+		 || (softc->pass_io_zone == NULL)) {
+			if (softc->pass_zone == NULL)
+				xpt_print(periph->path, "unable to allocate "
+				    "IO Req UMA zone\n");
+			else
+				xpt_print(periph->path, "unable to allocate "
+				    "IO UMA zone\n");
+			softc->flags &= ~PASS_FLAG_ZONE_INPROG;
+			goto bailout;
+		}
+
+		/*
+		 * Set the flags appropriately and notify any other waiters.
+		 */
+		softc->flags &= ~PASS_FLAG_ZONE_INPROG;
+		softc->flags |= PASS_FLAG_ZONE_VALID;
+		wakeup(&softc->pass_zone);
+	} else {
+		/*
+		 * In this case, the UMA zone has not yet been created, but
+		 * another context is in the process of creating it.  We
+		 * need to sleep until the creation is either done or has
+		 * failed.
+		 */
+		while ((softc->flags & PASS_FLAG_ZONE_INPROG)
+		    && ((softc->flags & PASS_FLAG_ZONE_VALID) == 0)) {
+			error = msleep(&softc->pass_zone,
+				       cam_periph_mtx(periph), PRIBIO,
+				       "paszon", 0);
+			if (error != 0)
+				goto bailout;
+		}
+		/*
+		 * If the zone creation failed, no luck for the user.
+		 */
+		if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0){
+			error = ENOMEM;
+			goto bailout;
+		}
+	}
+bailout:
+	return (error);
+}
+
+static void
+passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req)
+{
+	union ccb *ccb;
+	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
+	int i, numbufs;
+
+	ccb = &io_req->ccb;
+
+	switch (ccb->ccb_h.func_code) {
+	case XPT_DEV_MATCH:
+		numbufs = min(io_req->num_bufs, 2);
+
+		if (numbufs == 1) {
+			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
+		} else {
+			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
+			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
+		}
+		break;
+	case XPT_SCSI_IO:
+	case XPT_CONT_TARGET_IO:
+		data_ptrs[0] = &ccb->csio.data_ptr;
+		numbufs = min(io_req->num_bufs, 1);
+		break;
+	case XPT_ATA_IO:
+		data_ptrs[0] = &ccb->ataio.data_ptr;
+		numbufs = min(io_req->num_bufs, 1);
+		break;
+	case XPT_SMP_IO:
+		numbufs = min(io_req->num_bufs, 2);
+		data_ptrs[0] = &ccb->smpio.smp_request;
+		data_ptrs[1] = &ccb->smpio.smp_response;
+		break;
+	case XPT_DEV_ADVINFO:
+		numbufs = min(io_req->num_bufs, 1);
+		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
+		break;
+	default:
+		/* allow ourselves to be swapped once again */
+		return;
+		break; /* NOTREACHED */ 
+	}
+
+	if (io_req->flags & PASS_IO_USER_SEG_MALLOC) {
+		free(io_req->user_segptr, M_SCSIPASS);
+		io_req->user_segptr = NULL;
+	}
+
+	/*
+	 * We only want to free memory we malloced.
+	 */
+	if (io_req->data_flags == CAM_DATA_VADDR) {
+		for (i = 0; i < io_req->num_bufs; i++) {
+			if (io_req->kern_bufs[i] == NULL)
+				continue;
+
+			free(io_req->kern_bufs[i], M_SCSIPASS);
+			io_req->kern_bufs[i] = NULL;
+		}
+	} else if (io_req->data_flags == CAM_DATA_SG) {
+		for (i = 0; i < io_req->num_kern_segs; i++) {
+			if ((uint8_t *)(uintptr_t)
+			    io_req->kern_segptr[i].ds_addr == NULL)
+				continue;
+
+			uma_zfree(softc->pass_io_zone, (uint8_t *)(uintptr_t)
+			    io_req->kern_segptr[i].ds_addr);
+			io_req->kern_segptr[i].ds_addr = 0;
+		}
+	}
+
+	if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) {
+		free(io_req->kern_segptr, M_SCSIPASS);
+		io_req->kern_segptr = NULL;
+	}
+
+	if (io_req->data_flags != CAM_DATA_PADDR) {
+		for (i = 0; i < numbufs; i++) {
+			/*
+			 * Restore the user's buffer pointers to their
+			 * previous values.
+			 */
+			if (io_req->user_bufs[i] != NULL)
+				*data_ptrs[i] = io_req->user_bufs[i];
+		}
+	}
+
+}
+
+static int
+passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req,
+	       ccb_flags direction)
+{
+	bus_size_t kern_watermark, user_watermark, len_copied, len_to_copy;
+	bus_dma_segment_t *user_sglist, *kern_sglist;
+	int i, j, error;
+
+	error = 0;
+	kern_watermark = 0;
+	user_watermark = 0;
+	len_to_copy = 0;
+	len_copied = 0;
+	user_sglist = io_req->user_segptr;
+	kern_sglist = io_req->kern_segptr;
+
+	for (i = 0, j = 0; i < io_req->num_user_segs &&
+	     j < io_req->num_kern_segs;) {
+		uint8_t *user_ptr, *kern_ptr;
+
+		len_to_copy = min(user_sglist[i].ds_len - user_watermark,
+		    kern_sglist[j].ds_len - kern_watermark);
+
+		user_ptr = (uint8_t *)(uintptr_t)user_sglist[i].ds_addr;
+		user_ptr = user_ptr + user_watermark;
+		kern_ptr = (uint8_t *)(uintptr_t)kern_sglist[j].ds_addr;
+		kern_ptr = kern_ptr + kern_watermark;
+
+		user_watermark += len_to_copy;
+		kern_watermark += len_to_copy;
+
+		if (!useracc(user_ptr, len_to_copy,
+		    (direction == CAM_DIR_IN) ? VM_PROT_WRITE : VM_PROT_READ)) {
+			xpt_print(periph->path, "%s: unable to access user "
+				  "S/G list element %p len %zu\n", __func__,
+				  user_ptr, len_to_copy);
+			error = EFAULT;
+			goto bailout;
+		}
+
+		if (direction == CAM_DIR_IN) {
+			error = copyout(kern_ptr, user_ptr, len_to_copy);
+			if (error != 0) {
+				xpt_print(periph->path, "%s: copyout of %u "
+					  "bytes from %p to %p failed with "
+					  "error %d\n", __func__, len_to_copy,
+					  kern_ptr, user_ptr, error);
+				goto bailout;
+			}
+		} else {
+			error = copyin(user_ptr, kern_ptr, len_to_copy);
+			if (error != 0) {
+				xpt_print(periph->path, "%s: copyin of %u "
+					  "bytes from %p to %p failed with "
+					  "error %d\n", __func__, len_to_copy,
+					  user_ptr, kern_ptr, error);
+				goto bailout;
+			}
+		}
+
+		len_copied += len_to_copy;
+
+		if (user_sglist[i].ds_len == user_watermark) {
+			i++;
+			user_watermark = 0;
+		}
+
+		if (kern_sglist[j].ds_len == kern_watermark) {
+			j++;
+			kern_watermark = 0;
+		}
+	}
+
+bailout:
+
+	return (error);
+}
+
+static int
+passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req)
+{
+	union ccb *ccb;
+	struct pass_softc *softc;
+	int numbufs, i;
+	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
+	uint32_t lengths[CAM_PERIPH_MAXMAPS];
+	uint32_t dirs[CAM_PERIPH_MAXMAPS];
+	uint32_t num_segs;
+	uint16_t *seg_cnt_ptr;
+	size_t maxmap;
+	int error;
+
+	cam_periph_assert(periph, MA_NOTOWNED);
+
+	softc = periph->softc;
+
+	error = 0;
+	ccb = &io_req->ccb;
+	maxmap = 0;
+	num_segs = 0;
+	seg_cnt_ptr = NULL;
+
+	switch(ccb->ccb_h.func_code) {
+	case XPT_DEV_MATCH:
+		if (ccb->cdm.match_buf_len == 0) {
+			printf("%s: invalid match buffer length 0\n", __func__);
+			return(EINVAL);
+		}
+		if (ccb->cdm.pattern_buf_len > 0) {
+			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
+			lengths[0] = ccb->cdm.pattern_buf_len;
+			dirs[0] = CAM_DIR_OUT;
+			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
+			lengths[1] = ccb->cdm.match_buf_len;
+			dirs[1] = CAM_DIR_IN;
+			numbufs = 2;
+		} else {
+			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
+			lengths[0] = ccb->cdm.match_buf_len;
+			dirs[0] = CAM_DIR_IN;
+			numbufs = 1;
+		}
+		io_req->data_flags = CAM_DATA_VADDR;
+		break;
+	case XPT_SCSI_IO:
+	case XPT_CONT_TARGET_IO:
+		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
+			return(0);
+
+		/*
+		 * The user shouldn't be able to supply a bio.
+		 */
+		if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
+			return (EINVAL);
+
+		io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK;
+
+		data_ptrs[0] = &ccb->csio.data_ptr;
+		lengths[0] = ccb->csio.dxfer_len;
+		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
+		num_segs = ccb->csio.sglist_cnt;
+		seg_cnt_ptr = &ccb->csio.sglist_cnt;
+		numbufs = 1;
+		maxmap = softc->maxio;
+		break;
+	case XPT_ATA_IO:
+		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
+			return(0);
+
+		/*
+		 * We only support a single virtual address for ATA I/O.
+		 */
+		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
+			return (EINVAL);
+
+		io_req->data_flags = CAM_DATA_VADDR;
+
+		data_ptrs[0] = &ccb->ataio.data_ptr;
+		lengths[0] = ccb->ataio.dxfer_len;
+		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
+		numbufs = 1;
+		maxmap = softc->maxio;
+		break;
+	case XPT_SMP_IO:
+		io_req->data_flags = CAM_DATA_VADDR;
+
+		data_ptrs[0] = &ccb->smpio.smp_request;
+		lengths[0] = ccb->smpio.smp_request_len;
+		dirs[0] = CAM_DIR_OUT;
+		data_ptrs[1] = &ccb->smpio.smp_response;
+		lengths[1] = ccb->smpio.smp_response_len;
+		dirs[1] = CAM_DIR_IN;
+		numbufs = 2;
+		maxmap = softc->maxio;
+		break;
+	case XPT_DEV_ADVINFO:
+		if (ccb->cdai.bufsiz == 0)
+			return (0);
+
+		io_req->data_flags = CAM_DATA_VADDR;
+
+		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
+		lengths[0] = ccb->cdai.bufsiz;
+		dirs[0] = CAM_DIR_IN;
+		numbufs = 1;
+		break;
+	default:
+		return(EINVAL);
+		break; /* NOTREACHED */
+	}
+
+	io_req->num_bufs = numbufs;
+
+	/*
+	 * If there is a maximum, check to make sure that the user's
+	 * request fits within the limit.  In general, we should only have
+	 * a maximum length for requests that go to hardware.  Otherwise it
+	 * is whatever we're able to malloc.
+	 */
+	for (i = 0; i < numbufs; i++) {
+		io_req->user_bufs[i] = *data_ptrs[i];
+		io_req->dirs[i] = dirs[i];
+		io_req->lengths[i] = lengths[i];
+
+		if (maxmap == 0)
+			continue;
+
+		if (lengths[i] <= maxmap)
+			continue;
+
+		xpt_print(periph->path, "%s: data length %u > max allowed %u "
+			  "bytes\n", __func__, lengths[i], maxmap);
+		error = EINVAL;
+		goto bailout;
+	}
+
+	switch (io_req->data_flags) {
+	case CAM_DATA_VADDR:
+		/* Map or copy the buffer into kernel address space */
+		for (i = 0; i < numbufs; i++) {
+			uint8_t *tmp_buf;
+
+			/*
+			 * If for some reason no length is specified, we
+			 * don't need to allocate anything.
+			 */
+			if (io_req->lengths[i] == 0)
+				continue;
+
+			/*
+			 * Make sure that the user's buffer is accessible
+			 * to that process.
+			 */
+			if (!useracc(io_req->user_bufs[i], io_req->lengths[i],
+			    (io_req->dirs[i] == CAM_DIR_IN) ? VM_PROT_WRITE :
+			     VM_PROT_READ)) {
+				xpt_print(periph->path, "%s: user address %p "
+				    "length %u is not accessible\n", __func__,
+				    io_req->user_bufs[i], io_req->lengths[i]);
+				error = EFAULT;
+				goto bailout;
+			}
+
+			tmp_buf = malloc(lengths[i], M_SCSIPASS,
+					 M_WAITOK | M_ZERO);
+			io_req->kern_bufs[i] = tmp_buf;
+			*data_ptrs[i] = tmp_buf;
+
+#if 0
+			xpt_print(periph->path, "%s: malloced %p len %u, user "
+				  "buffer %p, operation: %s\n", __func__,
+				  tmp_buf, lengths[i], io_req->user_bufs[i],
+				  (dirs[i] == CAM_DIR_IN) ? "read" : "write");
+#endif
+			/*
+			 * We only need to copy in if the user is writing.
+			 */
+			if (dirs[i] != CAM_DIR_OUT)
+				continue;
+
+			error = copyin(io_req->user_bufs[i],
+				       io_req->kern_bufs[i], lengths[i]);
+			if (error != 0) {
+				xpt_print(periph->path, "%s: copy of user "
+					  "buffer from %p to %p failed with "
+					  "error %d\n", __func__,
+					  io_req->user_bufs[i],
+					  io_req->kern_bufs[i], error);
+				goto bailout;
+			}
+		}
+		break;
+	case CAM_DATA_PADDR:
+		/* Pass down the pointer as-is */
+		break;
+	case CAM_DATA_SG: {
+		size_t sg_length, size_to_go, alloc_size;
+		uint32_t num_segs_needed;
+
+		/*
+		 * Copy the user S/G list in, and then copy in the
+		 * individual segments.
+		 */
+		/*
+		 * We shouldn't see this, but check just in case.
+		 */
+		if (numbufs != 1) {
+			xpt_print(periph->path, "%s: cannot currently handle "
+				  "more than one S/G list per CCB\n", __func__);
+			error = EINVAL;
+			goto bailout;
+		}
+
+		/*
+		 * We have to have at least one segment.
+		 */
+		if (num_segs == 0) {
+			xpt_print(periph->path, "%s: CAM_DATA_SG flag set, "
+				  "but sglist_cnt=0!\n", __func__);
+			error = EINVAL;
+			goto bailout;
+		}
+
+		/*
+		 * Make sure the user specified the total length and didn't
+		 * just leave it to us to decode the S/G list.
+		 */
+		if (lengths[0] == 0) {
+			xpt_print(periph->path, "%s: no dxfer_len specified, "
+				  "but CAM_DATA_SG flag is set!\n", __func__);
+			error = EINVAL;
+			goto bailout;
+		}
+
+		/*
+		 * We allocate buffers in io_zone_size increments for an
+		 * S/G list.  This will generally be MAXPHYS.
+		 */
+		if (lengths[0] <= softc->io_zone_size)
+			num_segs_needed = 1;
+		else {
+			num_segs_needed = lengths[0] / softc->io_zone_size;
+			if ((lengths[0] % softc->io_zone_size) != 0)
+				num_segs_needed++;
+		}
+
+		/* Figure out the size of the S/G list */
+		sg_length = num_segs * sizeof(bus_dma_segment_t);
+		io_req->num_user_segs = num_segs;
+		io_req->num_kern_segs = num_segs_needed;
+
+		/* Save the user's S/G list pointer for later restoration */
+		io_req->user_bufs[0] = *data_ptrs[0];
+
+		/*
+		 * If the user's S/G list needs more than the statically
+		 * allocated PASS_MAX_SEGS segments, allocate a buffer to
+		 * hold it; otherwise use the static array.
+		 */
+		if (num_segs > PASS_MAX_SEGS) {
+			io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
+			    num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
+			io_req->flags |= PASS_IO_USER_SEG_MALLOC;
+		} else
+			io_req->user_segptr = io_req->user_segs;
+
+		if (!useracc(*data_ptrs[0], sg_length, VM_PROT_READ)) {
+			xpt_print(periph->path, "%s: unable to access user "
+				  "S/G list at %p\n", __func__, *data_ptrs[0]);
+			error = EFAULT;
+			goto bailout;
+		}
+
+		error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
+		if (error != 0) {
+			xpt_print(periph->path, "%s: copy of user S/G list "
+				  "from %p to %p failed with error %d\n",
+				  __func__, *data_ptrs[0], io_req->user_segptr,
+				  error);
+			goto bailout;
+		}
+
+		if (num_segs_needed > PASS_MAX_SEGS) {
+			io_req->kern_segptr = malloc(sizeof(bus_dma_segment_t) *
+			    num_segs_needed, M_SCSIPASS, M_WAITOK | M_ZERO);
+			io_req->flags |= PASS_IO_KERN_SEG_MALLOC;
+		} else {
+			io_req->kern_segptr = io_req->kern_segs;
+		}
+
+		/*
+		 * Allocate the kernel S/G list.
+		 */
+		for (size_to_go = lengths[0], i = 0;
+		     size_to_go > 0 && i < num_segs_needed;
+		     i++, size_to_go -= alloc_size) {
+			uint8_t *kern_ptr;
+
+			alloc_size = min(size_to_go, softc->io_zone_size);
+			kern_ptr = uma_zalloc(softc->pass_io_zone, M_WAITOK);
+			io_req->kern_segptr[i].ds_addr =
+			    (bus_addr_t)(uintptr_t)kern_ptr;
+			io_req->kern_segptr[i].ds_len = alloc_size;
+		}
+		if (size_to_go > 0) {
+			printf("%s: size_to_go = %zu, software error!\n",
+			       __func__, size_to_go);
+			error = EINVAL;
+			goto bailout;
+		}
+
+		*data_ptrs[0] = (uint8_t *)io_req->kern_segptr;
+		*seg_cnt_ptr = io_req->num_kern_segs;
+
+		/*
+		 * We only need to copy data here if the user is writing.
+		 */
+		if (dirs[0] == CAM_DIR_OUT)
+			error = passcopysglist(periph, io_req, dirs[0]);
+		break;
+	}
+	case CAM_DATA_SG_PADDR: {
+		size_t sg_length;
+
+		/*
+		 * We shouldn't see this, but check just in case.
+		 */
+		if (numbufs != 1) {
+			printf("%s: cannot currently handle more than one "
+			       "S/G list per CCB\n", __func__);
+			error = EINVAL;
+			goto bailout;
+		}
+
+		/*
+		 * We have to have at least one segment.
+		 */
+		if (num_segs == 0) {
+			xpt_print(periph->path, "%s: CAM_DATA_SG_PADDR flag "
+				  "set, but sglist_cnt=0!\n", __func__);
+			error = EINVAL;
+			goto bailout;
+		}
+
+		/*
+		 * Make sure the user specified the total length and didn't
+		 * just leave it to us to decode the S/G list.
+		 */
+		if (lengths[0] == 0) {
+			xpt_print(periph->path, "%s: no dxfer_len specified, "
+				  "but CAM_DATA_SG flag is set!\n", __func__);
+			error = EINVAL;
+			goto bailout;
+		}
+
+		/* Figure out the size of the S/G list */
+		sg_length = num_segs * sizeof(bus_dma_segment_t);
+		io_req->num_user_segs = num_segs;
+		io_req->num_kern_segs = io_req->num_user_segs;
+
+		/* Save the user's S/G list pointer for later restoration */
+		io_req->user_bufs[0] = *data_ptrs[0];
+
+		if (num_segs > PASS_MAX_SEGS) {
+			io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
+			    num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
+			io_req->flags |= PASS_IO_USER_SEG_MALLOC;
+		} else
+			io_req->user_segptr = io_req->user_segs;
+
+		io_req->kern_segptr = io_req->user_segptr;
+
+		error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
+		if (error != 0) {
+			xpt_print(periph->path, "%s: copy of user S/G list "
+				  "from %p to %p failed with error %d\n",
+				  __func__, *data_ptrs[0], io_req->user_segptr,
+				  error);
+			goto bailout;
+		}
+		break;
+	}
+	default:
+	case CAM_DATA_BIO:
+		/*
+		 * A user shouldn't be attaching a bio to the CCB.  It
+		 * isn't a user-accessible structure.
+		 */
+		error = EINVAL;
+		break;
+	}
+
+bailout:
+	if (error != 0)
+		passiocleanup(softc, io_req);
+
+	return (error);
+}
+
+static int
+passmemdone(struct cam_periph *periph, struct pass_io_req *io_req)
+{
+	struct pass_softc *softc;
+	union ccb *ccb;
+	int error;
+	int i;
+
+	error = 0;
+	softc = (struct pass_softc *)periph->softc;
+	ccb = &io_req->ccb;
+
+	switch (io_req->data_flags) {
+	case CAM_DATA_VADDR:
+		/*
+		 * Copy back to the user buffer if this was a read.
+		 */
+		for (i = 0; i < io_req->num_bufs; i++) {
+			if (io_req->dirs[i] != CAM_DIR_IN)
+				continue;
+
+			error = copyout(io_req->kern_bufs[i],
+			    io_req->user_bufs[i], io_req->lengths[i]);
+			if (error != 0) {
+				xpt_print(periph->path, "Unable to copy %u "
+					  "bytes from %p to user address %p\n",
+					  io_req->lengths[i],
+					  io_req->kern_bufs[i],
+					  io_req->user_bufs[i]);
+				goto bailout;
+			}
+
+		}
+		break;
+	case CAM_DATA_PADDR:
+		/* Do nothing.  The pointer is a physical address already */
+		break;
+	case CAM_DATA_SG:
+		/*
+		 * Copy back to the user buffer if this was a read.
+		 * Restore the user's S/G list buffer pointer.
+		 */
+		if (io_req->dirs[0] == CAM_DIR_IN)
+			error = passcopysglist(periph, io_req, io_req->dirs[0]);
+		break;
+	case CAM_DATA_SG_PADDR:
+		/*
+		 * Restore the user's S/G list buffer pointer.  No need to
+		 * copy.
+		 */
+		break;
+	default:
+	case CAM_DATA_BIO:
+		error = EINVAL;
+		break;
+	}
+
+bailout:
+	/*
+	 * Reset the user's pointers to their original values and free
+	 * allocated memory.
+	 */
+	passiocleanup(softc, io_req);
+
+	return (error);
+}
+
+static int
 passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
 {
+	int error;
+
+	if ((error = passdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
+		error = cam_compat_ioctl(dev, cmd, addr, flag, td, passdoioctl);
+	}
+	return (error);
+}
+
+static int
+passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
+{
 	struct	cam_periph *periph;
 	struct	pass_softc *softc;
 	int	error;
@@ -581,9 +1770,6 @@
 	uint32_t priority;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return(ENXIO);
-
 	cam_periph_lock(periph);
 	softc = (struct pass_softc *)periph->softc;
 
@@ -613,8 +1799,8 @@
 
 		/* Compatibility for RL/priority-unaware code. */
 		priority = inccb->ccb_h.pinfo.priority;
-		if (priority < CAM_RL_TO_PRIORITY(CAM_RL_NORMAL))
-		    priority += CAM_RL_TO_PRIORITY(CAM_RL_NORMAL);
+		if (priority <= CAM_PRIORITY_OOB)
+		    priority += CAM_PRIORITY_OOB + 1;
 
 		/*
 		 * Non-immediate CCBs need a CCB from the per-device pool
@@ -650,15 +1836,335 @@
 
 		break;
 	}
+	case CAMIOQUEUE:
+	{
+		struct pass_io_req *io_req;
+		union ccb **user_ccb, *ccb;
+		xpt_opcode fc;
+
+#ifdef COMPAT_FREEBSD32
+		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+			error = ENOTTY;
+			goto bailout;
+		}
+#endif
+		if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) {
+			error = passcreatezone(periph);
+			if (error != 0)
+				goto bailout;
+		}
+
+		/*
+		 * We're going to do a blocking allocation for this I/O
+		 * request, so we have to drop the lock.
+		 */
+		cam_periph_unlock(periph);
+
+		io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO);
+		ccb = &io_req->ccb;
+		user_ccb = (union ccb **)addr;
+
+		/*
+		 * Unlike the CAMIOCOMMAND ioctl above, we only have a
+		 * pointer to the user's CCB, so we have to copy the whole
+		 * thing in to a buffer we have allocated (above) instead
+		 * of allowing the ioctl code to malloc a buffer and copy
+		 * it in.
+		 *
+		 * This is an advantage for this asynchronous interface,
+		 * since we don't want the memory to get freed while the
+		 * CCB is outstanding.
+		 */
+#if 0
+		xpt_print(periph->path, "Copying user CCB %p to "
+			  "kernel address %p\n", *user_ccb, ccb);
+#endif
+		error = copyin(*user_ccb, ccb, sizeof(*ccb));
+		if (error != 0) {
+			xpt_print(periph->path, "Copy of user CCB %p to "
+				  "kernel address %p failed with error %d\n",
+				  *user_ccb, ccb, error);
+			uma_zfree(softc->pass_zone, io_req);
+			cam_periph_lock(periph);
+			break;
+		}
+
+		if (ccb->ccb_h.func_code == XPT_SCSI_IO &&
+		    (ccb->ccb_h.flags & CAM_CDB_POINTER)) {
+			if (ccb->csio.cdb_len > IOCDBLEN) {
+				error = EINVAL;
+				uma_zfree(softc->pass_zone, io_req);
+				cam_periph_lock(periph);
+				break;
+			}
+			error = copyin(ccb->csio.cdb_io.cdb_ptr,
+			    ccb->csio.cdb_io.cdb_bytes, ccb->csio.cdb_len);
+			if (error != 0) {
+				uma_zfree(softc->pass_zone, io_req);
+				cam_periph_lock(periph);
+				break;
+			}
+			ccb->ccb_h.flags &= ~CAM_CDB_POINTER;
+		}
+
+		/*
+		 * Some CCB types, like scan bus and scan lun can only go
+		 * through the transport layer device.
+		 */
+		if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) {
+			xpt_print(periph->path, "CCB function code %#x is "
+			    "restricted to the XPT device\n",
+			    ccb->ccb_h.func_code);
+			uma_zfree(softc->pass_zone, io_req);
+			cam_periph_lock(periph);
+			error = ENODEV;
+			break;
+		}
+
+		/*
+		 * Save the user's CCB pointer as well as his linked list
+		 * pointers and peripheral private area so that we can
+		 * restore these later.
+		 */
+		io_req->user_ccb_ptr = *user_ccb;
+		io_req->user_periph_links = ccb->ccb_h.periph_links;
+		io_req->user_periph_priv = ccb->ccb_h.periph_priv;
+
+		/*
+		 * Now that we've saved the user's values, we can set our
+		 * own peripheral private entry.
+		 */
+		ccb->ccb_h.ccb_ioreq = io_req;
+
+		/* Compatibility for RL/priority-unaware code. */
+		priority = ccb->ccb_h.pinfo.priority;
+		if (priority <= CAM_PRIORITY_OOB)
+		    priority += CAM_PRIORITY_OOB + 1;
+
+		/*
+		 * Setup fields in the CCB like the path and the priority.
+		 * The path in particular cannot be done in userland, since
+		 * it is a pointer to a kernel data structure.
+		 */
+		xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, priority,
+				    ccb->ccb_h.flags);
+
+		/*
+		 * Setup our done routine.  There is no way for the user to
+		 * have a valid pointer here.
+		 */
+		ccb->ccb_h.cbfcnp = passdone;
+
+		fc = ccb->ccb_h.func_code;
+		/*
+		 * If this function code has memory that can be mapped in
+		 * or out, we need to call passmemsetup().
+		 */
+		if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO)
+		 || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH)
+		 || (fc == XPT_DEV_ADVINFO)) {
+			error = passmemsetup(periph, io_req);
+			if (error != 0) {
+				uma_zfree(softc->pass_zone, io_req);
+				cam_periph_lock(periph);
+				break;
+			}
+		} else
+			io_req->mapinfo.num_bufs_used = 0;
+
+		cam_periph_lock(periph);
+
+		/*
+		 * Everything goes on the incoming queue initially.
+		 */
+		TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links);
+
+		/*
+		 * If the CCB is queued, and is not a user CCB, then
+		 * we need to allocate a slot for it.  Call xpt_schedule()
+		 * so that our start routine will get called when a CCB is
+		 * available.
+		 */
+		if ((fc & XPT_FC_QUEUED)
+		 && ((fc & XPT_FC_USER_CCB) == 0)) {
+			xpt_schedule(periph, priority);
+			break;
+		} 
+
+		/*
+		 * At this point, the CCB in question is either an
+		 * immediate CCB (like XPT_DEV_ADVINFO) or it is a user CCB
+		 * and therefore should be malloced, not allocated via a slot.
+		 * Remove the CCB from the incoming queue and add it to the
+		 * active queue.
+		 */
+		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
+		TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);
+
+		xpt_action(ccb);
+
+		/*
+		 * If this is not a queued CCB (i.e. it is an immediate CCB),
+		 * then it is already done.  We need to put it on the done
+		 * queue for the user to fetch.
+		 */
+		if ((fc & XPT_FC_QUEUED) == 0) {
+			TAILQ_REMOVE(&softc->active_queue, io_req, links);
+			TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
+		}
+		break;
+	}
+	case CAMIOGET:
+	{
+		union ccb **user_ccb;
+		struct pass_io_req *io_req;
+		int old_error;
+
+#ifdef COMPAT_FREEBSD32
+		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+			error = ENOTTY;
+			goto bailout;
+		}
+#endif
+		user_ccb = (union ccb **)addr;
+		old_error = 0;
+
+		io_req = TAILQ_FIRST(&softc->done_queue);
+		if (io_req == NULL) {
+			error = ENOENT;
+			break;
+		}
+
+		/*
+		 * Remove the I/O from the done queue.
+		 */
+		TAILQ_REMOVE(&softc->done_queue, io_req, links);
+
+		/*
+		 * We have to drop the lock during the copyout because the
+		 * copyout can result in VM faults that require sleeping.
+		 */
+		cam_periph_unlock(periph);
+
+		/*
+		 * Do any needed copies (e.g. for reads) and revert the
+		 * pointers in the CCB back to the user's pointers.
+		 */
+		error = passmemdone(periph, io_req);
+
+		old_error = error;
+
+		io_req->ccb.ccb_h.periph_links = io_req->user_periph_links;
+		io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv;
+
+#if 0
+		xpt_print(periph->path, "Copying to user CCB %p from "
+			  "kernel address %p\n", *user_ccb, &io_req->ccb);
+#endif
+
+		error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb));
+		if (error != 0) {
+			xpt_print(periph->path, "Copy to user CCB %p from "
+				  "kernel address %p failed with error %d\n",
+				  *user_ccb, &io_req->ccb, error);
+		}
+
+		/*
+		 * Prefer the first error we got back, and make sure we
+		 * don't overwrite bad status with good.
+		 */
+		if (old_error != 0)
+			error = old_error;
+
+		cam_periph_lock(periph);
+
+		/*
+		 * At this point, if there was an error, we could potentially
+		 * re-queue the I/O and try again.  But why?  The error
+		 * would almost certainly happen again.  We might as well
+		 * not leak memory.
+		 */
+		uma_zfree(softc->pass_zone, io_req);
+		break;
+	}
 	default:
 		error = cam_periph_ioctl(periph, cmd, addr, passerror);
 		break;
 	}
 
+bailout:
 	cam_periph_unlock(periph);
+
 	return(error);
 }
 
+static int
+passpoll(struct cdev *dev, int poll_events, struct thread *td)
+{
+	struct cam_periph *periph;
+	struct pass_softc *softc;
+	int revents;
+
+	periph = (struct cam_periph *)dev->si_drv1;
+	softc = (struct pass_softc *)periph->softc;
+
+	revents = poll_events & (POLLOUT | POLLWRNORM);
+	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
+		cam_periph_lock(periph);
+
+		if (!TAILQ_EMPTY(&softc->done_queue)) {
+			revents |= poll_events & (POLLIN | POLLRDNORM);
+		}
+		cam_periph_unlock(periph);
+		if (revents == 0)
+			selrecord(td, &softc->read_select);
+	}
+
+	return (revents);
+}
+
+static int
+passkqfilter(struct cdev *dev, struct knote *kn)
+{
+	struct cam_periph *periph;
+	struct pass_softc *softc;
+
+	periph = (struct cam_periph *)dev->si_drv1;
+	softc = (struct pass_softc *)periph->softc;
+
+	kn->kn_hook = (caddr_t)periph;
+	kn->kn_fop = &passread_filtops;
+	knlist_add(&softc->read_select.si_note, kn, 0);
+
+	return (0);
+}
+
+static void
+passreadfiltdetach(struct knote *kn)
+{
+	struct cam_periph *periph;
+	struct pass_softc *softc;
+
+	periph = (struct cam_periph *)kn->kn_hook;
+	softc = (struct pass_softc *)periph->softc;
+
+	knlist_remove(&softc->read_select.si_note, kn, 0);
+}
+
+static int
+passreadfilt(struct knote *kn, long hint)
+{
+	struct cam_periph *periph;
+	struct pass_softc *softc;
+	int retval;
+
+	periph = (struct cam_periph *)kn->kn_hook;
+	softc = (struct pass_softc *)periph->softc;
+
+	cam_periph_assert(periph, MA_OWNED);
+
+	if (TAILQ_EMPTY(&softc->done_queue))
+		retval = 0;
+	else
+		retval = 1;
+
+	return (retval);
+}
+
 /*
  * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
  * should be the CCB that is copied in from the user.
@@ -668,12 +2174,12 @@
 {
 	struct pass_softc *softc;
 	struct cam_periph_map_info mapinfo;
-	int error, need_unmap;
+	uint8_t *cmd;
+	xpt_opcode fc;
+	int error;
 
 	softc = (struct pass_softc *)periph->softc;
 
-	need_unmap = 0;
-
 	/*
 	 * There are some fields in the CCB header that need to be
 	 * preserved, the rest we get from the user.
@@ -680,32 +2186,26 @@
 	 */
 	xpt_merge_ccb(ccb, inccb);
 
+	if (ccb->ccb_h.flags & CAM_CDB_POINTER) {
+		cmd = __builtin_alloca(ccb->csio.cdb_len);
+		error = copyin(ccb->csio.cdb_io.cdb_ptr, cmd, ccb->csio.cdb_len);
+		if (error)
+			return (error);
+		ccb->csio.cdb_io.cdb_ptr = cmd;
+	}
+
 	/*
-	 * There's no way for the user to have a completion
-	 * function, so we put our own completion function in here.
+	 * There's no way for the user to supply a valid completion
+	 * function, so we always substitute our own.
 	 */
 	ccb->ccb_h.cbfcnp = passdone;
 
 	/*
-	 * We only attempt to map the user memory into kernel space
-	 * if they haven't passed in a physical memory pointer,
-	 * and if there is actually an I/O operation to perform.
-	 * cam_periph_mapmem() supports SCSI, ATA, SMP, ADVINFO and device
-	 * match CCBs.  For the SCSI, ATA and ADVINFO CCBs, we only pass the
-	 * CCB in if there's actually data to map.  cam_periph_mapmem() will
-	 * do the right thing, even if there isn't data to map, but since CCBs
-	 * without data are a reasonably common occurance (e.g. test unit
-	 * ready), it will save a few cycles if we check for it here.
+	 * Let cam_periph_mapmem do a sanity check on the data pointer format.
+	 * Even if no data transfer is needed, it's a cheap check and it
+	 * simplifies the code.
 	 */
-	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
-	 && (((ccb->ccb_h.func_code == XPT_SCSI_IO ||
-	       ccb->ccb_h.func_code == XPT_ATA_IO)
-	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
-	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH)
-	  || (ccb->ccb_h.func_code == XPT_SMP_IO)
-	  || ((ccb->ccb_h.func_code == XPT_DEV_ADVINFO)
-	   && (ccb->cdai.bufsiz > 0)))) {
-
+	fc = ccb->ccb_h.func_code;
+	if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO)
+	 || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO)) {
 		bzero(&mapinfo, sizeof(mapinfo));
 
 		/*
@@ -714,7 +2214,7 @@
 		 * Dropping it here is reasonably safe.
 		 */
 		cam_periph_unlock(periph);
-		error = cam_periph_mapmem(ccb, &mapinfo); 
+		error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio);
 		cam_periph_lock(periph);
 
 		/*
@@ -723,26 +2223,21 @@
 		 */
 		if (error)
 			return(error);
+	} else
+		/* Ensure that the unmap call later on is a no-op. */
+		mapinfo.num_bufs_used = 0;
 
-		/*
-		 * We successfully mapped the memory in, so we need to
-		 * unmap it when the transaction is done.
-		 */
-		need_unmap = 1;
-	}
-
 	/*
 	 * If the user wants us to perform any error recovery, then honor
 	 * that request.  Otherwise, it's up to the user to perform any
 	 * error recovery.
 	 */
-	cam_periph_runccb(ccb, passerror, /* cam_flags */ CAM_RETRY_SELTO,
-	    /* sense_flags */ ((ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
-	     SF_RETRY_UA : SF_NO_RECOVERY) | SF_NO_PRINT,
+	cam_periph_runccb(ccb, (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ? 
+	    passerror : NULL, /* cam_flags */ CAM_RETRY_SELTO,
+	    /* sense_flags */ SF_RETRY_UA | SF_NO_PRINT,
 	    softc->device_stats);
 
-	if (need_unmap != 0)
-		cam_periph_unmapmem(ccb, &mapinfo);
+	cam_periph_unmapmem(ccb, &mapinfo);
 
 	ccb->ccb_h.cbfcnp = NULL;
 	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
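
The CAM_DATA_SG path in passmemsetup() above sizes the kernel S/G list in
io_zone_size increments.  A minimal standalone sketch of that arithmetic,
assuming the stock 128KiB MAXPHYS and a hypothetical 300KiB transfer
(neither value comes from this commit):

	/* Sketch of passmemsetup()'s kernel segment count calculation. */
	#include <stdio.h>

	int
	main(void)
	{
		size_t io_zone_size = 128 * 1024; /* softc->io_zone_size (MAXPHYS) */
		size_t dxfer_len = 300 * 1024;	  /* hypothetical user transfer */
		unsigned int num_segs_needed;

		if (dxfer_len <= io_zone_size)
			num_segs_needed = 1;
		else {
			num_segs_needed = dxfer_len / io_zone_size;
			if ((dxfer_len % io_zone_size) != 0)
				num_segs_needed++;
		}
		/* 300KiB -> two full 128KiB zones plus one 44KiB tail = 3. */
		printf("%u kernel segments\n", num_segs_needed);
		return (0);
	}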

Modified: trunk/sys/cam/scsi/scsi_pass.h
===================================================================
--- trunk/sys/cam/scsi/scsi_pass.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_pass.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 1997, 1999 Kenneth D. Merry.
  * All rights reserved.
@@ -22,7 +23,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_pass.h 292348 2015-12-16 19:01:14Z ken $
  */
 
 #ifndef _SCSI_PASS_H
@@ -39,4 +40,12 @@
 #define CAMIOCOMMAND	_IOWR(CAM_VERSION, 2, union ccb)
 #define CAMGETPASSTHRU	_IOWR(CAM_VERSION, 3, union ccb)
 
+/*
+ * These two ioctls take a union ccb *, but that is not explicitly declared
+ * to avoid having the ioctl handling code malloc and free their own copy
+ * of the CCB or the CCB pointer.
+ */
+#define CAMIOQUEUE	_IO(CAM_VERSION, 4)
+#define CAMIOGET	_IO(CAM_VERSION, 5)
+
 #endif
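
CAMIOQUEUE and CAMIOGET are the asynchronous counterparts to CAMIOCOMMAND:
the caller queues a CCB, waits for readiness with poll(2) or kqueue(2) on
the pass(4) device, then fetches the completed CCB.  A minimal userland
sketch of that flow, assuming a /dev/pass0 device, the libcam helpers, and
a data-less TEST UNIT READY so no S/G mapping is involved (all of which
are illustrative choices, not part of this commit):

	#include <sys/ioctl.h>
	#include <err.h>
	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>

	#include <cam/cam.h>
	#include <cam/cam_ccb.h>
	#include <cam/scsi/scsi_all.h>
	#include <cam/scsi/scsi_message.h>
	#include <camlib.h>

	int
	main(void)
	{
		struct cam_device *dev;
		union ccb *ccb;
		struct pollfd pfd;

		if ((dev = cam_open_device("/dev/pass0", O_RDWR)) == NULL)
			errx(1, "%s", cam_errbuf);
		if ((ccb = cam_getccb(dev)) == NULL)
			errx(1, "cam_getccb failed");

		/* cbfcnp is ignored; the kernel substitutes passdone(). */
		scsi_test_unit_ready(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ NULL,
		    MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/ 5000);
		ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;

		/* Queue the CCB; the kernel copies it in and returns at once. */
		if (ioctl(dev->fd, CAMIOQUEUE, &ccb) == -1)
			err(1, "CAMIOQUEUE");

		/* Completion lands on the done queue and wakes poll()/kevent(). */
		pfd.fd = dev->fd;
		pfd.events = POLLIN;
		if (poll(&pfd, 1, -1) == -1)
			err(1, "poll");

		/* Copy the completed CCB back out into our buffer. */
		if (ioctl(dev->fd, CAMIOGET, &ccb) == -1)
			err(1, "CAMIOGET");

		printf("CAM status %#x\n", ccb->ccb_h.status & CAM_STATUS_MASK);
		cam_freeccb(ccb);
		cam_close_device(dev);
		return (0);
	}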

Modified: trunk/sys/cam/scsi/scsi_pt.c
===================================================================
--- trunk/sys/cam/scsi/scsi_pt.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_pt.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Implementation of SCSI Processor Target Peripheral driver for CAM.
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_pt.c 294978 2016-01-28 09:25:15Z kib $");
 
 #include <sys/param.h>
 #include <sys/queue.h>
@@ -66,7 +67,6 @@
 
 typedef enum {
 	PT_CCB_BUFFER_IO	= 0x01,
-	PT_CCB_WAITING		= 0x02,
 	PT_CCB_RETRY_UA		= 0x04,
 	PT_CCB_BUFFER_IO_UA	= PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA
 } pt_ccb_state;
@@ -174,9 +174,6 @@
 	struct	pt_softc *softc;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return (ENXIO);	
-
 	softc = (struct pt_softc *)periph->softc;
 
 	cam_periph_lock(periph);
@@ -253,6 +250,8 @@
 	struct pt_softc *softc;
 	struct ccb_getdev *cgd;
 	struct ccb_pathinq cpi;
+	struct make_dev_args args;
+	int error;
 
 	cgd = (struct ccb_getdev *)arg;
 	if (cgd == NULL) {
@@ -283,6 +282,21 @@
 	xpt_action((union ccb *)&cpi);
 
 	cam_periph_unlock(periph);
+
+	make_dev_args_init(&args);
+	args.mda_devsw = &pt_cdevsw;
+	args.mda_unit = periph->unit_number;
+	args.mda_uid = UID_ROOT;
+	args.mda_gid = GID_OPERATOR;
+	args.mda_mode = 0600;
+	args.mda_si_drv1 = periph;
+	error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name,
+	    periph->unit_number);
+	if (error != 0) {
+		cam_periph_lock(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
+
 	softc->device_stats = devstat_new_entry("pt",
 			  periph->unit_number, 0,
 			  DEVSTAT_NO_BLOCKSIZE,
@@ -290,11 +304,7 @@
 			  XPORT_DEVSTAT_TYPE(cpi.transport),
 			  DEVSTAT_PRIORITY_OTHER);
 
-	softc->dev = make_dev(&pt_cdevsw, periph->unit_number, UID_ROOT,
-			      GID_OPERATOR, 0600, "%s%d", periph->periph_name,
-			      periph->unit_number);
 	cam_periph_lock(periph);
-	softc->dev->si_drv1 = periph;
 
 	/*
 	 * Add async callbacks for bus reset and
@@ -333,8 +343,6 @@
 	 *     with XPT_ABORT_CCB.
 	 */
 	bioq_flush(&softc->bio_queue, NULL, ENXIO);
-
-	xpt_print(periph->path, "lost device\n");
 }
 
 static void
@@ -344,7 +352,6 @@
 
 	softc = (struct pt_softc *)periph->softc;
 
-	xpt_print(periph->path, "removing device entry\n");
 	devstat_remove_entry(softc->device_stats);
 	cam_periph_unlock(periph);
 	destroy_dev(softc->dev);
@@ -370,7 +377,8 @@
 
 		if (cgd->protocol != PROTO_SCSI)
 			break;
-
+		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
+			break;
 		if (SID_TYPE(&cgd->inq_data) != T_PROCESSOR)
 			break;
 
@@ -381,7 +389,7 @@
 		 */
 		status = cam_periph_alloc(ptctor, ptoninvalidate, ptdtor,
 					  ptstart, "pt", CAM_PERIPH_BIO,
-					  cgd->ccb_h.path, ptasync,
+					  path, ptasync,
 					  AC_FOUND_DEVICE, cgd);
 
 		if (status != CAM_REQ_CMP
@@ -426,15 +434,7 @@
 	 * See if there is a buf with work for us to do..
 	 */
 	bp = bioq_first(&softc->bio_queue);
-	if (periph->immediate_priority <= periph->pinfo.priority) {
-		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
-				("queuing for immediate ccb\n"));
-		start_ccb->ccb_h.ccb_state = PT_CCB_WAITING;
-		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-				  periph_links.sle);
-		periph->immediate_priority = CAM_PRIORITY_NONE;
-		wakeup(&periph->ccb_list);
-	} else if (bp == NULL) {
+	if (bp == NULL) {
 		xpt_release_ccb(start_ccb);
 	} else {
 		bioq_remove(&softc->bio_queue, bp);
@@ -455,7 +455,7 @@
 		start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO_UA;
 
 		/*
-		 * Block out any asyncronous callbacks
+		 * Block out any asynchronous callbacks
 		 * while we touch the pending ccb list.
 		 */
 		LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h,
@@ -549,7 +549,7 @@
 		}
 
 		/*
-		 * Block out any asyncronous callbacks
+		 * Block out any asynchronous callbacks
 		 * while we touch the pending ccb list.
 		 */
 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
@@ -557,10 +557,6 @@
 		biofinish(bp, softc->device_stats, 0);
 		break;
 	}
-	case PT_CCB_WAITING:
-		/* Caller will release the CCB */
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
 	}
 	xpt_release_ccb(done_ccb);
 }
@@ -586,9 +582,6 @@
 	int error = 0;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return(ENXIO);
-
 	softc = (struct pt_softc *)periph->softc;
 
 	cam_periph_lock(periph);
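
Like pass(4) earlier in this commit, pt(4) now registers its device node
with make_dev_s(9) rather than make_dev(9).  The change that lets the
open/ioctl paths drop their si_drv1 NULL checks is that make_dev_s()
publishes mda_si_drv1 atomically with node creation, so an open racing
with attach can never observe a half-initialized node.  A hedged sketch
of the pattern for a hypothetical driver (names and error handling are
illustrative):

	struct make_dev_args args;
	int error;

	make_dev_args_init(&args);
	args.mda_devsw = &foo_cdevsw;	/* hypothetical cdevsw */
	args.mda_unit = unit;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_OPERATOR;
	args.mda_mode = 0600;
	args.mda_si_drv1 = softc;	/* set before the node is visible */
	error = make_dev_s(&args, &softc->dev, "foo%d", unit);
	if (error != 0)
		return (error);		/* no window with si_drv1 == NULL */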

Modified: trunk/sys/cam/scsi/scsi_pt.h
===================================================================
--- trunk/sys/cam/scsi/scsi_pt.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_pt.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Structure and function declarations for Processor type devices.
  *
@@ -25,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_pt.h 139743 2005-01-05 22:34:37Z imp $
  */
 
 #ifndef	_SCSI_SCSI_PT_H
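
The scsi_sa.c hunk below introduces the SASBADD* family of sbuf(9)
macros, which serialize sa(4) parameters as one XML element apiece.  As a
worked example with hypothetical values, a call such as:

	uint32_t blocksize = 1024;
	int indent = 2;

	SASBADDUINTDESC(sb, indent, blocksize, %u, blocksize,
	    "Block size in bytes");

expands through SASBADDBASE() into a single sbuf_printf() that emits:

	  <blocksize type="uint" size="4" fmt="%u" desc="Block size in bytes">1024</blocksize>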

Modified: trunk/sys/cam/scsi/scsi_sa.c
===================================================================
--- trunk/sys/cam/scsi/scsi_sa.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_sa.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,7 +1,9 @@
+/* $MidnightBSD$ */
 /*-
  * Implementation of SCSI Sequential Access Peripheral driver for CAM.
  *
  * Copyright (c) 1999, 2000 Matthew Jacob
+ * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_sa.c 332933 2018-04-24 13:52:39Z ken $");
 
 #include <sys/param.h>
 #include <sys/queue.h>
@@ -43,6 +45,9 @@
 #include <sys/mtio.h>
 #ifdef _KERNEL
 #include <sys/conf.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
 #endif
 #include <sys/fcntl.h>
 #include <sys/devicestat.h>
@@ -67,7 +72,7 @@
 #include <opt_sa.h>
 
 #ifndef SA_IO_TIMEOUT
-#define SA_IO_TIMEOUT		4
+#define SA_IO_TIMEOUT		32
 #endif
 #ifndef SA_SPACE_TIMEOUT
 #define SA_SPACE_TIMEOUT	1 * 60
@@ -78,6 +83,9 @@
 #ifndef SA_ERASE_TIMEOUT
 #define SA_ERASE_TIMEOUT	4 * 60
 #endif
+#ifndef SA_REP_DENSITY_TIMEOUT
+#define SA_REP_DENSITY_TIMEOUT	90
+#endif
 
 #define	SCSIOP_TIMEOUT		(60 * 1000)	/* not an option */
 
@@ -85,6 +93,7 @@
 #define	REWIND_TIMEOUT		(SA_REWIND_TIMEOUT * 60 * 1000)
 #define	ERASE_TIMEOUT		(SA_ERASE_TIMEOUT * 60 * 1000)
 #define	SPACE_TIMEOUT		(SA_SPACE_TIMEOUT * 60 * 1000)
+#define	REP_DENSITY_TIMEOUT	(SA_REP_DENSITY_TIMEOUT * 60 * 1000)
 
 /*
  * Additional options that can be set for config: SA_1FM_AT_EOT
@@ -111,19 +120,10 @@
 #define ccb_pflags	ppriv_field0
 #define ccb_bp	 	ppriv_ptr1
 
-#define	SA_CCB_BUFFER_IO	0x0
-#define	SA_CCB_WAITING		0x1
-#define	SA_CCB_TYPEMASK		0x1
-#define	SA_POSITION_UPDATED	0x2
+/* bits in ccb_pflags */
+#define	SA_POSITION_UPDATED	0x1
 
-#define	Set_CCB_Type(x, type)				\
-	x->ccb_h.ccb_pflags &= ~SA_CCB_TYPEMASK;	\
-	x->ccb_h.ccb_pflags |= type
 
-#define	CCB_Type(x)	(x->ccb_h.ccb_pflags & SA_CCB_TYPEMASK)
-
-
-
 typedef enum {
 	SA_FLAG_OPEN		= 0x0001,
 	SA_FLAG_FIXED		= 0x0002,
@@ -140,7 +140,12 @@
 	SA_FLAG_COMP_ENABLED	= 0x0400,
 	SA_FLAG_COMP_SUPP	= 0x0800,
 	SA_FLAG_COMP_UNSUPP	= 0x1000,
-	SA_FLAG_TAPE_FROZEN	= 0x2000
+	SA_FLAG_TAPE_FROZEN	= 0x2000,
+	SA_FLAG_PROTECT_SUPP	= 0x4000,
+
+	SA_FLAG_COMPRESSION	= (SA_FLAG_COMP_SUPP|SA_FLAG_COMP_ENABLED|
+				   SA_FLAG_COMP_UNSUPP),
+	SA_FLAG_SCTX_INIT	= 0x8000
 } sa_flags;
 
 typedef enum {
@@ -150,27 +155,30 @@
 } sa_mode;
 
 typedef enum {
-	SA_PARAM_NONE		= 0x00,
-	SA_PARAM_BLOCKSIZE	= 0x01,
-	SA_PARAM_DENSITY	= 0x02,
-	SA_PARAM_COMPRESSION	= 0x04,
-	SA_PARAM_BUFF_MODE	= 0x08,
-	SA_PARAM_NUMBLOCKS	= 0x10,
-	SA_PARAM_WP		= 0x20,
-	SA_PARAM_SPEED		= 0x40,
-	SA_PARAM_ALL		= 0x7f
+	SA_PARAM_NONE		= 0x000,
+	SA_PARAM_BLOCKSIZE	= 0x001,
+	SA_PARAM_DENSITY	= 0x002,
+	SA_PARAM_COMPRESSION	= 0x004,
+	SA_PARAM_BUFF_MODE	= 0x008,
+	SA_PARAM_NUMBLOCKS	= 0x010,
+	SA_PARAM_WP		= 0x020,
+	SA_PARAM_SPEED		= 0x040,
+	SA_PARAM_DENSITY_EXT	= 0x080,
+	SA_PARAM_LBP		= 0x100,
+	SA_PARAM_ALL		= 0x1ff
 } sa_params;
 
 typedef enum {
-	SA_QUIRK_NONE		= 0x00,
-	SA_QUIRK_NOCOMP		= 0x01,	/* Can't deal with compression at all */
-	SA_QUIRK_FIXED		= 0x02,	/* Force fixed mode */
-	SA_QUIRK_VARIABLE	= 0x04,	/* Force variable mode */
-	SA_QUIRK_2FM		= 0x08,	/* Needs Two File Marks at EOD */
-	SA_QUIRK_1FM		= 0x10,	/* No more than 1 File Mark at EOD */
-	SA_QUIRK_NODREAD	= 0x20,	/* Don't try and dummy read density */
-	SA_QUIRK_NO_MODESEL	= 0x40,	/* Don't do mode select at all */
-	SA_QUIRK_NO_CPAGE	= 0x80	/* Don't use DEVICE COMPRESSION page */
+	SA_QUIRK_NONE		= 0x000,
+	SA_QUIRK_NOCOMP		= 0x001, /* Can't deal with compression at all*/
+	SA_QUIRK_FIXED		= 0x002, /* Force fixed mode */
+	SA_QUIRK_VARIABLE	= 0x004, /* Force variable mode */
+	SA_QUIRK_2FM		= 0x008, /* Needs Two File Marks at EOD */
+	SA_QUIRK_1FM		= 0x010, /* No more than 1 File Mark at EOD */
+	SA_QUIRK_NODREAD	= 0x020, /* Don't try and dummy read density */
+	SA_QUIRK_NO_MODESEL	= 0x040, /* Don't do mode select at all */
+	SA_QUIRK_NO_CPAGE	= 0x080, /* Don't use DEVICE COMPRESSION page */
+	SA_QUIRK_NO_LONG_POS	= 0x100  /* No long position information */
 } sa_quirks;
 
 #define SA_QUIRK_BIT_STRING	\
@@ -182,10 +190,10 @@
 	"\0051FM"		\
 	"\006NODREAD"		\
 	"\007NO_MODESEL"	\
-	"\010NO_CPAGE"
+	"\010NO_CPAGE"		\
+	"\011NO_LONG_POS"
 
 #define	SAMODE(z)	(dev2unit(z) & 0x3)
-#define	SADENSITY(z)	((dev2unit(z) >> 2) & 0x3)
 #define	SA_IS_CTRL(z)	(dev2unit(z) & (1 << 4))
 
 #define SA_NOT_CTLDEV	0
@@ -194,33 +202,144 @@
 #define SA_ATYPE_R	0
 #define SA_ATYPE_NR	1
 #define SA_ATYPE_ER	2
+#define SA_NUM_ATYPES	3
 
-#define	SAMINOR(ctl, mode, access) \
-	((ctl << 4) | (mode << 2) | (access & 0x3))
+#define	SAMINOR(ctl, access) \
+	((ctl << 4) | (access & 0x3))
 
-#define SA_NUM_MODES	4
 struct sa_devs {
 	struct cdev *ctl_dev;
-	struct sa_mode_devs {
-		struct cdev *r_dev;
-		struct cdev *nr_dev;
-		struct cdev *er_dev;
-	} mode_devs[SA_NUM_MODES];
+	struct cdev *r_dev;
+	struct cdev *nr_dev;
+	struct cdev *er_dev;
 };
 
+#define	SASBADDBASE(sb, indent, data, xfmt, name, type, xsize, desc)	\
+	sbuf_printf(sb, "%*s<%s type=\"%s\" size=\"%zd\" "		\
+	    "fmt=\"%s\" desc=\"%s\">" #xfmt "</%s>\n", indent, "", 	\
+	    #name, #type, xsize, #xfmt, desc ? desc : "", data, #name);
+
+#define	SASBADDINT(sb, indent, data, fmt, name)				\
+	SASBADDBASE(sb, indent, data, fmt, name, int, sizeof(data),	\
+		    NULL)
+
+#define	SASBADDINTDESC(sb, indent, data, fmt, name, desc)		\
+	SASBADDBASE(sb, indent, data, fmt, name, int, sizeof(data),	\
+		    desc)
+
+#define	SASBADDUINT(sb, indent, data, fmt, name)			\
+	SASBADDBASE(sb, indent, data, fmt, name, uint, sizeof(data), 	\
+		    NULL)
+
+#define	SASBADDUINTDESC(sb, indent, data, fmt, name, desc)		\
+	SASBADDBASE(sb, indent, data, fmt, name, uint, sizeof(data), 	\
+		    desc)
+
+#define	SASBADDFIXEDSTR(sb, indent, data, fmt, name)			\
+	SASBADDBASE(sb, indent, data, fmt, name, str, sizeof(data),	\
+		    NULL)
+
+#define	SASBADDFIXEDSTRDESC(sb, indent, data, fmt, name, desc)		\
+	SASBADDBASE(sb, indent, data, fmt, name, str, sizeof(data),	\
+		    desc)
+
+#define	SASBADDVARSTR(sb, indent, data, fmt, name, maxlen)		\
+	SASBADDBASE(sb, indent, data, fmt, name, str, maxlen, NULL)
+
+#define	SASBADDVARSTRDESC(sb, indent, data, fmt, name, maxlen, desc)	\
+	SASBADDBASE(sb, indent, data, fmt, name, str, maxlen, desc)
+
+#define	SASBADDNODE(sb, indent, name) {					\
+	sbuf_printf(sb, "%*s<%s type=\"%s\">\n", indent, "", #name,	\
+	    "node");							\
+	indent += 2;							\
+}
+
+#define	SASBADDNODENUM(sb, indent, name, num) {				\
+	sbuf_printf(sb, "%*s<%s type=\"%s\" num=\"%d\">\n", indent, "",	\
+	    #name, "node", num);					\
+	indent += 2;							\
+}
+
+#define	SASBENDNODE(sb, indent, name) {					\
+	indent -= 2;							\
+	sbuf_printf(sb, "%*s</%s>\n", indent, "", #name);		\
+}
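
These macros render driver state as XML into an sbuf(9); a sketch of how they compose, with hypothetical output, follows.

	/*
	 * Illustration: safillprot() further down emits
	 *
	 *	SASBADDNODE(sb, *indent, protection);
	 *	SASBADDINTDESC(sb, *indent, tmpint, %d, protection_supported,
	 *	    "Set to 1 if protection information is supported");
	 *	SASBENDNODE(sb, *indent, protection);
	 *
	 * which appends roughly (element wrapped here for width):
	 *
	 *	<protection type="node">
	 *	  <protection_supported type="int" size="4" fmt="%d"
	 *	   desc="Set to 1 if protection information is supported">0</protection_supported>
	 *	</protection>
	 *
	 * SASBADDNODE bumps the indent by two so nested elements line up.
	 */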
+
+#define	SA_DENSITY_TYPES	4
+
+struct sa_prot_state {
+	int initialized;
+	uint32_t prot_method;
+	uint32_t pi_length;
+	uint32_t lbp_w;
+	uint32_t lbp_r;
+	uint32_t rbdp;
+};
+
+struct sa_prot_info {
+	struct sa_prot_state cur_prot_state;
+	struct sa_prot_state pending_prot_state;
+};
+
+/*
+ * A table mapping protection parameters to their types and values.
+ */
+struct sa_prot_map {
+	char *name;
+	mt_param_set_type param_type;
+	off_t offset;
+	uint32_t min_val;
+	uint32_t max_val;
+	uint32_t *value;
+} sa_prot_table[] = {
+	{ "prot_method", MT_PARAM_SET_UNSIGNED,
+	  __offsetof(struct sa_prot_state, prot_method), 
+	  /*min_val*/ 0, /*max_val*/ 255, NULL },
+	{ "pi_length", MT_PARAM_SET_UNSIGNED, 
+	  __offsetof(struct sa_prot_state, pi_length),
+	  /*min_val*/ 0, /*max_val*/ SA_CTRL_DP_PI_LENGTH_MASK, NULL },
+	{ "lbp_w", MT_PARAM_SET_UNSIGNED,
+	  __offsetof(struct sa_prot_state, lbp_w),
+	  /*min_val*/ 0, /*max_val*/ 1, NULL },
+	{ "lbp_r", MT_PARAM_SET_UNSIGNED,
+	  __offsetof(struct sa_prot_state, lbp_r),
+	  /*min_val*/ 0, /*max_val*/ 1, NULL },
+	{ "rbdp", MT_PARAM_SET_UNSIGNED,
+	  __offsetof(struct sa_prot_state, rbdp),
+	  /*min_val*/ 0, /*max_val*/ 1, NULL }
+};
+
+#define	SA_NUM_PROT_ENTS sizeof(sa_prot_table)/sizeof(sa_prot_table[0])
+
+#define	SA_PROT_ENABLED(softc) ((softc->flags & SA_FLAG_PROTECT_SUPP)	\
+	&& (softc->prot_info.cur_prot_state.initialized != 0)		\
+	&& (softc->prot_info.cur_prot_state.prot_method != 0))
+
+#define	SA_PROT_LEN(softc)	softc->prot_info.cur_prot_state.pi_length
+
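The offset fields above drive a small piece of generic plumbing: sapopulateprots() (further down) binds each table row to the matching field of a struct sa_prot_state, after which a handler can validate and store values without knowing which parameter it is operating on. The essence of the pattern, as a sketch with illustrative names:

	/*
	 * Sketch only; "row", "state" and "val" are illustrative names.
	 */
	row->value = (uint32_t *)((uint8_t *)state + row->offset);
	if (val >= row->min_val && val <= row->max_val)
		*row->value = val;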
 struct sa_softc {
 	sa_state	state;
 	sa_flags	flags;
 	sa_quirks	quirks;
+	u_int		si_flags;
+	struct cam_periph *periph;
 	struct		bio_queue_head bio_queue;
 	int		queue_count;
 	struct		devstat *device_stats;
 	struct sa_devs	devs;
+	int		open_count;
+	int		num_devs_to_destroy;
 	int		blk_gran;
 	int		blk_mask;
 	int		blk_shift;
 	u_int32_t	max_blk;
 	u_int32_t	min_blk;
+	u_int32_t	maxio;
+	u_int32_t	cpi_maxio;
+	int		allow_io_split;
+	int		inject_eom;
+	int		set_pews_status;
 	u_int32_t	comp_algorithm;
 	u_int32_t	saved_comp_algorithm;
 	u_int32_t	media_blksize;
@@ -234,12 +353,37 @@
 	int		filemarks;
 	union		ccb saved_ccb;
 	int		last_resid_was_io;
+	uint8_t		density_type_bits[SA_DENSITY_TYPES];
+	int		density_info_valid[SA_DENSITY_TYPES];
+	uint8_t		density_info[SA_DENSITY_TYPES][SRDS_MAX_LENGTH];
 
+	struct sa_prot_info	prot_info;
+
+	int		sili;
+	int		eot_warn;
+
 	/*
-	 * Relative to BOT Location.
+	 * Current position information.  -1 means that the given value is
+	 * unknown.  fileno and blkno are always calculated.  blkno is
+	 * relative to the previous file mark.  rep_fileno and rep_blkno
+	 * are as reported by the drive, if it supports the long form
+	 * report for the READ POSITION command.  rep_blkno is relative to
+	 * the beginning of the partition.
+	 *
+	 * bop means that the drive is at the beginning of the partition.
+	 * eop means that the drive is between early warning and end of
+	 * partition, inside the current partition.
+	 * bpew means that the position is in a PEWZ (Programmable Early
+	 * Warning Zone)
 	 */
-	daddr_t		fileno;
-	daddr_t		blkno;
+	daddr_t		partition;	/* Absolute from BOT */
+	daddr_t		fileno;		/* Relative to beginning of partition */
+	daddr_t		blkno;		/* Relative to last file mark */
+	daddr_t		rep_blkno;	/* Relative to beginning of partition */
+	daddr_t		rep_fileno;	/* Relative to beginning of partition */
+	int		bop;		/* Beginning of Partition */
+	int		eop;		/* End of Partition */
+	int		bpew;		/* Beyond Programmable Early Warning */
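	/*
	 * Example (values hypothetical): after rewinding partition 0 and
	 * writing one file followed by a filemark, a drive with long
	 * READ POSITION support might report:
	 *
	 *	partition = 0, fileno = 1, blkno = 0
	 *	rep_fileno = 1, rep_blkno = <objects from BOP, per drive>
	 *	bop = 0, eop = 0, bpew = 0
	 *
	 * -1 in any of the daddr_t fields above means "unknown".
	 */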
 
 	/*
 	 * Latched Error Info
@@ -266,6 +410,10 @@
 		open_rdonly		: 1,	/* open read-only */
 		open_pending_mount	: 1,	/* open pending mount */
 		ctrl_mode		: 1;	/* control device open */
+
+	struct task		sysctl_task;
+	struct sysctl_ctx_list	sysctl_ctx;
+	struct sysctl_oid	*sysctl_tree;
 };
 
 struct sa_quirk_entry {
@@ -402,16 +550,42 @@
 				    u_int8_t *write_protect, u_int8_t *speed,
 				    int *comp_supported, int *comp_enabled,
 				    u_int32_t *comp_algorithm,
-				    sa_comp_t *comp_page);
+				    sa_comp_t *comp_page,
+				    struct scsi_control_data_prot_subpage
+				    *prot_page, int dp_size,
+				    int prot_changeable);
+static int		sasetprot(struct cam_periph *periph,
+				  struct sa_prot_state *new_prot);
 static int		sasetparams(struct cam_periph *periph,
 				    sa_params params_to_set,
 				    u_int32_t blocksize, u_int8_t density,
 				    u_int32_t comp_algorithm,
 				    u_int32_t sense_flags);
+static int		sasetsili(struct cam_periph *periph,
+				  struct mtparamset *ps, int num_params);
+static int		saseteotwarn(struct cam_periph *periph,
+				     struct mtparamset *ps, int num_params);
+static void		safillprot(struct sa_softc *softc, int *indent,
+				   struct sbuf *sb);
+static void		sapopulateprots(struct sa_prot_state *cur_state,
+					struct sa_prot_map *new_table,
+					int table_ents);
+static struct sa_prot_map *safindprotent(char *name, struct sa_prot_map *table,
+					 int table_ents);
+static int		sasetprotents(struct cam_periph *periph,
+				      struct mtparamset *ps, int num_params);
+static struct sa_param_ent *safindparament(struct mtparamset *ps);
+static int		saparamsetlist(struct cam_periph *periph,
+				       struct mtsetlist *list, int need_copy);
+static	int		saextget(struct cdev *dev, struct cam_periph *periph,
+				 struct sbuf *sb, struct mtextget *g);
+static	int		saparamget(struct sa_softc *softc, struct sbuf *sb);
 static void		saprevent(struct cam_periph *periph, int action);
 static int		sarewind(struct cam_periph *periph);
 static int		saspace(struct cam_periph *periph, int count,
 				scsi_space_code code);
+static void		sadevgonecb(void *arg);
+static void		sasetupdev(struct sa_softc *softc, struct cdev *dev);
 static int		samount(struct cam_periph *, int, struct cdev *);
 static int		saretension(struct cam_periph *periph);
 static int		sareservereleaseunit(struct cam_periph *periph,
@@ -419,11 +593,34 @@
 static int		saloadunload(struct cam_periph *periph, int load);
 static int		saerase(struct cam_periph *periph, int longerase);
 static int		sawritefilemarks(struct cam_periph *periph,
-					 int nmarks, int setmarks);
+					 int nmarks, int setmarks, int immed);
+static int		sagetpos(struct cam_periph *periph);
 static int		sardpos(struct cam_periph *periph, int, u_int32_t *);
-static int		sasetpos(struct cam_periph *periph, int, u_int32_t *);
+static int		sasetpos(struct cam_periph *periph, int, 
+				 struct mtlocate *);
+static void		safilldenstypesb(struct sbuf *sb, int *indent,
+					 uint8_t *buf, int buf_len,
+					 int is_density);
+static void		safilldensitysb(struct sa_softc *softc, int *indent,
+					struct sbuf *sb);
 
 
+#ifndef	SA_DEFAULT_IO_SPLIT
+#define	SA_DEFAULT_IO_SPLIT	0
+#endif
+
+static int sa_allow_io_split = SA_DEFAULT_IO_SPLIT;
+
+/*
+ * Tunable to allow the user to set a global allow_io_split value.  Note
+ * that this WILL GO AWAY in FreeBSD 11.0.  Silently splitting the I/O up
+ * is bad behavior, because it hides the true tape block size from the
+ * application.
+ */
+TUNABLE_INT("kern.cam.sa.allow_io_split", &sa_allow_io_split);
+static SYSCTL_NODE(_kern_cam, OID_AUTO, sa, CTLFLAG_RD, 0,
+		  "CAM Sequential Access Tape Driver");
+
 static struct periph_driver sadriver =
 {
 	sainit, "sa",
@@ -447,7 +644,7 @@
 	.d_ioctl =	saioctl,
 	.d_strategy =	sastrategy,
 	.d_name =	"sa",
-	.d_flags =	D_TAPE | D_NEEDGIANT,
+	.d_flags =	D_TAPE | D_TRACKCLOSE,
 };
 
 static int
@@ -471,6 +668,7 @@
 
 	if (SA_IS_CTRL(dev)) {
 		softc->ctrl_mode = 1;
+		softc->open_count++;
 		cam_periph_unlock(periph);
 		return (0);
 	}
@@ -502,6 +700,7 @@
 		if (error && (flags & O_NONBLOCK)) {
 			softc->flags |= SA_FLAG_OPEN;
 			softc->open_pending_mount = 1;
+			softc->open_count++;
 			cam_periph_unhold(periph);
 			cam_periph_unlock(periph);
 			return (0);
@@ -517,6 +716,7 @@
 
 	saprevent(periph, PR_PREVENT);
 	softc->flags |= SA_FLAG_OPEN;
+	softc->open_count++;
 
 	cam_periph_unhold(periph);
 	cam_periph_unlock(periph);
@@ -528,14 +728,11 @@
 {
 	struct	cam_periph *periph;
 	struct	sa_softc *softc;
-	int	mode, error, writing, tmp;
+	int	mode, error, writing, tmp, i;
 	int	closedbits = SA_FLAG_OPEN;
 
 	mode = SAMODE(dev);
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return (ENXIO);	
-
 	cam_periph_lock(periph);
 
 	softc = (struct sa_softc *)periph->softc;
@@ -547,6 +744,7 @@
 	softc->open_rdonly = 0; 
 	if (SA_IS_CTRL(dev)) {
 		softc->ctrl_mode = 0;
+		softc->open_count--;
 		cam_periph_unlock(periph);
 		cam_periph_release(periph);
 		return (0);
@@ -555,6 +753,7 @@
 	if (softc->open_pending_mount) {
 		softc->flags &= ~SA_FLAG_OPEN;
 		softc->open_pending_mount = 0; 
+		softc->open_count--;
 		cam_periph_unlock(periph);
 		cam_periph_release(periph);
 		return (0);
@@ -659,8 +858,18 @@
 	 * And we are no longer open for business.
 	 */
 	softc->flags &= ~closedbits;
+	softc->open_count--;
 
 	/*
+	 * Invalidate any density information that depends on having tape
+	 * media in the drive.
+	 */
+	for (i = 0; i < SA_DENSITY_TYPES; i++) {
+		if (softc->density_type_bits[i] & SRDS_MEDIA)
+			softc->density_info_valid[i] = 0;
+	}
+
+	/*
 	 * Inform users if tape state if frozen....
 	 */
 	if (softc->flags & SA_FLAG_TAPE_FROZEN) {
@@ -696,10 +905,6 @@
 		return;
 	}
 	periph = (struct cam_periph *)bp->bio_dev->si_drv1;
-	if (periph == NULL) {
-		biofinish(bp, NULL, ENXIO);
-		return;
-	}
 	cam_periph_lock(periph);
 
 	softc = (struct sa_softc *)periph->softc;
@@ -807,7 +1012,481 @@
 	return;
 }
 
+static int
+sasetsili(struct cam_periph *periph, struct mtparamset *ps, int num_params)
+{
+	uint32_t sili_blocksize;
+	struct sa_softc *softc;
+	int error;
 
+	error = 0;
+	softc = (struct sa_softc *)periph->softc;
+
+	if (ps->value_type != MT_PARAM_SET_SIGNED) {
+		snprintf(ps->error_str, sizeof(ps->error_str),
+		    "sili is a signed parameter");
+		goto bailout_error;
+	}
+	if ((ps->value.value_signed < 0)
+	 || (ps->value.value_signed > 1)) {
+		snprintf(ps->error_str, sizeof(ps->error_str),
+		    "invalid sili value %jd", (intmax_t)ps->value.value_signed);
+		goto bailout_error;
+	}
+	/*
+	 * We only set the SILI flag in variable block
+	 * mode.  You'll get a check condition in fixed
+	 * block mode if things don't line up in any case.
+	 */
+	if (softc->flags & SA_FLAG_FIXED) {
+		snprintf(ps->error_str, sizeof(ps->error_str),
+		    "can't set sili bit in fixed block mode");
+		goto bailout_error;
+	}
+	if (softc->sili == ps->value.value_signed)
+		goto bailout;
+
+	if (ps->value.value_signed == 1)
+		sili_blocksize = 4;
+	else
+		sili_blocksize = 0;
+
+	error = sasetparams(periph, SA_PARAM_BLOCKSIZE,
+			    sili_blocksize, 0, 0, SF_QUIET_IR);
+	if (error != 0) {
+		snprintf(ps->error_str, sizeof(ps->error_str),
+		    "sasetparams() returned error %d", error);
+		goto bailout_error;
+	}
+
+	softc->sili = ps->value.value_signed;
+
+bailout:
+	ps->status = MT_PARAM_STATUS_OK;
+	return (error);
+
+bailout_error:
+	ps->status = MT_PARAM_STATUS_ERROR;
+	if (error == 0)
+		error = EINVAL;
+
+	return (error);
+}
+
+static int
+saseteotwarn(struct cam_periph *periph, struct mtparamset *ps, int num_params)
+{
+	struct sa_softc *softc;
+	int error;
+
+	error = 0;
+	softc = (struct sa_softc *)periph->softc;
+
+	if (ps->value_type != MT_PARAM_SET_SIGNED) {
+		snprintf(ps->error_str, sizeof(ps->error_str),
+		    "eot_warn is a signed parameter");
+		ps->status = MT_PARAM_STATUS_ERROR;
+		goto bailout;
+	}
+	if ((ps->value.value_signed < 0)
+	 || (ps->value.value_signed > 1)) {
+		snprintf(ps->error_str, sizeof(ps->error_str),
+		    "invalid eot_warn value %jd\n",
+		    (intmax_t)ps->value.value_signed);
+		ps->status = MT_PARAM_STATUS_ERROR;
+		goto bailout;
+	}
+	softc->eot_warn = ps->value.value_signed;
+	ps->status = MT_PARAM_STATUS_OK;
+bailout:
+	if (ps->status != MT_PARAM_STATUS_OK)
+		error = EINVAL;
+
+	return (error);
+}
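
Both handlers above consume a single mtparamset entry. A minimal userland sketch of driving them through the MTIOCPARAMSET ioctl added below; the device path and header are assumptions, while the struct fields and constants are the ones used in this commit:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/mtio.h>

	#include <err.h>
	#include <fcntl.h>
	#include <string.h>

	int
	main(void)
	{
		struct mtparamset ps;
		int fd;

		if ((fd = open("/dev/sa0.ctl", O_RDWR)) == -1)
			err(1, "open");

		memset(&ps, 0, sizeof(ps));
		strlcpy(ps.value_name, "eot_warn", sizeof(ps.value_name));
		ps.value_type = MT_PARAM_SET_SIGNED;
		ps.value.value_signed = 1;	/* ENOSPC at early warning */

		if (ioctl(fd, MTIOCPARAMSET, &ps) == -1)
			err(1, "MTIOCPARAMSET");
		if (ps.status != MT_PARAM_STATUS_OK)
			errx(1, "%s", ps.error_str);
		return (0);
	}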
+
+
+static void
+safillprot(struct sa_softc *softc, int *indent, struct sbuf *sb)
+{
+	int tmpint;
+
+	SASBADDNODE(sb, *indent, protection);
+	if (softc->flags & SA_FLAG_PROTECT_SUPP)
+		tmpint = 1;
+	else
+		tmpint = 0;
+	SASBADDINTDESC(sb, *indent, tmpint, %d, protection_supported,
+	    "Set to 1 if protection information is supported");
+
+	if ((tmpint != 0)
+	 && (softc->prot_info.cur_prot_state.initialized != 0)) {
+		struct sa_prot_state *prot;
+
+		prot = &softc->prot_info.cur_prot_state;
+
+		SASBADDUINTDESC(sb, *indent, prot->prot_method, %u,
+		    prot_method, "Current Protection Method");
+		SASBADDUINTDESC(sb, *indent, prot->pi_length, %u,
+		    pi_length, "Length of Protection Information");
+		SASBADDUINTDESC(sb, *indent, prot->lbp_w, %u,
+		    lbp_w, "Check Protection on Writes");
+		SASBADDUINTDESC(sb, *indent, prot->lbp_r, %u,
+		    lbp_r, "Check and Include Protection on Reads");
+		SASBADDUINTDESC(sb, *indent, prot->rbdp, %u,
+		    rbdp, "Transfer Protection Information for RECOVER "
+		    "BUFFERED DATA command");
+	}
+	SASBENDNODE(sb, *indent, protection);
+}
+
+static void
+sapopulateprots(struct sa_prot_state *cur_state, struct sa_prot_map *new_table,
+    int table_ents)
+{
+	int i;
+
+	bcopy(sa_prot_table, new_table, min(table_ents * sizeof(*new_table),
+	    sizeof(sa_prot_table)));
+
+	table_ents = min(table_ents, SA_NUM_PROT_ENTS);
+
+	for (i = 0; i < table_ents; i++)
+		new_table[i].value = (uint32_t *)((uint8_t *)cur_state +
+		    new_table[i].offset);
+
+	return;
+}
+
+static struct sa_prot_map *
+safindprotent(char *name, struct sa_prot_map *table, int table_ents)
+{
+	char *prot_name = "protection.";
+	int i, prot_len;
+
+	prot_len = strlen(prot_name);
+
+	/*
+	 * This shouldn't happen, but we check just in case.
+	 */
+	if (strncmp(name, prot_name, prot_len) != 0)
+		goto bailout;
+
+	for (i = 0; i < table_ents; i++) {
+		if (strcmp(&name[prot_len], table[i].name) != 0)
+			continue;
+		return (&table[i]);
+	}
+bailout:
+	return (NULL);
+}
+
+static int
+sasetprotents(struct cam_periph *periph, struct mtparamset *ps, int num_params)
+{
+	struct sa_softc *softc;
+	struct sa_prot_map prot_ents[SA_NUM_PROT_ENTS];
+	struct sa_prot_state new_state;
+	int error;
+	int i;
+
+	softc = (struct sa_softc *)periph->softc;
+	error = 0;
+
+	/*
+	 * Make sure that this tape drive supports protection information.
+	 * Otherwise we can't set anything.
+	 */
+	if ((softc->flags & SA_FLAG_PROTECT_SUPP) == 0) {
+		snprintf(ps[0].error_str, sizeof(ps[0].error_str),
+		    "Protection information is not supported for this device");
+		ps[0].status = MT_PARAM_STATUS_ERROR;
+		goto bailout;
+	}
+
+	/*
+	 * We can't operate with physio(9) splitting enabled, because there
+	 * is no way to ensure (especially in variable block mode) that
+	 * what the user writes (with a checksum block at the end) will 
+	 * make it into the sa(4) driver intact.
+	 */
+	if ((softc->si_flags & SI_NOSPLIT) == 0) {
+		snprintf(ps[0].error_str, sizeof(ps[0].error_str),
+		    "Protection information cannot be enabled with I/O "
+		    "splitting");
+		ps[0].status = MT_PARAM_STATUS_ERROR;
+		goto bailout;
+	}
+
+	/*
+	 * Take the current cached protection state and use that as the
+	 * basis for our new entries.
+	 */
+	bcopy(&softc->prot_info.cur_prot_state, &new_state, sizeof(new_state));
+
+	/*
+	 * Populate the table mapping property names to pointers into the
+	 * state structure.
+	 */
+	sapopulateprots(&new_state, prot_ents, SA_NUM_PROT_ENTS);
+
+	/*
+	 * For each parameter the user passed in, make sure the name, type
+	 * and value are valid.
+	 */
+	for (i = 0; i < num_params; i++) {
+		struct sa_prot_map *ent;
+
+		ent = safindprotent(ps[i].value_name, prot_ents,
+		    SA_NUM_PROT_ENTS);
+		if (ent == NULL) {
+			ps[i].status = MT_PARAM_STATUS_ERROR;
+			snprintf(ps[i].error_str, sizeof(ps[i].error_str),
+			    "Invalid protection entry name %s",
+			    ps[i].value_name);
+			error = EINVAL;
+			goto bailout;
+		}
+		if (ent->param_type != ps[i].value_type) {
+			ps[i].status = MT_PARAM_STATUS_ERROR;
+			snprintf(ps[i].error_str, sizeof(ps[i].error_str),
+			    "Supplied type %d does not match actual type %d",
+			    ps[i].value_type, ent->param_type);
+			error = EINVAL;
+			goto bailout;
+		}
+		if ((ps[i].value.value_unsigned < ent->min_val)
+		 || (ps[i].value.value_unsigned > ent->max_val)) {
+			ps[i].status = MT_PARAM_STATUS_ERROR;
+			snprintf(ps[i].error_str, sizeof(ps[i].error_str),
+			    "Value %ju is outside valid range %u - %u",
+			    (uintmax_t)ps[i].value.value_unsigned, ent->min_val,
+			    ent->max_val);
+			error = EINVAL;
+			goto bailout;
+		}
+		*(ent->value) = ps[i].value.value_unsigned;
+	}
+
+	/*
+	 * Actually send the protection settings to the drive.
+	 */
+	error = sasetprot(periph, &new_state);
+	if (error != 0) {
+		for (i = 0; i < num_params; i++) {
+			ps[i].status = MT_PARAM_STATUS_ERROR;
+			snprintf(ps[i].error_str, sizeof(ps[i].error_str),
+			    "Unable to set parameter, see dmesg(8)");
+		}
+		goto bailout;
+	}
+
+	/*
+	 * Let the user know that his settings were stored successfully.
+	 */
+	for (i = 0; i < num_params; i++)
+		ps[i].status = MT_PARAM_STATUS_OK;
+
+bailout:
+	return (error);
+}
+/*
+ * Entry handlers generally only handle a single entry.  Node handlers will
+ * handle a contiguous range of parameters to set in a single call.
+ */
+typedef enum {
+	SA_PARAM_TYPE_ENTRY,
+	SA_PARAM_TYPE_NODE
+} sa_param_type;
+
+struct sa_param_ent {
+	char *name;
+	sa_param_type param_type;
+	int (*set_func)(struct cam_periph *periph, struct mtparamset *ps,
+			int num_params);
+} sa_param_table[] = {
+	{"sili", SA_PARAM_TYPE_ENTRY, sasetsili },
+	{"eot_warn", SA_PARAM_TYPE_ENTRY, saseteotwarn },
+	{"protection.", SA_PARAM_TYPE_NODE, sasetprotents }
+};
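
To make the dispatch concrete, this is how parameter names route through safindparament() below, per the table above:

	/*
	 *	"sili"              -> sasetsili      (ENTRY, exact match)
	 *	"eot_warn"          -> saseteotwarn   (ENTRY, exact match)
	 *	"protection.lbp_w"  -> sasetprotents  (NODE, prefix match)
	 *	"protection.rbdp"   -> sasetprotents  (NODE, prefix match)
	 */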
+
+static struct sa_param_ent *
+safindparament(struct mtparamset *ps)
+{
+	unsigned int i;
+
+	for (i = 0; i < sizeof(sa_param_table) /sizeof(sa_param_table[0]); i++){
+		/*
+		 * For entries, we compare all of the characters.  For
+		 * nodes, we only compare the first N characters.  The node
+		 * handler will decode the rest.
+		 */
+		if (sa_param_table[i].param_type == SA_PARAM_TYPE_ENTRY) {
+			if (strcmp(ps->value_name, sa_param_table[i].name) != 0)
+				continue;
+		} else {
+			if (strncmp(ps->value_name, sa_param_table[i].name,
+			    strlen(sa_param_table[i].name)) != 0)
+				continue;
+		}
+		return (&sa_param_table[i]);
+	}
+
+	return (NULL);
+}
+
+/*
+ * Go through a list of parameters, coalescing contiguous parameters with
+ * the same parent node into a single call to a set_func.
+ */
+static int
+saparamsetlist(struct cam_periph *periph, struct mtsetlist *list,
+    int need_copy)
+{
+	int i, contig_ents;
+	int error;
+	struct mtparamset *params, *first;
+	struct sa_param_ent *first_ent;
+
+	error = 0;
+	params = NULL;
+
+	if (list->num_params == 0)
+		/* Nothing to do */
+		goto bailout;
+
+	/*
+	 * Verify that the user has the correct structure size.
+	 */
+	if ((list->num_params * sizeof(struct mtparamset)) !=
+	     list->param_len) {
+		xpt_print(periph->path, "%s: length of params %d != "
+		    "sizeof(struct mtparamset) %zd * num_params %d\n",
+		    __func__, list->param_len, sizeof(struct mtparamset),
+		    list->num_params);
+		error = EINVAL;
+		goto bailout;
+	}
+
+	if (need_copy != 0) {
+		/*
+		 * XXX KDM will dropping the lock cause an issue here?
+		 */
+		cam_periph_unlock(periph);
+		params = malloc(list->param_len, M_SCSISA, M_WAITOK | M_ZERO);
+		error = copyin(list->params, params, list->param_len);
+		cam_periph_lock(periph);
+
+		if (error != 0)
+			goto bailout;
+	} else {
+		params = list->params;
+	}
+
+	contig_ents = 0;
+	first = NULL;
+	first_ent = NULL;
+	for (i = 0; i < list->num_params; i++) {
+		struct sa_param_ent *ent;
+
+		ent = safindparament(&params[i]);
+		if (ent == NULL) {
+			snprintf(params[i].error_str,
+			    sizeof(params[i].error_str),
+			    "%s: cannot find parameter %s", __func__,
+			    params[i].value_name);
+			params[i].status = MT_PARAM_STATUS_ERROR;
+			break;
+		}
+
+		if (first != NULL) {
+			if (first_ent == ent) {
+				/*
+				 * We're still in a contiguous list of
+				 * parameters that can be handled by one
+				 * node handler.
+				 */
+				contig_ents++;
+				continue;
+			} else {
+				error = first_ent->set_func(periph, first,
+				    contig_ents);
+				first = NULL;
+				first_ent = NULL;
+				contig_ents = 0;
+				if (error != 0) {
+					error = 0;
+					break;
+				}
+			}
+		}
+		if (ent->param_type == SA_PARAM_TYPE_NODE) {
+			first = &params[i];
+			first_ent = ent;
+			contig_ents = 1;
+		} else {
+			error = ent->set_func(periph, &params[i], 1);
+			if (error != 0) {
+				error = 0;
+				break;
+			}
+		}
+	}
+	if (first != NULL)
+		first_ent->set_func(periph, first, contig_ents);
+
+bailout:
+	if (need_copy != 0) {
+		if (error != EFAULT) {
+			cam_periph_unlock(periph);
+			copyout(params, list->params, list->param_len);
+			cam_periph_lock(periph);
+		}
+		free(params, M_SCSISA);
+	}
+	return (error);
+}
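
A worked example of the coalescing, on a hypothetical input list:

	/*
	 * Given params = { "protection.prot_method", "protection.lbp_w",
	 * "sili", "protection.lbp_r" }, the loop above issues:
	 *
	 *	sasetprotents(periph, &params[0], 2);	contiguous NODE run
	 *	sasetsili(periph, &params[2], 1);	single ENTRY
	 *	sasetprotents(periph, &params[3], 1);	flushed after the loop
	 */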
+
+static int
+sagetparams_common(struct cdev *dev, struct cam_periph *periph)
+{
+	struct sa_softc *softc;
+	u_int8_t write_protect;
+	int comp_enabled, comp_supported, error;
+
+	softc = (struct sa_softc *)periph->softc;
+
+	if (softc->open_pending_mount)
+		return (0);
+
+	/* The control device may issue getparams() if there are no opens. */
+	if (SA_IS_CTRL(dev) && (softc->flags & SA_FLAG_OPEN) != 0)
+		return (0);
+
+	error = sagetparams(periph, SA_PARAM_ALL, &softc->media_blksize,
+	    &softc->media_density, &softc->media_numblks, &softc->buffer_mode,
+	    &write_protect, &softc->speed, &comp_supported, &comp_enabled,
+	    &softc->comp_algorithm, NULL, NULL, 0, 0);
+	if (error)
+		return (error);
+	if (write_protect)
+		softc->flags |= SA_FLAG_TAPE_WP;
+	else
+		softc->flags &= ~SA_FLAG_TAPE_WP;
+	softc->flags &= ~SA_FLAG_COMPRESSION;
+	if (comp_supported) {
+		if (softc->saved_comp_algorithm == 0)
+			softc->saved_comp_algorithm =
+			    softc->comp_algorithm;
+		softc->flags |= SA_FLAG_COMP_SUPP;
+		if (comp_enabled)
+			softc->flags |= SA_FLAG_COMP_ENABLED;
+	} else  
+		softc->flags |= SA_FLAG_COMP_UNSUPP;
+
+	return (0);
+}
+
 #define	PENDING_MOUNT_CHECK(softc, periph, dev)		\
 	if (softc->open_pending_mount) {		\
 		error = samount(periph, 0, dev);	\
@@ -833,9 +1512,6 @@
 	spaceop = 0;		/* shut up gcc */
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return (ENXIO);	
-
 	cam_periph_lock(periph);
 	softc = (struct sa_softc *)periph->softc;
 
@@ -851,6 +1527,9 @@
 		switch (cmd) {
 		case MTIOCGETEOTMODEL:
 		case MTIOCGET:
+		case MTIOCEXTGET:
+		case MTIOCPARAMGET:
+		case MTIOCRBLIM:
 			break;
 		case MTIOCERRSTAT:
 			/*
@@ -922,36 +1601,9 @@
 	{
 		struct mtget *g = (struct mtget *)arg;
 
-		/*
-		 * If this isn't the control mode device, actually go out
-		 * and ask the drive again what it's set to.
-		 */
-		if (!SA_IS_CTRL(dev) && !softc->open_pending_mount) {
-			u_int8_t write_protect;
-			int comp_enabled, comp_supported;
-			error = sagetparams(periph, SA_PARAM_ALL,
-			    &softc->media_blksize, &softc->media_density,
-			    &softc->media_numblks, &softc->buffer_mode,
-			    &write_protect, &softc->speed, &comp_supported,
-			    &comp_enabled, &softc->comp_algorithm, NULL);
-			if (error)
-				break;
-			if (write_protect)
-				softc->flags |= SA_FLAG_TAPE_WP;
-			else
-				softc->flags &= ~SA_FLAG_TAPE_WP;
-			softc->flags &= ~(SA_FLAG_COMP_SUPP|
-			    SA_FLAG_COMP_ENABLED|SA_FLAG_COMP_UNSUPP);
-			if (comp_supported) {
-				if (softc->saved_comp_algorithm == 0)
-					softc->saved_comp_algorithm =
-					    softc->comp_algorithm;
-				softc->flags |= SA_FLAG_COMP_SUPP;
-				if (comp_enabled)
-					softc->flags |= SA_FLAG_COMP_ENABLED;
-			} else  
-				softc->flags |= SA_FLAG_COMP_UNSUPP;
-		}
+		error = sagetparams_common(dev, periph);
+		if (error)
+			break;
 		bzero(g, sizeof(struct mtget));
 		g->mt_type = MT_ISAR;
 		if (softc->flags & SA_FLAG_COMP_UNSUPP) {
@@ -1003,6 +1655,84 @@
 		error = 0;
 		break;
 	}
+	case MTIOCEXTGET:
+	case MTIOCPARAMGET:
+	{
+		struct mtextget *g = (struct mtextget *)arg;
+		char *tmpstr2;
+		struct sbuf *sb;
+
+		/*
+		 * Report drive status using an XML format.
+		 */
+
+		/*
+		 * XXX KDM will dropping the lock cause any problems here?
+		 */
+		cam_periph_unlock(periph);
+		sb = sbuf_new(NULL, NULL, g->alloc_len, SBUF_FIXEDLEN);
+		if (sb == NULL) {
+			g->status = MT_EXT_GET_ERROR;
+			snprintf(g->error_str, sizeof(g->error_str),
+				 "Unable to allocate %d bytes for status info",
+				 g->alloc_len);
+			cam_periph_lock(periph);
+			break;
+		}
+		cam_periph_lock(periph);
+
+		if (cmd == MTIOCEXTGET)
+			error = saextget(dev, periph, sb, g);
+		else
+			error = saparamget(softc, sb);
+
+		if (error != 0)
+			goto extget_bailout;
+
+		error = sbuf_finish(sb);
+		if (error == ENOMEM) {
+			g->status = MT_EXT_GET_NEED_MORE_SPACE;
+			error = 0;
+		} else if (error != 0) {
+			g->status = MT_EXT_GET_ERROR;
+			snprintf(g->error_str, sizeof(g->error_str),
+			    "Error %d returned from sbuf_finish()", error);
+		} else
+			g->status = MT_EXT_GET_OK;
+
+		error = 0;
+		tmpstr2 = sbuf_data(sb);
+		g->fill_len = strlen(tmpstr2) + 1;
+		cam_periph_unlock(periph);
+
+		error = copyout(tmpstr2, g->status_xml, g->fill_len);
+
+		cam_periph_lock(periph);
+
+extget_bailout:
+		sbuf_delete(sb);
+		break;
+	}
+	case MTIOCPARAMSET:
+	{
+		struct mtsetlist list;
+		struct mtparamset *ps = (struct mtparamset *)arg;
+		
+		bzero(&list, sizeof(list));
+		list.num_params = 1;
+		list.param_len = sizeof(*ps);
+		list.params = ps;
+
+		error = saparamsetlist(periph, &list, /*need_copy*/ 0);
+		break;
+	}
+	case MTIOCSETLIST:
+	{
+		struct mtsetlist *list = (struct mtsetlist *)arg;
+
+		error = saparamsetlist(periph, list, /*need_copy*/ 1);
+		break;
+	}
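
A userland sketch of the MTIOCEXTGET handshake, including the grow-and-retry dance for MT_EXT_GET_NEED_MORE_SPACE; the device path and initial buffer size are assumptions, the struct fields and constants are the ones used in this commit:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/mtio.h>

	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int
	main(void)
	{
		struct mtextget g;
		char *buf = NULL;
		int fd, len = 4096;

		if ((fd = open("/dev/sa0.ctl", O_RDONLY)) == -1)
			err(1, "open");
		do {
			if ((buf = realloc(buf, len)) == NULL)
				err(1, "realloc");
			memset(&g, 0, sizeof(g));
			g.alloc_len = len;
			g.status_xml = buf;
			if (ioctl(fd, MTIOCEXTGET, &g) == -1)
				err(1, "MTIOCEXTGET");
			len *= 2;	/* too small? double and retry */
		} while (g.status == MT_EXT_GET_NEED_MORE_SPACE);
		if (g.status != MT_EXT_GET_OK)
			errx(1, "%s", g.error_str);
		printf("%s", buf);	/* XML status document */
		return (0);
	}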
 	case MTIOCERRSTAT:
 	{
 		struct scsi_tape_errors *sep =
@@ -1023,7 +1753,7 @@
 		bcopy((caddr_t) &softc->last_ctl_cdb, sep->ctl_cdb,
 		    sizeof (sep->ctl_cdb));
 
-		if ((SA_IS_CTRL(dev) == 0 && softc->open_pending_mount) ||
+		if ((SA_IS_CTRL(dev) == 0 && !softc->open_pending_mount) ||
 		    didlockperiph)
 			bzero((caddr_t) &softc->errinfo,
 			    sizeof (softc->errinfo));
@@ -1050,13 +1780,17 @@
 			/*
 			 * We don't need to clear the SA_FLAG_TAPE_WRITTEN
 			 * flag because by keeping track of filemarks
-			 * we have last written we know ehether or not
+			 * we have last written we know whether or not
 			 * we need to write more when we close the device.
 			 */
-			error = sawritefilemarks(periph, count, FALSE);
+			error = sawritefilemarks(periph, count, FALSE, FALSE);
 			break;
+		case MTWEOFI:
+			/* write an end-of-file marker without waiting */
+			error = sawritefilemarks(periph, count, FALSE, TRUE);
+			break;
 		case MTWSS:	/* write a setmark */
-			error = sawritefilemarks(periph, count, TRUE);
+			error = sawritefilemarks(periph, count, TRUE, FALSE);
 			break;
 		case MTBSR:	/* backward space record */
 		case MTFSR:	/* forward space record */
@@ -1181,6 +1915,9 @@
 			}
 			break;
 
+		case MTLOAD:
+			error = saloadunload(periph, TRUE);
+			break;
 		case MTNOP:	/* no operation, sets status only */
 		case MTCACHE:	/* enable controller cache */
 		case MTNOCACHE:	/* disable controller cache */
@@ -1191,6 +1928,13 @@
 
 			PENDING_MOUNT_CHECK(softc, periph, dev);
 
+			if ((softc->sili != 0)
+			 && (count != 0)) {
+				xpt_print(periph->path, "Can't enter fixed "
+				    "block mode with SILI enabled\n");
+				error = EINVAL;
+				break;
+			}
 			error = sasetparams(periph, SA_PARAM_BLOCKSIZE, count,
 					    0, 0, 0);
 			if (error == 0) {
@@ -1276,12 +2020,29 @@
 		error = sardpos(periph, 1, (u_int32_t *) arg);
 		break;
 	case MTIOCSLOCATE:
+	case MTIOCHLOCATE: {
+		struct mtlocate locate_info;
+		int hard;
+
+		bzero(&locate_info, sizeof(locate_info));
+		locate_info.logical_id = *((uint32_t *)arg);
+		if (cmd == MTIOCSLOCATE)
+			hard = 0;
+		else
+			hard = 1;
+
 		PENDING_MOUNT_CHECK(softc, periph, dev);
-		error = sasetpos(periph, 0, (u_int32_t *) arg);
+
+		error = sasetpos(periph, hard, &locate_info);
 		break;
-	case MTIOCHLOCATE:
+	}
+	case MTIOCEXTLOCATE:
 		PENDING_MOUNT_CHECK(softc, periph, dev);
-		error = sasetpos(periph, 1, (u_int32_t *) arg);
+		error = sasetpos(periph, /*hard*/ 0, (struct mtlocate *)arg);
+		softc->flags &=
+		    ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN);
+		softc->flags &= ~SA_FLAG_ERR_PENDING;
+		softc->filemarks = 0;
 		break;
 	case MTIOCGETEOTMODEL:
 		error = 0;
@@ -1307,6 +2068,16 @@
 			break;
 		}
 		break;
+	case MTIOCRBLIM: {
+		struct mtrblim *rblim;
+
+		rblim = (struct mtrblim *)arg;
+
+		rblim->granularity = softc->blk_gran;
+		rblim->min_block_length = softc->min_blk;
+		rblim->max_block_length = softc->max_blk;
+		break;
+	}
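
And the matching read of the block limits just added (a fragment reusing fd from the sketch above; field names as in this commit):

	struct mtrblim rblim;

	if (ioctl(fd, MTIOCRBLIM, &rblim) == -1)
		err(1, "MTIOCRBLIM");
	printf("granularity %u, block length %u-%u\n",
	    rblim.granularity, rblim.min_block_length,
	    rblim.max_block_length);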
 	default:
 		error = cam_periph_ioctl(periph, cmd, arg, saerror);
 		break;
@@ -1321,8 +2092,14 @@
 		case MTIOCRDHPOS:
 		case MTIOCSLOCATE:
 		case MTIOCHLOCATE:
+			/*
+			 * XXX KDM look at this.
+			 */
 			softc->fileno = (daddr_t) -1;
 			softc->blkno = (daddr_t) -1;
+			softc->rep_blkno = (daddr_t) -1;
+			softc->rep_fileno = (daddr_t) -1;
+			softc->partition = (daddr_t) -1;
 			softc->flags &= ~SA_FLAG_TAPE_FROZEN;
 			xpt_print(periph->path,
 			    "tape state now unfrozen.\n");
@@ -1355,6 +2132,51 @@
 }
 
 static void
+sadevgonecb(void *arg)
+{
+	struct cam_periph *periph;
+	struct mtx *mtx;
+	struct sa_softc *softc;
+
+	periph = (struct cam_periph *)arg;
+	softc = (struct sa_softc *)periph->softc;
+
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
+
+	softc->num_devs_to_destroy--;
+	if (softc->num_devs_to_destroy == 0) {
+		int i;
+
+		/*
+		 * When we have gotten all of our callbacks, we will get
+		 * no more close calls from devfs.  So if we have any
+		 * dangling opens, we need to release the reference held
+		 * for that particular context.
+		 */
+		for (i = 0; i < softc->open_count; i++)
+			cam_periph_release_locked(periph);
+
+		softc->open_count = 0;
+
+		/*
+		 * Release the reference held for devfs, all of our
+		 * instances are gone now.
+		 */
+		cam_periph_release_locked(periph);
+	}
+
+	/*
+	 * We reference the lock directly here, instead of using
+	 * cam_periph_unlock().  The reason is that the final call to
+	 * cam_periph_release_locked() above could result in the periph
+	 * getting freed.  If that is the case, dereferencing the periph
+	 * with a cam_periph_unlock() call would cause a page fault.
+	 */
+	mtx_unlock(mtx);
+}
+
+static void
 saoninvalidate(struct cam_periph *periph)
 {
 	struct sa_softc *softc;
@@ -1376,8 +2198,14 @@
 	bioq_flush(&softc->bio_queue, NULL, ENXIO);
 	softc->queue_count = 0;
 
-	xpt_print(periph->path, "lost device\n");
-
+	/*
+	 * Tell devfs that all of our devices have gone away, and ask for a
+	 * callback when it has cleaned up its state.
+	 */
+	destroy_dev_sched_cb(softc->devs.ctl_dev, sadevgonecb, periph);
+	destroy_dev_sched_cb(softc->devs.r_dev, sadevgonecb, periph);
+	destroy_dev_sched_cb(softc->devs.nr_dev, sadevgonecb, periph);
+	destroy_dev_sched_cb(softc->devs.er_dev, sadevgonecb, periph);
 }
 
 static void
@@ -1384,20 +2212,19 @@
 sacleanup(struct cam_periph *periph)
 {
 	struct sa_softc *softc;
-	int i;
 
 	softc = (struct sa_softc *)periph->softc;
 
-	xpt_print(periph->path, "removing device entry\n");
-	devstat_remove_entry(softc->device_stats);
 	cam_periph_unlock(periph);
-	destroy_dev(softc->devs.ctl_dev);
-	for (i = 0; i < SA_NUM_MODES; i++) {
-		destroy_dev(softc->devs.mode_devs[i].r_dev);
-		destroy_dev(softc->devs.mode_devs[i].nr_dev);
-		destroy_dev(softc->devs.mode_devs[i].er_dev);
-	}
+
+	if ((softc->flags & SA_FLAG_SCTX_INIT) != 0
+	 && sysctl_ctx_free(&softc->sysctl_ctx) != 0)
+		xpt_print(periph->path, "can't remove sysctl context\n");
+
 	cam_periph_lock(periph);
+
+	devstat_remove_entry(softc->device_stats);
+
 	free(softc, M_SCSISA);
 }
 
@@ -1420,7 +2247,8 @@
 
 		if (cgd->protocol != PROTO_SCSI)
 			break;
-
+		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
+			break;
 		if (SID_TYPE(&cgd->inq_data) != T_SEQUENTIAL)
 			break;
 
@@ -1431,7 +2259,7 @@
 		 */
 		status = cam_periph_alloc(saregister, saoninvalidate,
 					  sacleanup, sastart,
-					  "sa", CAM_PERIPH_BIO, cgd->ccb_h.path,
+					  "sa", CAM_PERIPH_BIO, path,
 					  saasync, AC_FOUND_DEVICE, cgd);
 
 		if (status != CAM_REQ_CMP
@@ -1446,6 +2274,68 @@
 	}
 }
 
+static void
+sasetupdev(struct sa_softc *softc, struct cdev *dev)
+{
+
+	dev->si_iosize_max = softc->maxio;
+	dev->si_flags |= softc->si_flags;
+	/*
+	 * Keep a count of how many non-alias devices we have created,
+	 * so we can make sure we clean them all up on shutdown.  Aliases
+	 * are cleaned up when we destroy the device they're an alias for.
+	 */
+	if ((dev->si_flags & SI_ALIAS) == 0)
+		softc->num_devs_to_destroy++;
+}
+
+static void
+sasysctlinit(void *context, int pending)
+{
+	struct cam_periph *periph;
+	struct sa_softc *softc;
+	char tmpstr[80], tmpstr2[80];
+
+	periph = (struct cam_periph *)context;
+	/*
+	 * If the periph is invalid, no need to setup the sysctls.
+	 */
+	if (periph->flags & CAM_PERIPH_INVALID)
+		goto bailout;
+
+	softc = (struct sa_softc *)periph->softc;
+
+	snprintf(tmpstr, sizeof(tmpstr), "CAM SA unit %d", periph->unit_number);
+	snprintf(tmpstr2, sizeof(tmpstr2), "%u", periph->unit_number);
+
+	sysctl_ctx_init(&softc->sysctl_ctx);
+	softc->flags |= SA_FLAG_SCTX_INIT;
+	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
+	    SYSCTL_STATIC_CHILDREN(_kern_cam_sa), OID_AUTO, tmpstr2,
+                    CTLFLAG_RD, 0, tmpstr);
+	if (softc->sysctl_tree == NULL)
+		goto bailout;
+
+	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "allow_io_split", CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 
+	    &softc->allow_io_split, 0, "Allow Splitting I/O");
+	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "maxio", CTLFLAG_RD, 
+	    &softc->maxio, 0, "Maximum I/O size");
+	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "cpi_maxio", CTLFLAG_RD, 
+	    &softc->cpi_maxio, 0, "Maximum Controller I/O size");
+	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "inject_eom", CTLFLAG_RW, 
+	    &softc->inject_eom, 0, "Queue EOM for the next write/read");
+
+bailout:
+	/*
+	 * Release the reference that was held when this task was enqueued.
+	 */
+	cam_periph_release(periph);
+}
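
The resulting per-unit OIDs can be read (or, for inject_eom, written) with sysctl(3); a sketch assuming unit 0:

	#include <sys/types.h>
	#include <sys/sysctl.h>

	#include <err.h>
	#include <stdio.h>

	int
	main(void)
	{
		int maxio, one = 1;
		size_t len = sizeof(maxio);

		if (sysctlbyname("kern.cam.sa.0.maxio", &maxio, &len,
		    NULL, 0) == -1)
			err(1, "maxio");
		printf("sa0 maxio: %d\n", maxio);

		/* Arm a simulated end-of-media for the next read/write. */
		if (sysctlbyname("kern.cam.sa.0.inject_eom", NULL, NULL,
		    &one, sizeof(one)) == -1)
			err(1, "inject_eom");
		return (0);
	}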
+
 static cam_status
 saregister(struct cam_periph *periph, void *arg)
 {
@@ -1452,8 +2342,10 @@
 	struct sa_softc *softc;
 	struct ccb_getdev *cgd;
 	struct ccb_pathinq cpi;
+	struct make_dev_args args;
 	caddr_t match;
-	int i;
+	char tmpstr[80];
+	int error;
 	
 	cgd = (struct ccb_getdev *)arg;
 	if (cgd == NULL) {
@@ -1472,8 +2364,15 @@
 	softc->state = SA_STATE_NORMAL;
 	softc->fileno = (daddr_t) -1;
 	softc->blkno = (daddr_t) -1;
+	softc->rep_fileno = (daddr_t) -1;
+	softc->rep_blkno = (daddr_t) -1;
+	softc->partition = (daddr_t) -1;
+	softc->bop = -1;
+	softc->eop = -1;
+	softc->bpew = -1;
 
 	bioq_init(&softc->bio_queue);
+	softc->periph = periph;
 	periph->softc = softc;
 
 	/*
@@ -1491,6 +2390,41 @@
 	} else
 		softc->quirks = SA_QUIRK_NONE;
 
+	/*
+	 * Long format data for READ POSITION was introduced in SSC, which
+	 * was after SCSI-2.  (Roughly equivalent to SCSI-3.)  If the drive
+	 * reports that it is SCSI-2 or older, it is unlikely to support
+	 * long position data, but it might.  Some drives from that era
+	 * claim to be SCSI-2, but do support long position information.
+	 * So, instead of immediately disabling long position information
+	 * for SCSI-2 devices, we'll try one pass through sagetpos(), and 
+	 * then disable long position information if we get an error.   
+	 */
+	if (cgd->inq_data.version <= SCSI_REV_CCS)
+		softc->quirks |= SA_QUIRK_NO_LONG_POS;
+
+	if (cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) {
+		struct ccb_dev_advinfo cdai;
+		struct scsi_vpd_extended_inquiry_data ext_inq;
+
+		bzero(&ext_inq, sizeof(ext_inq));
+
+		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
+
+		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
+		cdai.flags = CDAI_FLAG_NONE;
+		cdai.buftype = CDAI_TYPE_EXT_INQ;
+		cdai.bufsiz = sizeof(ext_inq);
+		cdai.buf = (uint8_t *)&ext_inq;
+		xpt_action((union ccb *)&cdai);
+
+		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
+			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
+		if ((cdai.ccb_h.status == CAM_REQ_CMP)
+		 && (ext_inq.flags1 & SVPD_EID_SA_SPT_LBP))
+			softc->flags |= SA_FLAG_PROTECT_SUPP;
+	}
+
 	bzero(&cpi, sizeof(cpi));
 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 	cpi.ccb_h.func_code = XPT_PATH_INQ;
@@ -1506,51 +2440,126 @@
 	    DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) |
 	    XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_TAPE);
 
-	softc->devs.ctl_dev = make_dev(&sa_cdevsw, SAMINOR(SA_CTLDEV,
-	    0, SA_ATYPE_R), UID_ROOT, GID_OPERATOR,
-	    0660, "%s%d.ctl", periph->periph_name, periph->unit_number);
-	softc->devs.ctl_dev->si_drv1 = periph;
+	/*
+	 * Load the default value that is either compiled in, or loaded 
+	 * in the global kern.cam.sa.allow_io_split tunable.
+	 */
+	softc->allow_io_split = sa_allow_io_split;
 
-	for (i = 0; i < SA_NUM_MODES; i++) {
+	/*
+	 * Load a per-instance tunable, if it exists.  NOTE that this
+	 * tunable WILL GO AWAY in FreeBSD 11.0.
+	 */ 
+	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.sa.%u.allow_io_split",
+		 periph->unit_number);
+	TUNABLE_INT_FETCH(tmpstr, &softc->allow_io_split);
 
-		softc->devs.mode_devs[i].r_dev = make_dev(&sa_cdevsw,
-		    SAMINOR(SA_NOT_CTLDEV, i, SA_ATYPE_R),
-		    UID_ROOT, GID_OPERATOR, 0660, "%s%d.%d",
-		    periph->periph_name, periph->unit_number, i);
-		softc->devs.mode_devs[i].r_dev->si_drv1 = periph;
+	/*
+	 * If maxio isn't set, we fall back to DFLTPHYS.  Otherwise we take
+	 * the smaller of cpi.maxio or MAXPHYS.
+	 */
+	if (cpi.maxio == 0)
+		softc->maxio = DFLTPHYS;
+	else if (cpi.maxio > MAXPHYS)
+		softc->maxio = MAXPHYS;
+	else
+		softc->maxio = cpi.maxio;
 
-		softc->devs.mode_devs[i].nr_dev = make_dev(&sa_cdevsw,
-		    SAMINOR(SA_NOT_CTLDEV, i, SA_ATYPE_NR),
-		    UID_ROOT, GID_OPERATOR, 0660, "n%s%d.%d",
-		    periph->periph_name, periph->unit_number, i);
-		softc->devs.mode_devs[i].nr_dev->si_drv1 = periph;
+	/*
+	 * Record the controller's maximum I/O size so we can report it to
+	 * the user later.
+	 */
+	softc->cpi_maxio = cpi.maxio;
 
-		softc->devs.mode_devs[i].er_dev = make_dev(&sa_cdevsw,
-		    SAMINOR(SA_NOT_CTLDEV, i, SA_ATYPE_ER),
-		    UID_ROOT, GID_OPERATOR, 0660, "e%s%d.%d",
-		    periph->periph_name, periph->unit_number, i);
-		softc->devs.mode_devs[i].er_dev->si_drv1 = periph;
+	/*
+	 * By default we tell physio that we do not want our I/O split.
+	 * The user needs to have a 1:1 mapping between the size of his
+	 * write to a tape character device and the size of the write
+	 * that actually goes down to the drive.
+	 */
+	if (softc->allow_io_split == 0)
+		softc->si_flags = SI_NOSPLIT;
+	else
+		softc->si_flags = 0;
 
-		/*
-		 * Make the (well known) aliases for the first mode.
-		 */
-		if (i == 0) {
-			struct cdev *alias;
+	TASK_INIT(&softc->sysctl_task, 0, sasysctlinit, periph);
 
-			alias = make_dev_alias(softc->devs.mode_devs[i].r_dev,
-			   "%s%d", periph->periph_name, periph->unit_number);
-			alias->si_drv1 = periph;
-			alias = make_dev_alias(softc->devs.mode_devs[i].nr_dev,
-			    "n%s%d", periph->periph_name, periph->unit_number);
-			alias->si_drv1 = periph;
-			alias = make_dev_alias(softc->devs.mode_devs[i].er_dev,
-			    "e%s%d", periph->periph_name, periph->unit_number);
-			alias->si_drv1 = periph;
-		}
+	/*
+	 * If the SIM supports unmapped I/O, let physio know that we can
+	 * handle unmapped buffers.
+	 */
+	if (cpi.hba_misc & PIM_UNMAPPED)
+		softc->si_flags |= SI_UNMAPPED;
+
+	/*
+	 * Acquire a reference to the periph before we create the devfs
+	 * instances for it.  We'll release this reference once the devfs
+	 * instances have been freed.
+	 */
+	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
+		xpt_print(periph->path, "%s: lost periph during "
+			  "registration!\n", __func__);
+		cam_periph_lock(periph);
+		return (CAM_REQ_CMP_ERR);
 	}
+
+	make_dev_args_init(&args);
+	args.mda_devsw = &sa_cdevsw;
+	args.mda_si_drv1 = softc->periph;
+	args.mda_uid = UID_ROOT;
+	args.mda_gid = GID_OPERATOR;
+	args.mda_mode = 0660;
+
+	args.mda_unit = SAMINOR(SA_CTLDEV, SA_ATYPE_R);
+	error = make_dev_s(&args, &softc->devs.ctl_dev, "%s%d.ctl",
+	    periph->periph_name, periph->unit_number);
+	if (error != 0) {
+		cam_periph_lock(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
+	sasetupdev(softc, softc->devs.ctl_dev);
+
+	args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_R);
+	error = make_dev_s(&args, &softc->devs.r_dev, "%s%d",
+	    periph->periph_name, periph->unit_number);
+	if (error != 0) {
+		cam_periph_lock(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
+	sasetupdev(softc, softc->devs.r_dev);
+
+	args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_NR);
+	error = make_dev_s(&args, &softc->devs.nr_dev, "n%s%d",
+	    periph->periph_name, periph->unit_number);
+	if (error != 0) {
+		cam_periph_lock(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
+	sasetupdev(softc, softc->devs.nr_dev);
+
+	args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_ER);
+	error = make_dev_s(&args, &softc->devs.er_dev, "e%s%d",
+	    periph->periph_name, periph->unit_number);
+	if (error != 0) {
+		cam_periph_lock(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
+	sasetupdev(softc, softc->devs.er_dev);
+
 	cam_periph_lock(periph);
 
+	softc->density_type_bits[0] = 0;
+	softc->density_type_bits[1] = SRDS_MEDIA;
+	softc->density_type_bits[2] = SRDS_MEDIUM_TYPE;
+	softc->density_type_bits[3] = SRDS_MEDIUM_TYPE | SRDS_MEDIA;
 	/*
+	 * Bump the peripheral refcount for the sysctl thread, in case we
+	 * get invalidated before the thread has a chance to run.
+	 */
+	cam_periph_acquire(periph);
+	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
+
+	/*
 	 * Add an async callback so that we get
 	 * notified if this device goes away.
 	 */
@@ -1582,18 +2591,29 @@
 		 * See if there is a buf with work for us to do..
 		 */
 		bp = bioq_first(&softc->bio_queue);
-		if (periph->immediate_priority <= periph->pinfo.priority) {
-			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
-					("queuing for immediate ccb\n"));
-			Set_CCB_Type(start_ccb, SA_CCB_WAITING);
-			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-					  periph_links.sle);
-			periph->immediate_priority = CAM_PRIORITY_NONE;
-			wakeup(&periph->ccb_list);
-		} else if (bp == NULL) {
+		if (bp == NULL) {
 			xpt_release_ccb(start_ccb);
-		} else if ((softc->flags & SA_FLAG_ERR_PENDING) != 0) {
+		} else if (((softc->flags & SA_FLAG_ERR_PENDING) != 0)
+			|| (softc->inject_eom != 0)) {
 			struct bio *done_bp;
+
+			if (softc->inject_eom != 0) {
+				softc->flags |= SA_FLAG_EOM_PENDING;
+				softc->inject_eom = 0;
+				/*
+				 * If we're injecting EOM for writes, we
+				 * need to keep PEWS set for 3 queries
+				 * to cover 2 position requests from the
+				 * kernel via sagetpos(), and then allow
+				 * for one for the user to see the BPEW
+				 * flag (e.g. via mt status).  After that,
+				 * it will be cleared.
+				 */
+				if (bp->bio_cmd == BIO_WRITE)
+					softc->set_pews_status = 3;
+				else
+					softc->set_pews_status = 1;
+			}
 again:
 			softc->queue_count--;
 			bioq_remove(&softc->bio_queue, bp);
@@ -1601,10 +2621,33 @@
 			done_bp = bp;
 			if ((softc->flags & SA_FLAG_EOM_PENDING) != 0) {
 				/*
-				 * We now just clear errors in this case
-				 * and let the residual be the notifier.
+				 * We have two different behaviors for
+				 * writes when we hit either Early Warning
+				 * or the PEWZ (Programmable Early Warning
+				 * Zone).  The default behavior is that
+				 * for all writes that are currently
+				 * queued after the write where we saw the
+				 * early warning, we will return the write
+				 * with the residual equal to the count.
+				 * i.e. tell the application that 0 bytes
+				 * were written.
+				 * 
+				 * The alternate behavior, which is enabled
+				 * when eot_warn is set, is that in
+				 * addition to setting the residual equal
+				 * to the count, we will set the error
+				 * to ENOSPC.
+				 *
+				 * In either case, once queued writes are
+				 * cleared out, we clear the error flag
+				 * (see below) and the application is free to
+				 * attempt to write more.
 				 */
-				bp->bio_error = 0;
+				if (softc->eot_warn != 0) {
+					bp->bio_flags |= BIO_ERROR;
+					bp->bio_error = ENOSPC;
+				} else
+					bp->bio_error = 0;
 			} else if ((softc->flags & SA_FLAG_EOF_PENDING) != 0) {
 				/*
 				 * This can only happen if we're reading
@@ -1641,13 +2684,13 @@
 			bioq_remove(&softc->bio_queue, bp);
 			softc->queue_count--;
 
+			length = bp->bio_bcount;
+
 			if ((softc->flags & SA_FLAG_FIXED) != 0) {
 				if (softc->blk_shift != 0) {
-					length =
-					    bp->bio_bcount >> softc->blk_shift;
+					length = length >> softc->blk_shift;
 				} else if (softc->media_blksize != 0) {
-					length = bp->bio_bcount /
-					    softc->media_blksize;
+					length = length / softc->media_blksize;
 				} else {
 					bp->bio_error = EIO;
 					xpt_print(periph->path, "zero blocksize"
@@ -1662,7 +2705,6 @@
 				    "write"));
 #endif
 			} else {
-				length = bp->bio_bcount;
 #if	0
 				CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO,
 				    ("issuing a %d variable byte %s\n",
@@ -1690,16 +2732,32 @@
 			 * would be to issue, e.g., 64KB reads and occasionally
 			 * have to do deal with 512 byte or 1KB intermediate
 			 * records.
+			 *
+			 * That said, though, we now support setting the
+			 * SILI bit on reads, and we set the blocksize to 4
+			 * bytes when we do that.  This gives us
+			 * compatibility with software that wants this,
+			 * although the only real difference between that
+			 * and not setting the SILI bit on reads is that we
+			 * won't get a check condition on reads where our
+			 * request size is larger than the block on tape.
+			 * That probably only makes a real difference in
+			 * non-packetized SCSI, where you have to go back
+			 * to the drive to request sense and thus incur
+			 * more latency.
 			 */
 			softc->dsreg = (bp->bio_cmd == BIO_READ)?
 			    MTIO_DSREG_RD : MTIO_DSREG_WR;
 			scsi_sa_read_write(&start_ccb->csio, 0, sadone,
-			    MSG_SIMPLE_Q_TAG, (bp->bio_cmd == BIO_READ),
-			    FALSE, (softc->flags & SA_FLAG_FIXED) != 0,
-			    length, bp->bio_data, bp->bio_bcount, SSD_FULL_SIZE,
+			    MSG_SIMPLE_Q_TAG, (bp->bio_cmd == BIO_READ ? 
+			    SCSI_RW_READ : SCSI_RW_WRITE) |
+			    ((bp->bio_flags & BIO_UNMAPPED) != 0 ?
+			    SCSI_RW_BIO : 0), softc->sili,
+			    (softc->flags & SA_FLAG_FIXED) != 0, length,
+			    (bp->bio_flags & BIO_UNMAPPED) != 0 ? (void *)bp :
+			    bp->bio_data, bp->bio_bcount, SSD_FULL_SIZE,
 			    IO_TIMEOUT);
 			start_ccb->ccb_h.ccb_pflags &= ~SA_POSITION_UPDATED;
-			Set_CCB_Type(start_ccb, SA_CCB_BUFFER_IO);
 			start_ccb->ccb_h.ccb_bp = bp;
 			bp = bioq_first(&softc->bio_queue);
 			xpt_action(start_ccb);
@@ -1724,98 +2782,86 @@
 {
 	struct sa_softc *softc;
 	struct ccb_scsiio *csio;
+	struct bio *bp;
+	int error;
 
 	softc = (struct sa_softc *)periph->softc;
 	csio = &done_ccb->csio;
-	switch (CCB_Type(csio)) {
-	case SA_CCB_BUFFER_IO:
-	{
-		struct bio *bp;
-		int error;
 
-		softc->dsreg = MTIO_DSREG_REST;
-		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
-		error = 0;
-		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
-			if ((error = saerror(done_ccb, 0, 0)) == ERESTART) {
-				/*
-				 * A retry was scheduled, so just return.
-				 */
-				return;
-			}
+	softc->dsreg = MTIO_DSREG_REST;
+	bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
+	error = 0;
+	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		if ((error = saerror(done_ccb, 0, 0)) == ERESTART) {
+			/*
+			 * A retry was scheduled, so just return.
+			 */
+			return;
 		}
+	}
 
-		if (error == EIO) {
+	if (error == EIO) {
 
-			/*
-			 * Catastrophic error. Mark the tape as frozen
-			 * (we no longer know tape position).
-			 *
-			 * Return all queued I/O with EIO, and unfreeze
-			 * our queue so that future transactions that
-			 * attempt to fix this problem can get to the
-			 * device.
-			 *
-			 */
+		/*
+		 * Catastrophic error. Mark the tape as frozen
+		 * (we no longer know tape position).
+		 *
+		 * Return all queued I/O with EIO, and unfreeze
+		 * our queue so that future transactions that
+		 * attempt to fix this problem can get to the
+		 * device.
+		 *
+		 */
 
-			softc->flags |= SA_FLAG_TAPE_FROZEN;
-			bioq_flush(&softc->bio_queue, NULL, EIO);
+		softc->flags |= SA_FLAG_TAPE_FROZEN;
+		bioq_flush(&softc->bio_queue, NULL, EIO);
+	}
+	if (error != 0) {
+		bp->bio_resid = bp->bio_bcount;
+		bp->bio_error = error;
+		bp->bio_flags |= BIO_ERROR;
+		/*
+		 * In the error case, position is updated in saerror.
+		 */
+	} else {
+		bp->bio_resid = csio->resid;
+		bp->bio_error = 0;
+		if (csio->resid != 0) {
+			bp->bio_flags |= BIO_ERROR;
 		}
-		if (error != 0) {
-			bp->bio_resid = bp->bio_bcount;
-			bp->bio_error = error;
-			bp->bio_flags |= BIO_ERROR;
-			/*
-			 * In the error case, position is updated in saerror.
-			 */
-		} else {
-			bp->bio_resid = csio->resid;
-			bp->bio_error = 0;
-			if (csio->resid != 0) {
-				bp->bio_flags |= BIO_ERROR;
-			}
-			if (bp->bio_cmd == BIO_WRITE) {
-				softc->flags |= SA_FLAG_TAPE_WRITTEN;
-				softc->filemarks = 0;
-			}
-			if (!(csio->ccb_h.ccb_pflags & SA_POSITION_UPDATED) &&
-			    (softc->blkno != (daddr_t) -1)) {
-				if ((softc->flags & SA_FLAG_FIXED) != 0) {
-					u_int32_t l;
-					if (softc->blk_shift != 0) {
-						l = bp->bio_bcount >>
-							softc->blk_shift;
-					} else {
-						l = bp->bio_bcount /
-							softc->media_blksize;
-					}
-					softc->blkno += (daddr_t) l;
+		if (bp->bio_cmd == BIO_WRITE) {
+			softc->flags |= SA_FLAG_TAPE_WRITTEN;
+			softc->filemarks = 0;
+		}
+		if (!(csio->ccb_h.ccb_pflags & SA_POSITION_UPDATED) &&
+		    (softc->blkno != (daddr_t) -1)) {
+			if ((softc->flags & SA_FLAG_FIXED) != 0) {
+				u_int32_t l;
+				if (softc->blk_shift != 0) {
+					l = bp->bio_bcount >>
+						softc->blk_shift;
 				} else {
-					softc->blkno++;
+					l = bp->bio_bcount /
+						softc->media_blksize;
 				}
+				softc->blkno += (daddr_t) l;
+			} else {
+				softc->blkno++;
 			}
 		}
-		/*
-		 * If we had an error (immediate or pending),
-		 * release the device queue now.
-		 */
-		if (error || (softc->flags & SA_FLAG_ERR_PENDING))
-			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
-		if (error || bp->bio_resid) {
-			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
-			    	  ("error %d resid %ld count %ld\n", error,
-				  bp->bio_resid, bp->bio_bcount));
-		}
-		biofinish(bp, softc->device_stats, 0);
-		break;
 	}
-	case SA_CCB_WAITING:
-	{
-		/* Caller will release the CCB */
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
+	/*
+	 * If we had an error (immediate or pending),
+	 * release the device queue now.
+	 */
+	if (error || (softc->flags & SA_FLAG_ERR_PENDING))
+		cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
+	if (error || bp->bio_resid) {
+		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
+		    	  ("error %d resid %ld count %ld\n", error,
+			  bp->bio_resid, bp->bio_bcount));
 	}
-	}
+	biofinish(bp, softc->device_stats, 0);
 	xpt_release_ccb(done_ccb);
 }
 
@@ -1840,7 +2886,7 @@
 	softc = (struct sa_softc *)periph->softc;
 
 	/*
-	 * This should determine if something has happend since the last
+	 * This should determine if something has happened since the last
 	 * open/mount that would invalidate the mount. We do *not* want
 	 * to retry this command- we just want the status. But we only
 	 * do this if we're mounted already- if we're not mounted,
@@ -1892,8 +2938,7 @@
 		 * Clear out old state.
 		 */
 		softc->flags &= ~(SA_FLAG_TAPE_WP|SA_FLAG_TAPE_WRITTEN|
-				  SA_FLAG_ERR_PENDING|SA_FLAG_COMP_ENABLED|
-				  SA_FLAG_COMP_SUPP|SA_FLAG_COMP_UNSUPP);
+				  SA_FLAG_ERR_PENDING|SA_FLAG_COMPRESSION);
 		softc->filemarks = 0;
 
 		/*
@@ -2002,7 +3047,7 @@
 				    &softc->buffer_mode, &write_protect,
 				    &softc->speed, &comp_supported,
 				    &comp_enabled, &softc->comp_algorithm,
-				    NULL);
+				    NULL, NULL, 0, 0);
 
 		if (error != 0) {
 			/*
@@ -2249,6 +3294,8 @@
 			softc->dsreg = MTIO_DSREG_NIL;
 		} else {
 			softc->fileno = softc->blkno = 0;
+			softc->rep_fileno = softc->rep_blkno = -1;
+			softc->partition = 0;
 			softc->dsreg = MTIO_DSREG_REST;
 		}
 #ifdef	SA_1FM_AT_EOD
@@ -2308,7 +3355,7 @@
 	markswanted = samarkswanted(periph);
 
 	if (markswanted > 0) {
-		error = sawritefilemarks(periph, markswanted, FALSE);
+		error = sawritefilemarks(periph, markswanted, FALSE, FALSE);
 	} else {
 		error = 0;
 	}
@@ -2368,7 +3415,8 @@
 					info /= softc->media_blksize;
 			}
 		}
-		if (CCB_Type(csio) == SA_CCB_BUFFER_IO) {
+		if (csio->cdb_io.cdb_bytes[0] == SA_READ ||
+		    csio->cdb_io.cdb_bytes[0] == SA_WRITE) {
 			bcopy((caddr_t) sense, (caddr_t) &softc->last_io_sense,
 			    sizeof (struct scsi_sense_data));
 			bcopy(csio->cdb_io.cdb_bytes, softc->last_io_cdb,
@@ -2401,16 +3449,18 @@
 		/*
 		 * If a read/write command, we handle it here.
 		 */
-		if (CCB_Type(csio) != SA_CCB_WAITING) {
+		if (csio->cdb_io.cdb_bytes[0] == SA_READ ||
+		    csio->cdb_io.cdb_bytes[0] == SA_WRITE) {
 			break;
 		}
 		/*
-		 * If this was just EOM/EOP, Filemark, Setmark or ILI detected
-		 * on a non read/write command, we assume it's not an error
-		 * and propagate the residule and return.
+		 * If this was just EOM/EOP, Filemark, Setmark, ILI or
+		 * PEW detected on a non read/write command, we assume
+		 * it's not an error and propagate the residual and return.
 		 */
-		if ((aqvalid && asc == 0 && ascq > 0 && ascq <= 5) ||
-		    (aqvalid == 0 && sense_key == SSD_KEY_NO_SENSE)) {
+		if ((aqvalid && asc == 0 && ((ascq > 0 && ascq <= 5)
+		  || (ascq == 0x07)))
+		 || (aqvalid == 0 && sense_key == SSD_KEY_NO_SENSE)) {
 			csio->resid = resid;
 			QFRLS(ccb);
 			return (0);
@@ -2526,7 +3576,8 @@
 	    u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks,
 	    int *buff_mode, u_int8_t *write_protect, u_int8_t *speed,
 	    int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm,
-	    sa_comp_t *tcs)
+	    sa_comp_t *tcs, struct scsi_control_data_prot_subpage *prot_page,
+	    int dp_size, int prot_changeable)
 {
 	union ccb *ccb;
 	void *mode_buffer;
@@ -2684,6 +3735,151 @@
 			bcopy(ntcs, tcs, sizeof (sa_comp_t));
 	}
 
+	if ((params_to_get & SA_PARAM_DENSITY_EXT)
+	 && (softc->scsi_rev >= SCSI_REV_SPC)) {
+		int i;
+
+		for (i = 0; i < SA_DENSITY_TYPES; i++) {
+			scsi_report_density_support(&ccb->csio,
+			    /*retries*/ 1,
+			    /*cbfcnp*/ sadone,
+			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
+			    /*media*/ softc->density_type_bits[i] & SRDS_MEDIA,
+			    /*medium_type*/ softc->density_type_bits[i] &
+					    SRDS_MEDIUM_TYPE,
+			    /*data_ptr*/ softc->density_info[i],
+			    /*length*/ sizeof(softc->density_info[i]),
+			    /*sense_len*/ SSD_FULL_SIZE,
+			    /*timeout*/ REP_DENSITY_TIMEOUT);
+			error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT,
+			    softc->device_stats);
+			status = ccb->ccb_h.status & CAM_STATUS_MASK;
+
+			/*
+			 * Some tape drives won't support this command at
+			 * all, but hopefully we'll minimize that with the
+			 * check for SPC or greater support above.  If they
+			 * don't support the default report (neither the
+			 * MEDIA nor the MEDIUM_TYPE bit set), then there is
+			 * really no point in continuing on to look for
+			 * other reports.
+			 */
+			if ((error != 0)
+			 || (status != CAM_REQ_CMP)) {
+				error = 0;
+				softc->density_info_valid[i] = 0;
+				if (softc->density_type_bits[i] == 0)
+					break;
+				else
+					continue;
+			}
+			softc->density_info_valid[i] = ccb->csio.dxfer_len -
+			    ccb->csio.resid;
+		}
+	}
+
+	/*
+	 * Get logical block protection parameters if the drive supports it.
+	 */
+	if ((params_to_get & SA_PARAM_LBP)
+	 && (softc->flags & SA_FLAG_PROTECT_SUPP)) {
+		struct scsi_mode_header_10 *mode10_hdr;
+		struct scsi_control_data_prot_subpage *dp_page;
+		struct scsi_mode_sense_10 *cdb;
+		struct sa_prot_state *prot;
+		int dp_len, returned_len;
+
+		if (dp_size == 0)
+			dp_size = sizeof(*dp_page);
+
+		dp_len = sizeof(*mode10_hdr) + dp_size;
+		mode10_hdr = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO);
+		if (mode10_hdr == NULL) {
+			error = ENOMEM;
+			goto sagetparamsexit;
+		}
+
+		scsi_mode_sense_len(&ccb->csio,
+				    /*retries*/ 5,
+				    /*cbfcnp*/ sadone,
+				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
+				    /*dbd*/ TRUE,
+				    /*page_code*/ (prot_changeable == 0) ?
+						  SMS_PAGE_CTRL_CURRENT :
+						  SMS_PAGE_CTRL_CHANGEABLE,
+				    /*page*/ SMS_CONTROL_MODE_PAGE,
+				    /*param_buf*/ (uint8_t *)mode10_hdr,
+				    /*param_len*/ dp_len,
+				    /*minimum_cmd_size*/ 10,
+				    /*sense_len*/ SSD_FULL_SIZE,
+				    /*timeout*/ SCSIOP_TIMEOUT);
+		/*
+		 * XXX KDM we need to be able to set the subpage in the
+		 * fill function.
+		 */
+		cdb = (struct scsi_mode_sense_10 *)ccb->csio.cdb_io.cdb_bytes;
+		cdb->subpage = SA_CTRL_DP_SUBPAGE_CODE;
+
+		error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT,
+		    softc->device_stats);
+		if (error != 0) {
+			free(mode10_hdr, M_SCSISA);
+			goto sagetparamsexit;
+		}
+
+		status = ccb->ccb_h.status & CAM_STATUS_MASK;
+		if (status != CAM_REQ_CMP) {
+			error = EINVAL;
+			free(mode10_hdr, M_SCSISA);
+			goto sagetparamsexit;
+		}
+
+		/*
+		 * The returned data length at least has to be long enough
+		 * for us to look at length in the mode page header.
+		 */
+		returned_len = ccb->csio.dxfer_len - ccb->csio.resid;
+		if (returned_len < sizeof(mode10_hdr->data_length)) {
+			error = EINVAL;
+			free(mode10_hdr, M_SCSISA);
+			goto sagetparamsexit;
+		}
+
+		returned_len = min(returned_len, 
+		    sizeof(mode10_hdr->data_length) +
+		    scsi_2btoul(mode10_hdr->data_length));
+
+		dp_page = (struct scsi_control_data_prot_subpage *)
+		    &mode10_hdr[1];
+
+		/*
+		 * We also have to have enough data to include the prot_bits
+		 * in the subpage.
+		 */
+		if (returned_len < (sizeof(*mode10_hdr) +
+		    __offsetof(struct scsi_control_data_prot_subpage, prot_bits)
+		    + sizeof(dp_page->prot_bits))) {
+			error = EINVAL;
+			free(mode10_hdr, M_SCSISA);
+			goto sagetparamsexit;
+		}
+
+		prot = &softc->prot_info.cur_prot_state;
+		prot->prot_method = dp_page->prot_method;
+		prot->pi_length = dp_page->pi_length &
+		    SA_CTRL_DP_PI_LENGTH_MASK;
+		prot->lbp_w = (dp_page->prot_bits & SA_CTRL_DP_LBP_W) ? 1 : 0;
+		prot->lbp_r = (dp_page->prot_bits & SA_CTRL_DP_LBP_R) ? 1 : 0;
+		prot->rbdp = (dp_page->prot_bits & SA_CTRL_DP_RBDP) ? 1 : 0;
+		prot->initialized = 1;
+
+		if (prot_page != NULL)
+			bcopy(dp_page, prot_page, min(sizeof(*prot_page),
+			    sizeof(*dp_page)));
+
+		free(mode10_hdr, M_SCSISA);
+	}
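
A minimal userland sketch of the same decode, assuming the subpage layout
from the scsi_sa.h hunk later in this commit, the standard 8 byte MODE
SENSE(10) header, and no block descriptors (the driver sets DBD):

#include <stdint.h>
#include <stdio.h>

#define MODE10_HDR_LEN		8	/* sizeof(struct scsi_mode_header_10) */
#define DP_SUBPAGE_FIXED	7	/* through the prot_bits byte */
#define SA_CTRL_DP_PI_LENGTH_MASK 0x3f
#define SA_CTRL_DP_LBP_W	0x80
#define SA_CTRL_DP_LBP_R	0x40
#define SA_CTRL_DP_RBDP		0x20

/*
 * Mirror the length checks in sagetparams(): the valid data must cover
 * the mode header plus the subpage bytes up to and including prot_bits
 * before any of those fields may be dereferenced.
 */
static int
decode_prot_subpage(const uint8_t *buf, int valid_len)
{
	const uint8_t *dp;

	if (valid_len < MODE10_HDR_LEN + DP_SUBPAGE_FIXED)
		return (-1);	/* short reply; the driver returns EINVAL */

	dp = buf + MODE10_HDR_LEN;
	printf("method %u pi_length %u lbp_w %d lbp_r %d rbdp %d\n",
	    (unsigned)dp[4], (unsigned)(dp[5] & SA_CTRL_DP_PI_LENGTH_MASK),
	    (dp[6] & SA_CTRL_DP_LBP_W) != 0,
	    (dp[6] & SA_CTRL_DP_LBP_R) != 0,
	    (dp[6] & SA_CTRL_DP_RBDP) != 0);
	return (0);
}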
+
 	if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) {
 		int idx;
 		char *xyz = mode_buffer;
@@ -2702,6 +3898,177 @@
 }
 
 /*
+ * Set protection information to the pending protection information stored
+ * in the softc.
+ */
+static int
+sasetprot(struct cam_periph *periph, struct sa_prot_state *new_prot)
+{
+	struct sa_softc *softc;
+	struct scsi_control_data_prot_subpage *dp_page, *dp_changeable;
+	struct scsi_mode_header_10 *mode10_hdr, *mode10_changeable;
+	union ccb *ccb;
+	uint8_t current_speed;
+	size_t dp_size, dp_page_length;
+	int dp_len, buff_mode;
+	int error;
+
+	softc = (struct sa_softc *)periph->softc;
+	mode10_hdr = NULL;
+	mode10_changeable = NULL;
+	ccb = NULL;
+
+	/*
+	 * Start off with the size set to the actual length of the page
+	 * that we have defined.
+	 */
+	dp_size = sizeof(*dp_changeable);
+	dp_page_length = dp_size -
+	    __offsetof(struct scsi_control_data_prot_subpage, prot_method);
+
+retry_length:
+
+	dp_len = sizeof(*mode10_changeable) + dp_size;
+	mode10_changeable = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO);
+	if (mode10_changeable == NULL) {
+		error = ENOMEM;
+		goto bailout;
+	}
+
+	dp_changeable =
+	    (struct scsi_control_data_prot_subpage *)&mode10_changeable[1];
+
+	/*
+	 * First get the data protection page changeable parameters mask.
+	 * We need to know which parameters the drive supports changing.
+	 * We also need to know what the drive claims that its page length
+	 * is.  The reason is that IBM drives in particular are very picky
+	 * about the page length.  They want it (the length set in the
+	 * page structure itself) to be 28 bytes, and they want the
+	 * parameter list length specified in the mode select header to be
+	 * 40 bytes.  So, to work with IBM drives as well as any other tape
+	 * drive, find out what the drive claims the page length is, and
+	 * make sure that we match that.
+	 */
+	error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP,  
+	    NULL, NULL, NULL, &buff_mode, NULL, &current_speed, NULL, NULL,
+	    NULL, NULL, dp_changeable, dp_size, /*prot_changeable*/ 1);
+	if (error != 0)
+		goto bailout;
+
+	if (scsi_2btoul(dp_changeable->length) > dp_page_length) {
+		dp_page_length = scsi_2btoul(dp_changeable->length);
+		dp_size = dp_page_length +
+		    __offsetof(struct scsi_control_data_prot_subpage,
+		    prot_method);
+		free(mode10_changeable, M_SCSISA);
+		mode10_changeable = NULL;
+		goto retry_length;
+	}
+
+	mode10_hdr = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO);
+	if (mode10_hdr == NULL) {
+		error = ENOMEM;
+		goto bailout;
+	}
+
+	dp_page = (struct scsi_control_data_prot_subpage *)&mode10_hdr[1];
+
+	/*
+	 * Now grab the actual current settings in the page.
+	 */
+	error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP,  
+	    NULL, NULL, NULL, &buff_mode, NULL, &current_speed, NULL, NULL,
+	    NULL, NULL, dp_page, dp_size, /*prot_changeable*/ 0);
+	if (error != 0)
+		goto bailout;
+
+	/* These two fields need to be 0 for MODE SELECT */
+	scsi_ulto2b(0, mode10_hdr->data_length);
+	mode10_hdr->medium_type = 0;
+	/* We are not including a block descriptor */
+	scsi_ulto2b(0, mode10_hdr->blk_desc_len);
+
+	mode10_hdr->dev_spec = current_speed;
+	/* if set, set single-initiator buffering mode */
+	if (softc->buffer_mode == SMH_SA_BUF_MODE_SIBUF) {
+		mode10_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF;
+	}
+
+	/*
+	 * For each field, make sure that the drive allows changing it
+	 * before bringing in the user's setting.
+	 */
+	if (dp_changeable->prot_method != 0)
+		dp_page->prot_method = new_prot->prot_method;
+
+	if (dp_changeable->pi_length & SA_CTRL_DP_PI_LENGTH_MASK) {
+		dp_page->pi_length &= ~SA_CTRL_DP_PI_LENGTH_MASK;
+		dp_page->pi_length |= (new_prot->pi_length &
+		    SA_CTRL_DP_PI_LENGTH_MASK);
+	}
+	if (dp_changeable->prot_bits & SA_CTRL_DP_LBP_W) {
+		if (new_prot->lbp_w)
+			dp_page->prot_bits |= SA_CTRL_DP_LBP_W;
+		else
+			dp_page->prot_bits &= ~SA_CTRL_DP_LBP_W;
+	}
+
+	if (dp_changeable->prot_bits & SA_CTRL_DP_LBP_R) {
+		if (new_prot->lbp_r)
+			dp_page->prot_bits |= SA_CTRL_DP_LBP_R;
+		else
+			dp_page->prot_bits &= ~SA_CTRL_DP_LBP_R;
+	}
+
+	if (dp_changeable->prot_bits & SA_CTRL_DP_RBDP) {
+		if (new_prot->rbdp)
+			dp_page->prot_bits |= SA_CTRL_DP_RBDP;
+		else
+			dp_page->prot_bits &= ~SA_CTRL_DP_RBDP;
+	}
+
+	ccb = cam_periph_getccb(periph, 1);
+
+	scsi_mode_select_len(&ccb->csio,
+			     /*retries*/ 5,
+			     /*cbfcnp*/ sadone,
+			     /*tag_action*/ MSG_SIMPLE_Q_TAG,
+			     /*scsi_page_fmt*/ TRUE,
+			     /*save_pages*/ FALSE,
+			     /*param_buf*/ (uint8_t *)mode10_hdr,
+			     /*param_len*/ dp_len,
+			     /*minimum_cmd_size*/ 10,
+			     /*sense_len*/ SSD_FULL_SIZE,
+			     /*timeout*/ SCSIOP_TIMEOUT);
+
+	error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats);
+	if (error != 0)
+		goto bailout;
+
+	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		error = EINVAL;
+		goto bailout;
+	}
+
+	/*
+	 * The operation was successful.  We could just copy the settings
+	 * the user requested, but just in case the drive ignored some of
+	 * our settings, let's ask for status again.
+	 */
+	error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP,  
+	    NULL, NULL, NULL, &buff_mode, NULL, &current_speed, NULL, NULL,
+	    NULL, NULL, dp_page, dp_size, 0);
+
+bailout:
+	if (ccb != NULL)
+		xpt_release_ccb(ccb);
+	free(mode10_hdr, M_SCSISA);
+	free(mode10_changeable, M_SCSISA);
+	return (error);
+}
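
The length negotiation above works out as follows for the IBM case, a
worked example assuming the structure sizes elsewhere in this commit
(7 fixed bytes in scsi_control_data_prot_subpage, an 8 byte
scsi_mode_header_10, and prot_method at offset 4 in the subpage):

/*
 * First pass:  dp_size = sizeof(subpage) = 7, so
 *              dp_page_length = 7 - 4 = 3.
 * The drive then reports length = 28 in the changeable-values page,
 * which is larger than 3, so the code retries with:
 *              dp_page_length = 28
 *              dp_size        = 28 + 4 = 32
 *              dp_len         = 8 + 32 = 40
 * matching the 28 byte page length and 40 byte parameter list length
 * the comment says those drives insist on for MODE SELECT.
 */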
+
+/*
  * The purpose of this function is to set one of four different parameters
  * for a tape drive:
  *	- blocksize
@@ -2753,7 +4120,7 @@
 	    params_to_set | SA_PARAM_BLOCKSIZE | SA_PARAM_SPEED,
 	    &current_blocksize, &current_density, NULL, &buff_mode, NULL,
 	    &current_speed, &comp_supported, &comp_enabled,
-	    &current_calg, ccomp);
+	    &current_calg, ccomp, NULL, 0, 0);
 
 	if (error != 0) {
 		free(ccomp, M_SCSISA);
@@ -3032,6 +4399,208 @@
 	return (error);
 }
 
+static int
+saextget(struct cdev *dev, struct cam_periph *periph, struct sbuf *sb,
+    struct mtextget *g)
+{
+	int indent, error;
+	char tmpstr[80];
+	struct sa_softc *softc;
+	int tmpint;
+	uint32_t maxio_tmp;
+	struct ccb_getdev cgd;
+
+	softc = (struct sa_softc *)periph->softc;
+
+	error = 0;
+
+	error = sagetparams_common(dev, periph);
+	if (error)
+		goto extget_bailout;
+	if (!SA_IS_CTRL(dev) && !softc->open_pending_mount)
+		sagetpos(periph);
+
+	indent = 0;
+	SASBADDNODE(sb, indent, mtextget);
+	/*
+	 * Basic CAM peripheral information.
+	 */
+	SASBADDVARSTR(sb, indent, periph->periph_name, %s, periph_name,
+	    strlen(periph->periph_name) + 1);
+	SASBADDUINT(sb, indent, periph->unit_number, %u, unit_number);
+	xpt_setup_ccb(&cgd.ccb_h,
+		      periph->path,
+		      CAM_PRIORITY_NORMAL);
+	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
+	xpt_action((union ccb *)&cgd);
+	if ((cgd.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		g->status = MT_EXT_GET_ERROR;
+		snprintf(g->error_str, sizeof(g->error_str),
+		    "Error %#x returned for XPT_GDEV_TYPE CCB",
+		    cgd.ccb_h.status);
+		goto extget_bailout;
+	}
+
+	cam_strvis(tmpstr, cgd.inq_data.vendor,
+	    sizeof(cgd.inq_data.vendor), sizeof(tmpstr));
+	SASBADDVARSTRDESC(sb, indent, tmpstr, %s, vendor,
+	    sizeof(cgd.inq_data.vendor) + 1, "SCSI Vendor ID");
+
+	cam_strvis(tmpstr, cgd.inq_data.product,
+	    sizeof(cgd.inq_data.product), sizeof(tmpstr));
+	SASBADDVARSTRDESC(sb, indent, tmpstr, %s, product,
+	    sizeof(cgd.inq_data.product) + 1, "SCSI Product ID");
+
+	cam_strvis(tmpstr, cgd.inq_data.revision,
+	    sizeof(cgd.inq_data.revision), sizeof(tmpstr));
+	SASBADDVARSTRDESC(sb, indent, tmpstr, %s, revision,
+	    sizeof(cgd.inq_data.revision) + 1, "SCSI Revision");
+
+	if (cgd.serial_num_len > 0) {
+		char *tmpstr2;
+		size_t ts2_len;
+		int ts2_malloc;
+
+		ts2_len = 0;
+
+		if (cgd.serial_num_len > sizeof(tmpstr)) {
+			ts2_len = cgd.serial_num_len + 1;
+			ts2_malloc = 1;
+			tmpstr2 = malloc(ts2_len, M_SCSISA, M_NOWAIT | M_ZERO);
+			/*
+			 * The 80 characters allocated on the stack above
+			 * will handle the vast majority of serial numbers.
+			 * If we run into one that is larger than that, and
+			 * we can't malloc the length without blocking,
+			 * bail out with an out of memory error.
+			 */
+			if (tmpstr2 == NULL) {
+				error = ENOMEM;
+				goto extget_bailout;
+			}
+		} else {
+			ts2_len = sizeof(tmpstr);
+			ts2_malloc = 0;
+			tmpstr2 = tmpstr;
+		}
+
+		cam_strvis(tmpstr2, cgd.serial_num, cgd.serial_num_len,
+		    ts2_len);
+
+		SASBADDVARSTRDESC(sb, indent, tmpstr2, %s, serial_num,
+		    (ssize_t)cgd.serial_num_len + 1, "Serial Number");
+		if (ts2_malloc != 0)
+			free(tmpstr2, M_SCSISA);
+	} else {
+		/*
+		 * We return a serial_num element in any case, but it will
+		 * be empty if the device has no serial number.
+		 */
+		tmpstr[0] = '\0';
+		SASBADDVARSTRDESC(sb, indent, tmpstr, %s, serial_num,
+		    (ssize_t)0, "Serial Number");
+	}
+
+	SASBADDUINTDESC(sb, indent, softc->maxio, %u, maxio, 
+	    "Maximum I/O size allowed by driver and controller");
+
+	SASBADDUINTDESC(sb, indent, softc->cpi_maxio, %u, cpi_maxio, 
+	    "Maximum I/O size reported by controller");
+
+	SASBADDUINTDESC(sb, indent, softc->max_blk, %u, max_blk, 
+	    "Maximum block size supported by tape drive and media");
+
+	SASBADDUINTDESC(sb, indent, softc->min_blk, %u, min_blk, 
+	    "Minimum block size supported by tape drive and media");
+
+	SASBADDUINTDESC(sb, indent, softc->blk_gran, %u, blk_gran, 
+	    "Block granularity supported by tape drive and media");
+	
+	maxio_tmp = min(softc->max_blk, softc->maxio);
+
+	SASBADDUINTDESC(sb, indent, maxio_tmp, %u, max_effective_iosize, 
+	    "Maximum possible I/O size");
+
+	SASBADDINTDESC(sb, indent, softc->flags & SA_FLAG_FIXED ? 1 : 0, %d, 
+	    fixed_mode, "Set to 1 for fixed block mode, 0 for variable block");
+
+	/*
+	 * XXX KDM include SIM, bus, target, LUN?
+	 */
+	if (softc->flags & SA_FLAG_COMP_UNSUPP)
+		tmpint = 0;
+	else
+		tmpint = 1;
+	SASBADDINTDESC(sb, indent, tmpint, %d, compression_supported,
+	    "Set to 1 if compression is supported, 0 if not");
+	if (softc->flags & SA_FLAG_COMP_ENABLED)
+		tmpint = 1;
+	else
+		tmpint = 0;
+	SASBADDINTDESC(sb, indent, tmpint, %d, compression_enabled,
+	    "Set to 1 if compression is enabled, 0 if not");
+	SASBADDUINTDESC(sb, indent, softc->comp_algorithm, %u,
+	    compression_algorithm, "Numeric compression algorithm");
+
+	safillprot(softc, &indent, sb);
+
+	SASBADDUINTDESC(sb, indent, softc->media_blksize, %u,
+	    media_blocksize, "Block size reported by drive or set by user");
+	SASBADDINTDESC(sb, indent, (intmax_t)softc->fileno, %jd,
+	    calculated_fileno, "Calculated file number, -1 if unknown");
+	SASBADDINTDESC(sb, indent, (intmax_t)softc->blkno, %jd,
+	    calculated_rel_blkno, "Calculated block number relative to file, "
+	    "set to -1 if unknown");
+	SASBADDINTDESC(sb, indent, (intmax_t)softc->rep_fileno, %jd,
+	    reported_fileno, "File number reported by drive, -1 if unknown");
+	SASBADDINTDESC(sb, indent, (intmax_t)softc->rep_blkno, %jd,
+	    reported_blkno, "Block number relative to BOP/BOT reported by "
+	    "drive, -1 if unknown");
+	SASBADDINTDESC(sb, indent, (intmax_t)softc->partition, %jd,
+	    partition, "Current partition number, 0 is the default");
+	SASBADDINTDESC(sb, indent, softc->bop, %d, bop,
+	    "Set to 1 if drive is at the beginning of partition/tape, 0 if "
+	    "not, -1 if unknown");
+	SASBADDINTDESC(sb, indent, softc->eop, %d, eop,
+	    "Set to 1 if drive is past early warning, 0 if not, -1 if unknown");
+	SASBADDINTDESC(sb, indent, softc->bpew, %d, bpew,
+	    "Set to 1 if drive is past programmable early warning, 0 if not, "
+	    "-1 if unknown");
+	SASBADDINTDESC(sb, indent, (intmax_t)softc->last_io_resid, %jd,
+	    residual, "Residual for the last I/O");
+	/*
+	 * XXX KDM should we send a string with the current driver
+	 * status already decoded instead of a numeric value?
+	 */
+	SASBADDINTDESC(sb, indent, softc->dsreg, %d, dsreg, 
+	    "Current state of the driver");
+
+	safilldensitysb(softc, &indent, sb);
+
+	SASBENDNODE(sb, indent, mtextget);
+
+extget_bailout:
+
+	return (error);
+}
+
+static int
+saparamget(struct sa_softc *softc, struct sbuf *sb)
+{
+	int indent;
+
+	indent = 0;
+	SASBADDNODE(sb, indent, mtparamget);
+	SASBADDINTDESC(sb, indent, softc->sili, %d, sili, 
+	    "Suppress an error on underlength variable reads");
+	SASBADDINTDESC(sb, indent, softc->eot_warn, %d, eot_warn, 
+	    "Return an error to warn that end of tape is approaching");
+	safillprot(softc, &indent, sb);
+	SASBENDNODE(sb, indent, mtparamget);
+
+	return (0);
+}
+
 static void
 saprevent(struct cam_periph *periph, int action)
 {
@@ -3091,10 +4660,14 @@
 	softc->dsreg = MTIO_DSREG_REST;
 
 	xpt_release_ccb(ccb);
-	if (error == 0)
-		softc->fileno = softc->blkno = (daddr_t) 0;
-	else
+	if (error == 0) {
+		softc->partition = softc->fileno = softc->blkno = (daddr_t) 0;
+		softc->rep_fileno = softc->rep_blkno = (daddr_t) 0;
+	} else {
 		softc->fileno = softc->blkno = (daddr_t) -1;
+		softc->partition = (daddr_t) -1; 
+		softc->rep_fileno = softc->rep_blkno = (daddr_t) -1;
+	}
 	return (error);
 }
 
@@ -3146,6 +4719,8 @@
 	 */
 	if (error) {
 		softc->fileno = softc->blkno = (daddr_t) -1;
+		softc->rep_blkno = softc->partition = (daddr_t) -1;
+		softc->rep_fileno = (daddr_t) -1;
 	} else if (code == SS_SETMARKS || code == SS_EOD) {
 		softc->fileno = softc->blkno = (daddr_t) -1;
 	} else if (code == SS_FILEMARKS && softc->fileno != (daddr_t) -1) {
@@ -3165,11 +4740,14 @@
 			}
 		}
 	}
+	if (error == 0)
+		sagetpos(periph);
+
 	return (error);
 }
 
 static int
-sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks)
+sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks, int immed)
 {
 	union	ccb *ccb;
 	struct	sa_softc *softc;
@@ -3188,7 +4766,7 @@
 	softc->dsreg = MTIO_DSREG_FMK;
 	/* this *must* not be retried */
 	scsi_write_filemarks(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG,
-	    FALSE, setmarks, nmarks, SSD_FULL_SIZE, IO_TIMEOUT);
+	    immed, setmarks, nmarks, SSD_FULL_SIZE, IO_TIMEOUT);
 	softc->dsreg = MTIO_DSREG_REST;
 
 
@@ -3206,15 +4784,127 @@
 	 * Update relative positions (if we're doing that).
 	 */
 	if (error) {
-		softc->fileno = softc->blkno = (daddr_t) -1;
+		softc->fileno = softc->blkno = softc->partition = (daddr_t) -1;
 	} else if (softc->fileno != (daddr_t) -1) {
 		softc->fileno += nwm;
 		softc->blkno = 0;
 	}
+
+	/*
+	 * Ask the tape drive for position information.
+	 */
+	sagetpos(periph);
+
+	/*
+	 * If we got valid position information, since we just wrote a file
+	 * mark, we know we're at the file mark and block 0 after that
+	 * filemark.
+	 */
+	if (softc->rep_fileno != (daddr_t) -1) {
+		softc->fileno = softc->rep_fileno;
+		softc->blkno = 0;
+	}
+
 	return (error);
 }
 
 static int
+sagetpos(struct cam_periph *periph)
+{
+	union ccb *ccb;
+	struct scsi_tape_position_long_data long_pos;
+	struct sa_softc *softc = (struct sa_softc *)periph->softc;
+	int error;
+
+	if (softc->quirks & SA_QUIRK_NO_LONG_POS) {
+		softc->rep_fileno = (daddr_t) -1;
+		softc->rep_blkno = (daddr_t) -1;
+		softc->bop = softc->eop = softc->bpew = -1;
+		return (EOPNOTSUPP);
+	}
+
+	bzero(&long_pos, sizeof(long_pos));
+
+	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
+	scsi_read_position_10(&ccb->csio,
+			      /*retries*/ 1,
+			      /*cbfcnp*/ sadone,
+			      /*tag_action*/ MSG_SIMPLE_Q_TAG,
+			      /*service_action*/ SA_RPOS_LONG_FORM,
+			      /*data_ptr*/ (uint8_t *)&long_pos,
+			      /*length*/ sizeof(long_pos),
+			      /*sense_len*/ SSD_FULL_SIZE,
+			      /*timeout*/ SCSIOP_TIMEOUT);
+
+	softc->dsreg = MTIO_DSREG_RBSY;
+	error = cam_periph_runccb(ccb, saerror, 0, SF_QUIET_IR,
+				  softc->device_stats);
+	softc->dsreg = MTIO_DSREG_REST;
+
+	if (error == 0) {
+		if (long_pos.flags & SA_RPOS_LONG_MPU) {
+			/*
+			 * If the drive doesn't know what file mark it is
+			 * on, our calculated filemark isn't going to be
+			 * accurate either.
+			 */
+			softc->fileno = (daddr_t) -1;
+			softc->rep_fileno = (daddr_t) -1;
+		} else {
+			softc->fileno = softc->rep_fileno =
+			    scsi_8btou64(long_pos.logical_file_num);
+		}
+
+		if (long_pos.flags & SA_RPOS_LONG_LONU) {
+			softc->partition = (daddr_t) -1;
+			softc->rep_blkno = (daddr_t) -1;
+			/*
+			 * If the tape drive doesn't know its block
+			 * position, we can't claim to know it either.
+			 */
+			softc->blkno = (daddr_t) -1;
+		} else {
+			softc->partition = scsi_4btoul(long_pos.partition);
+			softc->rep_blkno =
+			    scsi_8btou64(long_pos.logical_object_num);
+		}
+		if (long_pos.flags & SA_RPOS_LONG_BOP)
+			softc->bop = 1;
+		else
+			softc->bop = 0;
+
+		if (long_pos.flags & SA_RPOS_LONG_EOP)
+			softc->eop = 1;
+		else
+			softc->eop = 0;
+
+		if ((long_pos.flags & SA_RPOS_LONG_BPEW)
+		 || (softc->set_pews_status != 0)) {
+			softc->bpew = 1;
+			if (softc->set_pews_status > 0)
+				softc->set_pews_status--;
+		} else
+			softc->bpew = 0;
+	} else if (error == EINVAL) {
+		/*
+		 * If this drive returned an invalid-request type error,
+		 * then it likely doesn't support the long form report.
+		 */
+		softc->quirks |= SA_QUIRK_NO_LONG_POS;
+	}
+
+	if (error != 0) {
+		softc->rep_fileno = softc->rep_blkno = (daddr_t) -1;
+		softc->partition = (daddr_t) -1;
+		softc->bop = softc->eop = softc->bpew = -1;
+	}
+
+	xpt_release_ccb(ccb);
+
+	return (error);
+}
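
For reference, a standalone sketch that decodes the long form payload the
way sagetpos() does, assuming the scsi_tape_position_long_data layout from
the scsi_sa.h hunk below (32 bytes, big endian multi-byte fields):

#include <stdint.h>
#include <stdio.h>

#define SA_RPOS_LONG_BOP	0x80	/* Beginning of Partition */
#define SA_RPOS_LONG_EOP	0x40	/* End of Partition */
#define SA_RPOS_LONG_MPU	0x08	/* Mark Position Unknown */
#define SA_RPOS_LONG_LONU	0x04	/* Logical Object Number Unknown */
#define SA_RPOS_LONG_BPEW	0x01	/* Beyond Prog. Early Warning */

static uint32_t
get4b(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	    (uint32_t)p[2] << 8 | p[3]);
}

static uint64_t
get8b(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return (v);
}

/* Offsets per the structure: flags 0, partition 4, logical_object_num 8,
 * logical_file_num 16. */
static void
decode_long_pos(const uint8_t d[32])
{
	printf("bop %d eop %d bpew %d\n",
	    (d[0] & SA_RPOS_LONG_BOP) != 0, (d[0] & SA_RPOS_LONG_EOP) != 0,
	    (d[0] & SA_RPOS_LONG_BPEW) != 0);
	if (d[0] & SA_RPOS_LONG_MPU)
		printf("file number unknown\n");
	else
		printf("file %ju\n", (uintmax_t)get8b(d + 16));
	if (d[0] & SA_RPOS_LONG_LONU)
		printf("partition/block unknown\n");
	else
		printf("partition %u block %ju\n", (unsigned)get4b(d + 4),
		    (uintmax_t)get8b(d + 8));
}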
+
+static int
 sardpos(struct cam_periph *periph, int hard, u_int32_t *blkptr)
 {
 	struct scsi_tape_position_data loc;
@@ -3233,7 +4923,7 @@
 	 */
 
 	if (hard && (softc->flags & SA_FLAG_TAPE_WRITTEN)) {
-		error = sawritefilemarks(periph, 0, 0);
+		error = sawritefilemarks(periph, 0, 0, 0);
 		if (error && error != EACCES)
 			return (error);
 	}
@@ -3258,10 +4948,12 @@
 }
 
 static int
-sasetpos(struct cam_periph *periph, int hard, u_int32_t *blkptr)
+sasetpos(struct cam_periph *periph, int hard, struct mtlocate *locate_info)
 {
 	union ccb *ccb;
 	struct sa_softc *softc;
+	int locate16;
+	int immed, cp;
 	int error;
 
 	/*
@@ -3275,19 +4967,100 @@
 	softc = (struct sa_softc *)periph->softc;
 	ccb = cam_periph_getccb(periph, 1);
 
-	
-	scsi_set_position(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG,
-	    hard, *blkptr, SSD_FULL_SIZE, SPACE_TIMEOUT);
+	cp = locate_info->flags & MT_LOCATE_FLAG_CHANGE_PART ? 1 : 0;
+	immed = locate_info->flags & MT_LOCATE_FLAG_IMMED ? 1 : 0;
 
+	/*
+	 * Determine whether we have to use LOCATE or LOCATE16.  The hard
+	 * bit is only possible with LOCATE, but the new ioctls do not
+	 * allow setting that bit.  So we can't get into the situation of
+	 * having the hard bit set with a block address that is wider than
+	 * 32 bits.
+	 */
+	if (hard != 0)
+		locate16 = 0;
+	else if ((locate_info->dest_type != MT_LOCATE_DEST_OBJECT)
+	      || (locate_info->block_address_mode != MT_LOCATE_BAM_IMPLICIT)
+	      || (locate_info->logical_id > SA_SPOS_MAX_BLK))
+		locate16 = 1;
+	else
+		locate16 = 0;
 
+	if (locate16 != 0) {
+		scsi_locate_16(&ccb->csio,
+			       /*retries*/ 1,
+			       /*cbfcnp*/ sadone,
+			       /*tag_action*/ MSG_SIMPLE_Q_TAG,
+			       /*immed*/ immed,
+			       /*cp*/ cp,
+			       /*dest_type*/ locate_info->dest_type,
+			       /*bam*/ locate_info->block_address_mode,
+			       /*partition*/ locate_info->partition,
+			       /*logical_id*/ locate_info->logical_id,
+			       /*sense_len*/ SSD_FULL_SIZE,
+			       /*timeout*/ SPACE_TIMEOUT);
+	} else {
+		uint32_t blk_pointer;
+
+		blk_pointer = locate_info->logical_id;
+
+		scsi_locate_10(&ccb->csio,
+			       /*retries*/ 1,
+			       /*cbfcnp*/ sadone,
+			       /*tag_action*/ MSG_SIMPLE_Q_TAG,
+			       /*immed*/ immed,
+			       /*cp*/ cp,
+			       /*hard*/ hard,
+			       /*partition*/ locate_info->partition,
+			       /*block_address*/ locate_info->logical_id,
+			       /*sense_len*/ SSD_FULL_SIZE,
+			       /*timeout*/ SPACE_TIMEOUT);
+	}
+
 	softc->dsreg = MTIO_DSREG_POS;
 	error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats);
 	softc->dsreg = MTIO_DSREG_REST;
 	xpt_release_ccb(ccb);
+
 	/*
-	 * Note relative file && block number position as now unknown.
+	 * We assume the calculated file and block numbers are unknown
+	 * unless we have enough information to populate them.
 	 */
 	softc->fileno = softc->blkno = (daddr_t) -1;
+
+	/*
+	 * If the user requested changing the partition and the request
+	 * succeeded, note the partition.
+	 */
+	if ((error == 0)
+	 && (cp != 0))
+		softc->partition = locate_info->partition;
+	else
+		softc->partition = (daddr_t) -1;
+
+	if (error == 0) {
+		switch (locate_info->dest_type) {
+		case MT_LOCATE_DEST_FILE:
+			/*
+			 * This is the only case where we can reliably
+			 * calculate the file and block numbers.
+			 */
+			softc->fileno = locate_info->logical_id;
+			softc->blkno = 0;
+			break;
+		case MT_LOCATE_DEST_OBJECT:
+		case MT_LOCATE_DEST_SET:
+		case MT_LOCATE_DEST_EOD:
+		default:
+			break;
+		}
+	}
+
+	/*
+	 * Ask the drive for current position information.
+	 */
+	sagetpos(periph);
+
 	return (error);
 }
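
The CDB selection above boils down to a small predicate; a sketch of the
same rule, assuming the struct mtlocate and MT_LOCATE_* definitions from
the updated <sys/mtio.h> interface and SA_SPOS_MAX_BLK from the scsi_sa.h
hunk below:

/*
 * LOCATE(10) is only needed for the legacy hard (vendor block address)
 * bit; anything the 10 byte CDB cannot express goes to LOCATE(16).
 */
static int
use_locate16(int hard, const struct mtlocate *li)
{
	if (hard != 0)
		return (0);
	return (li->dest_type != MT_LOCATE_DEST_OBJECT ||
	    li->block_address_mode != MT_LOCATE_BAM_IMPLICIT ||
	    li->logical_id > SA_SPOS_MAX_BLK);
}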
 
@@ -3311,10 +5084,11 @@
 	softc->dsreg = MTIO_DSREG_REST;
 
 	xpt_release_ccb(ccb);
-	if (error == 0)
-		softc->fileno = softc->blkno = (daddr_t) 0;
-	else
-		softc->fileno = softc->blkno = (daddr_t) -1;
+	if (error == 0) {
+		softc->partition = softc->fileno = softc->blkno = (daddr_t) 0;
+		sagetpos(periph);
+	} else
+		softc->partition = softc->fileno = softc->blkno = (daddr_t) -1;
 	return (error);
 }
 
@@ -3368,10 +5142,13 @@
 	softc->dsreg = MTIO_DSREG_REST;
 	xpt_release_ccb(ccb);
 
-	if (error || load == 0)
-		softc->fileno = softc->blkno = (daddr_t) -1;
-	else if (error == 0)
-		softc->fileno = softc->blkno = (daddr_t) 0;
+	if (error || load == 0) {
+		softc->partition = softc->fileno = softc->blkno = (daddr_t) -1;
+		softc->rep_fileno = softc->rep_blkno = (daddr_t) -1;
+	} else if (error == 0) {
+		softc->partition = softc->fileno = softc->blkno = (daddr_t) 0;
+		sagetpos(periph);
+	}
 	return (error);
 }
 
@@ -3400,6 +5177,297 @@
 	return (error);
 }
 
+/*
+ * Fill an sbuf with density data in XML format.  This particular macro
+ * works for multi-byte integer fields.
+ *
+ * Note that 1 byte fields aren't supported here.  The reason is that
+ * the compiler type checks every case of the switch below, no matter
+ * which one the sizeof() selects at compile time.  An assignment in
+ * the 1 byte case would therefore also be checked for multi-byte
+ * (array) fields, where the field decays to a pointer and the
+ * assignment draws a "makes an integer from a pointer without a
+ * cast" warning.
+ */
+#define	SAFILLDENSSB(dens_data, sb, indent, field, desc_remain, 	\
+		     len_to_go, cur_offset, desc){			\
+	size_t cur_field_len;						\
+									\
+	cur_field_len = sizeof(dens_data->field);			\
+	if (desc_remain < cur_field_len) {				\
+		len_to_go -= desc_remain;				\
+		cur_offset += desc_remain;				\
+		continue;						\
+	}								\
+	len_to_go -= cur_field_len;					\
+	cur_offset += cur_field_len;					\
+	desc_remain -= cur_field_len;					\
+									\
+	switch (sizeof(dens_data->field)) {				\
+	case 1:								\
+		KASSERT(1 == 0, ("Programmer error, invalid 1 byte "	\
+			"field width for SAFILLDENSFIELD"));		\
+		break;							\
+	case 2:								\
+		SASBADDUINTDESC(sb, indent,				\
+		    scsi_2btoul(dens_data->field), %u, field, desc);	\
+		break;							\
+	case 3:								\
+		SASBADDUINTDESC(sb, indent,				\
+		    scsi_3btoul(dens_data->field), %u, field, desc);	\
+		break;							\
+	case 4:								\
+		SASBADDUINTDESC(sb, indent,				\
+		    scsi_4btoul(dens_data->field), %u, field, desc);	\
+		break;							\
+	case 8:								\
+		SASBADDUINTDESC(sb, indent, 				\
+		    (uintmax_t)scsi_8btou64(dens_data->field),	%ju, 	\
+		    field, desc);					\
+		break;							\
+	default:							\
+		break;							\
+	}								\
+};
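
To make the rejected case concrete, a hypothetical 1 byte branch that is
deliberately not in the driver:

/*
 * Hypothetical case 1 body (NOT in the driver):
 *
 *	case 1: {
 *		uintmax_t tmpval = dens_data->field;
 *		...
 *	}
 *
 * This compiles for a genuine 1 byte field, but the same branch is
 * also type checked when the macro is instantiated with a multi-byte
 * (array) field, where dens_data->field decays to a pointer; that is
 * the "integer from a pointer without a cast" warning described
 * above, and why the real case 1 is a KASSERT instead.
 */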
+/*
+ * Fill an sbuf with density data in XML format.  This particular macro
+ * works for strings.
+ */
+#define	SAFILLDENSSBSTR(dens_data, sb, indent, field, desc_remain, 	\
+			len_to_go, cur_offset, desc){			\
+	size_t cur_field_len;						\
+	char tmpstr[32];						\
+									\
+	cur_field_len = sizeof(dens_data->field);			\
+	if (desc_remain < cur_field_len) {				\
+		len_to_go -= desc_remain;				\
+		cur_offset += desc_remain;				\
+		continue;						\
+	}								\
+	len_to_go -= cur_field_len;					\
+	cur_offset += cur_field_len;					\
+	desc_remain -= cur_field_len;					\
+									\
+	cam_strvis(tmpstr, dens_data->field,				\
+	    sizeof(dens_data->field), sizeof(tmpstr));			\
+	SASBADDVARSTRDESC(sb, indent, tmpstr, %s, field,		\
+	    strlen(tmpstr) + 1, desc);					\
+};
+
+/*
+ * Fill an sbuf with density data descriptors.
+ */
+static void
+safilldenstypesb(struct sbuf *sb, int *indent, uint8_t *buf, int buf_len,
+    int is_density)
+{
+	struct scsi_density_hdr *hdr;
+	uint32_t hdr_len;
+	int len_to_go, cur_offset;
+	int length_offset;
+	int num_reports, need_close;
+
+	/*
+	 * We need at least the header length.  Note that this isn't an
+	 * error; not all tape drives support every data type.
+	 */
+	if (buf_len < sizeof(*hdr))
+		goto bailout;
+
+	hdr = (struct scsi_density_hdr *)buf;
+	hdr_len = scsi_2btoul(hdr->length);
+	len_to_go = min(buf_len - sizeof(*hdr), hdr_len);
+	if (is_density) {
+		length_offset = __offsetof(struct scsi_density_data,
+		    bits_per_mm);
+	} else {
+		length_offset = __offsetof(struct scsi_medium_type_data,
+		    num_density_codes);
+	}
+	cur_offset = sizeof(*hdr);
+
+	num_reports = 0;
+	need_close = 0;
+
+	while (len_to_go > length_offset) {
+		struct scsi_density_data *dens_data;
+		struct scsi_medium_type_data *type_data;
+		int desc_remain;
+		size_t cur_field_len;
+
+		dens_data = NULL;
+		type_data = NULL;
+
+		if (is_density) {
+			dens_data = (struct scsi_density_data *)
+			    &buf[cur_offset];
+			if (dens_data->byte2 & SDD_DLV)
+				desc_remain = scsi_2btoul(dens_data->length);
+			else
+				desc_remain = SDD_DEFAULT_LENGTH -
+				    length_offset;
+		} else {
+			type_data = (struct scsi_medium_type_data *)
+			    &buf[cur_offset];
+			desc_remain = scsi_2btoul(type_data->length);
+		}
+
+		len_to_go -= length_offset;
+		desc_remain = min(desc_remain, len_to_go);
+		cur_offset += length_offset;
+
+		if (need_close != 0) {
+			SASBENDNODE(sb, *indent, density_entry);
+		}
+
+		SASBADDNODENUM(sb, *indent, density_entry, num_reports);
+		num_reports++;
+		need_close = 1;
+
+		if (is_density) {
+			SASBADDUINTDESC(sb, *indent,
+			    dens_data->primary_density_code, %u,
+			    primary_density_code, "Primary Density Code");
+			SASBADDUINTDESC(sb, *indent,
+			    dens_data->secondary_density_code, %u,
+			    secondary_density_code, "Secondary Density Code");
+			SASBADDUINTDESC(sb, *indent,
+			    dens_data->byte2 & ~SDD_DLV, %#x, density_flags,
+			    "Density Flags");
+
+			SAFILLDENSSB(dens_data, sb, *indent, bits_per_mm,
+			    desc_remain, len_to_go, cur_offset, "Bits per mm");
+			SAFILLDENSSB(dens_data, sb, *indent, media_width,
+			    desc_remain, len_to_go, cur_offset, "Media width");
+			SAFILLDENSSB(dens_data, sb, *indent, tracks,
+			    desc_remain, len_to_go, cur_offset,
+			    "Number of Tracks");
+			SAFILLDENSSB(dens_data, sb, *indent, capacity,
+			    desc_remain, len_to_go, cur_offset, "Capacity");
+
+			SAFILLDENSSBSTR(dens_data, sb, *indent, assigning_org,
+			    desc_remain, len_to_go, cur_offset,
+			    "Assigning Organization");
+
+			SAFILLDENSSBSTR(dens_data, sb, *indent, density_name,
+			    desc_remain, len_to_go, cur_offset, "Density Name");
+
+			SAFILLDENSSBSTR(dens_data, sb, *indent, description,
+			    desc_remain, len_to_go, cur_offset, "Description");
+		} else {
+			int i;
+
+			SASBADDUINTDESC(sb, *indent, type_data->medium_type,
+			    %u, medium_type, "Medium Type");
+
+			cur_field_len =
+			    __offsetof(struct scsi_medium_type_data,
+				       media_width) -
+			    __offsetof(struct scsi_medium_type_data,
+				       num_density_codes);
+
+			if (desc_remain < cur_field_len) {
+				len_to_go -= desc_remain;
+				cur_offset += desc_remain;
+				continue;
+			}
+			len_to_go -= cur_field_len;
+			cur_offset += cur_field_len;
+			desc_remain -= cur_field_len;
+
+			SASBADDINTDESC(sb, *indent,
+			    type_data->num_density_codes, %d,
+			    num_density_codes, "Number of Density Codes");
+			SASBADDNODE(sb, *indent, density_code_list);
+			for (i = 0; i < type_data->num_density_codes;
+			     i++) {
+				SASBADDUINTDESC(sb, *indent,
+				    type_data->primary_density_codes[i], %u,
+				    density_code, "Density Code");
+			}
+			SASBENDNODE(sb, *indent, density_code_list);
+
+			SAFILLDENSSB(type_data, sb, *indent, media_width,
+			    desc_remain, len_to_go, cur_offset,
+			    "Media width");
+			SAFILLDENSSB(type_data, sb, *indent, medium_length,
+			    desc_remain, len_to_go, cur_offset,
+			    "Medium length");
+
+			/*
+			 * Account for the two reserved bytes.
+			 */
+			cur_field_len = sizeof(type_data->reserved2);
+			if (desc_remain < cur_field_len) {
+				len_to_go -= desc_remain;
+				cur_offset += desc_remain;
+				continue;
+			}
+			len_to_go -= cur_field_len;
+			cur_offset += cur_field_len;
+			desc_remain -= cur_field_len;
+			
+			SAFILLDENSSBSTR(type_data, sb, *indent, assigning_org,
+			    desc_remain, len_to_go, cur_offset,
+			    "Assigning Organization");
+			SAFILLDENSSBSTR(type_data, sb, *indent,
+			    medium_type_name, desc_remain, len_to_go,
+			    cur_offset, "Medium type name");
+			SAFILLDENSSBSTR(type_data, sb, *indent, description,
+			    desc_remain, len_to_go, cur_offset, "Description");
+
+		}
+	}
+	if (need_close != 0) {
+		SASBENDNODE(sb, *indent, density_entry);
+	}
+
+bailout:
+	return;
+}
+
+/*
+ * Fill an sbuf with density data information
+ */
+static void
+safilldensitysb(struct sa_softc *softc, int *indent, struct sbuf *sb)
+{
+	int i, is_density;
+	
+	SASBADDNODE(sb, *indent, mtdensity);
+	SASBADDUINTDESC(sb, *indent, softc->media_density, %u, media_density,
+	    "Current Medium Density");
+	is_density = 0;
+	for (i = 0; i < SA_DENSITY_TYPES; i++) {
+		int tmpint;
+
+		if (softc->density_info_valid[i] == 0)
+			continue;
+
+		SASBADDNODE(sb, *indent, density_report);
+		if (softc->density_type_bits[i] & SRDS_MEDIUM_TYPE) {
+			tmpint = 1;
+			is_density = 0;
+		} else {
+			tmpint = 0;
+			is_density = 1;
+		}
+		SASBADDINTDESC(sb, *indent, tmpint, %d, medium_type_report,
+		    "Medium type report");
+
+		if (softc->density_type_bits[i] & SRDS_MEDIA)
+			tmpint = 1;
+		else
+			tmpint = 0;
+		SASBADDINTDESC(sb, *indent, tmpint, %d, media_report, 
+		    "Media report");
+
+		safilldenstypesb(sb, indent, softc->density_info[i],
+		    softc->density_info_valid[i], is_density);
+		SASBENDNODE(sb, *indent, density_report);
+	}
+	SASBENDNODE(sb, *indent, mtdensity);
+}
+
 #endif /* _KERNEL */
 
 /*
@@ -3431,11 +5499,14 @@
 		   u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
 {
 	struct scsi_sa_rw *scsi_cmd;
+	int read;
 
+	read = (readop & SCSI_RW_DIRMASK) == SCSI_RW_READ;
+
 	scsi_cmd = (struct scsi_sa_rw *)&csio->cdb_io.cdb_bytes;
-	scsi_cmd->opcode = readop ? SA_READ : SA_WRITE;
+	scsi_cmd->opcode = read ? SA_READ : SA_WRITE;
 	scsi_cmd->sli_fixed = 0;
-	if (sli && readop)
+	if (sli && read)
 		scsi_cmd->sli_fixed |= SAR_SLI;
 	if (fixed)
 		scsi_cmd->sli_fixed |= SARW_FIXED;
@@ -3442,7 +5513,8 @@
 	scsi_ulto3b(length, scsi_cmd->length);
 	scsi_cmd->control = 0;
 
-	cam_fill_csio(csio, retries, cbfcnp, readop ? CAM_DIR_IN : CAM_DIR_OUT,
+	cam_fill_csio(csio, retries, cbfcnp, (read ? CAM_DIR_IN : CAM_DIR_OUT) |
+	    ((readop & SCSI_RW_BIO) != 0 ? CAM_DATA_BIO : 0),
 	    tag_action, data_ptr, dxfer_len, sense_len,
 	    sizeof(*scsi_cmd), timeout);
 }
@@ -3605,6 +5677,42 @@
 }
 
 /*
+ * Read Tape Position command.
+ */
+void
+scsi_read_position_10(struct ccb_scsiio *csio, u_int32_t retries,
+		      void (*cbfcnp)(struct cam_periph *, union ccb *),
+		      u_int8_t tag_action, int service_action,
+		      u_int8_t *data_ptr, u_int32_t length,
+		      u_int32_t sense_len, u_int32_t timeout)
+{
+	struct scsi_tape_read_position *scmd;
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_IN,
+		      tag_action,
+		      /*data_ptr*/data_ptr,
+		      /*dxfer_len*/length,
+		      sense_len,
+		      sizeof(*scmd),
+		      timeout);
+
+
+	bzero(scmd, sizeof(*scmd));
+	scmd->opcode = READ_POSITION;
+	scmd->byte1 = service_action;
+	/*
+	 * The length is only currently set (as of SSC4r03) if the extended
+	 * form is specified.  The other forms have fixed lengths.
+	 */
+	if (service_action == SA_RPOS_EXTENDED_FORM)
+		scsi_ulto2b(length, scmd->length);
+}
+
+/*
  * Set Tape Position command.
  */
 void
@@ -3624,3 +5732,193 @@
 		scmd->byte1 |= SA_SPOS_BT;
 	scsi_ulto4b(blkno, scmd->blkaddr);
 }
+
+/*
+ * XXX KDM figure out how to make a compatibility function.
+ */
+void
+scsi_locate_10(struct ccb_scsiio *csio, u_int32_t retries,
+	       void (*cbfcnp)(struct cam_periph *, union ccb *),
+	       u_int8_t tag_action, int immed, int cp, int hard,
+	       int64_t partition, u_int32_t block_address,
+	       int sense_len, u_int32_t timeout)
+{
+	struct scsi_tape_locate *scmd;
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      CAM_DIR_NONE,
+		      tag_action,
+		      /*data_ptr*/ NULL,
+		      /*dxfer_len*/ 0,
+		      sense_len,
+		      sizeof(*scmd),
+		      timeout);
+	scmd = (struct scsi_tape_locate *)&csio->cdb_io.cdb_bytes;
+	bzero(scmd, sizeof(*scmd));
+	scmd->opcode = LOCATE;
+	if (immed)
+		scmd->byte1 |= SA_SPOS_IMMED;
+	if (cp)
+		scmd->byte1 |= SA_SPOS_CP;
+	if (hard)
+		scmd->byte1 |= SA_SPOS_BT;
+	scsi_ulto4b(block_address, scmd->blkaddr);
+	scmd->partition = partition;
+}
+
+void
+scsi_locate_16(struct ccb_scsiio *csio, u_int32_t retries,
+	       void (*cbfcnp)(struct cam_periph *, union ccb *),
+	       u_int8_t tag_action, int immed, int cp, u_int8_t dest_type,
+	       int bam, int64_t partition, u_int64_t logical_id,
+	       int sense_len, u_int32_t timeout)
+{
+	struct scsi_locate_16 *scsi_cmd;
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_NONE,
+		      tag_action,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+
+	scsi_cmd = (struct scsi_locate_16 *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+	scsi_cmd->opcode = LOCATE_16;
+	if (immed)
+		scsi_cmd->byte1 |= SA_LC_IMMEDIATE;
+	if (cp)
+		scsi_cmd->byte1 |= SA_LC_CP;
+	scsi_cmd->byte1 |= (dest_type << SA_LC_DEST_TYPE_SHIFT);
+
+	scsi_cmd->byte2 |= bam;
+	scsi_cmd->partition = partition;
+	scsi_u64to8b(logical_id, scsi_cmd->logical_id);
+}
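
A self-contained check of the resulting CDB layout, using the
struct scsi_locate_16 definition and SA_LC_* bits from the scsi_sa.h hunk
below; the LOCATE(16) opcode value 0x92 is taken from SSC:

#include <stdint.h>
#include <stdio.h>

#define SA_LC_CP              0x02
#define SA_LC_DEST_TYPE_SHIFT 3
#define SA_LC_DEST_FILE       0x01
#define SA_LC_BAM_IMPLICIT    0x00
#define LOCATE_16             0x92

int
main(void)
{
	uint8_t cdb[16] = { 0 };
	uint64_t id = 1234;
	int i;

	cdb[0] = LOCATE_16;
	cdb[1] = SA_LC_CP | (SA_LC_DEST_FILE << SA_LC_DEST_TYPE_SHIFT);
	cdb[2] = SA_LC_BAM_IMPLICIT;
	cdb[3] = 2;				/* partition */
	for (i = 11; i >= 4; i--, id >>= 8)	/* logical_id, big endian */
		cdb[i] = id & 0xff;

	/* Prints: 92 0a 00 02 00 00 00 00 00 00 04 d2 00 00 00 00 */
	for (i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return (0);
}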
+
+void
+scsi_report_density_support(struct ccb_scsiio *csio, u_int32_t retries,
+			    void (*cbfcnp)(struct cam_periph *, union ccb *),
+			    u_int8_t tag_action, int media, int medium_type,
+			    u_int8_t *data_ptr, u_int32_t length,
+			    u_int32_t sense_len, u_int32_t timeout)
+{
+	struct scsi_report_density_support *scsi_cmd;
+
+	scsi_cmd = (struct scsi_report_density_support *)
+	    &csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = REPORT_DENSITY_SUPPORT;
+	if (media != 0)
+		scsi_cmd->byte1 |= SRDS_MEDIA;
+	if (medium_type != 0)
+		scsi_cmd->byte1 |= SRDS_MEDIUM_TYPE;
+
+	scsi_ulto2b(length, scsi_cmd->length);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_IN,
+		      tag_action,
+		      /*data_ptr*/data_ptr,
+		      /*dxfer_len*/length,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
+void
+scsi_set_capacity(struct ccb_scsiio *csio, u_int32_t retries,
+		  void (*cbfcnp)(struct cam_periph *, union ccb *),
+		  u_int8_t tag_action, int byte1, u_int32_t proportion,
+		  u_int32_t sense_len, u_int32_t timeout)
+{
+	struct scsi_set_capacity *scsi_cmd;
+
+	scsi_cmd = (struct scsi_set_capacity *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = SET_CAPACITY;
+
+	scsi_cmd->byte1 = byte1;
+	scsi_ulto2b(proportion, scsi_cmd->cap_proportion);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/CAM_DIR_NONE,
+		      tag_action,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
+void
+scsi_format_medium(struct ccb_scsiio *csio, u_int32_t retries,
+		   void (*cbfcnp)(struct cam_periph *, union ccb *),
+		   u_int8_t tag_action, int byte1, int byte2, 
+		   u_int8_t *data_ptr, u_int32_t dxfer_len,
+		   u_int32_t sense_len, u_int32_t timeout)
+{
+	struct scsi_format_medium *scsi_cmd;
+
+	scsi_cmd = (struct scsi_format_medium*)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = FORMAT_MEDIUM;
+
+	scsi_cmd->byte1 = byte1;
+	scsi_cmd->byte2 = byte2;
+
+	scsi_ulto2b(dxfer_len, scsi_cmd->length);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      /*flags*/(dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
+		      tag_action,
+		      /*data_ptr*/ data_ptr,
+		      /*dxfer_len*/ dxfer_len,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}
+
+void
+scsi_allow_overwrite(struct ccb_scsiio *csio, u_int32_t retries,
+		   void (*cbfcnp)(struct cam_periph *, union ccb *),
+		   u_int8_t tag_action, int allow_overwrite, int partition, 
+		   u_int64_t logical_id, u_int32_t sense_len, u_int32_t timeout)
+{
+	struct scsi_allow_overwrite *scsi_cmd;
+
+	scsi_cmd = (struct scsi_allow_overwrite *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+	scsi_cmd->opcode = ALLOW_OVERWRITE;
+
+	scsi_cmd->allow_overwrite = allow_overwrite;
+	scsi_cmd->partition = partition;
+	scsi_u64to8b(logical_id, scsi_cmd->logical_id);
+
+	cam_fill_csio(csio,
+		      retries,
+		      cbfcnp,
+		      CAM_DIR_NONE,
+		      tag_action,
+		      /*data_ptr*/ NULL,
+		      /*dxfer_len*/ 0,
+		      sense_len,
+		      sizeof(*scsi_cmd),
+		      timeout);
+}

Modified: trunk/sys/cam/scsi/scsi_sa.h
===================================================================
--- trunk/sys/cam/scsi/scsi_sa.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_sa.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,8 +1,10 @@
+/* $MidnightBSD$ */
 /*-
  * Structure and function declarations for the
  * SCSI Sequential Access Peripheral driver for CAM.
  *
  * Copyright (c) 1999, 2000 Matthew Jacob
+ * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -26,7 +28,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_sa.h 317851 2017-05-05 20:25:31Z ken $
  */
 
 #ifndef	_SCSI_SCSI_SA_H
@@ -142,6 +144,53 @@
 };
 
 /*
+ * Set tape capacity.
+ */
+struct scsi_set_capacity
+{
+	u_int8_t opcode;
+	u_int8_t byte1;
+#define	SA_SSC_IMMED		0x01
+	u_int8_t reserved;
+	u_int8_t cap_proportion[2];
+	u_int8_t control;
+};
+
+/*
+ * Format tape media.  The CDB opcode is the same as the disk-specific
+ * FORMAT UNIT command, but the fields inside the CDB are laid out
+ * differently, hence the separate definition here.
+ */
+struct scsi_format_medium
+{
+	u_int8_t opcode;
+	u_int8_t byte1;
+#define	SFM_IMMED		0x01
+#define	SFM_VERIFY		0x02
+	u_int8_t byte2;
+#define	SFM_FORMAT_DEFAULT	0x00
+#define	SFM_FORMAT_PARTITION	0x01
+#define	SFM_FORMAT_DEF_PART	0x02
+#define	SFM_FORMAT_MASK		0x0f
+	u_int8_t length[2];
+	u_int8_t control;
+};
+
+struct scsi_allow_overwrite
+{
+	u_int8_t opcode;
+	u_int8_t reserved1;
+	u_int8_t allow_overwrite;
+#define	SAO_ALLOW_OVERWRITE_DISABLED	0x00
+#define	SAO_ALLOW_OVERWRITE_CUR_POS	0x01
+#define	SAO_ALLOW_OVERWRITE_FORMAT	0x02
+	u_int8_t partition;
+	u_int8_t logical_id[8];
+	u_int8_t reserved2[3];
+	u_int8_t control;
+};
+
+/*
  * Dev specific mode page masks.
  */
 #define SMH_SA_WP		0x80
@@ -180,11 +229,18 @@
 #define	SA_BIS			0x40	/* block identifiers supported */
 #define	SA_RSMK			0x20	/* report setmarks */
 #define	SA_AVC			0x10	/* automatic velocity control */
-#define	SA_SOCF_MASK		0xc0	/* stop on consecutive formats */
-#define	SA_RBO			0x20	/* recover buffer order */
-#define	SA_REW			0x10	/* report early warning */
+#define	SA_SOCF_MASK		0x0c	/* stop on consecutive formats */
+#define	SA_RBO			0x02	/* recover buffer order */
+#define	SA_REW			0x01	/* report early warning */
 	u_int8_t gap_size;
 	u_int8_t byte10;
+/* from SCSI-3: SSC-4 Working draft (2/14) 8.3.3 */
+#define	SA_EOD_DEF_MASK		0xe0	/* EOD defined */
+#define	SA_EEG			0x10	/* Enable EOD Generation */
+#define	SA_SEW			0x08	/* Synchronize at Early Warning */
+#define	SA_SOFT_WP		0x04	/* Software Write Protect */
+#define	SA_BAML			0x02	/* Block Address Mode Lock */
+#define	SA_BAM			0x01	/* Block Address Mode */
 	u_int8_t ew_bufsize[3];
 	u_int8_t sel_comp_alg;
 #define	SA_COMP_NONE		0x00
@@ -221,10 +277,78 @@
 	struct scsi_data_compression_page dcomp;
 } sa_comp_t;
 
+/*
+ * Control Data Protection subpage.  This is as defined in SSC3r03.
+ */
+struct scsi_control_data_prot_subpage {
+	uint8_t page_code;
+#define	SA_CTRL_DP_PAGE_CODE		0x0a
+	uint8_t subpage_code;
+#define	SA_CTRL_DP_SUBPAGE_CODE		0xf0
+	uint8_t length[2];
+	uint8_t prot_method;
+#define	SA_CTRL_DP_NO_LBP		0x00
+#define	SA_CTRL_DP_REED_SOLOMON		0x01
+#define	SA_CTRL_DP_METHOD_MAX		0xff
+	uint8_t pi_length;
+#define	SA_CTRL_DP_PI_LENGTH_MASK	0x3f
+#define	SA_CTRL_DP_RS_LENGTH		4
+	uint8_t prot_bits;
+#define	SA_CTRL_DP_LBP_W		0x80
+#define	SA_CTRL_DP_LBP_R		0x40
+#define	SA_CTRL_DP_RBDP			0x20
+	uint8_t reserved[];
+};
+
+/*
+ * This is the Read/Write Control mode page used on IBM Enterprise Tape
+ * Drives.  They are known as 3592, TS, or Jaguar drives.  The SCSI inquiry
+ * data will show a Product ID "03592XXX", where XXX is 'J1A', 'E05' (TS1120),
+ * 'E06' (TS1130), 'E07' (TS1140) or 'E08' (TS1150).
+ *
+ * This page definition is current as of the 3592 SCSI Reference v6,
+ * released on December 16th, 2014.
+ */
+struct scsi_tape_ibm_rw_control {
+	uint8_t page_code;
+#define SA_IBM_RW_CTRL_PAGE_CODE		0x25
+	uint8_t page_length;
+	uint8_t ignore_seq_checks;
+#define	SA_IBM_RW_CTRL_LOC_IGNORE_SEQ		0x04
+#define	SA_IBM_RW_CTRL_SPC_BLK_IGNORE_SEQ	0x02
+#define	SA_IBM_RW_CTRL_SPC_FM_IGNORE_SEQ	0x01
+	uint8_t ignore_data_checks;
+#define	SA_IBM_RW_CTRL_LOC_IGNORE_DATA		0x04
+#define	SA_IBM_RW_CTRL_SPC_BLK_IGNORE_DATA	0x02
+#define	SA_IBM_RW_CTRL_SPC_FM_IGNORE_DATA	0x01
+	uint8_t reserved1;
+	uint8_t leop_method;
+#define	SA_IBM_RW_CTRL_LEOP_DEFAULT		0x00
+#define	SA_IBM_RW_CTRL_LEOP_MAX_CAP		0x01
+#define	SA_IBM_RW_CTRL_LEOP_CONST_CAP		0x02
+	uint8_t leop_ew[2];
+	uint8_t byte8;
+#define	SA_IBM_RW_CTRL_DISABLE_FASTSYNC		0x80
+#define	SA_IBM_RW_CTRL_DISABLE_SKIPSYNC		0x40
+#define	SA_IBM_RW_CTRL_DISABLE_CROSS_EOD	0x08
+#define	SA_IBM_RW_CTRL_DISABLE_CROSS_PERM_ERR	0x04
+#define	SA_IBM_RW_CTRL_REPORT_SEG_EW		0x02
+#define	SA_IBM_RW_CTRL_REPORT_HOUSEKEEPING_ERR	0x01
+	uint8_t default_write_dens_bop_0;
+	uint8_t pending_write_dens_bop_0;
+	uint8_t reserved2[21];
+};
+
 struct scsi_tape_read_position {
 	u_int8_t opcode;		/* READ_POSITION */
 	u_int8_t byte1;			/* set LSB to read hardware block pos */
-	u_int8_t reserved[8];
+#define	SA_RPOS_SHORT_FORM	0x00
+#define	SA_RPOS_SHORT_VENDOR	0x01
+#define	SA_RPOS_LONG_FORM	0x06
+#define	SA_RPOS_EXTENDED_FORM	0x08
+	u_int8_t reserved[5];
+	u_int8_t length[2];
+	u_int8_t control;
 };
 
 struct scsi_tape_position_data	{	/* Short Form */
@@ -235,6 +359,7 @@
 #define	SA_RPOS_BYCU		0x10	/* Byte Count Unknown (SCSI3) */
 #define	SA_RPOS_BPU		0x04	/* Block Position Unknown */
 #define	SA_RPOS_PERR		0x02	/* Position Error (SCSI3) */
+#define	SA_RPOS_BPEW		0x01	/* Beyond Programmable Early Warning */
 #define	SA_RPOS_UNCERTAIN	SA_RPOS_BPU
 	u_int8_t partition;
 	u_int8_t reserved[2];
@@ -245,6 +370,38 @@
 	u_int8_t nbufbyte[4];
 };
 
+struct scsi_tape_position_long_data {
+	u_int8_t flags;
+#define	SA_RPOS_LONG_BOP	0x80	/* Beginning of Partition */
+#define	SA_RPOS_LONG_EOP	0x40	/* End of Partition */
+#define	SA_RPOS_LONG_MPU	0x08	/* Mark Position Unknown */
+#define	SA_RPOS_LONG_LONU	0x04	/* Logical Object Number Unknown */
+#define	SA_RPOS_LONG_BPEW	0x01	/* Beyond Programmable Early Warning */
+	u_int8_t reserved[3];
+	u_int8_t partition[4];
+	u_int8_t logical_object_num[8];
+	u_int8_t logical_file_num[8];
+	u_int8_t set_id[8];
+};
+
+struct scsi_tape_position_ext_data {
+	u_int8_t flags;
+#define	SA_RPOS_EXT_BOP		0x80	/* Beginning of Partition */
+#define	SA_RPOS_EXT_EOP		0x40	/* End of Partition */
+#define	SA_RPOS_EXT_LOCU	0x20	/* Logical Object Count Unknown */
+#define	SA_RPOS_EXT_BYCU	0x10	/* Byte Count Unknown */
+#define	SA_RPOS_EXT_LOLU	0x04	/* Logical Object Location Unknown */
+#define	SA_RPOS_EXT_PERR	0x02	/* Position Error */
+#define	SA_RPOS_EXT_BPEW	0x01	/* Beyond Programmable Early Warning */
+	u_int8_t partition;
+	u_int8_t length[2];
+	u_int8_t reserved;
+	u_int8_t num_objects[3];
+	u_int8_t first_object[8];
+	u_int8_t last_object[8];
+	u_int8_t bytes_in_buffer[8];
+};
+
 struct scsi_tape_locate {
 	u_int8_t opcode;
 	u_int8_t byte1;
@@ -253,18 +410,534 @@
 #define	SA_SPOS_BT		0x04
 	u_int8_t reserved1;
 	u_int8_t blkaddr[4];
+#define	SA_SPOS_MAX_BLK		0xffffffff
 	u_int8_t reserved2;
 	u_int8_t partition;
 	u_int8_t control;
 };
 
+struct scsi_locate_16 {
+	u_int8_t opcode;
+	u_int8_t byte1;
+#define	SA_LC_IMMEDIATE		0x01
+#define	SA_LC_CP		0x02
+#define	SA_LC_DEST_TYPE_MASK	0x38
+#define	SA_LC_DEST_TYPE_SHIFT	3
+#define	SA_LC_DEST_OBJECT	0x00
+#define	SA_LC_DEST_FILE		0x01
+#define	SA_LC_DEST_SET		0x02
+#define	SA_LC_DEST_EOD		0x03
+	u_int8_t byte2;
+#define	SA_LC_BAM_IMPLICIT	0x00
+#define	SA_LC_BAM_EXPLICIT	0x01
+	u_int8_t partition;
+	u_int8_t logical_id[8];
+	u_int8_t reserved[3];
+	u_int8_t control;
+};
+
+struct scsi_report_density_support {
+	u_int8_t opcode;
+	u_int8_t byte1;
+#define	SRDS_MEDIA		0x01
+#define	SRDS_MEDIUM_TYPE	0x02
+	u_int8_t reserved[5];
+	u_int8_t length[2];
+#define	SRDS_MAX_LENGTH		0xffff
+	u_int8_t control;
+};
+
+struct scsi_density_hdr {
+	u_int8_t length[2];
+	u_int8_t reserved[2];
+	u_int8_t descriptor[];
+};
+
+struct scsi_density_data {
+	u_int8_t primary_density_code;
+	u_int8_t secondary_density_code;
+	u_int8_t byte2;
+#define	SDD_DLV			0x01
+#define	SDD_DEFLT		0x20
+#define	SDD_DUP			0x40
+#define SDD_WRTOK		0x80
+	u_int8_t length[2];
+#define	SDD_DEFAULT_LENGTH	52
+	u_int8_t bits_per_mm[3];
+	u_int8_t media_width[2];
+	u_int8_t tracks[2];
+	u_int8_t capacity[4];
+	u_int8_t assigning_org[8];
+	u_int8_t density_name[8];
+	u_int8_t description[20];
+};
+
+struct scsi_medium_type_data {
+	u_int8_t medium_type;
+	u_int8_t reserved1;
+	u_int8_t length[2];
+#define	SMTD_DEFAULT_LENGTH	52
+	u_int8_t num_density_codes;
+	u_int8_t primary_density_codes[9];
+	u_int8_t media_width[2];
+	u_int8_t medium_length[2];
+	u_int8_t reserved2[2];
+	u_int8_t assigning_org[8];
+	u_int8_t medium_type_name[8];
+	u_int8_t description[20];
+};
+
 /*
+ * Manufacturer-assigned Serial Number VPD page.
+ * Current as of SSC-5r03, 28 September 2016.
+ */
+struct scsi_vpd_mfg_serial_number
+{
+	u_int8_t device;
+	u_int8_t page_code;
+#define	SVPD_MFG_SERIAL_NUMBER_PAGE_CODE 0xB1
+	u_int8_t page_length[2];
+	u_int8_t mfg_serial_num[];
+};
+
+/*
+ * Security Protocol Specific values for the Tape Data Encryption protocol
+ * (0x20) used with SECURITY PROTOCOL IN.  See below for values used with
+ * SECURITY PROTOCOL OUT.  Current as of SSC4r03.
+ */
+#define	TDE_IN_SUPPORT_PAGE		0x0000
+#define	TDE_OUT_SUPPORT_PAGE		0x0001
+#define	TDE_DATA_ENC_CAP_PAGE		0x0010
+#define	TDE_SUPPORTED_KEY_FORMATS_PAGE	0x0011
+#define	TDE_DATA_ENC_MAN_CAP_PAGE	0x0012
+#define	TDE_DATA_ENC_STATUS_PAGE	0x0020
+#define	TDE_NEXT_BLOCK_ENC_STATUS_PAGE	0x0021
+#define	TDE_GET_ENC_MAN_ATTR_PAGE	0x0022
+#define	TDE_RANDOM_NUM_PAGE		0x0030
+#define	TDE_KEY_WRAP_PK_PAGE		0x0031
+
+/*
+ * Tape Data Encryption protocol pages used with SECURITY PROTOCOL IN and
+ * SECURITY PROTOCOL OUT.
+ */
+/*
+ * Tape Data Encryption In Support page (0x0000).
+ */
+struct tde_in_support_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t page_codes[];
+};
+
+/*
+ * Tape Data Encryption Out Support page (0x0001).
+ */
+struct tde_out_support_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t page_codes[];
+};
+
+/*
+ * Logical block encryption algorithm descriptor.  This is reported in the
+ * Data Encryption Capabilities page.
+ */
+struct tde_block_enc_alg_desc {
+	uint8_t alg_index;
+	uint8_t reserved1;
+	uint8_t desc_length[2];
+	uint8_t byte4;
+#define	TDE_BEA_AVFMV			0x80
+#define	TDE_BEA_SDK_C			0x40
+#define	TDE_BEA_MAC_C			0x20
+#define	TDE_BEA_DELB_C			0x10
+#define	TDE_BEA_DECRYPT_C_MASK		0x0c
+#define	TDE_BEA_DECRYPT_C_EXT		0x0c
+#define	TDE_BEA_DECRYPT_C_HARD		0x08
+#define	TDE_BEA_DECRYPT_C_SOFT		0x04
+#define	TDE_BEA_DECRYPT_C_NO_CAP	0x00
+#define	TDE_BEA_ENCRYPT_C_MASK		0x03
+#define	TDE_BEA_ENCRYPT_C_EXT		0x03
+#define	TDE_BEA_ENCRYPT_C_HARD		0x02
+#define	TDE_BEA_ENCRYPT_C_SOFT		0x01
+#define	TDE_BEA_ENCRYPT_C_NO_CAP	0x00
+	uint8_t byte5;
+#define	TDE_BEA_AVFCLP_MASK		0xc0
+#define	TDE_BEA_AVFCLP_VALID		0x80
+#define	TDE_BEA_AVFCLP_NOT_VALID	0x40
+#define	TDE_BEA_AVFCLP_NOT_APP		0x00
+#define	TDE_BEA_NONCE_C_MASK		0x30
+#define	TDE_BEA_NONCE_C_SUPPORTED	0x30
+#define	TDE_BEA_NONCE_C_PROVIDED	0x20
+#define	TDE_BEA_NONCE_C_GENERATED	0x10
+#define	TDE_BEA_NONCE_C_NOT_REQUIRED	0x00
+#define	TDE_BEA_KADF_C			0x08
+#define	TDE_BEA_VCELB_C			0x04
+#define	TDE_BEA_UKADF			0x02
+#define	TDE_BEA_AKADF			0x01
+	uint8_t max_unauth_key_bytes[2];
+	uint8_t max_auth_key_bytes[2];
+	uint8_t lbe_key_size[2];
+	uint8_t byte12;
+#define	TDE_BEA_DKAD_C_MASK		0xc0
+#define	TDE_BEA_DKAD_C_CAPABLE		0xc0
+#define	TDE_BEA_DKAD_C_NOT_ALLOWED	0x80
+#define	TDE_BEA_DKAD_C_REQUIRED		0x40
+#define	TDE_BEA_EEMC_C_MASK		0x30
+#define	TDE_BEA_EEMC_C_ALLOWED		0x20
+#define	TDE_BEA_EEMC_C_NOT_ALLOWED	0x10
+#define	TDE_BEA_EEMC_C_NOT_SPECIFIED	0x00
+	/*
+	 * Raw Decryption Mode Control Capabilities (RDMC_C) field.  The
+	 * descriptions are too complex to represent as a simple name.
+	 */
+#define	TDE_BEA_RDMC_C_MASK		0x0e
+#define	TDE_BEA_RDMC_C_MODE_7		0x0e
+#define	TDE_BEA_RDMC_C_MODE_6		0x0c
+#define	TDE_BEA_RDMC_C_MODE_5		0x0a
+#define	TDE_BEA_RDMC_C_MODE_4		0x08
+#define	TDE_BEA_RDMC_C_MODE_1		0x02
+#define	TDE_BEA_EAREM			0x01
+	uint8_t byte13;
+#define	TDE_BEA_MAX_EEDKS_MASK		0x0f
+	uint8_t msdk_count[2];
+	uint8_t max_eedk_size[2];
+	uint8_t reserved2[2];
+	uint8_t security_algo_code[4];
+};
+
+/*
+ * Data Encryption Capabilities page (0x0010).
+ */
+struct tde_data_enc_cap_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t byte4;
+#define	DATA_ENC_CAP_EXTDECC_MASK		0x0c
+#define	DATA_ENC_CAP_EXTDECC_NOT_REPORTED	0x00
+#define	DATA_ENC_CAP_EXTDECC_NOT_CAPABLE	0x04
+#define	DATA_ENC_CAP_EXTDECC_CAPABLE		0x08
+#define	DATA_ENC_CAP_CFG_P_MASK			0x03
+#define	DATA_ENC_CAP_CFG_P_NOT_REPORTED		0x00
+#define	DATA_ENC_CAP_CFG_P_ALLOWED		0x01
+#define	DATA_ENC_CAP_CFG_P_NOT_ALLOWED		0x02
+	uint8_t reserved[15];
+	struct tde_block_enc_alg_desc alg_descs[];
+};
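
A short sketch of walking this page as returned by SECURITY PROTOCOL IN,
assuming the 20 byte page header implied by the structure above and that
desc_length counts the descriptor bytes following it (as in SSC):

#include <stdint.h>
#include <stdio.h>

#define TDE_BEA_ENCRYPT_C_MASK	0x03
#define TDE_BEA_DECRYPT_C_MASK	0x0c

static uint16_t
get2b(const uint8_t *p)
{
	return ((uint16_t)((uint16_t)p[0] << 8 | p[1]));
}

static void
walk_enc_caps(const uint8_t *buf, int len)
{
	int off = 20;			/* first tde_block_enc_alg_desc */

	while (off + 5 <= len) {
		/* byte4 holds the ENCRYPT_C/DECRYPT_C capability fields */
		printf("algorithm %u: encrypt_c %#x decrypt_c %#x\n",
		    buf[off], buf[off + 4] & TDE_BEA_ENCRYPT_C_MASK,
		    (buf[off + 4] & TDE_BEA_DECRYPT_C_MASK) >> 2);
		off += 4 + get2b(&buf[off + 2]);
	}
}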
+
+/*
+ * Tape Data Encryption Supported Key Formats page (0x0011).
+ */
+struct tde_supported_key_formats_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t key_formats_list[];
+};
+
+/*
+ * Tape Data Encryption Management Capabilities page (0x0012).
+ */
+struct tde_data_enc_man_cap_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t byte4;
+#define	TDE_DEMC_LOCK_C		0x01
+	uint8_t byte5;
+#define	TDE_DEMC_CKOD_C		0x04
+#define	TDE_DEMC_CKORP_C	0x02
+#define	TDE_DEMC_CKORL_C	0x01
+	uint8_t reserved1;
+	uint8_t byte7;
+#define	TDE_DEMC_AITN_C		0x04
+#define	TDE_DEMC_LOCAL_C	0x02
+#define	TDE_DEMC_PUBLIC_C	0x01
+	uint8_t reserved2[8];
+};
+
+/*
+ * Tape Data Encryption Status Page (0x0020).
+ */
+struct tde_data_enc_status_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t scope;
+#define	TDE_DES_IT_NEXUS_SCOPE_MASK	0xe0
+#define	TDE_DES_LBE_SCOPE_MASK		0x07
+	uint8_t encryption_mode;
+	uint8_t decryption_mode;
+	uint8_t algo_index;
+	uint8_t key_instance_counter[4];
+	uint8_t byte12;
+#define	TDE_DES_PARAM_CTRL_MASK		0x70
+#define	TDE_DES_PARAM_CTRL_MGMT		0x40
+#define	TDE_DES_PARAM_CTRL_CHANGER	0x30
+#define	TDE_DES_PARAM_CTRL_DRIVE	0x20
+#define	TDE_DES_PARAM_CTRL_EXT		0x10
+#define	TDE_DES_PARAM_CTRL_NOT_REPORTED	0x00
+#define	TDE_DES_VCELB			0x08
+#define	TDE_DES_CEEMS_MASK		0x06
+#define	TDE_DES_RDMD			0x01
+	uint8_t enc_params_kad_format;
+	uint8_t asdk_count[2];
+	uint8_t reserved[8];
+	uint8_t key_assoc_data_desc[];
+};
+
+/*
+ * Tape Data Encryption Next Block Encryption Status page (0x0021).
+ */
+struct tde_next_block_enc_status_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t logical_obj_number[8];
+	uint8_t status;
+#define	TDE_NBES_COMP_STATUS_MASK	0xf0
+#define	TDE_NBES_COMP_INCAPABLE		0x00
+#define	TDE_NBES_COMP_NOT_YET		0x10
+#define	TDE_NBES_COMP_NOT_A_BLOCK	0x20
+#define	TDE_NBES_COMP_NOT_COMPRESSED	0x30
+#define	TDE_NBES_COMP_COMPRESSED	0x40
+#define	TDE_NBES_ENC_STATUS_MASK	0x0f
+#define	TDE_NBES_ENC_INCAPABLE		0x00
+#define	TDE_NBES_ENC_NOT_YET		0x01
+#define	TDE_NBES_ENC_NOT_A_BLOCK	0x02
+#define	TDE_NBES_ENC_NOT_ENCRYPTED	0x03
+#define	TDE_NBES_ENC_ALG_NOT_SUPPORTED	0x04
+#define	TDE_NBES_ENC_SUPPORTED_ALG	0x05
+#define	TDE_NBES_ENC_NO_KEY		0x06
+	uint8_t algo_index;
+	uint8_t byte14;
+#define	TDE_NBES_EMES			0x02
+#define	TDE_NBES_RDMDS			0x01
+	uint8_t next_block_kad_format;
+	uint8_t key_assoc_data_desc[];
+};
+
+/*
+ * Tape Data Encryption Get Encryption Management Attributes page (0x0022).
+ */
+struct tde_get_enc_man_attr_page {
+	uint8_t page_code[2];
+	uint8_t reserved[3];
+	uint8_t byte5;
+#define	TDE_GEMA_CAOD			0x01
+	uint8_t page_length[2];
+	uint8_t enc_mgmt_attr_desc[];
+};
+
+/*
+ * Tape Data Encryption Random Number page (0x0030).
+ */
+struct tde_random_num_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t random_number[32];
+};
+
+/*
+ * Tape Data Encryption Device Server Key Wrapping Public Key page (0x0031).
+ */
+struct tde_key_wrap_pk_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t public_key_type[4];
+	uint8_t public_key_format[4];
+	uint8_t public_key_length[2];
+	uint8_t public_key[];
+};
+
+/*
+ * Security Protocol Specific values for the Tape Data Encryption protocol
+ * (0x20) used with SECURITY PROTOCOL OUT.  See above for values used with
+ * SECURITY PROTOCOL IN.  Current as of SSCr03.
+ */
+#define	TDE_SET_DATA_ENC_PAGE		0x0010
+#define	TDE_SA_ENCAP_PAGE		0x0011
+#define	TDE_SET_ENC_MGMT_ATTR_PAGE	0x0022
+
+/*
+ * Tape Data Encryption Set Data Encryption page (0x0010).
+ */
+struct tde_set_data_enc_page {
+	uint8_t page_code[2];
+	uint8_t page_length[2];
+	uint8_t byte4;
+#define	TDE_SDE_SCOPE_MASK		0xe0
+#define	TDE_SDE_SCOPE_ALL_IT_NEXUS	0x80
+#define	TDE_SDE_SCOPE_LOCAL		0x40
+#define	TDE_SDE_SCOPE_PUBLIC		0x00
+#define	TDE_SDE_LOCK			0x01
+	uint8_t byte5;
+#define	TDE_SDE_CEEM_MASK		0xc0
+#define	TDE_SDE_CEEM_ENCRYPT		0xc0
+#define	TDE_SDE_CEEM_EXTERNAL		0x80
+#define	TDE_SDE_CEEM_NO_CHECK		0x40
+#define	TDE_SDE_RDMC_MASK		0x30
+#define	TDE_SDE_RDMC_DISABLED		0x30
+#define	TDE_SDE_RDMC_ENABLED		0x20
+#define	TDE_SDE_RDMC_DEFAULT		0x00
+#define	TDE_SDE_SDK			0x08
+#define	TDE_SDE_CKOD			0x04
+#define	TDE_SDE_CKORP			0x02
+#define	TDE_SDE_CKORL			0x01
+	uint8_t encryption_mode;
+#define	TDE_SDE_ENC_MODE_DISABLE	0x00
+#define	TDE_SDE_ENC_MODE_EXTERNAL	0x01
+#define	TDE_SDE_ENC_MODE_ENCRYPT	0x02
+	uint8_t decryption_mode;
+#define	TDE_SDE_DEC_MODE_DISABLE	0x00
+#define	TDE_SDE_DEC_MODE_RAW		0x01
+#define	TDE_SDE_DEC_MODE_DECRYPT	0x02
+#define	TDE_SDE_DEC_MODE_MIXED		0x03
+	uint8_t algo_index;
+	uint8_t lbe_key_format;
+#define	TDE_SDE_KEY_PLAINTEXT		0x00
+#define	TDE_SDE_KEY_VENDOR_SPEC		0x01
+#define	TDE_SDE_KEY_PUBLIC_WRAP		0x02
+#define	TDE_SDE_KEY_ESP_SCSI		0x03
+	uint8_t kad_format;
+#define	TDE_SDE_KAD_ASCII		0x02
+#define	TDE_SDE_KAD_BINARY		0x01
+#define	TDE_SDE_KAD_UNSPECIFIED		0x00
+	uint8_t reserved[7];
+	uint8_t lbe_key_length[2];
+	uint8_t lbe_key[];
+};
+
+/*
+ * Used for the Vendor Specific key format (0x01).
+ */
+struct tde_key_format_vendor {
+	uint8_t t10_vendor_id[8];
+	uint8_t vendor_key[];
+};
+
+/*
+ * Used for the public key wrapped format (0x02).
+ */
+struct tde_key_format_public_wrap {
+	uint8_t parameter_set[2];
+#define	TDE_PARAM_SET_RSA2048		0x0000
+#define	TDE_PARAM_SET_ECC521		0x0010
+	uint8_t label_length[2];
+	uint8_t label[];
+};
+
+/*
+ * Tape Data Encryption SA Encapsulation page (0x0011).
+ */
+struct tde_sa_encap_page {
+	uint8_t page_code[2];
+	uint8_t data_desc[];
+};
+
+/*
+ * Tape Data Encryption Set Encryption Management Attributes page (0x0022).
+ */
+struct tde_set_enc_mgmt_attr_page {
+	uint8_t page_code[2];
+	uint8_t reserved[3];
+	uint8_t byte5;
+#define	TDE_SEMA_CAOD			0x01
+	uint8_t page_length[2];
+	uint8_t attr_desc[];
+};
+
+/*
+ * Tape Data Encryption descriptor format.
+ * SSC4r03 Section 8.5.4.2.1 Table 197
+ */
+struct tde_data_enc_desc {
+	uint8_t key_desc_type;
+#define	TDE_KEY_DESC_WK_KAD		0x04
+#define	TDE_KEY_DESC_M_KAD		0x03
+#define	TDE_KEY_DESC_NONCE_VALUE	0x02
+#define	TDE_KEY_DESC_A_KAD		0x01
+#define	TDE_KEY_DESC_U_KAD		0x00
+	uint8_t byte2;
+#define	TDE_KEY_DESC_AUTH_MASK		0x07
+#define	TDE_KEY_DESC_AUTH_FAILED	0x04
+#define	TDE_KEY_DESC_AUTH_SUCCESS	0x03
+#define	TDE_KEY_DESC_AUTH_NO_ATTEMPT	0x02
+#define	TDE_KEY_DESC_AUTH_U_KAD		0x01
+	uint8_t key_desc_length[2];
+	uint8_t key_desc[];
+};
+
+/*
+ * Wrapped Key descriptor format.
+ * SSC4r03 Section 8.5.4.3.1 Table 200
+ */
+struct tde_wrapped_key_desc {
+	uint8_t wrapped_key_type;
+#define	TDE_WRAP_KEY_DESC_LENGTH	0x04
+#define	TDE_WRAP_KEY_DESC_IDENT		0x03
+#define	TDE_WRAP_KEY_DESC_INFO		0x02
+#define	TDE_WRAP_KEY_DESC_ENTITY_ID	0x01
+#define	TDE_WRAP_KEY_DESC_DEVICE_ID	0x00
+	uint8_t reserved;
+	uint8_t wrapped_desc_length[2];
+	uint8_t wrapped_desc[];
+};
+
+/*
+ * Encryption management attributes descriptor format.
+ * SSC4r03 Section 8.5.4.4.1 Table 202
+ */
+struct tde_enc_mgmt_attr_desc {
+	uint8_t enc_mgmt_attr_type[2];
+#define	TDE_EMAD_DESIRED_KEY_MGR_OP	0x0000
+#define	TDE_EMAD_LOG_BLOCK_ENC_KEY_CRIT	0x0001
+#define	TDE_EMAD_LOG_BLOCK_ENC_KEY_WRAP	0x0002
+	uint8_t reserved;
+	uint8_t byte2;
+#define	TDE_EMAD_CRIT			0x80
+	uint8_t attr_length[2];
+	uint8_t attributes[];
+#define	TDE_EMAD_DESIRED_KEY_CREATE	0x0001
+#define	TDE_EMAD_DESIRED_KEY_RESOLVE	0x0002
+};
+
+/*
+ * Logical block encryption key selection criteria descriptor format.
+ * SSC4r03 Section 8.5.4.4.3.1 Table 206
+ */
+struct tde_lb_enc_key_sel_desc {
+	uint8_t lbe_key_sel_crit_type[2];
+	/*
+	 * The CRIT bit is the top bit of the first byte of the type.
+	 */
+#define	TDE_LBE_KEY_SEL_CRIT		0x80
+#define	TDE_LBE_KEY_SEL_ALGO		0x0001
+#define	TDE_LBE_KEY_SEL_ID		0x0002
+	uint8_t lbe_key_sel_crit_length[2];
+	uint8_t lbe_key_sel_crit[];
+};
+
+/*
+ * Logical block encryption key wrapping attribute descriptor format.
+ * SSC4r03 Section 8.5.4.4.4.1 Table 209
+ */
+struct tde_lb_enc_key_wrap_desc {
+	uint8_t lbe_key_wrap_type[2];
+	/*
+	 * The CRIT bit is the top bit of the first byte of the type.
+	 */
+#define	TDE_LBE_KEY_WRAP_CRIT		0x80
+#define	TDE_LBE_KEY_WRAP_KEKS		0x0001
+	uint8_t lbe_key_wrap_length[2];
+	uint8_t lbe_key_wrap_attr[];
+};
+
+/*
  * Opcodes
  */
 #define REWIND			0x01
+#define FORMAT_MEDIUM		0x04
 #define READ_BLOCK_LIMITS	0x05
 #define SA_READ			0x08
 #define SA_WRITE		0x0A
+#define SET_CAPACITY		0x0B
 #define WRITE_FILEMARKS		0x10
 #define SPACE			0x11
 #define RESERVE_UNIT		0x16
@@ -273,6 +946,9 @@
 #define LOAD_UNLOAD		0x1B
 #define	LOCATE			0x2B
 #define	READ_POSITION		0x34
+#define	REPORT_DENSITY_SUPPORT	0x44
+#define	ALLOW_OVERWRITE		0x82
+#define	LOCATE_16		0x92
 
 /*
  * Tape specific density codes- only enough of them here to recognize
@@ -352,11 +1028,55 @@
                            u_int8_t tag_action, int hardsoft,
                            struct scsi_tape_position_data *sbp,
                            u_int8_t sense_len, u_int32_t timeout);
+void	scsi_read_position_10(struct ccb_scsiio *csio, u_int32_t retries,
+			      void (*cbfcnp)(struct cam_periph *, union ccb *),
+			      u_int8_t tag_action, int service_action,
+			      u_int8_t *data_ptr, u_int32_t length,
+			      u_int32_t sense_len, u_int32_t timeout);
 
 void	scsi_set_position(struct ccb_scsiio *csio, u_int32_t retries,
                          void (*cbfcnp)(struct cam_periph *, union ccb *),
                          u_int8_t tag_action, int hardsoft, u_int32_t blkno,
                          u_int8_t sense_len, u_int32_t timeout);
+
+void	scsi_locate_10(struct ccb_scsiio *csio, u_int32_t retries,
+		       void (*cbfcnp)(struct cam_periph *, union ccb *),
+		       u_int8_t tag_action, int immed, int cp, int hard,
+		       int64_t partition, u_int32_t block_address,
+		       int sense_len, u_int32_t timeout);
+
+void	scsi_locate_16(struct ccb_scsiio *csio, u_int32_t retries,
+		       void (*cbfcnp)(struct cam_periph *, union ccb *),
+		       u_int8_t tag_action, int immed, int cp,
+		       u_int8_t dest_type, int bam, int64_t partition,
+		       u_int64_t logical_id, int sense_len,
+		       u_int32_t timeout);
+
+void	scsi_report_density_support(struct ccb_scsiio *csio, u_int32_t retries,
+				    void (*cbfcnp)(struct cam_periph *,
+						   union ccb *),
+				    u_int8_t tag_action, int media,
+				    int medium_type, u_int8_t *data_ptr,
+				    u_int32_t length, u_int32_t sense_len,
+				    u_int32_t timeout);
+
+void	scsi_set_capacity(struct ccb_scsiio *csio, u_int32_t retries,
+			  void (*cbfcnp)(struct cam_periph *, union ccb *),
+			  u_int8_t tag_action, int byte1, u_int32_t proportion,
+			  u_int32_t sense_len, u_int32_t timeout);
+
+void	scsi_format_medium(struct ccb_scsiio *csio, u_int32_t retries,
+			   void (*cbfcnp)(struct cam_periph *, union ccb *),
+			   u_int8_t tag_action, int byte1, int byte2, 
+			   u_int8_t *data_ptr, u_int32_t length,
+			   u_int32_t sense_len, u_int32_t timeout);
+
+void	scsi_allow_overwrite(struct ccb_scsiio *csio, u_int32_t retries,
+			     void (*cbfcnp)(struct cam_periph *, union ccb *),
+			     u_int8_t tag_action, int allow_overwrite,
+			     int partition, u_int64_t logical_id,
+			     u_int32_t sense_len, u_int32_t timeout);
+
 __END_DECLS
 
 #endif /* _SCSI_SCSI_SA_H */
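
As a usage illustration (editorial sketch, not part of this commit): the Set Data Encryption page defined above would be built in a buffer and handed to the drive with SECURITY PROTOCOL OUT using protocol 0x20. The helper below is hypothetical; it assumes the usual SSC convention that page_length counts the bytes following byte 3, and it uses the plaintext key format for brevity.

	/*
	 * Hypothetical helper: build a Set Data Encryption page enabling
	 * both encryption and decryption with a plaintext key.  The
	 * page_length convention is an assumption; verify against SSC-4.
	 */
	static int
	tde_build_set_data_enc(uint8_t *buf, size_t len, uint8_t algo_index,
	    const uint8_t *key, uint16_t key_len)
	{
		struct tde_set_data_enc_page *page;

		if (len < sizeof(*page) + key_len)
			return (EINVAL);
		page = (struct tde_set_data_enc_page *)buf;
		bzero(page, sizeof(*page) + key_len);
		scsi_ulto2b(TDE_SET_DATA_ENC_PAGE, page->page_code);
		scsi_ulto2b(sizeof(*page) - 4 + key_len, page->page_length);
		page->byte4 = TDE_SDE_SCOPE_ALL_IT_NEXUS;
		page->encryption_mode = TDE_SDE_ENC_MODE_ENCRYPT;
		page->decryption_mode = TDE_SDE_DEC_MODE_DECRYPT;
		page->algo_index = algo_index;
		page->lbe_key_format = TDE_SDE_KEY_PLAINTEXT;
		page->kad_format = TDE_SDE_KAD_UNSPECIFIED;
		scsi_ulto2b(key_len, page->lbe_key_length);
		memcpy(page->lbe_key, key, key_len);
		return (0);
	}

A real consumer would normally read the Data Encryption Capabilities page (0x0010, SECURITY PROTOCOL IN) first to pick a valid algorithm index.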

Modified: trunk/sys/cam/scsi/scsi_ses.h
===================================================================
--- trunk/sys/cam/scsi/scsi_ses.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_ses.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,4 +1,5 @@
 /* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/cam/scsi/scsi_ses.h 309042 2016-11-23 09:10:45Z mav $ */
 /*-
  * Copyright (c) 2000 by Matthew Jacob
  * All rights reserved.
@@ -2413,7 +2414,8 @@
 
 struct ses_elm_addlstatus_eip_hdr {
 	struct ses_elm_addlstatus_base_hdr base;
-	uint8_t reserved;
+	uint8_t byte2;
+#define	SES_ADDL_EIP_EIIOE	1
 	uint8_t element_index;
 	/* NB: This define (currently) applies to all eip=1 headers */
 #define	SES_EIP_HDR_EXTRA_LEN	2

Modified: trunk/sys/cam/scsi/scsi_sg.c
===================================================================
--- trunk/sys/cam/scsi/scsi_sg.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_sg.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2007 Scott Long
  * All rights reserved.
@@ -30,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_sg.c 294978 2016-01-28 09:25:15Z kib $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -76,8 +77,7 @@
 } sg_rdwr_state;
 
 typedef enum {
-	SG_CCB_RDWR_IO,
-	SG_CCB_WAITING
+	SG_CCB_RDWR_IO
 } sg_ccb_types;
 
 #define ccb_type	ppriv_field0
@@ -100,6 +100,7 @@
 	sg_state		state;
 	sg_flags		flags;
 	int			open_count;
+	u_int			maxio;
 	struct devstat		*device_stats;
 	TAILQ_HEAD(, sg_rdwr)	rdwr_done;
 	struct cdev		*dev;
@@ -119,7 +120,6 @@
 static periph_ctor_t	sgregister;
 static periph_oninv_t	sgoninvalidate;
 static periph_dtor_t	sgcleanup;
-static periph_start_t	sgstart;
 static void		sgasync(void *callback_arg, uint32_t code,
 				struct cam_path *path, void *arg);
 static void		sgdone(struct cam_periph *periph, union ccb *done_ccb);
@@ -172,20 +172,19 @@
 static void
 sgdevgonecb(void *arg)
 {
-	struct cam_sim    *sim;
 	struct cam_periph *periph;
 	struct sg_softc *softc;
+	struct mtx *mtx;
 	int i;
 
 	periph = (struct cam_periph *)arg;
-	sim = periph->sim;
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
+
 	softc = (struct sg_softc *)periph->softc;
-
 	KASSERT(softc->open_count >= 0, ("Negative open count %d",
 		softc->open_count));
 
-	mtx_lock(sim->mtx);
-
 	/*
 	 * When we get this callback, we will get no more close calls from
 	 * devfs.  So if we have any dangling opens, we need to release the
@@ -202,13 +201,13 @@
 	cam_periph_release_locked(periph);
 
 	/*
-	 * We reference the SIM lock directly here, instead of using
+	 * We reference the lock directly here, instead of using
 	 * cam_periph_unlock().  The reason is that the final call to
 	 * cam_periph_release_locked() above could result in the periph
 	 * getting freed.  If that is the case, dereferencing the periph
 	 * with a cam_periph_unlock() call would cause a page fault.
 	 */
-	mtx_unlock(sim->mtx);
+	mtx_unlock(mtx);
 }
 
 
@@ -238,9 +237,6 @@
 	 *     with XPT_ABORT_CCB.
 	 */
 
-	if (bootverbose) {
-		xpt_print(periph->path, "lost device\n");
-	}
 }
 
 static void
@@ -249,8 +245,6 @@
 	struct sg_softc *softc;
 
 	softc = (struct sg_softc *)periph->softc;
-	if (bootverbose)
-		xpt_print(periph->path, "removing device entry\n");
 
 	devstat_remove_entry(softc->device_stats);
 
@@ -282,8 +276,8 @@
 		 * start the probe process.
 		 */
 		status = cam_periph_alloc(sgregister, sgoninvalidate,
-					  sgcleanup, sgstart, "sg",
-					  CAM_PERIPH_BIO, cgd->ccb_h.path,
+					  sgcleanup, NULL, "sg",
+					  CAM_PERIPH_BIO, path,
 					  sgasync, AC_FOUND_DEVICE, cgd);
 		if ((status != CAM_REQ_CMP) && (status != CAM_REQ_INPROG)) {
 			const struct cam_status_entry *entry;
@@ -307,7 +301,8 @@
 	struct sg_softc *softc;
 	struct ccb_getdev *cgd;
 	struct ccb_pathinq cpi;
-	int no_tags;
+	struct make_dev_args args;
+	int no_tags, error;
 
 	cgd = (struct ccb_getdev *)arg;
 	if (cgd == NULL) {
@@ -333,6 +328,13 @@
 	cpi.ccb_h.func_code = XPT_PATH_INQ;
 	xpt_action((union ccb *)&cpi);
 
+	if (cpi.maxio == 0)
+		softc->maxio = DFLTPHYS;	/* traditional default */
+	else if (cpi.maxio > MAXPHYS)
+		softc->maxio = MAXPHYS;		/* for safety */
+	else
+		softc->maxio = cpi.maxio;	/* real value */
+
 	/*
 	 * We pass in 0 for all blocksize, since we don't know what the
 	 * blocksize of the device is, if it even has a blocksize.
@@ -361,9 +363,20 @@
 	}
 
 	/* Register the device */
-	softc->dev = make_dev(&sg_cdevsw, periph->unit_number,
-			      UID_ROOT, GID_OPERATOR, 0600, "%s%d",
-			      periph->periph_name, periph->unit_number);
+	make_dev_args_init(&args);
+	args.mda_devsw = &sg_cdevsw;
+	args.mda_unit = periph->unit_number;
+	args.mda_uid = UID_ROOT;
+	args.mda_gid = GID_OPERATOR;
+	args.mda_mode = 0600;
+	args.mda_si_drv1 = periph;
+	error = make_dev_s(&args, &softc->dev, "%s%d",
+	    periph->periph_name, periph->unit_number);
+	if (error != 0) {
+		cam_periph_lock(periph);
+		cam_periph_release_locked(periph);
+		return (CAM_REQ_CMP_ERR);
+	}
 	if (periph->unit_number < 26) {
 		(void)make_dev_alias(softc->dev, "sg%c",
 		    periph->unit_number + 'a');
@@ -373,7 +386,6 @@
 		    (periph->unit_number % 26) + 'a');
 	}
 	cam_periph_lock(periph);
-	softc->dev->si_drv1 = periph;
 
 	/*
 	 * Add as async callback so that we get
@@ -388,24 +400,6 @@
 }
 
 static void
-sgstart(struct cam_periph *periph, union ccb *start_ccb)
-{
-	struct sg_softc *softc;
-
-	softc = (struct sg_softc *)periph->softc;
-
-	switch (softc->state) {
-	case SG_STATE_NORMAL:
-		start_ccb->ccb_h.ccb_type = SG_CCB_WAITING;
-		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-				  periph_links.sle);
-		periph->immediate_priority = CAM_PRIORITY_NONE;
-		wakeup(&periph->ccb_list);
-		break;
-	}
-}
-
-static void
 sgdone(struct cam_periph *periph, union ccb *done_ccb)
 {
 	struct sg_softc *softc;
@@ -414,10 +408,6 @@
 	softc = (struct sg_softc *)periph->softc;
 	csio = &done_ccb->csio;
 	switch (csio->ccb_h.ccb_type) {
-	case SG_CCB_WAITING:
-		/* Caller will release the CCB */
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
 	case SG_CCB_RDWR_IO:
 	{
 		struct sg_rdwr *rdwr;
@@ -451,9 +441,6 @@
 	int error = 0;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return (ENXIO);
-
 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 		return (ENXIO);
 
@@ -485,25 +472,21 @@
 static int
 sgclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 {
-	struct cam_sim    *sim;
 	struct cam_periph *periph;
 	struct sg_softc   *softc;
+	struct mtx *mtx;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return (ENXIO);
+	mtx = cam_periph_mtx(periph);
+	mtx_lock(mtx);
 
-	sim = periph->sim;
 	softc = periph->softc;
-
-	mtx_lock(sim->mtx);
-
 	softc->open_count--;
 
 	cam_periph_release_locked(periph);
 
 	/*
-	 * We reference the SIM lock directly here, instead of using
+	 * We reference the lock directly here, instead of using
 	 * cam_periph_unlock().  The reason is that the call to
 	 * cam_periph_release_locked() above could result in the periph
 	 * getting freed.  If that is the case, dereferencing the periph
@@ -514,7 +497,7 @@
 	 * protect the open count and avoid another lock acquisition and
 	 * release.
 	 */
-	mtx_unlock(sim->mtx);
+	mtx_unlock(mtx);
 
 	return (0);
 }
@@ -526,13 +509,10 @@
 	struct ccb_scsiio *csio;
 	struct cam_periph *periph;
 	struct sg_softc *softc;
-	struct sg_io_hdr req;
+	struct sg_io_hdr *req;
 	int dir, error;
 
 	periph = (struct cam_periph *)dev->si_drv1;
-	if (periph == NULL)
-		return (ENXIO);
-
 	cam_periph_lock(periph);
 
 	softc = (struct sg_softc *)periph->softc;
@@ -539,40 +519,22 @@
 	error = 0;
 
 	switch (cmd) {
-	case LINUX_SCSI_GET_BUS_NUMBER: {
-		int busno;
+	case SG_GET_VERSION_NUM:
+	{
+		int *version = (int *)arg;
 
-		busno = xpt_path_path_id(periph->path);
-		error = copyout(&busno, arg, sizeof(busno));
+		*version = sg_version;
 		break;
 	}
-	case LINUX_SCSI_GET_IDLUN: {
-		struct scsi_idlun idlun;
-		struct cam_sim *sim;
-
-		idlun.dev_id = xpt_path_target_id(periph->path);
-		sim = xpt_path_sim(periph->path);
-		idlun.host_unique_id = sim->unit_number;
-		error = copyout(&idlun, arg, sizeof(idlun));
-		break;
-	}
-	case SG_GET_VERSION_NUM:
-	case LINUX_SG_GET_VERSION_NUM:
-		error = copyout(&sg_version, arg, sizeof(sg_version));
-		break;
 	case SG_SET_TIMEOUT:
-	case LINUX_SG_SET_TIMEOUT: {
-		u_int user_timeout;
+	{
+		u_int user_timeout = *(u_int *)arg;
 
-		error = copyin(arg, &user_timeout, sizeof(u_int));
-		if (error == 0) {
-			softc->sg_user_timeout = user_timeout;
-			softc->sg_timeout = user_timeout / SG_DEFAULT_HZ * hz;
-		}
+		softc->sg_user_timeout = user_timeout;
+		softc->sg_timeout = user_timeout / SG_DEFAULT_HZ * hz;
 		break;
 	}
 	case SG_GET_TIMEOUT:
-	case LINUX_SG_GET_TIMEOUT:
 		/*
 		 * The value is returned directly to the syscall.
 		 */
@@ -580,17 +542,14 @@
 		error = 0;
 		break;
 	case SG_IO:
-	case LINUX_SG_IO:
-		error = copyin(arg, &req, sizeof(req));
-		if (error)
-			break;
+		req = (struct sg_io_hdr *)arg;
 
-		if (req.cmd_len > IOCDBLEN) {
+		if (req->cmd_len > IOCDBLEN) {
 			error = EINVAL;
 			break;
 		}
 
-		if (req.iovec_count != 0) {
+		if (req->iovec_count != 0) {
 			error = EOPNOTSUPP;
 			break;
 		}
@@ -598,14 +557,14 @@
 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 		csio = &ccb->csio;
 
-		error = copyin(req.cmdp, &csio->cdb_io.cdb_bytes,
-		    req.cmd_len);
+		error = copyin(req->cmdp, &csio->cdb_io.cdb_bytes,
+		    req->cmd_len);
 		if (error) {
 			xpt_release_ccb(ccb);
 			break;
 		}
 
-		switch(req.dxfer_direction) {
+		switch(req->dxfer_direction) {
 		case SG_DXFER_TO_DEV:
 			dir = CAM_DIR_OUT;
 			break;
@@ -626,33 +585,32 @@
 			      sgdone,
 			      dir|CAM_DEV_QFRZDIS,
 			      MSG_SIMPLE_Q_TAG,
-			      req.dxferp,
-			      req.dxfer_len,
-			      req.mx_sb_len,
-			      req.cmd_len,
-			      req.timeout);
+			      req->dxferp,
+			      req->dxfer_len,
+			      req->mx_sb_len,
+			      req->cmd_len,
+			      req->timeout);
 
 		error = sgsendccb(periph, ccb);
 		if (error) {
-			req.host_status = DID_ERROR;
-			req.driver_status = DRIVER_INVALID;
+			req->host_status = DID_ERROR;
+			req->driver_status = DRIVER_INVALID;
 			xpt_release_ccb(ccb);
 			break;
 		}
 
-		req.status = csio->scsi_status;
-		req.masked_status = (csio->scsi_status >> 1) & 0x7f;
-		sg_scsiio_status(csio, &req.host_status, &req.driver_status);
-		req.resid = csio->resid;
-		req.duration = csio->ccb_h.timeout;
-		req.info = 0;
+		req->status = csio->scsi_status;
+		req->masked_status = (csio->scsi_status >> 1) & 0x7f;
+		sg_scsiio_status(csio, &req->host_status, &req->driver_status);
+		req->resid = csio->resid;
+		req->duration = csio->ccb_h.timeout;
+		req->info = 0;
 
-		error = copyout(&req, arg, sizeof(req));
-		if ((error == 0) && (csio->ccb_h.status & CAM_AUTOSNS_VALID)
-		    && (req.sbp != NULL)) {
-			req.sb_len_wr = req.mx_sb_len - csio->sense_resid;
-			error = copyout(&csio->sense_data, req.sbp,
-					req.sb_len_wr);
+		if ((csio->ccb_h.status & CAM_AUTOSNS_VALID)
+		    && (req->sbp != NULL)) {
+			req->sb_len_wr = req->mx_sb_len - csio->sense_resid;
+			error = copyout(&csio->sense_data, req->sbp,
+					req->sb_len_wr);
 		}
 
 		xpt_release_ccb(ccb);
@@ -659,29 +617,32 @@
 		break;
 		
 	case SG_GET_RESERVED_SIZE:
-	case LINUX_SG_GET_RESERVED_SIZE: {
-		int size = 32768;
-
-		error = copyout(&size, arg, sizeof(size));
+	{
+		int *size = (int *)arg;
+		*size = DFLTPHYS;
 		break;
 	}
 
 	case SG_GET_SCSI_ID:
-	case LINUX_SG_GET_SCSI_ID:
 	{
-		struct sg_scsi_id id;
+		struct sg_scsi_id *id = (struct sg_scsi_id *)arg;
 
-		id.host_no = cam_sim_path(xpt_path_sim(periph->path));
-		id.channel = xpt_path_path_id(periph->path);
-		id.scsi_id = xpt_path_target_id(periph->path);
-		id.lun = xpt_path_lun_id(periph->path);
-		id.scsi_type = softc->pd_type;
-		id.h_cmd_per_lun = 1;
-		id.d_queue_depth = 1;
-		id.unused[0] = 0;
-		id.unused[1] = 0;
+		id->host_no = cam_sim_path(xpt_path_sim(periph->path));
+		id->channel = xpt_path_path_id(periph->path);
+		id->scsi_id = xpt_path_target_id(periph->path);
+		id->lun = xpt_path_lun_id(periph->path);
+		id->scsi_type = softc->pd_type;
+		id->h_cmd_per_lun = 1;
+		id->d_queue_depth = 1;
+		id->unused[0] = 0;
+		id->unused[1] = 0;
+		break;
+	}
 
-		error = copyout(&id, arg, sizeof(id));
+	case SG_GET_SG_TABLESIZE:
+	{
+		int *size = (int *)arg;
+		*size = 0;
 		break;
 	}
 
@@ -696,7 +657,6 @@
 	case SG_GET_ACCESS_COUNT:
 	case SG_SET_FORCE_LOW_DMA:
 	case SG_GET_LOW_DMA:
-	case SG_GET_SG_TABLESIZE:
 	case SG_SET_FORCE_PACK_ID:
 	case SG_GET_PACK_ID:
 	case SG_SET_RESERVED_SIZE:
@@ -704,25 +664,6 @@
 	case SG_SET_COMMAND_Q:
 	case SG_SET_DEBUG:
 	case SG_NEXT_CMD_LEN:
-	case LINUX_SG_EMULATED_HOST:
-	case LINUX_SG_SET_TRANSFORM:
-	case LINUX_SG_GET_TRANSFORM:
-	case LINUX_SG_GET_NUM_WAITING:
-	case LINUX_SG_SCSI_RESET:
-	case LINUX_SG_GET_REQUEST_TABLE:
-	case LINUX_SG_SET_KEEP_ORPHAN:
-	case LINUX_SG_GET_KEEP_ORPHAN:
-	case LINUX_SG_GET_ACCESS_COUNT:
-	case LINUX_SG_SET_FORCE_LOW_DMA:
-	case LINUX_SG_GET_LOW_DMA:
-	case LINUX_SG_GET_SG_TABLESIZE:
-	case LINUX_SG_SET_FORCE_PACK_ID:
-	case LINUX_SG_GET_PACK_ID:
-	case LINUX_SG_SET_RESERVED_SIZE:
-	case LINUX_SG_GET_COMMAND_Q:
-	case LINUX_SG_SET_COMMAND_Q:
-	case LINUX_SG_SET_DEBUG:
-	case LINUX_SG_NEXT_CMD_LEN:
 	default:
 #ifdef CAMDEBUG
 		printf("sgioctl: rejecting cmd 0x%lx\n", cmd);
@@ -761,6 +702,12 @@
 	if (error)
 		goto out_hdr;
 
+	/* XXX: We don't support SG 3.x read/write API. */
+	if (hdr->reply_len < 0) {
+		error = ENODEV;
+		goto out_hdr;
+	}
+
 	ccb = xpt_alloc_ccb();
 	if (ccb == NULL) {
 		error = ENOMEM;
@@ -884,7 +831,7 @@
 			break;
 	}
 	if ((rdwr == NULL) || (rdwr->state != SG_RDWR_DONE)) {
-		if (msleep(rdwr, periph->sim->mtx, PCATCH, "sgread", 0) == ERESTART)
+		if (cam_periph_sleep(periph, rdwr, PCATCH, "sgread", 0) == ERESTART)
 			return (EAGAIN);
 		goto search;
 	}
@@ -946,25 +893,23 @@
 {
 	struct sg_softc *softc;
 	struct cam_periph_map_info mapinfo;
-	int error, need_unmap = 0;
+	int error;
 
 	softc = periph->softc;
-	if (((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)
-	    && (ccb->csio.data_ptr != NULL)) {
-		bzero(&mapinfo, sizeof(mapinfo));
+	bzero(&mapinfo, sizeof(mapinfo));
 
-		/*
-		 * cam_periph_mapmem calls into proc and vm functions that can
-		 * sleep as well as trigger I/O, so we can't hold the lock.
-		 * Dropping it here is reasonably safe.
-		 */
-		cam_periph_unlock(periph);
-		error = cam_periph_mapmem(ccb, &mapinfo);
-		cam_periph_lock(periph);
-		if (error)
-			return (error);
-		need_unmap = 1;
-	}
+	/*
+	 * cam_periph_mapmem calls into proc and vm functions that can
+	 * sleep as well as trigger I/O, so we can't hold the lock.
+	 * Dropping it here is reasonably safe.
+	 * The only CCB opcode that is possible here is XPT_SCSI_IO, no
+	 * need for additional checks.
+	 */
+	cam_periph_unlock(periph);
+	error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio);
+	cam_periph_lock(periph);
+	if (error)
+		return (error);
 
 	error = cam_periph_runccb(ccb,
 				  sgerror,
@@ -972,8 +917,7 @@
 				  SF_RETRY_UA,
 				  softc->device_stats);
 
-	if (need_unmap)
-		cam_periph_unmapmem(ccb, &mapinfo);
+	cam_periph_unmapmem(ccb, &mapinfo);
 
 	return (error);
 }
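
For orientation, the SG_IO path above now manipulates the sg_io_hdr in place because the ioctl definitions (see the scsi_sg.h hunks below) encode the argument size, letting the generic ioctl layer do the copyin/copyout. A hypothetical userland sketch follows, assuming the header is reachable as <cam/scsi/scsi_sg.h> and using the conventional Linux-style interface_id of 'S'; the pointer-typed members are assumed to mirror the uint32_t fields of sg_io_hdr32.

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <string.h>
	#include <cam/scsi/scsi_sg.h>

	/* Issue a TEST UNIT READY (opcode 0x00) through an sg node. */
	static int
	sg_test_unit_ready(int fd)
	{
		struct sg_io_hdr io;
		u_char cdb[6] = { 0x00 };
		u_char sense[32];

		memset(&io, 0, sizeof(io));
		io.interface_id = 'S';
		io.dxfer_direction = SG_DXFER_NONE;	/* no data phase */
		io.cmd_len = sizeof(cdb);	/* must not exceed IOCDBLEN */
		io.cmdp = cdb;
		io.mx_sb_len = sizeof(sense);
		io.sbp = sense;			/* autosense lands here */
		io.timeout = 5000;		/* milliseconds */
		if (ioctl(fd, SG_IO, &io) == -1)
			return (-1);
		return (io.masked_status);
	}

Note that, per the handler above, requests with iovec_count != 0 are rejected with EOPNOTSUPP.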

Modified: trunk/sys/cam/scsi/scsi_sg.h
===================================================================
--- trunk/sys/cam/scsi/scsi_sg.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_sg.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,7 +1,8 @@
+/* $MidnightBSD$ */
 /*
  * Structures and definitions for SCSI commands to the SG passthrough device.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_sg.h 268139 2014-07-02 10:16:12Z mav $
  */
 
 #ifndef _SCSI_SG_H
@@ -8,31 +9,31 @@
 #define _SCSI_SG_H
 
 #define SGIOC	'"'
-#define SG_SET_TIMEOUT		_IO(SGIOC, 0x01)
+#define SG_SET_TIMEOUT		_IOW(SGIOC, 0x01, u_int)
 #define SG_GET_TIMEOUT		_IO(SGIOC, 0x02)
-#define SG_EMULATED_HOST	_IO(SGIOC, 0x03)
+#define SG_EMULATED_HOST	_IOR(SGIOC, 0x03, int)
 #define SG_SET_TRANSFORM	_IO(SGIOC, 0x04)
 #define SG_GET_TRANSFORM	_IO(SGIOC, 0x05)
-#define SG_GET_COMMAND_Q	_IO(SGIOC, 0x70)
-#define SG_SET_COMMAND_Q	_IO(SGIOC, 0x71)
-#define SG_GET_RESERVED_SIZE	_IO(SGIOC, 0x72)
-#define SG_SET_RESERVED_SIZE	_IO(SGIOC, 0x75)
-#define SG_GET_SCSI_ID		_IO(SGIOC, 0x76)
-#define SG_SET_FORCE_LOW_DMA	_IO(SGIOC, 0x79)
-#define SG_GET_LOW_DMA		_IO(SGIOC, 0x7a)
-#define SG_SET_FORCE_PACK_ID	_IO(SGIOC, 0x7b)
-#define SG_GET_PACK_ID		_IO(SGIOC, 0x7c)
-#define SG_GET_NUM_WAITING	_IO(SGIOC, 0x7d)
-#define SG_SET_DEBUG		_IO(SGIOC, 0x7e)
-#define SG_GET_SG_TABLESIZE	_IO(SGIOC, 0x7f)
-#define SG_GET_VERSION_NUM	_IO(SGIOC, 0x82)
-#define SG_NEXT_CMD_LEN		_IO(SGIOC, 0x83)
-#define SG_SCSI_RESET		_IO(SGIOC, 0x84)
-#define SG_IO			_IO(SGIOC, 0x85)
+#define SG_GET_COMMAND_Q	_IOW(SGIOC, 0x70, int)
+#define SG_SET_COMMAND_Q	_IOR(SGIOC, 0x71, int)
+#define SG_GET_RESERVED_SIZE	_IOR(SGIOC, 0x72, int)
+#define SG_SET_RESERVED_SIZE	_IOW(SGIOC, 0x75, int)
+#define SG_GET_SCSI_ID		_IOR(SGIOC, 0x76, struct sg_scsi_id)
+#define SG_SET_FORCE_LOW_DMA	_IOW(SGIOC, 0x79, int)
+#define SG_GET_LOW_DMA		_IOR(SGIOC, 0x7a, int)
+#define SG_SET_FORCE_PACK_ID	_IOW(SGIOC, 0x7b, int)
+#define SG_GET_PACK_ID		_IOR(SGIOC, 0x7c, int)
+#define SG_GET_NUM_WAITING	_IOR(SGIOC, 0x7d, int)
+#define SG_SET_DEBUG		_IOW(SGIOC, 0x7e, int)
+#define SG_GET_SG_TABLESIZE	_IOR(SGIOC, 0x7f, int)
+#define SG_GET_VERSION_NUM	_IOR(SGIOC, 0x82, int)
+#define SG_NEXT_CMD_LEN		_IOW(SGIOC, 0x83, int)
+#define SG_SCSI_RESET		_IOW(SGIOC, 0x84, int)
+#define SG_IO			_IOWR(SGIOC, 0x85, struct sg_io_hdr)
 #define SG_GET_REQUEST_TABLE	_IO(SGIOC, 0x86)
-#define SG_SET_KEEP_ORPHAN	_IO(SGIOC, 0x87)
-#define SG_GET_KEEP_ORPHAN	_IO(SGIOC, 0x88)
-#define SG_GET_ACCESS_COUNT	_IO(SGIOC, 0x89)
+#define SG_SET_KEEP_ORPHAN	_IOW(SGIOC, 0x87, int)
+#define SG_GET_KEEP_ORPHAN	_IOR(SGIOC, 0x88, int)
+#define SG_GET_ACCESS_COUNT	_IOR(SGIOC, 0x89, int)
 
 struct sg_io_hdr {
 	int		interface_id;
@@ -59,6 +60,31 @@
 	u_int		info;
 };
 
+struct sg_io_hdr32 {
+	int		interface_id;
+	int		dxfer_direction;
+	u_char		cmd_len;
+	u_char		mx_sb_len;
+	u_short		iovec_count;
+	u_int		dxfer_len;
+	uint32_t	dxferp;
+	uint32_t	cmdp;
+	uint32_t	sbp;
+	u_int		timeout;
+	u_int		flags;
+	int		pack_id;
+	uint32_t	usr_ptr;
+	u_char		status;
+	u_char		masked_status;
+	u_char		msg_status;
+	u_char		sb_len_wr;
+	u_short		host_status;
+	u_short		driver_status;
+	int		resid;
+	u_int		duration;
+	u_int		info;
+};
+
 #define SG_DXFER_NONE		-1
 #define SG_DXFER_TO_DEV		-2
 #define SG_DXFER_FROM_DEV	-3
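
The new sg_io_hdr32 mirrors sg_io_hdr with its pointer members narrowed to uint32_t, the usual shape for a 32-bit compatibility shim on 64-bit kernels. A hypothetical request-widening helper is sketched below; any real conversion code lives outside this header, and the native field types are assumed from the 32-bit mirror.

	/* Hypothetical: widen a 32-bit request header to the native one. */
	static void
	sg_io_hdr_from32(struct sg_io_hdr *h, const struct sg_io_hdr32 *h32)
	{
		h->interface_id = h32->interface_id;
		h->dxfer_direction = h32->dxfer_direction;
		h->cmd_len = h32->cmd_len;
		h->mx_sb_len = h32->mx_sb_len;
		h->iovec_count = h32->iovec_count;
		h->dxfer_len = h32->dxfer_len;
		/* Zero-extend the 32-bit user addresses to native pointers. */
		h->dxferp = (void *)(uintptr_t)h32->dxferp;
		h->cmdp = (u_char *)(uintptr_t)h32->cmdp;
		h->sbp = (u_char *)(uintptr_t)h32->sbp;
		h->timeout = h32->timeout;
		h->flags = h32->flags;
		h->pack_id = h32->pack_id;
		h->usr_ptr = (void *)(uintptr_t)h32->usr_ptr;
		/*
		 * status, sb_len_wr, host_status, driver_status, resid,
		 * duration and info are outputs; they would be copied back
		 * in the opposite direction after completion.
		 */
	}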

Modified: trunk/sys/cam/scsi/scsi_targ_bh.c
===================================================================
--- trunk/sys/cam/scsi/scsi_targ_bh.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_targ_bh.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Implementation of the Target Mode 'Black Hole device' for CAM.
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_targ_bh.c 260387 2014-01-07 01:51:48Z scottl $");
 
 #include <sys/param.h>
 #include <sys/queue.h>
@@ -65,8 +66,7 @@
 } targbh_flags;
 
 typedef enum {
-	TARGBH_CCB_WORKQ,
-	TARGBH_CCB_WAITING
+	TARGBH_CCB_WORKQ
 } targbh_ccb_types;
 
 #define MAX_ACCEPT	8
@@ -283,16 +283,13 @@
 		xpt_setup_ccb(&atio->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
 		atio->ccb_h.cbfcnp = targbhdone;
+		((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
+		    softc->accept_tio_list;
+		softc->accept_tio_list = atio;
 		xpt_action((union ccb *)atio);
 		status = atio->ccb_h.status;
-		if (status != CAM_REQ_INPROG) {
-			targbhfreedescr(atio->ccb_h.ccb_descr);
-			free(atio, M_SCSIBH);
+		if (status != CAM_REQ_INPROG)
 			break;
-		}
-		((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
-		    softc->accept_tio_list;
-		softc->accept_tio_list = atio;
 	}
 
 	if (i == 0) {
@@ -308,10 +305,10 @@
 	 * so the SIM can tell us of asynchronous target mode events.
 	 */
 	for (i = 0; i < MAX_ACCEPT; i++) {
-		struct ccb_immed_notify *inot;
+		struct ccb_immediate_notify *inot;
 
-		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_SCSIBH,
-						        M_NOWAIT);
+		inot = (struct ccb_immediate_notify*)malloc(sizeof(*inot),
+			    M_SCSIBH, M_NOWAIT);
 
 		if (inot == NULL) {
 			status = CAM_RESRC_UNAVAIL;
@@ -319,16 +316,14 @@
 		}
 
 		xpt_setup_ccb(&inot->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
-		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
+		inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
 		inot->ccb_h.cbfcnp = targbhdone;
+		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
+				  periph_links.sle);
 		xpt_action((union ccb *)inot);
 		status = inot->ccb_h.status;
-		if (status != CAM_REQ_INPROG) {
-			free(inot, M_SCSIBH);
+		if (status != CAM_REQ_INPROG)
 			break;
-		}
-		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
-				  periph_links.sle);
 	}
 
 	if (i == 0) {
@@ -413,7 +408,9 @@
 	periph->softc = softc;
 	softc->init_level++;
 
-	return (targbhenlun(periph));
+	if (targbhenlun(periph) != CAM_REQ_CMP)
+		cam_periph_invalidate(periph);
+	return (CAM_REQ_CMP);
 }
 
 static void
@@ -434,7 +431,7 @@
 		/* FALLTHROUGH */
 	default:
 		/* XXX Wait for callback of targbhdislun() */
-		msleep(softc, periph->sim->mtx, PRIBIO, "targbh", hz/2);
+		cam_periph_sleep(periph, softc, PRIBIO, "targbh", hz/2);
 		free(softc, M_SCSIBH);
 		break;
 	}
@@ -453,13 +450,7 @@
 	softc = (struct targbh_softc *)periph->softc;
 	
 	ccbh = TAILQ_FIRST(&softc->work_queue);
-	if (periph->immediate_priority <= periph->pinfo.priority) {
-		start_ccb->ccb_h.ccb_type = TARGBH_CCB_WAITING;			
-		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
-				  periph_links.sle);
-		periph->immediate_priority = CAM_PRIORITY_NONE;
-		wakeup(&periph->ccb_list);
-	} else if (ccbh == NULL) {
+	if (ccbh == NULL) {
 		xpt_release_ccb(start_ccb);	
 	} else {
 		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
@@ -538,12 +529,6 @@
 
 	softc = (struct targbh_softc *)periph->softc;
 
-	if (done_ccb->ccb_h.ccb_type == TARGBH_CCB_WAITING) {
-		/* Caller will release the CCB */
-		wakeup(&done_ccb->ccb_h.cbfcnp);
-		return;
-	}
-
 	switch (done_ccb->ccb_h.func_code) {
 	case XPT_ACCEPT_TARGET_IO:
 	{
@@ -715,7 +700,7 @@
 		}
 		break;
 	}
-	case XPT_IMMED_NOTIFY:
+	case XPT_IMMEDIATE_NOTIFY:
 	{
 		int frozen;
 

Modified: trunk/sys/cam/scsi/scsi_target.c
===================================================================
--- trunk/sys/cam/scsi/scsi_target.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_target.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Generic SCSI Target Kernel Mode Driver
  *
@@ -28,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_target.c 288817 2015-10-05 11:45:28Z mav $");
 
 
 #include <sys/param.h>
@@ -94,14 +95,12 @@
 	struct cam_periph	*periph;
 	struct cam_path		*path;
 	targ_state		 state;
+	u_int			 maxio;
 	struct selinfo		 read_select;
 	struct devstat		 device_stats;
-	struct callout		destroy_dev_callout;
-	struct mtx		destroy_mtx;
 };
 
 static d_open_t		targopen;
-static d_close_t	targclose;
 static d_read_t		targread;
 static d_write_t	targwrite;
 static d_ioctl_t	targioctl;
@@ -119,7 +118,6 @@
 	.d_version =	D_VERSION,
 	.d_flags =	D_NEEDGIANT,
 	.d_open =	targopen,
-	.d_close =	targclose,
 	.d_read =	targread,
 	.d_write =	targwrite,
 	.d_ioctl =	targioctl,
@@ -152,8 +150,6 @@
 static struct targ_cmd_descr *
 			targgetdescr(struct targ_softc *softc);
 static periph_init_t	targinit;
-static void		targclone(void *arg, struct ucred *cred, char *name,
-				  int namelen, struct cdev **dev);
 static void		targasync(void *callback_arg, u_int32_t code,
 				  struct cam_path *path, void *arg);
 static void		abort_all_pending(struct targ_softc *softc);
@@ -160,7 +156,6 @@
 static void		notify_user(struct targ_softc *softc);
 static int		targcamstatus(cam_status status);
 static size_t		targccblen(xpt_opcode func_code);
-static void		targdestroy(void *);
 
 static struct periph_driver targdriver =
 {
@@ -171,66 +166,18 @@
 
 static MALLOC_DEFINE(M_TARG, "TARG", "TARG data");
 
-/*
- * Create softc and initialize it. Only one proc can open each targ device.
- * There is no locking here because a periph doesn't get created until an
- * ioctl is issued to do so, and that can't happen until this method returns.
- */
-static int
-targopen(struct cdev *dev, int flags, int fmt, struct thread *td)
+/* Disable LUN if enabled and teardown softc */
+static void
+targcdevdtor(void *data)
 {
 	struct targ_softc *softc;
+	struct cam_periph *periph;
 
-	if (dev->si_drv1 != 0) {
-		return (EBUSY);
-	}
-	
-	/* Mark device busy before any potentially blocking operations */
-	dev->si_drv1 = (void *)~0;
-
-	/* Create the targ device, allocate its softc, initialize it */
-	if ((dev->si_flags & SI_NAMED) == 0) {
-		make_dev(&targ_cdevsw, dev2unit(dev), UID_ROOT, GID_WHEEL, 0600,
-			 "targ%d", dev2unit(dev));
-	}
-	softc = malloc(sizeof(*softc), M_TARG,
-	       M_WAITOK | M_ZERO);
-	dev->si_drv1 = softc;
-	softc->state = TARG_STATE_OPENED;
-	softc->periph = NULL;
-	softc->path = NULL;
-
-	TAILQ_INIT(&softc->pending_ccb_queue);
-	TAILQ_INIT(&softc->work_queue);
-	TAILQ_INIT(&softc->abort_queue);
-	TAILQ_INIT(&softc->user_ccb_queue);
-	knlist_init_mtx(&softc->read_select.si_note, NULL);
-
-	return (0);
-}
-
-/* Disable LUN if enabled and teardown softc */
-static int
-targclose(struct cdev *dev, int flag, int fmt, struct thread *td)
-{
-	struct targ_softc     *softc;
-	struct cam_periph     *periph;
-	int    error;
-
-	softc = (struct targ_softc *)dev->si_drv1;
-	mtx_init(&softc->destroy_mtx, "targ_destroy", "SCSI Target dev destroy", MTX_DEF);
- 	callout_init_mtx(&softc->destroy_dev_callout, &softc->destroy_mtx, CALLOUT_RETURNUNLOCKED);
+	softc = data;
 	if (softc->periph == NULL) {
-#if 0
-		destroy_dev(dev);
+		printf("%s: destroying non-enabled target\n", __func__);
 		free(softc, M_TARG);
-#endif
-		printf("%s: destroying non-enabled target\n", __func__);
-		mtx_lock(&softc->destroy_mtx);
-       		callout_reset(&softc->destroy_dev_callout, hz / 2,
-                        (void *)targdestroy, (void *)dev);
-		mtx_unlock(&softc->destroy_mtx);
-		return (0);
+		return;
 	}
 
 	/*
@@ -240,7 +187,7 @@
 	periph = softc->periph;
 	cam_periph_acquire(periph);
 	cam_periph_lock(periph);
-	error = targdisable(softc);
+	(void)targdisable(softc);
 	if (softc->periph != NULL) {
 		cam_periph_invalidate(softc->periph);
 		softc->periph = NULL;
@@ -247,18 +194,34 @@
 	}
 	cam_periph_unlock(periph);
 	cam_periph_release(periph);
-
-#if 0
-	destroy_dev(dev);
 	free(softc, M_TARG);
-#endif
+}
 
-	printf("%s: close finished error(%d)\n", __func__, error);
-	mtx_lock(&softc->destroy_mtx);
-      	callout_reset(&softc->destroy_dev_callout, hz / 2,
-		(void *)targdestroy, (void *)dev);
-	mtx_unlock(&softc->destroy_mtx);
-	return (error);
+/*
+ * Create softc and initialize it.  There is no locking here because a
+ * periph doesn't get created until an ioctl is issued to do so, and
+ * that can't happen until this method returns.
+ */
+static int
+targopen(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+	struct targ_softc *softc;
+
+	/* Allocate its softc, initialize it */
+	softc = malloc(sizeof(*softc), M_TARG,
+	       M_WAITOK | M_ZERO);
+	softc->state = TARG_STATE_OPENED;
+	softc->periph = NULL;
+	softc->path = NULL;
+
+	TAILQ_INIT(&softc->pending_ccb_queue);
+	TAILQ_INIT(&softc->work_queue);
+	TAILQ_INIT(&softc->abort_queue);
+	TAILQ_INIT(&softc->user_ccb_queue);
+	knlist_init_mtx(&softc->read_select.si_note, NULL);
+
+	devfs_set_cdevpriv(softc, targcdevdtor);
+	return (0);
 }
 
 /* Enable/disable LUNs, set debugging level */
@@ -268,7 +231,7 @@
 	struct targ_softc *softc;
 	cam_status	   status;
 
-	softc = (struct targ_softc *)dev->si_drv1;
+	devfs_get_cdevpriv((void **)&softc);
 
 	switch (cmd) {
 	case TARGIOCENABLE:
@@ -275,23 +238,21 @@
 	{
 		struct ioc_enable_lun	*new_lun;
 		struct cam_path		*path;
-		struct cam_sim		*sim;
 
 		new_lun = (struct ioc_enable_lun *)addr;
-		status = xpt_create_path_unlocked(&path, /*periph*/NULL,
-						  new_lun->path_id,
-						  new_lun->target_id,
-						  new_lun->lun_id);
+		status = xpt_create_path(&path, /*periph*/NULL,
+					  new_lun->path_id,
+					  new_lun->target_id,
+					  new_lun->lun_id);
 		if (status != CAM_REQ_CMP) {
 			printf("Couldn't create path, status %#x\n", status);
 			break;
 		}
-		sim = xpt_path_sim(path);
-		mtx_lock(sim->mtx);
+		xpt_path_lock(path);
 		status = targenable(softc, path, new_lun->grp6_len,
 				    new_lun->grp7_len);
+		xpt_path_unlock(path);
 		xpt_free_path(path);
-		mtx_unlock(sim->mtx);
 		break;
 	}
 	case TARGIOCDISABLE:
@@ -317,13 +278,10 @@
 			cdbg.flags = CAM_DEBUG_PERIPH;
 		else
 			cdbg.flags = CAM_DEBUG_NONE;
-		cam_periph_lock(softc->periph);
 		xpt_setup_ccb(&cdbg.ccb_h, softc->path, CAM_PRIORITY_NORMAL);
 		cdbg.ccb_h.func_code = XPT_DEBUG;
 		cdbg.ccb_h.cbfcnp = targdone;
-
 		xpt_action((union ccb *)&cdbg);
-		cam_periph_unlock(softc->periph);
 		status = cdbg.ccb_h.status & CAM_STATUS_MASK;
 		break;
 	}
@@ -342,7 +300,7 @@
 	struct targ_softc *softc;
 	int	revents;
 
-	softc = (struct targ_softc *)dev->si_drv1;
+	devfs_get_cdevpriv((void **)&softc);
 
 	/* Poll for write() is always ok. */
 	revents = poll_events & (POLLOUT | POLLWRNORM);
@@ -367,7 +325,7 @@
 {
 	struct  targ_softc *softc;
 
-	softc = (struct targ_softc *)dev->si_drv1;
+	devfs_get_cdevpriv((void **)&softc);
 	kn->kn_hook = (caddr_t)softc;
 	kn->kn_fop = &targread_filtops;
 	knlist_add(&softc->read_select.si_note, kn, 0);
@@ -447,6 +405,12 @@
 		status = CAM_FUNC_NOTAVAIL;
 		goto enable_fail;
 	}
+	if (cpi.maxio == 0)
+		softc->maxio = DFLTPHYS;	/* traditional default */
+	else if (cpi.maxio > MAXPHYS)
+		softc->maxio = MAXPHYS;		/* for safety */
+	else
+		softc->maxio = cpi.maxio;	/* real value */
 
 	/* Destroy any periph on our path if it is disabled */
 	periph = cam_periph_find(path, "targ");
@@ -568,7 +532,7 @@
 	int write_len, error;
 	int func_code, priority;
 
-	softc = (struct targ_softc *)dev->si_drv1;
+	devfs_get_cdevpriv((void **)&softc);
 	write_len = error = 0;
 	CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
 		  ("write - uio_resid %zd\n", uio->uio_resid));
@@ -590,6 +554,7 @@
 		switch (func_code) {
 		case XPT_ACCEPT_TARGET_IO:
 		case XPT_IMMED_NOTIFY:
+		case XPT_IMMEDIATE_NOTIFY:
 			cam_periph_lock(softc->periph);
 			ccb = targgetccb(softc, func_code, priority);
 			descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
@@ -765,23 +730,10 @@
 	ccb_h->cbfcnp = targdone;
 	ccb_h->targ_descr = descr;
 
-	/*
-	 * We only attempt to map the user memory into kernel space
-	 * if they haven't passed in a physical memory pointer,
-	 * and if there is actually an I/O operation to perform.
-	 * Right now cam_periph_mapmem() only supports SCSI and device
-	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
-	 * there's actually data to map.  cam_periph_mapmem() will do the
-	 * right thing, even if there isn't data to map, but since CCBs
-	 * without data are a reasonably common occurance (e.g. test unit
-	 * ready), it will save a few cycles if we check for it here.
-	 */
-	if (((ccb_h->flags & CAM_DATA_PHYS) == 0)
-	 && (((ccb_h->func_code == XPT_CONT_TARGET_IO)
-	    && ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE))
-	  || (ccb_h->func_code == XPT_DEV_MATCH))) {
+	if ((ccb_h->func_code == XPT_CONT_TARGET_IO) ||
+	    (ccb_h->func_code == XPT_DEV_MATCH)) {
 
-		error = cam_periph_mapmem(ccb, mapinfo);
+		error = cam_periph_mapmem(ccb, mapinfo, softc->maxio);
 
 		/*
 		 * cam_periph_mapmem returned an error, we can't continue.
@@ -833,6 +785,7 @@
 	switch (done_ccb->ccb_h.func_code) {
 	/* All FC_*_QUEUED CCBs go back to userland */
 	case XPT_IMMED_NOTIFY:
+	case XPT_IMMEDIATE_NOTIFY:
 	case XPT_ACCEPT_TARGET_IO:
 	case XPT_CONT_TARGET_IO:
 		TAILQ_INSERT_TAIL(&softc->user_ccb_queue, &done_ccb->ccb_h,
@@ -862,7 +815,7 @@
 
 	error = 0;
 	read_len = 0;
-	softc = (struct targ_softc *)dev->si_drv1;
+	devfs_get_cdevpriv((void **)&softc);
 	user_queue = &softc->user_ccb_queue;
 	abort_queue = &softc->abort_queue;
 	CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread\n"));
@@ -873,7 +826,7 @@
 	user_descr = TAILQ_FIRST(abort_queue);
 	while (ccb_h == NULL && user_descr == NULL) {
 		if ((ioflag & IO_NDELAY) == 0) {
-			error = msleep(user_queue, softc->periph->sim->mtx,
+			error = cam_periph_sleep(softc->periph, user_queue,
 			    PRIBIO | PCATCH, "targrd", 0);
 			ccb_h = TAILQ_FIRST(user_queue);
 			user_descr = TAILQ_FIRST(abort_queue);
@@ -1013,6 +966,7 @@
 	switch (ccb->ccb_h.func_code) {
 	case XPT_ACCEPT_TARGET_IO:
 	case XPT_IMMED_NOTIFY:
+	case XPT_IMMEDIATE_NOTIFY:
 		CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb));
 		free(ccb, M_TARG);
 		break;
@@ -1047,23 +1001,11 @@
 static void
 targinit(void)
 {
-	EVENTHANDLER_REGISTER(dev_clone, targclone, 0, 1000);
-}
+	struct cdev *dev;
 
-static void
-targclone(void *arg, struct ucred *cred, char *name, int namelen,
-    struct cdev **dev)
-{
-	int u;
-
-	if (*dev != NULL)
-		return;
-	if (dev_stdclone(name, NULL, "targ", &u) != 1)
-		return;
-	*dev = make_dev(&targ_cdevsw, u, UID_ROOT, GID_WHEEL,
-			0600, "targ%d", u);
-	dev_ref(*dev);
-	(*dev)->si_flags |= SI_CHEAPCLONE;
+	/* Add symbolic link to targ0 for compatibility. */
+	dev = make_dev(&targ_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "targ");
+	make_dev_alias(dev, "targ0");
 }
 
 static void
@@ -1080,7 +1022,6 @@
 	struct targ_cmd_descr   *descr;
 	struct ccb_abort	 cab;
 	struct ccb_hdr		*ccb_h;
-	struct cam_sim		*sim;
 
 	CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("abort_all_pending\n"));
 
@@ -1113,8 +1054,7 @@
 
 	/* If we aborted at least one pending CCB ok, wait for it. */
 	if (cab.ccb_h.status == CAM_REQ_CMP) {
-		sim = xpt_path_sim(softc->path);
-		msleep(&softc->pending_ccb_queue, sim->mtx,
+		cam_periph_sleep(softc->periph, &softc->pending_ccb_queue,
 		       PRIBIO | PCATCH, "tgabrt", 0);
 	}
 
@@ -1195,6 +1135,9 @@
 	case XPT_IMMED_NOTIFY:
 		len = sizeof(struct ccb_immed_notify);
 		break;
+	case XPT_IMMEDIATE_NOTIFY:
+		len = sizeof(struct ccb_immediate_notify);
+		break;
 	case XPT_REL_SIMQ:
 		len = sizeof(struct ccb_relsim);
 		break;
@@ -1217,25 +1160,3 @@
 
 	return (len);
 }
-
-/*
- * work around to destroy targ device
- * outside of targclose
- */
-static void
-targdestroy(void *dev)
-{
-	struct cdev *device = (struct cdev *)dev;
-	struct targ_softc *softc = (struct targ_softc *)device->si_drv1;
-
-#if 0
-	callout_stop(&softc->destroy_dev_callout);
-#endif
-
-	mtx_unlock(&softc->destroy_mtx);
-	mtx_destroy(&softc->destroy_mtx);
-	free(softc, M_TARG);
-	device->si_drv1 = 0;
-	destroy_dev(device);
-	printf("%s: destroyed dev\n", __func__);
-}
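
The scsi_target.c rework above replaces the clone handler, the d_close routine, and the deferred-destroy callout with devfs per-open state: devfs_set_cdevpriv() attaches the softc to the open file, devfs_get_cdevpriv() retrieves it in the other entry points, and the registered destructor runs after the last descriptor for that open goes away, which is why targclose() and targdestroy() can be deleted outright. A minimal sketch of the pattern, with hypothetical names:

	struct foo_softc {
		int	state;		/* per-open state lives here */
	};

	static void
	foo_cdevpriv_dtor(void *data)
	{
		/* Invoked once, after the last close of this open file. */
		free(data, M_DEVBUF);
	}

	static int
	foo_open(struct cdev *dev, int flags, int fmt, struct thread *td)
	{
		struct foo_softc *sc;
		int error;

		sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
		error = devfs_set_cdevpriv(sc, foo_cdevpriv_dtor);
		if (error != 0)
			free(sc, M_DEVBUF);
		return (error);
	}

Each open(2) now gets its own softc, which also removes the single-open EBUSY restriction visible in the deleted targopen() code.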

Modified: trunk/sys/cam/scsi/scsi_targetio.h
===================================================================
--- trunk/sys/cam/scsi/scsi_targetio.h	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_targetio.h	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Ioctl definitions for the SCSI Target Driver
  *
@@ -26,7 +27,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/cam/scsi/scsi_targetio.h 139743 2005-01-05 22:34:37Z imp $
  */
 
 #ifndef _CAM_SCSI_SCSI_TARGETIO_H_

Modified: trunk/sys/cam/scsi/scsi_xpt.c
===================================================================
--- trunk/sys/cam/scsi/scsi_xpt.c	2018-06-01 22:49:39 UTC (rev 10168)
+++ trunk/sys/cam/scsi/scsi_xpt.c	2018-06-01 22:51:18 UTC (rev 10169)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Implementation of the SCSI Transport
  *
@@ -28,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/cam/scsi/scsi_xpt.c 311402 2017-01-05 11:20:31Z mav $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -100,6 +101,12 @@
 		(lval) <<= 8;						\
 		(lval) |=  (lp)->luns[(i)].lundata[1];			\
 	}
+#define	CAM_GET_LUN(lp, i, lval)					\
+	(lval) = scsi_4btoul((lp)->luns[(i)].lundata);			\
+	(lval) = ((lval) >> 16) | ((lval) << 16);
+#define CAM_LUN_ONLY_32BITS(lp, i)				\
+	(scsi_4btoul(&((lp)->luns[(i)].lundata[4])) == 0)
+
 /*
  * If we're not quirked to search <= the first 8 luns
  * and we are either quirked to search above lun 8,
@@ -136,6 +143,7 @@
 	PROBE_MODE_SENSE,
 	PROBE_SUPPORTED_VPD_LIST,
 	PROBE_DEVICE_ID,
+	PROBE_EXTENDED_INQUIRY,
 	PROBE_SERIAL_NUM,
 	PROBE_TUR_FOR_NEGOTIATION,
 	PROBE_INQUIRY_BASIC_DV1,
@@ -153,6 +161,7 @@
 	"PROBE_MODE_SENSE",
 	"PROBE_SUPPORTED_VPD_LIST",
 	"PROBE_DEVICE_ID",
+	"PROBE_EXTENDED_INQUIRY",
 	"PROBE_SERIAL_NUM",
 	"PROBE_TUR_FOR_NEGOTIATION",
 	"PROBE_INQUIRY_BASIC_DV1",
@@ -175,7 +184,8 @@
 typedef enum {
 	PROBE_INQUIRY_CKSUM	= 0x01,
 	PROBE_SERIAL_CKSUM	= 0x02,
-	PROBE_NO_ANNOUNCE	= 0x04
+	PROBE_NO_ANNOUNCE	= 0x04,
+	PROBE_EXTLUN		= 0x08
 } probe_flags;
 
 typedef struct {
@@ -450,7 +460,7 @@
 	},
 	{
 		/*
-		 * The Hitachi CJ series with J8A8 firmware apparantly has
+		 * The Hitachi CJ series with J8A8 firmware apparently has
 		 * problems with tagged commands.
 		 * PR: 23536
 		 * Reported by: amagai at nue.org
@@ -561,10 +571,9 @@
 static int       proberequestbackoff(struct cam_periph *periph,
 				     struct cam_ed *device);
 static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
-static int	 probe_strange_rpl_data(struct scsi_report_luns_data *rp,
-					uint32_t maxlun);
 static void	 probe_purge_old(struct cam_path *path,
-				 struct scsi_report_luns_data *new);
+				 struct scsi_report_luns_data *new,
+				 probe_flags flags);
 static void	 probecleanup(struct cam_periph *periph);
 static void	 scsi_find_quirk(struct cam_ed *device);
 static void	 scsi_scan_bus(struct cam_periph *periph, union ccb *ccb);
@@ -577,7 +586,7 @@
 				   lun_id_t lun_id);
 static void	 scsi_devise_transport(struct cam_path *path);
 static void	 scsi_set_transfer_settings(struct ccb_trans_settings *cts,
-					    struct cam_ed *device,
+					    struct cam_path *path,
 					    int async_update);
 static void	 scsi_toggle_tags(struct cam_path *path);
 static void	 scsi_dev_async(u_int32_t async_code,
@@ -639,6 +648,7 @@
 		return (status);
 	}
 	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));
+	scsi_devise_transport(periph->path);
 
 	/*
 	 * Ensure we've waited at least a bus settle
@@ -647,11 +657,6 @@
 	 */
 	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
 				      scsi_delay);
-	/*
-	 * Ensure nobody slip in until probe finish.
-	 */
-	cam_freeze_devq_arg(periph->path,
-	    RELSIM_RELEASE_RUNLEVEL, CAM_RL_XPT + 1);
 	probeschedule(periph);
 	return(CAM_REQ_CMP);
 }
@@ -704,6 +709,11 @@
 	else
 		softc->flags &= ~PROBE_NO_ANNOUNCE;
 
+	if (cpi.hba_misc & PIM_EXTLUNS)
+		softc->flags |= PROBE_EXTLUN;
+	else
+		softc->flags &= ~PROBE_EXTLUN;
+
 	xpt_schedule(periph, CAM_PRIORITY_XPT);
 }
 
@@ -751,7 +761,8 @@
 		 * serial number check finish, we attempt to figure out
 		 * whether we still have the same device.
 		 */
-		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
+		if (((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
+		 && ((softc->flags & PROBE_INQUIRY_CKSUM) == 0)) {
 
 			MD5Init(&softc->context);
 			MD5Update(&softc->context, (unsigned char *)inq_buf,
@@ -883,11 +894,14 @@
 				     /*timeout*/60 * 1000);
 			break;
 		}
+done:
 		/*
 		 * We'll have to do without, let our probedone
 		 * routine finish up for us.
 		 */
 		start_ccb->csio.data_ptr = NULL;
+		cam_freeze_devq(periph->path);
+		cam_periph_doacquire(periph);
 		probedone(periph, start_ccb);
 		return;
 	}
@@ -913,13 +927,35 @@
 				     /*timeout*/60 * 1000);
 			break;
 		}
+		goto done;
+	}
+	case PROBE_EXTENDED_INQUIRY:
+	{
+		struct scsi_vpd_extended_inquiry_data *ext_inq;
+
+		ext_inq = NULL;
+		if (scsi_vpd_supported_page(periph, SVPD_EXTENDED_INQUIRY_DATA))
+			ext_inq = malloc(sizeof(*ext_inq), M_CAMXPT,
+			    M_NOWAIT | M_ZERO);
+
+		if (ext_inq != NULL) {
+			scsi_inquiry(csio,
+				     /*retries*/4,
+				     probedone,
+				     MSG_SIMPLE_Q_TAG,
+				     (uint8_t *)ext_inq,
+				     sizeof(*ext_inq),
+				     /*evpd*/TRUE,
+				     SVPD_EXTENDED_INQUIRY_DATA,
+				     SSD_MIN_SIZE,
+				     /*timeout*/60 * 1000);
+			break;
+		}
 		/*
 		 * We'll have to do without, let our probedone
 		 * routine finish up for us.
 		 */
-		start_ccb->csio.data_ptr = NULL;
-		probedone(periph, start_ccb);
-		return;
+		goto done;
 	}
 	case PROBE_SERIAL_NUM:
 	{
@@ -952,17 +988,13 @@
 				     /*timeout*/60 * 1000);
 			break;
 		}
-		/*
-		 * We'll have to do without, let our probedone
-		 * routine finish up for us.
-		 */
-		start_ccb->csio.data_ptr = NULL;
-		probedone(periph, start_ccb);
-		return;
+		goto done;
 	}
 	default:
 		panic("probestart: invalid action state 0x%x\n", softc->action);
 	}
+	start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
+	cam_periph_doacquire(periph);
 	xpt_action(start_ccb);
 }
 
@@ -975,7 +1007,7 @@
 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 	cts.type = CTS_TYPE_USER_SETTINGS;
 	xpt_action((union ccb *)&cts);
-	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+	if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) {
 		return;
 	}
 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
@@ -997,7 +1029,7 @@
 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
 	xpt_action((union ccb *)&cts);
-	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+	if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) {
 		if (bootverbose) {
 			xpt_print(periph->path,
 			    "failed to get current device settings\n");
@@ -1076,7 +1108,7 @@
 		cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
 		xpt_action((union ccb *)&cts);
-		if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+		if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) {
 			break;
 		}
 		CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
@@ -1095,6 +1127,7 @@
 {
 	probe_softc *softc;
 	struct cam_path *path;
+	struct scsi_inquiry_data *inq_buf;
 	u_int32_t  priority;
 
 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
@@ -1106,11 +1139,15 @@
 	switch (softc->action) {
 	case PROBE_TUR:
 	{
-		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
 
 			if (cam_periph_error(done_ccb, 0,
-					     SF_NO_PRINT, NULL) == ERESTART)
+					     SF_NO_PRINT, NULL) == ERESTART) {
+outr:
+				/* Drop freeze taken due to CAM_DEV_QFREEZE */
+				cam_release_devq(path, 0, 0, 0, FALSE);
 				return;
+			}
 			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 				/* Don't wedge the queue */
 				xpt_release_devq(done_ccb->ccb_h.path,
@@ -1120,21 +1157,26 @@
 		PROBE_SET_ACTION(softc, PROBE_INQUIRY);
 		xpt_release_ccb(done_ccb);
 		xpt_schedule(periph, priority);
+out:
+		/* Drop freeze taken due to CAM_DEV_QFREEZE and release. */
+		cam_release_devq(path, 0, 0, 0, FALSE);
+		cam_periph_release_locked(periph);
 		return;
 	}
 	case PROBE_INQUIRY:
 	case PROBE_FULL_INQUIRY:
 	{
-		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
-			struct scsi_inquiry_data *inq_buf;
+		if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
 			u_int8_t periph_qual;
 
 			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
+			scsi_find_quirk(path->device);
 			inq_buf = &path->device->inq_data;
 
 			periph_qual = SID_QUAL(inq_buf);
 
-			if (periph_qual == SID_QUAL_LU_CONNECTED) {
+			if (periph_qual == SID_QUAL_LU_CONNECTED ||
+			    periph_qual == SID_QUAL_LU_OFFLINE) {
 				u_int8_t len;
 
 				/*
@@ -1155,11 +1197,9 @@
 					PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY);
 					xpt_release_ccb(done_ccb);
 					xpt_schedule(periph, priority);
-					return;
+					goto out;
 				}
 
-				scsi_find_quirk(path->device);
-
 				scsi_devise_transport(path);
 
 				if (path->device->lun_id == 0 &&
@@ -1185,22 +1225,16 @@
 				}
 				xpt_release_ccb(done_ccb);
 				xpt_schedule(periph, priority);
-				return;
+				goto out;
 			} else if (path->device->lun_id == 0 &&
-			    SID_ANSI_REV(inq_buf) > SCSI_REV_SPC2 &&
+			    SID_ANSI_REV(inq_buf) >= SCSI_REV_SPC2 &&
 			    (SCSI_QUIRK(path->device)->quirks &
 			     CAM_QUIRK_NORPTLUNS) == 0) {
-				if (path->device->flags &
-				    CAM_DEV_UNCONFIGURED) {
-					path->device->flags &=
-					    ~CAM_DEV_UNCONFIGURED;
-					xpt_acquire_device(path->device);
-				}
 				PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS);
 				periph->path->target->rpl_size = 16;
 				xpt_release_ccb(done_ccb);
 				xpt_schedule(periph, priority);
-				return;
+				goto out;
 			}
 		} else if (cam_periph_error(done_ccb, 0,
 					    done_ccb->ccb_h.target_lun > 0
@@ -1207,11 +1241,14 @@
 					    ? SF_RETRY_UA|SF_QUIET_IR
 					    : SF_RETRY_UA,
 					    &softc->saved_ccb) == ERESTART) {
-			return;
-		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
-			/* Don't wedge the queue */
-			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
-					 /*run_queue*/TRUE);
+			goto outr;
+		} else {
+			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+				/* Don't wedge the queue */
+				xpt_release_devq(done_ccb->ccb_h.path,
+				    /*count*/1, /*run_queue*/TRUE);
+			}
+			path->device->flags &= ~CAM_DEV_INQUIRY_DATA_VALID;
 		}
 		/*
 		 * If we get to this point, we got an error status back
@@ -1243,12 +1280,12 @@
 		nlun = scsi_4btoul(lp->length) / 8;
 		maxlun = (csio->dxfer_len / 8) - 1;
 
-		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
 			if (cam_periph_error(done_ccb, 0,
 			    done_ccb->ccb_h.target_lun > 0 ?
 			    SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA,
 			    &softc->saved_ccb) == ERESTART) {
-				return;
+				goto outr;
 			}
 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 				xpt_release_devq(done_ccb->ccb_h.path, 1,
@@ -1267,7 +1304,7 @@
 			path->target->rpl_size = (nlun << 3) + 8;
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		} else if (nlun == 0) {
 			/*
 			 * If there don't appear to be any luns, bail.
@@ -1274,13 +1311,6 @@
 			 */
 			free(lp, M_CAMXPT);
 			lp = NULL;
-		} else if (probe_strange_rpl_data(lp, maxlun)) {
-			/*
-			 * If we can't understand the lun format
-			 * of any entry, bail.
-			 */
-			free(lp, M_CAMXPT);
-			lp = NULL;
 		} else {
 			lun_id_t lun;
 			int idx;
@@ -1288,7 +1318,7 @@
 			CAM_DEBUG(path, CAM_DEBUG_PROBE,
 			   ("Probe: %u lun(s) reported\n", nlun));
 
-			CAM_GET_SIMPLE_LUN(lp, 0, lun);
+			CAM_GET_LUN(lp, 0, lun);
 			/*
 			 * If the first lun is not lun 0, then either there
 			 * is no lun 0 in the list, or the list is unsorted.
@@ -1295,7 +1325,7 @@
 			 */
 			if (lun != 0) {
 				for (idx = 0; idx < nlun; idx++) {
-					CAM_GET_SIMPLE_LUN(lp, idx, lun);
+					CAM_GET_LUN(lp, idx, lun);
 					if (lun == 0) {
 						break;
 					}
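
REPORT LUNS data is a 4-byte big-endian list length followed by 8-byte LUN
entries, which is where the nlun = scsi_4btoul(lp->length) / 8 computation
and the search for LUN 0 above come from. A self-contained sketch of that
parsing; scsi_4btoul() is re-implemented here for illustration and matches
the scsi_all.h helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t scsi_4btoul(const uint8_t *b)
{
	return ((uint32_t)b[0] << 24 | (uint32_t)b[1] << 16 |
	    (uint32_t)b[2] << 8 | b[3]);
}

int main(void)
{
	/* Header: list length 16 bytes => two LUN entries follow. */
	uint8_t data[8 + 16] = { 0, 0, 0, 16 };
	uint32_t nlun, idx;

	data[8 + 1] = 1;	/* first entry: LUN 1, peripheral format */
	/* second entry left zeroed: LUN 0 */

	nlun = scsi_4btoul(data) / 8;
	printf("%u lun(s) reported\n", nlun);
	for (idx = 0; idx < nlun; idx++) {
		const uint8_t *e = data + 8 + idx * 8;
		if (memcmp(e, (uint8_t[8]){0}, 8) == 0) {
			printf("lun 0 in position %u\n", idx);
			break;
		}
	}
	return (0);
}
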
@@ -1310,14 +1340,6 @@
 					    tlun, 8);
 					CAM_DEBUG(path, CAM_DEBUG_PROBE,
 					    ("lun 0 in position %u\n", idx));
-				} else {
-					/*
-					 * There is no lun 0 in our list. Destroy
-					 * the validity of the inquiry data so we
-					 * bail here and now.
-					 */
-					path->device->flags &=
-					    ~CAM_DEV_INQUIRY_DATA_VALID;
 				}
 			}
 			/*
@@ -1327,12 +1349,13 @@
 			 * This function will also install the new list
 			 * in the target structure.
 			 */
-			probe_purge_old(path, lp);
+			probe_purge_old(path, lp, softc->flags);
 			lp = NULL;
 		}
-		if (path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) {
-			struct scsi_inquiry_data *inq_buf;
-			inq_buf = &path->device->inq_data;
+		inq_buf = &path->device->inq_data;
+		if (path->device->flags & CAM_DEV_INQUIRY_DATA_VALID &&
+		    (SID_QUAL(inq_buf) == SID_QUAL_LU_CONNECTED ||
+		    SID_QUAL(inq_buf) == SID_QUAL_LU_OFFLINE)) {
 			if (INQ_DATA_TQ_ENABLED(inq_buf))
 				PROBE_SET_ACTION(softc, PROBE_MODE_SENSE);
 			else
@@ -1340,11 +1363,13 @@
 				    PROBE_SUPPORTED_VPD_LIST);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		if (lp) {
 			free(lp, M_CAMXPT);
 		}
+		PROBE_SET_ACTION(softc, PROBE_INVALID);
+		xpt_release_ccb(done_ccb);
 		break;
 	}
 	case PROBE_MODE_SENSE:
@@ -1354,7 +1379,7 @@
 
 		csio = &done_ccb->csio;
 		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
-		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+		if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
 			struct scsi_control_page *page;
 			u_int8_t *offset;
 
@@ -1365,7 +1390,7 @@
 		} else if (cam_periph_error(done_ccb, 0,
 					    SF_RETRY_UA|SF_NO_PRINT,
 					    &softc->saved_ccb) == ERESTART) {
-			return;
+			goto outr;
 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 			/* Don't wedge the queue */
 			xpt_release_devq(done_ccb->ccb_h.path,
@@ -1375,7 +1400,7 @@
 		free(mode_hdr, M_CAMXPT);
 		PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST);
 		xpt_schedule(periph, priority);
-		return;
+		goto out;
 	}
 	case PROBE_SUPPORTED_VPD_LIST:
 	{
@@ -1404,11 +1429,11 @@
 			xpt_release_ccb(done_ccb);
 			PROBE_SET_ACTION(softc, PROBE_DEVICE_ID);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		} else if (cam_periph_error(done_ccb, 0,
 					    SF_RETRY_UA|SF_NO_PRINT,
 					    &softc->saved_ccb) == ERESTART) {
-			return;
+			goto outr;
 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 			/* Don't wedge the queue */
 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
@@ -1453,7 +1478,7 @@
 		} else if (cam_periph_error(done_ccb, 0,
 					    SF_RETRY_UA,
 					    &softc->saved_ccb) == ERESTART) {
-			return;
+			goto outr;
 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 			/* Don't wedge the queue */
 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
@@ -1464,9 +1489,53 @@
 		if (devid && length == 0)
 			free(devid, M_CAMXPT);
 		xpt_release_ccb(done_ccb);
+		PROBE_SET_ACTION(softc, PROBE_EXTENDED_INQUIRY);
+		xpt_schedule(periph, priority);
+		goto out;
+	}
+	case PROBE_EXTENDED_INQUIRY: {
+		struct scsi_vpd_extended_inquiry_data *ext_inq;
+		struct ccb_scsiio *csio;
+		int32_t length = 0;
+
+		csio = &done_ccb->csio;
+		ext_inq = (struct scsi_vpd_extended_inquiry_data *)
+		    csio->data_ptr;
+		if (path->device->ext_inq != NULL) {
+			path->device->ext_inq_len = 0;
+			free(path->device->ext_inq, M_CAMXPT);
+			path->device->ext_inq = NULL;
+		}
+
+		if (ext_inq == NULL) {
+			/* Don't process the command as it was never sent */
+		} else if (CCB_COMPLETED_OK(csio->ccb_h)) {
+			length = scsi_2btoul(ext_inq->page_length) +
+			    __offsetof(struct scsi_vpd_extended_inquiry_data,
+			    flags1);
+			length = min(length, sizeof(*ext_inq));
+			length -= csio->resid;
+			if (length > 0) {
+				path->device->ext_inq_len = length;
+				path->device->ext_inq = (uint8_t *)ext_inq;
+			}
+		} else if (cam_periph_error(done_ccb, 0,
+					    SF_RETRY_UA,
+					    &softc->saved_ccb) == ERESTART) {
+			goto outr;
+		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+			/* Don't wedge the queue */
+			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+					 /*run_queue*/TRUE);
+		}
+
+		/* Free the extended inquiry data if we don't use it */
+		if (ext_inq && length <= 0)
+			free(ext_inq, M_CAMXPT);
+		xpt_release_ccb(done_ccb);
 		PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM);
 		xpt_schedule(periph, priority);
-		return;
+		goto out;
 	}
 
 probe_device_check:
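
The usable length of the EXTENDED INQUIRY VPD page above is computed as the
advertised page_length plus the header bytes that precede the flags, clamped
to the buffer size and reduced by the residual count. A stand-alone sketch of
that arithmetic; the struct layout is abbreviated, field offsets follow SPC,
and min() is open-coded:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vpd_ext_inq {		/* abbreviated SPC layout */
	uint8_t device;
	uint8_t page_code;
	uint8_t page_length[2];	/* big-endian, bytes after the header */
	uint8_t flags1;		/* first payload byte, offset 4 */
	uint8_t rest[59];
};

static uint32_t scsi_2btoul(const uint8_t *b)
{
	return ((uint32_t)b[0] << 8 | b[1]);
}

int main(void)
{
	struct vpd_ext_inq ei = { .page_length = { 0, 0x3c } };
	int32_t resid = 4;	/* bytes the device never transferred */
	int32_t len;

	len = scsi_2btoul(ei.page_length) +
	    offsetof(struct vpd_ext_inq, flags1);
	if (len > (int32_t)sizeof(ei))	/* min(length, sizeof(*ext_inq)) */
		len = sizeof(ei);
	len -= resid;
	printf("usable bytes: %d\n", len);	/* prints 60 */
	return (0);
}
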
@@ -1489,7 +1558,7 @@
 			/*
 			 * Don't process the command as it was never sent
 			 */
-		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
+		} else if (cam_ccb_status(done_ccb) == CAM_REQ_CMP
 			&& (serial_buf->length > 0)) {
 
 			have_serialnum = 1;
@@ -1508,7 +1577,7 @@
 		} else if (cam_periph_error(done_ccb, 0,
 					    SF_RETRY_UA|SF_NO_PRINT,
 					    &softc->saved_ccb) == ERESTART) {
-			return;
+			goto outr;
 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 			/* Don't wedge the queue */
 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
@@ -1567,7 +1636,7 @@
 			 */
 			PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		xpt_release_ccb(done_ccb);
 		break;
@@ -1574,7 +1643,7 @@
 	}
 	case PROBE_TUR_FOR_NEGOTIATION:
 	case PROBE_DV_EXIT:
-		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
 			cam_periph_error(done_ccb, 0,
 			    SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY, NULL);
 		}
@@ -1597,7 +1666,7 @@
 			xpt_release_ccb(done_ccb);
 			PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		if (softc->action == PROBE_DV_EXIT) {
 			CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
@@ -1625,7 +1694,7 @@
 		struct scsi_inquiry_data *nbuf;
 		struct ccb_scsiio *csio;
 
-		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
 			cam_periph_error(done_ccb, 0,
 			    SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY, NULL);
 		}
@@ -1650,7 +1719,7 @@
 			free(nbuf, M_CAMXPT);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		free(nbuf, M_CAMXPT);
 		if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
@@ -1657,7 +1726,7 @@
 			PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2);
 			xpt_release_ccb(done_ccb);
 			xpt_schedule(periph, priority);
-			return;
+			goto out;
 		}
 		if (softc->action == PROBE_INQUIRY_BASIC_DV2) {
 			CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
@@ -1689,45 +1758,36 @@
 	xpt_done(done_ccb);
 	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
 		CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
+		/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
+		cam_release_devq(path, 0, 0, 0, FALSE);
+		cam_periph_release_locked(periph);
 		cam_periph_invalidate(periph);
-		cam_release_devq(periph->path,
-		    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_XPT + 1, FALSE);
 		cam_periph_release_locked(periph);
 	} else {
 		probeschedule(periph);
+		goto out;
 	}
 }
 
-static int
-probe_strange_rpl_data(struct scsi_report_luns_data *rp, uint32_t maxlun)
-{
-	uint32_t idx;
-	uint32_t nlun = MIN(maxlun, (scsi_4btoul(rp->length) / 8));
-
-	for (idx = 0; idx < nlun; idx++) {
-		if (!CAM_CAN_GET_SIMPLE_LUN(rp, idx)) {
-			return (-1);
-		}
-	}
-	return (0);
-}
-
 static void
-probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new)
+probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new,
+    probe_flags flags)
 {
 	struct cam_path *tp;
 	struct scsi_report_luns_data *old;
-	u_int idx1, idx2, nlun_old, nlun_new, this_lun;
+	u_int idx1, idx2, nlun_old, nlun_new;
+	lun_id_t this_lun;
 	u_int8_t *ol, *nl;
 
 	if (path->target == NULL) {
 		return;
 	}
-	if (path->target->luns == NULL) {
-		path->target->luns = new;
+	mtx_lock(&path->target->luns_mtx);
+	old = path->target->luns;
+	path->target->luns = new;
+	mtx_unlock(&path->target->luns_mtx);
+	if (old == NULL)
 		return;
-	}
-	old = path->target->luns;
 	nlun_old = scsi_4btoul(old->length) / 8;
 	nlun_new = scsi_4btoul(new->length) / 8;
 
@@ -1751,17 +1811,26 @@
 		 * that would be what the probe state
 		 * machine is currently working on,
 		 * so we won't do that.
-		 *
-		 * We also cannot nuke it if it is
-		 * not in a lun format we understand.
 		 */
-		if (!CAM_CAN_GET_SIMPLE_LUN(old, idx1)) {
+		CAM_GET_LUN(old, idx1, this_lun);
+		if (this_lun == 0) {
 			continue;
 		}
-		CAM_GET_SIMPLE_LUN(old, idx1, this_lun);
-		if (this_lun == 0) {
+
+		/*
+		 * We also cannot nuke it if it is
+		 * not in a LUN format we understand,
+		 * and we replace the LUN with a "simple"
+		 * LUN if that is all the HBA supports.
+		 */
+		if (!(flags & PROBE_EXTLUN)) {
+			if (!CAM_CAN_GET_SIMPLE_LUN(old, idx1))
+				continue;
+			CAM_GET_SIMPLE_LUN(old, idx1, this_lun);
+		}
+		if (!CAM_LUN_ONLY_32BITS(old, idx1))
 			continue;
-		}
+
 		if (xpt_create_path(&tp, NULL, xpt_path_path_id(path),
 		    xpt_path_target_id(path), this_lun) == CAM_REQ_CMP) {
 			xpt_async(AC_LOST_DEVICE, tp, NULL);
@@ -1769,7 +1838,6 @@
 		}
 	}
 	free(old, M_CAMXPT);
-	path->target->luns = new;
 }
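
probe_purge_old() now publishes the new LUN list under target->luns_mtx and
only afterwards walks the old list, so readers never observe a half-installed
pointer and the old list is freed exactly once. A minimal pthreads sketch of
the swap-under-lock idiom; all names here are illustrative:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t luns_mtx = PTHREAD_MUTEX_INITIALIZER;
static int *luns;		/* stands in for path->target->luns */

/* Install 'newlist'; the previous list is ours to scan and free. */
static void luns_replace(int *newlist)
{
	int *old;

	pthread_mutex_lock(&luns_mtx);
	old = luns;
	luns = newlist;
	pthread_mutex_unlock(&luns_mtx);

	if (old == NULL)
		return;
	/* ... compare old against newlist, announce lost LUNs ... */
	free(old);		/* no longer reachable through 'luns' */
}

int main(void)
{
	luns_replace(calloc(4, sizeof(int)));
	luns_replace(calloc(2, sizeof(int)));
	free(luns);
	return (0);
}
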
 
 static void
@@ -1831,6 +1899,8 @@
 static void
 scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
 {
+	struct mtx *mtx;
+
 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 		  ("scsi_scan_bus\n"));
 	switch (request_ccb->ccb_h.func_code) {
@@ -1877,8 +1947,8 @@
 		if ((work_ccb->cpi.hba_inquiry &
 		    (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) &&
 		    !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) &&
-		    !timevalisset(&request_ccb->ccb_h.path->bus->last_reset)) {
-			reset_ccb = xpt_alloc_ccb_nowait();
+		    !timevalisset(&request_ccb->ccb_h.path->bus->last_reset) &&
+		    (reset_ccb = xpt_alloc_ccb_nowait()) != NULL) {
 			xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path,
 			      CAM_PRIORITY_NONE);
 			reset_ccb->ccb_h.func_code = XPT_RESET_BUS;
@@ -1898,6 +1968,7 @@
 		    (work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT);
 		if (scan_info == NULL) {
 			request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+			xpt_free_ccb(work_ccb);
 			xpt_done(request_ccb);
 			return;
 		}
@@ -1928,6 +1999,8 @@
 				scan_info->counter--;
 			}
 		}
+		mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
+		mtx_unlock(mtx);
 
 		for (i = low_target; i <= max_target; i++) {
 			cam_status status;
@@ -1934,7 +2007,7 @@
 			if (i == initiator_id)
 				continue;
 
-			status = xpt_create_path(&path, xpt_periph,
+			status = xpt_create_path(&path, NULL,
 						 request_ccb->ccb_h.path_id,
 						 i, 0);
 			if (status != CAM_REQ_CMP) {
@@ -1960,10 +2033,13 @@
 				      request_ccb->ccb_h.pinfo.priority);
 			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 			work_ccb->ccb_h.cbfcnp = scsi_scan_bus;
+			work_ccb->ccb_h.flags |= CAM_UNLOCKED;
 			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
 			work_ccb->crcn.flags = request_ccb->crcn.flags;
 			xpt_action(work_ccb);
 		}
+
+		mtx_lock(mtx);
 		break;
 	}
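
Because the per-target scan CCBs are now dispatched with CAM_UNLOCKED, the
path mutex is dropped around the dispatch loop and retaken afterwards. A
sketch of that unlock-around-work idiom in pthreads terms; dispatch() is a
hypothetical stand-in for xpt_action():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t path_mtx = PTHREAD_MUTEX_INITIALIZER;

static void dispatch(int target)
{
	/* may sleep or complete synchronously; must not hold path_mtx */
	printf("scanning target %d\n", target);
}

static void scan_all(int max_target)
{
	int i;

	pthread_mutex_lock(&path_mtx);
	/* ... set up scan bookkeeping under the lock ... */
	pthread_mutex_unlock(&path_mtx);
	for (i = 0; i <= max_target; i++)
		dispatch(i);
	pthread_mutex_lock(&path_mtx);
	/* ... continue with state that needs the lock ... */
	pthread_mutex_unlock(&path_mtx);
}

int main(void)
{
	scan_all(3);
	return (0);
}
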
 	case XPT_SCAN_LUN:
@@ -1972,7 +2048,7 @@
 		struct cam_path *path, *oldpath;
 		scsi_scan_bus_info *scan_info;
 		struct cam_et *target;
-		struct cam_ed *device;
+		struct cam_ed *device, *nextdev;
 		int next_target;
 		path_id_t path_id;
 		target_id_t target_id;
@@ -1980,24 +2056,19 @@
 
 		oldpath = request_ccb->ccb_h.path;
 
-		status = request_ccb->ccb_h.status & CAM_STATUS_MASK;
-		/* Reuse the same CCB to query if a device was really found */
+		status = cam_ccb_status(request_ccb);
 		scan_info = (scsi_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
-		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
-			      request_ccb->ccb_h.pinfo.priority);
-		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
-
-
 		path_id = request_ccb->ccb_h.path_id;
 		target_id = request_ccb->ccb_h.target_id;
 		lun_id = request_ccb->ccb_h.target_lun;
-		xpt_action(request_ccb);
-
 		target = request_ccb->ccb_h.path->target;
 		next_target = 1;
 
+		mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
+		mtx_lock(mtx);
+		mtx_lock(&target->luns_mtx);
 		if (target->luns) {
-			uint32_t first;
+			lun_id_t first;
 			u_int nluns = scsi_4btoul(target->luns->length) / 8;
 
 			/*
@@ -2005,87 +2076,84 @@
 			 * of the list as we've actually just finished probing
 			 * it.
 			 */
-			CAM_GET_SIMPLE_LUN(target->luns, 0, first);
+			CAM_GET_LUN(target->luns, 0, first);
 			if (first == 0 && scan_info->lunindex[target_id] == 0) {
 				scan_info->lunindex[target_id]++;
 			} 
 
+			/*
+			 * Skip any LUNs that the HBA can't deal with.
+			 */
+			while (scan_info->lunindex[target_id] < nluns) {
+				if (scan_info->cpi->hba_misc & PIM_EXTLUNS) {
+					CAM_GET_LUN(target->luns,
+					    scan_info->lunindex[target_id],
+					    lun_id);
+					break;
+				}
+
+				/* XXX print warning? */
+				if (!CAM_LUN_ONLY_32BITS(target->luns,
+				    scan_info->lunindex[target_id]))
+					continue;
+				if (CAM_CAN_GET_SIMPLE_LUN(target->luns,
+				    scan_info->lunindex[target_id])) {
+					CAM_GET_SIMPLE_LUN(target->luns,
+					    scan_info->lunindex[target_id],
+					    lun_id);
+					break;
+				}
+
+				scan_info->lunindex[target_id]++;
+			}
+
 			if (scan_info->lunindex[target_id] < nluns) {
-				CAM_GET_SIMPLE_LUN(target->luns,
-				    scan_info->lunindex[target_id], lun_id);
+				mtx_unlock(&target->luns_mtx);
 				next_target = 0;
 				CAM_DEBUG(request_ccb->ccb_h.path,
 				    CAM_DEBUG_PROBE,
-				   ("next lun to try at index %u is %u\n",
-				   scan_info->lunindex[target_id], lun_id));
+				   ("next lun to try at index %u is %jx\n",
+				   scan_info->lunindex[target_id],
+				   (uintmax_t)lun_id));
 				scan_info->lunindex[target_id]++;
 			} else {
-				/*
-				 * We're done with scanning all luns.
-				 *
-				 * Nuke the bogus device for lun 0 if lun 0
-				 * wasn't on the list.
-				 */
-				if (first != 0) {
-					TAILQ_FOREACH(device,
-					    &target->ed_entries, links) {
-						if (device->lun_id == 0) {
-							break;
-						}
-					}
-					if (device) {
-						xpt_release_device(device);
-					}
-				}
+				mtx_unlock(&target->luns_mtx);
+				/* We're done with scanning all luns. */
 			}
-		} else if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
-			int phl;
-
-			/*
-			 * If we already probed lun 0 successfully, or
-			 * we have additional configured luns on this
-			 * target that might have "gone away", go onto
-			 * the next lun.
-			 */
-			/*
-			 * We may touch devices that we don't
-			 * hold references too, so ensure they
-			 * don't disappear out from under us.
-			 * The target above is referenced by the
-			 * path in the request ccb.
-			 */
-			phl = 0;
-			device = TAILQ_FIRST(&target->ed_entries);
-			if (device != NULL) {
-				phl = CAN_SRCH_HI_SPARSE(device);
-				if (device->lun_id == 0)
-					device = TAILQ_NEXT(device, links);
-			}
-			if ((lun_id != 0) || (device != NULL)) {
-				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl) {
-					lun_id++;
-					next_target = 0;
-				}
-			}
-			if (lun_id == request_ccb->ccb_h.target_lun
-			    || lun_id > scan_info->cpi->max_lun)
-				next_target = 1;
 		} else {
-
+			mtx_unlock(&target->luns_mtx);
 			device = request_ccb->ccb_h.path->device;
-
-			if ((SCSI_QUIRK(device)->quirks &
-			    CAM_QUIRK_NOLUNS) == 0) {
-				/* Try the next lun */
-				if (lun_id < (CAM_SCSI2_MAXLUN-1)
-				  || CAN_SRCH_HI_DENSE(device)) {
-					lun_id++;
+			/* Continue sequential LUN scan if: */
+			/*  -- we have more LUNs that need recheck */
+			mtx_lock(&target->bus->eb_mtx);
+			nextdev = device;
+			while ((nextdev = TAILQ_NEXT(nextdev, links)) != NULL)
+				if ((nextdev->flags & CAM_DEV_UNCONFIGURED) == 0)
+					break;
+			mtx_unlock(&target->bus->eb_mtx);
+			if (nextdev != NULL) {
+				next_target = 0;
+			/*  -- stop if CAM_QUIRK_NOLUNS is set. */
+			} else if (SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOLUNS) {
+				next_target = 1;
+			/*  -- this LUN is connected and its SCSI version
+			 *     allows more LUNs. */
+			} else if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
+				if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
+				    CAN_SRCH_HI_DENSE(device))
 					next_target = 0;
-				}
+			/*  -- this LUN is disconnected, its SCSI version
+			 *     allows more LUNs and we guess they may be. */
+			} else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
+				if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
+				    CAN_SRCH_HI_SPARSE(device))
+					next_target = 0;
 			}
-			if (lun_id == request_ccb->ccb_h.target_lun
-			    || lun_id > scan_info->cpi->max_lun)
-				next_target = 1;
+			if (next_target == 0) {
+				lun_id++;
+				if (lun_id > scan_info->cpi->max_lun)
+					next_target = 1;
+			}
 		}
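
The rewritten sequential-scan logic above boils down to a small decision:
keep probing LUNs on this target if more already-configured LUNs still need a
recheck, or if the current LUN's state and SCSI level suggest further LUNs
may exist; otherwise move to the next target. A condensed sketch of that
decision; the flag names are illustrative, and the CAM_SCSI2_MAXLUN and
CAN_SRCH_HI_* details are folded into the booleans:

#include <stdbool.h>

static bool
try_next_lun(bool more_devs_to_recheck, bool quirk_noluns,
    bool lun_configured, bool inq_data_valid,
    unsigned lun_id, unsigned max_lun)
{
	if (more_devs_to_recheck)
		return (true);		/* finish rechecking known LUNs */
	if (quirk_noluns)
		return (false);		/* device family can't do LUNs */
	if (!lun_configured && !inq_data_valid)
		return (false);		/* nothing suggests more LUNs */
	return (lun_id + 1 <= max_lun);	/* stay within the HBA's range */
}

int main(void)
{
	return (try_next_lun(false, false, true, true, 0, 7) ? 0 : 1);
}
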
 
 		/*
@@ -2119,6 +2187,7 @@
 				}
 			}
 			if (done) {
+				mtx_unlock(mtx);
 				xpt_free_ccb(request_ccb);
 				xpt_free_ccb((union ccb *)scan_info->cpi);
 				request_ccb = scan_info->request_ccb;
@@ -2132,13 +2201,15 @@
 			}
 
 			if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
+				mtx_unlock(mtx);
 				xpt_free_ccb(request_ccb);
 				break;
 			}
-			status = xpt_create_path(&path, xpt_periph,
+			status = xpt_create_path(&path, NULL,
 			    scan_info->request_ccb->ccb_h.path_id,
 			    scan_info->counter, 0);
 			if (status != CAM_REQ_CMP) {
+				mtx_unlock(mtx);
 				printf("scsi_scan_bus: xpt_create_path failed"
 				    " with status %#x, bus scan halted\n",
 			       	    status);
@@ -2154,11 +2225,12 @@
 			    request_ccb->ccb_h.pinfo.priority);
 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 			request_ccb->ccb_h.cbfcnp = scsi_scan_bus;
+			request_ccb->ccb_h.flags |= CAM_UNLOCKED;
 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 			request_ccb->crcn.flags =
 			    scan_info->request_ccb->crcn.flags;
 		} else {
-			status = xpt_create_path(&path, xpt_periph,
+			status = xpt_create_path(&path, NULL,
 						 path_id, target_id, lun_id);
 			/*
 			 * Free the old request path- we're done with it. We
@@ -2177,10 +2249,12 @@
 				      request_ccb->ccb_h.pinfo.priority);
 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 			request_ccb->ccb_h.cbfcnp = scsi_scan_bus;
+			request_ccb->ccb_h.flags |= CAM_UNLOCKED;
 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 			request_ccb->crcn.flags =
 				scan_info->request_ccb->crcn.flags;
 		}
+		mtx_unlock(mtx);
 		xpt_action(request_ccb);
 		break;
 	}
@@ -2197,6 +2271,7 @@
 	cam_status status;
 	struct cam_path *new_path;
 	struct cam_periph *old_periph;
+	int lock;
 
 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n"));
 
@@ -2231,7 +2306,7 @@
 			    "can't continue\n");
 			return;
 		}
-		status = xpt_create_path(&new_path, xpt_periph,
+		status = xpt_create_path(&new_path, NULL,
 					  path->bus->path_id,
 					  path->target->target_id,
 					  path->device->lun_id);
@@ -2244,9 +2319,13 @@
 		xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT);
 		request_ccb->ccb_h.cbfcnp = xptscandone;
 		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
+		request_ccb->ccb_h.flags |= CAM_UNLOCKED;
 		request_ccb->crcn.flags = flags;
 	}
 
+	lock = (xpt_path_owned(path) == 0);
+	if (lock)
+		xpt_path_lock(path);
 	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
 		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
 			probe_softc *softc;
@@ -2272,6 +2351,8 @@
 			xpt_done(request_ccb);
 		}
 	}
+	if (lock)
+		xpt_path_unlock(path);
 }
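
scsi_scan_lun() can now be entered with or without the path lock held, so it
records whether it had to take the lock itself and only drops it in that
case. A compact sketch of the conditional-locking idiom; owned is a toy
stand-in for xpt_path_owned(path):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t path_mtx = PTHREAD_MUTEX_INITIALIZER;

static bool owned;	/* toy stand-in for xpt_path_owned(path) */

static void scan_lun(void)
{
	bool lock = !owned;

	if (lock) {
		pthread_mutex_lock(&path_mtx);
		owned = true;
	}
	/* ... find or create the probe periph with the lock held ... */
	if (lock) {
		owned = false;
		pthread_mutex_unlock(&path_mtx);
	}
}

int main(void)
{
	scan_lun();			/* caller did not hold the lock */
	pthread_mutex_lock(&path_mtx);	/* caller holds it */
	owned = true;
	scan_lun();
	owned = false;
	pthread_mutex_unlock(&path_mtx);
	return (0);
}
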
 
 static void
@@ -2285,7 +2366,6 @@
 static struct cam_ed *
 scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 {
-	struct cam_path path;
 	struct scsi_quirk_entry *quirk;
 	struct cam_ed *device;
 
@@ -2310,22 +2390,6 @@
 	device->device_id_len = 0;
 	device->supported_vpds = NULL;
 	device->supported_vpds_len = 0;
-
-	/*
-	 * XXX should be limited by number of CCBs this bus can
-	 * do.
-	 */
-	bus->sim->max_ccbs += device->ccbq.devq_openings;
-	if (lun_id != CAM_LUN_WILDCARD) {
-		xpt_compile_path(&path,
-				 NULL,
-				 bus->path_id,
-				 target->target_id,
-				 lun_id);
-		scsi_devise_transport(&path);
-		xpt_release_path(&path);
-	}
-
 	return (device);
 }
 
@@ -2379,7 +2443,7 @@
 			path->device->transport_version =
 			    otherdev->transport_version;
 		} else {
-			/* Until we know better, opt for safty */
+			/* Until we know better, opt for safety */
 			path->device->protocol_version = 2;
 			if (path->device->transport == XPORT_SPI)
 				path->device->transport_version = 2;
@@ -2498,6 +2562,21 @@
 			memcpy(cdai->buf, device->rcap_buf, amt);
 		}
 		break;
+	case CDAI_TYPE_EXT_INQ:
+		/*
+		 * We fetch extended inquiry data during probe, if
+		 * available.  We don't allow changing it.
+		 */
+		if (cdai->flags & CDAI_FLAG_STORE) 
+			return;
+		cdai->provsiz = device->ext_inq_len;
+		if (device->ext_inq_len == 0)
+			break;
+		amt = device->ext_inq_len;
+		if (cdai->provsiz > cdai->bufsiz)
+			amt = cdai->bufsiz;
+		memcpy(cdai->buf, device->ext_inq, amt);
+		break;
 	default:
 		return;
 	}
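
The new CDAI_TYPE_EXT_INQ case follows the same contract as the other
advinfo buffer types: report the full provided size in provsiz, but copy no
more than the caller's bufsiz. A small sketch of that clamp-and-copy
contract; the names here are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Returns bytes copied; *provsiz always reports the full length. */
static size_t
advinfo_fetch(uint8_t *buf, size_t bufsiz, const uint8_t *src,
    size_t srclen, size_t *provsiz)
{
	size_t amt;

	*provsiz = srclen;
	if (srclen == 0)
		return (0);
	amt = (srclen > bufsiz) ? bufsiz : srclen;
	memcpy(buf, src, amt);
	return (amt);
}

int main(void)
{
	uint8_t ext_inq[64] = { 0 }, buf[16];
	size_t prov, got;

	got = advinfo_fetch(buf, sizeof(buf), ext_inq,
	    sizeof(ext_inq), &prov);
	printf("copied %zu of %zu bytes\n", got, prov);
	return (0);
}
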
@@ -2504,15 +2583,8 @@
 	start_ccb->ccb_h.status = CAM_REQ_CMP;
 
 	if (cdai->flags & CDAI_FLAG_STORE) {
-		int owned;
-
-		owned = mtx_owned(start_ccb->ccb_h.path->bus->sim->mtx);
-		if (owned == 0)
-			mtx_lock(start_ccb->ccb_h.path->bus->sim->mtx);
 		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
 			  (void *)(uintptr_t)cdai->buftype);
-		if (owned == 0)
-			mtx_unlock(start_ccb->ccb_h.path->bus->sim->mtx);
 	}
 }
 
@@ -2524,7 +2596,7 @@
 	case XPT_SET_TRAN_SETTINGS:
 	{
 		scsi_set_transfer_settings(&start_ccb->cts,
-					   start_ccb->ccb_h.path->device,
+					   start_ccb->ccb_h.path,
 					   /*async_update*/FALSE);
 		break;
 	}
@@ -2537,14 +2609,6 @@
 			      start_ccb->ccb_h.path, start_ccb->crcn.flags,
 			      start_ccb);
 		break;
-	case XPT_GET_TRAN_SETTINGS:
-	{
-		struct cam_sim *sim;
-
-		sim = start_ccb->ccb_h.path->bus->sim;
-		(*(sim->sim_action))(sim, start_ccb);
-		break;
-	}
 	case XPT_DEV_ADVINFO:
 	{
 		scsi_dev_advinfo(start_ccb);
@@ -2557,7 +2621,7 @@
 }
 
 static void
-scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
+scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path,
 			   int async_update)
 {
 	struct	ccb_pathinq cpi;
@@ -2564,10 +2628,10 @@
 	struct	ccb_trans_settings cur_cts;
 	struct	ccb_trans_settings_scsi *scsi;
 	struct	ccb_trans_settings_scsi *cur_scsi;
-	struct	cam_sim *sim;
 	struct	scsi_inquiry_data *inq_data;
+	struct	cam_ed *device;
 
-	if (device == NULL) {
+	if (path == NULL || (device = path->device) == NULL) {
 		cts->ccb_h.status = CAM_PATH_INVALID;
 		xpt_done((union ccb *)cts);
 		return;
@@ -2584,7 +2648,7 @@
 		cts->protocol_version = device->protocol_version;
 
 	if (cts->protocol != device->protocol) {
-		xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
+		xpt_print(path, "Uninitialized Protocol %x:%x?\n",
 		       cts->protocol, device->protocol);
 		cts->protocol = device->protocol;
 	}
@@ -2591,7 +2655,7 @@
 
 	if (cts->protocol_version > device->protocol_version) {
 		if (bootverbose) {
-			xpt_print(cts->ccb_h.path, "Down reving Protocol "
+			xpt_print(path, "Down reving Protocol "
 			    "Version from %d to %d?\n", cts->protocol_version,
 			    device->protocol_version);
 		}
@@ -2609,7 +2673,7 @@
 		cts->transport_version = device->transport_version;
 
 	if (cts->transport != device->transport) {
-		xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
+		xpt_print(path, "Uninitialized Transport %x:%x?\n",
 		    cts->transport, device->transport);
 		cts->transport = device->transport;
 	}
@@ -2616,7 +2680,7 @@
 
 	if (cts->transport_version > device->transport_version) {
 		if (bootverbose) {
-			xpt_print(cts->ccb_h.path, "Down reving Transport "
+			xpt_print(path, "Down reving Transport "
 			    "Version from %d to %d?\n", cts->transport_version,
 			    device->transport_version);
 		}
@@ -2623,8 +2687,6 @@
 		cts->transport_version = device->transport_version;
 	}
 
-	sim = cts->ccb_h.path->bus->sim;
-
 	/*
 	 * Nothing more of interest to do unless
 	 * this is a device connected via the
@@ -2632,13 +2694,13 @@
 	 */
 	if (cts->protocol != PROTO_SCSI) {
 		if (async_update == FALSE)
-			(*(sim->sim_action))(sim, (union ccb *)cts);
+			xpt_action_default((union ccb *)cts);
 		return;
 	}
 
 	inq_data = &device->inq_data;
 	scsi = &cts->proto_specific.scsi;
-	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE);
+	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
 	cpi.ccb_h.func_code = XPT_PATH_INQ;
 	xpt_action((union ccb *)&cpi);
 
@@ -2659,11 +2721,11 @@
 		 * Perform sanity checking against what the
 		 * controller and device can do.
 		 */
-		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE);
+		xpt_setup_ccb(&cur_cts.ccb_h, path, CAM_PRIORITY_NONE);
 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 		cur_cts.type = cts->type;
 		xpt_action((union ccb *)&cur_cts);
-		if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		if (cam_ccb_status((union ccb *)&cur_cts) != CAM_REQ_CMP) {
 			return;
 		}
 		cur_scsi = &cur_cts.proto_specific.scsi;
@@ -2780,7 +2842,7 @@
 		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
 				   CTS_SPI_VALID_SYNC_OFFSET|
 				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
-			scsi_toggle_tags(cts->ccb_h.path);
+			scsi_toggle_tags(path);
 	}
 
 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
@@ -2817,12 +2879,12 @@
 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 			} else {
-				xpt_stop_tags(cts->ccb_h.path);
+				xpt_stop_tags(path);
 			}
 		}
 	}
 	if (async_update == FALSE)
-		(*(sim->sim_action))(sim, (union ccb *)cts);
+		xpt_action_default((union ccb *)cts);
 }
 
 static void
@@ -2850,10 +2912,10 @@
 		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
 		cts.proto_specific.scsi.flags = 0;
 		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
-		scsi_set_transfer_settings(&cts, path->device,
+		scsi_set_transfer_settings(&cts, path,
 					  /*async_update*/TRUE);
 		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
-		scsi_set_transfer_settings(&cts, path->device,
+		scsi_set_transfer_settings(&cts, path,
 					  /*async_update*/TRUE);
 	}
 }
@@ -2924,10 +2986,14 @@
 		xpt_release_device(device);
 	} else if (async_code == AC_TRANSFER_NEG) {
 		struct ccb_trans_settings *settings;
+		struct cam_path path;
 
 		settings = (struct ccb_trans_settings *)async_arg;
-		scsi_set_transfer_settings(settings, device,
+		xpt_compile_path(&path, NULL, bus->path_id, target->target_id,
+				 device->lun_id);
+		scsi_set_transfer_settings(settings, &path,
 					  /*async_update*/TRUE);
+		xpt_release_path(&path);
 	}
 }
 
@@ -2941,13 +3007,13 @@
 	u_int	freq;
 	u_int	mb;
 
-	mtx_assert(periph->sim->mtx, MA_OWNED);
+	cam_periph_assert(periph, MA_OWNED);
 
 	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
 	xpt_action((union ccb*)&cts);
-	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
+	if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP)
 		return;
 	/* Ask the SIM for its base transfer speed */
 	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);


