[Midnightbsd-cvs] src [10172] trunk/sys/amd64/vmm: add vmm

laffer1 at midnightbsd.org
Fri Jun 1 18:54:08 EDT 2018


Revision: 10172
          http://svnweb.midnightbsd.org/src/?rev=10172
Author:   laffer1
Date:     2018-06-01 18:54:07 -0400 (Fri, 01 Jun 2018)
Log Message:
-----------
add vmm

Added Paths:
-----------
    trunk/sys/amd64/vmm/
    trunk/sys/amd64/vmm/amd/
    trunk/sys/amd64/vmm/amd/amdv.c
    trunk/sys/amd64/vmm/amd/npt.c
    trunk/sys/amd64/vmm/amd/npt.h
    trunk/sys/amd64/vmm/amd/svm.c
    trunk/sys/amd64/vmm/amd/svm.h
    trunk/sys/amd64/vmm/amd/svm_genassym.c
    trunk/sys/amd64/vmm/amd/svm_msr.c
    trunk/sys/amd64/vmm/amd/svm_msr.h
    trunk/sys/amd64/vmm/amd/svm_softc.h
    trunk/sys/amd64/vmm/amd/svm_support.S
    trunk/sys/amd64/vmm/amd/vmcb.c
    trunk/sys/amd64/vmm/amd/vmcb.h
    trunk/sys/amd64/vmm/intel/
    trunk/sys/amd64/vmm/intel/ept.c
    trunk/sys/amd64/vmm/intel/ept.h
    trunk/sys/amd64/vmm/intel/vmcs.c
    trunk/sys/amd64/vmm/intel/vmcs.h
    trunk/sys/amd64/vmm/intel/vmx.c
    trunk/sys/amd64/vmm/intel/vmx.h
    trunk/sys/amd64/vmm/intel/vmx_controls.h
    trunk/sys/amd64/vmm/intel/vmx_cpufunc.h
    trunk/sys/amd64/vmm/intel/vmx_genassym.c
    trunk/sys/amd64/vmm/intel/vmx_msr.c
    trunk/sys/amd64/vmm/intel/vmx_msr.h
    trunk/sys/amd64/vmm/intel/vmx_support.S
    trunk/sys/amd64/vmm/intel/vtd.c
    trunk/sys/amd64/vmm/io/
    trunk/sys/amd64/vmm/io/iommu.c
    trunk/sys/amd64/vmm/io/iommu.h
    trunk/sys/amd64/vmm/io/ppt.c
    trunk/sys/amd64/vmm/io/ppt.h
    trunk/sys/amd64/vmm/io/vatpic.c
    trunk/sys/amd64/vmm/io/vatpic.h
    trunk/sys/amd64/vmm/io/vatpit.c
    trunk/sys/amd64/vmm/io/vatpit.h
    trunk/sys/amd64/vmm/io/vhpet.c
    trunk/sys/amd64/vmm/io/vhpet.h
    trunk/sys/amd64/vmm/io/vioapic.c
    trunk/sys/amd64/vmm/io/vioapic.h
    trunk/sys/amd64/vmm/io/vlapic.c
    trunk/sys/amd64/vmm/io/vlapic.h
    trunk/sys/amd64/vmm/io/vlapic_priv.h
    trunk/sys/amd64/vmm/io/vpmtmr.c
    trunk/sys/amd64/vmm/io/vpmtmr.h
    trunk/sys/amd64/vmm/io/vrtc.c
    trunk/sys/amd64/vmm/io/vrtc.h
    trunk/sys/amd64/vmm/vmm.c
    trunk/sys/amd64/vmm/vmm_dev.c
    trunk/sys/amd64/vmm/vmm_host.c
    trunk/sys/amd64/vmm/vmm_host.h
    trunk/sys/amd64/vmm/vmm_instruction_emul.c
    trunk/sys/amd64/vmm/vmm_ioport.c
    trunk/sys/amd64/vmm/vmm_ioport.h
    trunk/sys/amd64/vmm/vmm_ipi.c
    trunk/sys/amd64/vmm/vmm_ipi.h
    trunk/sys/amd64/vmm/vmm_ktr.h
    trunk/sys/amd64/vmm/vmm_lapic.c
    trunk/sys/amd64/vmm/vmm_lapic.h
    trunk/sys/amd64/vmm/vmm_mem.c
    trunk/sys/amd64/vmm/vmm_mem.h
    trunk/sys/amd64/vmm/vmm_stat.c
    trunk/sys/amd64/vmm/vmm_stat.h
    trunk/sys/amd64/vmm/vmm_support.S
    trunk/sys/amd64/vmm/vmm_util.c
    trunk/sys/amd64/vmm/vmm_util.h
    trunk/sys/amd64/vmm/x86.c
    trunk/sys/amd64/vmm/x86.h

Added: trunk/sys/amd64/vmm/amd/amdv.c
===================================================================
--- trunk/sys/amd64/vmm/amd/amdv.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/amdv.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,134 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/amd/amdv.c 284900 2015-06-28 03:22:26Z neel $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/amdv.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/errno.h>
+
+#include <machine/vmm.h>
+#include "io/iommu.h"
+
+static int
+amd_iommu_init(void)
+{
+
+	printf("amd_iommu_init: not implemented\n");
+	return (ENXIO);
+}
+
+static void
+amd_iommu_cleanup(void)
+{
+
+	printf("amd_iommu_cleanup: not implemented\n");
+}
+
+static void
+amd_iommu_enable(void)
+{
+
+	printf("amd_iommu_enable: not implemented\n");
+}
+
+static void
+amd_iommu_disable(void)
+{
+
+	printf("amd_iommu_disable: not implemented\n");
+}
+
+static void *
+amd_iommu_create_domain(vm_paddr_t maxaddr)
+{
+
+	printf("amd_iommu_create_domain: not implemented\n");
+	return (NULL);
+}
+
+static void
+amd_iommu_destroy_domain(void *domain)
+{
+
+	printf("amd_iommu_destroy_domain: not implemented\n");
+}
+
+static uint64_t
+amd_iommu_create_mapping(void *domain, vm_paddr_t gpa, vm_paddr_t hpa,
+			 uint64_t len)
+{
+
+	printf("amd_iommu_create_mapping: not implemented\n");
+	return (0);
+}
+
+static uint64_t
+amd_iommu_remove_mapping(void *domain, vm_paddr_t gpa, uint64_t len)
+{
+
+	printf("amd_iommu_remove_mapping: not implemented\n");
+	return (0);
+}
+
+static void
+amd_iommu_add_device(void *domain, uint16_t rid)
+{
+
+	printf("amd_iommu_add_device: not implemented\n");
+}
+
+static void
+amd_iommu_remove_device(void *domain, uint16_t rid)
+{
+
+	printf("amd_iommu_remove_device: not implemented\n");
+}
+
+static void
+amd_iommu_invalidate_tlb(void *domain)
+{
+
+	printf("amd_iommu_invalidate_tlb: not implemented\n");
+}
+
+struct iommu_ops iommu_ops_amd = {
+	amd_iommu_init,
+	amd_iommu_cleanup,
+	amd_iommu_enable,
+	amd_iommu_disable,
+	amd_iommu_create_domain,
+	amd_iommu_destroy_domain,
+	amd_iommu_create_mapping,
+	amd_iommu_remove_mapping,
+	amd_iommu_add_device,
+	amd_iommu_remove_device,
+	amd_iommu_invalidate_tlb,
+};


Property changes on: trunk/sys/amd64/vmm/amd/amdv.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
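
A note on the stub above: amdv.c provides the AMD backend for the iommu_ops
interface declared in io/iommu.h.  Every entry point just logs "not
implemented", and amd_iommu_init() returns ENXIO, so the IOMMU (and with it
PCI passthrough) is reported unavailable on AMD hosts for now.  The sketch
below is illustration only and not part of the commit; the 'init' member
name is assumed to match the first positional initializer above:

	#include "io/iommu.h"			/* declares struct iommu_ops */

	extern struct iommu_ops iommu_ops_amd;	/* defined in amdv.c above */

	static int
	iommu_probe_amd_example(void)
	{
		/* amdv.c's init stub prints a diagnostic and returns ENXIO. */
		return (iommu_ops_amd.init());
	}
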
Added: trunk/sys/amd64/vmm/amd/npt.c
===================================================================
--- trunk/sys/amd64/vmm/amd/npt.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/npt.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,88 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/npt.c 276403 2014-12-30 08:24:14Z neel $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+
+#include <machine/pmap.h>
+
+#include "npt.h"
+
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, npt, CTLFLAG_RW, NULL, NULL);
+
+static int npt_flags;
+SYSCTL_INT(_hw_vmm_npt, OID_AUTO, pmap_flags, CTLFLAG_RD,
+	&npt_flags, 0, NULL);
+
+#define NPT_IPIMASK	0xFF
+
+/*
+ * AMD nested page table init.
+ */
+int
+svm_npt_init(int ipinum)
+{
+	int enable_superpage = 1;
+
+	npt_flags = ipinum & NPT_IPIMASK;
+	TUNABLE_INT_FETCH("hw.vmm.npt.enable_superpage", &enable_superpage);
+	if (enable_superpage)
+		npt_flags |= PMAP_PDE_SUPERPAGE; 
+	
+	return (0);
+}
+
+static int
+npt_pinit(pmap_t pmap)
+{
+
+	return (pmap_pinit_type(pmap, PT_RVI, npt_flags));
+}
+
+struct vmspace *
+svm_npt_alloc(vm_offset_t min, vm_offset_t max)
+{
+	
+	return (vmspace_alloc(min, max, npt_pinit));
+}
+
+void
+svm_npt_free(struct vmspace *vmspace)
+{
+
+	vmspace_free(vmspace);
+}


Property changes on: trunk/sys/amd64/vmm/amd/npt.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
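
For context, npt.c is the thin glue between SVM and the pmap layer: guest
address spaces get page tables in the PT_RVI (nested paging) format, and the
hw.vmm.npt.enable_superpage loader tunable controls whether
PMAP_PDE_SUPERPAGE is added to the pmap flags (setting the tunable to 0
disables superpage promotion for guest memory).  A minimal lifecycle sketch,
illustration only; the ipinum value and the 512 GB ceiling are placeholders,
not values taken from the commit:

	static void
	npt_lifecycle_example(void)
	{
		struct vmspace *gvm;

		svm_npt_init(0);			/* once, at vmm load */
		gvm = svm_npt_alloc(0, 512UL << 30);	/* per-VM nested page tables */
		/* ... run the guest with 'gvm' as its nested address space ... */
		svm_npt_free(gvm);
	}
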
Added: trunk/sys/amd64/vmm/amd/npt.h
===================================================================
--- trunk/sys/amd64/vmm/amd/npt.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/npt.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,37 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/amd/npt.h 276403 2014-12-30 08:24:14Z neel $
+ */
+
+#ifndef _SVM_NPT_H_
+#define _SVM_NPT_H_
+
+int 	svm_npt_init(int ipinum);
+struct	vmspace *svm_npt_alloc(vm_offset_t min, vm_offset_t max);
+void	svm_npt_free(struct vmspace *vmspace);
+
+#endif /* _SVM_NPT_H_ */


Property changes on: trunk/sys/amd64/vmm/amd/npt.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
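
The next file, svm.c, is the bulk of the import: per-CPU SVM enable/disable,
VMCB setup, the MSR and I/O permission bitmaps, event injection and the
#VMEXIT dispatcher.  As a worked illustration (not part of the commit) of the
MSR permission bitmap layout it uses, two bits per MSR with the read bit low
and the write bit high, take MSR_EFER (0xC0000080):

	base  = 0x2000                        (size of the 0x0 - 0x1FFF range)
	off   = 0xC0000080 - 0xC0000000 = 0x80
	index = (off + base) / 4 = 0x820      (byte offset into the bitmap)
	bit   = (0xC0000080 % 4) * 2 = 0      (read bit; the write bit is bit 1)

svm_vminit() clears only the read bit for EFER (via svm_msr_rd_ok), so guest
reads of EFER bypass the hypervisor while writes still trap, which keeps the
guest from clearing EFER_SVM.
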
Added: trunk/sys/amd64/vmm/amd/svm.c
===================================================================
--- trunk/sys/amd64/vmm/amd/svm.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/svm.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,2211 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013, Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/svm.c 330069 2018-02-27 14:47:56Z avg $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/smp.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/cpufunc.h>
+#include <machine/psl.h>
+#include <machine/pmap.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+#include <machine/smp.h>
+#include <machine/vmm.h>
+#include <machine/vmm_dev.h>
+#include <machine/vmm_instruction_emul.h>
+
+#include "vmm_lapic.h"
+#include "vmm_stat.h"
+#include "vmm_ktr.h"
+#include "vmm_ioport.h"
+#include "vatpic.h"
+#include "vlapic.h"
+#include "vlapic_priv.h"
+
+#include "x86.h"
+#include "vmcb.h"
+#include "svm.h"
+#include "svm_softc.h"
+#include "svm_msr.h"
+#include "npt.h"
+
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);
+
+/*
+ * SVM CPUID function 0x8000_000A, edx bit decoding.
+ */
+#define AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
+#define AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
+#define AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
+#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
+#define AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
+#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
+#define AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
+#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
+#define AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
+#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
+#define	AMD_CPUID_SVM_AVIC		BIT(13)	/* AVIC present */
+
+#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID 	|	\
+				VMCB_CACHE_IOPM		|	\
+				VMCB_CACHE_I		|	\
+				VMCB_CACHE_TPR		|	\
+				VMCB_CACHE_CR2		|	\
+				VMCB_CACHE_CR		|	\
+				VMCB_CACHE_DT		|	\
+				VMCB_CACHE_SEG		|	\
+				VMCB_CACHE_NP)
+
+static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
+SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
+    0, NULL);
+
+static MALLOC_DEFINE(M_SVM, "svm", "svm");
+static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
+
+/* Per-CPU context area. */
+extern struct pcpu __pcpu[];
+
+static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
+SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
+    "SVM features advertised by CPUID.8000000AH:EDX");
+
+static int disable_npf_assist;
+SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
+    &disable_npf_assist, 0, NULL);
+
+/* Maximum ASIDs supported by the processor */
+static uint32_t nasid;
+SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
+    "Number of ASIDs supported by this processor");
+
+/* Current ASID generation for each host cpu */
+static struct asid asid[MAXCPU];
+
+/* 
+ * SVM host state saved area of size 4KB for each core.
+ */
+static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
+
+static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
+static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
+static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
+
+static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
+
+static __inline int
+flush_by_asid(void)
+{
+
+	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
+}
+
+static __inline int
+decode_assist(void)
+{
+
+	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
+}
+
+static void
+svm_disable(void *arg __unused)
+{
+	uint64_t efer;
+
+	efer = rdmsr(MSR_EFER);
+	efer &= ~EFER_SVM;
+	wrmsr(MSR_EFER, efer);
+}
+
+/*
+ * Disable SVM on all CPUs.
+ */
+static int
+svm_cleanup(void)
+{
+
+	smp_rendezvous(NULL, svm_disable, NULL, NULL);
+	return (0);
+}
+
+/*
+ * Verify that all the features required by bhyve are available.
+ */
+static int
+check_svm_features(void)
+{
+	u_int regs[4];
+
+	/* CPUID Fn8000_000A is for SVM */
+	do_cpuid(0x8000000A, regs);
+	svm_feature &= regs[3];
+
+	/*
+	 * The number of ASIDs can be configured to be less than what is
+	 * supported by the hardware but not more.
+	 */
+	if (nasid == 0 || nasid > regs[1])
+		nasid = regs[1];
+	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));
+
+	/* bhyve requires the Nested Paging feature */
+	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
+		printf("SVM: Nested Paging feature not available.\n");
+		return (ENXIO);
+	}
+
+	/* bhyve requires the NRIP Save feature */
+	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
+		printf("SVM: NRIP Save feature not available.\n");
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+static void
+svm_enable(void *arg __unused)
+{
+	uint64_t efer;
+
+	efer = rdmsr(MSR_EFER);
+	efer |= EFER_SVM;
+	wrmsr(MSR_EFER, efer);
+
+	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
+}
+
+/*
+ * Return 1 if SVM is enabled on this processor and 0 otherwise.
+ */
+static int
+svm_available(void)
+{
+	uint64_t msr;
+
+	/* Section 15.4 Enabling SVM from APM2. */
+	if ((amd_feature2 & AMDID2_SVM) == 0) {
+		printf("SVM: not available.\n");
+		return (0);
+	}
+
+	msr = rdmsr(MSR_VM_CR);
+	if ((msr & VM_CR_SVMDIS) != 0) {
+		printf("SVM: disabled by BIOS.\n");
+		return (0);
+	}
+
+	return (1);
+}
+
+static int
+svm_init(int ipinum)
+{
+	int error, cpu;
+
+	if (!svm_available())
+		return (ENXIO);
+
+	error = check_svm_features();
+	if (error)
+		return (error);
+
+	vmcb_clean &= VMCB_CACHE_DEFAULT;
+
+	for (cpu = 0; cpu < MAXCPU; cpu++) {
+		/*
+		 * Initialize the host ASIDs to their "highest" valid values.
+		 *
+		 * The next ASID allocation will rollover both 'gen' and 'num'
+		 * and start off the sequence at {1,1}.
+		 */
+		asid[cpu].gen = ~0UL;
+		asid[cpu].num = nasid - 1;
+	}
+
+	svm_msr_init();
+	svm_npt_init(ipinum);
+
+	/* Enable SVM on all CPUs */
+	smp_rendezvous(NULL, svm_enable, NULL, NULL);
+
+	return (0);
+}
+
+static void
+svm_restore(void)
+{
+
+	svm_enable(NULL);
+}		
+
+/* Pentium compatible MSRs */
+#define MSR_PENTIUM_START 	0	
+#define MSR_PENTIUM_END 	0x1FFF
+/* AMD 6th generation and Intel compatible MSRs */
+#define MSR_AMD6TH_START 	0xC0000000UL	
+#define MSR_AMD6TH_END 		0xC0001FFFUL	
+/* AMD 7th and 8th generation compatible MSRs */
+#define MSR_AMD7TH_START 	0xC0010000UL	
+#define MSR_AMD7TH_END 		0xC0011FFFUL	
+
+/*
+ * Get the index and bit position for a MSR in permission bitmap.
+ * Two bits are used for each MSR: lower bit for read and higher bit for write.
+ */
+static int
+svm_msr_index(uint64_t msr, int *index, int *bit)
+{
+	uint32_t base, off;
+
+	*index = -1;
+	*bit = (msr % 4) * 2;
+	base = 0;
+
+	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
+		*index = msr / 4;
+		return (0);
+	}
+
+	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 
+	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
+		off = (msr - MSR_AMD6TH_START); 
+		*index = (off + base) / 4;
+		return (0);
+	} 
+
+	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
+	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
+		off = (msr - MSR_AMD7TH_START);
+		*index = (off + base) / 4;
+		return (0);
+	}
+
+	return (EINVAL);
+}
+
+/*
+ * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
+ */
+static void
+svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
+{
+	int index, bit, error;
+
+	error = svm_msr_index(msr, &index, &bit);
+	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
+	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
+	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
+	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
+	    "msr %#lx", __func__, bit, msr));
+
+	if (read)
+		perm_bitmap[index] &= ~(1UL << bit);
+
+	if (write)
+		perm_bitmap[index] &= ~(2UL << bit);
+}
+
+static void
+svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
+{
+
+	svm_msr_perm(perm_bitmap, msr, true, true);
+}
+
+static void
+svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
+{
+
+	svm_msr_perm(perm_bitmap, msr, true, false);
+}
+
+static __inline int
+svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
+{
+	struct vmcb_ctrl *ctrl;
+
+	KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
+
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
+}
+
+static __inline void
+svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
+    int enabled)
+{
+	struct vmcb_ctrl *ctrl;
+	uint32_t oldval;
+
+	KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
+
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+	oldval = ctrl->intercept[idx];
+
+	if (enabled)
+		ctrl->intercept[idx] |= bitmask;
+	else
+		ctrl->intercept[idx] &= ~bitmask;
+
+	if (ctrl->intercept[idx] != oldval) {
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
+		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
+		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
+	}
+}
+
+static __inline void
+svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
+{
+
+	svm_set_intercept(sc, vcpu, off, bitmask, 0);
+}
+
+static __inline void
+svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
+{
+
+	svm_set_intercept(sc, vcpu, off, bitmask, 1);
+}
+
+static void
+vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
+    uint64_t msrpm_base_pa, uint64_t np_pml4)
+{
+	struct vmcb_ctrl *ctrl;
+	struct vmcb_state *state;
+	uint32_t mask;
+	int n;
+
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+	state = svm_get_vmcb_state(sc, vcpu);
+
+	ctrl->iopm_base_pa = iopm_base_pa;
+	ctrl->msrpm_base_pa = msrpm_base_pa;
+
+	/* Enable nested paging */
+	ctrl->np_enable = 1;
+	ctrl->n_cr3 = np_pml4;
+
+	/*
+	 * Intercept accesses to the control registers that are not shadowed
+	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
+	 */
+	for (n = 0; n < 16; n++) {
+		mask = (BIT(n) << 16) | BIT(n);
+		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
+			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
+		else
+			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
+	}
+
+
+	/*
+	 * Intercept everything when tracing guest exceptions otherwise
+	 * just intercept machine check exception.
+	 */
+	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
+		for (n = 0; n < 32; n++) {
+			/*
+			 * Skip unimplemented vectors in the exception bitmap.
+			 */
+			if (n == 2 || n == 9) {
+				continue;
+			}
+			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
+		}
+	} else {
+		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
+	}
+
+	/* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+	    VMCB_INTCPT_FERR_FREEZE);
+
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
+
+	/*
+	 * From section "Canonicalization and Consistency Checks" in APMv2
+	 * the VMRUN intercept bit must be set to pass the consistency check.
+	 */
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
+
+	/*
+	 * The ASID will be set to a non-zero value just before VMRUN.
+	 */
+	ctrl->asid = 0;
+
+	/*
+	 * Section 15.21.1, Interrupt Masking in EFLAGS
+	 * Section 15.21.2, Virtualizing APIC.TPR
+	 *
+	 * This must be set for %rflag and %cr8 isolation of guest and host.
+	 */
+	ctrl->v_intr_masking = 1;
+
+	/* Enable Last Branch Record aka LBR for debugging */
+	ctrl->lbr_virt_en = 1;
+	state->dbgctl = BIT(0);
+
+	/* EFER_SVM must always be set when the guest is executing */
+	state->efer = EFER_SVM;
+
+	/* Set up the PAT to power-on state */
+	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
+	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
+	    PAT_VALUE(2, PAT_UNCACHED)		|
+	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
+	    PAT_VALUE(4, PAT_WRITE_BACK)	|
+	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
+	    PAT_VALUE(6, PAT_UNCACHED)		|
+	    PAT_VALUE(7, PAT_UNCACHEABLE);
+}
+
+/*
+ * Initialize a virtual machine.
+ */
+static void *
+svm_vminit(struct vm *vm, pmap_t pmap)
+{
+	struct svm_softc *svm_sc;
+	struct svm_vcpu *vcpu;
+	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
+	int i;
+
+	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
+	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
+		panic("malloc of svm_softc not aligned on page boundary");
+
+	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
+	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
+	if (svm_sc->msr_bitmap == NULL)
+		panic("contigmalloc of SVM MSR bitmap failed");
+	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
+	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
+	if (svm_sc->iopm_bitmap == NULL)
+		panic("contigmalloc of SVM IO bitmap failed");
+
+	svm_sc->vm = vm;
+	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);
+
+	/*
+	 * Intercept read and write accesses to all MSRs.
+	 */
+	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);
+
+	/*
+	 * Access to the following MSRs is redirected to the VMCB when the
+	 * guest is executing. Therefore it is safe to allow the guest to
+	 * read/write these MSRs directly without hypervisor involvement.
+	 */
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);
+
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
+	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
+
+	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
+
+	/*
+	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
+	 */
+	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
+
+	/* Intercept access to all I/O ports. */
+	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);
+
+	iopm_pa = vtophys(svm_sc->iopm_bitmap);
+	msrpm_pa = vtophys(svm_sc->msr_bitmap);
+	pml4_pa = svm_sc->nptp;
+	for (i = 0; i < VM_MAXCPU; i++) {
+		vcpu = svm_get_vcpu(svm_sc, i);
+		vcpu->nextrip = ~0;
+		vcpu->lastcpu = NOCPU;
+		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
+		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
+		svm_msr_guest_init(svm_sc, i);
+	}
+	return (svm_sc);
+}
+
+/*
+ * Collateral for a generic SVM VM-exit.
+ */
+static void
+vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
+{
+
+	vme->exitcode = VM_EXITCODE_SVM;
+	vme->u.svm.exitcode = code;
+	vme->u.svm.exitinfo1 = info1;
+	vme->u.svm.exitinfo2 = info2;
+}
+
+static int
+svm_cpl(struct vmcb_state *state)
+{
+
+	/*
+	 * From APMv2:
+	 *   "Retrieve the CPL from the CPL field in the VMCB, not
+	 *    from any segment DPL"
+	 */
+	return (state->cpl);
+}
+
+static enum vm_cpu_mode
+svm_vcpu_mode(struct vmcb *vmcb)
+{
+	struct vmcb_segment seg;
+	struct vmcb_state *state;
+	int error;
+
+	state = &vmcb->state;
+
+	if (state->efer & EFER_LMA) {
+		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
+		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
+		    error));
+
+		/*
+		 * Section 4.8.1 for APM2, check if Code Segment has
+		 * Long attribute set in descriptor.
+		 */
+		if (seg.attrib & VMCB_CS_ATTRIB_L)
+			return (CPU_MODE_64BIT);
+		else
+			return (CPU_MODE_COMPATIBILITY);
+	} else  if (state->cr0 & CR0_PE) {
+		return (CPU_MODE_PROTECTED);
+	} else {
+		return (CPU_MODE_REAL);
+	}
+}
+
+static enum vm_paging_mode
+svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
+{
+
+	if ((cr0 & CR0_PG) == 0)
+		return (PAGING_MODE_FLAT);
+	if ((cr4 & CR4_PAE) == 0)
+		return (PAGING_MODE_32);
+	if (efer & EFER_LME)
+		return (PAGING_MODE_64);
+	else
+		return (PAGING_MODE_PAE);
+}
+
+/*
+ * ins/outs utility routines
+ */
+static uint64_t
+svm_inout_str_index(struct svm_regctx *regs, int in)
+{
+	uint64_t val;
+
+	val = in ? regs->sctx_rdi : regs->sctx_rsi;
+
+	return (val);
+}
+
+static uint64_t
+svm_inout_str_count(struct svm_regctx *regs, int rep)
+{
+	uint64_t val;
+
+	val = rep ? regs->sctx_rcx : 1;
+
+	return (val);
+}
+
+static void
+svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
+    int in, struct vm_inout_str *vis)
+{
+	int error, s;
+
+	if (in) {
+		vis->seg_name = VM_REG_GUEST_ES;
+	} else {
+		/* The segment field has standard encoding */
+		s = (info1 >> 10) & 0x7;
+		vis->seg_name = vm_segment_name(s);
+	}
+
+	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
+	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
+}
+
+static int
+svm_inout_str_addrsize(uint64_t info1)
+{
+        uint32_t size;
+
+        size = (info1 >> 7) & 0x7;
+        switch (size) {
+        case 1:
+                return (2);     /* 16 bit */
+        case 2:
+                return (4);     /* 32 bit */
+        case 4:
+                return (8);     /* 64 bit */
+        default:
+                panic("%s: invalid size encoding %d", __func__, size);
+        }
+}
+
+static void
+svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
+{
+	struct vmcb_state *state;
+
+	state = &vmcb->state;
+	paging->cr3 = state->cr3;
+	paging->cpl = svm_cpl(state);
+	paging->cpu_mode = svm_vcpu_mode(vmcb);
+	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
+	    state->efer);
+}
+
+#define	UNHANDLED 0
+
+/*
+ * Handle guest I/O intercept.
+ */
+static int
+svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
+{
+	struct vmcb_ctrl *ctrl;
+	struct vmcb_state *state;
+	struct svm_regctx *regs;
+	struct vm_inout_str *vis;
+	uint64_t info1;
+	int inout_string;
+
+	state = svm_get_vmcb_state(svm_sc, vcpu);
+	ctrl  = svm_get_vmcb_ctrl(svm_sc, vcpu);
+	regs  = svm_get_guest_regctx(svm_sc, vcpu);
+
+	info1 = ctrl->exitinfo1;
+	inout_string = info1 & BIT(2) ? 1 : 0;
+
+	/*
+	 * The effective segment number in EXITINFO1[12:10] is populated
+	 * only if the processor has the DecodeAssist capability.
+	 *
+	 * XXX this is not specified explicitly in APMv2 but can be verified
+	 * empirically.
+	 */
+	if (inout_string && !decode_assist())
+		return (UNHANDLED);
+
+	vmexit->exitcode 	= VM_EXITCODE_INOUT;
+	vmexit->u.inout.in 	= (info1 & BIT(0)) ? 1 : 0;
+	vmexit->u.inout.string 	= inout_string;
+	vmexit->u.inout.rep 	= (info1 & BIT(3)) ? 1 : 0;
+	vmexit->u.inout.bytes 	= (info1 >> 4) & 0x7;
+	vmexit->u.inout.port 	= (uint16_t)(info1 >> 16);
+	vmexit->u.inout.eax 	= (uint32_t)(state->rax);
+
+	if (inout_string) {
+		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
+		vis = &vmexit->u.inout_str;
+		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
+		vis->rflags = state->rflags;
+		vis->cr0 = state->cr0;
+		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
+		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
+		vis->addrsize = svm_inout_str_addrsize(info1);
+		svm_inout_str_seginfo(svm_sc, vcpu, info1,
+		    vmexit->u.inout.in, vis);
+	}
+
+	return (UNHANDLED);
+}
+
+static int
+npf_fault_type(uint64_t exitinfo1)
+{
+
+	if (exitinfo1 & VMCB_NPF_INFO1_W)
+		return (VM_PROT_WRITE);
+	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
+		return (VM_PROT_EXECUTE);
+	else
+		return (VM_PROT_READ);
+}
+
+static bool
+svm_npf_emul_fault(uint64_t exitinfo1)
+{
+	
+	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
+		return (false);
+	}
+
+	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
+		return (false);
+	}
+
+	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
+		return (false);
+	}
+
+	return (true);	
+}
+
+static void
+svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
+{
+	struct vm_guest_paging *paging;
+	struct vmcb_segment seg;
+	struct vmcb_ctrl *ctrl;
+	char *inst_bytes;
+	int error, inst_len;
+
+	ctrl = &vmcb->ctrl;
+	paging = &vmexit->u.inst_emul.paging;
+
+	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
+	vmexit->u.inst_emul.gpa = gpa;
+	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
+	svm_paging_info(vmcb, paging);
+
+	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
+	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));
+
+	switch(paging->cpu_mode) {
+	case CPU_MODE_REAL:
+		vmexit->u.inst_emul.cs_base = seg.base;
+		vmexit->u.inst_emul.cs_d = 0;
+		break;
+	case CPU_MODE_PROTECTED:
+	case CPU_MODE_COMPATIBILITY:
+		vmexit->u.inst_emul.cs_base = seg.base;
+
+		/*
+		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
+		 */
+		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
+		    1 : 0;
+		break;
+	default:
+		vmexit->u.inst_emul.cs_base = 0;
+		vmexit->u.inst_emul.cs_d = 0;
+		break;	
+	}
+
+	/*
+	 * Copy the instruction bytes into 'vie' if available.
+	 */
+	if (decode_assist() && !disable_npf_assist) {
+		inst_len = ctrl->inst_len;
+		inst_bytes = ctrl->inst_bytes;
+	} else {
+		inst_len = 0;
+		inst_bytes = NULL;
+	}
+	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
+}
+
+#ifdef KTR
+static const char *
+intrtype_to_str(int intr_type)
+{
+	switch (intr_type) {
+	case VMCB_EVENTINJ_TYPE_INTR:
+		return ("hwintr");
+	case VMCB_EVENTINJ_TYPE_NMI:
+		return ("nmi");
+	case VMCB_EVENTINJ_TYPE_INTn:
+		return ("swintr");
+	case VMCB_EVENTINJ_TYPE_EXCEPTION:
+		return ("exception");
+	default:
+		panic("%s: unknown intr_type %d", __func__, intr_type);
+	}
+}
+#endif
+
+/*
+ * Inject an event to vcpu as described in section 15.20, "Event injection".
+ */
+static void
+svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
+		 uint32_t error, bool ec_valid)
+{
+	struct vmcb_ctrl *ctrl;
+
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+
+	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
+	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));
+
+	KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d",
+	    __func__, vector));
+
+	switch (intr_type) {
+	case VMCB_EVENTINJ_TYPE_INTR:
+	case VMCB_EVENTINJ_TYPE_NMI:
+	case VMCB_EVENTINJ_TYPE_INTn:
+		break;
+	case VMCB_EVENTINJ_TYPE_EXCEPTION:
+		if (vector >= 0 && vector <= 31 && vector != 2)
+			break;
+		/* FALLTHROUGH */
+	default:
+		panic("%s: invalid intr_type/vector: %d/%d", __func__,
+		    intr_type, vector);
+	}
+	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
+	if (ec_valid) {
+		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
+		ctrl->eventinj |= (uint64_t)error << 32;
+		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
+		    intrtype_to_str(intr_type), vector, error);
+	} else {
+		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
+		    intrtype_to_str(intr_type), vector);
+	}
+}
+
+static void
+svm_update_virqinfo(struct svm_softc *sc, int vcpu)
+{
+	struct vm *vm;
+	struct vlapic *vlapic;
+	struct vmcb_ctrl *ctrl;
+
+	vm = sc->vm;
+	vlapic = vm_lapic(vm, vcpu);
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+
+	/* Update %cr8 in the emulated vlapic */
+	vlapic_set_cr8(vlapic, ctrl->v_tpr);
+
+	/* Virtual interrupt injection is not used. */
+	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
+	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
+}
+
+static void
+svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
+{
+	struct vmcb_ctrl *ctrl;
+	uint64_t intinfo;
+
+	ctrl  = svm_get_vmcb_ctrl(svm_sc, vcpu);
+	intinfo = ctrl->exitintinfo;	
+	if (!VMCB_EXITINTINFO_VALID(intinfo))
+		return;
+
+	/*
+	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
+	 *
+	 * If a #VMEXIT happened during event delivery then record the event
+	 * that was being delivered.
+	 */
+	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
+		intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
+	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
+	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
+}
+
+#ifdef INVARIANTS
+static __inline int
+vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
+{
+
+	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+	    VMCB_INTCPT_VINTR));
+}
+#endif
+
+static __inline void
+enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
+{
+	struct vmcb_ctrl *ctrl;
+
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+
+	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
+		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
+		KASSERT(vintr_intercept_enabled(sc, vcpu),
+		    ("%s: vintr intercept should be enabled", __func__));
+		return;
+	}
+
+	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
+	ctrl->v_irq = 1;
+	ctrl->v_ign_tpr = 1;
+	ctrl->v_intr_vector = 0;
+	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
+}
+
+static __inline void
+disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
+{
+	struct vmcb_ctrl *ctrl;
+
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+
+	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
+		KASSERT(!vintr_intercept_enabled(sc, vcpu),
+		    ("%s: vintr intercept should be disabled", __func__));
+		return;
+	}
+
+	VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
+	ctrl->v_irq = 0;
+	ctrl->v_intr_vector = 0;
+	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
+}
+
+static int
+svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
+{
+	struct vmcb_ctrl *ctrl;
+	int oldval, newval;
+
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+	oldval = ctrl->intr_shadow;
+	newval = val ? 1 : 0;
+	if (newval != oldval) {
+		ctrl->intr_shadow = newval;
+		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
+	}
+	return (0);
+}
+
+static int
+svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
+{
+	struct vmcb_ctrl *ctrl;
+
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+	*val = ctrl->intr_shadow;
+	return (0);
+}
+
+/*
+ * Once an NMI is injected it blocks delivery of further NMIs until the handler
+ * executes an IRET. The IRET intercept is enabled when an NMI is injected to
+ * to track when the vcpu is done handling the NMI.
+ */
+static int
+nmi_blocked(struct svm_softc *sc, int vcpu)
+{
+	int blocked;
+
+	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+	    VMCB_INTCPT_IRET);
+	return (blocked);
+}
+
+static void
+enable_nmi_blocking(struct svm_softc *sc, int vcpu)
+{
+
+	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
+	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
+}
+
+static void
+clear_nmi_blocking(struct svm_softc *sc, int vcpu)
+{
+	int error;
+
+	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
+	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
+	/*
+	 * When the IRET intercept is cleared the vcpu will attempt to execute
+	 * the "iret" when it runs next. However, it is possible to inject
+	 * another NMI into the vcpu before the "iret" has actually executed.
+	 *
+	 * For e.g. if the "iret" encounters a #NPF when accessing the stack
+	 * it will trap back into the hypervisor. If an NMI is pending for
+	 * the vcpu it will be injected into the guest.
+	 *
+	 * XXX this needs to be fixed
+	 */
+	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
+
+	/*
+	 * Set 'intr_shadow' to prevent an NMI from being injected on the
+	 * immediate VMRUN.
+	 */
+	error = svm_modify_intr_shadow(sc, vcpu, 1);
+	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
+}
+
+#define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL
+
+static int
+svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
+{
+	struct vm_exit *vme;
+	struct vmcb_state *state;
+	uint64_t changed, lma, oldval;
+	int error;
+
+	state = svm_get_vmcb_state(sc, vcpu);
+
+	oldval = state->efer;
+	VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
+
+	newval &= ~0xFE;		/* clear the Read-As-Zero (RAZ) bits */
+	changed = oldval ^ newval;
+
+	if (newval & EFER_MBZ_BITS)
+		goto gpf;
+
+	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
+	if (changed & EFER_LME) {
+		if (state->cr0 & CR0_PG)
+			goto gpf;
+	}
+
+	/* EFER.LMA = EFER.LME & CR0.PG */
+	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
+		lma = EFER_LMA;
+	else
+		lma = 0;
+
+	if ((newval & EFER_LMA) != lma)
+		goto gpf;
+
+	if (newval & EFER_NXE) {
+		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
+			goto gpf;
+	}
+
+	/*
+	 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
+	 * this is fixed flag guest attempt to set EFER_LMSLE as an error.
+	 */
+	if (newval & EFER_LMSLE) {
+		vme = vm_exitinfo(sc->vm, vcpu);
+		vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
+		*retu = true;
+		return (0);
+	}
+
+	if (newval & EFER_FFXSR) {
+		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
+			goto gpf;
+	}
+
+	if (newval & EFER_TCE) {
+		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
+			goto gpf;
+	}
+
+	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
+	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
+	return (0);
+gpf:
+	vm_inject_gp(sc->vm, vcpu);
+	return (0);
+}
+
+static int
+emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
+    bool *retu)
+{
+	int error;
+
+	if (lapic_msr(num))
+		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
+	else if (num == MSR_EFER)
+		error = svm_write_efer(sc, vcpu, val, retu);
+	else
+		error = svm_wrmsr(sc, vcpu, num, val, retu);
+
+	return (error);
+}
+
+static int
+emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
+{
+	struct vmcb_state *state;
+	struct svm_regctx *ctx;
+	uint64_t result;
+	int error;
+
+	if (lapic_msr(num))
+		error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
+	else
+		error = svm_rdmsr(sc, vcpu, num, &result, retu);
+
+	if (error == 0) {
+		state = svm_get_vmcb_state(sc, vcpu);
+		ctx = svm_get_guest_regctx(sc, vcpu);
+		state->rax = result & 0xffffffff;
+		ctx->sctx_rdx = result >> 32;
+	}
+
+	return (error);
+}
+
+#ifdef KTR
+static const char *
+exit_reason_to_str(uint64_t reason)
+{
+	static char reasonbuf[32];
+
+	switch (reason) {
+	case VMCB_EXIT_INVALID:
+		return ("invalvmcb");
+	case VMCB_EXIT_SHUTDOWN:
+		return ("shutdown");
+	case VMCB_EXIT_NPF:
+		return ("nptfault");
+	case VMCB_EXIT_PAUSE:
+		return ("pause");
+	case VMCB_EXIT_HLT:
+		return ("hlt");
+	case VMCB_EXIT_CPUID:
+		return ("cpuid");
+	case VMCB_EXIT_IO:
+		return ("inout");
+	case VMCB_EXIT_MC:
+		return ("mchk");
+	case VMCB_EXIT_INTR:
+		return ("extintr");
+	case VMCB_EXIT_NMI:
+		return ("nmi");
+	case VMCB_EXIT_VINTR:
+		return ("vintr");
+	case VMCB_EXIT_MSR:
+		return ("msr");
+	case VMCB_EXIT_IRET:
+		return ("iret");
+	case VMCB_EXIT_MONITOR:
+		return ("monitor");
+	case VMCB_EXIT_MWAIT:
+		return ("mwait");
+	default:
+		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
+		return (reasonbuf);
+	}
+}
+#endif	/* KTR */
+
+/*
+ * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
+ * that are due to instruction intercepts as well as MSR and IOIO intercepts
+ * and exceptions caused by INT3, INTO and BOUND instructions.
+ *
+ * Return 1 if the nRIP is valid and 0 otherwise.
+ */
+static int
+nrip_valid(uint64_t exitcode)
+{
+	switch (exitcode) {
+	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
+	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
+	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
+	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
+	case 0x43:		/* INT3 */
+	case 0x44:		/* INTO */
+	case 0x45:		/* BOUND */
+	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
+	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
+		return (1);
+	default:
+		return (0);
+	}
+}
+
+static int
+svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
+{
+	struct vmcb *vmcb;
+	struct vmcb_state *state;
+	struct vmcb_ctrl *ctrl;
+	struct svm_regctx *ctx;
+	uint64_t code, info1, info2, val;
+	uint32_t eax, ecx, edx;
+	int error, errcode_valid, handled, idtvec, reflect;
+	bool retu;
+
+	ctx = svm_get_guest_regctx(svm_sc, vcpu);
+	vmcb = svm_get_vmcb(svm_sc, vcpu);
+	state = &vmcb->state;
+	ctrl = &vmcb->ctrl;
+
+	handled = 0;
+	code = ctrl->exitcode;
+	info1 = ctrl->exitinfo1;
+	info2 = ctrl->exitinfo2;
+
+	vmexit->exitcode = VM_EXITCODE_BOGUS;
+	vmexit->rip = state->rip;
+	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
+
+	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);
+
+	/*
+	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
+	 * in an inconsistent state and can trigger assertions that would
+	 * never happen otherwise.
+	 */
+	if (code == VMCB_EXIT_INVALID) {
+		vm_exit_svm(vmexit, code, info1, info2);
+		return (0);
+	}
+
+	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
+	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));
+
+	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
+	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
+	    vmexit->inst_length, code, info1, info2));
+
+	svm_update_virqinfo(svm_sc, vcpu);
+	svm_save_intinfo(svm_sc, vcpu);
+
+	switch (code) {
+	case VMCB_EXIT_IRET:
+		/*
+		 * Restart execution at "iret" but with the intercept cleared.
+		 */
+		vmexit->inst_length = 0;
+		clear_nmi_blocking(svm_sc, vcpu);
+		handled = 1;
+		break;
+	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
+		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
+		handled = 1;
+		break;
+	case VMCB_EXIT_INTR:	/* external interrupt */
+		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
+		handled = 1;
+		break;
+	case VMCB_EXIT_NMI:	/* external NMI */
+		handled = 1;
+		break;
+	case 0x40 ... 0x5F:
+		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
+		reflect = 1;
+		idtvec = code - 0x40;
+		switch (idtvec) {
+		case IDT_MC:
+			/*
+			 * Call the machine check handler by hand. Also don't
+			 * reflect the machine check back into the guest.
+			 */
+			reflect = 0;
+			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
+			__asm __volatile("int $18");
+			break;
+		case IDT_PF:
+			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
+			    info2);
+			KASSERT(error == 0, ("%s: error %d updating cr2",
+			    __func__, error));
+			/* fallthru */
+		case IDT_NP:
+		case IDT_SS:
+		case IDT_GP:
+		case IDT_AC:
+		case IDT_TS:
+			errcode_valid = 1;
+			break;
+
+		case IDT_DF:
+			errcode_valid = 1;
+			info1 = 0;
+			break;
+
+		case IDT_BP:
+		case IDT_OF:
+		case IDT_BR:
+			/*
+			 * The 'nrip' field is populated for INT3, INTO and
+			 * BOUND exceptions and this also implies that
+			 * 'inst_length' is non-zero.
+			 *
+			 * Reset 'inst_length' to zero so the guest %rip at
+			 * event injection is identical to what it was when
+			 * the exception originally happened.
+			 */
+			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
+			    "to zero before injecting exception %d",
+			    vmexit->inst_length, idtvec);
+			vmexit->inst_length = 0;
+			/* fallthru */
+		default:
+			errcode_valid = 0;
+			info1 = 0;
+			break;
+		}
+		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
+		    "when reflecting exception %d into guest",
+		    vmexit->inst_length, idtvec));
+
+		if (reflect) {
+			/* Reflect the exception back into the guest */
+			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
+			    "%d/%#x into the guest", idtvec, (int)info1);
+			error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
+			    errcode_valid, info1, 0);
+			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
+			    __func__, error));
+		}
+		handled = 1;
+		break;
+	case VMCB_EXIT_MSR:	/* MSR access. */
+		eax = state->rax;
+		ecx = ctx->sctx_rcx;
+		edx = ctx->sctx_rdx;
+		retu = false;	
+
+		if (info1) {
+			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
+			val = (uint64_t)edx << 32 | eax;
+			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
+			    ecx, val);
+			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
+				vmexit->exitcode = VM_EXITCODE_WRMSR;
+				vmexit->u.msr.code = ecx;
+				vmexit->u.msr.wval = val;
+			} else if (!retu) {
+				handled = 1;
+			} else {
+				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
+				    ("emulate_wrmsr retu with bogus exitcode"));
+			}
+		} else {
+			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
+			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
+			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
+				vmexit->exitcode = VM_EXITCODE_RDMSR;
+				vmexit->u.msr.code = ecx;
+			} else if (!retu) {
+				handled = 1;
+			} else {
+				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
+				    ("emulate_rdmsr retu with bogus exitcode"));
+			}
+		}
+		break;
+	case VMCB_EXIT_IO:
+		handled = svm_handle_io(svm_sc, vcpu, vmexit);
+		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
+		break;
+	case VMCB_EXIT_CPUID:
+		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
+		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
+		    (uint32_t *)&state->rax,
+		    (uint32_t *)&ctx->sctx_rbx,
+		    (uint32_t *)&ctx->sctx_rcx,
+		    (uint32_t *)&ctx->sctx_rdx);
+		break;
+	case VMCB_EXIT_HLT:
+		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
+		vmexit->exitcode = VM_EXITCODE_HLT;
+		vmexit->u.hlt.rflags = state->rflags;
+		break;
+	case VMCB_EXIT_PAUSE:
+		vmexit->exitcode = VM_EXITCODE_PAUSE;
+		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
+		break;
+	case VMCB_EXIT_NPF:
+		/* EXITINFO2 contains the faulting guest physical address */
+		if (info1 & VMCB_NPF_INFO1_RSV) {
+			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
+			    "reserved bits set: info1(%#lx) info2(%#lx)",
+			    info1, info2);
+		} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
+			vmexit->exitcode = VM_EXITCODE_PAGING;
+			vmexit->u.paging.gpa = info2;
+			vmexit->u.paging.fault_type = npf_fault_type(info1);
+			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
+			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
+			    "on gpa %#lx/%#lx at rip %#lx",
+			    info2, info1, state->rip);
+		} else if (svm_npf_emul_fault(info1)) {
+			svm_handle_inst_emul(vmcb, info2, vmexit);
+			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
+			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
+			    "for gpa %#lx/%#lx at rip %#lx",
+			    info2, info1, state->rip);
+		}
+		break;
+	case VMCB_EXIT_MONITOR:
+		vmexit->exitcode = VM_EXITCODE_MONITOR;
+		break;
+	case VMCB_EXIT_MWAIT:
+		vmexit->exitcode = VM_EXITCODE_MWAIT;
+		break;
+	default:
+		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
+		break;
+	}	
+
+	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
+	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
+	    vmexit->rip, vmexit->inst_length);
+
+	if (handled) {
+		vmexit->rip += vmexit->inst_length;
+		vmexit->inst_length = 0;
+		state->rip = vmexit->rip;
+	} else {
+		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
+			/*
+			 * If this VM exit was not claimed by anybody then
+			 * treat it as a generic SVM exit.
+			 */
+			vm_exit_svm(vmexit, code, info1, info2);
+		} else {
+			/*
+			 * The exitcode and collateral have been populated.
+			 * The VM exit will be processed further in userland.
+			 */
+		}
+	}
+	return (handled);
+}
+
+static void
+svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
+{
+	uint64_t intinfo;
+
+	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
+		return;
+
+	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
+	    "valid: %#lx", __func__, intinfo));
+
+	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
+		VMCB_EXITINTINFO_VECTOR(intinfo),
+		VMCB_EXITINTINFO_EC(intinfo),
+		VMCB_EXITINTINFO_EC_VALID(intinfo));
+	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
+	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
+}
+
+/*
+ * Inject event to virtual cpu.
+ */
+static void
+svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
+{
+	struct vmcb_ctrl *ctrl;
+	struct vmcb_state *state;
+	struct svm_vcpu *vcpustate;
+	uint8_t v_tpr;
+	int vector, need_intr_window;
+	int extint_pending;
+
+	state = svm_get_vmcb_state(sc, vcpu);
+	ctrl  = svm_get_vmcb_ctrl(sc, vcpu);
+	vcpustate = svm_get_vcpu(sc, vcpu);
+
+	need_intr_window = 0;
+
+	if (vcpustate->nextrip != state->rip) {
+		ctrl->intr_shadow = 0;
+		VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
+		    "cleared due to rip change: %#lx/%#lx",
+		    vcpustate->nextrip, state->rip);
+	}
+
+	/*
+	 * Inject pending events or exceptions for this vcpu.
+	 *
+	 * An event might be pending because the previous #VMEXIT happened
+	 * during event delivery (i.e. ctrl->exitintinfo).
+	 *
+	 * An event might also be pending because an exception was injected
+	 * by the hypervisor (e.g. #PF during instruction emulation).
+	 */
+	svm_inj_intinfo(sc, vcpu);
+
+	/* NMI event has priority over interrupts. */
+	if (vm_nmi_pending(sc->vm, vcpu)) {
+		if (nmi_blocked(sc, vcpu)) {
+			/*
+			 * Can't inject another NMI if the guest has not
+			 * yet executed an "iret" after the last NMI.
+			 */
+			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
+			    "to NMI-blocking");
+		} else if (ctrl->intr_shadow) {
+			/*
+			 * Can't inject an NMI if the vcpu is in an intr_shadow.
+			 */
+			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
+			    "interrupt shadow");
+			need_intr_window = 1;
+			goto done;
+		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
+			/*
+			 * If there is already an exception/interrupt pending
+			 * then defer the NMI until after that.
+			 */
+			VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
+			    "eventinj %#lx", ctrl->eventinj);
+
+			/*
+			 * Use self-IPI to trigger a VM-exit as soon as
+			 * possible after the event injection is completed.
+			 *
+			 * This works only if the external interrupt exiting
+			 * is at a lower priority than the event injection.
+			 *
+			 * Although not explicitly specified in APMv2 the
+			 * relative priorities were verified empirically.
+			 */
+			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
+		} else {
+			vm_nmi_clear(sc->vm, vcpu);
+
+			/* Inject NMI, vector number is not used */
+			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
+			    IDT_NMI, 0, false);
+
+			/* virtual NMI blocking is now in effect */
+			enable_nmi_blocking(sc, vcpu);
+
+			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
+		}
+	}
+
+	extint_pending = vm_extint_pending(sc->vm, vcpu);
+	if (!extint_pending) {
+		if (!vlapic_pending_intr(vlapic, &vector))
+			goto done;
+		KASSERT(vector >= 16 && vector <= 255,
+		    ("invalid vector %d from local APIC", vector));
+	} else {
+		/* Ask the legacy pic for a vector to inject */
+		vatpic_pending_intr(sc->vm, &vector);
+		KASSERT(vector >= 0 && vector <= 255,
+		    ("invalid vector %d from INTR", vector));
+	}
+
+	/*
+	 * If the guest has disabled interrupts or is in an interrupt shadow
+	 * then we cannot inject the pending interrupt.
+	 */
+	if ((state->rflags & PSL_I) == 0) {
+		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
+		    "rflags %#lx", vector, state->rflags);
+		need_intr_window = 1;
+		goto done;
+	}
+
+	if (ctrl->intr_shadow) {
+		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
+		    "interrupt shadow", vector);
+		need_intr_window = 1;
+		goto done;
+	}
+
+	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
+		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
+		    "eventinj %#lx", vector, ctrl->eventinj);
+		need_intr_window = 1;
+		goto done;
+	}
+
+	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
+
+	if (!extint_pending) {
+		vlapic_intr_accepted(vlapic, vector);
+	} else {
+		vm_extint_clear(sc->vm, vcpu);
+		vatpic_intr_accepted(sc->vm, vector);
+	}
+
+	/*
+	 * Force a VM-exit as soon as the vcpu is ready to accept another
+	 * interrupt. This is done because the PIC might have another vector
+	 * that it wants to inject. Also, if the APIC has a pending interrupt
+	 * that was preempted by the ExtInt then it allows us to inject the
+	 * APIC vector as soon as possible.
+	 */
+	need_intr_window = 1;
+done:
+	/*
+	 * The guest can modify the TPR by writing to %CR8. In guest mode
+	 * the processor reflects this write to V_TPR without hypervisor
+	 * intervention.
+	 *
+	 * The guest can also modify the TPR by writing to it via the memory
+	 * mapped APIC page. In this case, the write will be emulated by the
+	 * hypervisor. For this reason V_TPR must be updated before every
+	 * VMRUN.
+	 */
+	v_tpr = vlapic_get_cr8(vlapic);
+	KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
+	if (ctrl->v_tpr != v_tpr) {
+		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
+		    ctrl->v_tpr, v_tpr);
+		ctrl->v_tpr = v_tpr;
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+	}
+
+	if (need_intr_window) {
+		/*
+		 * We use V_IRQ in conjunction with the VINTR intercept to
+		 * trap into the hypervisor as soon as a virtual interrupt
+		 * can be delivered.
+		 *
+		 * Since injected events are not subject to intercept checks
+		 * we need to ensure that the V_IRQ is not actually going to
+		 * be delivered on VM entry. The KASSERT below enforces this.
+		 */
+		KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
+		    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
+		    ("Bogus intr_window_exiting: eventinj (%#lx), "
+		    "intr_shadow (%u), rflags (%#lx)",
+		    ctrl->eventinj, ctrl->intr_shadow, state->rflags));
+		enable_intr_window_exiting(sc, vcpu);
+	} else {
+		disable_intr_window_exiting(sc, vcpu);
+	}
+}
+
+static __inline void
+restore_host_tss(void)
+{
+	struct system_segment_descriptor *tss_sd;
+
+	/*
+	 * The TSS descriptor was in use prior to launching the guest so it
+	 * has been marked busy.
+	 *
+	 * 'ltr' requires the descriptor to be marked available so change the
+	 * type to "64-bit available TSS".
+	 */
+	tss_sd = PCPU_GET(tss);
+	tss_sd->sd_type = SDT_SYSTSS;
+	ltr(GSEL(GPROC0_SEL, SEL_KPL));
+}
+
+static void
+check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
+{
+	struct svm_vcpu *vcpustate;
+	struct vmcb_ctrl *ctrl;
+	long eptgen;
+	bool alloc_asid;
+
+	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
+	    "active on cpu %u", __func__, thiscpu));
+
+	vcpustate = svm_get_vcpu(sc, vcpuid);
+	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
+
+	/*
+	 * The TLB entries associated with the vcpu's ASID are not valid
+	 * if either of the following conditions is true:
+	 *
+	 * 1. The vcpu's ASID generation is different than the host cpu's
+	 *    ASID generation. This happens when the vcpu migrates to a new
+	 *    host cpu. It can also happen when the number of vcpus executing
+	 *    on a host cpu is greater than the number of ASIDs available.
+	 *
+	 * 2. The pmap generation number is different than the value cached in
+	 *    the 'vcpustate'. This happens when the host invalidates pages
+	 *    belonging to the guest.
+	 *
+	 *	asidgen		eptgen	      Action
+	 *	mismatch	mismatch
+	 *	   0		   0		(a)
+	 *	   0		   1		(b1) or (b2)
+	 *	   1		   0		(c)
+	 *	   1		   1		(d)
+	 *
+	 * (a) There is no mismatch in eptgen or ASID generation and therefore
+	 *     no further action is needed.
+	 *
+	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
+	 *      retained and the TLB entries associated with this ASID
+	 *      are flushed by VMRUN.
+	 *
+	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
+	 *      allocated.
+	 *
+	 * (c) A new ASID is allocated.
+	 *
+	 * (d) A new ASID is allocated.
+	 */
+
+	alloc_asid = false;
+	eptgen = pmap->pm_eptgen;
+	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;
+
+	if (vcpustate->asid.gen != asid[thiscpu].gen) {
+		alloc_asid = true;	/* (c) and (d) */
+	} else if (vcpustate->eptgen != eptgen) {
+		if (flush_by_asid())
+			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
+		else
+			alloc_asid = true;			/* (b2) */
+	} else {
+		/*
+		 * This is the common case (a).
+		 */
+		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
+		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
+		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
+	}
+
+	if (alloc_asid) {
+		if (++asid[thiscpu].num >= nasid) {
+			asid[thiscpu].num = 1;
+			if (++asid[thiscpu].gen == 0)
+				asid[thiscpu].gen = 1;
+			/*
+			 * If this cpu does not support "flush-by-asid"
+			 * then flush the entire TLB on a generation
+			 * bump. Subsequent ASID allocation in this
+			 * generation can be done without a TLB flush.
+			 */
+			if (!flush_by_asid())
+				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
+		}
+		vcpustate->asid.gen = asid[thiscpu].gen;
+		vcpustate->asid.num = asid[thiscpu].num;
+
+		ctrl->asid = vcpustate->asid.num;
+		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
+		/*
+		 * If this cpu supports "flush-by-asid" then the TLB
+		 * was not flushed after the generation bump. The TLB
+		 * is flushed selectively after every new ASID allocation.
+		 */
+		if (flush_by_asid())
+			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
+	}
+	vcpustate->eptgen = eptgen;
+
+	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
+	KASSERT(ctrl->asid == vcpustate->asid.num,
+	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
+}
+
+static __inline void
+disable_gintr(void)
+{
+
+	__asm __volatile("clgi");
+}
+
+static __inline void
+enable_gintr(void)
+{
+
+        __asm __volatile("stgi");
+}
+
+/*
+ * Start vcpu with specified RIP.
+ */
+static int
+svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 
+	struct vm_eventinfo *evinfo)
+{
+	struct svm_regctx *gctx;
+	struct svm_softc *svm_sc;
+	struct svm_vcpu *vcpustate;
+	struct vmcb_state *state;
+	struct vmcb_ctrl *ctrl;
+	struct vm_exit *vmexit;
+	struct vlapic *vlapic;
+	struct vm *vm;
+	uint64_t vmcb_pa;
+	int handled;
+
+	svm_sc = arg;
+	vm = svm_sc->vm;
+
+	vcpustate = svm_get_vcpu(svm_sc, vcpu);
+	state = svm_get_vmcb_state(svm_sc, vcpu);
+	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
+	vmexit = vm_exitinfo(vm, vcpu);
+	vlapic = vm_lapic(vm, vcpu);
+
+	gctx = svm_get_guest_regctx(svm_sc, vcpu);
+	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
+
+	if (vcpustate->lastcpu != curcpu) {
+		/*
+		 * Force new ASID allocation by invalidating the generation.
+		 */
+		vcpustate->asid.gen = 0;
+
+		/*
+		 * Invalidate the VMCB state cache by marking all fields dirty.
+		 */
+		svm_set_dirty(svm_sc, vcpu, 0xffffffff);
+
+		/*
+		 * XXX
+		 * Setting 'vcpustate->lastcpu' here is a bit premature because
+		 * we may return from this function without actually executing
+		 * the VMRUN instruction. This could happen if a rendezvous
+		 * or an AST is pending on the first time through the loop.
+		 *
+		 * This works for now but any new side-effects of vcpu
+		 * migration should take this case into account.
+		 */
+		vcpustate->lastcpu = curcpu;
+		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
+	}
+
+	svm_msr_guest_enter(svm_sc, vcpu);
+
+	/* Update Guest RIP */
+	state->rip = rip;
+
+	do {
+		/*
+		 * Disable global interrupts to guarantee atomicity during
+		 * loading of guest state. This includes not only the state
+		 * loaded by the "vmrun" instruction but also software state
+		 * maintained by the hypervisor: suspended and rendezvous
+		 * state, NPT generation number, vlapic interrupts etc.
+		 */
+		disable_gintr();
+
+		if (vcpu_suspended(evinfo)) {
+			enable_gintr();
+			vm_exit_suspended(vm, vcpu, state->rip);
+			break;
+		}
+
+		if (vcpu_rendezvous_pending(evinfo)) {
+			enable_gintr();
+			vm_exit_rendezvous(vm, vcpu, state->rip);
+			break;
+		}
+
+		if (vcpu_reqidle(evinfo)) {
+			enable_gintr();
+			vm_exit_reqidle(vm, vcpu, state->rip);
+			break;
+		}
+
+		/* The scheduler has asked us to yield the cpu. */
+		if (vcpu_should_yield(vm, vcpu)) {
+			enable_gintr();
+			vm_exit_astpending(vm, vcpu, state->rip);
+			break;
+		}
+
+		svm_inj_interrupts(svm_sc, vcpu, vlapic);
+
+		/* Activate the nested pmap on 'curcpu' */
+		CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);
+
+		/*
+		 * Check the pmap generation and the ASID generation to
+		 * ensure that the vcpu does not use stale TLB mappings.
+		 */
+		check_asid(svm_sc, vcpu, pmap, curcpu);
+
+		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
+		vcpustate->dirty = 0;
+		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
+
+		/* Launch Virtual Machine. */
+		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
+		svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);
+
+		CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
+
+		/*
+		 * The host GDTR and IDTR are saved by VMRUN and restored
+		 * automatically on #VMEXIT. However, the host TSS needs
+		 * to be restored explicitly.
+		 */
+		restore_host_tss();
+
+		/* #VMEXIT disables interrupts so re-enable them here. */ 
+		enable_gintr();
+
+		/* Update 'nextrip' */
+		vcpustate->nextrip = state->rip;
+
+		/* Handle #VMEXIT and if required return to user space. */
+		handled = svm_vmexit(svm_sc, vcpu, vmexit);
+	} while (handled);
+
+	svm_msr_guest_exit(svm_sc, vcpu);
+
+	return (0);
+}
+
+static void
+svm_vmcleanup(void *arg)
+{
+	struct svm_softc *sc = arg;
+
+	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
+	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
+	free(sc, M_SVM);
+}
+
+static register_t *
+swctx_regptr(struct svm_regctx *regctx, int reg)
+{
+
+	switch (reg) {
+	case VM_REG_GUEST_RBX:
+		return (&regctx->sctx_rbx);
+	case VM_REG_GUEST_RCX:
+		return (&regctx->sctx_rcx);
+	case VM_REG_GUEST_RDX:
+		return (&regctx->sctx_rdx);
+	case VM_REG_GUEST_RDI:
+		return (&regctx->sctx_rdi);
+	case VM_REG_GUEST_RSI:
+		return (&regctx->sctx_rsi);
+	case VM_REG_GUEST_RBP:
+		return (&regctx->sctx_rbp);
+	case VM_REG_GUEST_R8:
+		return (&regctx->sctx_r8);
+	case VM_REG_GUEST_R9:
+		return (&regctx->sctx_r9);
+	case VM_REG_GUEST_R10:
+		return (&regctx->sctx_r10);
+	case VM_REG_GUEST_R11:
+		return (&regctx->sctx_r11);
+	case VM_REG_GUEST_R12:
+		return (&regctx->sctx_r12);
+	case VM_REG_GUEST_R13:
+		return (&regctx->sctx_r13);
+	case VM_REG_GUEST_R14:
+		return (&regctx->sctx_r14);
+	case VM_REG_GUEST_R15:
+		return (&regctx->sctx_r15);
+	default:
+		return (NULL);
+	}
+}
+
+static int
+svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
+{
+	struct svm_softc *svm_sc;
+	register_t *reg;
+
+	svm_sc = arg;
+
+	if (ident == VM_REG_GUEST_INTR_SHADOW) {
+		return (svm_get_intr_shadow(svm_sc, vcpu, val));
+	}
+
+	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
+		return (0);
+	}
+
+	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
+
+	if (reg != NULL) {
+		*val = *reg;
+		return (0);
+	}
+
+	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
+	return (EINVAL);
+}
+
+static int
+svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
+{
+	struct svm_softc *svm_sc;
+	register_t *reg;
+
+	svm_sc = arg;
+
+	if (ident == VM_REG_GUEST_INTR_SHADOW) {
+		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
+	}
+
+	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
+		return (0);
+	}
+
+	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
+
+	if (reg != NULL) {
+		*reg = val;
+		return (0);
+	}
+
+	/*
+	 * XXX deal with CR3 and invalidate TLB entries tagged with the
+	 * vcpu's ASID. This needs to be treated differently depending on
+	 * whether 'running' is true/false.
+	 */
+
+	VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
+	return (EINVAL);
+}
+
+static int
+svm_setcap(void *arg, int vcpu, int type, int val)
+{
+	struct svm_softc *sc;
+	int error;
+
+	sc = arg;
+	error = 0;
+	switch (type) {
+	case VM_CAP_HALT_EXIT:
+		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+		    VMCB_INTCPT_HLT, val);
+		break;
+	case VM_CAP_PAUSE_EXIT:
+		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+		    VMCB_INTCPT_PAUSE, val);
+		break;
+	case VM_CAP_UNRESTRICTED_GUEST:
+		/* Unrestricted guest execution cannot be disabled in SVM */
+		if (val == 0)
+			error = EINVAL;
+		break;
+	default:
+		error = ENOENT;
+		break;
+	}
+	return (error);
+}
+
+static int
+svm_getcap(void *arg, int vcpu, int type, int *retval)
+{
+	struct svm_softc *sc;
+	int error;
+
+	sc = arg;
+	error = 0;
+
+	switch (type) {
+	case VM_CAP_HALT_EXIT:
+		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+		    VMCB_INTCPT_HLT);
+		break;
+	case VM_CAP_PAUSE_EXIT:
+		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+		    VMCB_INTCPT_PAUSE);
+		break;
+	case VM_CAP_UNRESTRICTED_GUEST:
+		*retval = 1;	/* unrestricted guest is always enabled */
+		break;
+	default:
+		error = ENOENT;
+		break;
+	}
+	return (error);
+}
+
+static struct vlapic *
+svm_vlapic_init(void *arg, int vcpuid)
+{
+	struct svm_softc *svm_sc;
+	struct vlapic *vlapic;
+
+	svm_sc = arg;
+	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
+	vlapic->vm = svm_sc->vm;
+	vlapic->vcpuid = vcpuid;
+	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
+
+	vlapic_init(vlapic);
+
+	return (vlapic);
+}
+
+static void
+svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
+{
+
+        vlapic_cleanup(vlapic);
+        free(vlapic, M_SVM_VLAPIC);
+}
+
+struct vmm_ops vmm_ops_amd = {
+	svm_init,
+	svm_cleanup,
+	svm_restore,
+	svm_vminit,
+	svm_vmrun,
+	svm_vmcleanup,
+	svm_getreg,
+	svm_setreg,
+	vmcb_getdesc,
+	vmcb_setdesc,
+	svm_getcap,
+	svm_setcap,
+	svm_npt_alloc,
+	svm_npt_free,
+	svm_vlapic_init,
+	svm_vlapic_cleanup	
+};


Property changes on: trunk/sys/amd64/vmm/amd/svm.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
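
The ASID handling in check_asid() above reduces to the (a)-(d) table in its
comment. A minimal standalone sketch of that decision logic, using simplified
stand-in types (asid_pool, vcpu_asid) rather than the real svm_softc and VMCB
structures:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the per-cpu ASID pool and per-vcpu state. */
struct asid_pool { uint64_t gen; uint32_t num; uint32_t nasid; };
struct vcpu_asid { uint64_t gen; uint32_t num; uint64_t eptgen; };

enum tlb_flush { FLUSH_NOTHING, FLUSH_GUEST, FLUSH_ALL };

/*
 * Decide whether a new ASID must be allocated and what kind of TLB flush
 * to request, mirroring cases (a), (b1), (b2), (c) and (d) above.
 */
enum tlb_flush
asid_decide(struct asid_pool *pool, struct vcpu_asid *va,
    uint64_t cur_eptgen, bool flush_by_asid)
{
	enum tlb_flush flush = FLUSH_NOTHING;
	bool alloc = false;

	if (va->gen != pool->gen) {
		alloc = true;				/* (c) and (d) */
	} else if (va->eptgen != cur_eptgen) {
		if (flush_by_asid)
			flush = FLUSH_GUEST;		/* (b1) */
		else
			alloc = true;			/* (b2) */
	}						/* else: case (a) */

	if (alloc) {
		if (++pool->num >= pool->nasid) {
			pool->num = 1;
			if (++pool->gen == 0)
				pool->gen = 1;
			if (!flush_by_asid)
				flush = FLUSH_ALL;	/* generation bump */
		}
		va->gen = pool->gen;
		va->num = pool->num;
		if (flush_by_asid)
			flush = FLUSH_GUEST;	/* selective flush of new ASID */
	}
	va->eptgen = cur_eptgen;
	return (flush);
}
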
Added: trunk/sys/amd64/vmm/amd/svm.h
===================================================================
--- trunk/sys/amd64/vmm/amd/svm.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/svm.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,57 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/amd/svm.h 285015 2015-07-01 19:46:57Z neel $
+ */
+
+#ifndef _SVM_H_
+#define _SVM_H_
+
+struct pcpu;
+
+/*
+ * Guest register state that is saved outside the VMCB.
+ */
+struct svm_regctx {
+	register_t	sctx_rbp;
+	register_t	sctx_rbx;
+	register_t	sctx_rcx;
+	register_t	sctx_rdx;
+	register_t	sctx_rdi;
+	register_t	sctx_rsi;
+	register_t	sctx_r8;
+	register_t	sctx_r9;
+	register_t	sctx_r10;
+	register_t	sctx_r11;
+	register_t	sctx_r12;
+	register_t	sctx_r13;
+	register_t	sctx_r14;
+	register_t	sctx_r15;
+};
+
+void svm_launch(uint64_t pa, struct svm_regctx *gctx, struct pcpu *pcpu);
+
+#endif /* _SVM_H_ */


Property changes on: trunk/sys/amd64/vmm/amd/svm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/amd/svm_genassym.c
===================================================================
--- trunk/sys/amd64/vmm/amd/svm_genassym.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/svm_genassym.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,51 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/svm_genassym.c 285015 2015-07-01 19:46:57Z neel $");
+
+#include <sys/param.h>
+#include <sys/assym.h>
+#include <x86/specialreg.h>
+
+#include "svm.h"
+
+ASSYM(SCTX_RBX, offsetof(struct svm_regctx, sctx_rbx));
+ASSYM(SCTX_RCX, offsetof(struct svm_regctx, sctx_rcx));
+ASSYM(SCTX_RBP, offsetof(struct svm_regctx, sctx_rbp));
+ASSYM(SCTX_RDX, offsetof(struct svm_regctx, sctx_rdx));
+ASSYM(SCTX_RDI, offsetof(struct svm_regctx, sctx_rdi));
+ASSYM(SCTX_RSI, offsetof(struct svm_regctx, sctx_rsi));
+ASSYM(SCTX_R8,  offsetof(struct svm_regctx, sctx_r8));
+ASSYM(SCTX_R9,  offsetof(struct svm_regctx, sctx_r9));
+ASSYM(SCTX_R10, offsetof(struct svm_regctx, sctx_r10));
+ASSYM(SCTX_R11, offsetof(struct svm_regctx, sctx_r11));
+ASSYM(SCTX_R12, offsetof(struct svm_regctx, sctx_r12));
+ASSYM(SCTX_R13, offsetof(struct svm_regctx, sctx_r13));
+ASSYM(SCTX_R14, offsetof(struct svm_regctx, sctx_r14));
+ASSYM(SCTX_R15, offsetof(struct svm_regctx, sctx_r15));
+ASSYM(MSR_GSBASE, MSR_GSBASE);


Property changes on: trunk/sys/amd64/vmm/amd/svm_genassym.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
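
svm_genassym.c exists only to turn struct offsets into assembler constants
(the generated svm_assym.h) so that svm_support.S can use symbolic
displacements such as SCTX_RBX(%rsi). A hedged illustration of the idea in
plain C, with a partial mirror of svm_regctx and a compile-time check standing
in for the real genassym build step:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t register_t;

/* Partial mirror of struct svm_regctx from svm.h, for illustration only. */
struct svm_regctx {
	register_t sctx_rbp;
	register_t sctx_rbx;
	register_t sctx_rcx;
	/* ... remaining general purpose registers ... */
};

/*
 * genassym evaluates expressions like this one and emits them as assembler
 * constants, which the assembly then uses as displacements, e.g.
 * "movq SCTX_RBX(%rsi), %rbx".
 */
#define	SCTX_RBX	offsetof(struct svm_regctx, sctx_rbx)

static_assert(SCTX_RBX == sizeof(register_t),
    "sctx_rbx sits one register_t past the start of svm_regctx");
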
Added: trunk/sys/amd64/vmm/amd/svm_msr.c
===================================================================
--- trunk/sys/amd64/vmm/amd/svm_msr.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/svm_msr.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,166 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014, Neel Natu (neel at freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/svm_msr.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/systm.h>
+
+#include <machine/cpufunc.h>
+#include <machine/specialreg.h>
+#include <machine/vmm.h>
+
+#include "svm.h"
+#include "vmcb.h"
+#include "svm_softc.h"
+#include "svm_msr.h"
+
+#ifndef MSR_AMDK8_IPM
+#define	MSR_AMDK8_IPM	0xc0010055
+#endif
+
+enum {
+	IDX_MSR_LSTAR,
+	IDX_MSR_CSTAR,
+	IDX_MSR_STAR,
+	IDX_MSR_SF_MASK,
+	HOST_MSR_NUM		/* must be the last enumeration */
+};
+
+static uint64_t host_msrs[HOST_MSR_NUM];
+
+void
+svm_msr_init(void)
+{
+	/* 
+	 * It is safe to cache the values of the following MSRs because they
+	 * don't change based on curcpu, curproc or curthread.
+	 */
+	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+}
+
+void
+svm_msr_guest_init(struct svm_softc *sc, int vcpu)
+{
+	/*
+	 * All the MSRs accessible to the guest are either saved/restored by
+	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
+	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
+	 *
+	 * There are no guest MSRs that are saved/restored "by hand" so nothing
+	 * more to do here.
+	 */
+	return;
+}
+
+void
+svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
+{
+	/*
+	 * Save host MSRs (if any) and restore guest MSRs (if any).
+	 */
+}
+
+void
+svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
+{
+	/*
+	 * Save guest MSRs (if any) and restore host MSRs.
+	 */
+	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
+	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
+	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
+	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);
+
+	/* MSR_KGSBASE will be restored on the way back to userspace */
+}
+
+int
+svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
+    bool *retu)
+{
+	int error = 0;
+
+	switch (num) {
+	case MSR_MCG_CAP:
+	case MSR_MCG_STATUS:
+		*result = 0;
+		break;
+	case MSR_MTRRcap:
+	case MSR_MTRRdefType:
+	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
+	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
+	case MSR_MTRR64kBase:
+	case MSR_SYSCFG:
+		*result = 0;
+		break;
+	case MSR_AMDK8_IPM:
+		*result = 0;
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	return (error);
+}
+
+int
+svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
+{
+	int error = 0;
+
+	switch (num) {
+	case MSR_MCG_CAP:
+	case MSR_MCG_STATUS:
+		break;		/* ignore writes */
+	case MSR_MTRRcap:
+		vm_inject_gp(sc->vm, vcpu);
+		break;
+	case MSR_MTRRdefType:
+	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
+	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
+	case MSR_MTRR64kBase:
+	case MSR_SYSCFG:
+		break;		/* Ignore writes */
+	case MSR_AMDK8_IPM:
+		/*
+		 * Ignore writes to the "Interrupt Pending Message" MSR.
+		 */
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	return (error);
+}


Property changes on: trunk/sys/amd64/vmm/amd/svm_msr.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
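
svm_msr.c only has to preserve the syscall-related host MSRs by hand; the
guest copies are swapped by VMRUN/VMSAVE/VMLOAD. A toy model of the "read once
at init, write back on every guest exit" pattern; model_rdmsr()/model_wrmsr()
and the toy_msrs array are illustrative stand-ins, not kernel primitives:

#include <stdint.h>

/* Hypothetical MSR accessors backed by a toy array, for illustration only. */
static uint64_t toy_msrs[256];
static uint64_t model_rdmsr(uint32_t msr) { return (toy_msrs[msr & 0xff]); }
static void model_wrmsr(uint32_t msr, uint64_t v) { toy_msrs[msr & 0xff] = v; }

enum { IDX_LSTAR, IDX_CSTAR, IDX_STAR, IDX_SF_MASK, HOST_MSR_NUM };
static const uint32_t host_msr_ids[HOST_MSR_NUM] = {
	0xc0000082,	/* MSR_LSTAR */
	0xc0000083,	/* MSR_CSTAR */
	0xc0000081,	/* MSR_STAR */
	0xc0000084,	/* MSR_SF_MASK */
};
static uint64_t saved_host_msrs[HOST_MSR_NUM];

/* Cache the host values once; they do not vary per cpu, proc or thread. */
void
host_msrs_init(void)
{
	for (int i = 0; i < HOST_MSR_NUM; i++)
		saved_host_msrs[i] = model_rdmsr(host_msr_ids[i]);
}

/* Write them back after the guest ran; guest entry needs no extra work. */
void
host_msrs_restore(void)
{
	for (int i = 0; i < HOST_MSR_NUM; i++)
		model_wrmsr(host_msr_ids[i], saved_host_msrs[i]);
}
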
Added: trunk/sys/amd64/vmm/amd/svm_msr.h
===================================================================
--- trunk/sys/amd64/vmm/amd/svm_msr.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/svm_msr.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,45 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Neel Natu (neel at freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/amd/svm_msr.h 276403 2014-12-30 08:24:14Z neel $
+ */
+
+#ifndef _SVM_MSR_H_
+#define	_SVM_MSR_H_
+
+struct svm_softc;
+
+void svm_msr_init(void);
+void svm_msr_guest_init(struct svm_softc *sc, int vcpu);
+void svm_msr_guest_enter(struct svm_softc *sc, int vcpu);
+void svm_msr_guest_exit(struct svm_softc *sc, int vcpu);
+
+int svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
+    bool *retu);
+int svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
+    bool *retu);
+
+#endif	/* _SVM_MSR_H_ */


Property changes on: trunk/sys/amd64/vmm/amd/svm_msr.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/amd/svm_softc.h
===================================================================
--- trunk/sys/amd64/vmm/amd/svm_softc.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/svm_softc.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,115 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/amd/svm_softc.h 328842 2018-02-04 13:54:43Z avg $
+ */
+
+#ifndef _SVM_SOFTC_H_
+#define _SVM_SOFTC_H_
+
+#define SVM_IO_BITMAP_SIZE	(3 * PAGE_SIZE)
+#define SVM_MSR_BITMAP_SIZE	(2 * PAGE_SIZE)
+
+struct asid {
+	uint64_t	gen;	/* range is [1, ~0UL] */
+	uint32_t	num;	/* range is [1, nasid - 1] */
+};
+
+/*
+ * XXX separate out 'struct vmcb' from 'svm_vcpu' to avoid wasting space
+ * due to VMCB alignment requirements.
+ */
+struct svm_vcpu {
+	struct vmcb	vmcb;	 /* hardware saved vcpu context */
+	struct svm_regctx swctx; /* software saved vcpu context */
+	uint64_t	vmcb_pa; /* VMCB physical address */
+	uint64_t	nextrip; /* next instruction to be executed by guest */
+        int		lastcpu; /* host cpu that the vcpu last ran on */
+	uint32_t	dirty;	 /* state cache bits that must be cleared */
+	long		eptgen;	 /* pmap->pm_eptgen when the vcpu last ran */
+	struct asid	asid;
+} __aligned(PAGE_SIZE);
+
+/*
+ * SVM softc, one per virtual machine.
+ */
+struct svm_softc {
+	uint8_t apic_page[VM_MAXCPU][PAGE_SIZE];
+	struct svm_vcpu vcpu[VM_MAXCPU];
+	vm_offset_t 	nptp;			    /* nested page table */
+	uint8_t		*iopm_bitmap;    /* shared by all vcpus */
+	uint8_t		*msr_bitmap;    /* shared by all vcpus */
+	struct vm	*vm;
+};
+
+CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0);
+
+static __inline struct svm_vcpu *
+svm_get_vcpu(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu]));
+}
+
+static __inline struct vmcb *
+svm_get_vmcb(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].vmcb));
+}
+
+static __inline struct vmcb_state *
+svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].vmcb.state));
+}
+
+static __inline struct vmcb_ctrl *
+svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].vmcb.ctrl));
+}
+
+static __inline struct svm_regctx *
+svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].swctx));
+}
+
+static __inline void
+svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
+{
+        struct svm_vcpu *vcpustate;
+
+        vcpustate = svm_get_vcpu(sc, vcpu);
+
+        vcpustate->dirty |= dirtybits;
+}
+
+#endif /* _SVM_SOFTC_H_ */


Property changes on: trunk/sys/amd64/vmm/amd/svm_softc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
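
svm_set_dirty() above is the producer side of the VMCB clean-bits computation
in svm_vmrun(): a field group may be advertised as clean (cacheable by the
processor) only if the hypervisor did not touch it since the last VMRUN. A
small standalone illustration of that mask arithmetic, reusing the
VMCB_CACHE_* bit positions defined in vmcb.h:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* VMCB_CACHE_* bit positions, as defined in vmcb.h. */
#define	CACHE_ASID	(UINT32_C(1) << 2)
#define	CACHE_TPR	(UINT32_C(1) << 3)
#define	CACHE_CR	(UINT32_C(1) << 5)

int
main(void)
{
	uint32_t cacheable = CACHE_ASID | CACHE_TPR | CACHE_CR;
	uint32_t dirty = 0;

	dirty |= CACHE_TPR;	/* e.g. svm_set_dirty() after a V_TPR update */

	/* Mirrors "ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty". */
	uint32_t vmcb_clean = cacheable & ~dirty;
	dirty = 0;		/* dirty bits are consumed for this VMRUN */

	/* ASID and CR groups stay clean; the TPR group must be re-read. */
	printf("vmcb_clean = %#" PRIx32 "\n", vmcb_clean);
	return (0);
}
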
Added: trunk/sys/amd64/vmm/amd/svm_support.S
===================================================================
--- trunk/sys/amd64/vmm/amd/svm_support.S	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/svm_support.S	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,162 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013, Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/amd/svm_support.S 330713 2018-03-10 00:44:33Z tychon $
+ */
+#include <machine/asmacros.h>
+
+#include "svm_assym.h"
+
+/*
+ * Be friendly to DTrace FBT's prologue/epilogue pattern matching.
+ *
+ * They are also responsible for saving/restoring the host %rbp across VMRUN.
+ */
+#define	VENTER  push %rbp ; mov %rsp,%rbp
+#define	VLEAVE  pop %rbp
+
+#define	VMLOAD	.byte 0x0f, 0x01, 0xda
+#define	VMRUN	.byte 0x0f, 0x01, 0xd8
+#define	VMSAVE	.byte 0x0f, 0x01, 0xdb
+
+/*
+ * svm_launch(uint64_t vmcb, struct svm_regctx *gctx, struct pcpu *pcpu)
+ * %rdi: physical address of VMCB
+ * %rsi: pointer to guest context
+ * %rdx: pointer to the pcpu data
+ */
+ENTRY(svm_launch)
+	VENTER
+
+	/* save pointer to the pcpu data */
+	push %rdx
+
+	/*
+	 * Host register state saved across a VMRUN.
+	 *
+	 * All "callee saved registers" except:
+	 * %rsp: because it is preserved by the processor across VMRUN.
+	 * %rbp: because it is saved/restored by the function prologue/epilogue.
+	 */
+	push %rbx
+	push %r12
+	push %r13
+	push %r14
+	push %r15
+
+	/* Save the physical address of the VMCB in %rax */
+	movq %rdi, %rax
+
+	push %rsi		/* push guest context pointer on the stack */
+
+	/*
+	 * Restore guest state.
+	 */
+	movq SCTX_R8(%rsi), %r8
+	movq SCTX_R9(%rsi), %r9
+	movq SCTX_R10(%rsi), %r10
+	movq SCTX_R11(%rsi), %r11
+	movq SCTX_R12(%rsi), %r12
+	movq SCTX_R13(%rsi), %r13
+	movq SCTX_R14(%rsi), %r14
+	movq SCTX_R15(%rsi), %r15
+	movq SCTX_RBP(%rsi), %rbp
+	movq SCTX_RBX(%rsi), %rbx
+	movq SCTX_RCX(%rsi), %rcx
+	movq SCTX_RDX(%rsi), %rdx
+	movq SCTX_RDI(%rsi), %rdi
+	movq SCTX_RSI(%rsi), %rsi	/* %rsi must be restored last */
+
+	VMLOAD
+	VMRUN
+	VMSAVE
+
+	pop %rax		/* pop guest context pointer from the stack */
+
+	/*
+	 * Save guest state.
+	 */
+	movq %r8, SCTX_R8(%rax)
+	movq %r9, SCTX_R9(%rax)
+	movq %r10, SCTX_R10(%rax)
+	movq %r11, SCTX_R11(%rax)
+	movq %r12, SCTX_R12(%rax)
+	movq %r13, SCTX_R13(%rax)
+	movq %r14, SCTX_R14(%rax)
+	movq %r15, SCTX_R15(%rax)
+	movq %rbp, SCTX_RBP(%rax)
+	movq %rbx, SCTX_RBX(%rax)
+	movq %rcx, SCTX_RCX(%rax)
+	movq %rdx, SCTX_RDX(%rax)
+	movq %rdi, SCTX_RDI(%rax)
+	movq %rsi, SCTX_RSI(%rax)
+
+	/*
+	 * To prevent malicious branch target predictions from
+	 * affecting the host, overwrite all entries in the RSB upon
+	 * exiting a guest.
+	 */
+	mov $16, %ecx	/* 16 iterations, two calls per loop */
+	mov %rsp, %rax
+0:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	sub $1, %ecx
+	jnz 0b
+	mov %rax, %rsp
+
+	/* Restore host state */
+	pop %r15
+	pop %r14
+	pop %r13
+	pop %r12
+	pop %rbx
+
+	/* Restore %GS.base to point to the host's pcpu data */
+	pop %rdx
+	mov %edx, %eax
+	shr $32, %rdx
+	mov $MSR_GSBASE, %rcx
+	wrmsr
+
+	/*
+	 * Clobber the remaining registers with guest contents so they
+	 * can't be misused.
+	 */
+	xor %rbp, %rbp
+	xor %rdi, %rdi
+	xor %rsi, %rsi
+	xor %r8, %r8
+	xor %r9, %r9
+	xor %r10, %r10
+	xor %r11, %r11
+
+	VLEAVE
+	ret
+END(svm_launch)


Property changes on: trunk/sys/amd64/vmm/amd/svm_support.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
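
At the end of svm_launch() the saved pcpu pointer is popped into %rdx, its low
half copied to %eax and its high half shifted down, because wrmsr consumes a
64-bit value as an EDX:EAX pair. The same split written out in C; the address
used here is an arbitrary example value, not a real kernel pointer:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t gsbase = 0xffffffff81234560ULL;  /* arbitrary example value */
	uint32_t eax = (uint32_t)gsbase;	  /* "mov %edx, %eax": low half */
	uint32_t edx = (uint32_t)(gsbase >> 32);  /* "shr $32, %rdx": high half */

	/* wrmsr(MSR_GSBASE) would consume the pair as EDX:EAX. */
	printf("eax=%#" PRIx32 " edx=%#" PRIx32 "\n", eax, edx);
	return (0);
}
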
Added: trunk/sys/amd64/vmm/amd/vmcb.c
===================================================================
--- trunk/sys/amd64/vmm/amd/vmcb.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/vmcb.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,443 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/vmcb.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/segments.h>
+#include <machine/specialreg.h>
+#include <machine/vmm.h>
+
+#include "vmm_ktr.h"
+
+#include "vmcb.h"
+#include "svm.h"
+#include "svm_softc.h"
+
+/*
+ * The VMCB aka Virtual Machine Control Block is a 4KB aligned page
+ * in memory that describes the virtual machine.
+ *
+ * The VMCB contains:
+ * - instructions or events in the guest to intercept
+ * - control bits that modify execution environment of the guest
+ * - guest processor state (e.g. general purpose registers)
+ */
+
+/*
+ * Return VMCB segment area.
+ */
+static struct vmcb_segment *
+vmcb_segptr(struct vmcb *vmcb, int type)
+{
+	struct vmcb_state *state;
+	struct vmcb_segment *seg;
+
+	state = &vmcb->state;
+
+	switch (type) {
+	case VM_REG_GUEST_CS:
+		seg = &state->cs;
+		break;
+
+	case VM_REG_GUEST_DS:
+		seg = &state->ds;
+		break;
+
+	case VM_REG_GUEST_ES:
+		seg = &state->es;
+		break;
+
+	case VM_REG_GUEST_FS:
+		seg = &state->fs;
+		break;
+
+	case VM_REG_GUEST_GS:
+		seg = &state->gs;
+		break;
+
+	case VM_REG_GUEST_SS:
+		seg = &state->ss;
+		break;
+
+	case VM_REG_GUEST_GDTR:
+		seg = &state->gdt;
+		break;
+
+	case VM_REG_GUEST_IDTR:
+		seg = &state->idt;
+		break;
+
+	case VM_REG_GUEST_LDTR:
+		seg = &state->ldt;
+		break;
+
+	case VM_REG_GUEST_TR:
+		seg = &state->tr;
+		break;
+
+	default:
+		seg = NULL;
+		break;
+	}
+
+	return (seg);
+}
+
+static int
+vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
+	uint64_t *val)
+{
+	struct vmcb *vmcb;
+	int off, bytes;
+	char *ptr;
+
+	vmcb	= svm_get_vmcb(softc, vcpu);
+	off	= VMCB_ACCESS_OFFSET(ident);
+	bytes	= VMCB_ACCESS_BYTES(ident);
+
+	if ((off + bytes) >= sizeof (struct vmcb))
+		return (EINVAL);
+
+	ptr = (char *)vmcb;
+
+	if (!write)
+		*val = 0;
+
+	switch (bytes) {
+	case 8:
+	case 4:
+	case 2:
+		if (write)
+			memcpy(ptr + off, val, bytes);
+		else
+			memcpy(val, ptr + off, bytes);
+		break;
+	default:
+		VCPU_CTR1(softc->vm, vcpu,
+		    "Invalid size %d for VMCB access", bytes);
+		return (EINVAL);
+	}
+
+	/* Invalidate all VMCB state cached by h/w. */
+	if (write)
+		svm_set_dirty(softc, vcpu, 0xffffffff);
+
+	return (0);
+}
+
+/*
+ * Read a segment selector, control or general purpose register from the VMCB.
+ */
+int
+vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
+{
+	struct vmcb *vmcb;
+	struct vmcb_state *state;
+	struct vmcb_segment *seg;
+	int err;
+
+	vmcb = svm_get_vmcb(sc, vcpu);
+	state = &vmcb->state;
+	err = 0;
+
+	if (VMCB_ACCESS_OK(ident))
+		return (vmcb_access(sc, vcpu, 0, ident, retval));
+
+	switch (ident) {
+	case VM_REG_GUEST_CR0:
+		*retval = state->cr0;
+		break;
+
+	case VM_REG_GUEST_CR2:
+		*retval = state->cr2;
+		break;
+
+	case VM_REG_GUEST_CR3:
+		*retval = state->cr3;
+		break;
+
+	case VM_REG_GUEST_CR4:
+		*retval = state->cr4;
+		break;
+
+	case VM_REG_GUEST_DR7:
+		*retval = state->dr7;
+		break;
+
+	case VM_REG_GUEST_EFER:
+		*retval = state->efer;
+		break;
+
+	case VM_REG_GUEST_RAX:
+		*retval = state->rax;
+		break;
+
+	case VM_REG_GUEST_RFLAGS:
+		*retval = state->rflags;
+		break;
+
+	case VM_REG_GUEST_RIP:
+		*retval = state->rip;
+		break;
+
+	case VM_REG_GUEST_RSP:
+		*retval = state->rsp;
+		break;
+
+	case VM_REG_GUEST_CS:
+	case VM_REG_GUEST_DS:
+	case VM_REG_GUEST_ES:
+	case VM_REG_GUEST_FS:
+	case VM_REG_GUEST_GS:
+	case VM_REG_GUEST_SS:
+	case VM_REG_GUEST_LDTR:
+	case VM_REG_GUEST_TR:
+		seg = vmcb_segptr(vmcb, ident);
+		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
+		    __func__, ident));
+		*retval = seg->selector;
+		break;
+
+	case VM_REG_GUEST_GDTR:
+	case VM_REG_GUEST_IDTR:
+		/* GDTR and IDTR don't have segment selectors */
+		err = EINVAL;
+		break;
+	default:
+		err =  EINVAL;
+		break;
+	}
+
+	return (err);
+}
+
+/*
+ * Write a segment selector, control or general purpose register to the VMCB.
+ */
+int
+vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
+{
+	struct vmcb *vmcb;
+	struct vmcb_state *state;
+	struct vmcb_segment *seg;
+	int err, dirtyseg;
+
+	vmcb = svm_get_vmcb(sc, vcpu);
+	state = &vmcb->state;
+	dirtyseg = 0;
+	err = 0;
+
+	if (VMCB_ACCESS_OK(ident))
+		return (vmcb_access(sc, vcpu, 1, ident, &val));
+
+	switch (ident) {
+	case VM_REG_GUEST_CR0:
+		state->cr0 = val;
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+		break;
+
+	case VM_REG_GUEST_CR2:
+		state->cr2 = val;
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
+		break;
+
+	case VM_REG_GUEST_CR3:
+		state->cr3 = val;
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+		break;
+
+	case VM_REG_GUEST_CR4:
+		state->cr4 = val;
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+		break;
+
+	case VM_REG_GUEST_DR7:
+		state->dr7 = val;
+		break;
+
+	case VM_REG_GUEST_EFER:
+		/* EFER_SVM must always be set when the guest is executing */
+		state->efer = val | EFER_SVM;
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+		break;
+
+	case VM_REG_GUEST_RAX:
+		state->rax = val;
+		break;
+
+	case VM_REG_GUEST_RFLAGS:
+		state->rflags = val;
+		break;
+
+	case VM_REG_GUEST_RIP:
+		state->rip = val;
+		break;
+
+	case VM_REG_GUEST_RSP:
+		state->rsp = val;
+		break;
+
+	case VM_REG_GUEST_CS:
+	case VM_REG_GUEST_DS:
+	case VM_REG_GUEST_ES:
+	case VM_REG_GUEST_SS:
+		dirtyseg = 1;		/* FALLTHROUGH */
+	case VM_REG_GUEST_FS:
+	case VM_REG_GUEST_GS:
+	case VM_REG_GUEST_LDTR:
+	case VM_REG_GUEST_TR:
+		seg = vmcb_segptr(vmcb, ident);
+		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
+		    __func__, ident));
+		seg->selector = val;
+		if (dirtyseg)
+			svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
+		break;
+
+	case VM_REG_GUEST_GDTR:
+	case VM_REG_GUEST_IDTR:
+		/* GDTR and IDTR don't have segment selectors */
+		err = EINVAL;
+		break;
+	default:
+		err = EINVAL;
+		break;
+	}
+
+	return (err);
+}
+
+int
+vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
+{
+	struct vmcb_segment *seg;
+
+	seg = vmcb_segptr(vmcb, ident);
+	if (seg != NULL) {
+		bcopy(seg, seg2, sizeof(struct vmcb_segment));
+		return (0);
+	} else {
+		return (EINVAL);
+	}
+}
+
+int
+vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+{
+	struct vmcb *vmcb;
+	struct svm_softc *sc;
+	struct vmcb_segment *seg;
+	uint16_t attrib;
+
+	sc = arg;
+	vmcb = svm_get_vmcb(sc, vcpu);
+
+	seg = vmcb_segptr(vmcb, reg);
+	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
+	    __func__, reg));
+
+	seg->base = desc->base;
+	seg->limit = desc->limit;
+	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
+		/*
+		 * Map seg_desc access to VMCB attribute format.
+		 *
+		 * SVM uses the 'P' bit in the segment attributes to indicate a
+		 * NULL segment so clear it if the segment is marked unusable.
+		 */
+		attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
+		if (SEG_DESC_UNUSABLE(desc->access)) {
+			attrib &= ~0x80;
+		}
+		seg->attrib = attrib;
+	}
+
+	VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
+	    "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);
+
+	switch (reg) {
+	case VM_REG_GUEST_CS:
+	case VM_REG_GUEST_DS:
+	case VM_REG_GUEST_ES:
+	case VM_REG_GUEST_SS:
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
+		break;
+	case VM_REG_GUEST_GDTR:
+	case VM_REG_GUEST_IDTR:
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
+		break;
+	default:
+		break;
+	}
+
+	return (0);
+}
+
+int
+vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+{
+	struct vmcb *vmcb;
+	struct svm_softc *sc;
+	struct vmcb_segment *seg;
+
+	sc = arg;
+	vmcb = svm_get_vmcb(sc, vcpu);
+	seg = vmcb_segptr(vmcb, reg);
+	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
+	    __func__, reg));
+
+	desc->base = seg->base;
+	desc->limit = seg->limit;
+	desc->access = 0;
+
+	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
+		/* Map the VMCB attribute format back to seg_desc access bits */
+		desc->access = ((seg->attrib & 0xF00) << 4) |
+		    (seg->attrib & 0xFF);
+
+		/*
+		 * VT-x uses bit 16 to indicate a segment that has been loaded
+		 * with a NULL selector (aka unusable). The 'desc->access'
+		 * field is interpreted in the VT-x format by the
+		 * processor-independent code.
+		 *
+		 * SVM uses the 'P' bit to convey the same information so
+		 * convert it into the VT-x format. For more details refer to
+		 * section "Segment State in the VMCB" in APMv2.
+		 */
+		if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
+			if ((desc->access & 0x80) == 0)
+				desc->access |= 0x10000;  /* Unusable segment */
+		}
+	}
+
+	return (0);
+}


Property changes on: trunk/sys/amd64/vmm/amd/vmcb.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
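
vmcb_setdesc() and vmcb_getdesc() above translate between the seg_desc
access-rights layout and the packed 12-bit attribute field kept in the VMCB
segment registers. A self-contained sketch of both directions with a
round-trip check; the example access value is an assumed long-mode code
segment, not something taken from the commit:

#include <assert.h>
#include <stdint.h>

/*
 * seg_desc access bits 0-7 map straight across; bits 12-15 (AVL/L/D/G)
 * are packed into attribute bits 8-11, giving the 12-bit VMCB form.
 */
static uint16_t
access_to_attrib(uint32_t access)
{
	return (((access & 0xF000) >> 4) | (access & 0xFF));
}

static uint32_t
attrib_to_access(uint16_t attrib)
{
	return (((attrib & 0xF00) << 4) | (attrib & 0xFF));
}

int
main(void)
{
	/* Assumed long mode code segment: G=1, L=1, P=1, S=1, type 0xB. */
	uint32_t access = 0xA09B;
	uint16_t attrib = access_to_attrib(access);

	assert(attrib == 0xA9B);
	assert(attrib_to_access(attrib) == access);
	return (0);
}
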
Added: trunk/sys/amd64/vmm/amd/vmcb.h
===================================================================
--- trunk/sys/amd64/vmm/amd/vmcb.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/amd/vmcb.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,335 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/amd/vmcb.h 328002 2018-01-15 12:17:26Z avg $
+ */
+
+#ifndef _VMCB_H_
+#define	_VMCB_H_
+
+struct svm_softc;
+
+#define BIT(n)			(1ULL << n)
+
+/*
+ * Secure Virtual Machine: AMD64 Programmer's Manual Vol2, Chapter 15
+ * Layout of VMCB: AMD64 Programmer's Manual Vol2, Appendix B
+ */
+
+/* vmcb_ctrl->intercept[] array indices */
+#define	VMCB_CR_INTCPT		0
+#define	VMCB_DR_INTCPT		1
+#define	VMCB_EXC_INTCPT		2
+#define	VMCB_CTRL1_INTCPT	3
+#define	VMCB_CTRL2_INTCPT	4
+
+/* intercept[VMCB_CTRL1_INTCPT] fields */
+#define	VMCB_INTCPT_INTR		BIT(0)
+#define	VMCB_INTCPT_NMI			BIT(1)
+#define	VMCB_INTCPT_SMI			BIT(2)
+#define	VMCB_INTCPT_INIT		BIT(3)
+#define	VMCB_INTCPT_VINTR		BIT(4)
+#define	VMCB_INTCPT_CR0_WRITE		BIT(5)
+#define	VMCB_INTCPT_IDTR_READ		BIT(6)
+#define	VMCB_INTCPT_GDTR_READ		BIT(7)
+#define	VMCB_INTCPT_LDTR_READ		BIT(8)
+#define	VMCB_INTCPT_TR_READ		BIT(9)
+#define	VMCB_INTCPT_IDTR_WRITE		BIT(10)
+#define	VMCB_INTCPT_GDTR_WRITE		BIT(11)
+#define	VMCB_INTCPT_LDTR_WRITE		BIT(12)
+#define	VMCB_INTCPT_TR_WRITE		BIT(13)
+#define	VMCB_INTCPT_RDTSC		BIT(14)
+#define	VMCB_INTCPT_RDPMC		BIT(15)
+#define	VMCB_INTCPT_PUSHF		BIT(16)
+#define	VMCB_INTCPT_POPF		BIT(17)
+#define	VMCB_INTCPT_CPUID		BIT(18)
+#define	VMCB_INTCPT_RSM			BIT(19)
+#define	VMCB_INTCPT_IRET		BIT(20)
+#define	VMCB_INTCPT_INTn		BIT(21)
+#define	VMCB_INTCPT_INVD		BIT(22)
+#define	VMCB_INTCPT_PAUSE		BIT(23)
+#define	VMCB_INTCPT_HLT			BIT(24)
+#define	VMCB_INTCPT_INVPG		BIT(25)
+#define	VMCB_INTCPT_INVPGA		BIT(26)
+#define	VMCB_INTCPT_IO			BIT(27)
+#define	VMCB_INTCPT_MSR			BIT(28)
+#define	VMCB_INTCPT_TASK_SWITCH		BIT(29)
+#define	VMCB_INTCPT_FERR_FREEZE		BIT(30)
+#define	VMCB_INTCPT_SHUTDOWN		BIT(31)
+
+/* intercept[VMCB_CTRL2_INTCPT] fields */
+#define	VMCB_INTCPT_VMRUN		BIT(0)
+#define	VMCB_INTCPT_VMMCALL		BIT(1)
+#define	VMCB_INTCPT_VMLOAD		BIT(2)
+#define	VMCB_INTCPT_VMSAVE		BIT(3)
+#define	VMCB_INTCPT_STGI		BIT(4)
+#define	VMCB_INTCPT_CLGI		BIT(5)
+#define	VMCB_INTCPT_SKINIT		BIT(6)
+#define	VMCB_INTCPT_RDTSCP		BIT(7)
+#define	VMCB_INTCPT_ICEBP		BIT(8)
+#define	VMCB_INTCPT_WBINVD		BIT(9)
+#define	VMCB_INTCPT_MONITOR		BIT(10)
+#define	VMCB_INTCPT_MWAIT		BIT(11)
+#define	VMCB_INTCPT_MWAIT_ARMED		BIT(12)
+#define	VMCB_INTCPT_XSETBV		BIT(13)
+
+/* VMCB TLB control */
+#define	VMCB_TLB_FLUSH_NOTHING		0	/* Flush nothing */
+#define	VMCB_TLB_FLUSH_ALL		1	/* Flush entire TLB */
+#define	VMCB_TLB_FLUSH_GUEST		3	/* Flush all guest entries */
+#define	VMCB_TLB_FLUSH_GUEST_NONGLOBAL	7	/* Flush guest non-PG entries */
+
+/* VMCB state caching */
+#define	VMCB_CACHE_NONE		0	/* No caching */
+#define	VMCB_CACHE_I		BIT(0)	/* Intercept, TSC off, Pause filter */
+#define	VMCB_CACHE_IOPM		BIT(1)	/* I/O and MSR permission */
+#define	VMCB_CACHE_ASID		BIT(2)	/* ASID */
+#define	VMCB_CACHE_TPR		BIT(3)	/* V_TPR to V_INTR_VECTOR */
+#define	VMCB_CACHE_NP		BIT(4)	/* Nested Paging */
+#define	VMCB_CACHE_CR		BIT(5)	/* CR0, CR3, CR4 & EFER */
+#define	VMCB_CACHE_DR		BIT(6)	/* Debug registers */
+#define	VMCB_CACHE_DT		BIT(7)	/* GDT/IDT */
+#define	VMCB_CACHE_SEG		BIT(8)	/* User segments, CPL */
+#define	VMCB_CACHE_CR2		BIT(9)	/* page fault address */
+#define	VMCB_CACHE_LBR		BIT(10)	/* Last branch */
+
+/* VMCB control event injection */
+#define	VMCB_EVENTINJ_EC_VALID		BIT(11)	/* Error Code valid */
+#define	VMCB_EVENTINJ_VALID		BIT(31)	/* Event valid */
+
+/* Event types that can be injected */
+#define	VMCB_EVENTINJ_TYPE_INTR		0
+#define	VMCB_EVENTINJ_TYPE_NMI		2
+#define	VMCB_EVENTINJ_TYPE_EXCEPTION	3
+#define	VMCB_EVENTINJ_TYPE_INTn		4
+
+/* VMCB exit code, APM vol2 Appendix C */
+#define	VMCB_EXIT_MC			0x52
+#define	VMCB_EXIT_INTR			0x60
+#define	VMCB_EXIT_NMI			0x61
+#define	VMCB_EXIT_VINTR			0x64
+#define	VMCB_EXIT_PUSHF			0x70
+#define	VMCB_EXIT_POPF			0x71
+#define	VMCB_EXIT_CPUID			0x72
+#define	VMCB_EXIT_IRET			0x74
+#define	VMCB_EXIT_PAUSE			0x77
+#define	VMCB_EXIT_HLT			0x78
+#define	VMCB_EXIT_IO			0x7B
+#define	VMCB_EXIT_MSR			0x7C
+#define	VMCB_EXIT_SHUTDOWN		0x7F
+#define	VMCB_EXIT_VMSAVE		0x83
+#define	VMCB_EXIT_MONITOR		0x8A
+#define	VMCB_EXIT_MWAIT			0x8B
+#define	VMCB_EXIT_NPF			0x400
+#define	VMCB_EXIT_INVALID		-1
+
+/*
+ * Nested page fault.
+ * Bit definitions to decode EXITINFO1.
+ */
+#define	VMCB_NPF_INFO1_P		BIT(0) /* Nested page present. */
+#define	VMCB_NPF_INFO1_W		BIT(1) /* Access was write. */
+#define	VMCB_NPF_INFO1_U		BIT(2) /* Access was user access. */
+#define	VMCB_NPF_INFO1_RSV		BIT(3) /* Reserved bits present. */
+#define	VMCB_NPF_INFO1_ID		BIT(4) /* Code read. */
+
+#define	VMCB_NPF_INFO1_GPA		BIT(32) /* Guest physical address. */
+#define	VMCB_NPF_INFO1_GPT		BIT(33) /* Guest page table. */
+
+/*
+ * EXITINTINFO, interrupt exit info for all intercepts.
+ * Section 15.7.2, Intercepts during IDT Interrupt Delivery.
+ */
+#define VMCB_EXITINTINFO_VECTOR(x)	((x) & 0xFF)
+#define VMCB_EXITINTINFO_TYPE(x)	(((x) >> 8) & 0x7)
+#define VMCB_EXITINTINFO_EC_VALID(x)	(((x) & BIT(11)) ? 1 : 0)
+#define VMCB_EXITINTINFO_VALID(x)	(((x) & BIT(31)) ? 1 : 0)
+#define VMCB_EXITINTINFO_EC(x)		(((x) >> 32) & 0xFFFFFFFF)
+
+/* Offset of various VMCB fields. */
+#define	VMCB_OFF_CTRL(x)		(x)
+#define	VMCB_OFF_STATE(x)		((x) + 0x400)
+
+#define	VMCB_OFF_CR_INTERCEPT		VMCB_OFF_CTRL(0x0)
+#define	VMCB_OFF_DR_INTERCEPT		VMCB_OFF_CTRL(0x4)
+#define	VMCB_OFF_EXC_INTERCEPT		VMCB_OFF_CTRL(0x8)
+#define	VMCB_OFF_INST1_INTERCEPT	VMCB_OFF_CTRL(0xC)
+#define	VMCB_OFF_INST2_INTERCEPT	VMCB_OFF_CTRL(0x10)
+#define	VMCB_OFF_IO_PERM		VMCB_OFF_CTRL(0x40)
+#define	VMCB_OFF_MSR_PERM		VMCB_OFF_CTRL(0x48)
+#define	VMCB_OFF_TSC_OFFSET		VMCB_OFF_CTRL(0x50)
+#define	VMCB_OFF_ASID			VMCB_OFF_CTRL(0x58)
+#define	VMCB_OFF_TLB_CTRL		VMCB_OFF_CTRL(0x5C)
+#define	VMCB_OFF_VIRQ			VMCB_OFF_CTRL(0x60)
+#define	VMCB_OFF_EXIT_REASON		VMCB_OFF_CTRL(0x70)
+#define	VMCB_OFF_EXITINFO1		VMCB_OFF_CTRL(0x78)
+#define	VMCB_OFF_EXITINFO2		VMCB_OFF_CTRL(0x80)
+#define	VMCB_OFF_EXITINTINFO		VMCB_OFF_CTRL(0x88)
+#define	VMCB_OFF_AVIC_BAR		VMCB_OFF_CTRL(0x98)
+#define	VMCB_OFF_NPT_BASE		VMCB_OFF_CTRL(0xB0)
+#define	VMCB_OFF_AVIC_PAGE		VMCB_OFF_CTRL(0xE0)
+#define	VMCB_OFF_AVIC_LT		VMCB_OFF_CTRL(0xF0)
+#define	VMCB_OFF_AVIC_PT		VMCB_OFF_CTRL(0xF8)
+#define	VMCB_OFF_SYSENTER_CS		VMCB_OFF_STATE(0x228)
+#define	VMCB_OFF_SYSENTER_ESP		VMCB_OFF_STATE(0x230)
+#define	VMCB_OFF_SYSENTER_EIP		VMCB_OFF_STATE(0x238)
+#define	VMCB_OFF_GUEST_PAT		VMCB_OFF_STATE(0x268)
+
+/*
+ * Encode the VMCB offset and bytes that we want to read from VMCB.
+ */
+#define	VMCB_ACCESS(o, w)		(0x80000000 | (((w) & 0xF) << 16) | \
+					((o) & 0xFFF))
+#define	VMCB_ACCESS_OK(v)               ((v) & 0x80000000)
+#define	VMCB_ACCESS_BYTES(v)            (((v) >> 16) & 0xF)
+#define	VMCB_ACCESS_OFFSET(v)           ((v) & 0xFFF)
+
+#ifdef _KERNEL
+/* VMCB save state area segment format */
+struct vmcb_segment {
+	uint16_t	selector;
+	uint16_t	attrib;
+	uint32_t	limit;
+	uint64_t	base;
+} __attribute__ ((__packed__));
+CTASSERT(sizeof(struct vmcb_segment) == 16);
+
+/* Code segment descriptor attribute in 12 bit format as saved by VMCB. */
+#define	VMCB_CS_ATTRIB_L		BIT(9)	/* Long mode. */
+#define	VMCB_CS_ATTRIB_D		BIT(10)	/* Operand size bit. */
+
+/*
+ * The VMCB is divided into two areas - the first one contains various
+ * control bits including the intercept vector and the second one contains
+ * the guest state.
+ */
+
+/* VMCB control area - padded up to 1024 bytes */
+struct vmcb_ctrl {
+	uint32_t intercept[5];	/* all intercepts */
+	uint8_t	 pad1[0x28];	/* Offsets 0x14-0x3B are reserved. */
+	uint16_t pause_filthresh; /* Offset 0x3C, PAUSE filter threshold */
+	uint16_t pause_filcnt;  /* Offset 0x3E, PAUSE filter count */
+	uint64_t iopm_base_pa;	/* 0x40: IOPM_BASE_PA */
+	uint64_t msrpm_base_pa; /* 0x48: MSRPM_BASE_PA */
+	uint64_t tsc_offset;	/* 0x50: TSC_OFFSET */
+	uint32_t asid;		/* 0x58: Guest ASID */
+	uint8_t	 tlb_ctrl;	/* 0x5C: TLB_CONTROL */
+	uint8_t  pad2[3];	/* 0x5D-0x5F: Reserved. */
+	uint8_t	 v_tpr;		/* 0x60: V_TPR, guest CR8 */
+	uint8_t	 v_irq:1;	/* Is virtual interrupt pending? */
+	uint8_t	:7; 		/* Padding */
+	uint8_t v_intr_prio:4;	/* 0x62: Priority for virtual interrupt. */
+	uint8_t v_ign_tpr:1;
+	uint8_t :3;
+	uint8_t	v_intr_masking:1; /* Guest and host sharing of RFLAGS. */
+	uint8_t	:7;
+	uint8_t	v_intr_vector;	/* 0x64: Vector for virtual interrupt. */
+	uint8_t pad3[3];	/* 0x65-0x67 Reserved. */
+	uint64_t intr_shadow:1; /* 0x68: Interrupt shadow, section 15.2.1 APM2 */
+	uint64_t :63;
+	uint64_t exitcode;	/* 0x70, Exitcode */
+	uint64_t exitinfo1;	/* 0x78, EXITINFO1 */
+	uint64_t exitinfo2;	/* 0x80, EXITINFO2 */
+	uint64_t exitintinfo;	/* 0x88, Interrupt exit value. */
+	uint64_t np_enable:1;   /* 0x90, Nested paging enable. */
+	uint64_t :63;
+	uint8_t  pad4[0x10];	/* 0x98-0xA7 reserved. */
+	uint64_t eventinj;	/* 0xA8, Event injection. */
+	uint64_t n_cr3;		/* B0, Nested page table. */
+	uint64_t lbr_virt_en:1;	/* Enable LBR virtualization. */
+	uint64_t :63;
+	uint32_t vmcb_clean;	/* 0xC0: VMCB clean bits for caching */
+	uint32_t :32;		/* 0xC4: Reserved */
+	uint64_t nrip;		/* 0xC8: Guest next nRIP. */
+	uint8_t	inst_len;	/* 0xD0: #NPF decode assist */
+	uint8_t	inst_bytes[15];
+	uint8_t	padd6[0x320];
+} __attribute__ ((__packed__));
+CTASSERT(sizeof(struct vmcb_ctrl) == 1024);
+
+struct vmcb_state {
+	struct   vmcb_segment es;
+	struct   vmcb_segment cs;
+	struct   vmcb_segment ss;
+	struct   vmcb_segment ds;
+	struct   vmcb_segment fs;
+	struct   vmcb_segment gs;
+	struct   vmcb_segment gdt;
+	struct   vmcb_segment ldt;
+	struct   vmcb_segment idt;
+	struct   vmcb_segment tr;
+	uint8_t	 pad1[0x2b];		/* Reserved: 0xA0-0xCA */
+	uint8_t	 cpl;
+	uint8_t  pad2[4];
+	uint64_t efer;
+	uint8_t	 pad3[0x70];		/* Reserved: 0xd8-0x147 */
+	uint64_t cr4;
+	uint64_t cr3;			/* Guest CR3 */
+	uint64_t cr0;
+	uint64_t dr7;
+	uint64_t dr6;
+	uint64_t rflags;
+	uint64_t rip;
+	uint8_t	 pad4[0x58]; 		/* Reserved: 0x180-0x1D7 */
+	uint64_t rsp;
+	uint8_t	 pad5[0x18]; 		/* Reserved 0x1E0-0x1F7 */
+	uint64_t rax;
+	uint64_t star;
+	uint64_t lstar;
+	uint64_t cstar;
+	uint64_t sfmask;
+	uint64_t kernelgsbase;
+	uint64_t sysenter_cs;
+	uint64_t sysenter_esp;
+	uint64_t sysenter_eip;
+	uint64_t cr2;
+	uint8_t	 pad6[0x20];
+	uint64_t g_pat;
+	uint64_t dbgctl;
+	uint64_t br_from;
+	uint64_t br_to;
+	uint64_t int_from;
+	uint64_t int_to;
+	uint8_t	 pad7[0x968];		/* Reserved up to end of VMCB */
+} __attribute__ ((__packed__));
+CTASSERT(sizeof(struct vmcb_state) == 0xC00);
+
+struct vmcb {
+	struct vmcb_ctrl ctrl;
+	struct vmcb_state state;
+} __attribute__ ((__packed__));
+CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
+CTASSERT(offsetof(struct vmcb, state) == 0x400);
+
+int	vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval);
+int	vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val);
+int	vmcb_setdesc(void *arg, int vcpu, int ident, struct seg_desc *desc);
+int	vmcb_getdesc(void *arg, int vcpu, int ident, struct seg_desc *desc);
+int	vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg);
+
+#endif /* _KERNEL */
+#endif /* _VMCB_H_ */


Property changes on: trunk/sys/amd64/vmm/amd/vmcb.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
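
The VMCB_ACCESS() family in vmcb.h above packs a validity flag (bit 31), an access
width in bytes (bits 16-19) and a VMCB byte offset (bits 0-11) into one 32-bit
identifier describing a raw VMCB access. A minimal round-trip sketch of that
encoding (the macros are copied from the header; the main() harness is
illustrative only and not part of this commit):

#include <stdio.h>
#include <stdint.h>

/* Copied from vmcb.h: bit 31 = valid, bits 16-19 = width, bits 0-11 = offset. */
#define	VMCB_ACCESS(o, w)	(0x80000000 | (((w) & 0xF) << 16) | ((o) & 0xFFF))
#define	VMCB_ACCESS_OK(v)	((v) & 0x80000000)
#define	VMCB_ACCESS_BYTES(v)	(((v) >> 16) & 0xF)
#define	VMCB_ACCESS_OFFSET(v)	((v) & 0xFFF)

int
main(void)
{
	/* Encode an 8-byte access to the exit code field at control-area offset 0x70. */
	uint32_t ident = VMCB_ACCESS(0x70, 8);

	printf("valid=%d bytes=%u offset=%#x\n",
	    VMCB_ACCESS_OK(ident) ? 1 : 0,
	    VMCB_ACCESS_BYTES(ident),
	    VMCB_ACCESS_OFFSET(ident));	/* prints: valid=1 bytes=8 offset=0x70 */
	return (0);
}
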
Added: trunk/sys/amd64/vmm/intel/ept.c
===================================================================
--- trunk/sys/amd64/vmm/intel/ept.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/ept.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,207 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/ept.c 276349 2014-12-28 21:27:13Z neel $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/ept.c 276349 2014-12-28 21:27:13Z neel $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+
+#include <machine/vmm.h>
+
+#include "vmx_cpufunc.h"
+#include "vmm_ipi.h"
+#include "ept.h"
+
+#define	EPT_SUPPORTS_EXEC_ONLY(cap)	((cap) & (1UL << 0))
+#define	EPT_PWL4(cap)			((cap) & (1UL << 6))
+#define	EPT_MEMORY_TYPE_WB(cap)		((cap) & (1UL << 14))
+#define	EPT_PDE_SUPERPAGE(cap)		((cap) & (1UL << 16))	/* 2MB pages */
+#define	EPT_PDPTE_SUPERPAGE(cap)	((cap) & (1UL << 17))	/* 1GB pages */
+#define	INVEPT_SUPPORTED(cap)		((cap) & (1UL << 20))
+#define	AD_BITS_SUPPORTED(cap)		((cap) & (1UL << 21))
+#define	INVVPID_SUPPORTED(cap)		((cap) & (1UL << 32))
+
+#define	INVVPID_ALL_TYPES_MASK		0xF0000000000UL
+#define	INVVPID_ALL_TYPES_SUPPORTED(cap)	\
+	(((cap) & INVVPID_ALL_TYPES_MASK) == INVVPID_ALL_TYPES_MASK)
+
+#define	INVEPT_ALL_TYPES_MASK		0x6000000UL
+#define	INVEPT_ALL_TYPES_SUPPORTED(cap)		\
+	(((cap) & INVEPT_ALL_TYPES_MASK) == INVEPT_ALL_TYPES_MASK)
+
+#define	EPT_PWLEVELS		4		/* page walk levels */
+#define	EPT_ENABLE_AD_BITS	(1 << 6)
+
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, ept, CTLFLAG_RW, NULL, NULL);
+
+static int ept_enable_ad_bits;
+
+static int ept_pmap_flags;
+SYSCTL_INT(_hw_vmm_ept, OID_AUTO, pmap_flags, CTLFLAG_RD,
+    &ept_pmap_flags, 0, NULL);
+
+int
+ept_init(int ipinum)
+{
+	int use_hw_ad_bits, use_superpages, use_exec_only;
+	uint64_t cap;
+
+	cap = rdmsr(MSR_VMX_EPT_VPID_CAP);
+
+	/*
+	 * Verify that:
+	 * - page walk length is 4 steps
+	 * - extended page tables can be laid out in write-back memory
+	 * - invvpid instruction with all possible types is supported
+	 * - invept instruction with all possible types is supported
+	 */
+	if (!EPT_PWL4(cap) ||
+	    !EPT_MEMORY_TYPE_WB(cap) ||
+	    !INVVPID_SUPPORTED(cap) ||
+	    !INVVPID_ALL_TYPES_SUPPORTED(cap) ||
+	    !INVEPT_SUPPORTED(cap) ||
+	    !INVEPT_ALL_TYPES_SUPPORTED(cap))
+		return (EINVAL);
+
+	ept_pmap_flags = ipinum & PMAP_NESTED_IPIMASK;
+
+	use_superpages = 1;
+	TUNABLE_INT_FETCH("hw.vmm.ept.use_superpages", &use_superpages);
+	if (use_superpages && EPT_PDE_SUPERPAGE(cap))
+		ept_pmap_flags |= PMAP_PDE_SUPERPAGE;	/* 2MB superpage */
+
+	use_hw_ad_bits = 1;
+	TUNABLE_INT_FETCH("hw.vmm.ept.use_hw_ad_bits", &use_hw_ad_bits);
+	if (use_hw_ad_bits && AD_BITS_SUPPORTED(cap))
+		ept_enable_ad_bits = 1;
+	else
+		ept_pmap_flags |= PMAP_EMULATE_AD_BITS;
+
+	use_exec_only = 1;
+	TUNABLE_INT_FETCH("hw.vmm.ept.use_exec_only", &use_exec_only);
+	if (use_exec_only && EPT_SUPPORTS_EXEC_ONLY(cap))
+		ept_pmap_flags |= PMAP_SUPPORTS_EXEC_ONLY;
+
+	return (0);
+}
+
+#if 0
+static void
+ept_dump(uint64_t *ptp, int nlevels)
+{
+	int i, t, tabs;
+	uint64_t *ptpnext, ptpval;
+
+	if (--nlevels < 0)
+		return;
+
+	tabs = 3 - nlevels;
+	for (t = 0; t < tabs; t++)
+		printf("\t");
+	printf("PTP = %p\n", ptp);
+
+	for (i = 0; i < 512; i++) {
+		ptpval = ptp[i];
+
+		if (ptpval == 0)
+			continue;
+		
+		for (t = 0; t < tabs; t++)
+			printf("\t");
+		printf("%3d 0x%016lx\n", i, ptpval);
+
+		if (nlevels != 0 && (ptpval & EPT_PG_SUPERPAGE) == 0) {
+			ptpnext = (uint64_t *)
+				  PHYS_TO_DMAP(ptpval & EPT_ADDR_MASK);
+			ept_dump(ptpnext, nlevels);
+		}
+	}
+}
+#endif
+
+static void
+invept_single_context(void *arg)
+{
+	struct invept_desc desc = *(struct invept_desc *)arg;
+
+	invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
+}
+
+void
+ept_invalidate_mappings(u_long eptp)
+{
+	struct invept_desc invept_desc = { 0 };
+
+	invept_desc.eptp = eptp;
+
+	smp_rendezvous(NULL, invept_single_context, NULL, &invept_desc);
+}
+
+static int
+ept_pinit(pmap_t pmap)
+{
+
+	return (pmap_pinit_type(pmap, PT_EPT, ept_pmap_flags));
+}
+
+struct vmspace *
+ept_vmspace_alloc(vm_offset_t min, vm_offset_t max)
+{
+
+	return (vmspace_alloc(min, max, ept_pinit));
+}
+
+void
+ept_vmspace_free(struct vmspace *vmspace)
+{
+
+	vmspace_free(vmspace);
+}
+
+uint64_t
+eptp(uint64_t pml4)
+{
+	uint64_t eptp_val;
+
+	eptp_val = pml4 | (EPT_PWLEVELS - 1) << 3 | PAT_WRITE_BACK;
+	if (ept_enable_ad_bits)
+		eptp_val |= EPT_ENABLE_AD_BITS;
+
+	return (eptp_val);
+}


Property changes on: trunk/sys/amd64/vmm/intel/ept.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
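
ept_init() above is essentially a capability gate: it reads MSR_VMX_EPT_VPID_CAP
once and refuses to proceed unless the 4-level page walk, a write-back EPT memory
type and all the INVEPT/INVVPID variants it depends on are advertised, then records
superpage, A/D-bit and execute-only support in ept_pmap_flags. The EPT pointer
itself is composed by eptp() from the PML4 physical address, the walk length and
the memory type. A user-space sketch of that composition (not part of the commit;
PAT_WRITE_BACK is assumed to be 6, the write-back memory type, as in FreeBSD's
specialreg.h):

#include <stdio.h>
#include <stdint.h>

#define	EPT_PWLEVELS		4	/* page walk levels, as in ept.c */
#define	EPT_ENABLE_AD_BITS	(1 << 6)
#define	PAT_WRITE_BACK		6	/* assumed value of the WB memory type */

/* Mirror of eptp(): walk length and memory type in the low bits, PML4 PA above. */
static uint64_t
make_eptp(uint64_t pml4_pa, int enable_ad_bits)
{
	uint64_t v;

	v = pml4_pa | (uint64_t)(EPT_PWLEVELS - 1) << 3 | PAT_WRITE_BACK;
	if (enable_ad_bits)
		v |= EPT_ENABLE_AD_BITS;
	return (v);
}

int
main(void)
{
	/* A made-up PML4 physical address; prints eptp = 0x12345605e. */
	printf("eptp = %#llx\n",
	    (unsigned long long)make_eptp(0x123456000ULL, 1));
	return (0);
}
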
Added: trunk/sys/amd64/vmm/intel/ept.h
===================================================================
--- trunk/sys/amd64/vmm/intel/ept.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/ept.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,40 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/ept.h 266339 2014-05-17 19:11:08Z jhb $
+ */
+
+#ifndef	_EPT_H_
+#define	_EPT_H_
+
+struct vmx;
+
+int	ept_init(int ipinum);
+void	ept_invalidate_mappings(u_long eptp);
+struct vmspace *ept_vmspace_alloc(vm_offset_t min, vm_offset_t max);
+void	ept_vmspace_free(struct vmspace *vmspace);
+uint64_t eptp(uint64_t pml4);
+#endif


Property changes on: trunk/sys/amd64/vmm/intel/ept.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/intel/vmcs.c
===================================================================
--- trunk/sys/amd64/vmm/intel/vmcs.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmcs.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,518 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmcs.c 330713 2018-03-10 00:44:33Z tychon $
+ */
+
+#include "opt_ddb.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmcs.c 330713 2018-03-10 00:44:33Z tychon $");
+
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/pcpu.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/segments.h>
+#include <machine/vmm.h>
+#include "vmm_host.h"
+#include "vmx_cpufunc.h"
+#include "vmcs.h"
+#include "ept.h"
+#include "vmx.h"
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+SYSCTL_DECL(_hw_vmm_vmx);
+
+static int no_flush_rsb;
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, no_flush_rsb, CTLFLAG_RW,
+    &no_flush_rsb, 0, "Do not flush RSB upon vmexit");
+
+static uint64_t
+vmcs_fix_regval(uint32_t encoding, uint64_t val)
+{
+
+	switch (encoding) {
+	case VMCS_GUEST_CR0:
+		val = vmx_fix_cr0(val);
+		break;
+	case VMCS_GUEST_CR4:
+		val = vmx_fix_cr4(val);
+		break;
+	default:
+		break;
+	}
+	return (val);
+}
+
+static uint32_t
+vmcs_field_encoding(int ident)
+{
+	switch (ident) {
+	case VM_REG_GUEST_CR0:
+		return (VMCS_GUEST_CR0);
+	case VM_REG_GUEST_CR3:
+		return (VMCS_GUEST_CR3);
+	case VM_REG_GUEST_CR4:
+		return (VMCS_GUEST_CR4);
+	case VM_REG_GUEST_DR7:
+		return (VMCS_GUEST_DR7);
+	case VM_REG_GUEST_RSP:
+		return (VMCS_GUEST_RSP);
+	case VM_REG_GUEST_RIP:
+		return (VMCS_GUEST_RIP);
+	case VM_REG_GUEST_RFLAGS:
+		return (VMCS_GUEST_RFLAGS);
+	case VM_REG_GUEST_ES:
+		return (VMCS_GUEST_ES_SELECTOR);
+	case VM_REG_GUEST_CS:
+		return (VMCS_GUEST_CS_SELECTOR);
+	case VM_REG_GUEST_SS:
+		return (VMCS_GUEST_SS_SELECTOR);
+	case VM_REG_GUEST_DS:
+		return (VMCS_GUEST_DS_SELECTOR);
+	case VM_REG_GUEST_FS:
+		return (VMCS_GUEST_FS_SELECTOR);
+	case VM_REG_GUEST_GS:
+		return (VMCS_GUEST_GS_SELECTOR);
+	case VM_REG_GUEST_TR:
+		return (VMCS_GUEST_TR_SELECTOR);
+	case VM_REG_GUEST_LDTR:
+		return (VMCS_GUEST_LDTR_SELECTOR);
+	case VM_REG_GUEST_EFER:
+		return (VMCS_GUEST_IA32_EFER);
+	case VM_REG_GUEST_PDPTE0:
+		return (VMCS_GUEST_PDPTE0);
+	case VM_REG_GUEST_PDPTE1:
+		return (VMCS_GUEST_PDPTE1);
+	case VM_REG_GUEST_PDPTE2:
+		return (VMCS_GUEST_PDPTE2);
+	case VM_REG_GUEST_PDPTE3:
+		return (VMCS_GUEST_PDPTE3);
+	default:
+		return (-1);
+	}
+
+}
+
+static int
+vmcs_seg_desc_encoding(int seg, uint32_t *base, uint32_t *lim, uint32_t *acc)
+{
+
+	switch (seg) {
+	case VM_REG_GUEST_ES:
+		*base = VMCS_GUEST_ES_BASE;
+		*lim = VMCS_GUEST_ES_LIMIT;
+		*acc = VMCS_GUEST_ES_ACCESS_RIGHTS;
+		break;
+	case VM_REG_GUEST_CS:
+		*base = VMCS_GUEST_CS_BASE;
+		*lim = VMCS_GUEST_CS_LIMIT;
+		*acc = VMCS_GUEST_CS_ACCESS_RIGHTS;
+		break;
+	case VM_REG_GUEST_SS:
+		*base = VMCS_GUEST_SS_BASE;
+		*lim = VMCS_GUEST_SS_LIMIT;
+		*acc = VMCS_GUEST_SS_ACCESS_RIGHTS;
+		break;
+	case VM_REG_GUEST_DS:
+		*base = VMCS_GUEST_DS_BASE;
+		*lim = VMCS_GUEST_DS_LIMIT;
+		*acc = VMCS_GUEST_DS_ACCESS_RIGHTS;
+		break;
+	case VM_REG_GUEST_FS:
+		*base = VMCS_GUEST_FS_BASE;
+		*lim = VMCS_GUEST_FS_LIMIT;
+		*acc = VMCS_GUEST_FS_ACCESS_RIGHTS;
+		break;
+	case VM_REG_GUEST_GS:
+		*base = VMCS_GUEST_GS_BASE;
+		*lim = VMCS_GUEST_GS_LIMIT;
+		*acc = VMCS_GUEST_GS_ACCESS_RIGHTS;
+		break;
+	case VM_REG_GUEST_TR:
+		*base = VMCS_GUEST_TR_BASE;
+		*lim = VMCS_GUEST_TR_LIMIT;
+		*acc = VMCS_GUEST_TR_ACCESS_RIGHTS;
+		break;
+	case VM_REG_GUEST_LDTR:
+		*base = VMCS_GUEST_LDTR_BASE;
+		*lim = VMCS_GUEST_LDTR_LIMIT;
+		*acc = VMCS_GUEST_LDTR_ACCESS_RIGHTS;
+		break;
+	case VM_REG_GUEST_IDTR:
+		*base = VMCS_GUEST_IDTR_BASE;
+		*lim = VMCS_GUEST_IDTR_LIMIT;
+		*acc = VMCS_INVALID_ENCODING;
+		break;
+	case VM_REG_GUEST_GDTR:
+		*base = VMCS_GUEST_GDTR_BASE;
+		*lim = VMCS_GUEST_GDTR_LIMIT;
+		*acc = VMCS_INVALID_ENCODING;
+		break;
+	default:
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+int
+vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *retval)
+{
+	int error;
+	uint32_t encoding;
+
+	/*
+	 * If we need to get at vmx-specific state in the VMCS we can bypass
+	 * the translation of 'ident' to 'encoding' by simply setting the
+	 * sign bit. As it so happens the upper 16 bits are reserved (i.e.
+	 * set to 0) in the encodings for the VMCS so we are free to use the
+	 * sign bit.
+	 */
+	if (ident < 0)
+		encoding = ident & 0x7fffffff;
+	else
+		encoding = vmcs_field_encoding(ident);
+
+	if (encoding == (uint32_t)-1)
+		return (EINVAL);
+
+	if (!running)
+		VMPTRLD(vmcs);
+
+	error = vmread(encoding, retval);
+
+	if (!running)
+		VMCLEAR(vmcs);
+
+	return (error);
+}
+
+int
+vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val)
+{
+	int error;
+	uint32_t encoding;
+
+	if (ident < 0)
+		encoding = ident & 0x7fffffff;
+	else
+		encoding = vmcs_field_encoding(ident);
+
+	if (encoding == (uint32_t)-1)
+		return (EINVAL);
+
+	val = vmcs_fix_regval(encoding, val);
+
+	if (!running)
+		VMPTRLD(vmcs);
+
+	error = vmwrite(encoding, val);
+
+	if (!running)
+		VMCLEAR(vmcs);
+
+	return (error);
+}
+
+int
+vmcs_setdesc(struct vmcs *vmcs, int running, int seg, struct seg_desc *desc)
+{
+	int error;
+	uint32_t base, limit, access;
+
+	error = vmcs_seg_desc_encoding(seg, &base, &limit, &access);
+	if (error != 0)
+		panic("vmcs_setdesc: invalid segment register %d", seg);
+
+	if (!running)
+		VMPTRLD(vmcs);
+	if ((error = vmwrite(base, desc->base)) != 0)
+		goto done;
+
+	if ((error = vmwrite(limit, desc->limit)) != 0)
+		goto done;
+
+	if (access != VMCS_INVALID_ENCODING) {
+		if ((error = vmwrite(access, desc->access)) != 0)
+			goto done;
+	}
+done:
+	if (!running)
+		VMCLEAR(vmcs);
+	return (error);
+}
+
+int
+vmcs_getdesc(struct vmcs *vmcs, int running, int seg, struct seg_desc *desc)
+{
+	int error;
+	uint32_t base, limit, access;
+	uint64_t u64;
+
+	error = vmcs_seg_desc_encoding(seg, &base, &limit, &access);
+	if (error != 0)
+		panic("vmcs_getdesc: invalid segment register %d", seg);
+
+	if (!running)
+		VMPTRLD(vmcs);
+	if ((error = vmread(base, &u64)) != 0)
+		goto done;
+	desc->base = u64;
+
+	if ((error = vmread(limit, &u64)) != 0)
+		goto done;
+	desc->limit = u64;
+
+	if (access != VMCS_INVALID_ENCODING) {
+		if ((error = vmread(access, &u64)) != 0)
+			goto done;
+		desc->access = u64;
+	}
+done:
+	if (!running)
+		VMCLEAR(vmcs);
+	return (error);
+}
+
+int
+vmcs_set_msr_save(struct vmcs *vmcs, u_long g_area, u_int g_count)
+{
+	int error;
+
+	VMPTRLD(vmcs);
+
+	/*
+	 * Guest MSRs are saved in the VM-exit MSR-store area.
+	 * Guest MSRs are loaded from the VM-entry MSR-load area.
+	 * Both areas point to the same location in memory.
+	 */
+	if ((error = vmwrite(VMCS_EXIT_MSR_STORE, g_area)) != 0)
+		goto done;
+	if ((error = vmwrite(VMCS_EXIT_MSR_STORE_COUNT, g_count)) != 0)
+		goto done;
+
+	if ((error = vmwrite(VMCS_ENTRY_MSR_LOAD, g_area)) != 0)
+		goto done;
+	if ((error = vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, g_count)) != 0)
+		goto done;
+
+	error = 0;
+done:
+	VMCLEAR(vmcs);
+	return (error);
+}
+
+int
+vmcs_init(struct vmcs *vmcs)
+{
+	int error, codesel, datasel, tsssel;
+	u_long cr0, cr4, efer;
+	uint64_t pat, fsbase, idtrbase;
+
+	codesel = vmm_get_host_codesel();
+	datasel = vmm_get_host_datasel();
+	tsssel = vmm_get_host_tsssel();
+
+	/*
+	 * Make sure we have a "current" VMCS to work with.
+	 */
+	VMPTRLD(vmcs);
+
+	/* Host state */
+
+	/* Initialize host IA32_PAT MSR */
+	pat = vmm_get_host_pat();
+	if ((error = vmwrite(VMCS_HOST_IA32_PAT, pat)) != 0)
+		goto done;
+
+	/* Load the IA32_EFER MSR */
+	efer = vmm_get_host_efer();
+	if ((error = vmwrite(VMCS_HOST_IA32_EFER, efer)) != 0)
+		goto done;
+
+	/* Load the control registers */
+
+	cr0 = vmm_get_host_cr0();
+	if ((error = vmwrite(VMCS_HOST_CR0, cr0)) != 0)
+		goto done;
+	
+	cr4 = vmm_get_host_cr4() | CR4_VMXE;
+	if ((error = vmwrite(VMCS_HOST_CR4, cr4)) != 0)
+		goto done;
+
+	/* Load the segment selectors */
+	if ((error = vmwrite(VMCS_HOST_ES_SELECTOR, datasel)) != 0)
+		goto done;
+
+	if ((error = vmwrite(VMCS_HOST_CS_SELECTOR, codesel)) != 0)
+		goto done;
+
+	if ((error = vmwrite(VMCS_HOST_SS_SELECTOR, datasel)) != 0)
+		goto done;
+
+	if ((error = vmwrite(VMCS_HOST_DS_SELECTOR, datasel)) != 0)
+		goto done;
+
+	if ((error = vmwrite(VMCS_HOST_FS_SELECTOR, datasel)) != 0)
+		goto done;
+
+	if ((error = vmwrite(VMCS_HOST_GS_SELECTOR, datasel)) != 0)
+		goto done;
+
+	if ((error = vmwrite(VMCS_HOST_TR_SELECTOR, tsssel)) != 0)
+		goto done;
+
+	/*
+	 * Load the Base-Address for %fs and idtr.
+	 *
+	 * Note that we exclude %gs, tss and gdtr here because their base
+	 * address is pcpu specific.
+	 */
+	fsbase = vmm_get_host_fsbase();
+	if ((error = vmwrite(VMCS_HOST_FS_BASE, fsbase)) != 0)
+		goto done;
+
+	idtrbase = vmm_get_host_idtrbase();
+	if ((error = vmwrite(VMCS_HOST_IDTR_BASE, idtrbase)) != 0)
+		goto done;
+
+	/* instruction pointer */
+	if (no_flush_rsb) {
+		if ((error = vmwrite(VMCS_HOST_RIP,
+		    (u_long)vmx_exit_guest)) != 0)
+			goto done;
+	} else {
+		if ((error = vmwrite(VMCS_HOST_RIP,
+		    (u_long)vmx_exit_guest_flush_rsb)) != 0)
+			goto done;
+	}
+
+	/* link pointer */
+	if ((error = vmwrite(VMCS_LINK_POINTER, ~0)) != 0)
+		goto done;
+done:
+	VMCLEAR(vmcs);
+	return (error);
+}
+
+#ifdef DDB
+extern int vmxon_enabled[];
+
+DB_SHOW_COMMAND(vmcs, db_show_vmcs)
+{
+	uint64_t cur_vmcs, val;
+	uint32_t exit;
+
+	if (!vmxon_enabled[curcpu]) {
+		db_printf("VMX not enabled\n");
+		return;
+	}
+
+	if (have_addr) {
+		db_printf("Only current VMCS supported\n");
+		return;
+	}
+
+	vmptrst(&cur_vmcs);
+	if (cur_vmcs == VMCS_INITIAL) {
+		db_printf("No current VM context\n");
+		return;
+	}
+	db_printf("VMCS: %jx\n", cur_vmcs);
+	db_printf("VPID: %lu\n", vmcs_read(VMCS_VPID));
+	db_printf("Activity: ");
+	val = vmcs_read(VMCS_GUEST_ACTIVITY);
+	switch (val) {
+	case 0:
+		db_printf("Active");
+		break;
+	case 1:
+		db_printf("HLT");
+		break;
+	case 2:
+		db_printf("Shutdown");
+		break;
+	case 3:
+		db_printf("Wait for SIPI");
+		break;
+	default:
+		db_printf("Unknown: %#lx", val);
+	}
+	db_printf("\n");
+	exit = vmcs_read(VMCS_EXIT_REASON);
+	if (exit & 0x80000000)
+		db_printf("Entry Failure Reason: %u\n", exit & 0xffff);
+	else
+		db_printf("Exit Reason: %u\n", exit & 0xffff);
+	db_printf("Qualification: %#lx\n", vmcs_exit_qualification());
+	db_printf("Guest Linear Address: %#lx\n",
+	    vmcs_read(VMCS_GUEST_LINEAR_ADDRESS));
+	switch (exit & 0x8000ffff) {
+	case EXIT_REASON_EXCEPTION:
+	case EXIT_REASON_EXT_INTR:
+		val = vmcs_read(VMCS_EXIT_INTR_INFO);
+		db_printf("Interrupt Type: ");
+		switch (val >> 8 & 0x7) {
+		case 0:
+			db_printf("external");
+			break;
+		case 2:
+			db_printf("NMI");
+			break;
+		case 3:
+			db_printf("HW exception");
+			break;
+		case 4:
+			db_printf("SW exception");
+			break;
+		default:
+			db_printf("?? %lu", val >> 8 & 0x7);
+			break;
+		}
+		db_printf("  Vector: %lu", val & 0xff);
+		if (val & 0x800)
+			db_printf("  Error Code: %lx",
+			    vmcs_read(VMCS_EXIT_INTR_ERRCODE));
+		db_printf("\n");
+		break;
+	case EXIT_REASON_EPT_FAULT:
+	case EXIT_REASON_EPT_MISCONFIG:
+		db_printf("Guest Physical Address: %#lx\n",
+		    vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS));
+		break;
+	}
+	db_printf("VM-instruction error: %#lx\n", vmcs_instruction_error());
+}
+#endif


Property changes on: trunk/sys/amd64/vmm/intel/vmcs.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
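
vmcs_getreg() and vmcs_setreg() above bracket each access with VMPTRLD/VMCLEAR
when the vcpu is not running, and accept either a vmm register ident or, when the
sign bit is set, a raw VMCS field encoding; since VMCS encodings never use bit 31,
masking it off recovers the encoding directly and skips the lookup table. A minimal
user-space sketch of that convention (VMCS_IDENT and the CR0 encoding match vmcs.h
in this commit; VM_REG_GUEST_CR0 is given a placeholder value and the lookup table
is trimmed to one entry purely for illustration):

#include <stdio.h>
#include <stdint.h>

#define	VMCS_IDENT(encoding)	((encoding) | 0x80000000)
#define	VMCS_GUEST_CR0		0x00006800
#define	VM_REG_GUEST_CR0	0	/* placeholder ident for this sketch */

/* Resolve an ident, or a sign-bit-tagged raw encoding, to a VMCS encoding. */
static uint32_t
resolve_encoding(int ident)
{
	if (ident < 0)			/* raw encoding with the sign bit set */
		return (ident & 0x7fffffff);
	if (ident == VM_REG_GUEST_CR0)	/* normal ident: table lookup */
		return (VMCS_GUEST_CR0);
	return ((uint32_t)-1);		/* unknown: callers map this to EINVAL */
}

int
main(void)
{
	printf("%#x\n", resolve_encoding(VM_REG_GUEST_CR0));		/* 0x6800 */
	printf("%#x\n", resolve_encoding((int)VMCS_IDENT(VMCS_GUEST_CR0))); /* 0x6800 */
	return (0);
}
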
Added: trunk/sys/amd64/vmm/intel/vmcs.h
===================================================================
--- trunk/sys/amd64/vmm/intel/vmcs.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmcs.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,402 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmcs.h 276403 2014-12-30 08:24:14Z neel $
+ */
+
+#ifndef _VMCS_H_
+#define	_VMCS_H_
+
+#ifdef _KERNEL
+struct vmcs {
+	uint32_t	identifier;
+	uint32_t	abort_code;
+	char		_impl_specific[PAGE_SIZE - sizeof(uint32_t) * 2];
+};
+CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);
+
+/* MSR save region is composed of an array of 'struct msr_entry' */
+struct msr_entry {
+	uint32_t	index;
+	uint32_t	reserved;
+	uint64_t	val;
+
+};
+
+int	vmcs_set_msr_save(struct vmcs *vmcs, u_long g_area, u_int g_count);
+int	vmcs_init(struct vmcs *vmcs);
+int	vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *rv);
+int	vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val);
+int	vmcs_getdesc(struct vmcs *vmcs, int running, int ident,
+		     struct seg_desc *desc);
+int	vmcs_setdesc(struct vmcs *vmcs, int running, int ident,
+		     struct seg_desc *desc);
+
+/*
+ * Avoid header pollution caused by inline use of 'vtophys()' in vmx_cpufunc.h
+ */
+#ifdef _VMX_CPUFUNC_H_
+static __inline uint64_t
+vmcs_read(uint32_t encoding)
+{
+	int error;
+	uint64_t val;
+
+	error = vmread(encoding, &val);
+	KASSERT(error == 0, ("vmcs_read(%u) error %d", encoding, error));
+	return (val);
+}
+
+static __inline void
+vmcs_write(uint32_t encoding, uint64_t val)
+{
+	int error;
+
+	error = vmwrite(encoding, val);
+	KASSERT(error == 0, ("vmcs_write(%u) error %d", encoding, error));
+}
+#endif	/* _VMX_CPUFUNC_H_ */
+
+#define	vmexit_instruction_length()	vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH)
+#define	vmcs_guest_rip()		vmcs_read(VMCS_GUEST_RIP)
+#define	vmcs_instruction_error()	vmcs_read(VMCS_INSTRUCTION_ERROR)
+#define	vmcs_exit_reason()		(vmcs_read(VMCS_EXIT_REASON) & 0xffff)
+#define	vmcs_exit_qualification()	vmcs_read(VMCS_EXIT_QUALIFICATION)
+#define	vmcs_guest_cr3()		vmcs_read(VMCS_GUEST_CR3)
+#define	vmcs_gpa()			vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS)
+#define	vmcs_gla()			vmcs_read(VMCS_GUEST_LINEAR_ADDRESS)
+#define	vmcs_idt_vectoring_info()	vmcs_read(VMCS_IDT_VECTORING_INFO)
+#define	vmcs_idt_vectoring_err()	vmcs_read(VMCS_IDT_VECTORING_ERROR)
+
+#endif	/* _KERNEL */
+
+#define	VMCS_INITIAL			0xffffffffffffffff
+
+#define	VMCS_IDENT(encoding)		((encoding) | 0x80000000)
+/*
+ * VMCS field encodings from Appendix H, Intel Architecture Manual Vol3B.
+ */
+#define	VMCS_INVALID_ENCODING		0xffffffff
+
+/* 16-bit control fields */
+#define	VMCS_VPID			0x00000000
+#define	VMCS_PIR_VECTOR			0x00000002
+
+/* 16-bit guest-state fields */
+#define	VMCS_GUEST_ES_SELECTOR		0x00000800
+#define	VMCS_GUEST_CS_SELECTOR		0x00000802
+#define	VMCS_GUEST_SS_SELECTOR		0x00000804
+#define	VMCS_GUEST_DS_SELECTOR		0x00000806
+#define	VMCS_GUEST_FS_SELECTOR		0x00000808
+#define	VMCS_GUEST_GS_SELECTOR		0x0000080A
+#define	VMCS_GUEST_LDTR_SELECTOR	0x0000080C
+#define	VMCS_GUEST_TR_SELECTOR		0x0000080E
+#define	VMCS_GUEST_INTR_STATUS		0x00000810
+
+/* 16-bit host-state fields */
+#define	VMCS_HOST_ES_SELECTOR		0x00000C00
+#define	VMCS_HOST_CS_SELECTOR		0x00000C02
+#define	VMCS_HOST_SS_SELECTOR		0x00000C04
+#define	VMCS_HOST_DS_SELECTOR		0x00000C06
+#define	VMCS_HOST_FS_SELECTOR		0x00000C08
+#define	VMCS_HOST_GS_SELECTOR		0x00000C0A
+#define	VMCS_HOST_TR_SELECTOR		0x00000C0C
+
+/* 64-bit control fields */
+#define	VMCS_IO_BITMAP_A		0x00002000
+#define	VMCS_IO_BITMAP_B		0x00002002
+#define	VMCS_MSR_BITMAP			0x00002004
+#define	VMCS_EXIT_MSR_STORE		0x00002006
+#define	VMCS_EXIT_MSR_LOAD		0x00002008
+#define	VMCS_ENTRY_MSR_LOAD		0x0000200A
+#define	VMCS_EXECUTIVE_VMCS		0x0000200C
+#define	VMCS_TSC_OFFSET			0x00002010
+#define	VMCS_VIRTUAL_APIC		0x00002012
+#define	VMCS_APIC_ACCESS		0x00002014
+#define	VMCS_PIR_DESC			0x00002016
+#define	VMCS_EPTP			0x0000201A
+#define	VMCS_EOI_EXIT0			0x0000201C
+#define	VMCS_EOI_EXIT1			0x0000201E
+#define	VMCS_EOI_EXIT2			0x00002020
+#define	VMCS_EOI_EXIT3			0x00002022
+#define	VMCS_EOI_EXIT(vector)		(VMCS_EOI_EXIT0 + ((vector) / 64) * 2)
+
+/* 64-bit read-only fields */
+#define	VMCS_GUEST_PHYSICAL_ADDRESS	0x00002400
+
+/* 64-bit guest-state fields */
+#define	VMCS_LINK_POINTER		0x00002800
+#define	VMCS_GUEST_IA32_DEBUGCTL	0x00002802
+#define	VMCS_GUEST_IA32_PAT		0x00002804
+#define	VMCS_GUEST_IA32_EFER		0x00002806
+#define	VMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808
+#define	VMCS_GUEST_PDPTE0		0x0000280A
+#define	VMCS_GUEST_PDPTE1		0x0000280C
+#define	VMCS_GUEST_PDPTE2		0x0000280E
+#define	VMCS_GUEST_PDPTE3		0x00002810
+
+/* 64-bit host-state fields */
+#define	VMCS_HOST_IA32_PAT		0x00002C00
+#define	VMCS_HOST_IA32_EFER		0x00002C02
+#define	VMCS_HOST_IA32_PERF_GLOBAL_CTRL	0x00002C04
+
+/* 32-bit control fields */
+#define	VMCS_PIN_BASED_CTLS		0x00004000
+#define	VMCS_PRI_PROC_BASED_CTLS	0x00004002
+#define	VMCS_EXCEPTION_BITMAP		0x00004004
+#define	VMCS_PF_ERROR_MASK		0x00004006
+#define	VMCS_PF_ERROR_MATCH		0x00004008
+#define	VMCS_CR3_TARGET_COUNT		0x0000400A
+#define	VMCS_EXIT_CTLS			0x0000400C
+#define	VMCS_EXIT_MSR_STORE_COUNT	0x0000400E
+#define	VMCS_EXIT_MSR_LOAD_COUNT	0x00004010
+#define	VMCS_ENTRY_CTLS			0x00004012
+#define	VMCS_ENTRY_MSR_LOAD_COUNT	0x00004014
+#define	VMCS_ENTRY_INTR_INFO		0x00004016
+#define	VMCS_ENTRY_EXCEPTION_ERROR	0x00004018
+#define	VMCS_ENTRY_INST_LENGTH		0x0000401A
+#define	VMCS_TPR_THRESHOLD		0x0000401C
+#define	VMCS_SEC_PROC_BASED_CTLS	0x0000401E
+#define	VMCS_PLE_GAP			0x00004020
+#define	VMCS_PLE_WINDOW			0x00004022
+
+/* 32-bit read-only data fields */
+#define	VMCS_INSTRUCTION_ERROR		0x00004400
+#define	VMCS_EXIT_REASON		0x00004402
+#define	VMCS_EXIT_INTR_INFO		0x00004404
+#define	VMCS_EXIT_INTR_ERRCODE		0x00004406
+#define	VMCS_IDT_VECTORING_INFO		0x00004408
+#define	VMCS_IDT_VECTORING_ERROR	0x0000440A
+#define	VMCS_EXIT_INSTRUCTION_LENGTH	0x0000440C
+#define	VMCS_EXIT_INSTRUCTION_INFO	0x0000440E
+
+/* 32-bit guest-state fields */
+#define	VMCS_GUEST_ES_LIMIT		0x00004800
+#define	VMCS_GUEST_CS_LIMIT		0x00004802
+#define	VMCS_GUEST_SS_LIMIT		0x00004804
+#define	VMCS_GUEST_DS_LIMIT		0x00004806
+#define	VMCS_GUEST_FS_LIMIT		0x00004808
+#define	VMCS_GUEST_GS_LIMIT		0x0000480A
+#define	VMCS_GUEST_LDTR_LIMIT		0x0000480C
+#define	VMCS_GUEST_TR_LIMIT		0x0000480E
+#define	VMCS_GUEST_GDTR_LIMIT		0x00004810
+#define	VMCS_GUEST_IDTR_LIMIT		0x00004812
+#define	VMCS_GUEST_ES_ACCESS_RIGHTS	0x00004814
+#define	VMCS_GUEST_CS_ACCESS_RIGHTS	0x00004816
+#define	VMCS_GUEST_SS_ACCESS_RIGHTS	0x00004818
+#define	VMCS_GUEST_DS_ACCESS_RIGHTS	0x0000481A
+#define	VMCS_GUEST_FS_ACCESS_RIGHTS	0x0000481C
+#define	VMCS_GUEST_GS_ACCESS_RIGHTS	0x0000481E
+#define	VMCS_GUEST_LDTR_ACCESS_RIGHTS	0x00004820
+#define	VMCS_GUEST_TR_ACCESS_RIGHTS	0x00004822
+#define	VMCS_GUEST_INTERRUPTIBILITY	0x00004824
+#define	VMCS_GUEST_ACTIVITY		0x00004826
+#define VMCS_GUEST_SMBASE		0x00004828
+#define	VMCS_GUEST_IA32_SYSENTER_CS	0x0000482A
+#define	VMCS_PREEMPTION_TIMER_VALUE	0x0000482E
+
+/* 32-bit host state fields */
+#define	VMCS_HOST_IA32_SYSENTER_CS	0x00004C00
+
+/* Natural Width control fields */
+#define	VMCS_CR0_MASK			0x00006000
+#define	VMCS_CR4_MASK			0x00006002
+#define	VMCS_CR0_SHADOW			0x00006004
+#define	VMCS_CR4_SHADOW			0x00006006
+#define	VMCS_CR3_TARGET0		0x00006008
+#define	VMCS_CR3_TARGET1		0x0000600A
+#define	VMCS_CR3_TARGET2		0x0000600C
+#define	VMCS_CR3_TARGET3		0x0000600E
+
+/* Natural Width read-only fields */
+#define	VMCS_EXIT_QUALIFICATION		0x00006400
+#define	VMCS_IO_RCX			0x00006402
+#define	VMCS_IO_RSI			0x00006404
+#define	VMCS_IO_RDI			0x00006406
+#define	VMCS_IO_RIP			0x00006408
+#define	VMCS_GUEST_LINEAR_ADDRESS	0x0000640A
+
+/* Natural Width guest-state fields */
+#define	VMCS_GUEST_CR0			0x00006800
+#define	VMCS_GUEST_CR3			0x00006802
+#define	VMCS_GUEST_CR4			0x00006804
+#define	VMCS_GUEST_ES_BASE		0x00006806
+#define	VMCS_GUEST_CS_BASE		0x00006808
+#define	VMCS_GUEST_SS_BASE		0x0000680A
+#define	VMCS_GUEST_DS_BASE		0x0000680C
+#define	VMCS_GUEST_FS_BASE		0x0000680E
+#define	VMCS_GUEST_GS_BASE		0x00006810
+#define	VMCS_GUEST_LDTR_BASE		0x00006812
+#define	VMCS_GUEST_TR_BASE		0x00006814
+#define	VMCS_GUEST_GDTR_BASE		0x00006816
+#define	VMCS_GUEST_IDTR_BASE		0x00006818
+#define	VMCS_GUEST_DR7			0x0000681A
+#define	VMCS_GUEST_RSP			0x0000681C
+#define	VMCS_GUEST_RIP			0x0000681E
+#define	VMCS_GUEST_RFLAGS		0x00006820
+#define	VMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822
+#define	VMCS_GUEST_IA32_SYSENTER_ESP	0x00006824
+#define	VMCS_GUEST_IA32_SYSENTER_EIP	0x00006826
+
+/* Natural Width host-state fields */
+#define	VMCS_HOST_CR0			0x00006C00
+#define	VMCS_HOST_CR3			0x00006C02
+#define	VMCS_HOST_CR4			0x00006C04
+#define	VMCS_HOST_FS_BASE		0x00006C06
+#define	VMCS_HOST_GS_BASE		0x00006C08
+#define	VMCS_HOST_TR_BASE		0x00006C0A
+#define	VMCS_HOST_GDTR_BASE		0x00006C0C
+#define	VMCS_HOST_IDTR_BASE		0x00006C0E
+#define	VMCS_HOST_IA32_SYSENTER_ESP	0x00006C10
+#define	VMCS_HOST_IA32_SYSENTER_EIP	0x00006C12
+#define	VMCS_HOST_RSP			0x00006C14
+#define	VMCS_HOST_RIP			0x00006c16
+
+/*
+ * VM instruction error numbers
+ */
+#define	VMRESUME_WITH_NON_LAUNCHED_VMCS	5
+
+/*
+ * VMCS exit reasons
+ */
+#define EXIT_REASON_EXCEPTION		0
+#define EXIT_REASON_EXT_INTR		1
+#define EXIT_REASON_TRIPLE_FAULT	2
+#define EXIT_REASON_INIT		3
+#define EXIT_REASON_SIPI		4
+#define EXIT_REASON_IO_SMI		5
+#define EXIT_REASON_SMI			6
+#define EXIT_REASON_INTR_WINDOW		7
+#define EXIT_REASON_NMI_WINDOW		8
+#define EXIT_REASON_TASK_SWITCH		9
+#define EXIT_REASON_CPUID		10
+#define EXIT_REASON_GETSEC		11
+#define EXIT_REASON_HLT			12
+#define EXIT_REASON_INVD		13
+#define EXIT_REASON_INVLPG		14
+#define EXIT_REASON_RDPMC		15
+#define EXIT_REASON_RDTSC		16
+#define EXIT_REASON_RSM			17
+#define EXIT_REASON_VMCALL		18
+#define EXIT_REASON_VMCLEAR		19
+#define EXIT_REASON_VMLAUNCH		20
+#define EXIT_REASON_VMPTRLD		21
+#define EXIT_REASON_VMPTRST		22
+#define EXIT_REASON_VMREAD		23
+#define EXIT_REASON_VMRESUME		24
+#define EXIT_REASON_VMWRITE		25
+#define EXIT_REASON_VMXOFF		26
+#define EXIT_REASON_VMXON		27
+#define EXIT_REASON_CR_ACCESS		28
+#define EXIT_REASON_DR_ACCESS		29
+#define EXIT_REASON_INOUT		30
+#define EXIT_REASON_RDMSR		31
+#define EXIT_REASON_WRMSR		32
+#define EXIT_REASON_INVAL_VMCS		33
+#define EXIT_REASON_INVAL_MSR		34
+#define EXIT_REASON_MWAIT		36
+#define EXIT_REASON_MTF			37
+#define EXIT_REASON_MONITOR		39
+#define EXIT_REASON_PAUSE		40
+#define EXIT_REASON_MCE_DURING_ENTRY	41
+#define EXIT_REASON_TPR			43
+#define EXIT_REASON_APIC_ACCESS		44
+#define	EXIT_REASON_VIRTUALIZED_EOI	45
+#define EXIT_REASON_GDTR_IDTR		46
+#define EXIT_REASON_LDTR_TR		47
+#define EXIT_REASON_EPT_FAULT		48
+#define EXIT_REASON_EPT_MISCONFIG	49
+#define EXIT_REASON_INVEPT		50
+#define EXIT_REASON_RDTSCP		51
+#define EXIT_REASON_VMX_PREEMPT		52
+#define EXIT_REASON_INVVPID		53
+#define EXIT_REASON_WBINVD		54
+#define EXIT_REASON_XSETBV		55
+#define	EXIT_REASON_APIC_WRITE		56
+
+/*
+ * NMI unblocking due to IRET.
+ *
+ * Applies to VM-exits due to hardware exception or EPT fault.
+ */
+#define	EXIT_QUAL_NMIUDTI	(1 << 12)
+/*
+ * VMCS interrupt information fields
+ */
+#define	VMCS_INTR_VALID		(1U << 31)
+#define	VMCS_INTR_T_MASK	0x700		/* Interruption-info type */
+#define	VMCS_INTR_T_HWINTR	(0 << 8)
+#define	VMCS_INTR_T_NMI		(2 << 8)
+#define	VMCS_INTR_T_HWEXCEPTION	(3 << 8)
+#define	VMCS_INTR_T_SWINTR	(4 << 8)
+#define	VMCS_INTR_T_PRIV_SWEXCEPTION (5 << 8)
+#define	VMCS_INTR_T_SWEXCEPTION	(6 << 8)
+#define	VMCS_INTR_DEL_ERRCODE	(1 << 11)
+
+/*
+ * VMCS IDT-Vectoring information fields
+ */
+#define	VMCS_IDT_VEC_VALID		(1U << 31)
+#define	VMCS_IDT_VEC_ERRCODE_VALID	(1 << 11)
+
+/*
+ * VMCS Guest interruptibility field
+ */
+#define	VMCS_INTERRUPTIBILITY_STI_BLOCKING	(1 << 0)
+#define	VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING	(1 << 1)
+#define	VMCS_INTERRUPTIBILITY_SMI_BLOCKING	(1 << 2)
+#define	VMCS_INTERRUPTIBILITY_NMI_BLOCKING	(1 << 3)
+
+/*
+ * Exit qualification for EXIT_REASON_INVAL_VMCS
+ */
+#define	EXIT_QUAL_NMI_WHILE_STI_BLOCKING	3
+
+/*
+ * Exit qualification for EPT violation
+ */
+#define	EPT_VIOLATION_DATA_READ		(1UL << 0)
+#define	EPT_VIOLATION_DATA_WRITE	(1UL << 1)
+#define	EPT_VIOLATION_INST_FETCH	(1UL << 2)
+#define	EPT_VIOLATION_GPA_READABLE	(1UL << 3)
+#define	EPT_VIOLATION_GPA_WRITEABLE	(1UL << 4)
+#define	EPT_VIOLATION_GPA_EXECUTABLE	(1UL << 5)
+#define	EPT_VIOLATION_GLA_VALID		(1UL << 7)
+#define	EPT_VIOLATION_XLAT_VALID	(1UL << 8)
+
+/*
+ * Exit qualification for APIC-access VM exit
+ */
+#define	APIC_ACCESS_OFFSET(qual)	((qual) & 0xFFF)
+#define	APIC_ACCESS_TYPE(qual)		(((qual) >> 12) & 0xF)
+
+/*
+ * Exit qualification for APIC-write VM exit
+ */
+#define	APIC_WRITE_OFFSET(qual)		((qual) & 0xFFF)
+
+#endif


Property changes on: trunk/sys/amd64/vmm/intel/vmcs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
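
The VMCS_INTR_* definitions near the end of vmcs.h give the layout of the
exit-interruption-information field: vector in bits 0-7, type in bits 8-10, a
deliver-error-code flag in bit 11 and a valid bit in bit 31, which is how the DDB
command in vmcs.c above picks the field apart. A small stand-alone decode sketch
using those masks (the sample value is fabricated for illustration, not taken from
a real VM exit):

#include <stdio.h>
#include <stdint.h>

/* Masks copied from vmcs.h above. */
#define	VMCS_INTR_VALID		(1U << 31)
#define	VMCS_INTR_T_MASK	0x700
#define	VMCS_INTR_T_HWEXCEPTION	(3 << 8)
#define	VMCS_INTR_DEL_ERRCODE	(1 << 11)

int
main(void)
{
	/* Hardware exception, vector 14 (#PF), error code delivered, valid bit set. */
	uint32_t info = VMCS_INTR_VALID | VMCS_INTR_T_HWEXCEPTION |
	    VMCS_INTR_DEL_ERRCODE | 14;

	if (info & VMCS_INTR_VALID) {
		/* prints: vector 14, type 0x300, errcode valid */
		printf("vector %u, type %#x, errcode %s\n",
		    info & 0xff, info & VMCS_INTR_T_MASK,
		    (info & VMCS_INTR_DEL_ERRCODE) ? "valid" : "absent");
	}
	return (0);
}
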
Added: trunk/sys/amd64/vmm/intel/vmx.c
===================================================================
--- trunk/sys/amd64/vmm/intel/vmx.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmx.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,3442 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 295124 2016-02-01 14:56:11Z grehan $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 295124 2016-02-01 14:56:11Z grehan $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/smp.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/psl.h>
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+#include <machine/segments.h>
+#include <machine/smp.h>
+#include <machine/specialreg.h>
+#include <machine/vmparam.h>
+
+#include <machine/vmm.h>
+#include <machine/vmm_dev.h>
+#include <machine/vmm_instruction_emul.h>
+#include "vmm_lapic.h"
+#include "vmm_host.h"
+#include "vmm_ioport.h"
+#include "vmm_ipi.h"
+#include "vmm_ktr.h"
+#include "vmm_stat.h"
+#include "vatpic.h"
+#include "vlapic.h"
+#include "vlapic_priv.h"
+
+#include "ept.h"
+#include "vmx_cpufunc.h"
+#include "vmx.h"
+#include "vmx_msr.h"
+#include "x86.h"
+#include "vmx_controls.h"
+
+#define	PINBASED_CTLS_ONE_SETTING					\
+	(PINBASED_EXTINT_EXITING	|				\
+	 PINBASED_NMI_EXITING		|				\
+	 PINBASED_VIRTUAL_NMI)
+#define	PINBASED_CTLS_ZERO_SETTING	0
+
+#define PROCBASED_CTLS_WINDOW_SETTING					\
+	(PROCBASED_INT_WINDOW_EXITING	|				\
+	 PROCBASED_NMI_WINDOW_EXITING)
+
+#define	PROCBASED_CTLS_ONE_SETTING 					\
+	(PROCBASED_SECONDARY_CONTROLS	|				\
+	 PROCBASED_MWAIT_EXITING	|				\
+	 PROCBASED_MONITOR_EXITING	|				\
+	 PROCBASED_IO_EXITING		|				\
+	 PROCBASED_MSR_BITMAPS		|				\
+	 PROCBASED_CTLS_WINDOW_SETTING	|				\
+	 PROCBASED_CR8_LOAD_EXITING	|				\
+	 PROCBASED_CR8_STORE_EXITING)
+#define	PROCBASED_CTLS_ZERO_SETTING	\
+	(PROCBASED_CR3_LOAD_EXITING |	\
+	PROCBASED_CR3_STORE_EXITING |	\
+	PROCBASED_IO_BITMAPS)
+
+#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
+#define	PROCBASED_CTLS2_ZERO_SETTING	0
+
+#define	VM_EXIT_CTLS_ONE_SETTING					\
+	(VM_EXIT_HOST_LMA			|			\
+	VM_EXIT_SAVE_EFER			|			\
+	VM_EXIT_LOAD_EFER			|			\
+	VM_EXIT_ACKNOWLEDGE_INTERRUPT)
+
+#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS
+
+#define	VM_ENTRY_CTLS_ONE_SETTING	(VM_ENTRY_LOAD_EFER)
+
+#define	VM_ENTRY_CTLS_ZERO_SETTING					\
+	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|			\
+	VM_ENTRY_INTO_SMM			|			\
+	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
+
+#define	HANDLED		1
+#define	UNHANDLED	0
+
+static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
+static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
+
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
+
+int vmxon_enabled[MAXCPU];
+static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
+
+static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
+static uint32_t exit_ctls, entry_ctls;
+
+static uint64_t cr0_ones_mask, cr0_zeros_mask;
+SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
+	     &cr0_ones_mask, 0, NULL);
+SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
+	     &cr0_zeros_mask, 0, NULL);
+
+static uint64_t cr4_ones_mask, cr4_zeros_mask;
+SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
+	     &cr4_ones_mask, 0, NULL);
+SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
+	     &cr4_zeros_mask, 0, NULL);
+
+static int vmx_initialized;
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
+	   &vmx_initialized, 0, "Intel VMX initialized");
+
+/*
+ * Optional capabilities
+ */
+static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
+
+static int cap_halt_exit;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
+    "HLT triggers a VM-exit");
+
+static int cap_pause_exit;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
+    0, "PAUSE triggers a VM-exit");
+
+static int cap_unrestricted_guest;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
+    &cap_unrestricted_guest, 0, "Unrestricted guests");
+
+static int cap_monitor_trap;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
+    &cap_monitor_trap, 0, "Monitor trap flag");
+
+static int cap_invpcid;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
+    0, "Guests are allowed to use INVPCID");
+
+static int virtual_interrupt_delivery;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
+    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
+
+static int posted_interrupts;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
+    &posted_interrupts, 0, "APICv posted interrupt support");
+
+static int pirvec;
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
+    &pirvec, 0, "APICv posted interrupt vector");
+
+static struct unrhdr *vpid_unr;
+static u_int vpid_alloc_failed;
+SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
+	    &vpid_alloc_failed, 0, NULL);
+
+/*
+ * Use the last page below 4GB as the APIC access address. This address is
+ * occupied by the boot firmware so it is guaranteed that it will not conflict
+ * with a page in system memory.
+ */
+#define	APIC_ACCESS_ADDRESS	0xFFFFF000
+
+static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
+static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
+static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
+static void vmx_inject_pir(struct vlapic *vlapic);
+
+#ifdef KTR
+static const char *
+exit_reason_to_str(int reason)
+{
+	static char reasonbuf[32];
+
+	switch (reason) {
+	case EXIT_REASON_EXCEPTION:
+		return "exception";
+	case EXIT_REASON_EXT_INTR:
+		return "extint";
+	case EXIT_REASON_TRIPLE_FAULT:
+		return "triplefault";
+	case EXIT_REASON_INIT:
+		return "init";
+	case EXIT_REASON_SIPI:
+		return "sipi";
+	case EXIT_REASON_IO_SMI:
+		return "iosmi";
+	case EXIT_REASON_SMI:
+		return "smi";
+	case EXIT_REASON_INTR_WINDOW:
+		return "intrwindow";
+	case EXIT_REASON_NMI_WINDOW:
+		return "nmiwindow";
+	case EXIT_REASON_TASK_SWITCH:
+		return "taskswitch";
+	case EXIT_REASON_CPUID:
+		return "cpuid";
+	case EXIT_REASON_GETSEC:
+		return "getsec";
+	case EXIT_REASON_HLT:
+		return "hlt";
+	case EXIT_REASON_INVD:
+		return "invd";
+	case EXIT_REASON_INVLPG:
+		return "invlpg";
+	case EXIT_REASON_RDPMC:
+		return "rdpmc";
+	case EXIT_REASON_RDTSC:
+		return "rdtsc";
+	case EXIT_REASON_RSM:
+		return "rsm";
+	case EXIT_REASON_VMCALL:
+		return "vmcall";
+	case EXIT_REASON_VMCLEAR:
+		return "vmclear";
+	case EXIT_REASON_VMLAUNCH:
+		return "vmlaunch";
+	case EXIT_REASON_VMPTRLD:
+		return "vmptrld";
+	case EXIT_REASON_VMPTRST:
+		return "vmptrst";
+	case EXIT_REASON_VMREAD:
+		return "vmread";
+	case EXIT_REASON_VMRESUME:
+		return "vmresume";
+	case EXIT_REASON_VMWRITE:
+		return "vmwrite";
+	case EXIT_REASON_VMXOFF:
+		return "vmxoff";
+	case EXIT_REASON_VMXON:
+		return "vmxon";
+	case EXIT_REASON_CR_ACCESS:
+		return "craccess";
+	case EXIT_REASON_DR_ACCESS:
+		return "draccess";
+	case EXIT_REASON_INOUT:
+		return "inout";
+	case EXIT_REASON_RDMSR:
+		return "rdmsr";
+	case EXIT_REASON_WRMSR:
+		return "wrmsr";
+	case EXIT_REASON_INVAL_VMCS:
+		return "invalvmcs";
+	case EXIT_REASON_INVAL_MSR:
+		return "invalmsr";
+	case EXIT_REASON_MWAIT:
+		return "mwait";
+	case EXIT_REASON_MTF:
+		return "mtf";
+	case EXIT_REASON_MONITOR:
+		return "monitor";
+	case EXIT_REASON_PAUSE:
+		return "pause";
+	case EXIT_REASON_MCE_DURING_ENTRY:
+		return "mce-during-entry";
+	case EXIT_REASON_TPR:
+		return "tpr";
+	case EXIT_REASON_APIC_ACCESS:
+		return "apic-access";
+	case EXIT_REASON_GDTR_IDTR:
+		return "gdtridtr";
+	case EXIT_REASON_LDTR_TR:
+		return "ldtrtr";
+	case EXIT_REASON_EPT_FAULT:
+		return "eptfault";
+	case EXIT_REASON_EPT_MISCONFIG:
+		return "eptmisconfig";
+	case EXIT_REASON_INVEPT:
+		return "invept";
+	case EXIT_REASON_RDTSCP:
+		return "rdtscp";
+	case EXIT_REASON_VMX_PREEMPT:
+		return "vmxpreempt";
+	case EXIT_REASON_INVVPID:
+		return "invvpid";
+	case EXIT_REASON_WBINVD:
+		return "wbinvd";
+	case EXIT_REASON_XSETBV:
+		return "xsetbv";
+	case EXIT_REASON_APIC_WRITE:
+		return "apic-write";
+	default:
+		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
+		return (reasonbuf);
+	}
+}
+#endif	/* KTR */
+
+static int
+vmx_allow_x2apic_msrs(struct vmx *vmx)
+{
+	int i, error;
+
+	error = 0;
+
+	/*
+	 * Allow readonly access to the following x2APIC MSRs from the guest.
+	 */
+	error += guest_msr_ro(vmx, MSR_APIC_ID);
+	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
+	error += guest_msr_ro(vmx, MSR_APIC_LDR);
+	error += guest_msr_ro(vmx, MSR_APIC_SVR);
+
+	for (i = 0; i < 8; i++)
+		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
+
+	for (i = 0; i < 8; i++)
+		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
+	
+	for (i = 0; i < 8; i++)
+		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
+
+	error += guest_msr_ro(vmx, MSR_APIC_ESR);
+	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
+	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
+	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
+	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
+	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
+	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
+	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
+	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
+	error += guest_msr_ro(vmx, MSR_APIC_ICR);
+
+	/*
+	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
+	 *
+	 * These registers get special treatment described in the section
+	 * "Virtualizing MSR-Based APIC Accesses".
+	 */
+	error += guest_msr_rw(vmx, MSR_APIC_TPR);
+	error += guest_msr_rw(vmx, MSR_APIC_EOI);
+	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
+
+	return (error);
+}
+
+u_long
+vmx_fix_cr0(u_long cr0)
+{
+
+	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
+}
+
+u_long
+vmx_fix_cr4(u_long cr4)
+{
+
+	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
+}
+
+static void
+vpid_free(int vpid)
+{
+	if (vpid < 0 || vpid > 0xffff)
+		panic("vpid_free: invalid vpid %d", vpid);
+
+	/*
+	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
+	 * the unit number allocator.
+	 */
+
+	if (vpid > VM_MAXCPU)
+		free_unr(vpid_unr, vpid);
+}
+
+static void
+vpid_alloc(uint16_t *vpid, int num)
+{
+	int i, x;
+
+	if (num <= 0 || num > VM_MAXCPU)
+		panic("invalid number of vpids requested: %d", num);
+
+	/*
+	 * If the "enable vpid" execution control is not enabled then the
+	 * VPID is required to be 0 for all vcpus.
+	 */
+	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
+		for (i = 0; i < num; i++)
+			vpid[i] = 0;
+		return;
+	}
+
+	/*
+	 * Allocate a unique VPID for each vcpu from the unit number allocator.
+	 */
+	for (i = 0; i < num; i++) {
+		x = alloc_unr(vpid_unr);
+		if (x == -1)
+			break;
+		else
+			vpid[i] = x;
+	}
+
+	if (i < num) {
+		atomic_add_int(&vpid_alloc_failed, 1);
+
+		/*
+		 * If the unit number allocator does not have enough unique
+		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
+		 *
+		 * These VPIDs are not unique across VMs but this does not
+		 * affect correctness because the combined mappings are also
+		 * tagged with the EP4TA which is unique for each VM.
+		 *
+		 * It is still sub-optimal because the invvpid will invalidate
+		 * combined mappings for a particular VPID across all EP4TAs.
+		 */
+		while (i-- > 0)
+			vpid_free(vpid[i]);
+
+		for (i = 0; i < num; i++)
+			vpid[i] = i + 1;
+	}
+}
+
+static void
+vpid_init(void)
+{
+	/*
+	 * VPID 0 is required when the "enable VPID" execution control is
+	 * disabled.
+	 *
+	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
+	 * unit number allocator does not have sufficient unique VPIDs to
+	 * satisfy the allocation.
+	 *
+	 * The remaining VPIDs are managed by the unit number allocator.
+	 */
+	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
+}
+
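/*
 * Illustrative sketch, not part of vmx.c: the fallback path of vpid_alloc()
 * above, modeled in user space.  alloc_unit()/free_unit() stand in for the
 * kernel's alloc_unr()/free_unr(), and VM_MAXCPU is given a made-up value.
 * When the toy allocator runs dry, all vcpus of the VM fall back to the shared
 * [1, VM_MAXCPU] range, as the comment in vpid_alloc() describes.
 */
#include <stdio.h>

#define	VM_MAXCPU	16		/* illustrative value only */

static int next_unit = VM_MAXCPU + 1;
static int units_left = 2;		/* pretend the allocator is nearly exhausted */

static int  alloc_unit(void) { return (units_left-- > 0 ? next_unit++ : -1); }
static void free_unit(int u)  { (void)u; /* no-op in this toy model */ }

static void
vpid_alloc_sketch(unsigned short *vpid, int num)
{
	int i, x;

	for (i = 0; i < num; i++) {
		x = alloc_unit();
		if (x == -1)
			break;
		vpid[i] = x;
	}
	if (i < num) {
		/* Allocator exhausted: release what we got, use [1, num] instead. */
		while (i-- > 0)
			free_unit(vpid[i]);
		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

int
main(void)
{
	unsigned short vpid[4];
	int i;

	vpid_alloc_sketch(vpid, 4);
	for (i = 0; i < 4; i++)
		printf("vcpu%d -> vpid %u\n", i, vpid[i]);
	return (0);
}
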
+static void
+vmx_disable(void *arg __unused)
+{
+	struct invvpid_desc invvpid_desc = { 0 };
+	struct invept_desc invept_desc = { 0 };
+
+	if (vmxon_enabled[curcpu]) {
+		/*
+		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
+		 *
+		 * VMXON or VMXOFF are not required to invalidate any TLB
+		 * caching structures. This prevents potential retention of
+		 * cached information in the TLB between distinct VMX episodes.
+		 */
+		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
+		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
+		vmxoff();
+	}
+	load_cr4(rcr4() & ~CR4_VMXE);
+}
+
+static int
+vmx_cleanup(void)
+{
+	
+	if (pirvec != 0)
+		vmm_ipi_free(pirvec);
+
+	if (vpid_unr != NULL) {
+		delete_unrhdr(vpid_unr);
+		vpid_unr = NULL;
+	}
+
+	smp_rendezvous(NULL, vmx_disable, NULL, NULL);
+
+	return (0);
+}
+
+static void
+vmx_enable(void *arg __unused)
+{
+	int error;
+	uint64_t feature_control;
+
+	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
+	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
+	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
+		wrmsr(MSR_IA32_FEATURE_CONTROL,
+		    feature_control | IA32_FEATURE_CONTROL_VMX_EN |
+		    IA32_FEATURE_CONTROL_LOCK);
+	}
+
+	load_cr4(rcr4() | CR4_VMXE);
+
+	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
+	error = vmxon(vmxon_region[curcpu]);
+	if (error == 0)
+		vmxon_enabled[curcpu] = 1;
+}
+
+static void
+vmx_restore(void)
+{
+
+	if (vmxon_enabled[curcpu])
+		vmxon(vmxon_region[curcpu]);
+}
+
+static int
+vmx_init(int ipinum)
+{
+	int error, use_tpr_shadow;
+	uint64_t basic, fixed0, fixed1, feature_control;
+	uint32_t tmp, procbased2_vid_bits;
+
+	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
+	if (!(cpu_feature2 & CPUID2_VMX)) {
+		printf("vmx_init: processor does not support VMX operation\n");
+		return (ENXIO);
+	}
+
+	/*
+	 * If the lock bit of MSR_IA32_FEATURE_CONTROL (bit 0) is set then the
+	 * VMXON enable bit (bit 2) must also be set; otherwise VMX operation
+	 * has been disabled by the BIOS and cannot be enabled here.
+	 */
+	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
+	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
+	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
+		printf("vmx_init: VMX operation disabled by BIOS\n");
+		return (ENXIO);
+	}
+
+	/*
+	 * Verify capabilities MSR_VMX_BASIC:
+	 * - bit 54 indicates support for INS/OUTS decoding
+	 */
+	basic = rdmsr(MSR_VMX_BASIC);
+	if ((basic & (1UL << 54)) == 0) {
+		printf("vmx_init: processor does not support desired basic "
+		    "capabilities\n");
+		return (EINVAL);
+	}
+
+	/* Check support for primary processor-based VM-execution controls */
+	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
+			       MSR_VMX_TRUE_PROCBASED_CTLS,
+			       PROCBASED_CTLS_ONE_SETTING,
+			       PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
+	if (error) {
+		printf("vmx_init: processor does not support desired primary "
+		       "processor-based controls\n");
+		return (error);
+	}
+
+	/* Clear the processor-based ctl bits that are set on demand */
+	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
+
+	/* Check support for secondary processor-based VM-execution controls */
+	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
+			       MSR_VMX_PROCBASED_CTLS2,
+			       PROCBASED_CTLS2_ONE_SETTING,
+			       PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
+	if (error) {
+		printf("vmx_init: processor does not support desired secondary "
+		       "processor-based controls\n");
+		return (error);
+	}
+
+	/* Check support for VPID */
+	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
+			       PROCBASED2_ENABLE_VPID, 0, &tmp);
+	if (error == 0)
+		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
+
+	/* Check support for pin-based VM-execution controls */
+	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
+			       MSR_VMX_TRUE_PINBASED_CTLS,
+			       PINBASED_CTLS_ONE_SETTING,
+			       PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
+	if (error) {
+		printf("vmx_init: processor does not support desired "
+		       "pin-based controls\n");
+		return (error);
+	}
+
+	/* Check support for VM-exit controls */
+	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
+			       VM_EXIT_CTLS_ONE_SETTING,
+			       VM_EXIT_CTLS_ZERO_SETTING,
+			       &exit_ctls);
+	if (error) {
+		printf("vmx_init: processor does not support desired "
+		    "exit controls\n");
+		return (error);
+	}
+
+	/* Check support for VM-entry controls */
+	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
+	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
+	    &entry_ctls);
+	if (error) {
+		printf("vmx_init: processor does not support desired "
+		    "entry controls\n");
+		return (error);
+	}
+
+	/*
+	 * Check support for optional features by testing them
+	 * as individual bits
+	 */
+	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
+					MSR_VMX_TRUE_PROCBASED_CTLS,
+					PROCBASED_HLT_EXITING, 0,
+					&tmp) == 0);
+
+	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
+					MSR_VMX_PROCBASED_CTLS,
+					PROCBASED_MTF, 0,
+					&tmp) == 0);
+
+	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
+					 MSR_VMX_TRUE_PROCBASED_CTLS,
+					 PROCBASED_PAUSE_EXITING, 0,
+					 &tmp) == 0);
+
+	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
+					MSR_VMX_PROCBASED_CTLS2,
+					PROCBASED2_UNRESTRICTED_GUEST, 0,
+				        &tmp) == 0);
+
+	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
+	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
+	    &tmp) == 0);
+
+	/*
+	 * Check support for virtual interrupt delivery.
+	 */
+	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
+	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
+	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
+	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
+
+	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
+	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
+	    &tmp) == 0);
+
+	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
+	    procbased2_vid_bits, 0, &tmp);
+	if (error == 0 && use_tpr_shadow) {
+		virtual_interrupt_delivery = 1;
+		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
+		    &virtual_interrupt_delivery);
+	}
+
+	if (virtual_interrupt_delivery) {
+		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
+		procbased_ctls2 |= procbased2_vid_bits;
+		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
+
+		/*
+		 * No need to emulate accesses to %CR8 if virtual
+		 * interrupt delivery is enabled.
+		 */
+		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
+		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
+
+		/*
+		 * Check for Posted Interrupts only if Virtual Interrupt
+		 * Delivery is enabled.
+		 */
+		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
+		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
+		    &tmp);
+		if (error == 0) {
+			pirvec = vmm_ipi_alloc();
+			if (pirvec == 0) {
+				if (bootverbose) {
+					printf("vmx_init: unable to allocate "
+					    "posted interrupt vector\n");
+				}
+			} else {
+				posted_interrupts = 1;
+				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
+				    &posted_interrupts);
+			}
+		}
+	}
+
+	if (posted_interrupts)
+		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
+
+	/* Initialize EPT */
+	error = ept_init(ipinum);
+	if (error) {
+		printf("vmx_init: ept initialization failed (%d)\n", error);
+		return (error);
+	}
+
+	/*
+	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
+	 */
+	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
+	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
+	cr0_ones_mask = fixed0 & fixed1;
+	cr0_zeros_mask = ~fixed0 & ~fixed1;
+
+	/*
+	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
+	 * if unrestricted guest execution is allowed.
+	 */
+	if (cap_unrestricted_guest)
+		cr0_ones_mask &= ~(CR0_PG | CR0_PE);
+
+	/*
+	 * Do not allow the guest to set CR0_NW or CR0_CD.
+	 */
+	cr0_zeros_mask |= (CR0_NW | CR0_CD);
+
+	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
+	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
+	cr4_ones_mask = fixed0 & fixed1;
+	cr4_zeros_mask = ~fixed0 & ~fixed1;
+
+	vpid_init();
+
+	vmx_msr_init();
+
+	/* enable VMX operation */
+	smp_rendezvous(NULL, vmx_enable, NULL, NULL);
+
+	vmx_initialized = 1;
+
+	return (0);
+}
+
+static void
+vmx_trigger_hostintr(int vector)
+{
+	uintptr_t func;
+	struct gate_descriptor *gd;
+
+	gd = &idt[vector];
+
+	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
+	    "invalid vector %d", vector));
+	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
+	    vector));
+	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
+	    "has invalid type %d", vector, gd->gd_type));
+	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
+	    "has invalid dpl %d", vector, gd->gd_dpl));
+	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
+	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
+	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
+	    "IST %d", vector, gd->gd_ist));
+
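+	/*
+	 * Reassemble the handler address from the split offset fields of the
+	 * gate descriptor and vector to it directly.
+	 */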
+	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
+	vmx_call_isr(func);
+}
+
+static int
+vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
+{
+	int error, mask_ident, shadow_ident;
+	uint64_t mask_value;
+
+	if (which != 0 && which != 4)
+		panic("vmx_setup_cr_shadow: unknown cr%d", which);
+
+	if (which == 0) {
+		mask_ident = VMCS_CR0_MASK;
+		mask_value = cr0_ones_mask | cr0_zeros_mask;
+		shadow_ident = VMCS_CR0_SHADOW;
+	} else {
+		mask_ident = VMCS_CR4_MASK;
+		mask_value = cr4_ones_mask | cr4_zeros_mask;
+		shadow_ident = VMCS_CR4_SHADOW;
+	}
+
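+	/*
+	 * Bits set in the guest/host mask are owned by the host: guest reads
+	 * of those bits return the read-shadow value and guest writes that
+	 * would change them from the shadow value cause a VM-exit.
+	 */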
+	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
+	if (error)
+		return (error);
+
+	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
+	if (error)
+		return (error);
+
+	return (0);
+}
+#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
+#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))
+
+static void *
+vmx_vminit(struct vm *vm, pmap_t pmap)
+{
+	uint16_t vpid[VM_MAXCPU];
+	int i, error;
+	struct vmx *vmx;
+	struct vmcs *vmcs;
+	uint32_t exc_bitmap;
+
+	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
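+	/*
+	 * The VMCS, APIC page and MSR bitmap embedded in 'struct vmx' must be
+	 * page aligned. malloc(9) allocations of at least a page are expected
+	 * to be page aligned, so this is only a sanity check.
+	 */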
+	if ((uintptr_t)vmx & PAGE_MASK) {
+		panic("malloc of struct vmx not aligned on %d byte boundary",
+		      PAGE_SIZE);
+	}
+	vmx->vm = vm;
+
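+	/*
+	 * Derive the EPT pointer from the physical address of the nested
+	 * page table root (the pmap's PML4 page).
+	 */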
+	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
+
+	/*
+	 * Clean up EPTP-tagged guest physical and combined mappings
+	 *
+	 * VMX transitions are not required to invalidate any guest physical
+	 * mappings. So, it may be possible for stale guest physical mappings
+	 * to be present in the processor TLBs.
+	 *
+	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
+	 */
+	ept_invalidate_mappings(vmx->eptp);
+
+	msr_bitmap_initialize(vmx->msr_bitmap);
+
+	/*
+	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
+	 * The guest FSBASE and GSBASE are saved and restored during
+	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
+	 * always restored from the vmcs host state area on vm-exit.
+	 *
+	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
+	 * how they are saved/restored, so they can be accessed directly by the
+	 * guest.
+	 *
+	 * MSR_EFER is saved and restored in the guest VMCS area on a
+	 * VM exit and entry respectively. It is also restored from the
+	 * host VMCS area on a VM exit.
+	 *
+	 * The TSC MSR is exposed read-only. Writes are disallowed as
+	 * they would impact the host TSC.  If the guest does a write,
+	 * the "use TSC offsetting" execution control is enabled and the
+	 * difference between the host TSC and the guest TSC is written
+	 * into the TSC offset in the VMCS.
+	 */
+	if (guest_msr_rw(vmx, MSR_GSBASE) ||
+	    guest_msr_rw(vmx, MSR_FSBASE) ||
+	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
+	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
+	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
+	    guest_msr_rw(vmx, MSR_EFER) ||
+	    guest_msr_ro(vmx, MSR_TSC))
+		panic("vmx_vminit: error setting guest msr access");
+
+	vpid_alloc(vpid, VM_MAXCPU);
+
+	if (virtual_interrupt_delivery) {
+		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
+		    APIC_ACCESS_ADDRESS);
+		/* XXX this should really return an error to the caller */
+		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
+	}
+
+	for (i = 0; i < VM_MAXCPU; i++) {
+		vmcs = &vmx->vmcs[i];
+		vmcs->identifier = vmx_revision();
+		error = vmclear(vmcs);
+		if (error != 0) {
+			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
+			      error, i);
+		}
+
+		vmx_msr_guest_init(vmx, i);
+
+		error = vmcs_init(vmcs);
+		KASSERT(error == 0, ("vmcs_init error %d", error));
+
+		VMPTRLD(vmcs);
+		error = 0;
+		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
+		error += vmwrite(VMCS_EPTP, vmx->eptp);
+		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
+		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
+		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
+		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
+		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
+		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
+		error += vmwrite(VMCS_VPID, vpid[i]);
+
+		/* exception bitmap */
+		if (vcpu_trace_exceptions(vm, i))
+			exc_bitmap = 0xffffffff;
+		else
+			exc_bitmap = 1 << IDT_MC;
+		error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
+
+		if (virtual_interrupt_delivery) {
+			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
+			error += vmwrite(VMCS_VIRTUAL_APIC,
+			    vtophys(&vmx->apic_page[i]));
+			error += vmwrite(VMCS_EOI_EXIT0, 0);
+			error += vmwrite(VMCS_EOI_EXIT1, 0);
+			error += vmwrite(VMCS_EOI_EXIT2, 0);
+			error += vmwrite(VMCS_EOI_EXIT3, 0);
+		}
+		if (posted_interrupts) {
+			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
+			error += vmwrite(VMCS_PIR_DESC,
+			    vtophys(&vmx->pir_desc[i]));
+		}
+		VMCLEAR(vmcs);
+		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
+
+		vmx->cap[i].set = 0;
+		vmx->cap[i].proc_ctls = procbased_ctls;
+		vmx->cap[i].proc_ctls2 = procbased_ctls2;
+
+		vmx->state[i].nextrip = ~0;
+		vmx->state[i].lastcpu = NOCPU;
+		vmx->state[i].vpid = vpid[i];
+
+		/*
+		 * Set up the CR0/4 shadows, and init the read shadow
+		 * to the power-on register value from the Intel Sys Arch.
+		 *  CR0 - 0x60000010
+		 *  CR4 - 0
+		 */
+		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
+		if (error != 0)
+			panic("vmx_setup_cr0_shadow %d", error);
+
+		error = vmx_setup_cr4_shadow(vmcs, 0);
+		if (error != 0)
+			panic("vmx_setup_cr4_shadow %d", error);
+
+		vmx->ctx[i].pmap = pmap;
+	}
+
+	return (vmx);
+}
+
+static int
+vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
+{
+	int handled, func;
+
+	func = vmxctx->guest_rax;
+
+	handled = x86_emulate_cpuid(vm, vcpu,
+				    (uint32_t*)(&vmxctx->guest_rax),
+				    (uint32_t*)(&vmxctx->guest_rbx),
+				    (uint32_t*)(&vmxctx->guest_rcx),
+				    (uint32_t*)(&vmxctx->guest_rdx));
+	return (handled);
+}
+
+static __inline void
+vmx_run_trace(struct vmx *vmx, int vcpu)
+{
+#ifdef KTR
+	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
+#endif
+}
+
+static __inline void
+vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
+	       int handled)
+{
+#ifdef KTR
+	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
+		 handled ? "handled" : "unhandled",
+		 exit_reason_to_str(exit_reason), rip);
+#endif
+}
+
+static __inline void
+vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
+{
+#ifdef KTR
+	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
+#endif
+}
+
+static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
+static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
+
+/*
+ * Invalidate guest mappings identified by its vpid from the TLB.
+ */
+static __inline void
+vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
+{
+	struct vmxstate *vmxstate;
+	struct invvpid_desc invvpid_desc;
+
+	vmxstate = &vmx->state[vcpu];
+	if (vmxstate->vpid == 0)
+		return;
+
+	if (!running) {
+		/*
+		 * Set the 'lastcpu' to an invalid host cpu.
+		 *
+		 * This will invalidate TLB entries tagged with the vcpu's
+		 * vpid the next time it runs via vmx_set_pcpu_defaults().
+		 */
+		vmxstate->lastcpu = NOCPU;
+		return;
+	}
+
+	KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
+	    "critical section", __func__, vcpu));
+
+	/*
+	 * Invalidate all mappings tagged with 'vpid'
+	 *
+	 * We do this because this vcpu was executing on a different host
+	 * cpu when it last ran. We do not track whether it invalidated
+	 * mappings associated with its 'vpid' during that run. So we must
+	 * assume that the mappings associated with 'vpid' on 'curcpu' are
+	 * stale and invalidate them.
+	 *
+	 * Note that we incur this penalty only when the scheduler chooses to
+	 * move the thread associated with this vcpu between host cpus.
+	 *
+	 * Note also that this will invalidate mappings tagged with 'vpid'
+	 * for "all" EP4TAs.
+	 */
+	if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
+		invvpid_desc._res1 = 0;
+		invvpid_desc._res2 = 0;
+		invvpid_desc.vpid = vmxstate->vpid;
+		invvpid_desc.linear_addr = 0;
+		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
+		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
+	} else {
+		/*
+		 * The invvpid can be skipped if an invept is going to
+		 * be performed before entering the guest. The invept
+		 * will invalidate combined mappings tagged with
+		 * 'vmx->eptp' for all vpids.
+		 */
+		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
+	}
+}
+
+static void
+vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
+{
+	struct vmxstate *vmxstate;
+
+	vmxstate = &vmx->state[vcpu];
+	if (vmxstate->lastcpu == curcpu)
+		return;
+
+	vmxstate->lastcpu = curcpu;
+
+	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
+
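+	/*
+	 * The host TR, GDTR and GS bases differ between physical cpus so
+	 * they must be refreshed in the VMCS whenever the vcpu moves to a
+	 * new host cpu.
+	 */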
+	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
+	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
+	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
+	vmx_invvpid(vmx, vcpu, pmap, 1);
+}
+
+/*
+ * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
+ */
+CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
+
+static void __inline
+vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
+{
+
+	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
+		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
+		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
+		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
+	}
+}
+
+static void __inline
+vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
+{
+
+	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
+	    ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
+	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
+	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
+	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
+}
+
+static void __inline
+vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
+{
+
+	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
+		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
+		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
+		VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
+	}
+}
+
+static void __inline
+vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
+{
+
+	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
+	    ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
+	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
+	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
+	VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
+}
+
+int
+vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
+{
+	int error;
+
+	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
+		vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET;
+		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
+		VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
+	}
+
+	error = vmwrite(VMCS_TSC_OFFSET, offset);
+
+	return (error);
+}
+
+#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
+			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
+#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
+			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
+
+static void
+vmx_inject_nmi(struct vmx *vmx, int vcpu)
+{
+	uint32_t gi, info;
+
+	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+	KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
+	    "interruptibility-state %#x", gi));
+
+	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
+	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
+	    "VM-entry interruption information %#x", info));
+
+	/*
+	 * Inject the virtual NMI. The vector must be the NMI IDT entry
+	 * or the VMCS entry check will fail.
+	 */
+	info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
+	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
+
+	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
+
+	/* Clear the request */
+	vm_nmi_clear(vmx->vm, vcpu);
+}
+
+static void
+vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
+    uint64_t guestrip)
+{
+	int vector, need_nmi_exiting, extint_pending;
+	uint64_t rflags, entryinfo;
+	uint32_t gi, info;
+
+	if (vmx->state[vcpu].nextrip != guestrip) {
+		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+		if (gi & HWINTR_BLOCKING) {
+			VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
+			    "cleared due to rip change: %#lx/%#lx",
+			    vmx->state[vcpu].nextrip, guestrip);
+			gi &= ~HWINTR_BLOCKING;
+			vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
+		}
+	}
+
+	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
+		KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
+		    "intinfo is not valid: %#lx", __func__, entryinfo));
+
+		info = vmcs_read(VMCS_ENTRY_INTR_INFO);
+		KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
+		     "pending exception: %#lx/%#x", __func__, entryinfo, info));
+
+		info = entryinfo;
+		vector = info & 0xff;
+		if (vector == IDT_BP || vector == IDT_OF) {
+			/*
+			 * VT-x requires #BP and #OF to be injected as software
+			 * exceptions.
+			 */
+			info &= ~VMCS_INTR_T_MASK;
+			info |= VMCS_INTR_T_SWEXCEPTION;
+		}
+
+		if (info & VMCS_INTR_DEL_ERRCODE)
+			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
+
+		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
+	}
+
+	if (vm_nmi_pending(vmx->vm, vcpu)) {
+		/*
+		 * If there are no conditions blocking NMI injection then
+		 * inject it directly here; otherwise enable "NMI window
+		 * exiting" to inject it as soon as we can.
+		 *
+		 * We also check for STI_BLOCKING because some implementations
+		 * don't allow NMI injection in this case. If we are running
+		 * on a processor that doesn't have this restriction it will
+		 * immediately exit and the NMI will be injected in the
+		 * "NMI window exiting" handler.
+		 */
+		need_nmi_exiting = 1;
+		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
+			info = vmcs_read(VMCS_ENTRY_INTR_INFO);
+			if ((info & VMCS_INTR_VALID) == 0) {
+				vmx_inject_nmi(vmx, vcpu);
+				need_nmi_exiting = 0;
+			} else {
+				VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
+				    "due to VM-entry intr info %#x", info);
+			}
+		} else {
+			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
+			    "Guest Interruptibility-state %#x", gi);
+		}
+
+		if (need_nmi_exiting)
+			vmx_set_nmi_window_exiting(vmx, vcpu);
+	}
+
+	extint_pending = vm_extint_pending(vmx->vm, vcpu);
+
+	if (!extint_pending && virtual_interrupt_delivery) {
+		vmx_inject_pir(vlapic);
+		return;
+	}
+
+	/*
+	 * If interrupt-window exiting is already in effect then don't bother
+	 * checking for pending interrupts. This is just an optimization and
+	 * not needed for correctness.
+	 */
+	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
+		VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
+		    "pending int_window_exiting");
+		return;
+	}
+
+	if (!extint_pending) {
+		/* Ask the local apic for a vector to inject */
+		if (!vlapic_pending_intr(vlapic, &vector))
+			return;
+
+		/*
+		 * From the Intel SDM, Volume 3, Section "Maskable
+		 * Hardware Interrupts":
+		 * - maskable interrupt vectors [16,255] can be delivered
+		 *   through the local APIC.
+		 */
+		KASSERT(vector >= 16 && vector <= 255,
+		    ("invalid vector %d from local APIC", vector));
+	} else {
+		/* Ask the legacy pic for a vector to inject */
+		vatpic_pending_intr(vmx->vm, &vector);
+
+		/*
+		 * From the Intel SDM, Volume 3, Section "Maskable
+		 * Hardware Interrupts":
+		 * - maskable interrupt vectors [0,255] can be delivered
+		 *   through the INTR pin.
+		 */
+		KASSERT(vector >= 0 && vector <= 255,
+		    ("invalid vector %d from INTR", vector));
+	}
+
+	/* Check RFLAGS.IF and the interruptibility state of the guest */
+	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
+	if ((rflags & PSL_I) == 0) {
+		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
+		    "rflags %#lx", vector, rflags);
+		goto cantinject;
+	}
+
+	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+	if (gi & HWINTR_BLOCKING) {
+		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
+		    "Guest Interruptibility-state %#x", vector, gi);
+		goto cantinject;
+	}
+
+	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
+	if (info & VMCS_INTR_VALID) {
+		/*
+		 * This is expected and could happen for multiple reasons:
+		 * - A vectoring VM-entry was aborted due to astpending
+		 * - A VM-exit happened during event injection.
+		 * - An exception was injected above.
+		 * - An NMI was injected above or after "NMI window exiting"
+		 */
+		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
+		    "VM-entry intr info %#x", vector, info);
+		goto cantinject;
+	}
+
+	/* Inject the interrupt */
+	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
+	info |= vector;
+	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
+
+	if (!extint_pending) {
+		/* Update the Local APIC ISR */
+		vlapic_intr_accepted(vlapic, vector);
+	} else {
+		vm_extint_clear(vmx->vm, vcpu);
+		vatpic_intr_accepted(vmx->vm, vector);
+
+		/*
+		 * After we accepted the current ExtINT the PIC may
+		 * have posted another one.  If that is the case, set
+		 * the Interrupt Window Exiting execution control so
+		 * we can inject that one too.
+		 *
+		 * Also, interrupt window exiting allows us to inject any
+		 * pending APIC vector that was preempted by the ExtINT
+		 * as soon as possible. This applies both for the software
+		 * emulated vlapic and the hardware assisted virtual APIC.
+		 */
+		vmx_set_int_window_exiting(vmx, vcpu);
+	}
+
+	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
+
+	return;
+
+cantinject:
+	/*
+	 * Set the Interrupt Window Exiting execution control so we can inject
+	 * the interrupt as soon as the blocking condition goes away.
+	 */
+	vmx_set_int_window_exiting(vmx, vcpu);
+}
+
+/*
+ * If the Virtual NMIs execution control is '1' then the logical processor
+ * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
+ * the VMCS. An IRET instruction in VMX non-root operation will remove any
+ * virtual-NMI blocking.
+ *
+ * This unblocking occurs even if the IRET causes a fault. In this case the
+ * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
+ */
+static void
+vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
+{
+	uint32_t gi;
+
+	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
+	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
+	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
+}
+
+static void
+vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
+{
+	uint32_t gi;
+
+	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
+	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
+	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
+}
+
+static void
+vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
+{
+	uint32_t gi;
+
+	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
+	    ("NMI blocking is not in effect %#x", gi));
+}
+
+static int
+vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
+{
+	struct vmxctx *vmxctx;
+	uint64_t xcrval;
+	const struct xsave_limits *limits;
+
+	vmxctx = &vmx->ctx[vcpu];
+	limits = vmm_get_xsave_limits();
+
+	/*
+	 * Note that the processor raises a GP# fault on its own if
+	 * xsetbv is executed for CPL != 0, so we do not have to
+	 * emulate that fault here.
+	 */
+
+	/* Only xcr0 is supported. */
+	if (vmxctx->guest_rcx != 0) {
+		vm_inject_gp(vmx->vm, vcpu);
+		return (HANDLED);
+	}
+
+	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
+	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
+		vm_inject_ud(vmx->vm, vcpu);
+		return (HANDLED);
+	}
+
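+	/* xsetbv supplies the new XCR value in %edx:%eax. */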
+	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
+	if ((xcrval & ~limits->xcr0_allowed) != 0) {
+		vm_inject_gp(vmx->vm, vcpu);
+		return (HANDLED);
+	}
+
+	if (!(xcrval & XFEATURE_ENABLED_X87)) {
+		vm_inject_gp(vmx->vm, vcpu);
+		return (HANDLED);
+	}
+
+	/* AVX (YMM_Hi128) requires SSE. */
+	if (xcrval & XFEATURE_ENABLED_AVX &&
+	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
+		vm_inject_gp(vmx->vm, vcpu);
+		return (HANDLED);
+	}
+
+	/*
+	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
+	 * ZMM_Hi256, and Hi16_ZMM.
+	 */
+	if (xcrval & XFEATURE_AVX512 &&
+	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
+	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
+		vm_inject_gp(vmx->vm, vcpu);
+		return (HANDLED);
+	}
+
+	/*
+	 * Intel MPX requires both bound register state flags to be
+	 * set.
+	 */
+	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
+	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
+		vm_inject_gp(vmx->vm, vcpu);
+		return (HANDLED);
+	}
+
+	/*
+	 * This runs "inside" vmrun() with the guest's FPU state, so
+	 * modifying xcr0 directly modifies the guest's xcr0, not the
+	 * host's.
+	 */
+	load_xcr(0, xcrval);
+	return (HANDLED);
+}
+
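+/*
+ * Map the register encoding used in the VM-exit qualification and
+ * instruction information fields to the corresponding guest register.
+ * %rsp is special because it is maintained in the VMCS guest-state area
+ * rather than in the software-managed vmxctx.
+ */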
+static uint64_t
+vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
+{
+	const struct vmxctx *vmxctx;
+
+	vmxctx = &vmx->ctx[vcpu];
+
+	switch (ident) {
+	case 0:
+		return (vmxctx->guest_rax);
+	case 1:
+		return (vmxctx->guest_rcx);
+	case 2:
+		return (vmxctx->guest_rdx);
+	case 3:
+		return (vmxctx->guest_rbx);
+	case 4:
+		return (vmcs_read(VMCS_GUEST_RSP));
+	case 5:
+		return (vmxctx->guest_rbp);
+	case 6:
+		return (vmxctx->guest_rsi);
+	case 7:
+		return (vmxctx->guest_rdi);
+	case 8:
+		return (vmxctx->guest_r8);
+	case 9:
+		return (vmxctx->guest_r9);
+	case 10:
+		return (vmxctx->guest_r10);
+	case 11:
+		return (vmxctx->guest_r11);
+	case 12:
+		return (vmxctx->guest_r12);
+	case 13:
+		return (vmxctx->guest_r13);
+	case 14:
+		return (vmxctx->guest_r14);
+	case 15:
+		return (vmxctx->guest_r15);
+	default:
+		panic("invalid vmx register %d", ident);
+	}
+}
+
+static void
+vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
+{
+	struct vmxctx *vmxctx;
+
+	vmxctx = &vmx->ctx[vcpu];
+
+	switch (ident) {
+	case 0:
+		vmxctx->guest_rax = regval;
+		break;
+	case 1:
+		vmxctx->guest_rcx = regval;
+		break;
+	case 2:
+		vmxctx->guest_rdx = regval;
+		break;
+	case 3:
+		vmxctx->guest_rbx = regval;
+		break;
+	case 4:
+		vmcs_write(VMCS_GUEST_RSP, regval);
+		break;
+	case 5:
+		vmxctx->guest_rbp = regval;
+		break;
+	case 6:
+		vmxctx->guest_rsi = regval;
+		break;
+	case 7:
+		vmxctx->guest_rdi = regval;
+		break;
+	case 8:
+		vmxctx->guest_r8 = regval;
+		break;
+	case 9:
+		vmxctx->guest_r9 = regval;
+		break;
+	case 10:
+		vmxctx->guest_r10 = regval;
+		break;
+	case 11:
+		vmxctx->guest_r11 = regval;
+		break;
+	case 12:
+		vmxctx->guest_r12 = regval;
+		break;
+	case 13:
+		vmxctx->guest_r13 = regval;
+		break;
+	case 14:
+		vmxctx->guest_r14 = regval;
+		break;
+	case 15:
+		vmxctx->guest_r15 = regval;
+		break;
+	default:
+		panic("invalid vmx register %d", ident);
+	}
+}
+
+static int
+vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+	uint64_t crval, regval;
+
+	/* We only handle mov to %cr0 at this time */
+	if ((exitqual & 0xf0) != 0x00)
+		return (UNHANDLED);
+
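+	/* Bits 11:8 of the exit qualification identify the source register. */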
+	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+
+	vmcs_write(VMCS_CR0_SHADOW, regval);
+
+	crval = regval | cr0_ones_mask;
+	crval &= ~cr0_zeros_mask;
+	vmcs_write(VMCS_GUEST_CR0, crval);
+
+	if (regval & CR0_PG) {
+		uint64_t efer, entry_ctls;
+
+		/*
+		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
+		 * the "IA-32e mode guest" bit in VM-entry control must be
+		 * equal.
+		 */
+		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
+		if (efer & EFER_LME) {
+			efer |= EFER_LMA;
+			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
+			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
+			entry_ctls |= VM_ENTRY_GUEST_LMA;
+			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
+		}
+	}
+
+	return (HANDLED);
+}
+
+static int
+vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+	uint64_t crval, regval;
+
+	/* We only handle mov to %cr4 at this time */
+	if ((exitqual & 0xf0) != 0x00)
+		return (UNHANDLED);
+
+	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+
+	vmcs_write(VMCS_CR4_SHADOW, regval);
+
+	crval = regval | cr4_ones_mask;
+	crval &= ~cr4_zeros_mask;
+	vmcs_write(VMCS_GUEST_CR4, crval);
+
+	return (HANDLED);
+}
+
+static int
+vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+	struct vlapic *vlapic;
+	uint64_t cr8;
+	int regnum;
+
+	/* We only handle mov %cr8 to/from a register at this time. */
+	if ((exitqual & 0xe0) != 0x00) {
+		return (UNHANDLED);
+	}
+
+	vlapic = vm_lapic(vmx->vm, vcpu);
+	regnum = (exitqual >> 8) & 0xf;
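+	/* Access type 1 (bit 4 set) is a read of %cr8, type 0 is a write. */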
+	if (exitqual & 0x10) {
+		cr8 = vlapic_get_cr8(vlapic);
+		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
+	} else {
+		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
+		vlapic_set_cr8(vlapic, cr8);
+	}
+
+	return (HANDLED);
+}
+
+/*
+ * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
+ */
+static int
+vmx_cpl(void)
+{
+	uint32_t ssar;
+
+	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
+	return ((ssar >> 5) & 0x3);
+}
+
+static enum vm_cpu_mode
+vmx_cpu_mode(void)
+{
+	uint32_t csar;
+
+	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
+		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
+		if (csar & 0x2000)
+			return (CPU_MODE_64BIT);	/* CS.L = 1 */
+		else
+			return (CPU_MODE_COMPATIBILITY);
+	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
+		return (CPU_MODE_PROTECTED);
+	} else {
+		return (CPU_MODE_REAL);
+	}
+}
+
+static enum vm_paging_mode
+vmx_paging_mode(void)
+{
+
+	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
+		return (PAGING_MODE_FLAT);
+	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
+		return (PAGING_MODE_32);
+	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
+		return (PAGING_MODE_64);
+	else
+		return (PAGING_MODE_PAE);
+}
+
+static uint64_t
+inout_str_index(struct vmx *vmx, int vcpuid, int in)
+{
+	uint64_t val;
+	int error;
+	enum vm_reg_name reg;
+
+	reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
+	error = vmx_getreg(vmx, vcpuid, reg, &val);
+	KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
+	return (val);
+}
+
+static uint64_t
+inout_str_count(struct vmx *vmx, int vcpuid, int rep)
+{
+	uint64_t val;
+	int error;
+
+	if (rep) {
+		error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
+		KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
+	} else {
+		val = 1;
+	}
+	return (val);
+}
+
+static int
+inout_str_addrsize(uint32_t inst_info)
+{
+	uint32_t size;
+
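+	/*
+	 * Bits 9:7 of the VM-exit instruction information encode the
+	 * address size used by the INS/OUTS instruction.
+	 */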
+	size = (inst_info >> 7) & 0x7;
+	switch (size) {
+	case 0:
+		return (2);	/* 16 bit */
+	case 1:
+		return (4);	/* 32 bit */
+	case 2:
+		return (8);	/* 64 bit */
+	default:
+		panic("%s: invalid size encoding %d", __func__, size);
+	}
+}
+
+static void
+inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
+    struct vm_inout_str *vis)
+{
+	int error, s;
+
+	if (in) {
+		vis->seg_name = VM_REG_GUEST_ES;
+	} else {
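+		/*
+		 * Bits 17:15 of the instruction information encode the
+		 * segment register used by OUTS.
+		 */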
+		s = (inst_info >> 15) & 0x7;
+		vis->seg_name = vm_segment_name(s);
+	}
+
+	error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
+	KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
+}
+
+static void
+vmx_paging_info(struct vm_guest_paging *paging)
+{
+	paging->cr3 = vmcs_guest_cr3();
+	paging->cpl = vmx_cpl();
+	paging->cpu_mode = vmx_cpu_mode();
+	paging->paging_mode = vmx_paging_mode();
+}
+
+static void
+vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
+{
+	struct vm_guest_paging *paging;
+	uint32_t csar;
+
+	paging = &vmexit->u.inst_emul.paging;
+
+	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
+	vmexit->inst_length = 0;
+	vmexit->u.inst_emul.gpa = gpa;
+	vmexit->u.inst_emul.gla = gla;
+	vmx_paging_info(paging);
+	switch (paging->cpu_mode) {
+	case CPU_MODE_REAL:
+		vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
+		vmexit->u.inst_emul.cs_d = 0;
+		break;
+	case CPU_MODE_PROTECTED:
+	case CPU_MODE_COMPATIBILITY:
+		vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
+		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
+		vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
+		break;
+	default:
+		vmexit->u.inst_emul.cs_base = 0;
+		vmexit->u.inst_emul.cs_d = 0;
+		break;
+	}
+	vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
+}
+
+static int
+ept_fault_type(uint64_t ept_qual)
+{
+	int fault_type;
+
+	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
+		fault_type = VM_PROT_WRITE;
+	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
+		fault_type = VM_PROT_EXECUTE;
+	else
+		fault_type = VM_PROT_READ;
+
+	return (fault_type);
+}
+
+static boolean_t
+ept_emulation_fault(uint64_t ept_qual)
+{
+	int read, write;
+
+	/* EPT fault on an instruction fetch doesn't make sense here */
+	if (ept_qual & EPT_VIOLATION_INST_FETCH)
+		return (FALSE);
+
+	/* EPT fault must be a read fault or a write fault */
+	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
+	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
+	if ((read | write) == 0)
+		return (FALSE);
+
+	/*
+	 * The EPT violation must have been caused by accessing a
+	 * guest-physical address that is a translation of a guest-linear
+	 * address.
+	 */
+	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
+	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
+		return (FALSE);
+	}
+
+	return (TRUE);
+}
+
+static __inline int
+apic_access_virtualization(struct vmx *vmx, int vcpuid)
+{
+	uint32_t proc_ctls2;
+
+	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
+	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
+}
+
+static __inline int
+x2apic_virtualization(struct vmx *vmx, int vcpuid)
+{
+	uint32_t proc_ctls2;
+
+	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
+	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
+}
+
+static int
+vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
+    uint64_t qual)
+{
+	int error, handled, offset;
+	uint32_t *apic_regs, vector;
+	bool retu;
+
+	handled = HANDLED;
+	offset = APIC_WRITE_OFFSET(qual);
+
+	if (!apic_access_virtualization(vmx, vcpuid)) {
+		/*
+		 * In general there should not be any APIC write VM-exits
+		 * unless APIC-access virtualization is enabled.
+		 *
+		 * However self-IPI virtualization can legitimately trigger
+		 * an APIC-write VM-exit so treat it specially.
+		 */
+		if (x2apic_virtualization(vmx, vcpuid) &&
+		    offset == APIC_OFFSET_SELF_IPI) {
+			apic_regs = (uint32_t *)(vlapic->apic_page);
+			vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
+			vlapic_self_ipi_handler(vlapic, vector);
+			return (HANDLED);
+		} else
+			return (UNHANDLED);
+	}
+
+	switch (offset) {
+	case APIC_OFFSET_ID:
+		vlapic_id_write_handler(vlapic);
+		break;
+	case APIC_OFFSET_LDR:
+		vlapic_ldr_write_handler(vlapic);
+		break;
+	case APIC_OFFSET_DFR:
+		vlapic_dfr_write_handler(vlapic);
+		break;
+	case APIC_OFFSET_SVR:
+		vlapic_svr_write_handler(vlapic);
+		break;
+	case APIC_OFFSET_ESR:
+		vlapic_esr_write_handler(vlapic);
+		break;
+	case APIC_OFFSET_ICR_LOW:
+		retu = false;
+		error = vlapic_icrlo_write_handler(vlapic, &retu);
+		if (error != 0 || retu)
+			handled = UNHANDLED;
+		break;
+	case APIC_OFFSET_CMCI_LVT:
+	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
+		vlapic_lvt_write_handler(vlapic, offset);
+		break;
+	case APIC_OFFSET_TIMER_ICR:
+		vlapic_icrtmr_write_handler(vlapic);
+		break;
+	case APIC_OFFSET_TIMER_DCR:
+		vlapic_dcr_write_handler(vlapic);
+		break;
+	default:
+		handled = UNHANDLED;
+		break;
+	}
+	return (handled);
+}
+
+static bool
+apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
+{
+
+	if (apic_access_virtualization(vmx, vcpuid) &&
+	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
+		return (true);
+	else
+		return (false);
+}
+
+static int
+vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
+{
+	uint64_t qual;
+	int access_type, offset, allowed;
+
+	if (!apic_access_virtualization(vmx, vcpuid))
+		return (UNHANDLED);
+
+	qual = vmexit->u.vmx.exit_qualification;
+	access_type = APIC_ACCESS_TYPE(qual);
+	offset = APIC_ACCESS_OFFSET(qual);
+
+	allowed = 0;
+	if (access_type == 0) {
+		/*
+		 * Read data access to the following registers is expected.
+		 */
+		switch (offset) {
+		case APIC_OFFSET_APR:
+		case APIC_OFFSET_PPR:
+		case APIC_OFFSET_RRR:
+		case APIC_OFFSET_CMCI_LVT:
+		case APIC_OFFSET_TIMER_CCR:
+			allowed = 1;
+			break;
+		default:
+			break;
+		}
+	} else if (access_type == 1) {
+		/*
+		 * Write data access to the following registers is expected.
+		 */
+		switch (offset) {
+		case APIC_OFFSET_VER:
+		case APIC_OFFSET_APR:
+		case APIC_OFFSET_PPR:
+		case APIC_OFFSET_RRR:
+		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
+		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
+		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
+		case APIC_OFFSET_CMCI_LVT:
+		case APIC_OFFSET_TIMER_CCR:
+			allowed = 1;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (allowed) {
+		vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
+		    VIE_INVALID_GLA);
+	}
+
+	/*
+	 * Regardless of whether the APIC-access is allowed this handler
+	 * always returns UNHANDLED:
+	 * - if the access is allowed then it is handled by emulating the
+	 *   instruction that caused the VM-exit (outside the critical section)
+	 * - if the access is not allowed then it will be converted to an
+	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
+	 */
+	return (UNHANDLED);
+}
+
+static enum task_switch_reason
+vmx_task_switch_reason(uint64_t qual)
+{
+	int reason;
+
+	reason = (qual >> 30) & 0x3;
+	switch (reason) {
+	case 0:
+		return (TSR_CALL);
+	case 1:
+		return (TSR_IRET);
+	case 2:
+		return (TSR_JMP);
+	case 3:
+		return (TSR_IDT_GATE);
+	default:
+		panic("%s: invalid reason %d", __func__, reason);
+	}
+}
+
+static int
+emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
+{
+	int error;
+
+	if (lapic_msr(num))
+		error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
+	else
+		error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
+
+	return (error);
+}
+
+static int
+emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
+{
+	struct vmxctx *vmxctx;
+	uint64_t result;
+	uint32_t eax, edx;
+	int error;
+
+	if (lapic_msr(num))
+		error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
+	else
+		error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
+
+	if (error == 0) {
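+		/* The 64-bit MSR value is returned to the guest in %edx:%eax. */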
+		eax = result;
+		vmxctx = &vmx->ctx[vcpuid];
+		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
+		KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
+
+		edx = result >> 32;
+		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
+		KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
+	}
+
+	return (error);
+}
+
+static int
+vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
+{
+	int error, errcode, errcode_valid, handled, in;
+	struct vmxctx *vmxctx;
+	struct vlapic *vlapic;
+	struct vm_inout_str *vis;
+	struct vm_task_switch *ts;
+	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
+	uint32_t intr_type, intr_vec, reason;
+	uint64_t exitintinfo, qual, gpa;
+	bool retu;
+
+	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
+	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
+
+	handled = UNHANDLED;
+	vmxctx = &vmx->ctx[vcpu];
+
+	qual = vmexit->u.vmx.exit_qualification;
+	reason = vmexit->u.vmx.exit_reason;
+	vmexit->exitcode = VM_EXITCODE_BOGUS;
+
+	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
+
+	/*
+	 * VM-entry failures during or after loading guest state.
+	 *
+	 * These VM-exits are uncommon but must be handled specially
+	 * as most VM-exit fields are not populated as usual.
+	 */
+	if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
+		VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
+		__asm __volatile("int $18");
+		return (1);
+	}
+
+	/*
+	 * VM exits that can be triggered during event delivery need to
+	 * be handled specially by re-injecting the event if the IDT
+	 * vectoring information field's valid bit is set.
+	 *
+	 * See "Information for VM Exits During Event Delivery" in Intel SDM
+	 * for details.
+	 */
+	idtvec_info = vmcs_idt_vectoring_info();
+	if (idtvec_info & VMCS_IDT_VEC_VALID) {
+		idtvec_info &= ~(1 << 12); /* clear undefined bit */
+		exitintinfo = idtvec_info;
+		if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
+			idtvec_err = vmcs_idt_vectoring_err();
+			exitintinfo |= (uint64_t)idtvec_err << 32;
+		}
+		error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
+		KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
+		    __func__, error));
+
+		/*
+		 * If 'virtual NMIs' are being used and the VM-exit
+		 * happened while injecting an NMI during the previous
+		 * VM-entry, then clear "blocking by NMI" in the
+		 * Guest Interruptibility-State so the NMI can be
+		 * reinjected on the subsequent VM-entry.
+		 *
+		 * However, if the NMI was being delivered through a task
+		 * gate, then the new task must start execution with NMIs
+		 * blocked so don't clear NMI blocking in this case.
+		 */
+		intr_type = idtvec_info & VMCS_INTR_T_MASK;
+		if (intr_type == VMCS_INTR_T_NMI) {
+			if (reason != EXIT_REASON_TASK_SWITCH)
+				vmx_clear_nmi_blocking(vmx, vcpu);
+			else
+				vmx_assert_nmi_blocking(vmx, vcpu);
+		}
+
+		/*
+		 * Update VM-entry instruction length if the event being
+		 * delivered was a software interrupt or software exception.
+		 */
+		if (intr_type == VMCS_INTR_T_SWINTR ||
+		    intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
+		    intr_type == VMCS_INTR_T_SWEXCEPTION) {
+			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
+		}
+	}
+
+	switch (reason) {
+	case EXIT_REASON_TASK_SWITCH:
+		ts = &vmexit->u.task_switch;
+		ts->tsssel = qual & 0xffff;
+		ts->reason = vmx_task_switch_reason(qual);
+		ts->ext = 0;
+		ts->errcode_valid = 0;
+		vmx_paging_info(&ts->paging);
+		/*
+		 * If the task switch was due to a CALL, JMP, IRET, software
+		 * interrupt (INT n) or software exception (INT3, INTO),
+		 * then the saved %rip references the instruction that caused
+		 * the task switch. The instruction length field in the VMCS
+		 * is valid in this case.
+		 *
+		 * In all other cases (e.g., NMI, hardware exception) the
+		 * saved %rip is one that would have been saved in the old TSS
+		 * had the task switch completed normally so the instruction
+		 * length field is not needed in this case and is explicitly
+		 * set to 0.
+		 */
+		if (ts->reason == TSR_IDT_GATE) {
+			KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
+			    ("invalid idtvec_info %#x for IDT task switch",
+			    idtvec_info));
+			intr_type = idtvec_info & VMCS_INTR_T_MASK;
+			if (intr_type != VMCS_INTR_T_SWINTR &&
+			    intr_type != VMCS_INTR_T_SWEXCEPTION &&
+			    intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
+				/* Task switch triggered by external event */
+				ts->ext = 1;
+				vmexit->inst_length = 0;
+				if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
+					ts->errcode_valid = 1;
+					ts->errcode = vmcs_idt_vectoring_err();
+				}
+			}
+		}
+		vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
+		VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
+		    "%s errcode 0x%016lx", ts->reason, ts->tsssel,
+		    ts->ext ? "external" : "internal",
+		    ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
+		break;
+	case EXIT_REASON_CR_ACCESS:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
+		switch (qual & 0xf) {
+		case 0:
+			handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
+			break;
+		case 4:
+			handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
+			break;
+		case 8:
+			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
+			break;
+		}
+		break;
+	case EXIT_REASON_RDMSR:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
+		retu = false;
+		ecx = vmxctx->guest_rcx;
+		VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
+		error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
+		if (error) {
+			vmexit->exitcode = VM_EXITCODE_RDMSR;
+			vmexit->u.msr.code = ecx;
+		} else if (!retu) {
+			handled = HANDLED;
+		} else {
+			/* Return to userspace with a valid exitcode */
+			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
+			    ("emulate_rdmsr retu with bogus exitcode"));
+		}
+		break;
+	case EXIT_REASON_WRMSR:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
+		retu = false;
+		eax = vmxctx->guest_rax;
+		ecx = vmxctx->guest_rcx;
+		edx = vmxctx->guest_rdx;
+		VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
+		    ecx, (uint64_t)edx << 32 | eax);
+		error = emulate_wrmsr(vmx, vcpu, ecx,
+		    (uint64_t)edx << 32 | eax, &retu);
+		if (error) {
+			vmexit->exitcode = VM_EXITCODE_WRMSR;
+			vmexit->u.msr.code = ecx;
+			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
+		} else if (!retu) {
+			handled = HANDLED;
+		} else {
+			/* Return to userspace with a valid exitcode */
+			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
+			    ("emulate_wrmsr retu with bogus exitcode"));
+		}
+		break;
+	case EXIT_REASON_HLT:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
+		vmexit->exitcode = VM_EXITCODE_HLT;
+		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
+		break;
+	case EXIT_REASON_MTF:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
+		vmexit->exitcode = VM_EXITCODE_MTRAP;
+		vmexit->inst_length = 0;
+		break;
+	case EXIT_REASON_PAUSE:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
+		vmexit->exitcode = VM_EXITCODE_PAUSE;
+		break;
+	case EXIT_REASON_INTR_WINDOW:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
+		vmx_clear_int_window_exiting(vmx, vcpu);
+		return (1);
+	case EXIT_REASON_EXT_INTR:
+		/*
+		 * External interrupts serve only to cause VM exits and allow
+		 * the host interrupt handler to run.
+		 *
+		 * If this external interrupt triggers a virtual interrupt
+		 * to a VM, then that state will be recorded by the
+		 * host interrupt handler in the VM's softc. We will inject
+		 * this virtual interrupt during the subsequent VM enter.
+		 */
+		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
+
+		/*
+		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
+		 * This appears to be a bug in VMware Fusion?
+		 */
+		if (!(intr_info & VMCS_INTR_VALID))
+			return (1);
+		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
+		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
+		    ("VM exit interruption info invalid: %#x", intr_info));
+		vmx_trigger_hostintr(intr_info & 0xff);
+
+		/*
+		 * This is special. We want to treat this as a 'handled'
+		 * VM-exit but not increment the instruction pointer.
+		 */
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
+		return (1);
+	case EXIT_REASON_NMI_WINDOW:
+		/* Exit to allow the pending virtual NMI to be injected */
+		if (vm_nmi_pending(vmx->vm, vcpu))
+			vmx_inject_nmi(vmx, vcpu);
+		vmx_clear_nmi_window_exiting(vmx, vcpu);
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
+		return (1);
+	case EXIT_REASON_INOUT:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
+		vmexit->exitcode = VM_EXITCODE_INOUT;
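+		/*
+		 * Decode the I/O exit qualification: bits 2:0 hold the access
+		 * size minus one, bit 3 the direction (1 = in), bit 4 string
+		 * instruction, bit 5 REP prefix and bits 31:16 the port.
+		 */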
+		vmexit->u.inout.bytes = (qual & 0x7) + 1;
+		vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
+		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
+		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
+		vmexit->u.inout.port = (uint16_t)(qual >> 16);
+		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
+		if (vmexit->u.inout.string) {
+			inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
+			vmexit->exitcode = VM_EXITCODE_INOUT_STR;
+			vis = &vmexit->u.inout_str;
+			vmx_paging_info(&vis->paging);
+			vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
+			vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
+			vis->index = inout_str_index(vmx, vcpu, in);
+			vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
+			vis->addrsize = inout_str_addrsize(inst_info);
+			inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
+		}
+		break;
+	case EXIT_REASON_CPUID:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
+		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
+		break;
+	case EXIT_REASON_EXCEPTION:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
+		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
+		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
+		    ("VM exit interruption info invalid: %#x", intr_info));
+
+		intr_vec = intr_info & 0xff;
+		intr_type = intr_info & VMCS_INTR_T_MASK;
+
+		/*
+		 * If Virtual NMIs control is 1 and the VM-exit is due to a
+		 * fault encountered during the execution of IRET then we must
+		 * restore the state of "virtual-NMI blocking" before resuming
+		 * the guest.
+		 *
+		 * See "Resuming Guest Software after Handling an Exception".
+		 * See "Information for VM Exits Due to Vectored Events".
+		 */
+		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
+		    (intr_vec != IDT_DF) &&
+		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
+			vmx_restore_nmi_blocking(vmx, vcpu);
+
+		/*
+		 * The NMI has already been handled in vmx_exit_handle_nmi().
+		 */
+		if (intr_type == VMCS_INTR_T_NMI)
+			return (1);
+
+		/*
+		 * Call the machine check handler by hand. Also don't reflect
+		 * the machine check back into the guest.
+		 */
+		if (intr_vec == IDT_MC) {
+			VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
+			__asm __volatile("int $18");
+			return (1);
+		}
+
+		if (intr_vec == IDT_PF) {
+			error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
+			KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
+			    __func__, error));
+		}
+
+		/*
+		 * Software exceptions exhibit trap-like behavior. This in
+		 * turn requires populating the VM-entry instruction length
+		 * so that the %rip in the trap frame is past the INT3/INTO
+		 * instruction.
+		 */
+		if (intr_type == VMCS_INTR_T_SWEXCEPTION)
+			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
+
+		/* Reflect all other exceptions back into the guest */
+		errcode_valid = errcode = 0;
+		if (intr_info & VMCS_INTR_DEL_ERRCODE) {
+			errcode_valid = 1;
+			errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
+		}
+		VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
+		    "the guest", intr_vec, errcode);
+		error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
+		    errcode_valid, errcode, 0);
+		KASSERT(error == 0, ("%s: vm_inject_exception error %d",
+		    __func__, error));
+		return (1);
+
+	case EXIT_REASON_EPT_FAULT:
+		/*
+		 * If 'gpa' lies within the address space allocated to
+		 * memory then this must be a nested page fault otherwise
+		 * this must be an instruction that accesses MMIO space.
+		 */
+		gpa = vmcs_gpa();
+		if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
+		    apic_access_fault(vmx, vcpu, gpa)) {
+			vmexit->exitcode = VM_EXITCODE_PAGING;
+			vmexit->inst_length = 0;
+			vmexit->u.paging.gpa = gpa;
+			vmexit->u.paging.fault_type = ept_fault_type(qual);
+			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
+		} else if (ept_emulation_fault(qual)) {
+			vmexit_inst_emul(vmexit, gpa, vmcs_gla());
+			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
+		}
+		/*
+		 * If Virtual NMIs control is 1 and the VM-exit is due to an
+		 * EPT fault during the execution of IRET then we must restore
+		 * the state of "virtual-NMI blocking" before resuming.
+		 *
+		 * See description of "NMI unblocking due to IRET" in
+		 * "Exit Qualification for EPT Violations".
+		 */
+		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
+		    (qual & EXIT_QUAL_NMIUDTI) != 0)
+			vmx_restore_nmi_blocking(vmx, vcpu);
+		break;
+	case EXIT_REASON_VIRTUALIZED_EOI:
+		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
+		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
+		vmexit->inst_length = 0;	/* trap-like */
+		break;
+	case EXIT_REASON_APIC_ACCESS:
+		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
+		break;
+	case EXIT_REASON_APIC_WRITE:
+		/*
+		 * APIC-write VM exit is trap-like so the %rip is already
+		 * pointing to the next instruction.
+		 */
+		vmexit->inst_length = 0;
+		vlapic = vm_lapic(vmx->vm, vcpu);
+		handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
+		break;
+	case EXIT_REASON_XSETBV:
+		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
+		break;
+	case EXIT_REASON_MONITOR:
+		vmexit->exitcode = VM_EXITCODE_MONITOR;
+		break;
+	case EXIT_REASON_MWAIT:
+		vmexit->exitcode = VM_EXITCODE_MWAIT;
+		break;
+	default:
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
+		break;
+	}
+
+	if (handled) {
+		/*
+		 * It is possible that control is returned to userland
+		 * even though we were able to handle the VM exit in the
+		 * kernel.
+		 *
+		 * In such a case we want to make sure that the userland
+		 * restarts guest execution at the instruction *after*
+		 * the one we just processed. Therefore we update the
+		 * guest rip in the VMCS and in 'vmexit'.
+		 */
+		vmexit->rip += vmexit->inst_length;
+		vmexit->inst_length = 0;
+		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
+	} else {
+		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
+			/*
+			 * If this VM exit was not claimed by anybody then
+			 * treat it as a generic VMX exit.
+			 */
+			vmexit->exitcode = VM_EXITCODE_VMX;
+			vmexit->u.vmx.status = VM_SUCCESS;
+			vmexit->u.vmx.inst_type = 0;
+			vmexit->u.vmx.inst_error = 0;
+		} else {
+			/*
+			 * The exitcode and collateral have been populated.
+			 * The VM exit will be processed further in userland.
+			 */
+		}
+	}
+	return (handled);
+}
+
+static __inline void
+vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
+{
+
+	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
+	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
+	    vmxctx->inst_fail_status));
+
+	vmexit->inst_length = 0;
+	vmexit->exitcode = VM_EXITCODE_VMX;
+	vmexit->u.vmx.status = vmxctx->inst_fail_status;
+	vmexit->u.vmx.inst_error = vmcs_instruction_error();
+	vmexit->u.vmx.exit_reason = ~0;
+	vmexit->u.vmx.exit_qualification = ~0;
+
+	switch (rc) {
+	case VMX_VMRESUME_ERROR:
+	case VMX_VMLAUNCH_ERROR:
+	case VMX_INVEPT_ERROR:
+		vmexit->u.vmx.inst_type = rc;
+		break;
+	default:
+		panic("vmx_exit_inst_error: vmx_enter_guest returned %d", rc);
+	}
+}
+
+/*
+ * If the NMI-exiting VM execution control is set to '1' then an NMI in
+ * non-root operation causes a VM-exit. NMI blocking is in effect so it is
+ * sufficient to simply vector to the NMI handler via a software interrupt.
+ * However, this must be done before maskable interrupts are enabled
+ * otherwise the "iret" issued by an interrupt handler will incorrectly
+ * clear NMI blocking.
+ */
+static __inline void
+vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
+{
+	uint32_t intr_info;
+
+	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
+
+	if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
+		return;
+
+	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
+	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
+	    ("VM exit interruption info invalid: %#x", intr_info));
+
+	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
+		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
+		    "to NMI has invalid vector: %#x", intr_info));
+		VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
+		__asm __volatile("int $2");
+	}
+}
+
+static int
+vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
+    struct vm_eventinfo *evinfo)
+{
+	int rc, handled, launched;
+	struct vmx *vmx;
+	struct vm *vm;
+	struct vmxctx *vmxctx;
+	struct vmcs *vmcs;
+	struct vm_exit *vmexit;
+	struct vlapic *vlapic;
+	uint32_t exit_reason;
+
+	vmx = arg;
+	vm = vmx->vm;
+	vmcs = &vmx->vmcs[vcpu];
+	vmxctx = &vmx->ctx[vcpu];
+	vlapic = vm_lapic(vm, vcpu);
+	vmexit = vm_exitinfo(vm, vcpu);
+	launched = 0;
+
+	KASSERT(vmxctx->pmap == pmap,
+	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
+
+	vmx_msr_guest_enter(vmx, vcpu);
+
+	VMPTRLD(vmcs);
+
+	/*
+	 * XXX
+	 * We do this every time because we may set up the virtual machine
+	 * from a different process than the one that actually runs it.
+	 *
+	 * If the life of a virtual machine was spent entirely in the context
+	 * of a single process we could do this once in vmx_vminit().
+	 */
+	vmcs_write(VMCS_HOST_CR3, rcr3());
+
+	vmcs_write(VMCS_GUEST_RIP, rip);
+	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
+	do {
+		KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
+		    "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
+
+		handled = UNHANDLED;
+		/*
+		 * Interrupts are disabled from this point on until the
+		 * guest starts executing. This is done for the following
+		 * reasons:
+		 *
+		 * If an AST is asserted on this thread after the check below,
+		 * then the IPI_AST notification will not be lost, because it
+		 * will cause a VM exit due to external interrupt as soon as
+		 * the guest state is loaded.
+		 *
+		 * A posted interrupt after 'vmx_inject_interrupts()' will
+		 * not be "lost" because it will be held pending in the host
+		 * APIC because interrupts are disabled. The pending interrupt
+		 * will be recognized as soon as the guest state is loaded.
+		 *
+		 * The same reasoning applies to the IPI generated by
+		 * pmap_invalidate_ept().
+		 */
+		disable_intr();
+		vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
+
+		/*
+		 * Check for vcpu suspension after injecting events because
+		 * vmx_inject_interrupts() can suspend the vcpu due to a
+		 * triple fault.
+		 */
+		if (vcpu_suspended(evinfo)) {
+			enable_intr();
+			vm_exit_suspended(vmx->vm, vcpu, rip);
+			break;
+		}
+
+		if (vcpu_rendezvous_pending(evinfo)) {
+			enable_intr();
+			vm_exit_rendezvous(vmx->vm, vcpu, rip);
+			break;
+		}
+
+		if (vcpu_reqidle(evinfo)) {
+			enable_intr();
+			vm_exit_reqidle(vmx->vm, vcpu, rip);
+			break;
+		}
+
+		if (vcpu_should_yield(vm, vcpu)) {
+			enable_intr();
+			vm_exit_astpending(vmx->vm, vcpu, rip);
+			vmx_astpending_trace(vmx, vcpu, rip);
+			handled = HANDLED;
+			break;
+		}
+
+		vmx_run_trace(vmx, vcpu);
+		rc = vmx_enter_guest(vmxctx, vmx, launched);
+
+		/* Collect some information for VM exit processing */
+		vmexit->rip = rip = vmcs_guest_rip();
+		vmexit->inst_length = vmexit_instruction_length();
+		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
+		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
+
+		/* Update 'nextrip' */
+		vmx->state[vcpu].nextrip = rip;
+
+		if (rc == VMX_GUEST_VMEXIT) {
+			vmx_exit_handle_nmi(vmx, vcpu, vmexit);
+			enable_intr();
+			handled = vmx_exit_process(vmx, vcpu, vmexit);
+		} else {
+			enable_intr();
+			vmx_exit_inst_error(vmxctx, rc, vmexit);
+		}
+		launched = 1;
+		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
+		rip = vmexit->rip;
+	} while (handled);
+
+	/*
+	 * If a VM exit has been handled then the exitcode must be BOGUS.
+	 * If a VM exit is not handled then the exitcode must not be BOGUS.
+	 */
+	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
+	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
+		panic("Mismatch between handled (%d) and exitcode (%d)",
+		      handled, vmexit->exitcode);
+	}
+
+	if (!handled)
+		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
+
+	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
+	    vmexit->exitcode);
+
+	VMCLEAR(vmcs);
+	vmx_msr_guest_exit(vmx, vcpu);
+
+	return (0);
+}
+
+static void
+vmx_vmcleanup(void *arg)
+{
+	int i;
+	struct vmx *vmx = arg;
+
+	if (apic_access_virtualization(vmx, 0))
+		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
+
+	for (i = 0; i < VM_MAXCPU; i++)
+		vpid_free(vmx->state[i].vpid);
+
+	free(vmx, M_VMX);
+
+	return;
+}
+
+static register_t *
+vmxctx_regptr(struct vmxctx *vmxctx, int reg)
+{
+
+	switch (reg) {
+	case VM_REG_GUEST_RAX:
+		return (&vmxctx->guest_rax);
+	case VM_REG_GUEST_RBX:
+		return (&vmxctx->guest_rbx);
+	case VM_REG_GUEST_RCX:
+		return (&vmxctx->guest_rcx);
+	case VM_REG_GUEST_RDX:
+		return (&vmxctx->guest_rdx);
+	case VM_REG_GUEST_RSI:
+		return (&vmxctx->guest_rsi);
+	case VM_REG_GUEST_RDI:
+		return (&vmxctx->guest_rdi);
+	case VM_REG_GUEST_RBP:
+		return (&vmxctx->guest_rbp);
+	case VM_REG_GUEST_R8:
+		return (&vmxctx->guest_r8);
+	case VM_REG_GUEST_R9:
+		return (&vmxctx->guest_r9);
+	case VM_REG_GUEST_R10:
+		return (&vmxctx->guest_r10);
+	case VM_REG_GUEST_R11:
+		return (&vmxctx->guest_r11);
+	case VM_REG_GUEST_R12:
+		return (&vmxctx->guest_r12);
+	case VM_REG_GUEST_R13:
+		return (&vmxctx->guest_r13);
+	case VM_REG_GUEST_R14:
+		return (&vmxctx->guest_r14);
+	case VM_REG_GUEST_R15:
+		return (&vmxctx->guest_r15);
+	case VM_REG_GUEST_CR2:
+		return (&vmxctx->guest_cr2);
+	default:
+		break;
+	}
+	return (NULL);
+}
+
+static int
+vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
+{
+	register_t *regp;
+
+	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
+		*retval = *regp;
+		return (0);
+	} else
+		return (EINVAL);
+}
+
+static int
+vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
+{
+	register_t *regp;
+
+	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
+		*regp = val;
+		return (0);
+	} else
+		return (EINVAL);
+}
+
+static int
+vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
+{
+	uint64_t gi;
+	int error;
+
+	error = vmcs_getreg(&vmx->vmcs[vcpu], running, 
+	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
+	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
+	return (error);
+}
+
+static int
+vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
+{
+	struct vmcs *vmcs;
+	uint64_t gi;
+	int error, ident;
+
+	/*
+	 * Forcing the vcpu into an interrupt shadow is not supported.
+	 */
+	if (val) {
+		error = EINVAL;
+		goto done;
+	}
+
+	vmcs = &vmx->vmcs[vcpu];
+	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
+	error = vmcs_getreg(vmcs, running, ident, &gi);
+	if (error == 0) {
+		gi &= ~HWINTR_BLOCKING;
+		error = vmcs_setreg(vmcs, running, ident, gi);
+	}
+done:
+	VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
+	    error ? "failed" : "succeeded");
+	return (error);
+}
+
+static int
+vmx_shadow_reg(int reg)
+{
+	int shreg;
+
+	shreg = -1;
+
+	switch (reg) {
+	case VM_REG_GUEST_CR0:
+		shreg = VMCS_CR0_SHADOW;
+		break;
+	case VM_REG_GUEST_CR4:
+		shreg = VMCS_CR4_SHADOW;
+		break;
+	default:
+		break;
+	}
+
+	return (shreg);
+}
+
+static int
+vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
+{
+	int running, hostcpu;
+	struct vmx *vmx = arg;
+
+	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+	if (running && hostcpu != curcpu)
+		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
+
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
+
+	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
+		return (0);
+
+	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
+}
+
+static int
+vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
+{
+	int error, hostcpu, running, shadow;
+	uint64_t ctls;
+	pmap_t pmap;
+	struct vmx *vmx = arg;
+
+	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+	if (running && hostcpu != curcpu)
+		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
+
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
+
+	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
+		return (0);
+
+	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
+
+	if (error == 0) {
+		/*
+		 * If the "load EFER" VM-entry control is 1 then the
+		 * value of EFER.LMA must be identical to "IA-32e mode guest"
+		 * bit in the VM-entry control.
+		 */
+		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
+		    (reg == VM_REG_GUEST_EFER)) {
+			vmcs_getreg(&vmx->vmcs[vcpu], running,
+				    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
+			if (val & EFER_LMA)
+				ctls |= VM_ENTRY_GUEST_LMA;
+			else
+				ctls &= ~VM_ENTRY_GUEST_LMA;
+			vmcs_setreg(&vmx->vmcs[vcpu], running,
+				    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
+		}
+
+		shadow = vmx_shadow_reg(reg);
+		if (shadow > 0) {
+			/*
+			 * Store the unmodified value in the shadow
+			 */			
+			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
+				    VMCS_IDENT(shadow), val);
+		}
+
+		if (reg == VM_REG_GUEST_CR3) {
+			/*
+			 * Invalidate the guest vcpu's TLB mappings to emulate
+			 * the behavior of updating %cr3.
+			 *
+			 * XXX the processor retains global mappings when %cr3
+			 * is updated but vmx_invvpid() does not.
+			 */
+			pmap = vmx->ctx[vcpu].pmap;
+			vmx_invvpid(vmx, vcpu, pmap, running);
+		}
+	}
+
+	return (error);
+}
+
+static int
+vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+{
+	int hostcpu, running;
+	struct vmx *vmx = arg;
+
+	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+	if (running && hostcpu != curcpu)
+		panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
+
+	return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
+}
+
+static int
+vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+{
+	int hostcpu, running;
+	struct vmx *vmx = arg;
+
+	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+	if (running && hostcpu != curcpu)
+		panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
+
+	return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
+}
+
+static int
+vmx_getcap(void *arg, int vcpu, int type, int *retval)
+{
+	struct vmx *vmx = arg;
+	int vcap;
+	int ret;
+
+	ret = ENOENT;
+
+	vcap = vmx->cap[vcpu].set;
+
+	switch (type) {
+	case VM_CAP_HALT_EXIT:
+		if (cap_halt_exit)
+			ret = 0;
+		break;
+	case VM_CAP_PAUSE_EXIT:
+		if (cap_pause_exit)
+			ret = 0;
+		break;
+	case VM_CAP_MTRAP_EXIT:
+		if (cap_monitor_trap)
+			ret = 0;
+		break;
+	case VM_CAP_UNRESTRICTED_GUEST:
+		if (cap_unrestricted_guest)
+			ret = 0;
+		break;
+	case VM_CAP_ENABLE_INVPCID:
+		if (cap_invpcid)
+			ret = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (ret == 0)
+		*retval = (vcap & (1 << type)) ? 1 : 0;
+
+	return (ret);
+}
+
+static int
+vmx_setcap(void *arg, int vcpu, int type, int val)
+{
+	struct vmx *vmx = arg;
+	struct vmcs *vmcs = &vmx->vmcs[vcpu];
+	uint32_t baseval;
+	uint32_t *pptr;
+	int error;
+	int flag;
+	int reg;
+	int retval;
+
+	retval = ENOENT;
+	pptr = NULL;
+
+	switch (type) {
+	case VM_CAP_HALT_EXIT:
+		if (cap_halt_exit) {
+			retval = 0;
+			pptr = &vmx->cap[vcpu].proc_ctls;
+			baseval = *pptr;
+			flag = PROCBASED_HLT_EXITING;
+			reg = VMCS_PRI_PROC_BASED_CTLS;
+		}
+		break;
+	case VM_CAP_MTRAP_EXIT:
+		if (cap_monitor_trap) {
+			retval = 0;
+			pptr = &vmx->cap[vcpu].proc_ctls;
+			baseval = *pptr;
+			flag = PROCBASED_MTF;
+			reg = VMCS_PRI_PROC_BASED_CTLS;
+		}
+		break;
+	case VM_CAP_PAUSE_EXIT:
+		if (cap_pause_exit) {
+			retval = 0;
+			pptr = &vmx->cap[vcpu].proc_ctls;
+			baseval = *pptr;
+			flag = PROCBASED_PAUSE_EXITING;
+			reg = VMCS_PRI_PROC_BASED_CTLS;
+		}
+		break;
+	case VM_CAP_UNRESTRICTED_GUEST:
+		if (cap_unrestricted_guest) {
+			retval = 0;
+			pptr = &vmx->cap[vcpu].proc_ctls2;
+			baseval = *pptr;
+			flag = PROCBASED2_UNRESTRICTED_GUEST;
+			reg = VMCS_SEC_PROC_BASED_CTLS;
+		}
+		break;
+	case VM_CAP_ENABLE_INVPCID:
+		if (cap_invpcid) {
+			retval = 0;
+			pptr = &vmx->cap[vcpu].proc_ctls2;
+			baseval = *pptr;
+			flag = PROCBASED2_ENABLE_INVPCID;
+			reg = VMCS_SEC_PROC_BASED_CTLS;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (retval == 0) {
+		if (val) {
+			baseval |= flag;
+		} else {
+			baseval &= ~flag;
+		}
+		VMPTRLD(vmcs);
+		error = vmwrite(reg, baseval);
+		VMCLEAR(vmcs);
+
+		if (error) {
+			retval = error;
+		} else {
+			/*
+			 * Update optional stored flags, and record
+			 * setting
+			 */
+			if (pptr != NULL) {
+				*pptr = baseval;
+			}
+
+			if (val) {
+				vmx->cap[vcpu].set |= (1 << type);
+			} else {
+				vmx->cap[vcpu].set &= ~(1 << type);
+			}
+		}
+	}
+
+	return (retval);
+}
+
+struct vlapic_vtx {
+	struct vlapic	vlapic;
+	struct pir_desc	*pir_desc;
+	struct vmx	*vmx;
+};
+
+#define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
+do {									\
+	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
+	    level ? "level" : "edge", vector);				\
+	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
+	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
+	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
+	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
+	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
+} while (0)
+
+/*
+ * vlapic->ops handlers that utilize the APICv hardware assist described in
+ * Chapter 29 of the Intel SDM.
+ */
+static int
+vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
+{
+	struct vlapic_vtx *vlapic_vtx;
+	struct pir_desc *pir_desc;
+	uint64_t mask;
+	int idx, notify;
+
+	vlapic_vtx = (struct vlapic_vtx *)vlapic;
+	pir_desc = vlapic_vtx->pir_desc;
+
+	/*
+	 * Keep track of interrupt requests in the PIR descriptor. This is
+	 * because the virtual APIC page pointed to by the VMCS cannot be
+	 * modified if the vcpu is running.
+	 */
+	idx = vector / 64;
+	mask = 1UL << (vector % 64);
+	atomic_set_long(&pir_desc->pir[idx], mask);
+	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
+
+	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
+	    level, "vmx_set_intr_ready");
+	return (notify);
+}
+
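The index/mask arithmetic above spreads the 256 possible vectors across the four 64-bit words of the PIR. A minimal standalone sketch of that arithmetic (plain userland C, assuming only <stdio.h>; the example vector is arbitrary):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int vector = 0x41;			/* arbitrary example vector */
	int idx = vector / 64;			/* which 64-bit PIR word */
	uint64_t mask = 1UL << (vector % 64);	/* bit within that word */

	/* vector 0x41 (65) lands in pir[1], bit 1 */
	printf("vector %#x -> pir[%d], mask %#jx\n", vector, idx,
	    (uintmax_t)mask);
	return (0);
}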
+static int
+vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
+{
+	struct vlapic_vtx *vlapic_vtx;
+	struct pir_desc *pir_desc;
+	struct LAPIC *lapic;
+	uint64_t pending, pirval;
+	uint32_t ppr, vpr;
+	int i;
+
+	/*
+	 * This function is only expected to be called from the 'HLT' exit
+	 * handler which does not care about the vector that is pending.
+	 */
+	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
+
+	vlapic_vtx = (struct vlapic_vtx *)vlapic;
+	pir_desc = vlapic_vtx->pir_desc;
+
+	pending = atomic_load_acq_long(&pir_desc->pending);
+	if (!pending)
+		return (0);	/* common case */
+
+	/*
+	 * If there is an interrupt pending then it will be recognized only
+	 * if its priority is greater than the processor priority.
+	 *
+	 * Special case: if the processor priority is zero then any pending
+	 * interrupt will be recognized.
+	 */
+	lapic = vlapic->apic_page;
+	ppr = lapic->ppr & 0xf0;
+	if (ppr == 0)
+		return (1);
+
+	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
+	    lapic->ppr);
+
+	for (i = 3; i >= 0; i--) {
+		pirval = pir_desc->pir[i];
+		if (pirval != 0) {
+			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
+			return (vpr > ppr);
+		}
+	}
+	return (0);
+}
+
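The test above compares the priority class of the highest pending PIR vector against the processor priority class. A hedged userland sketch of the same computation, using flsl() as provided by BSD libc (the pending vector and PPR values are made-up examples):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* flsl() in BSD libc */

int
main(void)
{
	uint64_t pirval = 1UL << (0x51 % 64);	/* vector 0x51 pending in pir[1] */
	int i = 1;				/* PIR word holding the vector */
	uint32_t ppr_low = 0x40, ppr_high = 0x60;
	uint32_t vpr;

	vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;	/* priority class 0x50 */
	printf("vpr %#x: recognized over ppr %#x? %d, over ppr %#x? %d\n",
	    vpr, ppr_low, vpr > ppr_low, ppr_high, vpr > ppr_high);
	return (0);
}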
+static void
+vmx_intr_accepted(struct vlapic *vlapic, int vector)
+{
+
+	panic("vmx_intr_accepted: not expected to be called");
+}
+
+static void
+vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
+{
+	struct vlapic_vtx *vlapic_vtx;
+	struct vmx *vmx;
+	struct vmcs *vmcs;
+	uint64_t mask, val;
+
+	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
+	KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
+	    ("vmx_set_tmr: vcpu cannot be running"));
+
+	vlapic_vtx = (struct vlapic_vtx *)vlapic;
+	vmx = vlapic_vtx->vmx;
+	vmcs = &vmx->vmcs[vlapic->vcpuid];
+	mask = 1UL << (vector % 64);
+
+	VMPTRLD(vmcs);
+	val = vmcs_read(VMCS_EOI_EXIT(vector));
+	if (level)
+		val |= mask;
+	else
+		val &= ~mask;
+	vmcs_write(VMCS_EOI_EXIT(vector), val);
+	VMCLEAR(vmcs);
+}
+
+static void
+vmx_enable_x2apic_mode(struct vlapic *vlapic)
+{
+	struct vmx *vmx;
+	struct vmcs *vmcs;
+	uint32_t proc_ctls2;
+	int vcpuid, error;
+
+	vcpuid = vlapic->vcpuid;
+	vmx = ((struct vlapic_vtx *)vlapic)->vmx;
+	vmcs = &vmx->vmcs[vcpuid];
+
+	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
+	KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
+	    ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
+
+	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
+	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
+	vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
+
+	VMPTRLD(vmcs);
+	vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
+	VMCLEAR(vmcs);
+
+	if (vlapic->vcpuid == 0) {
+		/*
+		 * The nested page table mappings are shared by all vcpus
+		 * so unmap the APIC access page just once.
+		 */
+		error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
+		KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
+		    __func__, error));
+
+		/*
+		 * The MSR bitmap is shared by all vcpus so modify it only
+		 * once in the context of vcpu 0.
+		 */
+		error = vmx_allow_x2apic_msrs(vmx);
+		KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
+		    __func__, error));
+	}
+}
+
+static void
+vmx_post_intr(struct vlapic *vlapic, int hostcpu)
+{
+
+	ipi_cpu(hostcpu, pirvec);
+}
+
+/*
+ * Transfer the pending interrupts in the PIR descriptor to the IRR
+ * in the virtual APIC page.
+ */
+static void
+vmx_inject_pir(struct vlapic *vlapic)
+{
+	struct vlapic_vtx *vlapic_vtx;
+	struct pir_desc *pir_desc;
+	struct LAPIC *lapic;
+	uint64_t val, pirval;
+	int rvi, pirbase = -1;
+	uint16_t intr_status_old, intr_status_new;
+
+	vlapic_vtx = (struct vlapic_vtx *)vlapic;
+	pir_desc = vlapic_vtx->pir_desc;
+	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
+		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
+		    "no posted interrupt pending");
+		return;
+	}
+
+	pirval = 0;
+	pirbase = -1;
+	lapic = vlapic->apic_page;
+
+	val = atomic_readandclear_long(&pir_desc->pir[0]);
+	if (val != 0) {
+		lapic->irr0 |= val;
+		lapic->irr1 |= val >> 32;
+		pirbase = 0;
+		pirval = val;
+	}
+
+	val = atomic_readandclear_long(&pir_desc->pir[1]);
+	if (val != 0) {
+		lapic->irr2 |= val;
+		lapic->irr3 |= val >> 32;
+		pirbase = 64;
+		pirval = val;
+	}
+
+	val = atomic_readandclear_long(&pir_desc->pir[2]);
+	if (val != 0) {
+		lapic->irr4 |= val;
+		lapic->irr5 |= val >> 32;
+		pirbase = 128;
+		pirval = val;
+	}
+
+	val = atomic_readandclear_long(&pir_desc->pir[3]);
+	if (val != 0) {
+		lapic->irr6 |= val;
+		lapic->irr7 |= val >> 32;
+		pirbase = 192;
+		pirval = val;
+	}
+
+	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
+
+	/*
+	 * Update RVI so the processor can evaluate pending virtual
+	 * interrupts on VM-entry.
+	 *
+	 * It is possible for pirval to be 0 here, even though the
+	 * pending bit has been set. The scenario is:
+	 * CPU-Y is sending a posted interrupt to CPU-X, which
+	 * is running a guest and processing posted interrupts in h/w.
+	 * CPU-X will eventually exit and the state seen in s/w is
+	 * the pending bit set, but no PIR bits set.
+	 *
+	 *      CPU-X                      CPU-Y
+	 *   (vm running)                (host running)
+	 *   rx posted interrupt
+	 *   CLEAR pending bit
+	 *				 SET PIR bit
+	 *   READ/CLEAR PIR bits
+	 *				 SET pending bit
+	 *   (vm exit)
+	 *   pending bit set, PIR 0
+	 */
+	if (pirval != 0) {
+		rvi = pirbase + flsl(pirval) - 1;
+		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
+		intr_status_new = (intr_status_old & 0xFF00) | rvi;
+		if (intr_status_new > intr_status_old) {
+			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
+			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
+			    "guest_intr_status changed from 0x%04x to 0x%04x",
+			    intr_status_old, intr_status_new);
+		}
+	}
+}
+
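The RVI update above amounts to finding the highest pending vector and merging it into the low byte of the guest interrupt status while keeping SVI in the high byte. A small userland sketch of that arithmetic, again assuming BSD's flsl() and arbitrary example values:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* flsl() in BSD libc */

int
main(void)
{
	int pirbase = 192;			/* word pir[3] covers 192..255 */
	uint64_t pirval = 1UL << (0xd1 % 64);	/* vector 0xd1 pending */
	int rvi = pirbase + flsl(pirval) - 1;	/* back to 0xd1 */
	uint16_t intr_status_old = 0x0030;	/* SVI = 0, RVI = 0x30 */
	uint16_t intr_status_new = (intr_status_old & 0xFF00) | rvi;

	/* only written back when the new value is larger, as above */
	printf("rvi %#x, intr status %#06x -> %#06x\n", rvi,
	    intr_status_old, intr_status_new);
	return (0);
}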
+static struct vlapic *
+vmx_vlapic_init(void *arg, int vcpuid)
+{
+	struct vmx *vmx;
+	struct vlapic *vlapic;
+	struct vlapic_vtx *vlapic_vtx;
+	
+	vmx = arg;
+
+	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
+	vlapic->vm = vmx->vm;
+	vlapic->vcpuid = vcpuid;
+	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
+
+	vlapic_vtx = (struct vlapic_vtx *)vlapic;
+	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
+	vlapic_vtx->vmx = vmx;
+
+	if (virtual_interrupt_delivery) {
+		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
+		vlapic->ops.pending_intr = vmx_pending_intr;
+		vlapic->ops.intr_accepted = vmx_intr_accepted;
+		vlapic->ops.set_tmr = vmx_set_tmr;
+		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
+	}
+
+	if (posted_interrupts)
+		vlapic->ops.post_intr = vmx_post_intr;
+
+	vlapic_init(vlapic);
+
+	return (vlapic);
+}
+
+static void
+vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
+{
+
+	vlapic_cleanup(vlapic);
+	free(vlapic, M_VLAPIC);
+}
+
+struct vmm_ops vmm_ops_intel = {
+	vmx_init,
+	vmx_cleanup,
+	vmx_restore,
+	vmx_vminit,
+	vmx_run,
+	vmx_vmcleanup,
+	vmx_getreg,
+	vmx_setreg,
+	vmx_getdesc,
+	vmx_setdesc,
+	vmx_getcap,
+	vmx_setcap,
+	ept_vmspace_alloc,
+	ept_vmspace_free,
+	vmx_vlapic_init,
+	vmx_vlapic_cleanup,
+};


Property changes on: trunk/sys/amd64/vmm/intel/vmx.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/intel/vmx.h
===================================================================
--- trunk/sys/amd64/vmm/intel/vmx.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmx.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,144 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.h 330713 2018-03-10 00:44:33Z tychon $
+ */
+
+#ifndef _VMX_H_
+#define	_VMX_H_
+
+#include "vmcs.h"
+
+struct pmap;
+
+struct vmxctx {
+	register_t	guest_rdi;		/* Guest state */
+	register_t	guest_rsi;
+	register_t	guest_rdx;
+	register_t	guest_rcx;
+	register_t	guest_r8;
+	register_t	guest_r9;
+	register_t	guest_rax;
+	register_t	guest_rbx;
+	register_t	guest_rbp;
+	register_t	guest_r10;
+	register_t	guest_r11;
+	register_t	guest_r12;
+	register_t	guest_r13;
+	register_t	guest_r14;
+	register_t	guest_r15;
+	register_t	guest_cr2;
+
+	register_t	host_r15;		/* Host state */
+	register_t	host_r14;
+	register_t	host_r13;
+	register_t	host_r12;
+	register_t	host_rbp;
+	register_t	host_rsp;
+	register_t	host_rbx;
+	/*
+	 * XXX todo debug registers and fpu state
+	 */
+
+	int		inst_fail_status;
+
+	/*
+	 * The pmap needs to be deactivated in vmx_enter_guest()
+	 * so keep a copy of the 'pmap' in each vmxctx.
+	 */
+	struct pmap	*pmap;
+};
+
+struct vmxcap {
+	int	set;
+	uint32_t proc_ctls;
+	uint32_t proc_ctls2;
+};
+
+struct vmxstate {
+	uint64_t nextrip;	/* next instruction to be executed by guest */
+	int	lastcpu;	/* host cpu that this 'vcpu' last ran on */
+	uint16_t vpid;
+};
+
+struct apic_page {
+	uint32_t reg[PAGE_SIZE / 4];
+};
+CTASSERT(sizeof(struct apic_page) == PAGE_SIZE);
+
+/* Posted Interrupt Descriptor (described in section 29.6 of the Intel SDM) */
+struct pir_desc {
+	uint64_t	pir[4];
+	uint64_t	pending;
+	uint64_t	unused[3];
+} __aligned(64);
+CTASSERT(sizeof(struct pir_desc) == 64);
+
+/* Index into the 'guest_msrs[]' array */
+enum {
+	IDX_MSR_LSTAR,
+	IDX_MSR_CSTAR,
+	IDX_MSR_STAR,
+	IDX_MSR_SF_MASK,
+	IDX_MSR_KGSBASE,
+	IDX_MSR_PAT,
+	GUEST_MSR_NUM		/* must be the last enumeration */
+};
+
+/* virtual machine softc */
+struct vmx {
+	struct vmcs	vmcs[VM_MAXCPU];	/* one vmcs per virtual cpu */
+	struct apic_page apic_page[VM_MAXCPU];	/* one apic page per vcpu */
+	char		msr_bitmap[PAGE_SIZE];
+	struct pir_desc	pir_desc[VM_MAXCPU];
+	uint64_t	guest_msrs[VM_MAXCPU][GUEST_MSR_NUM];
+	struct vmxctx	ctx[VM_MAXCPU];
+	struct vmxcap	cap[VM_MAXCPU];
+	struct vmxstate	state[VM_MAXCPU];
+	uint64_t	eptp;
+	struct vm	*vm;
+	long		eptgen[MAXCPU];		/* cached pmap->pm_eptgen */
+};
+CTASSERT((offsetof(struct vmx, vmcs) & PAGE_MASK) == 0);
+CTASSERT((offsetof(struct vmx, msr_bitmap) & PAGE_MASK) == 0);
+CTASSERT((offsetof(struct vmx, pir_desc[0]) & 63) == 0);
+
+#define	VMX_GUEST_VMEXIT	0
+#define	VMX_VMRESUME_ERROR	1
+#define	VMX_VMLAUNCH_ERROR	2
+#define	VMX_INVEPT_ERROR	3
+int	vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx, int launched);
+void	vmx_call_isr(uintptr_t entry);
+
+u_long	vmx_fix_cr0(u_long cr0);
+u_long	vmx_fix_cr4(u_long cr4);
+
+int	vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
+
+extern char	vmx_exit_guest[];
+extern char	vmx_exit_guest_flush_rsb[];
+
+#endif


Property changes on: trunk/sys/amd64/vmm/intel/vmx.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/intel/vmx_controls.h
===================================================================
--- trunk/sys/amd64/vmm/intel/vmx_controls.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmx_controls.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,97 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_controls.h 266339 2014-05-17 19:11:08Z jhb $
+ */
+
+#ifndef _VMX_CONTROLS_H_
+#define	_VMX_CONTROLS_H_
+
+/* Pin-Based VM-Execution Controls */
+#define	PINBASED_EXTINT_EXITING		(1 << 0)
+#define	PINBASED_NMI_EXITING		(1 << 3)
+#define	PINBASED_VIRTUAL_NMI		(1 << 5)
+#define	PINBASED_PREMPTION_TIMER	(1 << 6)
+#define	PINBASED_POSTED_INTERRUPT	(1 << 7)
+
+/* Primary Processor-Based VM-Execution Controls */
+#define	PROCBASED_INT_WINDOW_EXITING	(1 << 2)
+#define	PROCBASED_TSC_OFFSET		(1 << 3)
+#define	PROCBASED_HLT_EXITING		(1 << 7)
+#define	PROCBASED_INVLPG_EXITING	(1 << 9)
+#define	PROCBASED_MWAIT_EXITING		(1 << 10)
+#define	PROCBASED_RDPMC_EXITING		(1 << 11)
+#define	PROCBASED_RDTSC_EXITING		(1 << 12)
+#define	PROCBASED_CR3_LOAD_EXITING	(1 << 15)
+#define	PROCBASED_CR3_STORE_EXITING	(1 << 16)
+#define	PROCBASED_CR8_LOAD_EXITING	(1 << 19)
+#define	PROCBASED_CR8_STORE_EXITING	(1 << 20)
+#define	PROCBASED_USE_TPR_SHADOW	(1 << 21)
+#define	PROCBASED_NMI_WINDOW_EXITING	(1 << 22)
+#define PROCBASED_MOV_DR_EXITING	(1 << 23)
+#define	PROCBASED_IO_EXITING		(1 << 24)
+#define	PROCBASED_IO_BITMAPS		(1 << 25)
+#define	PROCBASED_MTF			(1 << 27)
+#define	PROCBASED_MSR_BITMAPS		(1 << 28)
+#define	PROCBASED_MONITOR_EXITING	(1 << 29)
+#define	PROCBASED_PAUSE_EXITING		(1 << 30)
+#define	PROCBASED_SECONDARY_CONTROLS	(1U << 31)
+
+/* Secondary Processor-Based VM-Execution Controls */
+#define	PROCBASED2_VIRTUALIZE_APIC_ACCESSES	(1 << 0)
+#define	PROCBASED2_ENABLE_EPT			(1 << 1)
+#define	PROCBASED2_DESC_TABLE_EXITING		(1 << 2)
+#define	PROCBASED2_ENABLE_RDTSCP		(1 << 3)
+#define	PROCBASED2_VIRTUALIZE_X2APIC_MODE	(1 << 4)
+#define	PROCBASED2_ENABLE_VPID			(1 << 5)
+#define	PROCBASED2_WBINVD_EXITING		(1 << 6)
+#define	PROCBASED2_UNRESTRICTED_GUEST		(1 << 7)
+#define	PROCBASED2_APIC_REGISTER_VIRTUALIZATION	(1 << 8)
+#define	PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY	(1 << 9)
+#define	PROCBASED2_PAUSE_LOOP_EXITING		(1 << 10)
+#define	PROCBASED2_ENABLE_INVPCID		(1 << 12)
+
+/* VM Exit Controls */
+#define	VM_EXIT_SAVE_DEBUG_CONTROLS	(1 << 2)
+#define	VM_EXIT_HOST_LMA		(1 << 9)
+#define	VM_EXIT_LOAD_PERF_GLOBAL_CTRL	(1 << 12)
+#define	VM_EXIT_ACKNOWLEDGE_INTERRUPT	(1 << 15)
+#define	VM_EXIT_SAVE_PAT		(1 << 18)
+#define	VM_EXIT_LOAD_PAT		(1 << 19)
+#define	VM_EXIT_SAVE_EFER		(1 << 20)
+#define	VM_EXIT_LOAD_EFER		(1 << 21)
+#define	VM_EXIT_SAVE_PREEMPTION_TIMER	(1 << 22)
+
+/* VM Entry Controls */
+#define	VM_ENTRY_LOAD_DEBUG_CONTROLS	(1 << 2)
+#define	VM_ENTRY_GUEST_LMA		(1 << 9)
+#define	VM_ENTRY_INTO_SMM		(1 << 10)
+#define	VM_ENTRY_DEACTIVATE_DUAL_MONITOR (1 << 11)
+#define	VM_ENTRY_LOAD_PERF_GLOBAL_CTRL	(1 << 13)
+#define	VM_ENTRY_LOAD_PAT		(1 << 14)
+#define	VM_ENTRY_LOAD_EFER		(1 << 15)
+
+#endif


Property changes on: trunk/sys/amd64/vmm/intel/vmx_controls.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/intel/vmx_cpufunc.h
===================================================================
--- trunk/sys/amd64/vmm/intel/vmx_cpufunc.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmx_cpufunc.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,219 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_cpufunc.h 245678 2013-01-20 03:42:49Z neel $
+ */
+
+#ifndef	_VMX_CPUFUNC_H_
+#define	_VMX_CPUFUNC_H_
+
+struct vmcs;
+
+/*
+ * Section 5.2 "Conventions" from Intel Architecture Manual 2B.
+ *
+ *			error
+ * VMsucceed		  0
+ * VMFailInvalid	  1
+ * VMFailValid		  2	see also VMCS VM-Instruction Error Field
+ */
+#define	VM_SUCCESS		0
+#define	VM_FAIL_INVALID		1
+#define	VM_FAIL_VALID		2
+#define	VMX_SET_ERROR_CODE \
+	"	jnc 1f;"						\
+	"	mov $1, %[error];"	/* CF: error = 1 */		\
+	"	jmp 3f;"						\
+	"1:	jnz 2f;"						\
+	"	mov $2, %[error];"	/* ZF: error = 2 */		\
+	"	jmp 3f;"						\
+	"2:	mov $0, %[error];"					\
+	"3:"
+
+/* returns 0 on success and non-zero on failure */
+static __inline int
+vmxon(char *region)
+{
+	int error;
+	uint64_t addr;
+
+	addr = vtophys(region);
+	__asm __volatile("vmxon %[addr];"
+			 VMX_SET_ERROR_CODE
+			 : [error] "=r" (error)
+			 : [addr] "m" (*(uint64_t *)&addr)
+			 : "memory");
+
+	return (error);
+}
+
+/* returns 0 on success and non-zero on failure */
+static __inline int
+vmclear(struct vmcs *vmcs)
+{
+	int error;
+	uint64_t addr;
+
+	addr = vtophys(vmcs);
+	__asm __volatile("vmclear %[addr];"
+			 VMX_SET_ERROR_CODE
+			 : [error] "=r" (error)
+			 : [addr] "m" (*(uint64_t *)&addr)
+			 : "memory");
+	return (error);
+}
+
+static __inline void
+vmxoff(void)
+{
+
+	__asm __volatile("vmxoff");
+}
+
+static __inline void
+vmptrst(uint64_t *addr)
+{
+
+	__asm __volatile("vmptrst %[addr]" :: [addr]"m" (*addr) : "memory");
+}
+
+static __inline int
+vmptrld(struct vmcs *vmcs)
+{
+	int error;
+	uint64_t addr;
+
+	addr = vtophys(vmcs);
+	__asm __volatile("vmptrld %[addr];"
+			 VMX_SET_ERROR_CODE
+			 : [error] "=r" (error)
+			 : [addr] "m" (*(uint64_t *)&addr)
+			 : "memory");
+	return (error);
+}
+
+static __inline int
+vmwrite(uint64_t reg, uint64_t val)
+{
+	int error;
+
+	__asm __volatile("vmwrite %[val], %[reg];"
+			 VMX_SET_ERROR_CODE
+			 : [error] "=r" (error)
+			 : [val] "r" (val), [reg] "r" (reg)
+			 : "memory");
+
+	return (error);
+}
+
+static __inline int
+vmread(uint64_t r, uint64_t *addr)
+{
+	int error;
+
+	__asm __volatile("vmread %[r], %[addr];"
+			 VMX_SET_ERROR_CODE
+			 : [error] "=r" (error)
+			 : [r] "r" (r), [addr] "m" (*addr)
+			 : "memory");
+
+	return (error);
+}
+
+static void __inline
+VMCLEAR(struct vmcs *vmcs)
+{
+	int err;
+
+	err = vmclear(vmcs);
+	if (err != 0)
+		panic("%s: vmclear(%p) error %d", __func__, vmcs, err);
+
+	critical_exit();
+}
+
+static void __inline
+VMPTRLD(struct vmcs *vmcs)
+{
+	int err;
+
+	critical_enter();
+
+	err = vmptrld(vmcs);
+	if (err != 0)
+		panic("%s: vmptrld(%p) error %d", __func__, vmcs, err);
+}
+
+#define	INVVPID_TYPE_ADDRESS		0UL
+#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
+#define	INVVPID_TYPE_ALL_CONTEXTS	2UL
+
+struct invvpid_desc {
+	uint16_t	vpid;
+	uint16_t	_res1;
+	uint32_t	_res2;
+	uint64_t	linear_addr;
+};
+CTASSERT(sizeof(struct invvpid_desc) == 16);
+
+static void __inline
+invvpid(uint64_t type, struct invvpid_desc desc)
+{
+	int error;
+
+	__asm __volatile("invvpid %[desc], %[type];"
+			 VMX_SET_ERROR_CODE
+			 : [error] "=r" (error)
+			 : [desc] "m" (desc), [type] "r" (type)
+			 : "memory");
+
+	if (error)
+		panic("invvpid error %d", error);
+}
+
+#define	INVEPT_TYPE_SINGLE_CONTEXT	1UL
+#define	INVEPT_TYPE_ALL_CONTEXTS	2UL
+struct invept_desc {
+	uint64_t	eptp;
+	uint64_t	_res;
+};
+CTASSERT(sizeof(struct invept_desc) == 16);
+
+static void __inline
+invept(uint64_t type, struct invept_desc desc)
+{
+	int error;
+
+	__asm __volatile("invept %[desc], %[type];"
+			 VMX_SET_ERROR_CODE
+			 : [error] "=r" (error)
+			 : [desc] "m" (desc), [type] "r" (type)
+			 : "memory");
+
+	if (error)
+		panic("invept error %d", error);
+}
+#endif


Property changes on: trunk/sys/amd64/vmm/intel/vmx_cpufunc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/intel/vmx_genassym.c
===================================================================
--- trunk/sys/amd64/vmm/intel/vmx_genassym.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmx_genassym.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,89 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_genassym.c 268953 2014-07-21 19:08:02Z jhb $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_genassym.c 268953 2014-07-21 19:08:02Z jhb $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/assym.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/vmm.h>
+#include "vmx_cpufunc.h"
+#include "vmx.h"
+
+ASSYM(VMXCTX_GUEST_RDI, offsetof(struct vmxctx, guest_rdi));
+ASSYM(VMXCTX_GUEST_RSI, offsetof(struct vmxctx, guest_rsi));
+ASSYM(VMXCTX_GUEST_RDX, offsetof(struct vmxctx, guest_rdx));
+ASSYM(VMXCTX_GUEST_RCX, offsetof(struct vmxctx, guest_rcx));
+ASSYM(VMXCTX_GUEST_R8, offsetof(struct vmxctx, guest_r8));
+ASSYM(VMXCTX_GUEST_R9, offsetof(struct vmxctx, guest_r9));
+ASSYM(VMXCTX_GUEST_RAX, offsetof(struct vmxctx, guest_rax));
+ASSYM(VMXCTX_GUEST_RBX, offsetof(struct vmxctx, guest_rbx));
+ASSYM(VMXCTX_GUEST_RBP, offsetof(struct vmxctx, guest_rbp));
+ASSYM(VMXCTX_GUEST_R10, offsetof(struct vmxctx, guest_r10));
+ASSYM(VMXCTX_GUEST_R11, offsetof(struct vmxctx, guest_r11));
+ASSYM(VMXCTX_GUEST_R12, offsetof(struct vmxctx, guest_r12));
+ASSYM(VMXCTX_GUEST_R13, offsetof(struct vmxctx, guest_r13));
+ASSYM(VMXCTX_GUEST_R14, offsetof(struct vmxctx, guest_r14));
+ASSYM(VMXCTX_GUEST_R15, offsetof(struct vmxctx, guest_r15));
+ASSYM(VMXCTX_GUEST_CR2, offsetof(struct vmxctx, guest_cr2));
+
+ASSYM(VMXCTX_HOST_R15, offsetof(struct vmxctx, host_r15));
+ASSYM(VMXCTX_HOST_R14, offsetof(struct vmxctx, host_r14));
+ASSYM(VMXCTX_HOST_R13, offsetof(struct vmxctx, host_r13));
+ASSYM(VMXCTX_HOST_R12, offsetof(struct vmxctx, host_r12));
+ASSYM(VMXCTX_HOST_RBP, offsetof(struct vmxctx, host_rbp));
+ASSYM(VMXCTX_HOST_RSP, offsetof(struct vmxctx, host_rsp));
+ASSYM(VMXCTX_HOST_RBX, offsetof(struct vmxctx, host_rbx));
+
+ASSYM(VMXCTX_INST_FAIL_STATUS, offsetof(struct vmxctx, inst_fail_status));
+ASSYM(VMXCTX_PMAP, offsetof(struct vmxctx, pmap));
+
+ASSYM(VMX_EPTGEN, offsetof(struct vmx, eptgen));
+ASSYM(VMX_EPTP, offsetof(struct vmx, eptp));
+
+ASSYM(VM_FAIL_INVALID,	VM_FAIL_INVALID);
+ASSYM(VM_FAIL_VALID,	VM_FAIL_VALID);
+ASSYM(VMX_GUEST_VMEXIT, VMX_GUEST_VMEXIT);
+ASSYM(VMX_VMRESUME_ERROR, VMX_VMRESUME_ERROR);
+ASSYM(VMX_VMLAUNCH_ERROR, VMX_VMLAUNCH_ERROR);
+ASSYM(VMX_INVEPT_ERROR, VMX_INVEPT_ERROR);
+
+ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
+
+ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
+ASSYM(PM_EPTGEN, offsetof(struct pmap, pm_eptgen));
+
+ASSYM(KERNEL_SS, GSEL(GDATA_SEL, SEL_KPL));
+ASSYM(KERNEL_CS, GSEL(GCODE_SEL, SEL_KPL));


Property changes on: trunk/sys/amd64/vmm/intel/vmx_genassym.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/intel/vmx_msr.c
===================================================================
--- trunk/sys/amd64/vmm/intel/vmx_msr.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmx_msr.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,487 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_msr.c 284900 2015-06-28 03:22:26Z neel $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_msr.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/clock.h>
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+#include <machine/vmm.h>
+
+#include "vmx.h"
+#include "vmx_msr.h"
+
+static boolean_t
+vmx_ctl_allows_one_setting(uint64_t msr_val, int bitpos)
+{
+
+	if (msr_val & (1UL << (bitpos + 32)))
+		return (TRUE);
+	else
+		return (FALSE);
+}
+
+static boolean_t
+vmx_ctl_allows_zero_setting(uint64_t msr_val, int bitpos)
+{
+
+	if ((msr_val & (1UL << bitpos)) == 0)
+		return (TRUE);
+	else
+		return (FALSE);
+}
+
+uint32_t
+vmx_revision(void)
+{
+
+	return (rdmsr(MSR_VMX_BASIC) & 0xffffffff);
+}
+
+/*
+ * Generate a bitmask to be used for the VMCS execution control fields.
+ *
+ * The caller specifies what bits should be set to one in 'ones_mask'
+ * and what bits should be set to zero in 'zeros_mask'. The don't-care
+ * bits are set to the default value. The default values are obtained
+ * based on "Algorithm 3" in Section 27.5.1 "Algorithms for Determining
+ * VMX Capabilities".
+ *
+ * Returns zero on success and non-zero on error.
+ */
+int
+vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
+	       uint32_t zeros_mask, uint32_t *retval)
+{
+	int i;
+	uint64_t val, trueval;
+	boolean_t true_ctls_avail, one_allowed, zero_allowed;
+
+	/* We cannot ask the same bit to be set to both '1' and '0' */
+	if ((ones_mask ^ zeros_mask) != (ones_mask | zeros_mask))
+		return (EINVAL);
+
+	if (rdmsr(MSR_VMX_BASIC) & (1UL << 55))
+		true_ctls_avail = TRUE;
+	else
+		true_ctls_avail = FALSE;
+
+	val = rdmsr(ctl_reg);
+	if (true_ctls_avail)
+		trueval = rdmsr(true_ctl_reg);		/* step c */
+	else
+		trueval = val;				/* step a */
+
+	for (i = 0; i < 32; i++) {
+		one_allowed = vmx_ctl_allows_one_setting(trueval, i);
+		zero_allowed = vmx_ctl_allows_zero_setting(trueval, i);
+
+		KASSERT(one_allowed || zero_allowed,
+			("invalid zero/one setting for bit %d of ctl 0x%0x, "
+			 "truectl 0x%0x\n", i, ctl_reg, true_ctl_reg));
+
+		if (zero_allowed && !one_allowed) {		/* b(i),c(i) */
+			if (ones_mask & (1 << i))
+				return (EINVAL);
+			*retval &= ~(1 << i);
+		} else if (one_allowed && !zero_allowed) {	/* b(i),c(i) */
+			if (zeros_mask & (1 << i))
+				return (EINVAL);
+			*retval |= 1 << i;
+		} else {
+			if (zeros_mask & (1 << i))	/* b(ii),c(ii) */
+				*retval &= ~(1 << i);
+			else if (ones_mask & (1 << i)) /* b(ii), c(ii) */
+				*retval |= 1 << i;
+			else if (!true_ctls_avail)
+				*retval &= ~(1 << i);	/* b(iii) */
+			else if (vmx_ctl_allows_zero_setting(val, i))/* c(iii)*/
+				*retval &= ~(1 << i);
+			else if (vmx_ctl_allows_one_setting(val, i)) /* c(iv) */
+				*retval |= 1 << i;
+			else {
+				panic("vmx_set_ctlreg: unable to determine "
+				      "correct value of ctl bit %d for msr "
+				      "0x%0x and true msr 0x%0x", i, ctl_reg,
+				      true_ctl_reg);
+			}
+		}
+	}
+
+	return (0);
+}
+
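vmx_ctl_allows_one_setting() and vmx_ctl_allows_zero_setting() decode the allowed-1/allowed-0 encoding of the VMX capability MSRs consumed by vmx_set_ctlreg(): the low 32 bits give the allowed 0-settings and the high 32 bits the allowed 1-settings. A minimal sketch with a hypothetical capability value rather than a real MSR read:

#include <stdint.h>
#include <stdio.h>

/* Low 32 bits: allowed 0-settings.  High 32 bits: allowed 1-settings. */
static int
allows_one(uint64_t msr_val, int bitpos)
{
	return ((msr_val & (1UL << (bitpos + 32))) != 0);
}

static int
allows_zero(uint64_t msr_val, int bitpos)
{
	return ((msr_val & (1UL << bitpos)) == 0);
}

int
main(void)
{
	/* Hypothetical capability MSR: bit 7 is mandatory 1, bit 2 flexible. */
	uint64_t cap = (1UL << 7) | (1UL << (32 + 7)) | (1UL << (32 + 2));

	printf("bit 7: can be 1? %d  can be 0? %d\n",
	    allows_one(cap, 7), allows_zero(cap, 7));
	printf("bit 2: can be 1? %d  can be 0? %d\n",
	    allows_one(cap, 2), allows_zero(cap, 2));
	return (0);
}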
+void
+msr_bitmap_initialize(char *bitmap)
+{
+
+	memset(bitmap, 0xff, PAGE_SIZE);
+}
+
+int
+msr_bitmap_change_access(char *bitmap, u_int msr, int access)
+{
+	int byte, bit;
+
+	if (msr <= 0x00001FFF)
+		byte = msr / 8;
+	else if (msr >= 0xC0000000 && msr <= 0xC0001FFF)
+		byte = 1024 + (msr - 0xC0000000) / 8;
+	else
+		return (EINVAL);
+
+	bit = msr & 0x7;
+
+	if (access & MSR_BITMAP_ACCESS_READ)
+		bitmap[byte] &= ~(1 << bit);
+	else
+		bitmap[byte] |= 1 << bit;
+
+	byte += 2048;
+	if (access & MSR_BITMAP_ACCESS_WRITE)
+		bitmap[byte] &= ~(1 << bit);
+	else
+		bitmap[byte] |= 1 << bit;
+
+	return (0);
+}
+
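msr_bitmap_change_access() relies on the fixed layout of the 4KB MSR bitmap: read bitmaps for the low and high MSR ranges at offsets 0 and 1024, write bitmaps 2048 bytes further on. A standalone sketch of the offset arithmetic (range checking elided; the two MSR numbers are just well-known examples):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the layout used by msr_bitmap_change_access(); no range checks. */
static void
bitmap_offsets(uint32_t msr)
{
	uint32_t byte, bit;

	if (msr <= 0x00001FFF)
		byte = msr / 8;
	else
		byte = 1024 + (msr - 0xC0000000) / 8;
	bit = msr & 0x7;

	printf("MSR %#010x: read byte %u, write byte %u, bit %u\n",
	    msr, byte, byte + 2048, bit);
}

int
main(void)
{
	bitmap_offsets(0x00000277);	/* IA32_PAT, low MSR range */
	bitmap_offsets(0xC0000080);	/* IA32_EFER, high MSR range */
	return (0);
}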
+static uint64_t misc_enable;
+static uint64_t platform_info;
+static uint64_t turbo_ratio_limit;
+static uint64_t host_msrs[GUEST_MSR_NUM];
+
+static bool
+nehalem_cpu(void)
+{
+	u_int family, model;
+
+	/*
+	 * The family:model numbers belonging to the Nehalem microarchitecture
+	 * are documented in Section 35.5, Intel SDM dated Feb 2014.
+	 */
+	family = CPUID_TO_FAMILY(cpu_id);
+	model = CPUID_TO_MODEL(cpu_id);
+	if (family == 0x6) {
+		switch (model) {
+		case 0x1A:
+		case 0x1E:
+		case 0x1F:
+		case 0x2E:
+			return (true);
+		default:
+			break;
+		}
+	}
+	return (false);
+}
+
+static bool
+westmere_cpu(void)
+{
+	u_int family, model;
+
+	/*
+	 * The family:model numbers belonging to the Westmere microarchitecture
+	 * are documented in Section 35.6, Intel SDM dated Feb 2014.
+	 */
+	family = CPUID_TO_FAMILY(cpu_id);
+	model = CPUID_TO_MODEL(cpu_id);
+	if (family == 0x6) {
+		switch (model) {
+		case 0x25:
+		case 0x2C:
+			return (true);
+		default:
+			break;
+		}
+	}
+	return (false);
+}
+
+static bool
+pat_valid(uint64_t val)
+{
+	int i, pa;
+
+	/*
+	 * From Intel SDM: Table "Memory Types That Can Be Encoded With PAT"
+	 *
+	 * Extract PA0 through PA7 and validate that each one encodes a
+	 * valid memory type.
+	 */
+	for (i = 0; i < 8; i++) {
+		pa = (val >> (i * 8)) & 0xff;
+		if (pa == 2 || pa == 3 || pa >= 8)
+			return (false);
+	}
+	return (true);
+}
+
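pat_valid() simply walks the eight PA fields. A userland sketch of the same check, run against what is assumed here to be the power-on IA32_PAT value and against an obviously bad one:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same walk over PA0..PA7 as pat_valid() above. */
static bool
pat_ok(uint64_t val)
{
	int i, pa;

	for (i = 0; i < 8; i++) {
		pa = (val >> (i * 8)) & 0xff;
		if (pa == 2 || pa == 3 || pa >= 8)
			return (false);
	}
	return (true);
}

int
main(void)
{
	printf("0x0007040600070406 valid: %d\n", pat_ok(0x0007040600070406UL));
	printf("PA0 = 2 valid: %d\n", pat_ok(0x0000000000000002UL));
	return (0);
}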
+void
+vmx_msr_init(void)
+{
+	uint64_t bus_freq, ratio;
+	int i;
+
+	/*
+	 * It is safe to cache the values of the following MSRs because
+	 * they don't change based on curcpu, curproc or curthread.
+	 */
+	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+
+	/*
+	 * Initialize emulated MSRs
+	 */
+	misc_enable = rdmsr(MSR_IA32_MISC_ENABLE);
+	/*
+	 * Set mandatory bits
+	 *  11:   branch trace disabled
+	 *  12:   PEBS unavailable
+	 * Clear unsupported features
+	 *  16:   SpeedStep enable
+	 *  18:   enable MONITOR FSM
+	 */
+	misc_enable |= (1 << 12) | (1 << 11);
+	misc_enable &= ~((1 << 18) | (1 << 16));
+
+	if (nehalem_cpu() || westmere_cpu())
+		bus_freq = 133330000;		/* 133 MHz */
+	else
+		bus_freq = 100000000;		/* 100 MHz */
+
+	/*
+	 * XXXtime
+	 * The ratio should really be based on the virtual TSC frequency as
+	 * opposed to the host TSC.
+	 */
+	ratio = (tsc_freq / bus_freq) & 0xff;
+
+	/*
+	 * The register definition is based on the micro-architecture
+	 * but the following bits are always the same:
+	 * [15:8]  Maximum Non-Turbo Ratio
+	 * [28]    Programmable Ratio Limit for Turbo Mode
+	 * [29]    Programmable TDC-TDP Limit for Turbo Mode
+	 * [47:40] Maximum Efficiency Ratio
+	 *
+	 * The other bits can be safely set to 0 on all
+	 * micro-architectures up to Haswell.
+	 */
+	platform_info = (ratio << 8) | (ratio << 40);
+
+	/*
+	 * The number of valid bits in the MSR_TURBO_RATIO_LIMITx register is
+	 * dependent on the maximum cores per package supported by the micro-
+	 * architecture. For example, Westmere supports 6 cores per package
+	 * and uses the low 48 bits. Sandybridge supports 8 cores per package
+	 * and uses all 64 bits.
+	 *
+	 * However, the unused bits are reserved so we pretend that all bits
+	 * in this MSR are valid.
+	 */
+	for (i = 0; i < 8; i++)
+		turbo_ratio_limit = (turbo_ratio_limit << 8) | ratio;
+}
+
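The ratio, platform_info and turbo_ratio_limit arithmetic above can be checked in isolation. A sketch with a hypothetical 2.67 GHz Nehalem-class part (the frequencies are assumptions, not measured values):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t tsc_freq = 2666600000UL;	/* assumed 2.67 GHz part */
	uint64_t bus_freq = 133330000UL;	/* Nehalem/Westmere bus clock */
	uint64_t ratio, platform_info, turbo_ratio_limit = 0;
	int i;

	ratio = (tsc_freq / bus_freq) & 0xff;		/* 20, i.e. 0x14 */
	platform_info = (ratio << 8) | (ratio << 40);
	for (i = 0; i < 8; i++)
		turbo_ratio_limit = (turbo_ratio_limit << 8) | ratio;

	printf("ratio %#jx platform_info %#jx turbo %#jx\n",
	    (uintmax_t)ratio, (uintmax_t)platform_info,
	    (uintmax_t)turbo_ratio_limit);
	return (0);
}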
+void
+vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
+{
+	uint64_t *guest_msrs;
+
+	guest_msrs = vmx->guest_msrs[vcpuid];
+
+	/*
+	 * The permissions bitmap is shared between all vcpus so initialize it
+	 * once when initializing the vBSP.
+	 */
+	if (vcpuid == 0) {
+		guest_msr_rw(vmx, MSR_LSTAR);
+		guest_msr_rw(vmx, MSR_CSTAR);
+		guest_msr_rw(vmx, MSR_STAR);
+		guest_msr_rw(vmx, MSR_SF_MASK);
+		guest_msr_rw(vmx, MSR_KGSBASE);
+	}
+
+	/*
+	 * Initialize guest IA32_PAT MSR with default value after reset.
+	 */
+	guest_msrs[IDX_MSR_PAT] = PAT_VALUE(0, PAT_WRITE_BACK) |
+	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
+	    PAT_VALUE(2, PAT_UNCACHED)		|
+	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
+	    PAT_VALUE(4, PAT_WRITE_BACK)	|
+	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
+	    PAT_VALUE(6, PAT_UNCACHED)		|
+	    PAT_VALUE(7, PAT_UNCACHEABLE);
+
+	return;
+}
+
+void
+vmx_msr_guest_enter(struct vmx *vmx, int vcpuid)
+{
+	uint64_t *guest_msrs = vmx->guest_msrs[vcpuid];
+
+	/* Save host MSRs (if any) and restore guest MSRs */
+	wrmsr(MSR_LSTAR, guest_msrs[IDX_MSR_LSTAR]);
+	wrmsr(MSR_CSTAR, guest_msrs[IDX_MSR_CSTAR]);
+	wrmsr(MSR_STAR, guest_msrs[IDX_MSR_STAR]);
+	wrmsr(MSR_SF_MASK, guest_msrs[IDX_MSR_SF_MASK]);
+	wrmsr(MSR_KGSBASE, guest_msrs[IDX_MSR_KGSBASE]);
+}
+
+void
+vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
+{
+	uint64_t *guest_msrs = vmx->guest_msrs[vcpuid];
+
+	/* Save guest MSRs */
+	guest_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+	guest_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+	guest_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+	guest_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+	guest_msrs[IDX_MSR_KGSBASE] = rdmsr(MSR_KGSBASE);
+
+	/* Restore host MSRs */
+	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
+	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
+	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
+	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);
+
+	/* MSR_KGSBASE will be restored on the way back to userspace */
+}
+
+int
+vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
+{
+	const uint64_t *guest_msrs;
+	int error;
+
+	guest_msrs = vmx->guest_msrs[vcpuid];
+	error = 0;
+
+	switch (num) {
+	case MSR_MCG_CAP:
+	case MSR_MCG_STATUS:
+		*val = 0;
+		break;
+	case MSR_MTRRcap:
+	case MSR_MTRRdefType:
+	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
+	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
+	case MSR_MTRR64kBase:
+		*val = 0;
+		break;
+	case MSR_IA32_MISC_ENABLE:
+		*val = misc_enable;
+		break;
+	case MSR_PLATFORM_INFO:
+		*val = platform_info;
+		break;
+	case MSR_TURBO_RATIO_LIMIT:
+	case MSR_TURBO_RATIO_LIMIT1:
+		*val = turbo_ratio_limit;
+		break;
+	case MSR_PAT:
+		*val = guest_msrs[IDX_MSR_PAT];
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+	return (error);
+}
+
+int
+vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
+{
+	uint64_t *guest_msrs;
+	uint64_t changed;
+	int error;
+	
+	guest_msrs = vmx->guest_msrs[vcpuid];
+	error = 0;
+
+	switch (num) {
+	case MSR_MCG_CAP:
+	case MSR_MCG_STATUS:
+		break;		/* ignore writes */
+	case MSR_MTRRcap:
+		vm_inject_gp(vmx->vm, vcpuid);
+		break;
+	case MSR_MTRRdefType:
+	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
+	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
+	case MSR_MTRR64kBase:
+		break;		/* Ignore writes */
+	case MSR_IA32_MISC_ENABLE:
+		changed = val ^ misc_enable;
+		/*
+		 * If the host has disabled the NX feature then the guest
+		 * also cannot use it. However, a Linux guest will try to
+		 * enable the NX feature by writing to the MISC_ENABLE MSR.
+		 *
+		 * This can be safely ignored because the memory management
+		 * code looks at CPUID.80000001H:EDX.NX to check if the
+		 * functionality is actually enabled.
+		 */
+		changed &= ~(1UL << 34);
+
+		/*
+		 * Punt to userspace if any other bits are being modified.
+		 */
+		if (changed)
+			error = EINVAL;
+
+		break;
+	case MSR_PAT:
+		if (pat_valid(val))
+			guest_msrs[IDX_MSR_PAT] = val;
+		else
+			vm_inject_gp(vmx->vm, vcpuid);
+		break;
+	case MSR_TSC:
+		error = vmx_set_tsc_offset(vmx, vcpuid, val - rdtsc());
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	return (error);
+}


Property changes on: trunk/sys/amd64/vmm/intel/vmx_msr.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/intel/vmx_msr.h
===================================================================
--- trunk/sys/amd64/vmm/intel/vmx_msr.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmx_msr.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,71 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_msr.h 276349 2014-12-28 21:27:13Z neel $
+ */
+
+#ifndef _VMX_MSR_H_
+#define	_VMX_MSR_H_
+
+struct vmx;
+
+void vmx_msr_init(void);
+void vmx_msr_guest_init(struct vmx *vmx, int vcpuid);
+void vmx_msr_guest_enter(struct vmx *vmx, int vcpuid);
+void vmx_msr_guest_exit(struct vmx *vmx, int vcpuid);
+int vmx_rdmsr(struct vmx *, int vcpuid, u_int num, uint64_t *val, bool *retu);
+int vmx_wrmsr(struct vmx *, int vcpuid, u_int num, uint64_t val, bool *retu);
+
+uint32_t vmx_revision(void);
+
+int vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
+		   uint32_t zeros_mask, uint32_t *retval);
+
+/*
+ * According to Section 21.10.4 "Software Access to Related Structures",
+ * changes to data structures pointed to by the VMCS must be made only when
+ * there is no logical processor with a current VMCS that points to the
+ * data structure.
+ *
+ * This pretty much limits us to configuring the MSR bitmap before VMCS
+ * initialization for SMP VMs. Unless of course we do it the hard way - which
+ * would involve some form of synchronization between the vcpus to vmclear
+ * all VMCSs that point to the bitmap.
+ */
+#define	MSR_BITMAP_ACCESS_NONE	0x0
+#define	MSR_BITMAP_ACCESS_READ	0x1
+#define	MSR_BITMAP_ACCESS_WRITE	0x2
+#define	MSR_BITMAP_ACCESS_RW	(MSR_BITMAP_ACCESS_READ|MSR_BITMAP_ACCESS_WRITE)
+void	msr_bitmap_initialize(char *bitmap);
+int	msr_bitmap_change_access(char *bitmap, u_int msr, int access);
+
+#define	guest_msr_rw(vmx, msr) \
+    msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
+
+#define	guest_msr_ro(vmx, msr) \
+    msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ)
+
+#endif


Property changes on: trunk/sys/amd64/vmm/intel/vmx_msr.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
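
A hedged usage sketch for the bitmap helpers declared in vmx_msr.h above: the
VMX initialization code elsewhere in this commit is expected to give the guest
direct access to the syscall-related MSRs that vmx_msr_guest_enter()/exit()
swap by hand, leaving every other MSR intercepted. With 'vmx' naming the
per-VM softc, that amounts to roughly:

	msr_bitmap_initialize(vmx->msr_bitmap);
	if (guest_msr_rw(vmx, MSR_LSTAR) ||
	    guest_msr_rw(vmx, MSR_CSTAR) ||
	    guest_msr_rw(vmx, MSR_STAR) ||
	    guest_msr_rw(vmx, MSR_SF_MASK) ||
	    guest_msr_rw(vmx, MSR_KGSBASE))
		panic("vmx: error setting guest msr access");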
Added: trunk/sys/amd64/vmm/intel/vmx_support.S
===================================================================
--- trunk/sys/amd64/vmm/intel/vmx_support.S	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vmx_support.S	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,326 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * Copyright (c) 2013 Neel Natu <neel at freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_support.S 330713 2018-03-10 00:44:33Z tychon $
+ */
+
+#include <machine/asmacros.h>
+
+#include "vmx_assym.h"
+
+#ifdef SMP
+#define	LK	lock ;
+#else
+#define	LK
+#endif
+
+/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
+#define VENTER  push %rbp ; mov %rsp,%rbp
+#define VLEAVE  pop %rbp
+
+/*
+ * Save the guest context.
+ */
+#define	VMX_GUEST_SAVE							\
+	movq	%rdi,VMXCTX_GUEST_RDI(%rsp);				\
+	movq	%rsi,VMXCTX_GUEST_RSI(%rsp);				\
+	movq	%rdx,VMXCTX_GUEST_RDX(%rsp);				\
+	movq	%rcx,VMXCTX_GUEST_RCX(%rsp);				\
+	movq	%r8,VMXCTX_GUEST_R8(%rsp);				\
+	movq	%r9,VMXCTX_GUEST_R9(%rsp);				\
+	movq	%rax,VMXCTX_GUEST_RAX(%rsp);				\
+	movq	%rbx,VMXCTX_GUEST_RBX(%rsp);				\
+	movq	%rbp,VMXCTX_GUEST_RBP(%rsp);				\
+	movq	%r10,VMXCTX_GUEST_R10(%rsp);				\
+	movq	%r11,VMXCTX_GUEST_R11(%rsp);				\
+	movq	%r12,VMXCTX_GUEST_R12(%rsp);				\
+	movq	%r13,VMXCTX_GUEST_R13(%rsp);				\
+	movq	%r14,VMXCTX_GUEST_R14(%rsp);				\
+	movq	%r15,VMXCTX_GUEST_R15(%rsp);				\
+	movq	%cr2,%rdi;						\
+	movq	%rdi,VMXCTX_GUEST_CR2(%rsp);				\
+	movq	%rsp,%rdi;
+
+/*
+ * Assumes that %rdi holds a pointer to the 'vmxctx'.
+ *
+ * On "return" all registers are updated to reflect guest state. The two
+ * exceptions are %rip and %rsp. These registers are atomically switched
+ * by hardware from the guest area of the vmcs.
+ *
+ * We modify %rsp to point to the 'vmxctx' so we can use it to restore
+ * host context in case of an error with 'vmlaunch' or 'vmresume'.
+ */
+#define	VMX_GUEST_RESTORE						\
+	movq	%rdi,%rsp;						\
+	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
+	movq	%rsi,%cr2;						\
+	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
+	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
+	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
+	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
+	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
+	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
+	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
+	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
+	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
+	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
+	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
+	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
+	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
+	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
+	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore rdi last */
+
+/*
+ * Clobber the remaining registers with guest contents so they can't
+ * be misused.
+ */
+#define	VMX_GUEST_CLOBBER						\
+	xor	%rax, %rax;						\
+	xor	%rcx, %rcx;						\
+	xor	%rdx, %rdx;						\
+	xor	%rsi, %rsi;						\
+	xor	%r8, %r8;						\
+	xor	%r9, %r9;						\
+	xor	%r10, %r10;						\
+	xor	%r11, %r11;
+
+/*
+ * Save and restore the host context.
+ *
+ * Assumes that %rdi holds a pointer to the 'vmxctx'.
+ */
+#define	VMX_HOST_SAVE							\
+	movq    %r15, VMXCTX_HOST_R15(%rdi);				\
+	movq    %r14, VMXCTX_HOST_R14(%rdi);				\
+	movq    %r13, VMXCTX_HOST_R13(%rdi);				\
+	movq    %r12, VMXCTX_HOST_R12(%rdi);				\
+	movq    %rbp, VMXCTX_HOST_RBP(%rdi);				\
+	movq    %rsp, VMXCTX_HOST_RSP(%rdi);				\
+	movq    %rbx, VMXCTX_HOST_RBX(%rdi);				\
+
+#define	VMX_HOST_RESTORE						\
+	movq	VMXCTX_HOST_R15(%rdi), %r15;				\
+	movq	VMXCTX_HOST_R14(%rdi), %r14;				\
+	movq	VMXCTX_HOST_R13(%rdi), %r13;				\
+	movq	VMXCTX_HOST_R12(%rdi), %r12;				\
+	movq	VMXCTX_HOST_RBP(%rdi), %rbp;				\
+	movq	VMXCTX_HOST_RSP(%rdi), %rsp;				\
+	movq	VMXCTX_HOST_RBX(%rdi), %rbx;				\
+
+/*
+ * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
+ * %rdi: pointer to the 'vmxctx'
+ * %rsi: pointer to the 'vmx'
+ * %edx: launch state of the VMCS
+ * Interrupts must be disabled on entry.
+ */
+ENTRY(vmx_enter_guest)
+	VENTER
+	/*
+	 * Save host state before doing anything else.
+	 */
+	VMX_HOST_SAVE
+
+	/*
+	 * Activate guest pmap on this cpu.
+	 */
+	movq	VMXCTX_PMAP(%rdi), %r11
+	movl	PCPU(CPUID), %eax
+	LK btsl	%eax, PM_ACTIVE(%r11)
+
+	/*
+	 * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
+	 * then we must invalidate all mappings associated with this EPTP.
+	 */
+	movq	PM_EPTGEN(%r11), %r10
+	cmpq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
+	je	guest_restore
+
+	/* Refresh 'vmx->eptgen[curcpu]' */
+	movq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
+
+	/* Setup the invept descriptor on the host stack */
+	mov	%rsp, %r11
+	movq	VMX_EPTP(%rsi), %rax
+	movq	%rax, -16(%r11)
+	movq	$0x0, -8(%r11)
+	mov	$0x1, %eax		/* Single context invalidate */
+	invept	-16(%r11), %rax
+	jbe	invept_error		/* Check invept instruction error */
+
+guest_restore:
+	cmpl	$0, %edx
+	je	do_launch
+
+	VMX_GUEST_RESTORE
+	vmresume
+	/*
+	 * In the common case 'vmresume' returns back to the host through
+	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
+	 *
+	 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
+	 */
+	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
+	movl	$VMX_VMRESUME_ERROR, %eax
+	jmp	decode_inst_error
+
+do_launch:
+	VMX_GUEST_RESTORE
+	vmlaunch
+	/*
+	 * In the common case 'vmlaunch' returns back to the host through
+	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
+	 *
+	 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
+	 */
+	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
+	movl	$VMX_VMLAUNCH_ERROR, %eax
+	jmp	decode_inst_error
+
+invept_error:
+	movl	$VMX_INVEPT_ERROR, %eax
+	jmp	decode_inst_error
+
+decode_inst_error:
+	movl	$VM_FAIL_VALID, %r11d
+	jz	inst_error
+	movl	$VM_FAIL_INVALID, %r11d
+inst_error:
+	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)
+
+	/*
+	 * The return value is already populated in %eax so we cannot use
+	 * it as a scratch register beyond this point.
+	 */
+
+	/*
+	 * Deactivate guest pmap from this cpu.
+	 */
+	movq	VMXCTX_PMAP(%rdi), %r11
+	movl	PCPU(CPUID), %r10d
+	LK btrl	%r10d, PM_ACTIVE(%r11)
+
+	VMX_HOST_RESTORE
+	VLEAVE
+	ret
+
+/*
+ * Non-error VM-exit from the guest. Make this a label so it can
+ * be used by C code when setting up the VMCS.
+ * The VMCS-restored %rsp points to the struct vmxctx
+ */
+	ALIGN_TEXT
+	.globl	vmx_exit_guest_flush_rsb
+vmx_exit_guest_flush_rsb:
+	/*
+	 * Save guest state that is not automatically saved in the vmcs.
+	 */
+	VMX_GUEST_SAVE
+
+	/*
+	 * Deactivate guest pmap from this cpu.
+	 */
+	movq	VMXCTX_PMAP(%rdi), %r11
+	movl	PCPU(CPUID), %r10d
+	LK btrl	%r10d, PM_ACTIVE(%r11)
+
+	VMX_HOST_RESTORE
+
+	VMX_GUEST_CLOBBER
+
+	/*
+	 * To prevent malicious branch target predictions from
+	 * affecting the host, overwrite all entries in the RSB upon
+	 * exiting a guest.
+	 */
+	mov	$16, %ecx	/* 16 iterations, two calls per loop */
+	mov	%rsp, %rax
+0:	call	2f		/* create an RSB entry. */
+1:	pause
+	call	1b		/* capture rogue speculation. */
+2:	call	2f		/* create an RSB entry. */
+1:	pause
+	call	1b		/* capture rogue speculation. */
+2:	sub	$1, %ecx
+	jnz	0b
+	mov	%rax, %rsp
+
+	/*
+	 * This will return to the caller of 'vmx_enter_guest()' with a return
+	 * value of VMX_GUEST_VMEXIT.
+	 */
+	movl	$VMX_GUEST_VMEXIT, %eax
+	VLEAVE
+	ret
+
+	.globl	vmx_exit_guest
+vmx_exit_guest:
+	/*
+	 * Save guest state that is not automatically saved in the vmcs.
+	 */
+	VMX_GUEST_SAVE
+
+	/*
+	 * Deactivate guest pmap from this cpu.
+	 */
+	movq	VMXCTX_PMAP(%rdi), %r11
+	movl	PCPU(CPUID), %r10d
+	LK btrl	%r10d, PM_ACTIVE(%r11)
+
+	VMX_HOST_RESTORE
+
+	VMX_GUEST_CLOBBER
+
+	/*
+	 * This will return to the caller of 'vmx_enter_guest()' with a return
+	 * value of VMX_GUEST_VMEXIT.
+	 */
+	movl	$VMX_GUEST_VMEXIT, %eax
+	VLEAVE
+	ret
+END(vmx_enter_guest)
+
+/*
+ * %rdi = interrupt handler entry point
+ *
+ * Calling sequence described in the "Instruction Set Reference" for the "INT"
+ * instruction in Intel SDM, Vol 2.
+ */
+ENTRY(vmx_call_isr)
+	VENTER
+	mov	%rsp, %r11			/* save %rsp */
+	and	$~0xf, %rsp			/* align on 16-byte boundary */
+	pushq	$KERNEL_SS			/* %ss */
+	pushq	%r11				/* %rsp */
+	pushfq					/* %rflags */
+	pushq	$KERNEL_CS			/* %cs */
+	cli					/* disable interrupts */
+	callq	*%rdi				/* push %rip and call isr */
+	VLEAVE
+	ret
+END(vmx_call_isr)


Property changes on: trunk/sys/amd64/vmm/intel/vmx_support.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
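
For orientation, a hedged sketch of how the C side is expected to consume the
return value of vmx_enter_guest() above (the real handling lives in vmx.c,
outside this hunk): VMX_GUEST_VMEXIT means a normal exit whose reason is read
from the VMCS, while the *_ERROR codes mean that vmlaunch, vmresume or invept
itself failed and the vmxctx field behind VMXCTX_INST_FAIL_STATUS records
whether the failure was VMfailValid or VMfailInvalid.

	rc = vmx_enter_guest(vmxctx, vmx, launched);
	switch (rc) {
	case VMX_GUEST_VMEXIT:
		/* Normal exit: decode the exit reason from the VMCS. */
		break;
	case VMX_VMLAUNCH_ERROR:
	case VMX_VMRESUME_ERROR:
	case VMX_INVEPT_ERROR:
		/* The instruction itself failed; see 'inst_error' above. */
		break;
	}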
Added: trunk/sys/amd64/vmm/intel/vtd.c
===================================================================
--- trunk/sys/amd64/vmm/intel/vtd.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/intel/vtd.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,689 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/intel/vtd.c 279470 2015-03-01 04:22:06Z rstone $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vtd.c 279470 2015-03-01 04:22:06Z rstone $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <dev/pci/pcireg.h>
+
+#include <machine/vmparam.h>
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include "io/iommu.h"
+
+/*
+ * Documented in the "Intel Virtualization Technology for Directed I/O",
+ * Architecture Spec, September 2008.
+ */
+
+/* Section 10.4 "Register Descriptions" */
+struct vtdmap {
+	volatile uint32_t	version;
+	volatile uint32_t	res0;
+	volatile uint64_t	cap;
+	volatile uint64_t	ext_cap;
+	volatile uint32_t	gcr;
+	volatile uint32_t	gsr;
+	volatile uint64_t	rta;
+	volatile uint64_t	ccr;
+};
+
+#define	VTD_CAP_SAGAW(cap)	(((cap) >> 8) & 0x1F)
+#define	VTD_CAP_ND(cap)		((cap) & 0x7)
+#define	VTD_CAP_CM(cap)		(((cap) >> 7) & 0x1)
+#define	VTD_CAP_SPS(cap)	(((cap) >> 34) & 0xF)
+#define	VTD_CAP_RWBF(cap)	(((cap) >> 4) & 0x1)
+
+#define	VTD_ECAP_DI(ecap)	(((ecap) >> 2) & 0x1)
+#define	VTD_ECAP_COHERENCY(ecap) ((ecap) & 0x1)
+#define	VTD_ECAP_IRO(ecap)	(((ecap) >> 8) & 0x3FF)
+
+#define	VTD_GCR_WBF		(1 << 27)
+#define	VTD_GCR_SRTP		(1 << 30)
+#define	VTD_GCR_TE		(1U << 31)
+
+#define	VTD_GSR_WBFS		(1 << 27)
+#define	VTD_GSR_RTPS		(1 << 30)
+#define	VTD_GSR_TES		(1U << 31)
+
+#define	VTD_CCR_ICC		(1UL << 63)	/* invalidate context cache */
+#define	VTD_CCR_CIRG_GLOBAL	(1UL << 61)	/* global invalidation */
+
+#define	VTD_IIR_IVT		(1UL << 63)	/* invalidation IOTLB */
+#define	VTD_IIR_IIRG_GLOBAL	(1ULL << 60)	/* global IOTLB invalidation */
+#define	VTD_IIR_IIRG_DOMAIN	(2ULL << 60)	/* domain IOTLB invalidation */
+#define	VTD_IIR_IIRG_PAGE	(3ULL << 60)	/* page IOTLB invalidation */
+#define	VTD_IIR_DRAIN_READS	(1ULL << 49)	/* drain pending DMA reads */
+#define	VTD_IIR_DRAIN_WRITES	(1ULL << 48)	/* drain pending DMA writes */
+#define	VTD_IIR_DOMAIN_P	32
+
+#define	VTD_ROOT_PRESENT	0x1
+#define	VTD_CTX_PRESENT		0x1
+#define	VTD_CTX_TT_ALL		(1UL << 2)
+
+#define	VTD_PTE_RD		(1UL << 0)
+#define	VTD_PTE_WR		(1UL << 1)
+#define	VTD_PTE_SUPERPAGE	(1UL << 7)
+#define	VTD_PTE_ADDR_M		(0x000FFFFFFFFFF000UL)
+
+#define VTD_RID2IDX(rid)	(((rid) & 0xff) * 2)
+
+struct domain {
+	uint64_t	*ptp;		/* first level page table page */
+	int		pt_levels;	/* number of page table levels */
+	int		addrwidth;	/* 'AW' field in context entry */
+	int		spsmask;	/* supported super page sizes */
+	u_int		id;		/* domain id */
+	vm_paddr_t	maxaddr;	/* highest address to be mapped */
+	SLIST_ENTRY(domain) next;
+};
+
+static SLIST_HEAD(, domain) domhead;
+
+#define	DRHD_MAX_UNITS	8
+static int		drhd_num;
+static struct vtdmap	*vtdmaps[DRHD_MAX_UNITS];
+static int		max_domains;
+typedef int		(*drhd_ident_func_t)(void);
+
+static uint64_t root_table[PAGE_SIZE / sizeof(uint64_t)] __aligned(4096);
+static uint64_t ctx_tables[256][PAGE_SIZE / sizeof(uint64_t)] __aligned(4096);
+
+static MALLOC_DEFINE(M_VTD, "vtd", "vtd");
+
+static int
+vtd_max_domains(struct vtdmap *vtdmap)
+{
+	int nd;
+
+	nd = VTD_CAP_ND(vtdmap->cap);
+
+	switch (nd) {
+	case 0:
+		return (16);
+	case 1:
+		return (64);
+	case 2:
+		return (256);
+	case 3:
+		return (1024);
+	case 4:
+		return (4 * 1024);
+	case 5:
+		return (16 * 1024);
+	case 6:
+		return (64 * 1024);
+	default:
+		panic("vtd_max_domains: invalid value of nd (0x%0x)", nd);
+	}
+}
+
+static u_int
+domain_id(void)
+{
+	u_int id;
+	struct domain *dom;
+
+	/* Skip domain id 0 - it is reserved when Caching Mode field is set */
+	for (id = 1; id < max_domains; id++) {
+		SLIST_FOREACH(dom, &domhead, next) {
+			if (dom->id == id)
+				break;
+		}
+		if (dom == NULL)
+			break;		/* found it */
+	}
+	
+	if (id >= max_domains)
+		panic("domain ids exhausted");
+
+	return (id);
+}
+
+static void
+vtd_wbflush(struct vtdmap *vtdmap)
+{
+
+	if (VTD_ECAP_COHERENCY(vtdmap->ext_cap) == 0)
+		pmap_invalidate_cache();
+
+	if (VTD_CAP_RWBF(vtdmap->cap)) {
+		vtdmap->gcr = VTD_GCR_WBF;
+		while ((vtdmap->gsr & VTD_GSR_WBFS) != 0)
+			;
+	}
+}
+
+static void
+vtd_ctx_global_invalidate(struct vtdmap *vtdmap)
+{
+
+	vtdmap->ccr = VTD_CCR_ICC | VTD_CCR_CIRG_GLOBAL;
+	while ((vtdmap->ccr & VTD_CCR_ICC) != 0)
+		;
+}
+
+static void
+vtd_iotlb_global_invalidate(struct vtdmap *vtdmap)
+{
+	int offset;
+	volatile uint64_t *iotlb_reg, val;
+
+	vtd_wbflush(vtdmap);
+
+	offset = VTD_ECAP_IRO(vtdmap->ext_cap) * 16;
+	iotlb_reg = (volatile uint64_t *)((caddr_t)vtdmap + offset + 8);
+	
+	*iotlb_reg =  VTD_IIR_IVT | VTD_IIR_IIRG_GLOBAL |
+		      VTD_IIR_DRAIN_READS | VTD_IIR_DRAIN_WRITES;
+
+	while (1) {
+		val = *iotlb_reg;
+		if ((val & VTD_IIR_IVT) == 0)
+			break;
+	}
+}
+
+static void
+vtd_translation_enable(struct vtdmap *vtdmap)
+{
+
+	vtdmap->gcr = VTD_GCR_TE;
+	while ((vtdmap->gsr & VTD_GSR_TES) == 0)
+		;
+}
+
+static void
+vtd_translation_disable(struct vtdmap *vtdmap)
+{
+
+	vtdmap->gcr = 0;
+	while ((vtdmap->gsr & VTD_GSR_TES) != 0)
+		;
+}
+
+static int
+vtd_init(void)
+{
+	int i, units, remaining;
+	struct vtdmap *vtdmap;
+	vm_paddr_t ctx_paddr;
+	char *end, envname[32];
+	unsigned long mapaddr;
+	ACPI_STATUS status;
+	ACPI_TABLE_DMAR *dmar;
+	ACPI_DMAR_HEADER *hdr;
+	ACPI_DMAR_HARDWARE_UNIT *drhd;
+
+	/*
+	 * Allow the user to override the ACPI DMAR table by specifying the
+	 * physical address of each remapping unit.
+	 *
+	 * The following example specifies two remapping units at
+	 * physical addresses 0xfed90000 and 0xfeda0000 respectively.
+	 * set vtd.regmap.0.addr=0xfed90000
+	 * set vtd.regmap.1.addr=0xfeda0000
+	 */
+	for (units = 0; units < DRHD_MAX_UNITS; units++) {
+		snprintf(envname, sizeof(envname), "vtd.regmap.%d.addr", units);
+		if (getenv_ulong(envname, &mapaddr) == 0)
+			break;
+		vtdmaps[units] = (struct vtdmap *)PHYS_TO_DMAP(mapaddr);
+	}
+
+	if (units > 0)
+		goto skip_dmar;
+
+	/* Search for DMAR table. */
+	status = AcpiGetTable(ACPI_SIG_DMAR, 0, (ACPI_TABLE_HEADER **)&dmar);
+	if (ACPI_FAILURE(status))
+		return (ENXIO);
+
+	end = (char *)dmar + dmar->Header.Length;
+	remaining = dmar->Header.Length - sizeof(ACPI_TABLE_DMAR);
+	while (remaining > sizeof(ACPI_DMAR_HEADER)) {
+		hdr = (ACPI_DMAR_HEADER *)(end - remaining);
+		if (hdr->Length > remaining)
+			break;
+		/*
+		 * From Intel VT-d arch spec, version 1.3:
+		 * BIOS implementations must report mapping structures
+		 * in numerical order, i.e. All remapping structures of
+		 * type 0 (DRHD) enumerated before remapping structures of
+		 * type 1 (RMRR) and so forth.
+		 */
+		if (hdr->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT)
+			break;
+
+		drhd = (ACPI_DMAR_HARDWARE_UNIT *)hdr;
+		vtdmaps[units++] = (struct vtdmap *)PHYS_TO_DMAP(drhd->Address);
+		if (units >= DRHD_MAX_UNITS)
+			break;
+		remaining -= hdr->Length;
+	}
+
+	if (units <= 0)
+		return (ENXIO);
+
+skip_dmar:
+	drhd_num = units;
+	vtdmap = vtdmaps[0];
+
+	if (VTD_CAP_CM(vtdmap->cap) != 0)
+		panic("vtd_init: invalid caching mode");
+
+	max_domains = vtd_max_domains(vtdmap);
+
+	/*
+	 * Set up the root-table to point to the context-entry tables
+	 */
+	for (i = 0; i < 256; i++) {
+		ctx_paddr = vtophys(ctx_tables[i]);
+		if (ctx_paddr & PAGE_MASK)
+			panic("ctx table (0x%0lx) not page aligned", ctx_paddr);
+
+		root_table[i * 2] = ctx_paddr | VTD_ROOT_PRESENT;
+	}
+
+	return (0);
+}
+
+static void
+vtd_cleanup(void)
+{
+}
+
+static void
+vtd_enable(void)
+{
+	int i;
+	struct vtdmap *vtdmap;
+
+	for (i = 0; i < drhd_num; i++) {
+		vtdmap = vtdmaps[i];
+		vtd_wbflush(vtdmap);
+
+		/* Update the root table address */
+		vtdmap->rta = vtophys(root_table);
+		vtdmap->gcr = VTD_GCR_SRTP;
+		while ((vtdmap->gsr & VTD_GSR_RTPS) == 0)
+			;
+
+		vtd_ctx_global_invalidate(vtdmap);
+		vtd_iotlb_global_invalidate(vtdmap);
+
+		vtd_translation_enable(vtdmap);
+	}
+}
+
+static void
+vtd_disable(void)
+{
+	int i;
+	struct vtdmap *vtdmap;
+
+	for (i = 0; i < drhd_num; i++) {
+		vtdmap = vtdmaps[i];
+		vtd_translation_disable(vtdmap);
+	}
+}
+
+static void
+vtd_add_device(void *arg, uint16_t rid)
+{
+	int idx;
+	uint64_t *ctxp;
+	struct domain *dom = arg;
+	vm_paddr_t pt_paddr;
+	struct vtdmap *vtdmap;
+	uint8_t bus;
+
+	vtdmap = vtdmaps[0];
+	bus = PCI_RID2BUS(rid);
+	ctxp = ctx_tables[bus];
+	pt_paddr = vtophys(dom->ptp);
+	idx = VTD_RID2IDX(rid);
+
+	if (ctxp[idx] & VTD_CTX_PRESENT) {
+		panic("vtd_add_device: device %x is already owned by "
+		      "domain %d", rid,
+		      (uint16_t)(ctxp[idx + 1] >> 8));
+	}
+
+	/*
+	 * Order is important. The 'present' bit is set only after all fields
+	 * of the context pointer are initialized.
+	 */
+	ctxp[idx + 1] = dom->addrwidth | (dom->id << 8);
+
+	if (VTD_ECAP_DI(vtdmap->ext_cap))
+		ctxp[idx] = VTD_CTX_TT_ALL;
+	else
+		ctxp[idx] = 0;
+
+	ctxp[idx] |= pt_paddr | VTD_CTX_PRESENT;
+
+	/*
+	 * 'Not Present' entries are not cached in either the Context Cache
+	 * or in the IOTLB, so there is no need to invalidate either of them.
+	 */
+}
+
+static void
+vtd_remove_device(void *arg, uint16_t rid)
+{
+	int i, idx;
+	uint64_t *ctxp;
+	struct vtdmap *vtdmap;
+	uint8_t bus;
+
+	bus = PCI_RID2BUS(rid);
+	ctxp = ctx_tables[bus];
+	idx = VTD_RID2IDX(rid);
+
+	/*
+	 * Order is important. The 'present' bit must be cleared first.
+	 */
+	ctxp[idx] = 0;
+	ctxp[idx + 1] = 0;
+
+	/*
+	 * Invalidate the Context Cache and the IOTLB.
+	 *
+	 * XXX use device-selective invalidation for Context Cache
+	 * XXX use domain-selective invalidation for IOTLB
+	 */
+	for (i = 0; i < drhd_num; i++) {
+		vtdmap = vtdmaps[i];
+		vtd_ctx_global_invalidate(vtdmap);
+		vtd_iotlb_global_invalidate(vtdmap);
+	}
+}
+
+#define	CREATE_MAPPING	0
+#define	REMOVE_MAPPING	1
+
+static uint64_t
+vtd_update_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len,
+		   int remove)
+{
+	struct domain *dom;
+	int i, spshift, ptpshift, ptpindex, nlevels;
+	uint64_t spsize, *ptp;
+
+	dom = arg;
+	ptpindex = 0;
+	ptpshift = 0;
+
+	KASSERT(gpa + len > gpa, ("%s: invalid gpa range %#lx/%#lx", __func__,
+	    gpa, len));
+	KASSERT(gpa + len <= dom->maxaddr, ("%s: gpa range %#lx/%#lx beyond "
+	    "domain maxaddr %#lx", __func__, gpa, len, dom->maxaddr));
+
+	if (gpa & PAGE_MASK)
+		panic("vtd_create_mapping: unaligned gpa 0x%0lx", gpa);
+
+	if (hpa & PAGE_MASK)
+		panic("vtd_create_mapping: unaligned hpa 0x%0lx", hpa);
+
+	if (len & PAGE_MASK)
+		panic("vtd_create_mapping: unaligned len 0x%0lx", len);
+
+	/*
+	 * Compute the size of the mapping that we can accommodate.
+	 *
+	 * This is based on three factors:
+	 * - supported super page size
+	 * - alignment of the region starting at 'gpa' and 'hpa'
+	 * - length of the region 'len'
+	 */
+	spshift = 48;
+	for (i = 3; i >= 0; i--) {
+		spsize = 1UL << spshift;
+		if ((dom->spsmask & (1 << i)) != 0 &&
+		    (gpa & (spsize - 1)) == 0 &&
+		    (hpa & (spsize - 1)) == 0 &&
+		    (len >= spsize)) {
+			break;
+		}
+		spshift -= 9;
+	}
+
+	ptp = dom->ptp;
+	nlevels = dom->pt_levels;
+	while (--nlevels >= 0) {
+		ptpshift = 12 + nlevels * 9;
+		ptpindex = (gpa >> ptpshift) & 0x1FF;
+
+		/* We have reached the leaf mapping */
+		if (spshift >= ptpshift) {
+			break;
+		}
+
+		/*
+		 * We are working on a non-leaf page table page.
+		 *
+		 * Create a downstream page table page if necessary and point
+		 * to it from the current page table.
+		 */
+		if (ptp[ptpindex] == 0) {
+			void *nlp = malloc(PAGE_SIZE, M_VTD, M_WAITOK | M_ZERO);
+			ptp[ptpindex] = vtophys(nlp) | VTD_PTE_RD | VTD_PTE_WR;
+		}
+
+		ptp = (uint64_t *)PHYS_TO_DMAP(ptp[ptpindex] & VTD_PTE_ADDR_M);
+	}
+
+	if ((gpa & ((1UL << ptpshift) - 1)) != 0)
+		panic("gpa 0x%lx and ptpshift %d mismatch", gpa, ptpshift);
+
+	/*
+	 * Update the 'gpa' -> 'hpa' mapping
+	 */
+	if (remove) {
+		ptp[ptpindex] = 0;
+	} else {
+		ptp[ptpindex] = hpa | VTD_PTE_RD | VTD_PTE_WR;
+
+		if (nlevels > 0)
+			ptp[ptpindex] |= VTD_PTE_SUPERPAGE;
+	}
+
+	return (1UL << ptpshift);
+}
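
/*
 * Editorial worked example, not part of the committed hunk: with superpages
 * disabled (spsmask == 0) the spshift loop above always bottoms out at 12,
 * so mapping a single 4KB page through 3-level tables visits ptpshift 30 and
 * 21, allocating intermediate page table pages as needed, and installs the
 * leaf entry at level 0:
 *
 *	ptpindex = (gpa >> 12) & 0x1FF;
 *	ptp[ptpindex] = hpa | VTD_PTE_RD | VTD_PTE_WR;
 *	return (1UL << 12);	4KB consumed; the caller loops for the rest
 */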
+
+static uint64_t
+vtd_create_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len)
+{
+
+	return (vtd_update_mapping(arg, gpa, hpa, len, CREATE_MAPPING));
+}
+
+static uint64_t
+vtd_remove_mapping(void *arg, vm_paddr_t gpa, uint64_t len)
+{
+
+	return (vtd_update_mapping(arg, gpa, 0, len, REMOVE_MAPPING));
+}
+
+static void
+vtd_invalidate_tlb(void *dom)
+{
+	int i;
+	struct vtdmap *vtdmap;
+
+	/*
+	 * Invalidate the IOTLB.
+	 * XXX use domain-selective invalidation for IOTLB
+	 */
+	for (i = 0; i < drhd_num; i++) {
+		vtdmap = vtdmaps[i];
+		vtd_iotlb_global_invalidate(vtdmap);
+	}
+}
+
+static void *
+vtd_create_domain(vm_paddr_t maxaddr)
+{
+	struct domain *dom;
+	vm_paddr_t addr;
+	int tmp, i, gaw, agaw, sagaw, res, pt_levels, addrwidth;
+	struct vtdmap *vtdmap;
+
+	if (drhd_num <= 0)
+		panic("vtd_create_domain: no dma remapping hardware available");
+
+	vtdmap = vtdmaps[0];
+
+	/*
+	 * Calculate AGAW.
+	 * Section 3.4.2 "Adjusted Guest Address Width", Architecture Spec.
+	 */
+	addr = 0;
+	for (gaw = 0; addr < maxaddr; gaw++)
+		addr = 1ULL << gaw;
+
+	res = (gaw - 12) % 9;
+	if (res == 0)
+		agaw = gaw;
+	else
+		agaw = gaw + 9 - res;
+
+	if (agaw > 64)
+		agaw = 64;
+
+	/*
+	 * Select the smallest Supported AGAW and the corresponding number
+	 * of page table levels.
+	 */
+	pt_levels = 2;
+	sagaw = 30;
+	addrwidth = 0;
+	tmp = VTD_CAP_SAGAW(vtdmap->cap);
+	for (i = 0; i < 5; i++) {
+		if ((tmp & (1 << i)) != 0 && sagaw >= agaw)
+			break;
+		pt_levels++;
+		addrwidth++;
+		sagaw += 9;
+		if (sagaw > 64)
+			sagaw = 64;
+	}
+
+	if (i >= 5) {
+		panic("vtd_create_domain: SAGAW 0x%lx does not support AGAW %d",
+		      VTD_CAP_SAGAW(vtdmap->cap), agaw);
+	}
+
+	dom = malloc(sizeof(struct domain), M_VTD, M_ZERO | M_WAITOK);
+	dom->pt_levels = pt_levels;
+	dom->addrwidth = addrwidth;
+	dom->id = domain_id();
+	dom->maxaddr = maxaddr;
+	dom->ptp = malloc(PAGE_SIZE, M_VTD, M_ZERO | M_WAITOK);
+	if ((uintptr_t)dom->ptp & PAGE_MASK)
+		panic("vtd_create_domain: ptp (%p) not page aligned", dom->ptp);
+
+#ifdef notyet
+	/*
+	 * XXX superpage mappings for the iommu do not work correctly.
+	 *
+	 * By default all physical memory is mapped into the host_domain.
+	 * When a VM is allocated wired memory the pages belonging to it
+	 * are removed from the host_domain and added to the vm's domain.
+	 *
+	 * If the page being removed was mapped using a superpage mapping
+	 * in the host_domain then we need to demote the mapping before
+	 * removing the page.
+	 *
+	 * There is not any code to deal with the demotion at the moment
+	 * so we disable superpage mappings altogether.
+	 */
+	dom->spsmask = VTD_CAP_SPS(vtdmap->cap);
+#endif
+
+	SLIST_INSERT_HEAD(&domhead, dom, next);
+
+	return (dom);
+}
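
/*
 * Editorial worked example, not part of the committed hunk: for a guest with
 * maxaddr = 4GB (0x100000000) the gaw loop above terminates with gaw = 33, so
 *
 *	res  = (33 - 12) % 9 = 3
 *	agaw = 33 + 9 - 3    = 39
 *
 * and the SAGAW scan then selects the 39-bit width, i.e. 3-level page tables,
 * provided the corresponding bit of the hardware's SAGAW capability field is
 * set.
 */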
+
+static void
+vtd_free_ptp(uint64_t *ptp, int level)
+{
+	int i;
+	uint64_t *nlp;
+
+	if (level > 1) {
+		for (i = 0; i < 512; i++) {
+			if ((ptp[i] & (VTD_PTE_RD | VTD_PTE_WR)) == 0)
+				continue;
+			if ((ptp[i] & VTD_PTE_SUPERPAGE) != 0)
+				continue;
+			nlp = (uint64_t *)PHYS_TO_DMAP(ptp[i] & VTD_PTE_ADDR_M);
+			vtd_free_ptp(nlp, level - 1);
+		}
+	}
+
+	bzero(ptp, PAGE_SIZE);
+	free(ptp, M_VTD);
+}
+
+static void
+vtd_destroy_domain(void *arg)
+{
+	struct domain *dom;
+	
+	dom = arg;
+
+	SLIST_REMOVE(&domhead, dom, domain, next);
+	vtd_free_ptp(dom->ptp, dom->pt_levels);
+	free(dom, M_VTD);
+}
+
+struct iommu_ops iommu_ops_intel = {
+	vtd_init,
+	vtd_cleanup,
+	vtd_enable,
+	vtd_disable,
+	vtd_create_domain,
+	vtd_destroy_domain,
+	vtd_create_mapping,
+	vtd_remove_mapping,
+	vtd_add_device,
+	vtd_remove_device,
+	vtd_invalidate_tlb,
+};


Property changes on: trunk/sys/amd64/vmm/intel/vtd.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/iommu.c
===================================================================
--- trunk/sys/amd64/vmm/io/iommu.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/iommu.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,312 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/iommu.c 325900 2017-11-16 18:22:03Z jhb $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/iommu.c 325900 2017-11-16 18:22:03Z jhb $");
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/sysctl.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+
+#include "vmm_util.h"
+#include "vmm_mem.h"
+#include "iommu.h"
+
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, iommu, CTLFLAG_RW, 0, "bhyve iommu parameters");
+
+static int iommu_avail;
+SYSCTL_INT(_hw_vmm_iommu, OID_AUTO, initialized, CTLFLAG_RD, &iommu_avail,
+    0, "bhyve iommu initialized?");
+
+static int iommu_enable = 1;
+SYSCTL_INT(_hw_vmm_iommu, OID_AUTO, enable, CTLFLAG_RDTUN, &iommu_enable, 0,
+    "Enable use of I/O MMU (required for PCI passthrough).");
+
+static struct iommu_ops *ops;
+static void *host_domain;
+
+static __inline int
+IOMMU_INIT(void)
+{
+	if (ops != NULL)
+		return ((*ops->init)());
+	else
+		return (ENXIO);
+}
+
+static __inline void
+IOMMU_CLEANUP(void)
+{
+	if (ops != NULL && iommu_avail)
+		(*ops->cleanup)();
+}
+
+static __inline void *
+IOMMU_CREATE_DOMAIN(vm_paddr_t maxaddr)
+{
+
+	if (ops != NULL && iommu_avail)
+		return ((*ops->create_domain)(maxaddr));
+	else
+		return (NULL);
+}
+
+static __inline void
+IOMMU_DESTROY_DOMAIN(void *dom)
+{
+
+	if (ops != NULL && iommu_avail)
+		(*ops->destroy_domain)(dom);
+}
+
+static __inline uint64_t
+IOMMU_CREATE_MAPPING(void *domain, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len)
+{
+
+	if (ops != NULL && iommu_avail)
+		return ((*ops->create_mapping)(domain, gpa, hpa, len));
+	else
+		return (len);		/* XXX */
+}
+
+static __inline uint64_t
+IOMMU_REMOVE_MAPPING(void *domain, vm_paddr_t gpa, uint64_t len)
+{
+
+	if (ops != NULL && iommu_avail)
+		return ((*ops->remove_mapping)(domain, gpa, len));
+	else
+		return (len);		/* XXX */
+}
+
+static __inline void
+IOMMU_ADD_DEVICE(void *domain, uint16_t rid)
+{
+
+	if (ops != NULL && iommu_avail)
+		(*ops->add_device)(domain, rid);
+}
+
+static __inline void
+IOMMU_REMOVE_DEVICE(void *domain, uint16_t rid)
+{
+
+	if (ops != NULL && iommu_avail)
+		(*ops->remove_device)(domain, rid);
+}
+
+static __inline void
+IOMMU_INVALIDATE_TLB(void *domain)
+{
+
+	if (ops != NULL && iommu_avail)
+		(*ops->invalidate_tlb)(domain);
+}
+
+static __inline void
+IOMMU_ENABLE(void)
+{
+
+	if (ops != NULL && iommu_avail)
+		(*ops->enable)();
+}
+
+static __inline void
+IOMMU_DISABLE(void)
+{
+
+	if (ops != NULL && iommu_avail)
+		(*ops->disable)();
+}
+
+static void
+iommu_init(void)
+{
+	int error, bus, slot, func;
+	vm_paddr_t maxaddr;
+	devclass_t dc;
+	device_t dev;
+
+	if (!iommu_enable)
+		return;
+
+	if (vmm_is_intel())
+		ops = &iommu_ops_intel;
+	else if (vmm_is_amd())
+		ops = &iommu_ops_amd;
+	else
+		ops = NULL;
+
+	error = IOMMU_INIT();
+	if (error)
+		return;
+
+	iommu_avail = 1;
+
+	/*
+	 * Create a domain for the devices owned by the host
+	 */
+	maxaddr = vmm_mem_maxaddr();
+	host_domain = IOMMU_CREATE_DOMAIN(maxaddr);
+	if (host_domain == NULL) {
+		printf("iommu_init: unable to create a host domain");
+		IOMMU_CLEANUP();
+		ops = NULL;
+		iommu_avail = 0;
+		return;
+	}
+
+	/*
+	 * Create 1:1 mappings from '0' to 'maxaddr' for devices assigned to
+	 * the host
+	 */
+	iommu_create_mapping(host_domain, 0, 0, maxaddr);
+
+	dc = devclass_find("ppt");
+	for (bus = 0; bus <= PCI_BUSMAX; bus++) {
+		for (slot = 0; slot <= PCI_SLOTMAX; slot++) {
+			for (func = 0; func <= PCI_FUNCMAX; func++) {
+				dev = pci_find_dbsf(0, bus, slot, func);
+				if (dev == NULL)
+					continue;
+
+				/* Skip passthrough devices. */
+				if (dc != NULL &&
+				    device_get_devclass(dev) == dc)
+					continue;
+
+				/*
+				 * Everything else belongs to the host
+				 * domain.
+				 */
+				iommu_add_device(host_domain,
+				    pci_get_rid(dev));
+			}
+		}
+	}
+	IOMMU_ENABLE();
+
+}
+
+void
+iommu_cleanup(void)
+{
+	IOMMU_DISABLE();
+	IOMMU_DESTROY_DOMAIN(host_domain);
+	IOMMU_CLEANUP();
+}
+
+void *
+iommu_create_domain(vm_paddr_t maxaddr)
+{
+	static volatile int iommu_initted;
+
+	if (iommu_initted < 2) {
+		if (atomic_cmpset_int(&iommu_initted, 0, 1)) {
+			iommu_init();
+			atomic_store_rel_int(&iommu_initted, 2);
+		} else
+			while (iommu_initted == 1)
+				cpu_spinwait();
+	}
+	return (IOMMU_CREATE_DOMAIN(maxaddr));
+}
+
+void
+iommu_destroy_domain(void *dom)
+{
+
+	IOMMU_DESTROY_DOMAIN(dom);
+}
+
+void
+iommu_create_mapping(void *dom, vm_paddr_t gpa, vm_paddr_t hpa, size_t len)
+{
+	uint64_t mapped, remaining;
+
+	remaining = len;
+
+	while (remaining > 0) {
+		mapped = IOMMU_CREATE_MAPPING(dom, gpa, hpa, remaining);
+		gpa += mapped;
+		hpa += mapped;
+		remaining -= mapped;
+	}
+}
+
+void
+iommu_remove_mapping(void *dom, vm_paddr_t gpa, size_t len)
+{
+	uint64_t unmapped, remaining;
+
+	remaining = len;
+
+	while (remaining > 0) {
+		unmapped = IOMMU_REMOVE_MAPPING(dom, gpa, remaining);
+		gpa += unmapped;
+		remaining -= unmapped;
+	}
+}
+
+void *
+iommu_host_domain(void)
+{
+
+	return (host_domain);
+}
+
+void
+iommu_add_device(void *dom, uint16_t rid)
+{
+
+	IOMMU_ADD_DEVICE(dom, rid);
+}
+
+void
+iommu_remove_device(void *dom, uint16_t rid)
+{
+
+	IOMMU_REMOVE_DEVICE(dom, rid);
+}
+
+void
+iommu_invalidate_tlb(void *domain)
+{
+
+	IOMMU_INVALIDATE_TLB(domain);
+}


Property changes on: trunk/sys/amd64/vmm/io/iommu.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
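
A hedged sketch of how these wrappers are meant to be driven by the rest of
vmm (vmm.c, elsewhere in this commit): when a VM owns a passthrough device,
its wired memory is removed from the 1:1 host domain and mapped into the VM's
own domain at the guest-physical address, roughly:

	/* Assumed name: vm_iommu_domain() returns the VM's domain cookie. */
	iommu_remove_mapping(iommu_host_domain(), hpa, len);
	iommu_create_mapping(vm_iommu_domain(vm), gpa, hpa, len);
	iommu_invalidate_tlb(iommu_host_domain());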
Added: trunk/sys/amd64/vmm/io/iommu.h
===================================================================
--- trunk/sys/amd64/vmm/io/iommu.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/iommu.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,75 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/iommu.h 306472 2016-09-30 01:42:29Z jhb $
+ */
+
+#ifndef _IO_IOMMU_H_
+#define	_IO_IOMMU_H_
+
+typedef int (*iommu_init_func_t)(void);
+typedef void (*iommu_cleanup_func_t)(void);
+typedef void (*iommu_enable_func_t)(void);
+typedef void (*iommu_disable_func_t)(void);
+typedef void *(*iommu_create_domain_t)(vm_paddr_t maxaddr);
+typedef void (*iommu_destroy_domain_t)(void *domain);
+typedef uint64_t (*iommu_create_mapping_t)(void *domain, vm_paddr_t gpa,
+					   vm_paddr_t hpa, uint64_t len);
+typedef uint64_t (*iommu_remove_mapping_t)(void *domain, vm_paddr_t gpa,
+					   uint64_t len);
+typedef void (*iommu_add_device_t)(void *domain, uint16_t rid);
+typedef void (*iommu_remove_device_t)(void *dom, uint16_t rid);
+typedef void (*iommu_invalidate_tlb_t)(void *dom);
+
+struct iommu_ops {
+	iommu_init_func_t	init;		/* module wide */
+	iommu_cleanup_func_t	cleanup;
+	iommu_enable_func_t	enable;
+	iommu_disable_func_t	disable;
+
+	iommu_create_domain_t	create_domain;	/* domain-specific */
+	iommu_destroy_domain_t	destroy_domain;
+	iommu_create_mapping_t	create_mapping;
+	iommu_remove_mapping_t	remove_mapping;
+	iommu_add_device_t	add_device;
+	iommu_remove_device_t	remove_device;
+	iommu_invalidate_tlb_t	invalidate_tlb;
+};
+
+extern struct iommu_ops iommu_ops_intel;
+extern struct iommu_ops iommu_ops_amd;
+
+void	iommu_cleanup(void);
+void	*iommu_host_domain(void);
+void	*iommu_create_domain(vm_paddr_t maxaddr);
+void	iommu_destroy_domain(void *dom);
+void	iommu_create_mapping(void *dom, vm_paddr_t gpa, vm_paddr_t hpa,
+			     size_t len);
+void	iommu_remove_mapping(void *dom, vm_paddr_t gpa, size_t len);
+void	iommu_add_device(void *dom, uint16_t rid);
+void	iommu_remove_device(void *dom, uint16_t rid);
+void	iommu_invalidate_tlb(void *domain);
+#endif


Property changes on: trunk/sys/amd64/vmm/io/iommu.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
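
A vendor backend fills in struct iommu_ops exactly as vtd.c does above with
iommu_ops_intel (amdv.c elsewhere in this commit appears to play the same
role for AMD). Purely as a hedged illustration of the contract, a do-nothing
backend would look like:

static int	null_init(void)				{ return (ENXIO); }
static void	null_cleanup(void)			{ }
static void	null_enable(void)			{ }
static void	null_disable(void)			{ }
static void	*null_create_domain(vm_paddr_t maxaddr)	{ return (NULL); }
static void	null_destroy_domain(void *dom)		{ }
static uint64_t	null_create_mapping(void *dom, vm_paddr_t gpa, vm_paddr_t hpa,
		    uint64_t len)			{ return (len); }
static uint64_t	null_remove_mapping(void *dom, vm_paddr_t gpa, uint64_t len)
							{ return (len); }
static void	null_add_device(void *dom, uint16_t rid)	{ }
static void	null_remove_device(void *dom, uint16_t rid)	{ }
static void	null_invalidate_tlb(void *dom)		{ }

struct iommu_ops iommu_ops_null = {
	null_init,
	null_cleanup,
	null_enable,
	null_disable,
	null_create_domain,
	null_destroy_domain,
	null_create_mapping,
	null_remove_mapping,
	null_add_device,
	null_remove_device,
	null_invalidate_tlb,
};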
Added: trunk/sys/amd64/vmm/io/ppt.c
===================================================================
--- trunk/sys/amd64/vmm/io/ppt.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/ppt.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,672 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/ppt.c 325900 2017-11-16 18:22:03Z jhb $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/ppt.c 325900 2017-11-16 18:22:03Z jhb $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/pciio.h>
+#include <sys/rman.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include <machine/resource.h>
+
+#include <machine/vmm.h>
+#include <machine/vmm_dev.h>
+
+#include "vmm_lapic.h"
+#include "vmm_ktr.h"
+
+#include "iommu.h"
+#include "ppt.h"
+
+/* XXX locking */
+
+#define	MAX_MSIMSGS	32
+
+/*
+ * If the MSI-X table is located in the middle of a BAR then that MMIO
+ * region gets split into two segments - one segment above the MSI-X table
+ * and the other segment below the MSI-X table - with a hole in place of
+ * the MSI-X table so accesses to it can be trapped and emulated.
+ *
+ * So, allocate a MMIO segment for each BAR register + 1 additional segment.
+ */
+#define	MAX_MMIOSEGS	((PCIR_MAX_BAR_0 + 1) + 1)
+
+MALLOC_DEFINE(M_PPTMSIX, "pptmsix", "Passthru MSI-X resources");
+
+struct pptintr_arg {				/* pptintr(pptintr_arg) */
+	struct pptdev	*pptdev;
+	uint64_t	addr;
+	uint64_t	msg_data;
+};
+
+struct pptseg {
+	vm_paddr_t	gpa;
+	size_t		len;
+	int		wired;
+};
+
+struct pptdev {
+	device_t	dev;
+	struct vm	*vm;			/* owner of this device */
+	TAILQ_ENTRY(pptdev)	next;
+	struct pptseg mmio[MAX_MMIOSEGS];
+	struct {
+		int	num_msgs;		/* guest state */
+
+		int	startrid;		/* host state */
+		struct resource *res[MAX_MSIMSGS];
+		void	*cookie[MAX_MSIMSGS];
+		struct pptintr_arg arg[MAX_MSIMSGS];
+	} msi;
+
+	struct {
+		int num_msgs;
+		int startrid;
+		int msix_table_rid;
+		struct resource *msix_table_res;
+		struct resource **res;
+		void **cookie;
+		struct pptintr_arg *arg;
+	} msix;
+};
+
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, ppt, CTLFLAG_RW, 0, "bhyve passthru devices");
+
+static int num_pptdevs;
+SYSCTL_INT(_hw_vmm_ppt, OID_AUTO, devices, CTLFLAG_RD, &num_pptdevs, 0,
+    "number of pci passthru devices");
+
+static TAILQ_HEAD(, pptdev) pptdev_list = TAILQ_HEAD_INITIALIZER(pptdev_list);
+
+static int
+ppt_probe(device_t dev)
+{
+	int bus, slot, func;
+	struct pci_devinfo *dinfo;
+
+	dinfo = (struct pci_devinfo *)device_get_ivars(dev);
+
+	bus = pci_get_bus(dev);
+	slot = pci_get_slot(dev);
+	func = pci_get_function(dev);
+
+	/*
+	 * To qualify as a pci passthrough device a device must:
+	 * - be allowed by administrator to be used in this role
+	 * - be an endpoint device
+	 */
+	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
+		return (ENXIO);
+	else if (vmm_is_pptdev(bus, slot, func))
+		return (0);
+	else
+		/*
+		 * Returning BUS_PROBE_NOWILDCARD here matches devices that the
+		 * SR-IOV infrastructure specified as "ppt" passthrough devices.
+		 * All normal devices that did not have "ppt" specified as their
+		 * driver will not be matched by this.
+		 */
+		return (BUS_PROBE_NOWILDCARD);
+}
+
+static int
+ppt_attach(device_t dev)
+{
+	struct pptdev *ppt;
+
+	ppt = device_get_softc(dev);
+
+	iommu_remove_device(iommu_host_domain(), pci_get_rid(dev));
+	num_pptdevs++;
+	TAILQ_INSERT_TAIL(&pptdev_list, ppt, next);
+	ppt->dev = dev;
+
+	if (bootverbose)
+		device_printf(dev, "attached\n");
+
+	return (0);
+}
+
+static int
+ppt_detach(device_t dev)
+{
+	struct pptdev *ppt;
+
+	ppt = device_get_softc(dev);
+
+	if (ppt->vm != NULL)
+		return (EBUSY);
+	num_pptdevs--;
+	TAILQ_REMOVE(&pptdev_list, ppt, next);
+	pci_disable_busmaster(dev);
+	iommu_add_device(iommu_host_domain(), pci_get_rid(dev));
+
+	return (0);
+}
+
+static device_method_t ppt_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		ppt_probe),
+	DEVMETHOD(device_attach,	ppt_attach),
+	DEVMETHOD(device_detach,	ppt_detach),
+	{0, 0}
+};
+
+static devclass_t ppt_devclass;
+DEFINE_CLASS_0(ppt, ppt_driver, ppt_methods, sizeof(struct pptdev));
+DRIVER_MODULE(ppt, pci, ppt_driver, ppt_devclass, NULL, NULL);
+
+static struct pptdev *
+ppt_find(int bus, int slot, int func)
+{
+	device_t dev;
+	struct pptdev *ppt;
+	int b, s, f;
+
+	TAILQ_FOREACH(ppt, &pptdev_list, next) {
+		dev = ppt->dev;
+		b = pci_get_bus(dev);
+		s = pci_get_slot(dev);
+		f = pci_get_function(dev);
+		if (bus == b && slot == s && func == f)
+			return (ppt);
+	}
+	return (NULL);
+}
+
+static void
+ppt_unmap_mmio(struct vm *vm, struct pptdev *ppt)
+{
+	int i;
+	struct pptseg *seg;
+
+	for (i = 0; i < MAX_MMIOSEGS; i++) {
+		seg = &ppt->mmio[i];
+		if (seg->len == 0)
+			continue;
+		(void)vm_unmap_mmio(vm, seg->gpa, seg->len);
+		bzero(seg, sizeof(struct pptseg));
+	}
+}
+
+static void
+ppt_teardown_msi(struct pptdev *ppt)
+{
+	int i, rid;
+	void *cookie;
+	struct resource *res;
+
+	if (ppt->msi.num_msgs == 0)
+		return;
+
+	for (i = 0; i < ppt->msi.num_msgs; i++) {
+		rid = ppt->msi.startrid + i;
+		res = ppt->msi.res[i];
+		cookie = ppt->msi.cookie[i];
+
+		if (cookie != NULL)
+			bus_teardown_intr(ppt->dev, res, cookie);
+
+		if (res != NULL)
+			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);
+		
+		ppt->msi.res[i] = NULL;
+		ppt->msi.cookie[i] = NULL;
+	}
+
+	if (ppt->msi.startrid == 1)
+		pci_release_msi(ppt->dev);
+
+	ppt->msi.num_msgs = 0;
+}
+
+static void 
+ppt_teardown_msix_intr(struct pptdev *ppt, int idx)
+{
+	int rid;
+	struct resource *res;
+	void *cookie;
+
+	rid = ppt->msix.startrid + idx;
+	res = ppt->msix.res[idx];
+	cookie = ppt->msix.cookie[idx];
+
+	if (cookie != NULL) 
+		bus_teardown_intr(ppt->dev, res, cookie);
+
+	if (res != NULL) 
+		bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);
+
+	ppt->msix.res[idx] = NULL;
+	ppt->msix.cookie[idx] = NULL;
+}
+
+static void 
+ppt_teardown_msix(struct pptdev *ppt)
+{
+	int i;
+
+	if (ppt->msix.num_msgs == 0) 
+		return;
+
+	for (i = 0; i < ppt->msix.num_msgs; i++) 
+		ppt_teardown_msix_intr(ppt, i);
+
+	if (ppt->msix.msix_table_res) {
+		bus_release_resource(ppt->dev, SYS_RES_MEMORY, 
+				     ppt->msix.msix_table_rid,
+				     ppt->msix.msix_table_res);
+		ppt->msix.msix_table_res = NULL;
+		ppt->msix.msix_table_rid = 0;
+	}
+
+	free(ppt->msix.res, M_PPTMSIX);
+	free(ppt->msix.cookie, M_PPTMSIX);
+	free(ppt->msix.arg, M_PPTMSIX);
+
+	pci_release_msi(ppt->dev);
+
+	ppt->msix.num_msgs = 0;
+}
+
+int
+ppt_avail_devices(void)
+{
+
+	return (num_pptdevs);
+}
+
+int
+ppt_assigned_devices(struct vm *vm)
+{
+	struct pptdev *ppt;
+	int num;
+
+	num = 0;
+	TAILQ_FOREACH(ppt, &pptdev_list, next) {
+		if (ppt->vm == vm)
+			num++;
+	}
+	return (num);
+}
+
+boolean_t
+ppt_is_mmio(struct vm *vm, vm_paddr_t gpa)
+{
+	int i;
+	struct pptdev *ppt;
+	struct pptseg *seg;
+
+	TAILQ_FOREACH(ppt, &pptdev_list, next) {
+		if (ppt->vm != vm)
+			continue;
+
+		for (i = 0; i < MAX_MMIOSEGS; i++) {
+			seg = &ppt->mmio[i];
+			if (seg->len == 0)
+				continue;
+			if (gpa >= seg->gpa && gpa < seg->gpa + seg->len)
+				return (TRUE);
+		}
+	}
+
+	return (FALSE);
+}
+
+int
+ppt_assign_device(struct vm *vm, int bus, int slot, int func)
+{
+	struct pptdev *ppt;
+
+	ppt = ppt_find(bus, slot, func);
+	if (ppt != NULL) {
+		/*
+		 * If this device is owned by a different VM then we
+		 * cannot change its owner.
+		 */
+		if (ppt->vm != NULL && ppt->vm != vm)
+			return (EBUSY);
+
+		pci_save_state(ppt->dev);
+		pcie_flr(ppt->dev,
+		    max(pcie_get_max_completion_timeout(ppt->dev) / 1000, 10),
+		    true);
+		pci_restore_state(ppt->dev);
+		ppt->vm = vm;
+		iommu_add_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev));
+		return (0);
+	}
+	return (ENOENT);
+}
+
+int
+ppt_unassign_device(struct vm *vm, int bus, int slot, int func)
+{
+	struct pptdev *ppt;
+
+	ppt = ppt_find(bus, slot, func);
+	if (ppt != NULL) {
+		/*
+		 * If this device is not owned by this 'vm' then bail out.
+		 */
+		if (ppt->vm != vm)
+			return (EBUSY);
+
+		pci_save_state(ppt->dev);
+		pcie_flr(ppt->dev,
+		    max(pcie_get_max_completion_timeout(ppt->dev) / 1000, 10),
+		    true);
+		pci_restore_state(ppt->dev);
+		ppt_unmap_mmio(vm, ppt);
+		ppt_teardown_msi(ppt);
+		ppt_teardown_msix(ppt);
+		iommu_remove_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev));
+		ppt->vm = NULL;
+		return (0);
+	}
+	return (ENOENT);
+}
+
+int
+ppt_unassign_all(struct vm *vm)
+{
+	struct pptdev *ppt;
+	int bus, slot, func;
+	device_t dev;
+
+	TAILQ_FOREACH(ppt, &pptdev_list, next) {
+		if (ppt->vm == vm) {
+			dev = ppt->dev;
+			bus = pci_get_bus(dev);
+			slot = pci_get_slot(dev);
+			func = pci_get_function(dev);
+			vm_unassign_pptdev(vm, bus, slot, func);
+		}
+	}
+
+	return (0);
+}
+
+int
+ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
+	     vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
+{
+	int i, error;
+	struct pptseg *seg;
+	struct pptdev *ppt;
+
+	ppt = ppt_find(bus, slot, func);
+	if (ppt != NULL) {
+		if (ppt->vm != vm)
+			return (EBUSY);
+
+		for (i = 0; i < MAX_MMIOSEGS; i++) {
+			seg = &ppt->mmio[i];
+			if (seg->len == 0) {
+				error = vm_map_mmio(vm, gpa, len, hpa);
+				if (error == 0) {
+					seg->gpa = gpa;
+					seg->len = len;
+				}
+				return (error);
+			}
+		}
+		return (ENOSPC);
+	}
+	return (ENOENT);
+}
+
+static int
+pptintr(void *arg)
+{
+	struct pptdev *ppt;
+	struct pptintr_arg *pptarg;
+	
+	pptarg = arg;
+	ppt = pptarg->pptdev;
+
+	if (ppt->vm != NULL)
+		lapic_intr_msi(ppt->vm, pptarg->addr, pptarg->msg_data);
+	else {
+		/*
+		 * XXX
+		 * This is not expected to happen - panic?
+		 */
+	}
+
+	/*
+	 * For legacy interrupts give other filters a chance in case
+	 * the interrupt was not generated by the passthrough device.
+	 */
+	if (ppt->msi.startrid == 0)
+		return (FILTER_STRAY);
+	else
+		return (FILTER_HANDLED);
+}
+
+int
+ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func,
+	      uint64_t addr, uint64_t msg, int numvec)
+{
+	int i, rid, flags;
+	int msi_count, startrid, error, tmp;
+	struct pptdev *ppt;
+
+	if (numvec < 0 || numvec > MAX_MSIMSGS)
+		return (EINVAL);
+
+	ppt = ppt_find(bus, slot, func);
+	if (ppt == NULL)
+		return (ENOENT);
+	if (ppt->vm != vm)		/* Make sure we own this device */
+		return (EBUSY);
+
+	/* Free any allocated resources */
+	ppt_teardown_msi(ppt);
+
+	if (numvec == 0)		/* nothing more to do */
+		return (0);
+
+	flags = RF_ACTIVE;
+	msi_count = pci_msi_count(ppt->dev);
+	if (msi_count == 0) {
+		startrid = 0;		/* legacy interrupt */
+		msi_count = 1;
+		flags |= RF_SHAREABLE;
+	} else
+		startrid = 1;		/* MSI */
+
+	/*
+	 * The device must be capable of supporting the number of vectors
+	 * the guest wants to allocate.
+	 */
+	if (numvec > msi_count)
+		return (EINVAL);
+
+	/*
+	 * Make sure that we can allocate all the MSI vectors that are needed
+	 * by the guest.
+	 */
+	if (startrid == 1) {
+		tmp = numvec;
+		error = pci_alloc_msi(ppt->dev, &tmp);
+		if (error)
+			return (error);
+		else if (tmp != numvec) {
+			pci_release_msi(ppt->dev);
+			return (ENOSPC);
+		} else {
+			/* success */
+		}
+	}
+	
+	ppt->msi.startrid = startrid;
+
+	/*
+	 * Allocate the irq resource and attach it to the interrupt handler.
+	 */
+	for (i = 0; i < numvec; i++) {
+		ppt->msi.num_msgs = i + 1;
+		ppt->msi.cookie[i] = NULL;
+
+		rid = startrid + i;
+		ppt->msi.res[i] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ,
+							 &rid, flags);
+		if (ppt->msi.res[i] == NULL)
+			break;
+
+		ppt->msi.arg[i].pptdev = ppt;
+		ppt->msi.arg[i].addr = addr;
+		ppt->msi.arg[i].msg_data = msg + i;
+
+		error = bus_setup_intr(ppt->dev, ppt->msi.res[i],
+				       INTR_TYPE_NET | INTR_MPSAFE,
+				       pptintr, NULL, &ppt->msi.arg[i],
+				       &ppt->msi.cookie[i]);
+		if (error != 0)
+			break;
+	}
+	
+	if (i < numvec) {
+		ppt_teardown_msi(ppt);
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+int
+ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func,
+	       int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
+{
+	struct pptdev *ppt;
+	struct pci_devinfo *dinfo;
+	int numvec, alloced, rid, error;
+	size_t res_size, cookie_size, arg_size;
+
+	ppt = ppt_find(bus, slot, func);
+	if (ppt == NULL)
+		return (ENOENT);
+	if (ppt->vm != vm)		/* Make sure we own this device */
+		return (EBUSY);
+
+	dinfo = device_get_ivars(ppt->dev);
+	if (!dinfo) 
+		return (ENXIO);
+
+	/* 
+	 * First-time configuration:
+	 * 	Allocate the MSI-X table
+	 *	Allocate the IRQ resources
+	 *	Set up some variables in ppt->msix
+	 */
+	if (ppt->msix.num_msgs == 0) {
+		numvec = pci_msix_count(ppt->dev);
+		if (numvec <= 0)
+			return (EINVAL);
+
+		ppt->msix.startrid = 1;
+		ppt->msix.num_msgs = numvec;
+
+		res_size = numvec * sizeof(ppt->msix.res[0]);
+		cookie_size = numvec * sizeof(ppt->msix.cookie[0]);
+		arg_size = numvec * sizeof(ppt->msix.arg[0]);
+
+		ppt->msix.res = malloc(res_size, M_PPTMSIX, M_WAITOK | M_ZERO);
+		ppt->msix.cookie = malloc(cookie_size, M_PPTMSIX,
+					  M_WAITOK | M_ZERO);
+		ppt->msix.arg = malloc(arg_size, M_PPTMSIX, M_WAITOK | M_ZERO);
+
+		rid = dinfo->cfg.msix.msix_table_bar;
+		ppt->msix.msix_table_res = bus_alloc_resource_any(ppt->dev,
+					       SYS_RES_MEMORY, &rid, RF_ACTIVE);
+
+		if (ppt->msix.msix_table_res == NULL) {
+			ppt_teardown_msix(ppt);
+			return (ENOSPC);
+		}
+		ppt->msix.msix_table_rid = rid;
+
+		alloced = numvec;
+		error = pci_alloc_msix(ppt->dev, &alloced);
+		if (error || alloced != numvec) {
+			ppt_teardown_msix(ppt);
+			return (error == 0 ? ENOSPC : error);
+		}
+	}
+
+	if ((vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
+		/* Tear down the IRQ if it's already set up */
+		ppt_teardown_msix_intr(ppt, idx);
+
+		/* Allocate the IRQ resource */
+		ppt->msix.cookie[idx] = NULL;
+		rid = ppt->msix.startrid + idx;
+		ppt->msix.res[idx] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ,
+							    &rid, RF_ACTIVE);
+		if (ppt->msix.res[idx] == NULL)
+			return (ENXIO);
+	
+		ppt->msix.arg[idx].pptdev = ppt;
+		ppt->msix.arg[idx].addr = addr;
+		ppt->msix.arg[idx].msg_data = msg;
+	
+		/* Setup the MSI-X interrupt */
+		error = bus_setup_intr(ppt->dev, ppt->msix.res[idx],
+				       INTR_TYPE_NET | INTR_MPSAFE,
+				       pptintr, NULL, &ppt->msix.arg[idx],
+				       &ppt->msix.cookie[idx]);
+	
+		if (error != 0) {
+			bus_teardown_intr(ppt->dev, ppt->msix.res[idx], ppt->msix.cookie[idx]);
+			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, ppt->msix.res[idx]);
+			ppt->msix.cookie[idx] = NULL;
+			ppt->msix.res[idx] = NULL;
+			return (ENXIO);
+		}
+	} else {
+		/* Masked, tear it down if it's already been set up */
+		ppt_teardown_msix_intr(ppt, idx);
+	}
+
+	return (0);
+}


Property changes on: trunk/sys/amd64/vmm/io/ppt.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/ppt.h
===================================================================
--- trunk/sys/amd64/vmm/io/ppt.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/ppt.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,55 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/ppt.h 267070 2014-06-04 17:57:48Z jhb $
+ */
+
+#ifndef _IO_PPT_H_
+#define	_IO_PPT_H_
+
+int	ppt_unassign_all(struct vm *vm);
+int	ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
+		     vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
+int	ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func,
+		      uint64_t addr, uint64_t msg, int numvec);
+int	ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func,
+		int idx, uint64_t addr, uint64_t msg, uint32_t vector_control);
+int	ppt_assigned_devices(struct vm *vm);
+boolean_t ppt_is_mmio(struct vm *vm, vm_paddr_t gpa);
+
+/*
+ * Returns the number of devices sequestered by the ppt driver for assignment
+ * to virtual machines.
+ */
+int	ppt_avail_devices(void);
+
+/*
+ * The following functions should never be called directly.
+ * Use 'vm_assign_pptdev()' and 'vm_unassign_pptdev()' instead.
+ */
+int	ppt_assign_device(struct vm *vm, int bus, int slot, int func);
+int	ppt_unassign_device(struct vm *vm, int bus, int slot, int func);
+#endif


Property changes on: trunk/sys/amd64/vmm/io/ppt.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vatpic.c
===================================================================
--- trunk/sys/amd64/vmm/io/vatpic.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vatpic.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,809 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/vatpic.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+
+#include <x86/apicreg.h>
+#include <dev/ic/i8259.h>
+
+#include <machine/vmm.h>
+
+#include "vmm_ktr.h"
+#include "vmm_lapic.h"
+#include "vioapic.h"
+#include "vatpic.h"
+
+static MALLOC_DEFINE(M_VATPIC, "atpic", "bhyve virtual atpic (8259)");
+
+#define	VATPIC_LOCK(vatpic)		mtx_lock_spin(&((vatpic)->mtx))
+#define	VATPIC_UNLOCK(vatpic)		mtx_unlock_spin(&((vatpic)->mtx))
+#define	VATPIC_LOCKED(vatpic)		mtx_owned(&((vatpic)->mtx))
+
+enum irqstate {
+	IRQSTATE_ASSERT,
+	IRQSTATE_DEASSERT,
+	IRQSTATE_PULSE
+};
+
+struct atpic {
+	bool		ready;
+	int		icw_num;
+	int		rd_cmd_reg;
+
+	bool		aeoi;
+	bool		poll;
+	bool		rotate;
+	bool		sfn;		/* special fully-nested mode */
+
+	int		irq_base;
+	uint8_t		request;	/* Interrupt Request Register (IRR) */
+	uint8_t		service;	/* In-Service Register (ISR) */
+	uint8_t		mask;		/* Interrupt Mask Register (IMR) */
+	uint8_t		smm;		/* special mask mode */
+
+	int		acnt[8];	/* sum of pin asserts and deasserts */
+	int		lowprio;	/* lowest priority irq */
+
+	bool		intr_raised;
+};
+
+struct vatpic {
+	struct vm	*vm;
+	struct mtx	mtx;
+	struct atpic	atpic[2];
+	uint8_t		elc[2];
+};
+
+#define	VATPIC_CTR0(vatpic, fmt)					\
+	VM_CTR0((vatpic)->vm, fmt)
+
+#define	VATPIC_CTR1(vatpic, fmt, a1)					\
+	VM_CTR1((vatpic)->vm, fmt, a1)
+
+#define	VATPIC_CTR2(vatpic, fmt, a1, a2)				\
+	VM_CTR2((vatpic)->vm, fmt, a1, a2)
+
+#define	VATPIC_CTR3(vatpic, fmt, a1, a2, a3)				\
+	VM_CTR3((vatpic)->vm, fmt, a1, a2, a3)
+
+#define	VATPIC_CTR4(vatpic, fmt, a1, a2, a3, a4)			\
+	VM_CTR4((vatpic)->vm, fmt, a1, a2, a3, a4)
+
+/*
+ * Loop over all the pins in priority order from highest to lowest.
+ */
+#define	ATPIC_PIN_FOREACH(pinvar, atpic, tmpvar)			\
+	for (tmpvar = 0, pinvar = (atpic->lowprio + 1) & 0x7;		\
+	    tmpvar < 8;							\
+	    tmpvar++, pinvar = (pinvar + 1) & 0x7)
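+/*
+ * For example, with the initial 'lowprio' of 7 the pins are visited in the
+ * order 0,1,...,7; after a rotation that sets 'lowprio' to 3 the order
+ * becomes 4,5,6,7,0,1,2,3.
+ */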
+
+static void vatpic_set_pinstate(struct vatpic *vatpic, int pin, bool newstate);
+
+static __inline bool
+master_atpic(struct vatpic *vatpic, struct atpic *atpic)
+{
+
+	if (atpic == &vatpic->atpic[0])
+		return (true);
+	else
+		return (false);
+}
+
+static __inline int
+vatpic_get_highest_isrpin(struct atpic *atpic)
+{
+	int bit, pin;
+	int i;
+
+	ATPIC_PIN_FOREACH(pin, atpic, i) {
+		bit = (1 << pin);
+
+		if (atpic->service & bit) {
+			/*
+			 * An IS bit that is masked by an IMR bit will not be
+			 * cleared by a non-specific EOI in Special Mask Mode.
+			 */
+			if (atpic->smm && (atpic->mask & bit) != 0)
+				continue;
+			else
+				return (pin);
+		}
+	}
+
+	return (-1);
+}
+
+static __inline int
+vatpic_get_highest_irrpin(struct atpic *atpic)
+{
+	int serviced;
+	int bit, pin, tmp;
+
+	/*
+	 * In 'Special Fully-Nested Mode' when an interrupt request from
+	 * a slave is in service, the slave is not locked out from the
+	 * master's priority logic.
+	 */
+	serviced = atpic->service;
+	if (atpic->sfn)
+		serviced &= ~(1 << 2);
+
+	/*
+	 * In 'Special Mask Mode', when a mask bit is set in OCW1 it inhibits
+	 * further interrupts at that level and enables interrupts from all
+	 * other levels that are not masked. In other words the ISR has no
+	 * bearing on the levels that can generate interrupts.
+	 */
+	if (atpic->smm)
+		serviced = 0;
+
+	ATPIC_PIN_FOREACH(pin, atpic, tmp) {
+		bit = 1 << pin;
+
+		/*
+		 * If there is already an interrupt in service at the same
+		 * or higher priority then bail.
+		 */
+		if ((serviced & bit) != 0)
+			break;
+
+		/*
+		 * If an interrupt is asserted and not masked then return
+		 * the corresponding 'pin' to the caller.
+		 */
+		if ((atpic->request & bit) != 0 && (atpic->mask & bit) == 0)
+			return (pin);
+	}
+
+	return (-1);
+}
+
+static void
+vatpic_notify_intr(struct vatpic *vatpic)
+{
+	struct atpic *atpic;
+	int pin;
+
+	KASSERT(VATPIC_LOCKED(vatpic), ("vatpic_notify_intr not locked"));
+
+	/*
+	 * First check the slave.
+	 */
+	atpic = &vatpic->atpic[1];
+	if (!atpic->intr_raised &&
+	    (pin = vatpic_get_highest_irrpin(atpic)) != -1) {
+		VATPIC_CTR4(vatpic, "atpic slave notify pin = %d "
+		    "(imr 0x%x irr 0x%x isr 0x%x)", pin,
+		    atpic->mask, atpic->request, atpic->service);
+
+		/*
+		 * Cascade the request from the slave to the master.
+		 */
+		atpic->intr_raised = true;
+		vatpic_set_pinstate(vatpic, 2, true);
+		vatpic_set_pinstate(vatpic, 2, false);
+	} else {
+		VATPIC_CTR3(vatpic, "atpic slave no eligible interrupts "
+		    "(imr 0x%x irr 0x%x isr 0x%x)",
+		    atpic->mask, atpic->request, atpic->service);
+	}
+
+	/*
+	 * Then check the master.
+	 */
+	atpic = &vatpic->atpic[0];
+	if (!atpic->intr_raised &&
+	    (pin = vatpic_get_highest_irrpin(atpic)) != -1) {
+		VATPIC_CTR4(vatpic, "atpic master notify pin = %d "
+		    "(imr 0x%x irr 0x%x isr 0x%x)", pin,
+		    atpic->mask, atpic->request, atpic->service);
+
+		/*
+		 * From Section 3.6.2, "Interrupt Modes", in the
+		 * MPtable Specification, Version 1.4
+		 *
+		 * PIC interrupts are routed to both the Local APIC
+		 * and the I/O APIC to support operation in 1 of 3
+		 * modes.
+		 *
+		 * 1. Legacy PIC Mode: the PIC effectively bypasses
+		 * all APIC components.  In this mode the local APIC is
+		 * disabled and LINT0 is reconfigured as INTR to
+		 * deliver the PIC interrupt directly to the CPU.
+		 *
+		 * 2. Virtual Wire Mode: the APIC is treated as a
+		 * virtual wire which delivers interrupts from the PIC
+		 * to the CPU.  In this mode LINT0 is programmed as
+		 * ExtINT to indicate that the PIC is the source of
+		 * the interrupt.
+		 *
+		 * 3. Virtual Wire Mode via I/O APIC: PIC interrupts are
+		 * fielded by the I/O APIC and delivered to the appropriate
+		 * CPU.  In this mode the I/O APIC input 0 is programmed
+		 * as ExtINT to indicate that the PIC is the source of the
+		 * interrupt.
+		 */
+		atpic->intr_raised = true;
+		lapic_set_local_intr(vatpic->vm, -1, APIC_LVT_LINT0);
+		vioapic_pulse_irq(vatpic->vm, 0);
+	} else {
+		VATPIC_CTR3(vatpic, "atpic master no eligible interrupts "
+		    "(imr 0x%x irr 0x%x isr 0x%x)",
+		    atpic->mask, atpic->request, atpic->service);
+	}
+}
+
+static int
+vatpic_icw1(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
+{
+	VATPIC_CTR1(vatpic, "atpic icw1 0x%x", val);
+
+	atpic->ready = false;
+
+	atpic->icw_num = 1;
+	atpic->request = 0;
+	atpic->mask = 0;
+	atpic->lowprio = 7;
+	atpic->rd_cmd_reg = 0;
+	atpic->poll = 0;
+	atpic->smm = 0;
+
+	if ((val & ICW1_SNGL) != 0) {
+		VATPIC_CTR0(vatpic, "vatpic cascade mode required");
+		return (-1);
+	}
+
+	if ((val & ICW1_IC4) == 0) {
+		VATPIC_CTR0(vatpic, "vatpic icw4 required");
+		return (-1);
+	}
+
+	atpic->icw_num++;
+
+	return (0);
+}
+
+static int
+vatpic_icw2(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
+{
+	VATPIC_CTR1(vatpic, "atpic icw2 0x%x", val);
+
+	atpic->irq_base = val & 0xf8;
+
+	atpic->icw_num++;
+
+	return (0);
+}
+
+static int
+vatpic_icw3(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
+{
+	VATPIC_CTR1(vatpic, "atpic icw3 0x%x", val);
+
+	atpic->icw_num++;
+
+	return (0);
+}
+
+static int
+vatpic_icw4(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
+{
+	VATPIC_CTR1(vatpic, "atpic icw4 0x%x", val);
+
+	if ((val & ICW4_8086) == 0) {
+		VATPIC_CTR0(vatpic, "vatpic microprocessor mode required");
+		return (-1);
+	}
+
+	if ((val & ICW4_AEOI) != 0)
+		atpic->aeoi = true;
+
+	if ((val & ICW4_SFNM) != 0) {
+		if (master_atpic(vatpic, atpic)) {
+			atpic->sfn = true;
+		} else {
+			VATPIC_CTR1(vatpic, "Ignoring special fully nested "
+			    "mode on slave atpic: %#x", val);
+		}
+	}
+
+	atpic->icw_num = 0;
+	atpic->ready = true;
+
+	return (0);
+}
+
+static int
+vatpic_ocw1(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
+{
+	VATPIC_CTR1(vatpic, "atpic ocw1 0x%x", val);
+
+	atpic->mask = val & 0xff;
+
+	return (0);
+}
+
+static int
+vatpic_ocw2(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
+{
+	VATPIC_CTR1(vatpic, "atpic ocw2 0x%x", val);
+
+	atpic->rotate = ((val & OCW2_R) != 0);
+
+	if ((val & OCW2_EOI) != 0) {
+		int isr_bit;
+
+		if ((val & OCW2_SL) != 0) {
+			/* specific EOI */
+			isr_bit = val & 0x7;
+		} else {
+			/* non-specific EOI */
+			isr_bit = vatpic_get_highest_isrpin(atpic);
+		}
+
+		if (isr_bit != -1) {
+			atpic->service &= ~(1 << isr_bit);
+
+			if (atpic->rotate)
+				atpic->lowprio = isr_bit;
+		}
+	} else if ((val & OCW2_SL) != 0 && atpic->rotate == true) {
+		/* specific priority */
+		atpic->lowprio = val & 0x7;
+	}
+
+	return (0);
+}
+
+static int
+vatpic_ocw3(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
+{
+	VATPIC_CTR1(vatpic, "atpic ocw3 0x%x", val);
+
+	if (val & OCW3_ESMM) {
+		atpic->smm = val & OCW3_SMM ? 1 : 0;
+		VATPIC_CTR2(vatpic, "%s atpic special mask mode %s",
+		    master_atpic(vatpic, atpic) ? "master" : "slave",
+		    atpic->smm ?  "enabled" : "disabled");
+	}
+
+	if (val & OCW3_RR) {
+		/* read register command */
+		atpic->rd_cmd_reg = val & OCW3_RIS;
+
+		/* Polling mode */
+		atpic->poll = ((val & OCW3_P) != 0);
+	}
+
+	return (0);
+}
+
+static void
+vatpic_set_pinstate(struct vatpic *vatpic, int pin, bool newstate)
+{
+	struct atpic *atpic;
+	int oldcnt, newcnt;
+	bool level;
+
+	KASSERT(pin >= 0 && pin < 16,
+	    ("vatpic_set_pinstate: invalid pin number %d", pin));
+	KASSERT(VATPIC_LOCKED(vatpic),
+	    ("vatpic_set_pinstate: vatpic is not locked"));
+
+	atpic = &vatpic->atpic[pin >> 3];
+
+	oldcnt = atpic->acnt[pin & 0x7];
+	if (newstate)
+		atpic->acnt[pin & 0x7]++;
+	else
+		atpic->acnt[pin & 0x7]--;
+	newcnt = atpic->acnt[pin & 0x7];
+
+	if (newcnt < 0) {
+		VATPIC_CTR2(vatpic, "atpic pin%d: bad acnt %d", pin, newcnt);
+	}
+
+	level = ((vatpic->elc[pin >> 3] & (1 << (pin & 0x7))) != 0);
+
+	if ((oldcnt == 0 && newcnt == 1) || (newcnt > 0 && level == true)) {
+		/* rising edge or level */
+		VATPIC_CTR1(vatpic, "atpic pin%d: asserted", pin);
+		atpic->request |= (1 << (pin & 0x7));
+	} else if (oldcnt == 1 && newcnt == 0) {
+		/* falling edge */
+		VATPIC_CTR1(vatpic, "atpic pin%d: deasserted", pin);
+		if (level)
+			atpic->request &= ~(1 << (pin & 0x7));
+	} else {
+		VATPIC_CTR3(vatpic, "atpic pin%d: %s, ignored, acnt %d",
+		    pin, newstate ? "asserted" : "deasserted", newcnt);
+	}
+
+	vatpic_notify_intr(vatpic);
+}
+
+static int
+vatpic_set_irqstate(struct vm *vm, int irq, enum irqstate irqstate)
+{
+	struct vatpic *vatpic;
+	struct atpic *atpic;
+
+	if (irq < 0 || irq > 15)
+		return (EINVAL);
+
+	vatpic = vm_atpic(vm);
+	atpic = &vatpic->atpic[irq >> 3];
+
+	if (atpic->ready == false)
+		return (0);
+
+	VATPIC_LOCK(vatpic);
+	switch (irqstate) {
+	case IRQSTATE_ASSERT:
+		vatpic_set_pinstate(vatpic, irq, true);
+		break;
+	case IRQSTATE_DEASSERT:
+		vatpic_set_pinstate(vatpic, irq, false);
+		break;
+	case IRQSTATE_PULSE:
+		vatpic_set_pinstate(vatpic, irq, true);
+		vatpic_set_pinstate(vatpic, irq, false);
+		break;
+	default:
+		panic("vatpic_set_irqstate: invalid irqstate %d", irqstate);
+	}
+	VATPIC_UNLOCK(vatpic);
+
+	return (0);
+}
+
+int
+vatpic_assert_irq(struct vm *vm, int irq)
+{
+	return (vatpic_set_irqstate(vm, irq, IRQSTATE_ASSERT));
+}
+
+int
+vatpic_deassert_irq(struct vm *vm, int irq)
+{
+	return (vatpic_set_irqstate(vm, irq, IRQSTATE_DEASSERT));
+}
+
+int
+vatpic_pulse_irq(struct vm *vm, int irq)
+{
+	return (vatpic_set_irqstate(vm, irq, IRQSTATE_PULSE));
+}
+
+int
+vatpic_set_irq_trigger(struct vm *vm, int irq, enum vm_intr_trigger trigger)
+{
+	struct vatpic *vatpic;
+
+	if (irq < 0 || irq > 15)
+		return (EINVAL);
+
+	/*
+	 * See comment in vatpic_elc_handler.  These IRQs must be
+	 * edge triggered.
+	 */
+	if (trigger == LEVEL_TRIGGER) {
+		switch (irq) {
+		case 0:
+		case 1:
+		case 2:
+		case 8:
+		case 13:
+			return (EINVAL);
+		}
+	}
+
+	vatpic = vm_atpic(vm);
+
+	VATPIC_LOCK(vatpic);
+
+	if (trigger == LEVEL_TRIGGER)
+		vatpic->elc[irq >> 3] |=  1 << (irq & 0x7);
+	else
+		vatpic->elc[irq >> 3] &=  ~(1 << (irq & 0x7));
+
+	VATPIC_UNLOCK(vatpic);
+
+	return (0);
+}
+
+void
+vatpic_pending_intr(struct vm *vm, int *vecptr)
+{
+	struct vatpic *vatpic;
+	struct atpic *atpic;
+	int pin;
+
+	vatpic = vm_atpic(vm);
+
+	atpic = &vatpic->atpic[0];
+
+	VATPIC_LOCK(vatpic);
+
+	pin = vatpic_get_highest_irrpin(atpic);
+	if (pin == 2) {
+		atpic = &vatpic->atpic[1];
+		pin = vatpic_get_highest_irrpin(atpic);
+	}
+
+	/*
+	 * If there are no pins active at this moment then return the spurious
+	 * interrupt vector instead.
+	 */
+	if (pin == -1)
+		pin = 7;
+
+	KASSERT(pin >= 0 && pin <= 7, ("%s: invalid pin %d", __func__, pin));
+	*vecptr = atpic->irq_base + pin;
+
+	VATPIC_UNLOCK(vatpic);
+}
+
+static void
+vatpic_pin_accepted(struct atpic *atpic, int pin)
+{
+	atpic->intr_raised = false;
+
+	if (atpic->acnt[pin] == 0)
+		atpic->request &= ~(1 << pin);
+
+	if (atpic->aeoi == true) {
+		if (atpic->rotate == true)
+			atpic->lowprio = pin;
+	} else {
+		atpic->service |= (1 << pin);
+	}
+}
+
+void
+vatpic_intr_accepted(struct vm *vm, int vector)
+{
+	struct vatpic *vatpic;
+	int pin;
+
+	vatpic = vm_atpic(vm);
+
+	VATPIC_LOCK(vatpic);
+
+	pin = vector & 0x7;
+
+	if ((vector & ~0x7) == vatpic->atpic[1].irq_base) {
+		vatpic_pin_accepted(&vatpic->atpic[1], pin);
+		/*
+		 * If this vector originated from the slave,
+		 * accept the cascaded interrupt too.
+		 */
+		vatpic_pin_accepted(&vatpic->atpic[0], 2);
+	} else {
+		vatpic_pin_accepted(&vatpic->atpic[0], pin);
+	}
+
+	vatpic_notify_intr(vatpic);
+
+	VATPIC_UNLOCK(vatpic);
+}
+
+static int
+vatpic_read(struct vatpic *vatpic, struct atpic *atpic, bool in, int port,
+	    int bytes, uint32_t *eax)
+{
+	int pin;
+
+	VATPIC_LOCK(vatpic);
+
+	if (atpic->poll) {
+		atpic->poll = 0;
+		pin = vatpic_get_highest_irrpin(atpic);
+		if (pin >= 0) {
+			vatpic_pin_accepted(atpic, pin);
+			*eax = 0x80 | pin;
+		} else {
+			*eax = 0;
+		}
+	} else {
+		if (port & ICU_IMR_OFFSET) {
+			/* read interrupt mask register */
+			*eax = atpic->mask;
+		} else {
+			if (atpic->rd_cmd_reg == OCW3_RIS) {
+				/* read interrupt service register */
+				*eax = atpic->service;
+			} else {
+				/* read interrupt request register */
+				*eax = atpic->request;
+			}
+		}
+	}
+
+	VATPIC_UNLOCK(vatpic);
+
+	return (0);
+}
+
+static int
+vatpic_write(struct vatpic *vatpic, struct atpic *atpic, bool in, int port,
+    int bytes, uint32_t *eax)
+{
+	int error;
+	uint8_t val;
+
+	error = 0;
+	val = *eax;
+
+	VATPIC_LOCK(vatpic);
+
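+	/*
+	 * Command-port writes are decoded the way the 8259A does it: bit 4
+	 * set selects ICW1, otherwise bit 3 distinguishes OCW3 from OCW2.
+	 * Data-port (IMR offset) writes continue an ICW sequence if one is
+	 * in progress and are treated as OCW1 otherwise.
+	 */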
+	if (port & ICU_IMR_OFFSET) {
+		switch (atpic->icw_num) {
+		case 2:
+			error = vatpic_icw2(vatpic, atpic, val);
+			break;
+		case 3:
+			error = vatpic_icw3(vatpic, atpic, val);
+			break;
+		case 4:
+			error = vatpic_icw4(vatpic, atpic, val);
+			break;
+		default:
+			error = vatpic_ocw1(vatpic, atpic, val);
+			break;
+		}
+	} else {
+		if (val & (1 << 4))
+			error = vatpic_icw1(vatpic, atpic, val);
+
+		if (atpic->ready) {
+			if (val & (1 << 3))
+				error = vatpic_ocw3(vatpic, atpic, val);
+			else
+				error = vatpic_ocw2(vatpic, atpic, val);
+		}
+	}
+
+	if (atpic->ready)
+		vatpic_notify_intr(vatpic);
+
+	VATPIC_UNLOCK(vatpic);
+
+	return (error);
+}
+
+int
+vatpic_master_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
+{
+	struct vatpic *vatpic;
+	struct atpic *atpic;
+
+	vatpic = vm_atpic(vm);
+	atpic = &vatpic->atpic[0];
+
+	if (bytes != 1)
+		return (-1);
+ 
+	if (in) {
+		return (vatpic_read(vatpic, atpic, in, port, bytes, eax));
+	}
+ 
+	return (vatpic_write(vatpic, atpic, in, port, bytes, eax));
+}
+
+int
+vatpic_slave_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
+{
+	struct vatpic *vatpic;
+	struct atpic *atpic;
+
+	vatpic = vm_atpic(vm);
+	atpic = &vatpic->atpic[1];
+
+	if (bytes != 1)
+		return (-1);
+
+	if (in) {
+		return (vatpic_read(vatpic, atpic, in, port, bytes, eax));
+	}
+
+	return (vatpic_write(vatpic, atpic, in, port, bytes, eax));
+}
+
+int
+vatpic_elc_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
+{
+	struct vatpic *vatpic;
+	bool is_master;
+
+	vatpic = vm_atpic(vm);
+	is_master = (port == IO_ELCR1);
+
+	if (bytes != 1)
+		return (-1);
+
+	VATPIC_LOCK(vatpic);
+
+	if (in) {
+		if (is_master)
+			*eax = vatpic->elc[0];
+		else
+			*eax = vatpic->elc[1];
+	} else {
+		/*
+		 * For the master PIC the cascade channel (IRQ2), the
+		 * heartbeat timer (IRQ0), and the keyboard
+		 * controller (IRQ1) cannot be programmed for level
+		 * mode.
+		 *
+		 * For the slave PIC the real time clock (IRQ8) and
+		 * the floating point error interrupt (IRQ13) cannot
+		 * be programmed for level mode.
+		 */
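+		/*
+		 * Hence the write masks below: 0xf8 keeps bits 3-7 writable
+		 * on the master, while 0xde keeps everything except bits 0
+		 * and 5 (IRQ8 and IRQ13) writable on the slave.
+		 */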
+		if (is_master)
+			vatpic->elc[0] = (*eax & 0xf8);
+		else
+			vatpic->elc[1] = (*eax & 0xde);
+	}
+
+	VATPIC_UNLOCK(vatpic);
+
+	return (0);
+}
+
+struct vatpic *
+vatpic_init(struct vm *vm)
+{
+	struct vatpic *vatpic;
+
+	vatpic = malloc(sizeof(struct vatpic), M_VATPIC, M_WAITOK | M_ZERO);
+	vatpic->vm = vm;
+
+	mtx_init(&vatpic->mtx, "vatpic lock", NULL, MTX_SPIN);
+
+	return (vatpic);
+}
+
+void
+vatpic_cleanup(struct vatpic *vatpic)
+{
+	free(vatpic, M_VATPIC);
+}


Property changes on: trunk/sys/amd64/vmm/io/vatpic.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vatpic.h
===================================================================
--- trunk/sys/amd64/vmm/io/vatpic.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vatpic.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,58 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vatpic.h 276429 2014-12-30 22:22:46Z neel $
+ */
+
+#ifndef _VATPIC_H_
+#define	_VATPIC_H_
+
+#include <isa/isareg.h>
+
+#define	ICU_IMR_OFFSET	1
+
+#define	IO_ELCR1	0x4d0
+#define	IO_ELCR2	0x4d1
+
+struct vatpic *vatpic_init(struct vm *vm);
+void vatpic_cleanup(struct vatpic *vatpic);
+
+int vatpic_master_handler(struct vm *vm, int vcpuid, bool in, int port,
+    int bytes, uint32_t *eax);
+int vatpic_slave_handler(struct vm *vm, int vcpuid, bool in, int port,
+    int bytes, uint32_t *eax);
+int vatpic_elc_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax);
+
+int vatpic_assert_irq(struct vm *vm, int irq);
+int vatpic_deassert_irq(struct vm *vm, int irq);
+int vatpic_pulse_irq(struct vm *vm, int irq);
+int vatpic_set_irq_trigger(struct vm *vm, int irq, enum vm_intr_trigger trigger);
+
+void vatpic_pending_intr(struct vm *vm, int *vecptr);
+void vatpic_intr_accepted(struct vm *vm, int vector);
+
+#endif	/* _VATPIC_H_ */


Property changes on: trunk/sys/amd64/vmm/io/vatpic.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vatpit.c
===================================================================
--- trunk/sys/amd64/vmm/io/vatpit.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vatpit.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,458 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/vatpit.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+
+#include <machine/vmm.h>
+
+#include "vmm_ktr.h"
+#include "vatpic.h"
+#include "vioapic.h"
+#include "vatpit.h"
+
+static MALLOC_DEFINE(M_VATPIT, "atpit", "bhyve virtual atpit (8254)");
+
+#define	VATPIT_LOCK(vatpit)		mtx_lock_spin(&((vatpit)->mtx))
+#define	VATPIT_UNLOCK(vatpit)		mtx_unlock_spin(&((vatpit)->mtx))
+#define	VATPIT_LOCKED(vatpit)		mtx_owned(&((vatpit)->mtx))
+
+#define	TIMER_SEL_MASK		0xc0
+#define	TIMER_RW_MASK		0x30
+#define	TIMER_MODE_MASK		0x0f
+#define	TIMER_SEL_READBACK	0xc0
+
+#define	TIMER_STS_OUT		0x80
+#define	TIMER_STS_NULLCNT	0x40
+
+#define	TIMER_RB_LCTR		0x20
+#define	TIMER_RB_LSTATUS	0x10
+#define	TIMER_RB_CTR_2		0x08
+#define	TIMER_RB_CTR_1		0x04
+#define	TIMER_RB_CTR_0		0x02
+
+#define	TMR2_OUT_STS		0x20
+
+#define	PIT_8254_FREQ		1193182
+#define	TIMER_DIV(freq, hz)	(((freq) + (hz) / 2) / (hz))
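+/* e.g. TIMER_DIV(PIT_8254_FREQ, 100) == 11932: integer division rounded to nearest */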
+
+struct vatpit_callout_arg {
+	struct vatpit	*vatpit;
+	int		channel_num;
+};
+
+
+struct channel {
+	int		mode;
+	uint16_t	initial;	/* initial counter value */
+	sbintime_t	now_sbt;	/* uptime when counter was loaded */
+	uint8_t		cr[2];
+	uint8_t		ol[2];
+	bool		slatched;	/* status latched */
+	uint8_t		status;
+	int		crbyte;
+	int		olbyte;
+	int		frbyte;
+	struct callout	callout;
+	sbintime_t	callout_sbt;	/* target time */
+	struct vatpit_callout_arg callout_arg;
+};
+
+struct vatpit {
+	struct vm	*vm;
+	struct mtx	mtx;
+
+	sbintime_t	freq_sbt;
+
+	struct channel	channel[3];
+};
+
+static void pit_timer_start_cntr0(struct vatpit *vatpit);
+
+static int
+vatpit_get_out(struct vatpit *vatpit, int channel)
+{
+	struct channel *c;
+	sbintime_t delta_ticks;
+	int out;
+
+	c = &vatpit->channel[channel];
+
+	switch (c->mode) {
+	case TIMER_INTTC:
+		delta_ticks = (sbinuptime() - c->now_sbt) / vatpit->freq_sbt;
+		out = ((c->initial - delta_ticks) <= 0);
+		break;
+	default:
+		out = 0;
+		break;
+	}
+
+	return (out);
+}
+
+static void
+vatpit_callout_handler(void *a)
+{
+	struct vatpit_callout_arg *arg = a;
+	struct vatpit *vatpit;
+	struct callout *callout;
+	struct channel *c;
+
+	vatpit = arg->vatpit;
+	c = &vatpit->channel[arg->channel_num];
+	callout = &c->callout;
+
+	VM_CTR1(vatpit->vm, "atpit t%d fired", arg->channel_num);
+
+	VATPIT_LOCK(vatpit);
+
+	if (callout_pending(callout))		/* callout was reset */
+		goto done;
+
+	if (!callout_active(callout))		/* callout was stopped */
+		goto done;
+
+	callout_deactivate(callout);
+
+	if (c->mode == TIMER_RATEGEN) {
+		pit_timer_start_cntr0(vatpit);
+	}
+
+	vatpic_pulse_irq(vatpit->vm, 0);
+	vioapic_pulse_irq(vatpit->vm, 2);
+
+done:
+	VATPIT_UNLOCK(vatpit);
+	return;
+}
+
+static void
+pit_timer_start_cntr0(struct vatpit *vatpit)
+{
+	struct channel *c;
+	sbintime_t now, delta, precision;
+
+	c = &vatpit->channel[0];
+	if (c->initial != 0) {
+		delta = c->initial * vatpit->freq_sbt;
+		precision = delta >> tc_precexp;
+		c->callout_sbt = c->callout_sbt + delta;
+
+		/*
+		 * Reset 'callout_sbt' if the time that the callout
+		 * was supposed to fire is more than 'c->initial'
+		 * ticks in the past.
+		 */
+		now = sbinuptime();
+		if (c->callout_sbt < now)
+			c->callout_sbt = now + delta;
+
+		callout_reset_sbt(&c->callout, c->callout_sbt,
+		    precision, vatpit_callout_handler, &c->callout_arg,
+		    C_ABSOLUTE);
+	}
+}
+
+static uint16_t
+pit_update_counter(struct vatpit *vatpit, struct channel *c, bool latch)
+{
+	uint16_t lval;
+	sbintime_t delta_ticks;
+
+	/* cannot latch a new value until the old one has been consumed */
+	if (latch && c->olbyte != 0)
+		return (0);
+
+	if (c->initial == 0) {
+		/*
+		 * This is possibly an o/s bug - reading the value of
+		 * the timer without having set up the initial value.
+		 *
+		 * The original user-space version of this code set
+		 * the timer to 100hz in this condition; do the same
+		 * here.
+		 */
+		c->initial = TIMER_DIV(PIT_8254_FREQ, 100);
+		c->now_sbt = sbinuptime();
+		c->status &= ~TIMER_STS_NULLCNT;
+	}
+
+	delta_ticks = (sbinuptime() - c->now_sbt) / vatpit->freq_sbt;
+
+	lval = c->initial - delta_ticks % c->initial;
+
+	if (latch) {
+		c->olbyte = 2;
+		c->ol[1] = lval;		/* LSB */
+		c->ol[0] = lval >> 8;		/* MSB */
+	}
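+	/*
+	 * The latched bytes are consumed via 'ol[--olbyte]' in the counter
+	 * port handler, so the guest sees the LSB on the first read and the
+	 * MSB on the second, matching the 8254's 16-bit read order.
+	 */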
+
+	return (lval);
+}
+
+static int
+pit_readback1(struct vatpit *vatpit, int channel, uint8_t cmd)
+{
+	struct channel *c;
+
+	c = &vatpit->channel[channel];
+
+	/*
+	 * Latch the count/status of the timer if not already latched.
+	 * N.B. that the count/status latch-select bits are active-low.
+	 */
+	if (!(cmd & TIMER_RB_LCTR) && !c->olbyte) {
+		(void) pit_update_counter(vatpit, c, true);
+	}
+
+	if (!(cmd & TIMER_RB_LSTATUS) && !c->slatched) {
+		c->slatched = true;
+		/*
+		 * For mode 0, see if the elapsed time is greater
+		 * than the initial value - this results in the
+		 * output pin being set to 1 in the status byte.
+		 */
+		if (c->mode == TIMER_INTTC && vatpit_get_out(vatpit, channel))
+			c->status |= TIMER_STS_OUT;
+		else
+			c->status &= ~TIMER_STS_OUT;
+	}
+
+	return (0);
+}
+
+static int
+pit_readback(struct vatpit *vatpit, uint8_t cmd)
+{
+	int error;
+
+	/*
+	 * The readback command can apply to all timers.
+	 */
+	error = 0;
+	if (cmd & TIMER_RB_CTR_0)
+		error = pit_readback1(vatpit, 0, cmd);
+	if (!error && cmd & TIMER_RB_CTR_1)
+		error = pit_readback1(vatpit, 1, cmd);
+	if (!error && cmd & TIMER_RB_CTR_2)
+		error = pit_readback1(vatpit, 2, cmd);
+
+	return (error);
+}
+
+
+static int
+vatpit_update_mode(struct vatpit *vatpit, uint8_t val)
+{
+	struct channel *c;
+	int sel, rw, mode;
+
+	sel = val & TIMER_SEL_MASK;
+	rw = val & TIMER_RW_MASK;
+	mode = val & TIMER_MODE_MASK;
+
+	if (sel == TIMER_SEL_READBACK)
+		return (pit_readback(vatpit, val));
+
+	if (rw != TIMER_LATCH && rw != TIMER_16BIT)
+		return (-1);
+
+	if (rw != TIMER_LATCH) {
+		/*
+		 * Counter mode is not affected when issuing a
+		 * latch command.
+		 */
+		if (mode != TIMER_INTTC &&
+		    mode != TIMER_RATEGEN &&
+		    mode != TIMER_SQWAVE &&
+		    mode != TIMER_SWSTROBE)
+			return (-1);
+	}
+
+	c = &vatpit->channel[sel >> 6];
+	if (rw == TIMER_LATCH)
+		pit_update_counter(vatpit, c, true);
+	else {
+		c->mode = mode;
+		c->olbyte = 0;	/* reset latch after reprogramming */
+		c->status |= TIMER_STS_NULLCNT;
+	}
+
+	return (0);
+}
+
+int
+vatpit_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
+{
+	struct vatpit *vatpit;
+	struct channel *c;
+	uint8_t val;
+	int error;
+
+	vatpit = vm_atpit(vm);
+
+	if (bytes != 1)
+		return (-1);
+
+	val = *eax;
+
+	if (port == TIMER_MODE) {
+		if (in) {
+			VM_CTR0(vatpit->vm, "vatpit attempt to read mode");
+			return (-1);
+		}
+
+		VATPIT_LOCK(vatpit);
+		error = vatpit_update_mode(vatpit, val);
+		VATPIT_UNLOCK(vatpit);
+
+		return (error);
+	}
+
+	/* counter ports */
+	KASSERT(port >= TIMER_CNTR0 && port <= TIMER_CNTR2,
+	    ("invalid port 0x%x", port));
+	c = &vatpit->channel[port - TIMER_CNTR0];
+
+	VATPIT_LOCK(vatpit);
+	if (in && c->slatched) {
+		/*
+		 * Return the status byte if latched
+		 */
+		*eax = c->status;
+		c->slatched = false;
+		c->status = 0;
+	} else if (in) {
+		/*
+		 * The spec says that once the output latch is completely
+		 * read it should revert to "following" the counter. Use
+		 * the free-running counter for this case (e.g. Linux
+		 * TSC calibration). Assuming the access mode is 16-bit,
+		 * toggle the MSB/LSB bit on each read.
+		 */
+		if (c->olbyte == 0) {
+			uint16_t tmp;
+
+			tmp = pit_update_counter(vatpit, c, false);
+			if (c->frbyte)
+				tmp >>= 8;
+			tmp &= 0xff;
+			*eax = tmp;
+			c->frbyte ^= 1;
+		} else
+			*eax = c->ol[--c->olbyte];
+	} else {
+		c->cr[c->crbyte++] = *eax;
+		if (c->crbyte == 2) {
+			c->status &= ~TIMER_STS_NULLCNT;
+			c->frbyte = 0;
+			c->crbyte = 0;
+			c->initial = c->cr[0] | (uint16_t)c->cr[1] << 8;
+			c->now_sbt = sbinuptime();
+			/* Start an interval timer for channel 0 */
+			if (port == TIMER_CNTR0) {
+				c->callout_sbt = c->now_sbt;
+				pit_timer_start_cntr0(vatpit);
+			}
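+			/*
+			 * A programmed count of 0 means the maximum count
+			 * (65536) on real 8254 hardware; approximate that
+			 * with 0xffff here.
+			 */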
+			if (c->initial == 0)
+				c->initial = 0xffff;
+		}
+	}
+	VATPIT_UNLOCK(vatpit);
+
+	return (0);
+}
+
+int
+vatpit_nmisc_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
+{
+	struct vatpit *vatpit;
+
+	vatpit = vm_atpit(vm);
+
+	if (in) {
+		VATPIT_LOCK(vatpit);
+		if (vatpit_get_out(vatpit, 2))
+			*eax = TMR2_OUT_STS;
+		else
+			*eax = 0;
+
+		VATPIT_UNLOCK(vatpit);
+	}
+
+	return (0);
+}
+
+struct vatpit *
+vatpit_init(struct vm *vm)
+{
+	struct vatpit *vatpit;
+	struct bintime bt;
+	struct vatpit_callout_arg *arg;
+	int i;
+
+	vatpit = malloc(sizeof(struct vatpit), M_VATPIT, M_WAITOK | M_ZERO);
+	vatpit->vm = vm;
+
+	mtx_init(&vatpit->mtx, "vatpit lock", NULL, MTX_SPIN);
+
+	FREQ2BT(PIT_8254_FREQ, &bt);
+	vatpit->freq_sbt = bttosbt(bt);
+
+	for (i = 0; i < 3; i++) {
+		callout_init(&vatpit->channel[i].callout, true);
+		arg = &vatpit->channel[i].callout_arg;
+		arg->vatpit = vatpit;
+		arg->channel_num = i;
+	}
+
+	return (vatpit);
+}
+
+void
+vatpit_cleanup(struct vatpit *vatpit)
+{
+	int i;
+
+	for (i = 0; i < 3; i++)
+		callout_drain(&vatpit->channel[i].callout);
+
+	free(vatpit, M_VATPIT);
+}


Property changes on: trunk/sys/amd64/vmm/io/vatpit.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vatpit.h
===================================================================
--- trunk/sys/amd64/vmm/io/vatpit.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vatpit.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,46 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vatpit.h 276429 2014-12-30 22:22:46Z neel $
+ */
+
+#ifndef _VATPIT_H_
+#define	_VATPIT_H_
+
+#include <machine/timerreg.h>
+
+#define	NMISC_PORT	0x61
+
+struct vatpit *vatpit_init(struct vm *vm);
+void vatpit_cleanup(struct vatpit *vatpit);
+
+int vatpit_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax);
+int vatpit_nmisc_handler(struct vm *vm, int vcpuid, bool in, int port,
+    int bytes, uint32_t *eax);
+
+#endif	/* _VATPIT_H_ */


Property changes on: trunk/sys/amd64/vmm/io/vatpit.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vhpet.c
===================================================================
--- trunk/sys/amd64/vmm/io/vhpet.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vhpet.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,760 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * Copyright (c) 2013 Neel Natu <neel at freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vhpet.c 284900 2015-06-28 03:22:26Z neel $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/vhpet.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+
+#include <dev/acpica/acpi_hpet.h>
+
+#include <machine/vmm.h>
+#include <machine/vmm_dev.h>
+
+#include "vmm_lapic.h"
+#include "vatpic.h"
+#include "vioapic.h"
+#include "vhpet.h"
+
+#include "vmm_ktr.h"
+
+static MALLOC_DEFINE(M_VHPET, "vhpet", "bhyve virtual hpet");
+
+#define	HPET_FREQ	10000000		/* 10.0 MHz */
+#define	FS_PER_S	1000000000000000ul
+
+/* Timer N Configuration and Capabilities Register */
+#define	HPET_TCAP_RO_MASK	(HPET_TCAP_INT_ROUTE 	|		\
+				 HPET_TCAP_FSB_INT_DEL	|		\
+				 HPET_TCAP_SIZE		|		\
+				 HPET_TCAP_PER_INT)
+/*
+ * HPET requires at least 3 timers and up to 32 timers per block.
+ */
+#define	VHPET_NUM_TIMERS	8
+CTASSERT(VHPET_NUM_TIMERS >= 3 && VHPET_NUM_TIMERS <= 32);
+
+struct vhpet_callout_arg {
+	struct vhpet *vhpet;
+	int timer_num;
+};
+
+struct vhpet {
+	struct vm	*vm;
+	struct mtx	mtx;
+	sbintime_t	freq_sbt;
+
+	uint64_t	config;		/* Configuration */
+	uint64_t	isr;		/* Interrupt Status */
+	uint32_t	countbase;	/* HPET counter base value */
+	sbintime_t	countbase_sbt;	/* uptime corresponding to base value */
+
+	struct {
+		uint64_t	cap_config;	/* Configuration */
+		uint64_t	msireg;		/* FSB interrupt routing */
+		uint32_t	compval;	/* Comparator */
+		uint32_t	comprate;
+		struct callout	callout;
+		sbintime_t	callout_sbt;	/* time when counter==compval */
+		struct vhpet_callout_arg arg;
+	} timer[VHPET_NUM_TIMERS];
+};
+
+#define	VHPET_LOCK(vhp)		mtx_lock(&((vhp)->mtx))
+#define	VHPET_UNLOCK(vhp)	mtx_unlock(&((vhp)->mtx))
+
+static void vhpet_start_timer(struct vhpet *vhpet, int n, uint32_t counter,
+    sbintime_t now);
+
+static uint64_t
+vhpet_capabilities(void)
+{
+	uint64_t cap = 0;
+
+	cap |= 0x8086 << 16;			/* vendor id */
+	cap |= (VHPET_NUM_TIMERS - 1) << 8;	/* number of timers */
+	cap |= 1;				/* revision */
+	cap &= ~HPET_CAP_COUNT_SIZE;		/* 32-bit timer */
+
+	cap &= 0xffffffff;
+	cap |= (FS_PER_S / HPET_FREQ) << 32;	/* tick period in fs */
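+	/* with HPET_FREQ of 10MHz this is 100000000 fs, i.e. a 100ns tick */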
+
+	return (cap);
+}
+
+static __inline bool
+vhpet_counter_enabled(struct vhpet *vhpet)
+{
+
+	return ((vhpet->config & HPET_CNF_ENABLE) ? true : false);
+}
+
+static __inline bool
+vhpet_timer_msi_enabled(struct vhpet *vhpet, int n)
+{
+	const uint64_t msi_enable = HPET_TCAP_FSB_INT_DEL | HPET_TCNF_FSB_EN;
+
+	if ((vhpet->timer[n].cap_config & msi_enable) == msi_enable)
+		return (true);
+	else
+		return (false);
+}
+
+static __inline int
+vhpet_timer_ioapic_pin(struct vhpet *vhpet, int n)
+{
+	/*
+	 * If the timer is configured to use MSI then treat it as if the
+	 * timer is not connected to the ioapic.
+	 */
+	if (vhpet_timer_msi_enabled(vhpet, n))
+		return (0);
+
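+	/* Tn_INT_ROUTE_CNF occupies bits 13:9 of the configuration register. */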
+	return ((vhpet->timer[n].cap_config & HPET_TCNF_INT_ROUTE) >> 9);
+}
+
+static uint32_t
+vhpet_counter(struct vhpet *vhpet, sbintime_t *nowptr)
+{
+	uint32_t val;
+	sbintime_t now, delta;
+
+	val = vhpet->countbase;
+	if (vhpet_counter_enabled(vhpet)) {
+		now = sbinuptime();
+		delta = now - vhpet->countbase_sbt;
+		KASSERT(delta >= 0, ("vhpet_counter: uptime went backwards: "
+		    "%#lx to %#lx", vhpet->countbase_sbt, now));
+		val += delta / vhpet->freq_sbt;
+		if (nowptr != NULL)
+			*nowptr = now;
+	} else {
+		/*
+		 * The sbinuptime corresponding to the 'countbase' is
+		 * meaningless when the counter is disabled. Make sure
+		 * that the caller doesn't want to use it.
+		 */
+		KASSERT(nowptr == NULL, ("vhpet_counter: nowptr must be NULL"));
+	}
+	return (val);
+}
+
+static void
+vhpet_timer_clear_isr(struct vhpet *vhpet, int n)
+{
+	int pin;
+
+	if (vhpet->isr & (1 << n)) {
+		pin = vhpet_timer_ioapic_pin(vhpet, n);
+		KASSERT(pin != 0, ("vhpet timer %d irq incorrectly routed", n));
+		vioapic_deassert_irq(vhpet->vm, pin);
+		vhpet->isr &= ~(1 << n);
+	}
+}
+
+static __inline bool
+vhpet_periodic_timer(struct vhpet *vhpet, int n)
+{
+
+	return ((vhpet->timer[n].cap_config & HPET_TCNF_TYPE) != 0);
+}
+
+static __inline bool
+vhpet_timer_interrupt_enabled(struct vhpet *vhpet, int n)
+{
+
+	return ((vhpet->timer[n].cap_config & HPET_TCNF_INT_ENB) != 0);
+}
+
+static __inline bool
+vhpet_timer_edge_trig(struct vhpet *vhpet, int n)
+{
+
+	KASSERT(!vhpet_timer_msi_enabled(vhpet, n), ("vhpet_timer_edge_trig: "
+	    "timer %d is using MSI", n));
+
+	if ((vhpet->timer[n].cap_config & HPET_TCNF_INT_TYPE) == 0)
+		return (true);
+	else
+		return (false);
+}
+
+static void
+vhpet_timer_interrupt(struct vhpet *vhpet, int n)
+{
+	int pin;
+
+	/* If interrupts are not enabled for this timer then just return. */
+	if (!vhpet_timer_interrupt_enabled(vhpet, n))
+		return;
+
+	/*
+	 * If a level triggered interrupt is already asserted then just return.
+	 */
+	if ((vhpet->isr & (1 << n)) != 0) {
+		VM_CTR1(vhpet->vm, "hpet t%d intr is already asserted", n);
+		return;
+	}
+
+	if (vhpet_timer_msi_enabled(vhpet, n)) {
+		lapic_intr_msi(vhpet->vm, vhpet->timer[n].msireg >> 32,
+		    vhpet->timer[n].msireg & 0xffffffff);
+		return;
+	}	
+
+	pin = vhpet_timer_ioapic_pin(vhpet, n);
+	if (pin == 0) {
+		VM_CTR1(vhpet->vm, "hpet t%d intr is not routed to ioapic", n);
+		return;
+	}
+
+	if (vhpet_timer_edge_trig(vhpet, n)) {
+		vioapic_pulse_irq(vhpet->vm, pin);
+	} else {
+		vhpet->isr |= 1 << n;
+		vioapic_assert_irq(vhpet->vm, pin);
+	}
+}
+
+static void
+vhpet_adjust_compval(struct vhpet *vhpet, int n, uint32_t counter)
+{
+	uint32_t compval, comprate, compnext;
+
+	KASSERT(vhpet->timer[n].comprate != 0, ("hpet t%d is not periodic", n));
+
+	compval = vhpet->timer[n].compval;
+	comprate = vhpet->timer[n].comprate;
+
+	/*
+	 * Calculate the comparator value to be used for the next periodic
+	 * interrupt.
+	 *
+	 * This function is commonly called from the callout handler.
+	 * In this scenario the 'counter' is ahead of 'compval'. To find
+	 * the next value to program into the accumulator we divide the
+	 * number space between 'compval' and 'counter' into 'comprate'
+	 * sized units. The 'compval' is rounded up such that it is "ahead"
+	 * of 'counter'.
+	 */
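+	/*
+	 * For example, compval = 100, comprate = 50 and counter = 180 give
+	 * compnext = 100 + (80 / 50 + 1) * 50 = 200, the first comparator
+	 * value at the programmed rate that is ahead of the counter.
+	 */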
+	compnext = compval + ((counter - compval) / comprate + 1) * comprate;
+
+	vhpet->timer[n].compval = compnext;
+}
+
+static void
+vhpet_handler(void *a)
+{
+	int n;
+	uint32_t counter;
+	sbintime_t now;
+	struct vhpet *vhpet;
+	struct callout *callout;
+	struct vhpet_callout_arg *arg;
+
+	arg = a;
+	vhpet = arg->vhpet;
+	n = arg->timer_num;
+	callout = &vhpet->timer[n].callout;
+
+	VM_CTR1(vhpet->vm, "hpet t%d fired", n);
+
+	VHPET_LOCK(vhpet);
+
+	if (callout_pending(callout))		/* callout was reset */
+		goto done;
+
+	if (!callout_active(callout))		/* callout was stopped */
+		goto done;
+
+	callout_deactivate(callout);
+
+	if (!vhpet_counter_enabled(vhpet))
+		panic("vhpet(%p) callout with counter disabled", vhpet);
+
+	counter = vhpet_counter(vhpet, &now);
+	vhpet_start_timer(vhpet, n, counter, now);
+	vhpet_timer_interrupt(vhpet, n);
+done:
+	VHPET_UNLOCK(vhpet);
+	return;
+}
+
+static void
+vhpet_stop_timer(struct vhpet *vhpet, int n, sbintime_t now)
+{
+
+	VM_CTR1(vhpet->vm, "hpet t%d stopped", n);
+	callout_stop(&vhpet->timer[n].callout);
+
+	/*
+	 * If the callout was scheduled to expire in the past but hasn't
+	 * had a chance to execute yet then trigger the timer interrupt
+	 * here. Failing to do so will result in a missed timer interrupt
+	 * in the guest. This is especially bad in one-shot mode because
+	 * the next interrupt has to wait for the counter to wrap around.
+	 */
+	if (vhpet->timer[n].callout_sbt < now) {
+		VM_CTR1(vhpet->vm, "hpet t%d interrupt triggered after "
+		    "stopping timer", n);
+		vhpet_timer_interrupt(vhpet, n);
+	}
+}
+
+static void
+vhpet_start_timer(struct vhpet *vhpet, int n, uint32_t counter, sbintime_t now)
+{
+	sbintime_t delta, precision;
+
+	if (vhpet->timer[n].comprate != 0)
+		vhpet_adjust_compval(vhpet, n, counter);
+	else {
+		/*
+		 * In one-shot mode it is the guest's responsibility to make
+		 * sure that the comparator value is not in the "past". The
+		 * hardware doesn't have any belt-and-suspenders to deal with
+		 * this so we don't either.
+		 */
+	}
+
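+	/*
+	 * 'compval' and 'counter' are 32-bit, so the subtraction below wraps
+	 * modulo 2^32; a comparator that is already behind the counter thus
+	 * yields a delta of nearly a full counter period.
+	 */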
+	delta = (vhpet->timer[n].compval - counter) * vhpet->freq_sbt;
+	precision = delta >> tc_precexp;
+	vhpet->timer[n].callout_sbt = now + delta;
+	callout_reset_sbt(&vhpet->timer[n].callout, vhpet->timer[n].callout_sbt,
+	    precision, vhpet_handler, &vhpet->timer[n].arg, C_ABSOLUTE);
+}
+
+static void
+vhpet_start_counting(struct vhpet *vhpet)
+{
+	int i;
+
+	vhpet->countbase_sbt = sbinuptime();
+	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
+		/*
+		 * Restart the timers based on the value of the main counter
+		 * when it stopped counting.
+		 */
+		vhpet_start_timer(vhpet, i, vhpet->countbase,
+		    vhpet->countbase_sbt);
+	}
+}
+
+static void
+vhpet_stop_counting(struct vhpet *vhpet, uint32_t counter, sbintime_t now)
+{
+	int i;
+
+	vhpet->countbase = counter;
+	for (i = 0; i < VHPET_NUM_TIMERS; i++)
+		vhpet_stop_timer(vhpet, i, now);
+}
+
+static __inline void
+update_register(uint64_t *regptr, uint64_t data, uint64_t mask)
+{
+
+	*regptr &= ~mask;
+	*regptr |= (data & mask);
+}
+
+static void
+vhpet_timer_update_config(struct vhpet *vhpet, int n, uint64_t data,
+    uint64_t mask)
+{
+	bool clear_isr;
+	int old_pin, new_pin;
+	uint32_t allowed_irqs;
+	uint64_t oldval, newval;
+
+	if (vhpet_timer_msi_enabled(vhpet, n) ||
+	    vhpet_timer_edge_trig(vhpet, n)) {
+		if (vhpet->isr & (1 << n))
+			panic("vhpet timer %d isr should not be asserted", n);
+	}
+	old_pin = vhpet_timer_ioapic_pin(vhpet, n);
+	oldval = vhpet->timer[n].cap_config;
+
+	newval = oldval;
+	update_register(&newval, data, mask);
+	newval &= ~(HPET_TCAP_RO_MASK | HPET_TCNF_32MODE);
+	newval |= oldval & HPET_TCAP_RO_MASK;
+
+	if (newval == oldval)
+		return;
+
+	vhpet->timer[n].cap_config = newval;
+	VM_CTR2(vhpet->vm, "hpet t%d cap_config set to 0x%016x", n, newval);
+
+	/*
+	 * Validate the interrupt routing in the HPET_TCNF_INT_ROUTE field.
+	 * If it does not match the bits set in HPET_TCAP_INT_ROUTE then set
+	 * it to the default value of 0.
+	 */
+	allowed_irqs = vhpet->timer[n].cap_config >> 32;
+	new_pin = vhpet_timer_ioapic_pin(vhpet, n);
+	if (new_pin != 0 && (allowed_irqs & (1 << new_pin)) == 0) {
+		VM_CTR3(vhpet->vm, "hpet t%d configured invalid irq %d, "
+		    "allowed_irqs 0x%08x", n, new_pin, allowed_irqs);
+		new_pin = 0;
+		vhpet->timer[n].cap_config &= ~HPET_TCNF_INT_ROUTE;
+	}
+
+	if (!vhpet_periodic_timer(vhpet, n))
+		vhpet->timer[n].comprate = 0;
+
+	/*
+	 * If the timer's ISR bit is set then clear it in the following cases:
+	 * - interrupt is disabled
+	 * - interrupt type is changed from level to edge or fsb.
+	 * - interrupt routing is changed
+	 *
+	 * This is to ensure that this timer's level triggered interrupt does
+	 * not remain asserted forever.
+	 */
+	if (vhpet->isr & (1 << n)) {
+		KASSERT(old_pin != 0, ("timer %d isr asserted to ioapic pin %d",
+		    n, old_pin));
+		if (!vhpet_timer_interrupt_enabled(vhpet, n))
+			clear_isr = true;
+		else if (vhpet_timer_msi_enabled(vhpet, n))
+			clear_isr = true;
+		else if (vhpet_timer_edge_trig(vhpet, n))
+			clear_isr = true;
+		else if (vhpet_timer_ioapic_pin(vhpet, n) != old_pin)
+			clear_isr = true;
+		else
+			clear_isr = false;
+
+		if (clear_isr) {
+			VM_CTR1(vhpet->vm, "hpet t%d isr cleared due to "
+			    "configuration change", n);
+			vioapic_deassert_irq(vhpet->vm, old_pin);
+			vhpet->isr &= ~(1 << n);
+		}
+	}
+}
+
+int
+vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val, int size,
+    void *arg)
+{
+	struct vhpet *vhpet;
+	uint64_t data, mask, oldval, val64;
+	uint32_t isr_clear_mask, old_compval, old_comprate, counter;
+	sbintime_t now, *nowptr;
+	int i, offset;
+
+	vhpet = vm_hpet(vm);
+	offset = gpa - VHPET_BASE;
+
+	VHPET_LOCK(vhpet);
+
+	/* Accesses to the HPET should be 4 or 8 bytes wide */
+	switch (size) {
+	case 8:
+		mask = 0xffffffffffffffff;
+		data = val;
+		break;
+	case 4:
+		mask = 0xffffffff;
+		data = val;
+		if ((offset & 0x4) != 0) {
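+			/* 4-byte access to the upper half of a 64-bit register */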
+			mask <<= 32;
+			data <<= 32;
+		} 
+		break;
+	default:
+		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
+		    "offset 0x%08x, size %d", offset, size);
+		goto done;
+	}
+
+	/* Access to the HPET should be naturally aligned to its width */
+	if (offset & (size - 1)) {
+		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
+		    "offset 0x%08x, size %d", offset, size);
+		goto done;
+	}
+
+	if (offset == HPET_CONFIG || offset == HPET_CONFIG + 4) {
+		/*
+		 * Get the most recent value of the counter before updating
+		 * the 'config' register. If the HPET is going to be disabled
+		 * then we need to update 'countbase' with the value right
+		 * before it is disabled.
+		 */
+		nowptr = vhpet_counter_enabled(vhpet) ? &now : NULL;
+		counter = vhpet_counter(vhpet, nowptr);
+		oldval = vhpet->config;
+		update_register(&vhpet->config, data, mask);
+
+		/*
+		 * LegacyReplacement Routing is not supported so clear the
+		 * bit explicitly.
+		 */
+		vhpet->config &= ~HPET_CNF_LEG_RT;
+
+		if ((oldval ^ vhpet->config) & HPET_CNF_ENABLE) {
+			if (vhpet_counter_enabled(vhpet)) {
+				vhpet_start_counting(vhpet);
+				VM_CTR0(vhpet->vm, "hpet enabled");
+			} else {
+				vhpet_stop_counting(vhpet, counter, now);
+				VM_CTR0(vhpet->vm, "hpet disabled");
+			}
+		}
+		goto done;
+	}
+
+	if (offset == HPET_ISR || offset == HPET_ISR + 4) {
+		isr_clear_mask = vhpet->isr & data;
+		for (i = 0; i < VHPET_NUM_TIMERS; i++) {
+			if ((isr_clear_mask & (1 << i)) != 0) {
+				VM_CTR1(vhpet->vm, "hpet t%d isr cleared", i);
+				vhpet_timer_clear_isr(vhpet, i);
+			}
+		}
+		goto done;
+	}
+
+	if (offset == HPET_MAIN_COUNTER || offset == HPET_MAIN_COUNTER + 4) {
+		/* Zero-extend the counter to 64-bits before updating it */
+		val64 = vhpet_counter(vhpet, NULL);
+		update_register(&val64, data, mask);
+		vhpet->countbase = val64;
+		if (vhpet_counter_enabled(vhpet))
+			vhpet_start_counting(vhpet);
+		goto done;
+	}
+
+	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
+		if (offset == HPET_TIMER_CAP_CNF(i) ||
+		    offset == HPET_TIMER_CAP_CNF(i) + 4) {
+			vhpet_timer_update_config(vhpet, i, data, mask);
+			break;
+		}
+
+		if (offset == HPET_TIMER_COMPARATOR(i) ||
+		    offset == HPET_TIMER_COMPARATOR(i) + 4) {
+			old_compval = vhpet->timer[i].compval;
+			old_comprate = vhpet->timer[i].comprate;
+			if (vhpet_periodic_timer(vhpet, i)) {
+				/*
+				 * In periodic mode writes to the comparator
+				 * change the 'compval' register only if the
+				 * HPET_TCNF_VAL_SET bit is set in the config
+				 * register.
+				 */
+				val64 = vhpet->timer[i].comprate;
+				update_register(&val64, data, mask);
+				vhpet->timer[i].comprate = val64;
+				if ((vhpet->timer[i].cap_config &
+				    HPET_TCNF_VAL_SET) != 0) {
+					vhpet->timer[i].compval = val64;
+				}
+			} else {
+				KASSERT(vhpet->timer[i].comprate == 0,
+				    ("vhpet one-shot timer %d has invalid "
+				    "rate %u", i, vhpet->timer[i].comprate));
+				val64 = vhpet->timer[i].compval;
+				update_register(&val64, data, mask);
+				vhpet->timer[i].compval = val64;
+			}
+			vhpet->timer[i].cap_config &= ~HPET_TCNF_VAL_SET;
+
+			if (vhpet->timer[i].compval != old_compval ||
+			    vhpet->timer[i].comprate != old_comprate) {
+				if (vhpet_counter_enabled(vhpet)) {
+					counter = vhpet_counter(vhpet, &now);
+					vhpet_start_timer(vhpet, i, counter,
+					    now);
+				}
+			}
+			break;
+		}
+
+		if (offset == HPET_TIMER_FSB_VAL(i) ||
+		    offset == HPET_TIMER_FSB_ADDR(i)) {
+			update_register(&vhpet->timer[i].msireg, data, mask);
+			break;
+		}
+	}
+done:
+	VHPET_UNLOCK(vhpet);
+	return (0);
+}
+
+int
+vhpet_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval, int size,
+    void *arg)
+{
+	int i, offset;
+	struct vhpet *vhpet;
+	uint64_t data;
+
+	vhpet = vm_hpet(vm);
+	offset = gpa - VHPET_BASE;
+
+	VHPET_LOCK(vhpet);
+
+	/* Accesses to the HPET should be 4 or 8 bytes wide */
+	if (size != 4 && size != 8) {
+		VM_CTR2(vhpet->vm, "hpet invalid mmio read: "
+		    "offset 0x%08x, size %d", offset, size);
+		data = 0;
+		goto done;
+	}
+
+	/* Access to the HPET should be naturally aligned to its width */
+	if (offset & (size - 1)) {
+		VM_CTR2(vhpet->vm, "hpet invalid mmio read: "
+		    "offset 0x%08x, size %d", offset, size);
+		data = 0;
+		goto done;
+	}
+
+	if (offset == HPET_CAPABILITIES || offset == HPET_CAPABILITIES + 4) {
+		data = vhpet_capabilities();
+		goto done;	
+	}
+
+	if (offset == HPET_CONFIG || offset == HPET_CONFIG + 4) {
+		data = vhpet->config;
+		goto done;
+	}
+
+	if (offset == HPET_ISR || offset == HPET_ISR + 4) {
+		data = vhpet->isr;
+		goto done;
+	}
+
+	if (offset == HPET_MAIN_COUNTER || offset == HPET_MAIN_COUNTER + 4) {
+		data = vhpet_counter(vhpet, NULL);
+		goto done;
+	}
+
+	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
+		if (offset == HPET_TIMER_CAP_CNF(i) ||
+		    offset == HPET_TIMER_CAP_CNF(i) + 4) {
+			data = vhpet->timer[i].cap_config;
+			break;
+		}
+
+		if (offset == HPET_TIMER_COMPARATOR(i) ||
+		    offset == HPET_TIMER_COMPARATOR(i) + 4) {
+			data = vhpet->timer[i].compval;
+			break;
+		}
+
+		if (offset == HPET_TIMER_FSB_VAL(i) ||
+		    offset == HPET_TIMER_FSB_ADDR(i)) {
+			data = vhpet->timer[i].msireg;
+			break;
+		}
+	}
+
+	if (i >= VHPET_NUM_TIMERS)
+		data = 0;
+done:
+	VHPET_UNLOCK(vhpet);
+
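+	/*
+	 * A 4-byte read of the upper half of a register returns the upper
+	 * 32 bits of the 64-bit value.
+	 */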
+	if (size == 4) {
+		if (offset & 0x4)
+			data >>= 32;
+	}
+	*rval = data;
+	return (0);
+}
+
+struct vhpet *
+vhpet_init(struct vm *vm)
+{
+	int i, pincount;
+	struct vhpet *vhpet;
+	uint64_t allowed_irqs;
+	struct vhpet_callout_arg *arg;
+	struct bintime bt;
+
+	vhpet = malloc(sizeof(struct vhpet), M_VHPET, M_WAITOK | M_ZERO);
+	vhpet->vm = vm;
+	mtx_init(&vhpet->mtx, "vhpet lock", NULL, MTX_DEF);
+
+	FREQ2BT(HPET_FREQ, &bt);
+	vhpet->freq_sbt = bttosbt(bt);
+
+	pincount = vioapic_pincount(vm);
+	if (pincount >= 24)
+		allowed_irqs = 0x00f00000;	/* irqs 20, 21, 22 and 23 */
+	else
+		allowed_irqs = 0;
+
+	/*
+	 * Initialize HPET timer hardware state.
+	 */
+	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
+		vhpet->timer[i].cap_config = allowed_irqs << 32;
+		vhpet->timer[i].cap_config |= HPET_TCAP_PER_INT;
+		vhpet->timer[i].cap_config |= HPET_TCAP_FSB_INT_DEL;
+
+		vhpet->timer[i].compval = 0xffffffff;
+		callout_init(&vhpet->timer[i].callout, 1);
+
+		arg = &vhpet->timer[i].arg;
+		arg->vhpet = vhpet;
+		arg->timer_num = i;
+	}
+
+	return (vhpet);
+}
+
+void
+vhpet_cleanup(struct vhpet *vhpet)
+{
+	int i;
+
+	for (i = 0; i < VHPET_NUM_TIMERS; i++)
+		callout_drain(&vhpet->timer[i].callout);
+
+	free(vhpet, M_VHPET);
+}
+
+int
+vhpet_getcap(struct vm_hpet_cap *cap)
+{
+
+	cap->capabilities = vhpet_capabilities();
+	return (0);
+}


Property changes on: trunk/sys/amd64/vmm/io/vhpet.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vhpet.h
===================================================================
--- trunk/sys/amd64/vmm/io/vhpet.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vhpet.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,45 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * Copyright (c) 2013 Neel Natu <neel at freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vhpet.h 261088 2014-01-23 20:21:39Z jhb $
+ */
+
+#ifndef _VHPET_H_
+#define	_VHPET_H_
+
+#define	VHPET_BASE	0xfed00000
+#define	VHPET_SIZE	1024
+
+struct vhpet *vhpet_init(struct vm *vm);
+void 	vhpet_cleanup(struct vhpet *vhpet);
+int	vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val,
+	    int size, void *arg);
+int	vhpet_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *val,
+	    int size, void *arg);
+int	vhpet_getcap(struct vm_hpet_cap *cap);
+
+#endif	/* _VHPET_H_ */


Property changes on: trunk/sys/amd64/vmm/io/vhpet.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vioapic.c
===================================================================
--- trunk/sys/amd64/vmm/io/vioapic.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vioapic.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,500 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * Copyright (c) 2013 Neel Natu <neel at freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vioapic.c 284900 2015-06-28 03:22:26Z neel $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/vioapic.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <x86/apicreg.h>
+#include <machine/vmm.h>
+
+#include "vmm_ktr.h"
+#include "vmm_lapic.h"
+#include "vlapic.h"
+#include "vioapic.h"
+
+#define	IOREGSEL	0x00
+#define	IOWIN		0x10
+
+#define	REDIR_ENTRIES	24
+#define	RTBL_RO_BITS	((uint64_t)(IOART_REM_IRR | IOART_DELIVS))
+
+struct vioapic {
+	struct vm	*vm;
+	struct mtx	mtx;
+	uint32_t	id;
+	uint32_t	ioregsel;
+	struct {
+		uint64_t reg;
+		int	 acnt;	/* sum of pin asserts (+1) and deasserts (-1) */
+	} rtbl[REDIR_ENTRIES];
+};
+
+#define	VIOAPIC_LOCK(vioapic)		mtx_lock_spin(&((vioapic)->mtx))
+#define	VIOAPIC_UNLOCK(vioapic)		mtx_unlock_spin(&((vioapic)->mtx))
+#define	VIOAPIC_LOCKED(vioapic)		mtx_owned(&((vioapic)->mtx))
+
+static MALLOC_DEFINE(M_VIOAPIC, "vioapic", "bhyve virtual ioapic");
+
+#define	VIOAPIC_CTR1(vioapic, fmt, a1)					\
+	VM_CTR1((vioapic)->vm, fmt, a1)
+
+#define	VIOAPIC_CTR2(vioapic, fmt, a1, a2)				\
+	VM_CTR2((vioapic)->vm, fmt, a1, a2)
+
+#define	VIOAPIC_CTR3(vioapic, fmt, a1, a2, a3)				\
+	VM_CTR3((vioapic)->vm, fmt, a1, a2, a3)
+
+#define	VIOAPIC_CTR4(vioapic, fmt, a1, a2, a3, a4)			\
+	VM_CTR4((vioapic)->vm, fmt, a1, a2, a3, a4)
+
+#ifdef KTR
+static const char *
+pinstate_str(bool asserted)
+{
+
+	if (asserted)
+		return ("asserted");
+	else
+		return ("deasserted");
+}
+#endif
+
+static void
+vioapic_send_intr(struct vioapic *vioapic, int pin)
+{
+	int vector, delmode;
+	uint32_t low, high, dest;
+	bool level, phys;
+
+	KASSERT(pin >= 0 && pin < REDIR_ENTRIES,
+	    ("vioapic_set_pinstate: invalid pin number %d", pin));
+
+	KASSERT(VIOAPIC_LOCKED(vioapic),
+	    ("vioapic_set_pinstate: vioapic is not locked"));
+
+	low = vioapic->rtbl[pin].reg;
+	high = vioapic->rtbl[pin].reg >> 32;
+
+	if ((low & IOART_INTMASK) == IOART_INTMSET) {
+		VIOAPIC_CTR1(vioapic, "ioapic pin%d: masked", pin);
+		return;
+	}
+
+	phys = ((low & IOART_DESTMOD) == IOART_DESTPHY);
+	delmode = low & IOART_DELMOD;
+	level = low & IOART_TRGRLVL ? true : false;
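+	/*
+	 * For a level-triggered interrupt set the Remote IRR bit; it is
+	 * cleared when the guest EOIs the vector (see vioapic_process_eoi).
+	 */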
+	if (level)
+		vioapic->rtbl[pin].reg |= IOART_REM_IRR;
+
+	vector = low & IOART_INTVEC;
+	dest = high >> APIC_ID_SHIFT;
+	vlapic_deliver_intr(vioapic->vm, level, dest, phys, delmode, vector);
+}
+
+static void
+vioapic_set_pinstate(struct vioapic *vioapic, int pin, bool newstate)
+{
+	int oldcnt, newcnt;
+	bool needintr;
+
+	KASSERT(pin >= 0 && pin < REDIR_ENTRIES,
+	    ("vioapic_set_pinstate: invalid pin number %d", pin));
+
+	KASSERT(VIOAPIC_LOCKED(vioapic),
+	    ("vioapic_set_pinstate: vioapic is not locked"));
+
+	oldcnt = vioapic->rtbl[pin].acnt;
+	if (newstate)
+		vioapic->rtbl[pin].acnt++;
+	else
+		vioapic->rtbl[pin].acnt--;
+	newcnt = vioapic->rtbl[pin].acnt;
+
+	if (newcnt < 0) {
+		VIOAPIC_CTR2(vioapic, "ioapic pin%d: bad acnt %d",
+		    pin, newcnt);
+	}
+
+	needintr = false;
+	if (oldcnt == 0 && newcnt == 1) {
+		needintr = true;
+		VIOAPIC_CTR1(vioapic, "ioapic pin%d: asserted", pin);
+	} else if (oldcnt == 1 && newcnt == 0) {
+		VIOAPIC_CTR1(vioapic, "ioapic pin%d: deasserted", pin);
+	} else {
+		VIOAPIC_CTR3(vioapic, "ioapic pin%d: %s, ignored, acnt %d",
+		    pin, pinstate_str(newstate), newcnt);
+	}
+
+	if (needintr)
+		vioapic_send_intr(vioapic, pin);
+}
+
+enum irqstate {
+	IRQSTATE_ASSERT,
+	IRQSTATE_DEASSERT,
+	IRQSTATE_PULSE
+};
+
+static int
+vioapic_set_irqstate(struct vm *vm, int irq, enum irqstate irqstate)
+{
+	struct vioapic *vioapic;
+
+	if (irq < 0 || irq >= REDIR_ENTRIES)
+		return (EINVAL);
+
+	vioapic = vm_ioapic(vm);
+
+	VIOAPIC_LOCK(vioapic);
+	switch (irqstate) {
+	case IRQSTATE_ASSERT:
+		vioapic_set_pinstate(vioapic, irq, true);
+		break;
+	case IRQSTATE_DEASSERT:
+		vioapic_set_pinstate(vioapic, irq, false);
+		break;
+	case IRQSTATE_PULSE:
+		vioapic_set_pinstate(vioapic, irq, true);
+		vioapic_set_pinstate(vioapic, irq, false);
+		break;
+	default:
+		panic("vioapic_set_irqstate: invalid irqstate %d", irqstate);
+	}
+	VIOAPIC_UNLOCK(vioapic);
+
+	return (0);
+}
+
+int
+vioapic_assert_irq(struct vm *vm, int irq)
+{
+
+	return (vioapic_set_irqstate(vm, irq, IRQSTATE_ASSERT));
+}
+
+int
+vioapic_deassert_irq(struct vm *vm, int irq)
+{
+
+	return (vioapic_set_irqstate(vm, irq, IRQSTATE_DEASSERT));
+}
+
+int
+vioapic_pulse_irq(struct vm *vm, int irq)
+{
+
+	return (vioapic_set_irqstate(vm, irq, IRQSTATE_PULSE));
+}
+
+/*
+ * Reset the vlapic's trigger-mode register to reflect the ioapic pin
+ * configuration.
+ */
+static void
+vioapic_update_tmr(struct vm *vm, int vcpuid, void *arg)
+{
+	struct vioapic *vioapic;
+	struct vlapic *vlapic;
+	uint32_t low, high, dest;
+	int delmode, pin, vector;
+	bool level, phys;
+
+	vlapic = vm_lapic(vm, vcpuid);
+	vioapic = vm_ioapic(vm);
+
+	VIOAPIC_LOCK(vioapic);
+	/*
+	 * Reset all vectors to be edge-triggered.
+	 */
+	vlapic_reset_tmr(vlapic);
+	for (pin = 0; pin < REDIR_ENTRIES; pin++) {
+		low = vioapic->rtbl[pin].reg;
+		high = vioapic->rtbl[pin].reg >> 32;
+
+		level = low & IOART_TRGRLVL ? true : false;
+		if (!level)
+			continue;
+
+		/*
+		 * For a level-triggered 'pin' let the vlapic figure out if
+		 * an assertion on this 'pin' would result in an interrupt
+		 * being delivered to it. If yes, then it will modify the
+		 * TMR bit associated with this vector to level-triggered.
+		 */
+		phys = ((low & IOART_DESTMOD) == IOART_DESTPHY);
+		delmode = low & IOART_DELMOD;
+		vector = low & IOART_INTVEC;
+		dest = high >> APIC_ID_SHIFT;
+		vlapic_set_tmr_level(vlapic, dest, phys, delmode, vector);
+	}
+	VIOAPIC_UNLOCK(vioapic);
+}
+
+static uint32_t
+vioapic_read(struct vioapic *vioapic, int vcpuid, uint32_t addr)
+{
+	int regnum, pin, rshift;
+
+	regnum = addr & 0xff;
+	switch (regnum) {
+	case IOAPIC_ID:
+		return (vioapic->id);
+		break;
+	case IOAPIC_VER:
+		return (((REDIR_ENTRIES - 1) << MAXREDIRSHIFT) | 0x11);
+		break;
+	case IOAPIC_ARB:
+		return (vioapic->id);
+		break;
+	default:
+		break;
+	}
+
+	/* redirection table entries */
+	if (regnum >= IOAPIC_REDTBL &&
+	    regnum < IOAPIC_REDTBL + REDIR_ENTRIES * 2) {
+		pin = (regnum - IOAPIC_REDTBL) / 2;
+		if ((regnum - IOAPIC_REDTBL) % 2)
+			rshift = 32;
+		else
+			rshift = 0;
+
+		return (vioapic->rtbl[pin].reg >> rshift);
+	}
+
+	return (0);
+}
+
+static void
+vioapic_write(struct vioapic *vioapic, int vcpuid, uint32_t addr, uint32_t data)
+{
+	uint64_t data64, mask64;
+	uint64_t last, changed;
+	int regnum, pin, lshift;
+	cpuset_t allvcpus;
+
+	regnum = addr & 0xff;
+	switch (regnum) {
+	case IOAPIC_ID:
+		vioapic->id = data & APIC_ID_MASK;
+		break;
+	case IOAPIC_VER:
+	case IOAPIC_ARB:
+		/* readonly */
+		break;
+	default:
+		break;
+	}
+
+	/* redirection table entries */
+	if (regnum >= IOAPIC_REDTBL &&
+	    regnum < IOAPIC_REDTBL + REDIR_ENTRIES * 2) {
+		pin = (regnum - IOAPIC_REDTBL) / 2;
+		if ((regnum - IOAPIC_REDTBL) % 2)
+			lshift = 32;
+		else
+			lshift = 0;
+
+		last = vioapic->rtbl[pin].reg;
+
+		data64 = (uint64_t)data << lshift;
+		mask64 = (uint64_t)0xffffffff << lshift;
+		vioapic->rtbl[pin].reg &= ~mask64 | RTBL_RO_BITS;
+		vioapic->rtbl[pin].reg |= data64 & ~RTBL_RO_BITS;
+
+		VIOAPIC_CTR2(vioapic, "ioapic pin%d: redir table entry %#lx",
+		    pin, vioapic->rtbl[pin].reg);
+
+		/*
+		 * If any fields in the redirection table entry (except mask
+		 * or polarity) have changed then rendezvous all the vcpus
+		 * to update their vlapic trigger-mode registers.
+		 */
+		changed = last ^ vioapic->rtbl[pin].reg;
+		if (changed & ~(IOART_INTMASK | IOART_INTPOL)) {
+			VIOAPIC_CTR1(vioapic, "ioapic pin%d: recalculate "
+			    "vlapic trigger-mode register", pin);
+			VIOAPIC_UNLOCK(vioapic);
+			allvcpus = vm_active_cpus(vioapic->vm);
+			vm_smp_rendezvous(vioapic->vm, vcpuid, allvcpus,
+			    vioapic_update_tmr, NULL);
+			VIOAPIC_LOCK(vioapic);
+		}
+
+		/*
+		 * Generate an interrupt if the following conditions are met:
+		 * - pin is not masked
+		 * - previous interrupt has been EOIed
+		 * - pin level is asserted
+		 */
+		if ((vioapic->rtbl[pin].reg & IOART_INTMASK) == IOART_INTMCLR &&
+		    (vioapic->rtbl[pin].reg & IOART_REM_IRR) == 0 &&
+		    (vioapic->rtbl[pin].acnt > 0)) {
+			VIOAPIC_CTR2(vioapic, "ioapic pin%d: asserted at rtbl "
+			    "write, acnt %d", pin, vioapic->rtbl[pin].acnt);
+			vioapic_send_intr(vioapic, pin);
+		}
+	}
+}
+
+static int
+vioapic_mmio_rw(struct vioapic *vioapic, int vcpuid, uint64_t gpa,
+    uint64_t *data, int size, bool doread)
+{
+	uint64_t offset;
+
+	offset = gpa - VIOAPIC_BASE;
+
+	/*
+	 * The IOAPIC specification allows 32-bit wide accesses to the
+	 * IOREGSEL (offset 0) and IOWIN (offset 16) registers.
+	 */
+	if (size != 4 || (offset != IOREGSEL && offset != IOWIN)) {
+		if (doread)
+			*data = 0;
+		return (0);
+	}
+
+	VIOAPIC_LOCK(vioapic);
+	if (offset == IOREGSEL) {
+		if (doread)
+			*data = vioapic->ioregsel;
+		else
+			vioapic->ioregsel = *data;
+	} else {
+		if (doread) {
+			*data = vioapic_read(vioapic, vcpuid,
+			    vioapic->ioregsel);
+		} else {
+			vioapic_write(vioapic, vcpuid, vioapic->ioregsel,
+			    *data);
+		}
+	}
+	VIOAPIC_UNLOCK(vioapic);
+
+	return (0);
+}
+
+int
+vioapic_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval,
+    int size, void *arg)
+{
+	int error;
+	struct vioapic *vioapic;
+
+	vioapic = vm_ioapic(vm);
+	error = vioapic_mmio_rw(vioapic, vcpuid, gpa, rval, size, true);
+	return (error);
+}
+
+int
+vioapic_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t wval,
+    int size, void *arg)
+{
+	int error;
+	struct vioapic *vioapic;
+
+	vioapic = vm_ioapic(vm);
+	error = vioapic_mmio_rw(vioapic, vcpuid, gpa, &wval, size, false);
+	return (error);
+}
+
+void
+vioapic_process_eoi(struct vm *vm, int vcpuid, int vector)
+{
+	struct vioapic *vioapic;
+	int pin;
+
+	KASSERT(vector >= 0 && vector < 256,
+	    ("vioapic_process_eoi: invalid vector %d", vector));
+
+	vioapic = vm_ioapic(vm);
+	VIOAPIC_CTR1(vioapic, "ioapic processing eoi for vector %d", vector);
+
+	/*
+	 * XXX keep track of the pins associated with this vector instead
+	 * of iterating on every single pin each time.
+	 */
+	VIOAPIC_LOCK(vioapic);
+	for (pin = 0; pin < REDIR_ENTRIES; pin++) {
+		if ((vioapic->rtbl[pin].reg & IOART_REM_IRR) == 0)
+			continue;
+		if ((vioapic->rtbl[pin].reg & IOART_INTVEC) != vector)
+			continue;
+		vioapic->rtbl[pin].reg &= ~IOART_REM_IRR;
+		if (vioapic->rtbl[pin].acnt > 0) {
+			VIOAPIC_CTR2(vioapic, "ioapic pin%d: asserted at eoi, "
+			    "acnt %d", pin, vioapic->rtbl[pin].acnt);
+			vioapic_send_intr(vioapic, pin);
+		}
+	}
+	VIOAPIC_UNLOCK(vioapic);
+}
+
+struct vioapic *
+vioapic_init(struct vm *vm)
+{
+	int i;
+	struct vioapic *vioapic;
+
+	vioapic = malloc(sizeof(struct vioapic), M_VIOAPIC, M_WAITOK | M_ZERO);
+
+	vioapic->vm = vm;
+	mtx_init(&vioapic->mtx, "vioapic lock", NULL, MTX_SPIN);
+
+	/* Initialize all redirection entries to mask all interrupts */
+	for (i = 0; i < REDIR_ENTRIES; i++)
+		vioapic->rtbl[i].reg = 0x0001000000010000UL;
+
+	return (vioapic);
+}
+
+void
+vioapic_cleanup(struct vioapic *vioapic)
+{
+
+	free(vioapic, M_VIOAPIC);
+}
+
+int
+vioapic_pincount(struct vm *vm)
+{
+
+	return (REDIR_ENTRIES);
+}


Property changes on: trunk/sys/amd64/vmm/io/vioapic.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vioapic.h
===================================================================
--- trunk/sys/amd64/vmm/io/vioapic.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vioapic.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,51 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * Copyright (c) 2013 Neel Natu <neel at freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vioapic.h 261088 2014-01-23 20:21:39Z jhb $
+ */
+
+#ifndef _VIOAPIC_H_
+#define	_VIOAPIC_H_
+
+#define	VIOAPIC_BASE	0xFEC00000
+#define	VIOAPIC_SIZE	4096
+
+struct vioapic *vioapic_init(struct vm *vm);
+void	vioapic_cleanup(struct vioapic *vioapic);
+
+int	vioapic_assert_irq(struct vm *vm, int irq);
+int	vioapic_deassert_irq(struct vm *vm, int irq);
+int	vioapic_pulse_irq(struct vm *vm, int irq);
+
+int	vioapic_mmio_write(void *vm, int vcpuid, uint64_t gpa,
+	    uint64_t wval, int size, void *arg);
+int	vioapic_mmio_read(void *vm, int vcpuid, uint64_t gpa,
+	    uint64_t *rval, int size, void *arg);
+
+int	vioapic_pincount(struct vm *vm);
+void	vioapic_process_eoi(struct vm *vm, int vcpuid, int vector);
+#endif


Property changes on: trunk/sys/amd64/vmm/io/vioapic.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vlapic.c
===================================================================
--- trunk/sys/amd64/vmm/io/vlapic.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vlapic.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,1656 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vlapic.c 284900 2015-06-28 03:22:26Z neel $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/vlapic.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/smp.h>
+
+#include <x86/specialreg.h>
+#include <x86/apicreg.h>
+
+#include <machine/clock.h>
+#include <machine/smp.h>
+
+#include <machine/vmm.h>
+
+#include "vmm_ipi.h"
+#include "vmm_lapic.h"
+#include "vmm_ktr.h"
+#include "vmm_stat.h"
+
+#include "vlapic.h"
+#include "vlapic_priv.h"
+#include "vioapic.h"
+
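+/* Priority class of a vector or TPR value (its upper four bits). */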
+#define	PRIO(x)			((x) >> 4)
+
+#define VLAPIC_VERSION		(16)
+
+#define	x2apic(vlapic)	(((vlapic)->msr_apicbase & APICBASE_X2APIC) ? 1 : 0)
+
+/*
+ * The 'vlapic->timer_mtx' is used to provide mutual exclusion between the
+ * vlapic_callout_handler() and vcpu accesses to:
+ * - timer_freq_bt, timer_period_bt, timer_fire_bt
+ * - timer LVT register
+ */
+#define	VLAPIC_TIMER_LOCK(vlapic)	mtx_lock_spin(&((vlapic)->timer_mtx))
+#define	VLAPIC_TIMER_UNLOCK(vlapic)	mtx_unlock_spin(&((vlapic)->timer_mtx))
+#define	VLAPIC_TIMER_LOCKED(vlapic)	mtx_owned(&((vlapic)->timer_mtx))
+
+/*
+ * APIC timer frequency:
+ * - arbitrary but chosen to be in the ballpark of contemporary hardware.
+ * - power-of-two to avoid loss of precision when converted to a bintime.
+ */
+#define VLAPIC_BUS_FREQ		(128 * 1024 * 1024)
+
+static __inline uint32_t
+vlapic_get_id(struct vlapic *vlapic)
+{
+
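+	/*
+	 * In x2APIC mode the APIC ID is the 32-bit vcpuid; in xAPIC mode
+	 * the 8-bit APIC ID is placed in bits 31:24 of the ID register.
+	 */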
+	if (x2apic(vlapic))
+		return (vlapic->vcpuid);
+	else
+		return (vlapic->vcpuid << 24);
+}
+
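+/*
+ * Compute the logical destination register for x2APIC mode: bits 31:16
+ * hold the cluster id and bits 15:0 hold a single bit identifying the
+ * APIC within the cluster.
+ */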
+static uint32_t
+x2apic_ldr(struct vlapic *vlapic)
+{
+	int apicid;
+	uint32_t ldr;
+
+	apicid = vlapic_get_id(vlapic);
+	ldr = 1 << (apicid & 0xf);
+	ldr |= (apicid & 0xffff0) << 12;
+	return (ldr);
+}
+
+void
+vlapic_dfr_write_handler(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic;
+
+	lapic = vlapic->apic_page;
+	if (x2apic(vlapic)) {
+		VM_CTR1(vlapic->vm, "ignoring write to DFR in x2apic mode: %#x",
+		    lapic->dfr);
+		lapic->dfr = 0;
+		return;
+	}
+
+	lapic->dfr &= APIC_DFR_MODEL_MASK;
+	lapic->dfr |= APIC_DFR_RESERVED;
+
+	if ((lapic->dfr & APIC_DFR_MODEL_MASK) == APIC_DFR_MODEL_FLAT)
+		VLAPIC_CTR0(vlapic, "vlapic DFR in Flat Model");
+	else if ((lapic->dfr & APIC_DFR_MODEL_MASK) == APIC_DFR_MODEL_CLUSTER)
+		VLAPIC_CTR0(vlapic, "vlapic DFR in Cluster Model");
+	else
+		VLAPIC_CTR1(vlapic, "DFR in Unknown Model %#x", lapic->dfr);
+}
+
+void
+vlapic_ldr_write_handler(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic;
+
+	lapic = vlapic->apic_page;
+
+	/* LDR is read-only in x2apic mode */
+	if (x2apic(vlapic)) {
+		VLAPIC_CTR1(vlapic, "ignoring write to LDR in x2apic mode: %#x",
+		    lapic->ldr);
+		lapic->ldr = x2apic_ldr(vlapic);
+	} else {
+		lapic->ldr &= ~APIC_LDR_RESERVED;
+		VLAPIC_CTR1(vlapic, "vlapic LDR set to %#x", lapic->ldr);
+	}
+}
+
+void
+vlapic_id_write_handler(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic;
+	
+	/*
+	 * We don't allow the ID register to be modified so reset it back to
+	 * its default value.
+	 */
+	lapic = vlapic->apic_page;
+	lapic->id = vlapic_get_id(vlapic);
+}
+
+static int
+vlapic_timer_divisor(uint32_t dcr)
+{
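+	/*
+	 * Only bits 0, 1 and 3 of the divide configuration register select
+	 * the divisor, hence the 0xB mask.
+	 */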
+	switch (dcr & 0xB) {
+	case APIC_TDCR_1:
+		return (1);
+	case APIC_TDCR_2:
+		return (2);
+	case APIC_TDCR_4:
+		return (4);
+	case APIC_TDCR_8:
+		return (8);
+	case APIC_TDCR_16:
+		return (16);
+	case APIC_TDCR_32:
+		return (32);
+	case APIC_TDCR_64:
+		return (64);
+	case APIC_TDCR_128:
+		return (128);
+	default:
+		panic("vlapic_timer_divisor: invalid dcr 0x%08x", dcr);
+	}
+}
+
+#if 0
+static inline void
+vlapic_dump_lvt(uint32_t offset, uint32_t *lvt)
+{
+	printf("Offset %x: lvt %08x (V:%02x DS:%x M:%x)\n", offset,
+	    *lvt, *lvt & APIC_LVTT_VECTOR, *lvt & APIC_LVTT_DS,
+	    *lvt & APIC_LVTT_M);
+}
+#endif
+
+static uint32_t
+vlapic_get_ccr(struct vlapic *vlapic)
+{
+	struct bintime bt_now, bt_rem;
+	struct LAPIC *lapic;
+	uint32_t ccr;
+	
+	ccr = 0;
+	lapic = vlapic->apic_page;
+
+	VLAPIC_TIMER_LOCK(vlapic);
+	if (callout_active(&vlapic->callout)) {
+		/*
+		 * If the timer is scheduled to expire in the future then
+		 * compute the value of 'ccr' based on the remaining time.
+		 */
+		binuptime(&bt_now);
+		if (bintime_cmp(&vlapic->timer_fire_bt, &bt_now, >)) {
+			bt_rem = vlapic->timer_fire_bt;
+			bintime_sub(&bt_rem, &bt_now);
+			ccr += bt_rem.sec * BT2FREQ(&vlapic->timer_freq_bt);
+			ccr += bt_rem.frac / vlapic->timer_freq_bt.frac;
+		}
+	}
+	KASSERT(ccr <= lapic->icr_timer, ("vlapic_get_ccr: invalid ccr %#x, "
+	    "icr_timer is %#x", ccr, lapic->icr_timer));
+	VLAPIC_CTR2(vlapic, "vlapic ccr_timer = %#x, icr_timer = %#x",
+	    ccr, lapic->icr_timer);
+	VLAPIC_TIMER_UNLOCK(vlapic);
+	return (ccr);
+}
+
+void
+vlapic_dcr_write_handler(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic;
+	int divisor;
+	
+	lapic = vlapic->apic_page;
+	VLAPIC_TIMER_LOCK(vlapic);
+
+	divisor = vlapic_timer_divisor(lapic->dcr_timer);
+	VLAPIC_CTR2(vlapic, "vlapic dcr_timer=%#x, divisor=%d",
+	    lapic->dcr_timer, divisor);
+
+	/*
+	 * Update the timer frequency and the timer period.
+	 *
+	 * XXX changes to the frequency divider will not take effect until
+	 * the timer is reloaded.
+	 */
+	FREQ2BT(VLAPIC_BUS_FREQ / divisor, &vlapic->timer_freq_bt);
+	vlapic->timer_period_bt = vlapic->timer_freq_bt;
+	bintime_mul(&vlapic->timer_period_bt, lapic->icr_timer);
+
+	VLAPIC_TIMER_UNLOCK(vlapic);
+}
+
+void
+vlapic_esr_write_handler(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic;
+	
+	lapic = vlapic->apic_page;
+	lapic->esr = vlapic->esr_pending;
+	vlapic->esr_pending = 0;
+}
+
+int
+vlapic_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
+{
+	struct LAPIC *lapic;
+	uint32_t *irrptr, *tmrptr, mask;
+	int idx;
+
+	KASSERT(vector >= 0 && vector < 256, ("invalid vector %d", vector));
+
+	lapic = vlapic->apic_page;
+	if (!(lapic->svr & APIC_SVR_ENABLE)) {
+		VLAPIC_CTR1(vlapic, "vlapic is software disabled, ignoring "
+		    "interrupt %d", vector);
+		return (0);
+	}
+
+	if (vector < 16) {
+		vlapic_set_error(vlapic, APIC_ESR_RECEIVE_ILLEGAL_VECTOR);
+		VLAPIC_CTR1(vlapic, "vlapic ignoring interrupt to vector %d",
+		    vector);
+		return (1);
+	}
+
+	if (vlapic->ops.set_intr_ready)
+		return ((*vlapic->ops.set_intr_ready)(vlapic, vector, level));
+
+	idx = (vector / 32) * 4;
+	mask = 1 << (vector % 32);
+
+	irrptr = &lapic->irr0;
+	atomic_set_int(&irrptr[idx], mask);
+
+	/*
+	 * Verify that the trigger-mode of the interrupt matches with
+	 * the vlapic TMR registers.
+	 */
+	tmrptr = &lapic->tmr0;
+	if ((tmrptr[idx] & mask) != (level ? mask : 0)) {
+		VLAPIC_CTR3(vlapic, "vlapic TMR[%d] is 0x%08x but "
+		    "interrupt is %s-triggered", idx / 4, tmrptr[idx],
+		    level ? "level" : "edge");
+	}
+
+	VLAPIC_CTR_IRR(vlapic, "vlapic_set_intr_ready");
+	return (1);
+}
+
+static __inline uint32_t *
+vlapic_get_lvtptr(struct vlapic *vlapic, uint32_t offset)
+{
+	struct LAPIC	*lapic = vlapic->apic_page;
+	int 		 i;
+
+	switch (offset) {
+	case APIC_OFFSET_CMCI_LVT:
+		return (&lapic->lvt_cmci);
+	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
+		i = (offset - APIC_OFFSET_TIMER_LVT) >> 2;
+		return ((&lapic->lvt_timer) + i);
+	default:
+		panic("vlapic_get_lvt: invalid LVT\n");
+	}
+}
+
+static __inline int
+lvt_off_to_idx(uint32_t offset)
+{
+	int index;
+
+	switch (offset) {
+	case APIC_OFFSET_CMCI_LVT:
+		index = APIC_LVT_CMCI;
+		break;
+	case APIC_OFFSET_TIMER_LVT:
+		index = APIC_LVT_TIMER;
+		break;
+	case APIC_OFFSET_THERM_LVT:
+		index = APIC_LVT_THERMAL;
+		break;
+	case APIC_OFFSET_PERF_LVT:
+		index = APIC_LVT_PMC;
+		break;
+	case APIC_OFFSET_LINT0_LVT:
+		index = APIC_LVT_LINT0;
+		break;
+	case APIC_OFFSET_LINT1_LVT:
+		index = APIC_LVT_LINT1;
+		break;
+	case APIC_OFFSET_ERROR_LVT:
+		index = APIC_LVT_ERROR;
+		break;
+	default:
+		index = -1;
+		break;
+	}
+	KASSERT(index >= 0 && index <= VLAPIC_MAXLVT_INDEX, ("lvt_off_to_idx: "
+	    "invalid lvt index %d for offset %#x", index, offset));
+
+	return (index);
+}
+
+static __inline uint32_t
+vlapic_get_lvt(struct vlapic *vlapic, uint32_t offset)
+{
+	int idx;
+	uint32_t val;
+
+	idx = lvt_off_to_idx(offset);
+	val = atomic_load_acq_32(&vlapic->lvt_last[idx]);
+	return (val);
+}
+
+void
+vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
+{
+	uint32_t *lvtptr, mask, val;
+	struct LAPIC *lapic;
+	int idx;
+	
+	lapic = vlapic->apic_page;
+	lvtptr = vlapic_get_lvtptr(vlapic, offset);	
+	val = *lvtptr;
+	idx = lvt_off_to_idx(offset);
+
+	if (!(lapic->svr & APIC_SVR_ENABLE))
+		val |= APIC_LVT_M;
+	mask = APIC_LVT_M | APIC_LVT_DS | APIC_LVT_VECTOR;
+	switch (offset) {
+	case APIC_OFFSET_TIMER_LVT:
+		mask |= APIC_LVTT_TM;
+		break;
+	case APIC_OFFSET_ERROR_LVT:
+		break;
+	case APIC_OFFSET_LINT0_LVT:
+	case APIC_OFFSET_LINT1_LVT:
+		mask |= APIC_LVT_TM | APIC_LVT_RIRR | APIC_LVT_IIPP;
+		/* FALLTHROUGH */
+	default:
+		mask |= APIC_LVT_DM;
+		break;
+	}
+	val &= mask;
+	*lvtptr = val;
+	atomic_store_rel_32(&vlapic->lvt_last[idx], val);
+}
+
+static void
+vlapic_mask_lvts(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic = vlapic->apic_page;
+
+	lapic->lvt_cmci |= APIC_LVT_M;
+	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_CMCI_LVT);
+
+	lapic->lvt_timer |= APIC_LVT_M;
+	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_TIMER_LVT);
+
+	lapic->lvt_thermal |= APIC_LVT_M;
+	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_THERM_LVT);
+
+	lapic->lvt_pcint |= APIC_LVT_M;
+	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_PERF_LVT);
+
+	lapic->lvt_lint0 |= APIC_LVT_M;
+	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_LINT0_LVT);
+
+	lapic->lvt_lint1 |= APIC_LVT_M;
+	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_LINT1_LVT);
+
+	lapic->lvt_error |= APIC_LVT_M;
+	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_ERROR_LVT);
+}
+
+static int
+vlapic_fire_lvt(struct vlapic *vlapic, uint32_t lvt)
+{
+	uint32_t vec, mode;
+
+	if (lvt & APIC_LVT_M)
+		return (0);
+
+	vec = lvt & APIC_LVT_VECTOR;
+	mode = lvt & APIC_LVT_DM;
+
+	switch (mode) {
+	case APIC_LVT_DM_FIXED:
+		if (vec < 16) {
+			vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR);
+			return (0);
+		}
+		if (vlapic_set_intr_ready(vlapic, vec, false))
+			vcpu_notify_event(vlapic->vm, vlapic->vcpuid, true);
+		break;
+	case APIC_LVT_DM_NMI:
+		vm_inject_nmi(vlapic->vm, vlapic->vcpuid);
+		break;
+	case APIC_LVT_DM_EXTINT:
+		vm_inject_extint(vlapic->vm, vlapic->vcpuid);
+		break;
+	default:
+		// Other modes ignored
+		return (0);
+	}
+	return (1);
+}
+
+#if 1
+static void
+dump_isrvec_stk(struct vlapic *vlapic)
+{
+	int i;
+	uint32_t *isrptr;
+
+	isrptr = &vlapic->apic_page->isr0;
+	for (i = 0; i < 8; i++)
+		printf("ISR%d 0x%08x\n", i, isrptr[i * 4]);
+
+	for (i = 0; i <= vlapic->isrvec_stk_top; i++)
+		printf("isrvec_stk[%d] = %d\n", i, vlapic->isrvec_stk[i]);
+}
+#endif
+
+/*
+ * Algorithm adopted from section "Interrupt, Task and Processor Priority"
+ * in Intel Architecture Manual Vol 3a.
+ */
+static void
+vlapic_update_ppr(struct vlapic *vlapic)
+{
+	int isrvec, tpr, ppr;
+
+	/*
+	 * Note that the value on the stack at index 0 is always 0.
+	 *
+	 * This is a placeholder for the value of ISRV when none of the
+	 * bits is set in the ISRx registers.
+	 */
+	isrvec = vlapic->isrvec_stk[vlapic->isrvec_stk_top];
+	tpr = vlapic->apic_page->tpr;
+
+#if 1
+	{
+		int i, lastprio, curprio, vector, idx;
+		uint32_t *isrptr;
+
+		if (vlapic->isrvec_stk_top == 0 && isrvec != 0)
+			panic("isrvec_stk is corrupted: %d", isrvec);
+
+		/*
+		 * Make sure that the priority of the nested interrupts is
+		 * always increasing.
+		 */
+		lastprio = -1;
+		for (i = 1; i <= vlapic->isrvec_stk_top; i++) {
+			curprio = PRIO(vlapic->isrvec_stk[i]);
+			if (curprio <= lastprio) {
+				dump_isrvec_stk(vlapic);
+				panic("isrvec_stk does not satisfy invariant");
+			}
+			lastprio = curprio;
+		}
+
+		/*
+		 * Make sure that each bit set in the ISRx registers has a
+		 * corresponding entry on the isrvec stack.
+		 */
+		i = 1;
+		isrptr = &vlapic->apic_page->isr0;
+		for (vector = 0; vector < 256; vector++) {
+			idx = (vector / 32) * 4;
+			if (isrptr[idx] & (1 << (vector % 32))) {
+				if (i > vlapic->isrvec_stk_top ||
+				    vlapic->isrvec_stk[i] != vector) {
+					dump_isrvec_stk(vlapic);
+					panic("ISR and isrvec_stk out of sync");
+				}
+				i++;
+			}
+		}
+	}
+#endif
+
+	if (PRIO(tpr) >= PRIO(isrvec))
+		ppr = tpr;
+	else
+		ppr = isrvec & 0xf0;
+
+	vlapic->apic_page->ppr = ppr;
+	VLAPIC_CTR1(vlapic, "vlapic_update_ppr 0x%02x", ppr);
+}
+
+static VMM_STAT(VLAPIC_GRATUITOUS_EOI, "EOI without any in-service interrupt");
+
+static void
+vlapic_process_eoi(struct vlapic *vlapic)
+{
+	struct LAPIC	*lapic = vlapic->apic_page;
+	uint32_t	*isrptr, *tmrptr;
+	int		i, idx, bitpos, vector;
+
+	isrptr = &lapic->isr0;
+	tmrptr = &lapic->tmr0;
+
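+	/*
+	 * Clear the bit for the highest priority vector in service and, if
+	 * that vector is level-triggered (its TMR bit is set), forward the
+	 * EOI to the vioapic.
+	 */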
+	for (i = 7; i >= 0; i--) {
+		idx = i * 4;
+		bitpos = fls(isrptr[idx]);
+		if (bitpos-- != 0) {
+			if (vlapic->isrvec_stk_top <= 0) {
+				panic("invalid vlapic isrvec_stk_top %d",
+				      vlapic->isrvec_stk_top);
+			}
+			isrptr[idx] &= ~(1 << bitpos);
+			vector = i * 32 + bitpos;
+			VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "EOI vector %d",
+			    vector);
+			VLAPIC_CTR_ISR(vlapic, "vlapic_process_eoi");
+			vlapic->isrvec_stk_top--;
+			vlapic_update_ppr(vlapic);
+			if ((tmrptr[idx] & (1 << bitpos)) != 0) {
+				vioapic_process_eoi(vlapic->vm, vlapic->vcpuid,
+				    vector);
+			}
+			return;
+		}
+	}
+	VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "Gratuitous EOI");
+	vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_GRATUITOUS_EOI, 1);
+}
+
+static __inline int
+vlapic_get_lvt_field(uint32_t lvt, uint32_t mask)
+{
+
+	return (lvt & mask);
+}
+
+static __inline int
+vlapic_periodic_timer(struct vlapic *vlapic)
+{
+	uint32_t lvt;
+	
+	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);
+
+	return (vlapic_get_lvt_field(lvt, APIC_LVTT_TM_PERIODIC));
+}
+
+static VMM_STAT(VLAPIC_INTR_ERROR, "error interrupts generated by vlapic");
+
+void
+vlapic_set_error(struct vlapic *vlapic, uint32_t mask)
+{
+	uint32_t lvt;
+
+	vlapic->esr_pending |= mask;
+	if (vlapic->esr_firing)
+		return;
+	vlapic->esr_firing = 1;
+
+	// The error LVT always uses the fixed delivery mode.
+	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_ERROR_LVT);
+	if (vlapic_fire_lvt(vlapic, lvt | APIC_LVT_DM_FIXED)) {
+		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_ERROR, 1);
+	}
+	vlapic->esr_firing = 0;
+}
+
+static VMM_STAT(VLAPIC_INTR_TIMER, "timer interrupts generated by vlapic");
+
+static void
+vlapic_fire_timer(struct vlapic *vlapic)
+{
+	uint32_t lvt;
+
+	KASSERT(VLAPIC_TIMER_LOCKED(vlapic), ("vlapic_fire_timer not locked"));
+	
+	// The timer LVT always uses the fixed delivery mode.
+	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);
+	if (vlapic_fire_lvt(vlapic, lvt | APIC_LVT_DM_FIXED)) {
+		VLAPIC_CTR0(vlapic, "vlapic timer fired");
+		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_TIMER, 1);
+	}
+}
+
+static VMM_STAT(VLAPIC_INTR_CMC,
+    "corrected machine check interrupts generated by vlapic");
+
+void
+vlapic_fire_cmci(struct vlapic *vlapic)
+{
+	uint32_t lvt;
+
+	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_CMCI_LVT);
+	if (vlapic_fire_lvt(vlapic, lvt)) {
+		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_CMC, 1);
+	}
+}
+
+static VMM_STAT_ARRAY(LVTS_TRIGGERRED, VLAPIC_MAXLVT_INDEX + 1,
+    "lvts triggered");
+
+int
+vlapic_trigger_lvt(struct vlapic *vlapic, int vector)
+{
+	uint32_t lvt;
+
+	if (vlapic_enabled(vlapic) == false) {
+		/*
+		 * When the local APIC is global/hardware disabled,
+		 * LINT[1:0] pins are configured as INTR and NMI pins,
+		 * respectively.
+		 */
+		switch (vector) {
+			case APIC_LVT_LINT0:
+				vm_inject_extint(vlapic->vm, vlapic->vcpuid);
+				break;
+			case APIC_LVT_LINT1:
+				vm_inject_nmi(vlapic->vm, vlapic->vcpuid);
+				break;
+			default:
+				break;
+		}
+		return (0);
+	}
+
+	switch (vector) {
+	case APIC_LVT_LINT0:
+		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_LINT0_LVT);
+		break;
+	case APIC_LVT_LINT1:
+		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_LINT1_LVT);
+		break;
+	case APIC_LVT_TIMER:
+		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);
+		lvt |= APIC_LVT_DM_FIXED;
+		break;
+	case APIC_LVT_ERROR:
+		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_ERROR_LVT);
+		lvt |= APIC_LVT_DM_FIXED;
+		break;
+	case APIC_LVT_PMC:
+		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_PERF_LVT);
+		break;
+	case APIC_LVT_THERMAL:
+		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_THERM_LVT);
+		break;
+	case APIC_LVT_CMCI:
+		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_CMCI_LVT);
+		break;
+	default:
+		return (EINVAL);
+	}
+	if (vlapic_fire_lvt(vlapic, lvt)) {
+		vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
+		    LVTS_TRIGGERRED, vector, 1);
+	}
+	return (0);
+}
+
+static void
+vlapic_callout_handler(void *arg)
+{
+	struct vlapic *vlapic;
+	struct bintime bt, btnow;
+	sbintime_t rem_sbt;
+
+	vlapic = arg;
+
+	VLAPIC_TIMER_LOCK(vlapic);
+	if (callout_pending(&vlapic->callout))	/* callout was reset */
+		goto done;
+
+	if (!callout_active(&vlapic->callout))	/* callout was stopped */
+		goto done;
+
+	callout_deactivate(&vlapic->callout);
+
+	vlapic_fire_timer(vlapic);
+
+	if (vlapic_periodic_timer(vlapic)) {
+		binuptime(&btnow);
+		KASSERT(bintime_cmp(&btnow, &vlapic->timer_fire_bt, >=),
+		    ("vlapic callout at %#lx.%#lx, expected at %#lx.%#lx",
+		    btnow.sec, btnow.frac, vlapic->timer_fire_bt.sec,
+		    vlapic->timer_fire_bt.frac));
+
+		/*
+		 * Compute the delta between when the timer was supposed to
+		 * fire and the present time.
+		 */
+		bt = btnow;
+		bintime_sub(&bt, &vlapic->timer_fire_bt);
+
+		rem_sbt = bttosbt(vlapic->timer_period_bt);
+		if (bintime_cmp(&bt, &vlapic->timer_period_bt, <)) {
+			/*
+			 * Adjust the time until the next countdown downward
+			 * to account for the lost time.
+			 */
+			rem_sbt -= bttosbt(bt);
+		} else {
+			/*
+			 * If the delta is greater than the timer period then
+			 * just reset our time base instead of trying to catch
+			 * up.
+			 */
+			vlapic->timer_fire_bt = btnow;
+			VLAPIC_CTR2(vlapic, "vlapic timer lagging by %lu "
+			    "usecs, period is %lu usecs - resetting time base",
+			    bttosbt(bt) / SBT_1US,
+			    bttosbt(vlapic->timer_period_bt) / SBT_1US);
+		}
+
+		bintime_add(&vlapic->timer_fire_bt, &vlapic->timer_period_bt);
+		callout_reset_sbt(&vlapic->callout, rem_sbt, 0,
+		    vlapic_callout_handler, vlapic, 0);
+	}
+done:
+	VLAPIC_TIMER_UNLOCK(vlapic);
+}
+
+void
+vlapic_icrtmr_write_handler(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic;
+	sbintime_t sbt;
+	uint32_t icr_timer;
+
+	VLAPIC_TIMER_LOCK(vlapic);
+
+	lapic = vlapic->apic_page;
+	icr_timer = lapic->icr_timer;
+
+	vlapic->timer_period_bt = vlapic->timer_freq_bt;
+	bintime_mul(&vlapic->timer_period_bt, icr_timer);
+
+	if (icr_timer != 0) {
+		binuptime(&vlapic->timer_fire_bt);
+		bintime_add(&vlapic->timer_fire_bt, &vlapic->timer_period_bt);
+
+		sbt = bttosbt(vlapic->timer_period_bt);
+		callout_reset_sbt(&vlapic->callout, sbt, 0,
+		    vlapic_callout_handler, vlapic, 0);
+	} else
+		callout_stop(&vlapic->callout);
+
+	VLAPIC_TIMER_UNLOCK(vlapic);
+}
+
+/*
+ * This function populates 'dmask' with the set of vcpus that match the
+ * addressing specified by the (dest, phys, lowprio) tuple.
+ * 
+ * 'x2apic_dest' specifies whether 'dest' is interpreted as x2APIC (32-bit)
+ * or xAPIC (8-bit) destination field.
+ */
+static void
+vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
+    bool lowprio, bool x2apic_dest)
+{
+	struct vlapic *vlapic;
+	uint32_t dfr, ldr, ldest, cluster;
+	uint32_t mda_flat_ldest, mda_cluster_ldest, mda_ldest, mda_cluster_id;
+	cpuset_t amask;
+	int vcpuid;
+
+	if ((x2apic_dest && dest == 0xffffffff) ||
+	    (!x2apic_dest && dest == 0xff)) {
+		/*
+		 * Broadcast in both logical and physical modes.
+		 */
+		*dmask = vm_active_cpus(vm);
+		return;
+	}
+
+	if (phys) {
+		/*
+		 * Physical mode: destination is APIC ID.
+		 */
+		CPU_ZERO(dmask);
+		vcpuid = vm_apicid2vcpuid(vm, dest);
+		if (vcpuid < VM_MAXCPU)
+			CPU_SET(vcpuid, dmask);
+	} else {
+		/*
+		 * In the "Flat Model" the MDA is interpreted as an 8-bit wide
+		 * bitmask. This model is only available in the xAPIC mode.
+		 */
+		mda_flat_ldest = dest & 0xff;
+
+		/*
+		 * In the "Cluster Model" the MDA is used to identify a
+		 * specific cluster and a set of APICs in that cluster.
+		 */
+		if (x2apic_dest) {
+			mda_cluster_id = dest >> 16;
+			mda_cluster_ldest = dest & 0xffff;
+		} else {
+			mda_cluster_id = (dest >> 4) & 0xf;
+			mda_cluster_ldest = dest & 0xf;
+		}
+
+		/*
+		 * Logical mode: match each APIC that has a bit set
+		 * in its LDR that matches a bit in the ldest.
+		 */
+		CPU_ZERO(dmask);
+		amask = vm_active_cpus(vm);
+		while ((vcpuid = CPU_FFS(&amask)) != 0) {
+			vcpuid--;
+			CPU_CLR(vcpuid, &amask);
+
+			vlapic = vm_lapic(vm, vcpuid);
+			dfr = vlapic->apic_page->dfr;
+			ldr = vlapic->apic_page->ldr;
+
+			if ((dfr & APIC_DFR_MODEL_MASK) ==
+			    APIC_DFR_MODEL_FLAT) {
+				ldest = ldr >> 24;
+				mda_ldest = mda_flat_ldest;
+			} else if ((dfr & APIC_DFR_MODEL_MASK) ==
+			    APIC_DFR_MODEL_CLUSTER) {
+				if (x2apic(vlapic)) {
+					cluster = ldr >> 16;
+					ldest = ldr & 0xffff;
+				} else {
+					cluster = ldr >> 28;
+					ldest = (ldr >> 24) & 0xf;
+				}
+				if (cluster != mda_cluster_id)
+					continue;
+				mda_ldest = mda_cluster_ldest;
+			} else {
+				/*
+				 * Guest has configured a bad logical
+				 * model for this vcpu - skip it.
+				 */
+				VLAPIC_CTR1(vlapic, "vlapic has bad logical "
+				    "model %x - cannot deliver interrupt", dfr);
+				continue;
+			}
+
+			if ((mda_ldest & ldest) != 0) {
+				CPU_SET(vcpuid, dmask);
+				if (lowprio)
+					break;
+			}
+		}
+	}
+}
+
+static VMM_STAT_ARRAY(IPIS_SENT, VM_MAXCPU, "ipis sent to vcpu");
+
+static void
+vlapic_set_tpr(struct vlapic *vlapic, uint8_t val)
+{
+	struct LAPIC *lapic = vlapic->apic_page;
+
+	if (lapic->tpr != val) {
+		VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vlapic TPR changed "
+		    "from %#x to %#x", lapic->tpr, val);
+		lapic->tpr = val;
+		vlapic_update_ppr(vlapic);
+	}
+}
+
+static uint8_t
+vlapic_get_tpr(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic = vlapic->apic_page;
+
+	return (lapic->tpr);
+}
+
+void
+vlapic_set_cr8(struct vlapic *vlapic, uint64_t val)
+{
+	uint8_t tpr;
+
+	if (val & ~0xf) {
+		vm_inject_gp(vlapic->vm, vlapic->vcpuid);
+		return;
+	}
+
+	tpr = val << 4;
+	vlapic_set_tpr(vlapic, tpr);
+}
+
+uint64_t
+vlapic_get_cr8(struct vlapic *vlapic)
+{
+	uint8_t tpr;
+
+	tpr = vlapic_get_tpr(vlapic);
+	return (tpr >> 4);
+}
+
+int
+vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
+{
+	int i;
+	bool phys;
+	cpuset_t dmask;
+	uint64_t icrval;
+	uint32_t dest, vec, mode;
+	struct vlapic *vlapic2;
+	struct vm_exit *vmexit;
+	struct LAPIC *lapic;
+
+	lapic = vlapic->apic_page;
+	lapic->icr_lo &= ~APIC_DELSTAT_PEND;
+	icrval = ((uint64_t)lapic->icr_hi << 32) | lapic->icr_lo;
+
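+	/*
+	 * The destination is the upper 32 bits of the ICR in x2APIC mode
+	 * and the 8-bit destination field in bits 63:56 otherwise.
+	 */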
+	if (x2apic(vlapic))
+		dest = icrval >> 32;
+	else
+		dest = icrval >> (32 + 24);
+	vec = icrval & APIC_VECTOR_MASK;
+	mode = icrval & APIC_DELMODE_MASK;
+
+	if (mode == APIC_DELMODE_FIXED && vec < 16) {
+		vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR);
+		VLAPIC_CTR1(vlapic, "Ignoring invalid IPI %d", vec);
+		return (0);
+	}
+
+	VLAPIC_CTR2(vlapic, "icrlo 0x%016lx triggered ipi %d", icrval, vec);
+
+	if (mode == APIC_DELMODE_FIXED || mode == APIC_DELMODE_NMI) {
+		switch (icrval & APIC_DEST_MASK) {
+		case APIC_DEST_DESTFLD:
+			phys = ((icrval & APIC_DESTMODE_LOG) == 0);
+			vlapic_calcdest(vlapic->vm, &dmask, dest, phys, false,
+			    x2apic(vlapic));
+			break;
+		case APIC_DEST_SELF:
+			CPU_SETOF(vlapic->vcpuid, &dmask);
+			break;
+		case APIC_DEST_ALLISELF:
+			dmask = vm_active_cpus(vlapic->vm);
+			break;
+		case APIC_DEST_ALLESELF:
+			dmask = vm_active_cpus(vlapic->vm);
+			CPU_CLR(vlapic->vcpuid, &dmask);
+			break;
+		default:
+			CPU_ZERO(&dmask);	/* satisfy gcc */
+			break;
+		}
+
+		while ((i = CPU_FFS(&dmask)) != 0) {
+			i--;
+			CPU_CLR(i, &dmask);
+			if (mode == APIC_DELMODE_FIXED) {
+				lapic_intr_edge(vlapic->vm, i, vec);
+				vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
+						    IPIS_SENT, i, 1);
+				VLAPIC_CTR2(vlapic, "vlapic sending ipi %d "
+				    "to vcpuid %d", vec, i);
+			} else {
+				vm_inject_nmi(vlapic->vm, i);
+				VLAPIC_CTR1(vlapic, "vlapic sending ipi nmi "
+				    "to vcpuid %d", i);
+			}
+		}
+
+		return (0);	/* handled completely in the kernel */
+	}
+
+	if (mode == APIC_DELMODE_INIT) {
+		if ((icrval & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT)
+			return (0);
+
+		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
+			vlapic2 = vm_lapic(vlapic->vm, dest);
+
+			/* move from INIT to waiting-for-SIPI state */
+			if (vlapic2->boot_state == BS_INIT) {
+				vlapic2->boot_state = BS_SIPI;
+			}
+
+			return (0);
+		}
+	}
+
+	if (mode == APIC_DELMODE_STARTUP) {
+		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
+			vlapic2 = vm_lapic(vlapic->vm, dest);
+
+			/*
+			 * Ignore SIPIs in any state other than wait-for-SIPI
+			 */
+			if (vlapic2->boot_state != BS_SIPI)
+				return (0);
+
+			vlapic2->boot_state = BS_RUNNING;
+
+			*retu = true;
+			vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
+			vmexit->exitcode = VM_EXITCODE_SPINUP_AP;
+			vmexit->u.spinup_ap.vcpu = dest;
+			vmexit->u.spinup_ap.rip = vec << PAGE_SHIFT;
+
+			return (0);
+		}
+	}
+
+	/*
+	 * This will cause a return to userland.
+	 */
+	return (1);
+}
+
+void
+vlapic_self_ipi_handler(struct vlapic *vlapic, uint64_t val)
+{
+	int vec;
+
+	KASSERT(x2apic(vlapic), ("SELF_IPI does not exist in xAPIC mode"));
+
+	vec = val & 0xff;
+	lapic_intr_edge(vlapic->vm, vlapic->vcpuid, vec);
+	vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid, IPIS_SENT,
+	    vlapic->vcpuid, 1);
+	VLAPIC_CTR1(vlapic, "vlapic self-ipi %d", vec);
+}
+
+int
+vlapic_pending_intr(struct vlapic *vlapic, int *vecptr)
+{
+	struct LAPIC	*lapic = vlapic->apic_page;
+	int	  	 idx, i, bitpos, vector;
+	uint32_t	*irrptr, val;
+
+	if (vlapic->ops.pending_intr)
+		return ((*vlapic->ops.pending_intr)(vlapic, vecptr));
+
+	irrptr = &lapic->irr0;
+
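+	/*
+	 * Scan the IRR from the highest vector downwards. A pending vector
+	 * is reported only if its priority class is above the processor
+	 * priority in the PPR.
+	 */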
+	for (i = 7; i >= 0; i--) {
+		idx = i * 4;
+		val = atomic_load_acq_int(&irrptr[idx]);
+		bitpos = fls(val);
+		if (bitpos != 0) {
+			vector = i * 32 + (bitpos - 1);
+			if (PRIO(vector) > PRIO(lapic->ppr)) {
+				VLAPIC_CTR1(vlapic, "pending intr %d", vector);
+				if (vecptr != NULL)
+					*vecptr = vector;
+				return (1);
+			} else 
+				break;
+		}
+	}
+	return (0);
+}
+
+void
+vlapic_intr_accepted(struct vlapic *vlapic, int vector)
+{
+	struct LAPIC	*lapic = vlapic->apic_page;
+	uint32_t	*irrptr, *isrptr;
+	int		idx, stk_top;
+
+	if (vlapic->ops.intr_accepted)
+		return ((*vlapic->ops.intr_accepted)(vlapic, vector));
+
+	/*
+	 * Clear the ready bit for the vector being accepted in the IRR
+	 * and set the vector as in-service in the ISR.
+	 */
+	idx = (vector / 32) * 4;
+
+	irrptr = &lapic->irr0;
+	atomic_clear_int(&irrptr[idx], 1 << (vector % 32));
+	VLAPIC_CTR_IRR(vlapic, "vlapic_intr_accepted");
+
+	isrptr = &lapic->isr0;
+	isrptr[idx] |= 1 << (vector % 32);
+	VLAPIC_CTR_ISR(vlapic, "vlapic_intr_accepted");
+
+	/*
+	 * Update the PPR
+	 */
+	vlapic->isrvec_stk_top++;
+
+	stk_top = vlapic->isrvec_stk_top;
+	if (stk_top >= ISRVEC_STK_SIZE)
+		panic("isrvec_stk_top overflow %d", stk_top);
+
+	vlapic->isrvec_stk[stk_top] = vector;
+	vlapic_update_ppr(vlapic);
+}
+
+void
+vlapic_svr_write_handler(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic;
+	uint32_t old, new, changed;
+
+	lapic = vlapic->apic_page;
+
+	new = lapic->svr;
+	old = vlapic->svr_last;
+	vlapic->svr_last = new;
+
+	changed = old ^ new;
+	if ((changed & APIC_SVR_ENABLE) != 0) {
+		if ((new & APIC_SVR_ENABLE) == 0) {
+			/*
+			 * The apic is now disabled so stop the apic timer
+			 * and mask all the LVT entries.
+			 */
+			VLAPIC_CTR0(vlapic, "vlapic is software-disabled");
+			VLAPIC_TIMER_LOCK(vlapic);
+			callout_stop(&vlapic->callout);
+			VLAPIC_TIMER_UNLOCK(vlapic);
+			vlapic_mask_lvts(vlapic);
+		} else {
+			/*
+			 * The apic is now enabled so restart the apic timer
+			 * if it is configured in periodic mode.
+			 */
+			VLAPIC_CTR0(vlapic, "vlapic is software-enabled");
+			if (vlapic_periodic_timer(vlapic))
+				vlapic_icrtmr_write_handler(vlapic);
+		}
+	}
+}
+
+int
+vlapic_read(struct vlapic *vlapic, int mmio_access, uint64_t offset,
+    uint64_t *data, bool *retu)
+{
+	struct LAPIC	*lapic = vlapic->apic_page;
+	uint32_t	*reg;
+	int		 i;
+
+	/* Ignore MMIO accesses in x2APIC mode */
+	if (x2apic(vlapic) && mmio_access) {
+		VLAPIC_CTR1(vlapic, "MMIO read from offset %#lx in x2APIC mode",
+		    offset);
+		*data = 0;
+		goto done;
+	}
+
+	if (!x2apic(vlapic) && !mmio_access) {
+		/*
+		 * XXX Generate GP fault for MSR accesses in xAPIC mode
+		 */
+		VLAPIC_CTR1(vlapic, "x2APIC MSR read from offset %#lx in "
+		    "xAPIC mode", offset);
+		*data = 0;
+		goto done;
+	}
+
+	if (offset > sizeof(*lapic)) {
+		*data = 0;
+		goto done;
+	}
+	
+	offset &= ~3;
+	switch(offset)
+	{
+		case APIC_OFFSET_ID:
+			*data = lapic->id;
+			break;
+		case APIC_OFFSET_VER:
+			*data = lapic->version;
+			break;
+		case APIC_OFFSET_TPR:
+			*data = vlapic_get_tpr(vlapic);
+			break;
+		case APIC_OFFSET_APR:
+			*data = lapic->apr;
+			break;
+		case APIC_OFFSET_PPR:
+			*data = lapic->ppr;
+			break;
+		case APIC_OFFSET_EOI:
+			*data = lapic->eoi;
+			break;
+		case APIC_OFFSET_LDR:
+			*data = lapic->ldr;
+			break;
+		case APIC_OFFSET_DFR:
+			*data = lapic->dfr;
+			break;
+		case APIC_OFFSET_SVR:
+			*data = lapic->svr;
+			break;
+		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
+			i = (offset - APIC_OFFSET_ISR0) >> 2;
+			reg = &lapic->isr0;
+			*data = *(reg + i);
+			break;
+		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
+			i = (offset - APIC_OFFSET_TMR0) >> 2;
+			reg = &lapic->tmr0;
+			*data = *(reg + i);
+			break;
+		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
+			i = (offset - APIC_OFFSET_IRR0) >> 2;
+			reg = &lapic->irr0;
+			*data = atomic_load_acq_int(reg + i);
+			break;
+		case APIC_OFFSET_ESR:
+			*data = lapic->esr;
+			break;
+		case APIC_OFFSET_ICR_LOW: 
+			*data = lapic->icr_lo;
+			if (x2apic(vlapic))
+				*data |= (uint64_t)lapic->icr_hi << 32;
+			break;
+		case APIC_OFFSET_ICR_HI: 
+			*data = lapic->icr_hi;
+			break;
+		case APIC_OFFSET_CMCI_LVT:
+		case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
+			*data = vlapic_get_lvt(vlapic, offset);	
+#ifdef INVARIANTS
+			reg = vlapic_get_lvtptr(vlapic, offset);
+			KASSERT(*data == *reg, ("inconsistent lvt value at "
+			    "offset %#lx: %#lx/%#x", offset, *data, *reg));
+#endif
+			break;
+		case APIC_OFFSET_TIMER_ICR:
+			*data = lapic->icr_timer;
+			break;
+		case APIC_OFFSET_TIMER_CCR:
+			*data = vlapic_get_ccr(vlapic);
+			break;
+		case APIC_OFFSET_TIMER_DCR:
+			*data = lapic->dcr_timer;
+			break;
+		case APIC_OFFSET_SELF_IPI:
+			/*
+			 * XXX generate a GP fault if vlapic is in x2apic mode
+			 */
+			*data = 0;
+			break;
+		case APIC_OFFSET_RRR:
+		default:
+			*data = 0;
+			break;
+	}
+done:
+	VLAPIC_CTR2(vlapic, "vlapic read offset %#x, data %#lx", offset, *data);
+	return (0);
+}
+
+int
+vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
+    uint64_t data, bool *retu)
+{
+	struct LAPIC	*lapic = vlapic->apic_page;
+	uint32_t	*regptr;
+	int		retval;
+
+	KASSERT((offset & 0xf) == 0 && offset < PAGE_SIZE,
+	    ("vlapic_write: invalid offset %#lx", offset));
+
+	VLAPIC_CTR2(vlapic, "vlapic write offset %#lx, data %#lx",
+	    offset, data);
+
+	if (offset > sizeof(*lapic))
+		return (0);
+
+	/* Ignore MMIO accesses in x2APIC mode */
+	if (x2apic(vlapic) && mmio_access) {
+		VLAPIC_CTR2(vlapic, "MMIO write of %#lx to offset %#lx "
+		    "in x2APIC mode", data, offset);
+		return (0);
+	}
+
+	/*
+	 * XXX Generate GP fault for MSR accesses in xAPIC mode
+	 */
+	if (!x2apic(vlapic) && !mmio_access) {
+		VLAPIC_CTR2(vlapic, "x2APIC MSR write of %#lx to offset %#lx "
+		    "in xAPIC mode", data, offset);
+		return (0);
+	}
+
+	retval = 0;
+	switch(offset)
+	{
+		case APIC_OFFSET_ID:
+			lapic->id = data;
+			vlapic_id_write_handler(vlapic);
+			break;
+		case APIC_OFFSET_TPR:
+			vlapic_set_tpr(vlapic, data & 0xff);
+			break;
+		case APIC_OFFSET_EOI:
+			vlapic_process_eoi(vlapic);
+			break;
+		case APIC_OFFSET_LDR:
+			lapic->ldr = data;
+			vlapic_ldr_write_handler(vlapic);
+			break;
+		case APIC_OFFSET_DFR:
+			lapic->dfr = data;
+			vlapic_dfr_write_handler(vlapic);
+			break;
+		case APIC_OFFSET_SVR:
+			lapic->svr = data;
+			vlapic_svr_write_handler(vlapic);
+			break;
+		case APIC_OFFSET_ICR_LOW: 
+			lapic->icr_lo = data;
+			if (x2apic(vlapic))
+				lapic->icr_hi = data >> 32;
+			retval = vlapic_icrlo_write_handler(vlapic, retu);
+			break;
+		case APIC_OFFSET_ICR_HI:
+			lapic->icr_hi = data;
+			break;
+		case APIC_OFFSET_CMCI_LVT:
+		case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
+			regptr = vlapic_get_lvtptr(vlapic, offset);
+			*regptr = data;
+			vlapic_lvt_write_handler(vlapic, offset);
+			break;
+		case APIC_OFFSET_TIMER_ICR:
+			lapic->icr_timer = data;
+			vlapic_icrtmr_write_handler(vlapic);
+			break;
+
+		case APIC_OFFSET_TIMER_DCR:
+			lapic->dcr_timer = data;
+			vlapic_dcr_write_handler(vlapic);
+			break;
+
+		case APIC_OFFSET_ESR:
+			vlapic_esr_write_handler(vlapic);
+			break;
+
+		case APIC_OFFSET_SELF_IPI:
+			if (x2apic(vlapic))
+				vlapic_self_ipi_handler(vlapic, data);
+			break;
+
+		case APIC_OFFSET_VER:
+		case APIC_OFFSET_APR:
+		case APIC_OFFSET_PPR:
+		case APIC_OFFSET_RRR:
+		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
+		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
+		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
+		case APIC_OFFSET_TIMER_CCR:
+		default:
+			// Read only.
+			break;
+	}
+
+	return (retval);
+}
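
Both access-mode checks above rely on the architectural relationship between an
xAPIC MMIO register offset and its x2APIC MSR number: per the Intel SDM the MSR
is 0x800 plus the offset shifted right by 4, which lets the MSR emulation path
mentioned in the comments reuse these offset-based handlers. An illustrative
helper (hypothetical, not part of this code) showing that mapping:

#include <stdint.h>

static uint32_t
apic_offset_to_x2apic_msr(uint32_t offset)
{
	return (0x800 + (offset >> 4));		/* e.g. TPR 0x80 -> MSR 0x808 */
}
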
+
+static void
+vlapic_reset(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic;
+	
+	lapic = vlapic->apic_page;
+	bzero(lapic, sizeof(struct LAPIC));
+
+	lapic->id = vlapic_get_id(vlapic);
+	lapic->version = VLAPIC_VERSION;
+	lapic->version |= (VLAPIC_MAXLVT_INDEX << MAXLVTSHIFT);
+	lapic->dfr = 0xffffffff;
+	lapic->svr = APIC_SVR_VECTOR;
+	vlapic_mask_lvts(vlapic);
+	vlapic_reset_tmr(vlapic);
+
+	lapic->dcr_timer = 0;
+	vlapic_dcr_write_handler(vlapic);
+
+	if (vlapic->vcpuid == 0)
+		vlapic->boot_state = BS_RUNNING;	/* BSP */
+	else
+		vlapic->boot_state = BS_INIT;		/* AP */
+
+	vlapic->svr_last = lapic->svr;
+}
+
+void
+vlapic_init(struct vlapic *vlapic)
+{
+	KASSERT(vlapic->vm != NULL, ("vlapic_init: vm is not initialized"));
+	KASSERT(vlapic->vcpuid >= 0 && vlapic->vcpuid < VM_MAXCPU,
+	    ("vlapic_init: vcpuid is not initialized"));
+	KASSERT(vlapic->apic_page != NULL, ("vlapic_init: apic_page is not "
+	    "initialized"));
+
+	/*
+	 * If the vlapic is configured in x2apic mode then it will be
+	 * accessed in the critical section via the MSR emulation code.
+	 *
+	 * Therefore the timer mutex must be a spinlock because blockable
+	 * mutexes cannot be acquired in a critical section.
+	 */
+	mtx_init(&vlapic->timer_mtx, "vlapic timer mtx", NULL, MTX_SPIN);
+	callout_init(&vlapic->callout, 1);
+
+	vlapic->msr_apicbase = DEFAULT_APIC_BASE | APICBASE_ENABLED;
+
+	if (vlapic->vcpuid == 0)
+		vlapic->msr_apicbase |= APICBASE_BSP;
+
+	vlapic_reset(vlapic);
+}
+
+void
+vlapic_cleanup(struct vlapic *vlapic)
+{
+
+	callout_drain(&vlapic->callout);
+}
+
+uint64_t
+vlapic_get_apicbase(struct vlapic *vlapic)
+{
+
+	return (vlapic->msr_apicbase);
+}
+
+int
+vlapic_set_apicbase(struct vlapic *vlapic, uint64_t new)
+{
+
+	if (vlapic->msr_apicbase != new) {
+		VLAPIC_CTR2(vlapic, "Changing APIC_BASE MSR from %#lx to %#lx "
+		    "not supported", vlapic->msr_apicbase, new);
+		return (-1);
+	}
+
+	return (0);
+}
+
+void
+vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
+{
+	struct vlapic *vlapic;
+	struct LAPIC *lapic;
+
+	vlapic = vm_lapic(vm, vcpuid);
+
+	if (state == X2APIC_DISABLED)
+		vlapic->msr_apicbase &= ~APICBASE_X2APIC;
+	else
+		vlapic->msr_apicbase |= APICBASE_X2APIC;
+
+	/*
+	 * Reset the local APIC registers whose values are mode-dependent.
+	 *
+	 * XXX this works because the APIC mode can be changed only at vcpu
+	 * initialization time.
+	 */
+	lapic = vlapic->apic_page;
+	lapic->id = vlapic_get_id(vlapic);
+	if (x2apic(vlapic)) {
+		lapic->ldr = x2apic_ldr(vlapic);
+		lapic->dfr = 0;
+	} else {
+		lapic->ldr = 0;
+		lapic->dfr = 0xffffffff;
+	}
+
+	if (state == X2APIC_ENABLED) {
+		if (vlapic->ops.enable_x2apic_mode)
+			(*vlapic->ops.enable_x2apic_mode)(vlapic);
+	}
+}
+
+void
+vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
+    int delmode, int vec)
+{
+	bool lowprio;
+	int vcpuid;
+	cpuset_t dmask;
+
+	if (delmode != IOART_DELFIXED &&
+	    delmode != IOART_DELLOPRI &&
+	    delmode != IOART_DELEXINT) {
+		VM_CTR1(vm, "vlapic intr invalid delmode %#x", delmode);
+		return;
+	}
+	lowprio = (delmode == IOART_DELLOPRI);
+
+	/*
+	 * We don't provide any virtual interrupt redirection hardware so
+	 * all interrupts originating from the ioapic or MSI specify the
+	 * 'dest' in the legacy xAPIC format.
+	 */
+	vlapic_calcdest(vm, &dmask, dest, phys, lowprio, false);
+
+	while ((vcpuid = CPU_FFS(&dmask)) != 0) {
+		vcpuid--;
+		CPU_CLR(vcpuid, &dmask);
+		if (delmode == IOART_DELEXINT) {
+			vm_inject_extint(vm, vcpuid);
+		} else {
+			lapic_set_intr(vm, vcpuid, vec, level);
+		}
+	}
+}
+
+void
+vlapic_post_intr(struct vlapic *vlapic, int hostcpu, int ipinum)
+{
+	/*
+	 * Post an interrupt to the vcpu currently running on 'hostcpu'.
+	 *
+	 * This is done by leveraging hardware assists such as Posted
+	 * Interrupts (Intel) or the Doorbell MSR (AMD AVIC) that avoid a
+	 * VM exit.
+	 *
+	 * If neither of these features is available then fall back to
+	 * sending an IPI to 'hostcpu'.
+	 */
+	if (vlapic->ops.post_intr)
+		(*vlapic->ops.post_intr)(vlapic, hostcpu);
+	else
+		ipi_cpu(hostcpu, ipinum);
+}
+
+bool
+vlapic_enabled(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic = vlapic->apic_page;
+
+	if ((vlapic->msr_apicbase & APICBASE_ENABLED) != 0 &&
+	    (lapic->svr & APIC_SVR_ENABLE) != 0)
+		return (true);
+	else
+		return (false);
+}
+
+static void
+vlapic_set_tmr(struct vlapic *vlapic, int vector, bool level)
+{
+	struct LAPIC *lapic;
+	uint32_t *tmrptr, mask;
+	int idx;
+
+	lapic = vlapic->apic_page;
+	tmrptr = &lapic->tmr0;
+	idx = (vector / 32) * 4;
+	mask = 1 << (vector % 32);
+	if (level)
+		tmrptr[idx] |= mask;
+	else
+		tmrptr[idx] &= ~mask;
+
+	if (vlapic->ops.set_tmr != NULL)
+		(*vlapic->ops.set_tmr)(vlapic, vector, level);
+}
+
+void
+vlapic_reset_tmr(struct vlapic *vlapic)
+{
+	int vector;
+
+	VLAPIC_CTR0(vlapic, "vlapic resetting all vectors to edge-triggered");
+
+	for (vector = 0; vector <= 255; vector++)
+		vlapic_set_tmr(vlapic, vector, false);
+}
+
+void
+vlapic_set_tmr_level(struct vlapic *vlapic, uint32_t dest, bool phys,
+    int delmode, int vector)
+{
+	cpuset_t dmask;
+	bool lowprio;
+
+	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
+
+	/*
+	 * A level trigger is valid only for fixed and lowprio delivery modes.
+	 */
+	if (delmode != APIC_DELMODE_FIXED && delmode != APIC_DELMODE_LOWPRIO) {
+		VLAPIC_CTR1(vlapic, "Ignoring level trigger-mode for "
+		    "delivery-mode %d", delmode);
+		return;
+	}
+
+	lowprio = (delmode == APIC_DELMODE_LOWPRIO);
+	vlapic_calcdest(vlapic->vm, &dmask, dest, phys, lowprio, false);
+
+	if (!CPU_ISSET(vlapic->vcpuid, &dmask))
+		return;
+
+	VLAPIC_CTR1(vlapic, "vector %d set to level-triggered", vector);
+	vlapic_set_tmr(vlapic, vector, true);
+}


Property changes on: trunk/sys/amd64/vmm/io/vlapic.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vlapic.h
===================================================================
--- trunk/sys/amd64/vmm/io/vlapic.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vlapic.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,110 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vlapic.h 270073 2014-08-17 01:16:40Z grehan $
+ */
+
+#ifndef _VLAPIC_H_
+#define	_VLAPIC_H_
+
+struct vm;
+enum x2apic_state;
+
+int vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
+    uint64_t data, bool *retu);
+int vlapic_read(struct vlapic *vlapic, int mmio_access, uint64_t offset,
+    uint64_t *data, bool *retu);
+
+/*
+ * Returns 0 if there is no eligible vector that can be delivered to the
+ * guest at this time and non-zero otherwise.
+ *
+ * If an eligible vector number is found and 'vecptr' is not NULL then it will
+ * be stored in the location pointed to by 'vecptr'.
+ *
+ * Note that the vector does not automatically transition to the ISR as a
+ * result of calling this function.
+ */
+int vlapic_pending_intr(struct vlapic *vlapic, int *vecptr);
+
+/*
+ * Transition 'vector' from IRR to ISR. This function is called with the
+ * vector returned by 'vlapic_pending_intr()' when the guest is able to
+ * accept this interrupt (i.e. RFLAGS.IF = 1 and no conditions exist that
+ * block interrupt delivery).
+ */
+void vlapic_intr_accepted(struct vlapic *vlapic, int vector);
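
Taken together, vlapic_pending_intr() and vlapic_intr_accepted() are meant to
be called as a pair from the VM-entry path. A hedged sketch of that sequence,
assuming only the declarations in this header; the interruptibility check and
the event-injection step are placeholders for work done elsewhere:

/*
 * Hypothetical caller: deliver at most one APIC interrupt per VM entry.
 */
static void
inject_pending_interrupt(struct vlapic *vlapic, bool guest_interruptible)
{
	int vector;

	if (!vlapic_pending_intr(vlapic, &vector))
		return;			/* nothing deliverable right now */
	if (!guest_interruptible)
		return;			/* retry on the next VM entry */

	/* ... program 'vector' into the VMCS/VMCB event-injection field ... */

	vlapic_intr_accepted(vlapic, vector);	/* IRR -> ISR transition */
}
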
+
+/*
+ * Returns 1 if the vcpu needs to be notified of the interrupt and 0 otherwise.
+ */
+int vlapic_set_intr_ready(struct vlapic *vlapic, int vector, bool level);
+
+/*
+ * Post an interrupt to the vcpu running on 'hostcpu'. This will use a
+ * hardware assist if available (e.g. Posted Interrupt) or fall back to
+ * sending an 'ipinum' to interrupt the 'hostcpu'.
+ */
+void vlapic_post_intr(struct vlapic *vlapic, int hostcpu, int ipinum);
+
+void vlapic_set_error(struct vlapic *vlapic, uint32_t mask);
+void vlapic_fire_cmci(struct vlapic *vlapic);
+int vlapic_trigger_lvt(struct vlapic *vlapic, int vector);
+
+uint64_t vlapic_get_apicbase(struct vlapic *vlapic);
+int vlapic_set_apicbase(struct vlapic *vlapic, uint64_t val);
+void vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state s);
+bool vlapic_enabled(struct vlapic *vlapic);
+
+void vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
+    int delmode, int vec);
+
+/* Reset the trigger-mode bits for all vectors to be edge-triggered */
+void vlapic_reset_tmr(struct vlapic *vlapic);
+
+/*
+ * Set the trigger-mode bit associated with 'vector' to level-triggered if
+ * the (dest,phys,delmode) tuple resolves to an interrupt being delivered to
+ * this 'vlapic'.
+ */
+void vlapic_set_tmr_level(struct vlapic *vlapic, uint32_t dest, bool phys,
+    int delmode, int vector);
+
+void vlapic_set_cr8(struct vlapic *vlapic, uint64_t val);
+uint64_t vlapic_get_cr8(struct vlapic *vlapic);
+
+/* APIC write handlers */
+void vlapic_id_write_handler(struct vlapic *vlapic);
+void vlapic_ldr_write_handler(struct vlapic *vlapic);
+void vlapic_dfr_write_handler(struct vlapic *vlapic);
+void vlapic_svr_write_handler(struct vlapic *vlapic);
+void vlapic_esr_write_handler(struct vlapic *vlapic);
+int vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu);
+void vlapic_icrtmr_write_handler(struct vlapic *vlapic);
+void vlapic_dcr_write_handler(struct vlapic *vlapic);
+void vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset);
+void vlapic_self_ipi_handler(struct vlapic *vlapic, uint64_t val);
+#endif	/* _VLAPIC_H_ */


Property changes on: trunk/sys/amd64/vmm/io/vlapic.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vlapic_priv.h
===================================================================
--- trunk/sys/amd64/vmm/io/vlapic_priv.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vlapic_priv.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,191 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Neel Natu <neel at freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vlapic_priv.h 267447 2014-06-13 19:10:40Z jhb $
+ */
+
+#ifndef _VLAPIC_PRIV_H_
+#define	_VLAPIC_PRIV_H_
+
+#include <x86/apicreg.h>
+
+/*
+ * APIC Register:		Offset	   Description
+ */
+#define APIC_OFFSET_ID		0x20	/* Local APIC ID		*/
+#define APIC_OFFSET_VER		0x30	/* Local APIC Version		*/
+#define APIC_OFFSET_TPR		0x80	/* Task Priority Register	*/
+#define APIC_OFFSET_APR		0x90	/* Arbitration Priority		*/
+#define APIC_OFFSET_PPR		0xA0	/* Processor Priority Register	*/
+#define APIC_OFFSET_EOI		0xB0	/* EOI Register			*/
+#define APIC_OFFSET_RRR		0xC0	/* Remote read			*/
+#define APIC_OFFSET_LDR		0xD0	/* Logical Destination		*/
+#define APIC_OFFSET_DFR		0xE0	/* Destination Format Register	*/
+#define APIC_OFFSET_SVR		0xF0	/* Spurious Vector Register	*/
+#define APIC_OFFSET_ISR0	0x100	/* In Service Register		*/
+#define APIC_OFFSET_ISR1	0x110
+#define APIC_OFFSET_ISR2	0x120
+#define APIC_OFFSET_ISR3	0x130
+#define APIC_OFFSET_ISR4	0x140
+#define APIC_OFFSET_ISR5	0x150
+#define APIC_OFFSET_ISR6	0x160
+#define APIC_OFFSET_ISR7	0x170
+#define APIC_OFFSET_TMR0	0x180	/* Trigger Mode Register	*/
+#define APIC_OFFSET_TMR1	0x190
+#define APIC_OFFSET_TMR2	0x1A0
+#define APIC_OFFSET_TMR3	0x1B0
+#define APIC_OFFSET_TMR4	0x1C0
+#define APIC_OFFSET_TMR5	0x1D0
+#define APIC_OFFSET_TMR6	0x1E0
+#define APIC_OFFSET_TMR7	0x1F0
+#define APIC_OFFSET_IRR0	0x200	/* Interrupt Request Register	*/
+#define APIC_OFFSET_IRR1	0x210
+#define APIC_OFFSET_IRR2	0x220
+#define APIC_OFFSET_IRR3	0x230
+#define APIC_OFFSET_IRR4	0x240
+#define APIC_OFFSET_IRR5	0x250
+#define APIC_OFFSET_IRR6	0x260
+#define APIC_OFFSET_IRR7	0x270
+#define APIC_OFFSET_ESR		0x280	/* Error Status Register	*/
+#define APIC_OFFSET_CMCI_LVT	0x2F0	/* Local Vector Table (CMCI)	*/
+#define APIC_OFFSET_ICR_LOW	0x300	/* Interrupt Command Register	*/
+#define APIC_OFFSET_ICR_HI	0x310
+#define APIC_OFFSET_TIMER_LVT	0x320	/* Local Vector Table (Timer)	*/
+#define APIC_OFFSET_THERM_LVT	0x330	/* Local Vector Table (Thermal)	*/
+#define APIC_OFFSET_PERF_LVT	0x340	/* Local Vector Table (PMC)	*/
+#define APIC_OFFSET_LINT0_LVT	0x350	/* Local Vector Table (LINT0)	*/
+#define APIC_OFFSET_LINT1_LVT	0x360	/* Local Vector Table (LINT1)	*/
+#define APIC_OFFSET_ERROR_LVT	0x370	/* Local Vector Table (ERROR)	*/
+#define APIC_OFFSET_TIMER_ICR	0x380	/* Timer's Initial Count	*/
+#define APIC_OFFSET_TIMER_CCR	0x390	/* Timer's Current Count	*/
+#define APIC_OFFSET_TIMER_DCR	0x3E0	/* Timer's Divide Configuration	*/
+#define	APIC_OFFSET_SELF_IPI	0x3F0	/* Self IPI register */
+
+#define	VLAPIC_CTR0(vlapic, format)					\
+	VCPU_CTR0((vlapic)->vm, (vlapic)->vcpuid, format)
+
+#define	VLAPIC_CTR1(vlapic, format, p1)					\
+	VCPU_CTR1((vlapic)->vm, (vlapic)->vcpuid, format, p1)
+
+#define	VLAPIC_CTR2(vlapic, format, p1, p2)				\
+	VCPU_CTR2((vlapic)->vm, (vlapic)->vcpuid, format, p1, p2)
+
+#define	VLAPIC_CTR3(vlapic, format, p1, p2, p3)				\
+	VCPU_CTR3((vlapic)->vm, (vlapic)->vcpuid, format, p1, p2, p3)
+
+#define	VLAPIC_CTR_IRR(vlapic, msg)					\
+do {									\
+	uint32_t *irrptr = &(vlapic)->apic_page->irr0;			\
+	irrptr[0] = irrptr[0];	/* silence compiler */			\
+	VLAPIC_CTR1((vlapic), msg " irr0 0x%08x", irrptr[0 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " irr1 0x%08x", irrptr[1 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " irr2 0x%08x", irrptr[2 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " irr3 0x%08x", irrptr[3 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " irr4 0x%08x", irrptr[4 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " irr5 0x%08x", irrptr[5 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " irr6 0x%08x", irrptr[6 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " irr7 0x%08x", irrptr[7 << 2]);	\
+} while (0)
+
+#define	VLAPIC_CTR_ISR(vlapic, msg)					\
+do {									\
+	uint32_t *isrptr = &(vlapic)->apic_page->isr0;			\
+	isrptr[0] = isrptr[0];	/* silence compiler */			\
+	VLAPIC_CTR1((vlapic), msg " isr0 0x%08x", isrptr[0 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " isr1 0x%08x", isrptr[1 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " isr2 0x%08x", isrptr[2 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " isr3 0x%08x", isrptr[3 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " isr4 0x%08x", isrptr[4 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " isr5 0x%08x", isrptr[5 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " isr6 0x%08x", isrptr[6 << 2]);	\
+	VLAPIC_CTR1((vlapic), msg " isr7 0x%08x", isrptr[7 << 2]);	\
+} while (0)
+
+enum boot_state {
+	BS_INIT,
+	BS_SIPI,
+	BS_RUNNING
+};
+
+/*
+ * 16 priority levels with at most one vector injected per level.
+ */
+#define	ISRVEC_STK_SIZE		(16 + 1)
+
+#define VLAPIC_MAXLVT_INDEX	APIC_LVT_CMCI
+
+struct vlapic;
+
+struct vlapic_ops {
+	int (*set_intr_ready)(struct vlapic *vlapic, int vector, bool level);
+	int (*pending_intr)(struct vlapic *vlapic, int *vecptr);
+	void (*intr_accepted)(struct vlapic *vlapic, int vector);
+	void (*post_intr)(struct vlapic *vlapic, int hostcpu);
+	void (*set_tmr)(struct vlapic *vlapic, int vector, bool level);
+	void (*enable_x2apic_mode)(struct vlapic *vlapic);
+};
+
+struct vlapic {
+	struct vm		*vm;
+	int			vcpuid;
+	struct LAPIC		*apic_page;
+	struct vlapic_ops	ops;
+
+	uint32_t		esr_pending;
+	int			esr_firing;
+
+	struct callout	callout;	/* vlapic timer */
+	struct bintime	timer_fire_bt;	/* callout expiry time */
+	struct bintime	timer_freq_bt;	/* timer frequency */
+	struct bintime	timer_period_bt; /* timer period */
+	struct mtx	timer_mtx;
+
+	/*
+	 * The 'isrvec_stk' is a stack of vectors injected by the local apic.
+	 * A vector is popped from the stack when the processor does an EOI.
+	 * The vector on the top of the stack is used to compute the
+	 * Processor Priority in conjunction with the TPR.
+	 */
+	uint8_t		isrvec_stk[ISRVEC_STK_SIZE];
+	int		isrvec_stk_top;
+
+	uint64_t	msr_apicbase;
+	enum boot_state	boot_state;
+
+	/*
+	 * Copies of some registers in the virtual APIC page. We do this for
+	 * a couple of different reasons:
+	 * - to be able to detect what changed (e.g. svr_last)
+	 * - to maintain a coherent snapshot of the register (e.g. lvt_last)
+	 */
+	uint32_t	svr_last;
+	uint32_t	lvt_last[VLAPIC_MAXLVT_INDEX + 1];
+};
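
The 'isrvec_stk' comment above describes how the highest in-service vector
combines with the TPR to form the Processor Priority; the actual computation is
done by vlapic_update_ppr() in vlapic.c. A standalone sketch of that rule
(priority class is the upper nibble of a vector), using a hypothetical helper
name:

#include <stdint.h>

#define	PRIO(x)		((x) >> 4)

static uint8_t
compute_ppr(uint8_t tpr, uint8_t top_isr_vector)
{
	if (PRIO(tpr) >= PRIO(top_isr_vector))
		return (tpr);
	return (top_isr_vector & 0xf0);		/* class of the in-service vector */
}
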
+
+void vlapic_init(struct vlapic *vlapic);
+void vlapic_cleanup(struct vlapic *vlapic);
+
+#endif	/* _VLAPIC_PRIV_H_ */


Property changes on: trunk/sys/amd64/vmm/io/vlapic_priv.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vpmtmr.c
===================================================================
--- trunk/sys/amd64/vmm/io/vpmtmr.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vpmtmr.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,104 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014, Neel Natu (neel at freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/vpmtmr.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+
+#include <machine/vmm.h>
+
+#include "vpmtmr.h"
+
+/*
+ * The ACPI Power Management timer is a free-running 24- or 32-bit
+ * timer with a frequency of 3.579545 MHz.
+ *
+ * This implementation is 32 bits wide.
+ */
+
+#define PMTMR_FREQ	3579545  /* 3.579545MHz */
+
+struct vpmtmr {
+	sbintime_t	freq_sbt;
+	sbintime_t	baseuptime;
+	uint32_t	baseval;
+};
+
+static MALLOC_DEFINE(M_VPMTMR, "vpmtmr", "bhyve virtual acpi timer");
+
+struct vpmtmr *
+vpmtmr_init(struct vm *vm)
+{
+	struct vpmtmr *vpmtmr;
+	struct bintime bt;
+
+	vpmtmr = malloc(sizeof(struct vpmtmr), M_VPMTMR, M_WAITOK | M_ZERO);
+	vpmtmr->baseuptime = sbinuptime();
+	vpmtmr->baseval = 0;
+
+	FREQ2BT(PMTMR_FREQ, &bt);
+	vpmtmr->freq_sbt = bttosbt(bt);
+
+	return (vpmtmr);
+}
+
+void
+vpmtmr_cleanup(struct vpmtmr *vpmtmr)
+{
+
+	free(vpmtmr, M_VPMTMR);
+}
+
+int
+vpmtmr_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *val)
+{
+	struct vpmtmr *vpmtmr;
+	sbintime_t now, delta;
+
+	if (!in || bytes != 4)
+		return (-1);
+
+	vpmtmr = vm_pmtmr(vm);
+
+	/*
+	 * No locking needed because 'baseuptime' and 'baseval' are
+	 * written only during initialization.
+	 */
+	now = sbinuptime();
+	delta = now - vpmtmr->baseuptime;
+	KASSERT(delta >= 0, ("vpmtmr_handler: uptime went backwards: "
+	    "%#lx to %#lx", vpmtmr->baseuptime, now));
+	*val = vpmtmr->baseval + delta / vpmtmr->freq_sbt;
+
+	return (0);
+}
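
vpmtmr_handler() above divides the elapsed sbintime by the length of one
3.579545 MHz tick, so the register simply counts ticks since 'baseuptime' and
wraps modulo 2^32. The same computation in plain nanoseconds, as a hypothetical
userland sketch (the seconds/remainder split avoids 64-bit overflow on long
uptimes):

#include <stdint.h>

#define	PMTMR_FREQ	3579545ULL		/* Hz */
#define	NS_PER_SEC	1000000000ULL

static uint32_t
pmtmr_read(uint64_t now_ns, uint64_t base_ns, uint32_t baseval)
{
	uint64_t delta_ns, ticks;

	delta_ns = now_ns - base_ns;
	ticks = (delta_ns / NS_PER_SEC) * PMTMR_FREQ +
	    (delta_ns % NS_PER_SEC) * PMTMR_FREQ / NS_PER_SEC;
	return (baseval + (uint32_t)ticks);	/* wraps modulo 2^32 */
}
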


Property changes on: trunk/sys/amd64/vmm/io/vpmtmr.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vpmtmr.h
===================================================================
--- trunk/sys/amd64/vmm/io/vpmtmr.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vpmtmr.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,43 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Neel Natu (neel at freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vpmtmr.h 276429 2014-12-30 22:22:46Z neel $
+ */
+
+#ifndef _VPMTMR_H_
+#define	_VPMTMR_H_
+
+#define	IO_PMTMR 0x408
+
+struct vpmtmr;
+
+struct vpmtmr *vpmtmr_init(struct vm *vm);
+void vpmtmr_cleanup(struct vpmtmr *pmtmr);
+
+int vpmtmr_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *val);
+
+#endif


Property changes on: trunk/sys/amd64/vmm/io/vpmtmr.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vrtc.c
===================================================================
--- trunk/sys/amd64/vmm/io/vrtc.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vrtc.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,1020 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014, Neel Natu (neel at freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/vrtc.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/queue.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/clock.h>
+#include <sys/sysctl.h>
+
+#include <machine/vmm.h>
+
+#include <isa/rtc.h>
+
+#include "vmm_ktr.h"
+#include "vatpic.h"
+#include "vioapic.h"
+#include "vrtc.h"
+
+/* Register layout of the RTC */
+struct rtcdev {
+	uint8_t	sec;
+	uint8_t	alarm_sec;
+	uint8_t	min;
+	uint8_t	alarm_min;
+	uint8_t	hour;
+	uint8_t	alarm_hour;
+	uint8_t	day_of_week;
+	uint8_t	day_of_month;
+	uint8_t	month;
+	uint8_t	year;
+	uint8_t	reg_a;
+	uint8_t	reg_b;
+	uint8_t	reg_c;
+	uint8_t	reg_d;
+	uint8_t	nvram[36];
+	uint8_t	century;
+	uint8_t	nvram2[128 - 51];
+} __packed;
+CTASSERT(sizeof(struct rtcdev) == 128);
+CTASSERT(offsetof(struct rtcdev, century) == RTC_CENTURY);
+
+struct vrtc {
+	struct vm	*vm;
+	struct mtx	mtx;
+	struct callout	callout;
+	u_int		addr;		/* RTC register to read or write */
+	sbintime_t	base_uptime;
+	time_t		base_rtctime;
+	struct rtcdev	rtcdev;
+};
+
+#define	VRTC_LOCK(vrtc)		mtx_lock(&((vrtc)->mtx))
+#define	VRTC_UNLOCK(vrtc)	mtx_unlock(&((vrtc)->mtx))
+#define	VRTC_LOCKED(vrtc)	mtx_owned(&((vrtc)->mtx))
+
+/*
+ * RTC time is considered "broken" if:
+ * - RTC updates are halted by the guest
+ * - RTC date/time fields have invalid values
+ */
+#define	VRTC_BROKEN_TIME	((time_t)-1)
+
+#define	RTC_IRQ			8
+#define	RTCSB_BIN		0x04
+#define	RTCSB_ALL_INTRS		(RTCSB_UINTR | RTCSB_AINTR | RTCSB_PINTR)
+#define	rtc_halted(vrtc)	((vrtc->rtcdev.reg_b & RTCSB_HALT) != 0)
+#define	aintr_enabled(vrtc)	(((vrtc)->rtcdev.reg_b & RTCSB_AINTR) != 0)
+#define	pintr_enabled(vrtc)	(((vrtc)->rtcdev.reg_b & RTCSB_PINTR) != 0)
+#define	uintr_enabled(vrtc)	(((vrtc)->rtcdev.reg_b & RTCSB_UINTR) != 0)
+
+static void vrtc_callout_handler(void *arg);
+static void vrtc_set_reg_c(struct vrtc *vrtc, uint8_t newval);
+
+static MALLOC_DEFINE(M_VRTC, "vrtc", "bhyve virtual rtc");
+
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, vrtc, CTLFLAG_RW, NULL, NULL);
+
+static int rtc_flag_broken_time = 1;
+SYSCTL_INT(_hw_vmm_vrtc, OID_AUTO, flag_broken_time, CTLFLAG_RDTUN,
+    &rtc_flag_broken_time, 0, "Stop guest when invalid RTC time is detected");
+
+static __inline bool
+divider_enabled(int reg_a)
+{
+	/*
+	 * The RTC is counting only when dividers are not held in reset.
+	 */
+	return ((reg_a & 0x70) == 0x20);
+}
+
+static __inline bool
+update_enabled(struct vrtc *vrtc)
+{
+	/*
+	 * RTC date/time can be updated only if:
+	 * - divider is not held in reset
+	 * - guest has not disabled updates
+	 * - the date/time fields have valid contents
+	 */
+	if (!divider_enabled(vrtc->rtcdev.reg_a))
+		return (false);
+
+	if (rtc_halted(vrtc))
+		return (false);
+
+	if (vrtc->base_rtctime == VRTC_BROKEN_TIME)
+		return (false);
+
+	return (true);
+}
+
+static time_t
+vrtc_curtime(struct vrtc *vrtc, sbintime_t *basetime)
+{
+	sbintime_t now, delta;
+	time_t t, secs;
+
+	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
+
+	t = vrtc->base_rtctime;
+	*basetime = vrtc->base_uptime;
+	if (update_enabled(vrtc)) {
+		now = sbinuptime();
+		delta = now - vrtc->base_uptime;
+		KASSERT(delta >= 0, ("vrtc_curtime: uptime went backwards: "
+		    "%#lx to %#lx", vrtc->base_uptime, now));
+		secs = delta / SBT_1S;
+		t += secs;
+		*basetime += secs * SBT_1S;
+	}
+	return (t);
+}
+
+static __inline uint8_t
+rtcset(struct rtcdev *rtc, int val)
+{
+
+	KASSERT(val >= 0 && val < 100, ("%s: invalid bin2bcd index %d",
+	    __func__, val));
+
+	return ((rtc->reg_b & RTCSB_BIN) ? val : bin2bcd_data[val]);
+}
+
+static void
+secs_to_rtc(time_t rtctime, struct vrtc *vrtc, int force_update)
+{
+	struct clocktime ct;
+	struct timespec ts;
+	struct rtcdev *rtc;
+	int hour;
+
+	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
+
+	if (rtctime < 0) {
+		KASSERT(rtctime == VRTC_BROKEN_TIME,
+		    ("%s: invalid vrtc time %#lx", __func__, rtctime));
+		return;
+	}
+
+	/*
+	 * If the RTC is halted then the guest has "ownership" of the
+	 * date/time fields. Don't update the RTC date/time fields in
+	 * this case (unless forced).
+	 */
+	if (rtc_halted(vrtc) && !force_update)
+		return;
+
+	ts.tv_sec = rtctime;
+	ts.tv_nsec = 0;
+	clock_ts_to_ct(&ts, &ct);
+
+	KASSERT(ct.sec >= 0 && ct.sec <= 59, ("invalid clocktime sec %d",
+	    ct.sec));
+	KASSERT(ct.min >= 0 && ct.min <= 59, ("invalid clocktime min %d",
+	    ct.min));
+	KASSERT(ct.hour >= 0 && ct.hour <= 23, ("invalid clocktime hour %d",
+	    ct.hour));
+	KASSERT(ct.dow >= 0 && ct.dow <= 6, ("invalid clocktime wday %d",
+	    ct.dow));
+	KASSERT(ct.day >= 1 && ct.day <= 31, ("invalid clocktime mday %d",
+	    ct.day));
+	KASSERT(ct.mon >= 1 && ct.mon <= 12, ("invalid clocktime month %d",
+	    ct.mon));
+	KASSERT(ct.year >= POSIX_BASE_YEAR, ("invalid clocktime year %d",
+	    ct.year));
+
+	rtc = &vrtc->rtcdev;
+	rtc->sec = rtcset(rtc, ct.sec);
+	rtc->min = rtcset(rtc, ct.min);
+
+	if (rtc->reg_b & RTCSB_24HR) {
+		hour = ct.hour;
+	} else {
+		/*
+		 * Convert to the 12-hour format.
+		 */
+		switch (ct.hour) {
+		case 0:			/* 12 AM */
+		case 12:		/* 12 PM */
+			hour = 12;
+			break;
+		default:
+			/*
+			 * The remaining 'ct.hour' values are interpreted as:
+			 * [1  - 11] ->  1 - 11 AM
+			 * [13 - 23] ->  1 - 11 PM
+			 */
+			hour = ct.hour % 12;
+			break;
+		}
+	}
+
+	rtc->hour = rtcset(rtc, hour);
+
+	if ((rtc->reg_b & RTCSB_24HR) == 0 && ct.hour >= 12)
+		rtc->hour |= 0x80;	    /* set MSB to indicate PM */
+
+	rtc->day_of_week = rtcset(rtc, ct.dow + 1);
+	rtc->day_of_month = rtcset(rtc, ct.day);
+	rtc->month = rtcset(rtc, ct.mon);
+	rtc->year = rtcset(rtc, ct.year % 100);
+	rtc->century = rtcset(rtc, ct.year / 100);
+}
+
+static int
+rtcget(struct rtcdev *rtc, int val, int *retval)
+{
+	uint8_t upper, lower;
+
+	if (rtc->reg_b & RTCSB_BIN) {
+		*retval = val;
+		return (0);
+	}
+
+	lower = val & 0xf;
+	upper = (val >> 4) & 0xf;
+
+	if (lower > 9 || upper > 9)
+		return (-1);
+
+	*retval = upper * 10 + lower;
+	return (0);
+}
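
rtcset() and rtcget() above translate between binary values and the BCD
encoding used whenever RTCSB_BIN is clear: tens in the high nibble, units in
the low nibble (e.g. 59 decimal is stored as 0x59). A self-contained
illustration with hypothetical helper names:

#include <stdint.h>

static uint8_t
bin_to_bcd(int val)			/* valid for 0 <= val <= 99 */
{
	return ((val / 10) << 4 | (val % 10));
}

static int
bcd_to_bin(uint8_t val)			/* returns -1 for a malformed nibble */
{
	uint8_t lower = val & 0xf, upper = val >> 4;

	if (lower > 9 || upper > 9)
		return (-1);
	return (upper * 10 + lower);
}
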
+
+static time_t
+rtc_to_secs(struct vrtc *vrtc)
+{
+	struct clocktime ct;
+	struct timespec ts;
+	struct rtcdev *rtc;
+	struct vm *vm;
+	int century, error, hour, pm, year;
+
+	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
+
+	vm = vrtc->vm;
+	rtc = &vrtc->rtcdev;
+
+	bzero(&ct, sizeof(struct clocktime));
+
+	error = rtcget(rtc, rtc->sec, &ct.sec);
+	if (error || ct.sec < 0 || ct.sec > 59) {
+		VM_CTR2(vm, "Invalid RTC sec %#x/%d", rtc->sec, ct.sec);
+		goto fail;
+	}
+
+	error = rtcget(rtc, rtc->min, &ct.min);
+	if (error || ct.min < 0 || ct.min > 59) {
+		VM_CTR2(vm, "Invalid RTC min %#x/%d", rtc->min, ct.min);
+		goto fail;
+	}
+
+	pm = 0;
+	hour = rtc->hour;
+	if ((rtc->reg_b & RTCSB_24HR) == 0) {
+		if (hour & 0x80) {
+			hour &= ~0x80;
+			pm = 1;
+		}
+	}
+	error = rtcget(rtc, hour, &ct.hour);
+	if ((rtc->reg_b & RTCSB_24HR) == 0) {
+		if (ct.hour >= 1 && ct.hour <= 12) {
+			/*
+			 * Convert from 12-hour format to internal 24-hour
+			 * representation as follows:
+			 *
+			 *    12-hour format		ct.hour
+			 *	12	AM		0
+			 *	1 - 11	AM		1 - 11
+			 *	12	PM		12
+			 *	1 - 11	PM		13 - 23
+			 */
+			if (ct.hour == 12)
+				ct.hour = 0;
+			if (pm)
+				ct.hour += 12;
+		} else {
+			VM_CTR2(vm, "Invalid RTC 12-hour format %#x/%d",
+			    rtc->hour, ct.hour);
+			goto fail;
+		}
+	}
+
+	if (error || ct.hour < 0 || ct.hour > 23) {
+		VM_CTR2(vm, "Invalid RTC hour %#x/%d", rtc->hour, ct.hour);
+		goto fail;
+	}
+
+	/*
+	 * Ignore 'rtc->day_of_week' because some guests like Linux don't
+	 * bother setting it at all while others like OpenBSD/i386 set it
+	 * incorrectly.
+	 *
+	 * clock_ct_to_ts() does not depend on 'ct.dow' anyway so ignore it.
+	 */
+	ct.dow = -1;
+
+	error = rtcget(rtc, rtc->day_of_month, &ct.day);
+	if (error || ct.day < 1 || ct.day > 31) {
+		VM_CTR2(vm, "Invalid RTC mday %#x/%d", rtc->day_of_month,
+		    ct.day);
+		goto fail;
+	}
+
+	error = rtcget(rtc, rtc->month, &ct.mon);
+	if (error || ct.mon < 1 || ct.mon > 12) {
+		VM_CTR2(vm, "Invalid RTC month %#x/%d", rtc->month, ct.mon);
+		goto fail;
+	}
+
+	error = rtcget(rtc, rtc->year, &year);
+	if (error || year < 0 || year > 99) {
+		VM_CTR2(vm, "Invalid RTC year %#x/%d", rtc->year, year);
+		goto fail;
+	}
+
+	error = rtcget(rtc, rtc->century, &century);
+	ct.year = century * 100 + year;
+	if (error || ct.year < POSIX_BASE_YEAR) {
+		VM_CTR2(vm, "Invalid RTC century %#x/%d", rtc->century,
+		    ct.year);
+		goto fail;
+	}
+
+	error = clock_ct_to_ts(&ct, &ts);
+	if (error || ts.tv_sec < 0) {
+		VM_CTR3(vm, "Invalid RTC clocktime.date %04d-%02d-%02d",
+		    ct.year, ct.mon, ct.day);
+		VM_CTR3(vm, "Invalid RTC clocktime.time %02d:%02d:%02d",
+		    ct.hour, ct.min, ct.sec);
+		goto fail;
+	}
+	return (ts.tv_sec);		/* success */
+fail:
+	/*
+	 * Stop updating the RTC if the date/time fields programmed by
+	 * the guest are invalid.
+	 */
+	VM_CTR0(vrtc->vm, "Invalid RTC date/time programming detected");
+	return (VRTC_BROKEN_TIME);
+}
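
The 12-hour handling above (and the table in its comment) boils down to a small
mapping once the PM bit has been stripped from the hour register. A
hypothetical helper that mirrors it:

static int
hour12_to_hour24(int hour12, int pm)	/* hour12 in [1, 12] */
{
	int hour24;

	hour24 = (hour12 == 12) ? 0 : hour12;	/* 12 AM is midnight */
	if (pm)
		hour24 += 12;			/* 12 PM -> 12, 1-11 PM -> 13-23 */
	return (hour24);
}
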
+
+static int
+vrtc_time_update(struct vrtc *vrtc, time_t newtime, sbintime_t newbase)
+{
+	struct rtcdev *rtc;
+	sbintime_t oldbase;
+	time_t oldtime;
+	uint8_t alarm_sec, alarm_min, alarm_hour;
+
+	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
+
+	rtc = &vrtc->rtcdev;
+	alarm_sec = rtc->alarm_sec;
+	alarm_min = rtc->alarm_min;
+	alarm_hour = rtc->alarm_hour;
+
+	oldtime = vrtc->base_rtctime;
+	VM_CTR2(vrtc->vm, "Updating RTC secs from %#lx to %#lx",
+	    oldtime, newtime);
+
+	oldbase = vrtc->base_uptime;
+	VM_CTR2(vrtc->vm, "Updating RTC base uptime from %#lx to %#lx",
+	    oldbase, newbase);
+	vrtc->base_uptime = newbase;
+
+	if (newtime == oldtime)
+		return (0);
+
+	/*
+	 * If 'newtime' indicates that RTC updates are disabled then just
+	 * record that and return. There is no need to do alarm interrupt
+	 * processing in this case.
+	 */
+	if (newtime == VRTC_BROKEN_TIME) {
+		vrtc->base_rtctime = VRTC_BROKEN_TIME;
+		return (0);
+	}
+
+	/*
+	 * Return an error if RTC updates are halted by the guest.
+	 */
+	if (rtc_halted(vrtc)) {
+		VM_CTR0(vrtc->vm, "RTC update halted by guest");
+		return (EBUSY);
+	}
+
+	do {
+		/*
+		 * If the alarm interrupt is enabled and 'oldtime' is valid
+		 * then visit all the seconds between 'oldtime' and 'newtime'
+		 * to check for the alarm condition.
+		 *
+		 * Otherwise move the RTC time forward directly to 'newtime'.
+		 */
+		if (aintr_enabled(vrtc) && oldtime != VRTC_BROKEN_TIME)
+			vrtc->base_rtctime++;
+		else
+			vrtc->base_rtctime = newtime;
+
+		if (aintr_enabled(vrtc)) {
+			/*
+			 * Update the RTC date/time fields before checking
+			 * if the alarm conditions are satisfied.
+			 */
+			secs_to_rtc(vrtc->base_rtctime, vrtc, 0);
+
+			if ((alarm_sec >= 0xC0 || alarm_sec == rtc->sec) &&
+			    (alarm_min >= 0xC0 || alarm_min == rtc->min) &&
+			    (alarm_hour >= 0xC0 || alarm_hour == rtc->hour)) {
+				vrtc_set_reg_c(vrtc, rtc->reg_c | RTCIR_ALARM);
+			}
+		}
+	} while (vrtc->base_rtctime != newtime);
+
+	if (uintr_enabled(vrtc))
+		vrtc_set_reg_c(vrtc, rtc->reg_c | RTCIR_UPDATE);
+
+	return (0);
+}
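
The alarm test inside the loop above treats an alarm byte of 0xC0 or higher as
a match-anything wildcard, which is how the MC146818 "don't care" bits (the two
most significant bits of each alarm register) behave. For example, alarm_sec =
0x00 with alarm_min and alarm_hour both >= 0xC0 fires once a minute, on the
minute. A hypothetical predicate expressing the per-field check:

#include <stdbool.h>
#include <stdint.h>

static bool
alarm_field_matches(uint8_t alarm, uint8_t current)
{
	return (alarm >= 0xc0 || alarm == current);
}
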
+
+static sbintime_t
+vrtc_freq(struct vrtc *vrtc)
+{
+	int ratesel;
+
+	static sbintime_t pf[16] = {
+		0,
+		SBT_1S / 256,
+		SBT_1S / 128,
+		SBT_1S / 8192,
+		SBT_1S / 4096,
+		SBT_1S / 2048,
+		SBT_1S / 1024,
+		SBT_1S / 512,
+		SBT_1S / 256,
+		SBT_1S / 128,
+		SBT_1S / 64,
+		SBT_1S / 32,
+		SBT_1S / 16,
+		SBT_1S / 8,
+		SBT_1S / 4,
+		SBT_1S / 2,
+	};
+
+	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
+
+	/*
+	 * If both periodic and alarm interrupts are enabled then use the
+	 * periodic frequency to drive the callout. The minimum periodic
+	 * frequency (2 Hz) is higher than the alarm frequency (1 Hz) so
+	 * piggyback the alarm on top of it. The same argument applies to
+	 * the update interrupt.
+	 */
+	if (pintr_enabled(vrtc) && divider_enabled(vrtc->rtcdev.reg_a)) {
+		ratesel = vrtc->rtcdev.reg_a & 0xf;
+		return (pf[ratesel]);
+	} else if (aintr_enabled(vrtc) && update_enabled(vrtc)) {
+		return (SBT_1S);
+	} else if (uintr_enabled(vrtc) && update_enabled(vrtc)) {
+		return (SBT_1S);
+	} else {
+		return (0);
+	}
+}
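
The pf[] table above stores the periodic-interrupt period for each rate-select
value in the low nibble of register A, matching the MC146818 datasheet (0 = off,
1 = 256 Hz, 2 = 128 Hz, 3 through 15 = 65536 >> rate). The same table expressed
as a frequency in Hz, via a hypothetical helper:

#include <stdint.h>

static int
periodic_hz(uint8_t reg_a)
{
	int rate = reg_a & 0xf;

	if (rate == 0)
		return (0);
	if (rate == 1)
		return (256);
	if (rate == 2)
		return (128);
	return (65536 >> rate);			/* 8192 Hz down to 2 Hz */
}
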
+
+static void
+vrtc_callout_reset(struct vrtc *vrtc, sbintime_t freqsbt)
+{
+
+	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
+
+	if (freqsbt == 0) {
+		if (callout_active(&vrtc->callout)) {
+			VM_CTR0(vrtc->vm, "RTC callout stopped");
+			callout_stop(&vrtc->callout);
+		}
+		return;
+	}
+	VM_CTR1(vrtc->vm, "RTC callout frequency %d hz", SBT_1S / freqsbt);
+	callout_reset_sbt(&vrtc->callout, freqsbt, 0, vrtc_callout_handler,
+	    vrtc, 0);
+}
+
+static void
+vrtc_callout_handler(void *arg)
+{
+	struct vrtc *vrtc = arg;
+	sbintime_t freqsbt, basetime;
+	time_t rtctime;
+	int error;
+
+	VM_CTR0(vrtc->vm, "vrtc callout fired");
+
+	VRTC_LOCK(vrtc);
+	if (callout_pending(&vrtc->callout))	/* callout was reset */
+		goto done;
+
+	if (!callout_active(&vrtc->callout))	/* callout was stopped */
+		goto done;
+
+	callout_deactivate(&vrtc->callout);
+
+	KASSERT((vrtc->rtcdev.reg_b & RTCSB_ALL_INTRS) != 0,
+	    ("gratuitous vrtc callout"));
+
+	if (pintr_enabled(vrtc))
+		vrtc_set_reg_c(vrtc, vrtc->rtcdev.reg_c | RTCIR_PERIOD);
+
+	if (aintr_enabled(vrtc) || uintr_enabled(vrtc)) {
+		rtctime = vrtc_curtime(vrtc, &basetime);
+		error = vrtc_time_update(vrtc, rtctime, basetime);
+		KASSERT(error == 0, ("%s: vrtc_time_update error %d",
+		    __func__, error));
+	}
+
+	freqsbt = vrtc_freq(vrtc);
+	KASSERT(freqsbt != 0, ("%s: vrtc frequency cannot be zero", __func__));
+	vrtc_callout_reset(vrtc, freqsbt);
+done:
+	VRTC_UNLOCK(vrtc);
+}
+
+static __inline void
+vrtc_callout_check(struct vrtc *vrtc, sbintime_t freq)
+{
+	int active;
+
+	active = callout_active(&vrtc->callout) ? 1 : 0;
+	KASSERT((freq == 0 && !active) || (freq != 0 && active),
+	    ("vrtc callout %s with frequency %#lx",
+	    active ? "active" : "inactive", freq));
+}
+
+static void
+vrtc_set_reg_c(struct vrtc *vrtc, uint8_t newval)
+{
+	struct rtcdev *rtc;
+	int oldirqf, newirqf;
+	uint8_t oldval, changed;
+
+	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
+
+	rtc = &vrtc->rtcdev;
+	newval &= RTCIR_ALARM | RTCIR_PERIOD | RTCIR_UPDATE;
+
+	oldirqf = rtc->reg_c & RTCIR_INT;
+	if ((aintr_enabled(vrtc) && (newval & RTCIR_ALARM) != 0) ||
+	    (pintr_enabled(vrtc) && (newval & RTCIR_PERIOD) != 0) ||
+	    (uintr_enabled(vrtc) && (newval & RTCIR_UPDATE) != 0)) {
+		newirqf = RTCIR_INT;
+	} else {
+		newirqf = 0;
+	}
+
+	oldval = rtc->reg_c;
+	rtc->reg_c = newirqf | newval;
+	changed = oldval ^ rtc->reg_c;
+	if (changed) {
+		VM_CTR2(vrtc->vm, "RTC reg_c changed from %#x to %#x",
+		    oldval, rtc->reg_c);
+	}
+
+	if (!oldirqf && newirqf) {
+		VM_CTR1(vrtc->vm, "RTC irq %d asserted", RTC_IRQ);
+		vatpic_pulse_irq(vrtc->vm, RTC_IRQ);
+		vioapic_pulse_irq(vrtc->vm, RTC_IRQ);
+	} else if (oldirqf && !newirqf) {
+		VM_CTR1(vrtc->vm, "RTC irq %d deasserted", RTC_IRQ);
+	}
+}
+
+static int
+vrtc_set_reg_b(struct vrtc *vrtc, uint8_t newval)
+{
+	struct rtcdev *rtc;
+	sbintime_t oldfreq, newfreq, basetime;
+	time_t curtime, rtctime;
+	int error;
+	uint8_t oldval, changed;
+
+	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
+
+	rtc = &vrtc->rtcdev;
+	oldval = rtc->reg_b;
+	oldfreq = vrtc_freq(vrtc);
+
+	rtc->reg_b = newval;
+	changed = oldval ^ newval;
+	if (changed) {
+		VM_CTR2(vrtc->vm, "RTC reg_b changed from %#x to %#x",
+		    oldval, newval);
+	}
+
+	if (changed & RTCSB_HALT) {
+		if ((newval & RTCSB_HALT) == 0) {
+			rtctime = rtc_to_secs(vrtc);
+			basetime = sbinuptime();
+			if (rtctime == VRTC_BROKEN_TIME) {
+				if (rtc_flag_broken_time)
+					return (-1);
+			}
+		} else {
+			curtime = vrtc_curtime(vrtc, &basetime);
+			KASSERT(curtime == vrtc->base_rtctime, ("%s: mismatch "
+			    "between vrtc basetime (%#lx) and curtime (%#lx)",
+			    __func__, vrtc->base_rtctime, curtime));
+
+			/*
+			 * Force a refresh of the RTC date/time fields so
+			 * they reflect the time right before the guest set
+			 * the HALT bit.
+			 */
+			secs_to_rtc(curtime, vrtc, 1);
+
+			/*
+			 * Updates are halted so mark 'base_rtctime' to denote
+			 * that the RTC date/time is in flux.
+			 */
+			rtctime = VRTC_BROKEN_TIME;
+			rtc->reg_b &= ~RTCSB_UINTR;
+		}
+		error = vrtc_time_update(vrtc, rtctime, basetime);
+		KASSERT(error == 0, ("vrtc_time_update error %d", error));
+	}
+
+	/*
+	 * Side effect of changes to the interrupt enable bits.
+	 */
+	if (changed & RTCSB_ALL_INTRS)
+		vrtc_set_reg_c(vrtc, vrtc->rtcdev.reg_c);
+
+	/*
+	 * Change the callout frequency if it has changed.
+	 */
+	newfreq = vrtc_freq(vrtc);
+	if (newfreq != oldfreq)
+		vrtc_callout_reset(vrtc, newfreq);
+	else
+		vrtc_callout_check(vrtc, newfreq);
+
+	/*
+	 * The side effect of bits that control the RTC date/time format
+	 * is handled lazily when those fields are actually read.
+	 */
+	return (0);
+}
+
+static void
+vrtc_set_reg_a(struct vrtc *vrtc, uint8_t newval)
+{
+	sbintime_t oldfreq, newfreq;
+	uint8_t oldval, changed;
+
+	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
+
+	newval &= ~RTCSA_TUP;
+	oldval = vrtc->rtcdev.reg_a;
+	oldfreq = vrtc_freq(vrtc);
+
+	if (divider_enabled(oldval) && !divider_enabled(newval)) {
+		VM_CTR2(vrtc->vm, "RTC divider held in reset at %#lx/%#lx",
+		    vrtc->base_rtctime, vrtc->base_uptime);
+	} else if (!divider_enabled(oldval) && divider_enabled(newval)) {
+		/*
+		 * If the dividers are coming out of reset then update
+		 * 'base_uptime' before this happens. This is done to
+		 * maintain the illusion that the RTC date/time was frozen
+		 * while the dividers were disabled.
+		 */
+		vrtc->base_uptime = sbinuptime();
+		VM_CTR2(vrtc->vm, "RTC divider out of reset at %#lx/%#lx",
+		    vrtc->base_rtctime, vrtc->base_uptime);
+	} else {
+		/* NOTHING */
+	}
+
+	vrtc->rtcdev.reg_a = newval;
+	changed = oldval ^ newval;
+	if (changed) {
+		VM_CTR2(vrtc->vm, "RTC reg_a changed from %#x to %#x",
+		    oldval, newval);
+	}
+
+	/*
+	 * Side effect of changes to rate select and divider enable bits.
+	 */
+	newfreq = vrtc_freq(vrtc);
+	if (newfreq != oldfreq)
+		vrtc_callout_reset(vrtc, newfreq);
+	else
+		vrtc_callout_check(vrtc, newfreq);
+}
+
+int
+vrtc_set_time(struct vm *vm, time_t secs)
+{
+	struct vrtc *vrtc;
+	int error;
+
+	vrtc = vm_rtc(vm);
+	VRTC_LOCK(vrtc);
+	error = vrtc_time_update(vrtc, secs, sbinuptime());
+	VRTC_UNLOCK(vrtc);
+
+	if (error) {
+		VM_CTR2(vrtc->vm, "Error %d setting RTC time to %#lx", error,
+		    secs);
+	} else {
+		VM_CTR1(vrtc->vm, "RTC time set to %#lx", secs);
+	}
+
+	return (error);
+}
+
+time_t
+vrtc_get_time(struct vm *vm)
+{
+	struct vrtc *vrtc;
+	sbintime_t basetime;
+	time_t t;
+
+	vrtc = vm_rtc(vm);
+	VRTC_LOCK(vrtc);
+	t = vrtc_curtime(vrtc, &basetime);
+	VRTC_UNLOCK(vrtc);
+
+	return (t);
+}
+
+int
+vrtc_nvram_write(struct vm *vm, int offset, uint8_t value)
+{
+	struct vrtc *vrtc;
+	uint8_t *ptr;
+
+	vrtc = vm_rtc(vm);
+
+	/*
+	 * Don't allow writes to RTC control registers or the date/time fields.
+	 */
+	if (offset < offsetof(struct rtcdev, nvram[0]) ||
+	    offset == RTC_CENTURY || offset >= sizeof(struct rtcdev)) {
+		VM_CTR1(vrtc->vm, "RTC nvram write to invalid offset %d",
+		    offset);
+		return (EINVAL);
+	}
+
+	VRTC_LOCK(vrtc);
+	ptr = (uint8_t *)(&vrtc->rtcdev);
+	ptr[offset] = value;
+	VM_CTR2(vrtc->vm, "RTC nvram write %#x to offset %#x", value, offset);
+	VRTC_UNLOCK(vrtc);
+
+	return (0);
+}
+
+int
+vrtc_nvram_read(struct vm *vm, int offset, uint8_t *retval)
+{
+	struct vrtc *vrtc;
+	sbintime_t basetime;
+	time_t curtime;
+	uint8_t *ptr;
+
+	/*
+	 * Allow all offsets in the RTC to be read.
+	 */
+	if (offset < 0 || offset >= sizeof(struct rtcdev))
+		return (EINVAL);
+
+	vrtc = vm_rtc(vm);
+	VRTC_LOCK(vrtc);
+
+	/*
+	 * Update RTC date/time fields if necessary.
+	 */
+	if (offset < 10 || offset == RTC_CENTURY) {
+		curtime = vrtc_curtime(vrtc, &basetime);
+		secs_to_rtc(curtime, vrtc, 0);
+	}
+
+	ptr = (uint8_t *)(&vrtc->rtcdev);
+	*retval = ptr[offset];
+
+	VRTC_UNLOCK(vrtc);
+	return (0);
+}
+
+int
+vrtc_addr_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *val)
+{
+	struct vrtc *vrtc;
+
+	vrtc = vm_rtc(vm);
+
+	if (bytes != 1)
+		return (-1);
+
+	if (in) {
+		*val = 0xff;
+		return (0);
+	}
+
+	VRTC_LOCK(vrtc);
+	vrtc->addr = *val & 0x7f;
+	VRTC_UNLOCK(vrtc);
+
+	return (0);
+}
+
+int
+vrtc_data_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *val)
+{
+	struct vrtc *vrtc;
+	struct rtcdev *rtc;
+	sbintime_t basetime;
+	time_t curtime;
+	int error, offset;
+
+	vrtc = vm_rtc(vm);
+	rtc = &vrtc->rtcdev;
+
+	if (bytes != 1)
+		return (-1);
+
+	VRTC_LOCK(vrtc);
+	offset = vrtc->addr;
+	if (offset >= sizeof(struct rtcdev)) {
+		VRTC_UNLOCK(vrtc);
+		return (-1);
+	}
+
+	error = 0;
+	curtime = vrtc_curtime(vrtc, &basetime);
+	vrtc_time_update(vrtc, curtime, basetime);
+
+	/*
+	 * Update RTC date/time fields if necessary.
+	 *
+	 * This is not just for reads of the RTC. The side-effect of writing
+	 * the century byte requires other RTC date/time fields (e.g. sec)
+	 * to be updated here.
+	 */
+	if (offset < 10 || offset == RTC_CENTURY)
+		secs_to_rtc(curtime, vrtc, 0);
+
+	if (in) {
+		if (offset == 12) {
+			/*
+			 * XXX
+			 * reg_c interrupt flags are updated only if the
+			 * corresponding interrupt enable bit in reg_b is set.
+			 */
+			*val = vrtc->rtcdev.reg_c;
+			vrtc_set_reg_c(vrtc, 0);
+		} else {
+			*val = *((uint8_t *)rtc + offset);
+		}
+		VCPU_CTR2(vm, vcpuid, "Read value %#x from RTC offset %#x",
+		    *val, offset);
+	} else {
+		switch (offset) {
+		case 10:
+			VCPU_CTR1(vm, vcpuid, "RTC reg_a set to %#x", *val);
+			vrtc_set_reg_a(vrtc, *val);
+			break;
+		case 11:
+			VCPU_CTR1(vm, vcpuid, "RTC reg_b set to %#x", *val);
+			error = vrtc_set_reg_b(vrtc, *val);
+			break;
+		case 12:
+			VCPU_CTR1(vm, vcpuid, "RTC reg_c set to %#x (ignored)",
+			    *val);
+			break;
+		case 13:
+			VCPU_CTR1(vm, vcpuid, "RTC reg_d set to %#x (ignored)",
+			    *val);
+			break;
+		case 0:
+			/*
+			 * The high-order bit of 'seconds' is read-only.
+			 */
+			*val &= 0x7f;
+			/* FALLTHRU */
+		default:
+			VCPU_CTR2(vm, vcpuid, "RTC offset %#x set to %#x",
+			    offset, *val);
+			*((uint8_t *)rtc + offset) = *val;
+			break;
+		}
+
+		/*
+		 * XXX some guests (e.g. OpenBSD) write the century byte
+		 * outside of RTCSB_HALT so re-calculate the RTC date/time.
+		 */
+		if (offset == RTC_CENTURY && !rtc_halted(vrtc)) {
+			curtime = rtc_to_secs(vrtc);
+			error = vrtc_time_update(vrtc, curtime, sbinuptime());
+			KASSERT(!error, ("vrtc_time_update error %d", error));
+			if (curtime == VRTC_BROKEN_TIME && rtc_flag_broken_time)
+				error = -1;
+		}
+	}
+	VRTC_UNLOCK(vrtc);
+	return (error);
+}
+
+void
+vrtc_reset(struct vrtc *vrtc)
+{
+	struct rtcdev *rtc;
+
+	VRTC_LOCK(vrtc);
+
+	rtc = &vrtc->rtcdev;
+	vrtc_set_reg_b(vrtc, rtc->reg_b & ~(RTCSB_ALL_INTRS | RTCSB_SQWE));
+	vrtc_set_reg_c(vrtc, 0);
+	KASSERT(!callout_active(&vrtc->callout), ("rtc callout still active"));
+
+	VRTC_UNLOCK(vrtc);
+}
+
+struct vrtc *
+vrtc_init(struct vm *vm)
+{
+	struct vrtc *vrtc;
+	struct rtcdev *rtc;
+	time_t curtime;
+
+	vrtc = malloc(sizeof(struct vrtc), M_VRTC, M_WAITOK | M_ZERO);
+	vrtc->vm = vm;
+	mtx_init(&vrtc->mtx, "vrtc lock", NULL, MTX_DEF);
+	callout_init(&vrtc->callout, 1);
+
+	/* Allow dividers to keep time but disable everything else */
+	rtc = &vrtc->rtcdev;
+	rtc->reg_a = 0x20;
+	rtc->reg_b = RTCSB_24HR;
+	rtc->reg_c = 0;
+	rtc->reg_d = RTCSD_PWR;
+
+	/* Reset the index register to a safe value. */
+	vrtc->addr = RTC_STATUSD;
+
+	/*
+	 * Initialize RTC time to 00:00:00 Jan 1, 1970.
+	 */
+	curtime = 0;
+
+	VRTC_LOCK(vrtc);
+	vrtc->base_rtctime = VRTC_BROKEN_TIME;
+	vrtc_time_update(vrtc, curtime, sbinuptime());
+	secs_to_rtc(curtime, vrtc, 0);
+	VRTC_UNLOCK(vrtc);
+
+	return (vrtc);
+}
+
+void
+vrtc_cleanup(struct vrtc *vrtc)
+{
+
+	callout_drain(&vrtc->callout);
+	free(vrtc, M_VRTC);
+}


Property changes on: trunk/sys/amd64/vmm/io/vrtc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/io/vrtc.h
===================================================================
--- trunk/sys/amd64/vmm/io/vrtc.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/io/vrtc.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,51 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Neel Natu (neel at freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/io/vrtc.h 284894 2015-06-27 22:48:22Z neel $
+ */
+
+#ifndef _VRTC_H_
+#define	_VRTC_H_
+
+#include <isa/isareg.h>
+
+struct vrtc;
+
+struct vrtc *vrtc_init(struct vm *vm);
+void vrtc_cleanup(struct vrtc *vrtc);
+void vrtc_reset(struct vrtc *vrtc);
+
+time_t vrtc_get_time(struct vm *vm);
+int vrtc_set_time(struct vm *vm, time_t secs);
+int vrtc_nvram_write(struct vm *vm, int offset, uint8_t value);
+int vrtc_nvram_read(struct vm *vm, int offset, uint8_t *retval);
+
+int vrtc_addr_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *val);
+int vrtc_data_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *val);
+
+#endif


Property changes on: trunk/sys/amd64/vmm/io/vrtc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm.c
===================================================================
--- trunk/sys/amd64/vmm/vmm.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,2595 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm.c 306472 2016-09-30 01:42:29Z jhb $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm.c 306472 2016-09-30 01:42:29Z jhb $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+#include <sys/malloc.h>
+#include <sys/pcpu.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_param.h>
+
+#include <machine/cpu.h>
+#include <machine/vm.h>
+#include <machine/pcb.h>
+#include <machine/smp.h>
+#include <x86/psl.h>
+#include <x86/apicreg.h>
+#include <machine/vmparam.h>
+
+#include <machine/vmm.h>
+#include <machine/vmm_dev.h>
+#include <machine/vmm_instruction_emul.h>
+
+#include "vmm_ioport.h"
+#include "vmm_ktr.h"
+#include "vmm_host.h"
+#include "vmm_mem.h"
+#include "vmm_util.h"
+#include "vatpic.h"
+#include "vatpit.h"
+#include "vhpet.h"
+#include "vioapic.h"
+#include "vlapic.h"
+#include "vpmtmr.h"
+#include "vrtc.h"
+#include "vmm_ipi.h"
+#include "vmm_stat.h"
+#include "vmm_lapic.h"
+
+#include "io/ppt.h"
+#include "io/iommu.h"
+
+struct vlapic;
+
+/*
+ * Initialization:
+ * (a) allocated when vcpu is created
+ * (i) initialized when vcpu is created and when it is reinitialized
+ * (o) initialized the first time the vcpu is created
+ * (x) initialized before use
+ */
+struct vcpu {
+	struct mtx 	mtx;		/* (o) protects 'state' and 'hostcpu' */
+	enum vcpu_state	state;		/* (o) vcpu state */
+	int		hostcpu;	/* (o) vcpu's host cpu */
+	int		reqidle;	/* (i) request vcpu to idle */
+	struct vlapic	*vlapic;	/* (i) APIC device model */
+	enum x2apic_state x2apic_state;	/* (i) APIC mode */
+	uint64_t	exitintinfo;	/* (i) events pending at VM exit */
+	int		nmi_pending;	/* (i) NMI pending */
+	int		extint_pending;	/* (i) INTR pending */
+	int	exception_pending;	/* (i) exception pending */
+	int	exc_vector;		/* (x) exception collateral */
+	int	exc_errcode_valid;
+	uint32_t exc_errcode;
+	struct savefpu	*guestfpu;	/* (a,i) guest fpu state */
+	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
+	void		*stats;		/* (a,i) statistics */
+	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
+	uint64_t	nextrip;	/* (x) next instruction to execute */
+};
+
+#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
+#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
+#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
+#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
+#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)
+
+struct mem_seg {
+	size_t	len;
+	bool	sysmem;
+	struct vm_object *object;
+};
+#define	VM_MAX_MEMSEGS	2
+
+struct mem_map {
+	vm_paddr_t	gpa;
+	size_t		len;
+	vm_ooffset_t	segoff;
+	int		segid;
+	int		prot;
+	int		flags;
+};
+#define	VM_MAX_MEMMAPS	4
+
+/*
+ * Initialization:
+ * (o) initialized the first time the VM is created
+ * (i) initialized when VM is created and when it is reinitialized
+ * (x) initialized before use
+ */
+struct vm {
+	void		*cookie;		/* (i) cpu-specific data */
+	void		*iommu;			/* (x) iommu-specific data */
+	struct vhpet	*vhpet;			/* (i) virtual HPET */
+	struct vioapic	*vioapic;		/* (i) virtual ioapic */
+	struct vatpic	*vatpic;		/* (i) virtual atpic */
+	struct vatpit	*vatpit;		/* (i) virtual atpit */
+	struct vpmtmr	*vpmtmr;		/* (i) virtual ACPI PM timer */
+	struct vrtc	*vrtc;			/* (o) virtual RTC */
+	volatile cpuset_t active_cpus;		/* (i) active vcpus */
+	int		suspend;		/* (i) stop VM execution */
+	volatile cpuset_t suspended_cpus; 	/* (i) suspended vcpus */
+	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
+	cpuset_t	rendezvous_req_cpus;	/* (x) rendezvous requested */
+	cpuset_t	rendezvous_done_cpus;	/* (x) rendezvous finished */
+	void		*rendezvous_arg;	/* (x) rendezvous func/arg */
+	vm_rendezvous_func_t rendezvous_func;
+	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
+	struct mem_map	mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
+	struct mem_seg	mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
+	struct vmspace	*vmspace;		/* (o) guest's address space */
+	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
+	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
+};
+
+static int vmm_initialized;
+
+static struct vmm_ops *ops;
+#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
+#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
+#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)
+
+#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
+#define	VMRUN(vmi, vcpu, rip, pmap, evinfo) \
+	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo) : ENXIO)
+#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
+#define	VMSPACE_ALLOC(min, max) \
+	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
+#define	VMSPACE_FREE(vmspace) \
+	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
+#define	VMGETREG(vmi, vcpu, num, retval)		\
+	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
+#define	VMSETREG(vmi, vcpu, num, val)		\
+	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
+#define	VMGETDESC(vmi, vcpu, num, desc)		\
+	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
+#define	VMSETDESC(vmi, vcpu, num, desc)		\
+	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
+#define	VMGETCAP(vmi, vcpu, num, retval)	\
+	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
+#define	VMSETCAP(vmi, vcpu, num, val)		\
+	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
+#define	VLAPIC_INIT(vmi, vcpu)			\
+	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
+#define	VLAPIC_CLEANUP(vmi, vlapic)		\
+	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)
+
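The VMM_*/VM* macros above funnel every operation through the backend 'vmm_ops' table and fall back to a harmless default (0, NULL or ENXIO) when no backend was selected at init time. A minimal userland sketch of that guarded function-pointer dispatch; the names here (backend_ops, fake_getreg) are illustrative and not part of the source:

#include <errno.h>
#include <stdio.h>

struct backend_ops {
	int (*getreg)(void *cookie, int vcpu, int reg, unsigned long *val);
};

static struct backend_ops *ops;		/* NULL until a backend is probed */

/* Same shape as VMGETREG(): dispatch if a backend exists, else ENXIO. */
#define	BACKEND_GETREG(cookie, vcpu, reg, val)				\
	(ops != NULL ? (*ops->getreg)(cookie, vcpu, reg, val) : ENXIO)

static int
fake_getreg(void *cookie, int vcpu, int reg, unsigned long *val)
{
	*val = 0x1234;
	return (0);
}

static struct backend_ops fake_ops = { fake_getreg };

int
main(void)
{
	unsigned long val;

	printf("no backend -> %d\n", BACKEND_GETREG(NULL, 0, 0, &val));
	ops = &fake_ops;
	printf("backend -> %d, val %#lx\n", BACKEND_GETREG(NULL, 0, 0, &val), val);
	return (0);
}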
+#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
+#define	fpu_stop_emulating()	clts()
+
+static MALLOC_DEFINE(M_VM, "vm", "vm");
+
+/* statistics */
+static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
+
+SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
+
+/*
+ * Halt the guest if all vcpus are executing a HLT instruction with
+ * interrupts disabled.
+ */
+static int halt_detection_enabled = 1;
+TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
+SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
+    &halt_detection_enabled, 0,
+    "Halt VM if all vcpus execute HLT with interrupts disabled");
+
+static int vmm_ipinum;
+SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
+    "IPI vector used for vcpu notifications");
+
+static int trace_guest_exceptions;
+SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
+    &trace_guest_exceptions, 0,
+    "Trap into hypervisor on all guest exceptions and reflect them back");
+
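hw.vmm.halt_detection above is registered as a loader tunable (TUNABLE_INT), so it has to be set before vmm.ko initializes rather than at runtime. A hypothetical /boot/loader.conf fragment; the value shown is only an example:

# /boot/loader.conf
hw.vmm.halt_detection="0"   # do not suspend the VM when every vcpu halts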
+static void vm_free_memmap(struct vm *vm, int ident);
+static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
+static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
+
+#ifdef KTR
+static const char *
+vcpu_state2str(enum vcpu_state state)
+{
+
+	switch (state) {
+	case VCPU_IDLE:
+		return ("idle");
+	case VCPU_FROZEN:
+		return ("frozen");
+	case VCPU_RUNNING:
+		return ("running");
+	case VCPU_SLEEPING:
+		return ("sleeping");
+	default:
+		return ("unknown");
+	}
+}
+#endif
+
+static void
+vcpu_cleanup(struct vm *vm, int i, bool destroy)
+{
+	struct vcpu *vcpu = &vm->vcpu[i];
+
+	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
+	if (destroy) {
+		vmm_stat_free(vcpu->stats);	
+		fpu_save_area_free(vcpu->guestfpu);
+	}
+}
+
+static void
+vcpu_init(struct vm *vm, int vcpu_id, bool create)
+{
+	struct vcpu *vcpu;
+
+	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
+	    ("vcpu_init: invalid vcpu %d", vcpu_id));
+	  
+	vcpu = &vm->vcpu[vcpu_id];
+
+	if (create) {
+		KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
+		    "initialized", vcpu_id));
+		vcpu_lock_init(vcpu);
+		vcpu->state = VCPU_IDLE;
+		vcpu->hostcpu = NOCPU;
+		vcpu->guestfpu = fpu_save_area_alloc();
+		vcpu->stats = vmm_stat_alloc();
+	}
+
+	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
+	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
+	vcpu->reqidle = 0;
+	vcpu->exitintinfo = 0;
+	vcpu->nmi_pending = 0;
+	vcpu->extint_pending = 0;
+	vcpu->exception_pending = 0;
+	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
+	fpu_save_area_reset(vcpu->guestfpu);
+	vmm_stat_init(vcpu->stats);
+}
+
+int
+vcpu_trace_exceptions(struct vm *vm, int vcpuid)
+{
+
+	return (trace_guest_exceptions);
+}
+
+struct vm_exit *
+vm_exitinfo(struct vm *vm, int cpuid)
+{
+	struct vcpu *vcpu;
+
+	if (cpuid < 0 || cpuid >= VM_MAXCPU)
+		panic("vm_exitinfo: invalid cpuid %d", cpuid);
+
+	vcpu = &vm->vcpu[cpuid];
+
+	return (&vcpu->exitinfo);
+}
+
+static void
+vmm_resume(void)
+{
+	VMM_RESUME();
+}
+
+static int
+vmm_init(void)
+{
+	int error;
+
+	vmm_host_state_init();
+
+	vmm_ipinum = vmm_ipi_alloc();
+	if (vmm_ipinum == 0)
+		vmm_ipinum = IPI_AST;
+
+	error = vmm_mem_init();
+	if (error)
+		return (error);
+	
+	if (vmm_is_intel())
+		ops = &vmm_ops_intel;
+	else if (vmm_is_amd())
+		ops = &vmm_ops_amd;
+	else
+		return (ENXIO);
+
+	vmm_resume_p = vmm_resume;
+
+	return (VMM_INIT(vmm_ipinum));
+}
+
+static int
+vmm_handler(module_t mod, int what, void *arg)
+{
+	int error;
+
+	switch (what) {
+	case MOD_LOAD:
+		vmmdev_init();
+		error = vmm_init();
+		if (error == 0)
+			vmm_initialized = 1;
+		break;
+	case MOD_UNLOAD:
+		error = vmmdev_cleanup();
+		if (error == 0) {
+			vmm_resume_p = NULL;
+			iommu_cleanup();
+			if (vmm_ipinum != IPI_AST)
+				vmm_ipi_free(vmm_ipinum);
+			error = VMM_CLEANUP();
+			/*
+			 * Something bad happened - prevent new
+			 * VMs from being created
+			 */
+			if (error)
+				vmm_initialized = 0;
+		}
+		break;
+	default:
+		error = 0;
+		break;
+	}
+	return (error);
+}
+
+static moduledata_t vmm_kmod = {
+	"vmm",
+	vmm_handler,
+	NULL
+};
+
+/*
+ * vmm initialization has the following dependencies:
+ *
+ * - VT-x initialization requires smp_rendezvous() and therefore must happen
+ *   after SMP is fully functional (after SI_SUB_SMP).
+ */
+DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
+MODULE_VERSION(vmm, 1);
+
+static void
+vm_init(struct vm *vm, bool create)
+{
+	int i;
+
+	vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
+	vm->iommu = NULL;
+	vm->vioapic = vioapic_init(vm);
+	vm->vhpet = vhpet_init(vm);
+	vm->vatpic = vatpic_init(vm);
+	vm->vatpit = vatpit_init(vm);
+	vm->vpmtmr = vpmtmr_init(vm);
+	if (create)
+		vm->vrtc = vrtc_init(vm);
+
+	CPU_ZERO(&vm->active_cpus);
+
+	vm->suspend = 0;
+	CPU_ZERO(&vm->suspended_cpus);
+
+	for (i = 0; i < VM_MAXCPU; i++)
+		vcpu_init(vm, i, create);
+}
+
+int
+vm_create(const char *name, struct vm **retvm)
+{
+	struct vm *vm;
+	struct vmspace *vmspace;
+
+	/*
+	 * If vmm.ko could not be successfully initialized then don't attempt
+	 * to create the virtual machine.
+	 */
+	if (!vmm_initialized)
+		return (ENXIO);
+
+	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
+		return (EINVAL);
+
+	vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
+	if (vmspace == NULL)
+		return (ENOMEM);
+
+	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
+	strcpy(vm->name, name);
+	vm->vmspace = vmspace;
+	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
+
+	vm_init(vm, true);
+
+	*retvm = vm;
+	return (0);
+}
+
+static void
+vm_cleanup(struct vm *vm, bool destroy)
+{
+	struct mem_map *mm;
+	int i;
+
+	ppt_unassign_all(vm);
+
+	if (vm->iommu != NULL)
+		iommu_destroy_domain(vm->iommu);
+
+	if (destroy)
+		vrtc_cleanup(vm->vrtc);
+	else
+		vrtc_reset(vm->vrtc);
+	vpmtmr_cleanup(vm->vpmtmr);
+	vatpit_cleanup(vm->vatpit);
+	vhpet_cleanup(vm->vhpet);
+	vatpic_cleanup(vm->vatpic);
+	vioapic_cleanup(vm->vioapic);
+
+	for (i = 0; i < VM_MAXCPU; i++)
+		vcpu_cleanup(vm, i, destroy);
+
+	VMCLEANUP(vm->cookie);
+
+	/*
+	 * System memory is removed from the guest address space only when
+	 * the VM is destroyed. This is because the mapping remains the same
+	 * across VM reset.
+	 *
+	 * Device memory can be relocated by the guest (e.g. using PCI BARs)
+	 * so those mappings are removed on a VM reset.
+	 */
+	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+		mm = &vm->mem_maps[i];
+		if (destroy || !sysmem_mapping(vm, mm))
+			vm_free_memmap(vm, i);
+	}
+
+	if (destroy) {
+		for (i = 0; i < VM_MAX_MEMSEGS; i++)
+			vm_free_memseg(vm, i);
+
+		VMSPACE_FREE(vm->vmspace);
+		vm->vmspace = NULL;
+	}
+}
+
+void
+vm_destroy(struct vm *vm)
+{
+	vm_cleanup(vm, true);
+	free(vm, M_VM);
+}
+
+int
+vm_reinit(struct vm *vm)
+{
+	int error;
+
+	/*
+	 * A virtual machine can be reset only if all vcpus are suspended.
+	 */
+	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
+		vm_cleanup(vm, false);
+		vm_init(vm, false);
+		error = 0;
+	} else {
+		error = EBUSY;
+	}
+
+	return (error);
+}
+
+const char *
+vm_name(struct vm *vm)
+{
+	return (vm->name);
+}
+
+int
+vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
+{
+	vm_object_t obj;
+
+	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
+		return (ENOMEM);
+	else
+		return (0);
+}
+
+int
+vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
+{
+
+	vmm_mmio_free(vm->vmspace, gpa, len);
+	return (0);
+}
+
+/*
+ * Return 'true' if 'gpa' is allocated in the guest address space.
+ *
+ * This function is called in the context of a running vcpu which acts as
+ * an implicit lock on 'vm->mem_maps[]'.
+ */
+bool
+vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
+{
+	struct mem_map *mm;
+	int i;
+
+#ifdef INVARIANTS
+	int hostcpu, state;
+	state = vcpu_get_state(vm, vcpuid, &hostcpu);
+	KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
+	    ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
+#endif
+
+	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+		mm = &vm->mem_maps[i];
+		if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
+			return (true);		/* 'gpa' is sysmem or devmem */
+	}
+
+	if (ppt_is_mmio(vm, gpa))
+		return (true);			/* 'gpa' is pci passthru mmio */
+
+	return (false);
+}
+
+int
+vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
+{
+	struct mem_seg *seg;
+	vm_object_t obj;
+
+	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
+		return (EINVAL);
+
+	if (len == 0 || (len & PAGE_MASK))
+		return (EINVAL);
+
+	seg = &vm->mem_segs[ident];
+	if (seg->object != NULL) {
+		if (seg->len == len && seg->sysmem == sysmem)
+			return (EEXIST);
+		else
+			return (EINVAL);
+	}
+
+	obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT);
+	if (obj == NULL)
+		return (ENOMEM);
+
+	seg->len = len;
+	seg->object = obj;
+	seg->sysmem = sysmem;
+	return (0);
+}
+
+int
+vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
+    vm_object_t *objptr)
+{
+	struct mem_seg *seg;
+
+	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
+		return (EINVAL);
+
+	seg = &vm->mem_segs[ident];
+	if (len)
+		*len = seg->len;
+	if (sysmem)
+		*sysmem = seg->sysmem;
+	if (objptr)
+		*objptr = seg->object;
+	return (0);
+}
+
+void
+vm_free_memseg(struct vm *vm, int ident)
+{
+	struct mem_seg *seg;
+
+	KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
+	    ("%s: invalid memseg ident %d", __func__, ident));
+
+	seg = &vm->mem_segs[ident];
+	if (seg->object != NULL) {
+		vm_object_deallocate(seg->object);
+		bzero(seg, sizeof(struct mem_seg));
+	}
+}
+
+int
+vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
+    size_t len, int prot, int flags)
+{
+	struct mem_seg *seg;
+	struct mem_map *m, *map;
+	vm_ooffset_t last;
+	int i, error;
+
+	if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
+		return (EINVAL);
+
+	if (flags & ~VM_MEMMAP_F_WIRED)
+		return (EINVAL);
+
+	if (segid < 0 || segid >= VM_MAX_MEMSEGS)
+		return (EINVAL);
+
+	seg = &vm->mem_segs[segid];
+	if (seg->object == NULL)
+		return (EINVAL);
+
+	last = first + len;
+	if (first < 0 || first >= last || last > seg->len)
+		return (EINVAL);
+
+	if ((gpa | first | last) & PAGE_MASK)
+		return (EINVAL);
+
+	map = NULL;
+	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+		m = &vm->mem_maps[i];
+		if (m->len == 0) {
+			map = m;
+			break;
+		}
+	}
+
+	if (map == NULL)
+		return (ENOSPC);
+
+	error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
+	    len, 0, VMFS_NO_SPACE, prot, prot, 0);
+	if (error != KERN_SUCCESS)
+		return (EFAULT);
+
+	vm_object_reference(seg->object);
+
+	if (flags & VM_MEMMAP_F_WIRED) {
+		error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
+		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+		if (error != KERN_SUCCESS) {
+			vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
+			return (EFAULT);
+		}
+	}
+
+	map->gpa = gpa;
+	map->len = len;
+	map->segoff = first;
+	map->segid = segid;
+	map->prot = prot;
+	map->flags = flags;
+	return (0);
+}
+
+int
+vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
+    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
+{
+	struct mem_map *mm, *mmnext;
+	int i;
+
+	mmnext = NULL;
+	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+		mm = &vm->mem_maps[i];
+		if (mm->len == 0 || mm->gpa < *gpa)
+			continue;
+		if (mmnext == NULL || mm->gpa < mmnext->gpa)
+			mmnext = mm;
+	}
+
+	if (mmnext != NULL) {
+		*gpa = mmnext->gpa;
+		if (segid)
+			*segid = mmnext->segid;
+		if (segoff)
+			*segoff = mmnext->segoff;
+		if (len)
+			*len = mmnext->len;
+		if (prot)
+			*prot = mmnext->prot;
+		if (flags)
+			*flags = mmnext->flags;
+		return (0);
+	} else {
+		return (ENOENT);
+	}
+}
+
+static void
+vm_free_memmap(struct vm *vm, int ident)
+{
+	struct mem_map *mm;
+	int error;
+
+	mm = &vm->mem_maps[ident];
+	if (mm->len) {
+		error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
+		    mm->gpa + mm->len);
+		KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
+		    __func__, error));
+		bzero(mm, sizeof(struct mem_map));
+	}
+}
+
+static __inline bool
+sysmem_mapping(struct vm *vm, struct mem_map *mm)
+{
+
+	if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
+		return (true);
+	else
+		return (false);
+}
+
+static vm_paddr_t
+sysmem_maxaddr(struct vm *vm)
+{
+	struct mem_map *mm;
+	vm_paddr_t maxaddr;
+	int i;
+
+	maxaddr = 0;
+	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+		mm = &vm->mem_maps[i];
+		if (sysmem_mapping(vm, mm)) {
+			if (maxaddr < mm->gpa + mm->len)
+				maxaddr = mm->gpa + mm->len;
+		}
+	}
+	return (maxaddr);
+}
+
+static void
+vm_iommu_modify(struct vm *vm, boolean_t map)
+{
+	int i, sz;
+	vm_paddr_t gpa, hpa;
+	struct mem_map *mm;
+	void *vp, *cookie, *host_domain;
+
+	sz = PAGE_SIZE;
+	host_domain = iommu_host_domain();
+
+	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+		mm = &vm->mem_maps[i];
+		if (!sysmem_mapping(vm, mm))
+			continue;
+
+		if (map) {
+			KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
+			    ("iommu map found invalid memmap %#lx/%#lx/%#x",
+			    mm->gpa, mm->len, mm->flags));
+			if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
+				continue;
+			mm->flags |= VM_MEMMAP_F_IOMMU;
+		} else {
+			if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
+				continue;
+			mm->flags &= ~VM_MEMMAP_F_IOMMU;
+			KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
+			    ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
+			    mm->gpa, mm->len, mm->flags));
+		}
+
+		gpa = mm->gpa;
+		while (gpa < mm->gpa + mm->len) {
+			vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
+					 &cookie);
+			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
+			    vm_name(vm), gpa));
+
+			vm_gpa_release(cookie);
+
+			hpa = DMAP_TO_PHYS((uintptr_t)vp);
+			if (map) {
+				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
+				iommu_remove_mapping(host_domain, hpa, sz);
+			} else {
+				iommu_remove_mapping(vm->iommu, gpa, sz);
+				iommu_create_mapping(host_domain, hpa, hpa, sz);
+			}
+
+			gpa += PAGE_SIZE;
+		}
+	}
+
+	/*
+	 * Invalidate the cached translations associated with the domain
+	 * from which pages were removed.
+	 */
+	if (map)
+		iommu_invalidate_tlb(host_domain);
+	else
+		iommu_invalidate_tlb(vm->iommu);
+}
+
+#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
+#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)
+
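vm_iommu_modify() above walks every wired system-memory mapping one page at a time, because consecutive guest-physical pages may live in unrelated host pages; each page therefore gets its own IOMMU mapping. A toy walk over a range at page granularity, with a made-up translate() standing in for the real vm_gpa_hold()/DMAP_TO_PHYS() lookup:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define	PAGE_SIZE	4096UL

/* Pretend gpa -> hpa translation; a fixed offset stands in for the real lookup. */
static uint64_t
translate(uint64_t gpa)
{
	return (gpa + 0x100000);
}

/* Program a mapping for each page in [gpa, gpa + len). */
static void
map_range(uint64_t gpa, uint64_t len)
{
	uint64_t end = gpa + len;

	while (gpa < end) {
		printf("map gpa %#" PRIx64 " -> hpa %#" PRIx64 "\n",
		    gpa, translate(gpa));
		gpa += PAGE_SIZE;
	}
}

int
main(void)
{
	map_range(0, 3 * PAGE_SIZE);
	return (0);
}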
+int
+vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
+{
+	int error;
+
+	error = ppt_unassign_device(vm, bus, slot, func);
+	if (error)
+		return (error);
+
+	if (ppt_assigned_devices(vm) == 0)
+		vm_iommu_unmap(vm);
+
+	return (0);
+}
+
+int
+vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
+{
+	int error;
+	vm_paddr_t maxaddr;
+
+	/* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
+	if (ppt_assigned_devices(vm) == 0) {
+		KASSERT(vm->iommu == NULL,
+		    ("vm_assign_pptdev: iommu must be NULL"));
+		maxaddr = sysmem_maxaddr(vm);
+		vm->iommu = iommu_create_domain(maxaddr);
+		if (vm->iommu == NULL)
+			return (ENXIO);
+		vm_iommu_map(vm);
+	}
+
+	error = ppt_assign_device(vm, bus, slot, func);
+	return (error);
+}
+
+void *
+vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
+	    void **cookie)
+{
+	int i, count, pageoff;
+	struct mem_map *mm;
+	vm_page_t m;
+#ifdef INVARIANTS
+	/*
+	 * All vcpus are frozen by ioctls that modify the memory map
+	 * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->mem_maps[]' stability is
+	 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
+	 */
+	int state;
+	KASSERT(vcpuid >= -1 && vcpuid < VM_MAXCPU, ("%s: invalid vcpuid %d",
+	    __func__, vcpuid));
+	for (i = 0; i < VM_MAXCPU; i++) {
+		if (vcpuid != -1 && vcpuid != i)
+			continue;
+		state = vcpu_get_state(vm, i, NULL);
+		KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
+		    __func__, state));
+	}
+#endif
+	pageoff = gpa & PAGE_MASK;
+	if (len > PAGE_SIZE - pageoff)
+		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
+
+	count = 0;
+	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+		mm = &vm->mem_maps[i];
+		if (sysmem_mapping(vm, mm) && gpa >= mm->gpa &&
+		    gpa < mm->gpa + mm->len) {
+			count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
+			    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
+			break;
+		}
+	}
+
+	if (count == 1) {
+		*cookie = m;
+		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
+	} else {
+		*cookie = NULL;
+		return (NULL);
+	}
+}
+
+void
+vm_gpa_release(void *cookie)
+{
+	vm_page_t m = cookie;
+
+	vm_page_lock(m);
+	vm_page_unhold(m);
+	vm_page_unlock(m);
+}
+
+int
+vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
+{
+
+	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (reg >= VM_REG_LAST)
+		return (EINVAL);
+
+	return (VMGETREG(vm->cookie, vcpu, reg, retval));
+}
+
+int
+vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
+{
+	struct vcpu *vcpu;
+	int error;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (reg >= VM_REG_LAST)
+		return (EINVAL);
+
+	error = VMSETREG(vm->cookie, vcpuid, reg, val);
+	if (error || reg != VM_REG_GUEST_RIP)
+		return (error);
+
+	/* Set 'nextrip' to match the value of %rip */
+	VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
+	vcpu = &vm->vcpu[vcpuid];
+	vcpu->nextrip = val;
+	return (0);
+}
+
+static boolean_t
+is_descriptor_table(int reg)
+{
+
+	switch (reg) {
+	case VM_REG_GUEST_IDTR:
+	case VM_REG_GUEST_GDTR:
+		return (TRUE);
+	default:
+		return (FALSE);
+	}
+}
+
+static boolean_t
+is_segment_register(int reg)
+{
+	
+	switch (reg) {
+	case VM_REG_GUEST_ES:
+	case VM_REG_GUEST_CS:
+	case VM_REG_GUEST_SS:
+	case VM_REG_GUEST_DS:
+	case VM_REG_GUEST_FS:
+	case VM_REG_GUEST_GS:
+	case VM_REG_GUEST_TR:
+	case VM_REG_GUEST_LDTR:
+		return (TRUE);
+	default:
+		return (FALSE);
+	}
+}
+
+int
+vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
+		struct seg_desc *desc)
+{
+
+	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (!is_segment_register(reg) && !is_descriptor_table(reg))
+		return (EINVAL);
+
+	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
+}
+
+int
+vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
+		struct seg_desc *desc)
+{
+	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (!is_segment_register(reg) && !is_descriptor_table(reg))
+		return (EINVAL);
+
+	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
+}
+
+static void
+restore_guest_fpustate(struct vcpu *vcpu)
+{
+
+	/* flush host state to the pcb */
+	fpuexit(curthread);
+
+	/* restore guest FPU state */
+	fpu_stop_emulating();
+	fpurestore(vcpu->guestfpu);
+
+	/* restore guest XCR0 if XSAVE is enabled in the host */
+	if (rcr4() & CR4_XSAVE)
+		load_xcr(0, vcpu->guest_xcr0);
+
+	/*
+	 * The FPU is now "dirty" with the guest's state so turn on emulation
+	 * to trap any access to the FPU by the host.
+	 */
+	fpu_start_emulating();
+}
+
+static void
+save_guest_fpustate(struct vcpu *vcpu)
+{
+
+	if ((rcr0() & CR0_TS) == 0)
+		panic("fpu emulation not enabled in host!");
+
+	/* save guest XCR0 and restore host XCR0 */
+	if (rcr4() & CR4_XSAVE) {
+		vcpu->guest_xcr0 = rxcr(0);
+		load_xcr(0, vmm_get_host_xcr0());
+	}
+
+	/* save guest FPU state */
+	fpu_stop_emulating();
+	fpusave(vcpu->guestfpu);
+	fpu_start_emulating();
+}
+
+static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
+
+static int
+vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
+    bool from_idle)
+{
+	struct vcpu *vcpu;
+	int error;
+
+	vcpu = &vm->vcpu[vcpuid];
+	vcpu_assert_locked(vcpu);
+
+	/*
+	 * State transitions from the vmmdev_ioctl() must always begin from
+	 * the VCPU_IDLE state. This guarantees that there is only a single
+	 * ioctl() operating on a vcpu at any point.
+	 */
+	if (from_idle) {
+		while (vcpu->state != VCPU_IDLE) {
+			vcpu->reqidle = 1;
+			vcpu_notify_event_locked(vcpu, false);
+			VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
+			    "idle requested", vcpu_state2str(vcpu->state));
+			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
+		}
+	} else {
+		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
+		    "vcpu idle state"));
+	}
+
+	if (vcpu->state == VCPU_RUNNING) {
+		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
+		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
+	} else {
+		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
+		    "vcpu that is not running", vcpu->hostcpu));
+	}
+
+	/*
+	 * The following state transitions are allowed:
+	 * IDLE -> FROZEN -> IDLE
+	 * FROZEN -> RUNNING -> FROZEN
+	 * FROZEN -> SLEEPING -> FROZEN
+	 */
+	switch (vcpu->state) {
+	case VCPU_IDLE:
+	case VCPU_RUNNING:
+	case VCPU_SLEEPING:
+		error = (newstate != VCPU_FROZEN);
+		break;
+	case VCPU_FROZEN:
+		error = (newstate == VCPU_FROZEN);
+		break;
+	default:
+		error = 1;
+		break;
+	}
+
+	if (error)
+		return (EBUSY);
+
+	VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
+	    vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
+
+	vcpu->state = newstate;
+	if (newstate == VCPU_RUNNING)
+		vcpu->hostcpu = curcpu;
+	else
+		vcpu->hostcpu = NOCPU;
+
+	if (newstate == VCPU_IDLE)
+		wakeup(&vcpu->state);
+
+	return (0);
+}
+
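vcpu_set_state_locked() is the single choke point for the vcpu state machine: every path into and out of RUNNING or SLEEPING goes through FROZEN, and only an ioctl moves a vcpu between IDLE and FROZEN. A standalone sketch of just the transition check, without the locking, hostcpu and wakeup bookkeeping (the enum mirrors the kernel's, the function name is illustrative):

#include <stdbool.h>
#include <stdio.h>

enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING };

/*
 * Allowed transitions, as in vcpu_set_state_locked():
 *	IDLE -> FROZEN -> IDLE
 *	FROZEN -> RUNNING -> FROZEN
 *	FROZEN -> SLEEPING -> FROZEN
 */
static bool
transition_ok(enum vcpu_state cur, enum vcpu_state next)
{
	switch (cur) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		return (next == VCPU_FROZEN);
	case VCPU_FROZEN:
		return (next != VCPU_FROZEN);
	default:
		return (false);
	}
}

int
main(void)
{
	printf("IDLE -> FROZEN:    %d\n", transition_ok(VCPU_IDLE, VCPU_FROZEN));
	printf("IDLE -> RUNNING:   %d\n", transition_ok(VCPU_IDLE, VCPU_RUNNING));
	printf("FROZEN -> RUNNING: %d\n", transition_ok(VCPU_FROZEN, VCPU_RUNNING));
	return (0);
}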
+static void
+vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
+{
+	int error;
+
+	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
+		panic("Error %d setting state to %d\n", error, newstate);
+}
+
+static void
+vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
+{
+	int error;
+
+	if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
+		panic("Error %d setting state to %d", error, newstate);
+}
+
+static void
+vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
+{
+
+	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));
+
+	/*
+	 * Update 'rendezvous_func' and execute a write memory barrier to
+	 * ensure that it is visible across all host cpus. This is not needed
+	 * for correctness but it does ensure that all the vcpus will notice
+	 * that the rendezvous is requested immediately.
+	 */
+	vm->rendezvous_func = func;
+	wmb();
+}
+
+#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
+	do {								\
+		if (vcpuid >= 0)					\
+			VCPU_CTR0(vm, vcpuid, fmt);			\
+		else							\
+			VM_CTR0(vm, fmt);				\
+	} while (0)
+
+static void
+vm_handle_rendezvous(struct vm *vm, int vcpuid)
+{
+
+	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
+	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));
+
+	mtx_lock(&vm->rendezvous_mtx);
+	while (vm->rendezvous_func != NULL) {
+		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
+		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
+
+		if (vcpuid != -1 &&
+		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
+		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
+			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
+			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
+			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
+		}
+		if (CPU_CMP(&vm->rendezvous_req_cpus,
+		    &vm->rendezvous_done_cpus) == 0) {
+			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
+			vm_set_rendezvous_func(vm, NULL);
+			wakeup(&vm->rendezvous_func);
+			break;
+		}
+		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
+		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
+		    "vmrndv", 0);
+	}
+	mtx_unlock(&vm->rendezvous_mtx);
+}
+
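vm_handle_rendezvous() is a small barrier: each vcpu in 'rendezvous_req_cpus' runs the callback once, records itself in 'rendezvous_done_cpus', and the vcpu that completes the set clears 'rendezvous_func' and wakes the others. A sketch of that bookkeeping with plain bitmasks and no locking, only to show the shape of the protocol:

#include <stdio.h>

static unsigned req_cpus, done_cpus;	/* stand-ins for the cpuset_t fields */
static void (*rendezvous_func)(int cpu);

static void
rendezvous_action(int cpu)
{
	printf("cpu%d: running rendezvous callback\n", cpu);
}

/* Each participating vcpu calls this until rendezvous_func goes NULL. */
static void
handle_rendezvous(int cpu)
{
	if (rendezvous_func == NULL)
		return;
	if ((req_cpus & (1u << cpu)) && !(done_cpus & (1u << cpu))) {
		(*rendezvous_func)(cpu);
		done_cpus |= 1u << cpu;
	}
	if (done_cpus == req_cpus)
		rendezvous_func = NULL;	/* last cpu completes the rendezvous */
	/* otherwise the real code sleeps on rendezvous_func and re-checks */
}

int
main(void)
{
	req_cpus = 0x3;			/* cpus 0 and 1 must participate */
	rendezvous_func = rendezvous_action;
	handle_rendezvous(0);
	handle_rendezvous(1);
	printf("rendezvous %s\n", rendezvous_func == NULL ? "done" : "pending");
	return (0);
}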
+/*
+ * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
+ */
+static int
+vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
+{
+	struct vcpu *vcpu;
+	const char *wmesg;
+	int t, vcpu_halted, vm_halted;
+
+	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
+
+	vcpu = &vm->vcpu[vcpuid];
+	vcpu_halted = 0;
+	vm_halted = 0;
+
+	vcpu_lock(vcpu);
+	while (1) {
+		/*
+		 * Do a final check for pending NMI or interrupts before
+		 * really putting this thread to sleep. Also check for
+		 * software events that would cause this vcpu to wakeup.
+		 *
+		 * These interrupts/events could have happened after the
+		 * vcpu returned from VMRUN() and before it acquired the
+		 * vcpu lock above.
+		 */
+		if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
+			break;
+		if (vm_nmi_pending(vm, vcpuid))
+			break;
+		if (!intr_disabled) {
+			if (vm_extint_pending(vm, vcpuid) ||
+			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
+				break;
+			}
+		}
+
+		/* Don't go to sleep if the vcpu thread needs to yield */
+		if (vcpu_should_yield(vm, vcpuid))
+			break;
+
+		/*
+		 * Some Linux guests implement "halt" by having all vcpus
+		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
+		 * track of the vcpus that have entered this state. When all
+		 * vcpus enter the halted state the virtual machine is halted.
+		 */
+		if (intr_disabled) {
+			wmesg = "vmhalt";
+			VCPU_CTR0(vm, vcpuid, "Halted");
+			if (!vcpu_halted && halt_detection_enabled) {
+				vcpu_halted = 1;
+				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
+			}
+			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
+				vm_halted = 1;
+				break;
+			}
+		} else {
+			wmesg = "vmidle";
+		}
+
+		t = ticks;
+		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
+		/*
+		 * XXX msleep_spin() cannot be interrupted by signals so
+		 * wake up periodically to check pending signals.
+		 */
+		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
+		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
+		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
+	}
+
+	if (vcpu_halted)
+		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);
+
+	vcpu_unlock(vcpu);
+
+	if (vm_halted)
+		vm_suspend(vm, VM_SUSPEND_HALT);
+
+	return (0);
+}
+
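The halt detection in vm_handle_hlt() is a set comparison: a vcpu that executes HLT with interrupts disabled joins 'halted_cpus', and when that set equals 'active_cpus' the VM is suspended with VM_SUSPEND_HALT. A toy version of the bookkeeping (bitmasks instead of cpuset_t, no sleeping or wakeups):

#include <stdio.h>

static unsigned active_cpus = 0x3;	/* cpus 0 and 1 are active */
static unsigned halted_cpus;

/* Called when a vcpu executes HLT with interrupts disabled. */
static int
vcpu_halt(int cpu)
{
	halted_cpus |= 1u << cpu;
	if (halted_cpus == active_cpus) {
		printf("all vcpus halted: suspend the VM\n");
		return (1);
	}
	printf("cpu%d halted, VM keeps running\n", cpu);
	return (0);
}

int
main(void)
{
	vcpu_halt(0);
	vcpu_halt(1);
	return (0);
}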
+static int
+vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
+{
+	int rv, ftype;
+	struct vm_map *map;
+	struct vcpu *vcpu;
+	struct vm_exit *vme;
+
+	vcpu = &vm->vcpu[vcpuid];
+	vme = &vcpu->exitinfo;
+
+	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
+	    __func__, vme->inst_length));
+
+	ftype = vme->u.paging.fault_type;
+	KASSERT(ftype == VM_PROT_READ ||
+	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
+	    ("vm_handle_paging: invalid fault_type %d", ftype));
+
+	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
+		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
+		    vme->u.paging.gpa, ftype);
+		if (rv == 0) {
+			VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
+			    ftype == VM_PROT_READ ? "accessed" : "dirty",
+			    vme->u.paging.gpa);
+			goto done;
+		}
+	}
+
+	map = &vm->vmspace->vm_map;
+	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);
+
+	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
+	    "ftype = %d", rv, vme->u.paging.gpa, ftype);
+
+	if (rv != KERN_SUCCESS)
+		return (EFAULT);
+done:
+	return (0);
+}
+
+static int
+vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
+{
+	struct vie *vie;
+	struct vcpu *vcpu;
+	struct vm_exit *vme;
+	uint64_t gla, gpa, cs_base;
+	struct vm_guest_paging *paging;
+	mem_region_read_t mread;
+	mem_region_write_t mwrite;
+	enum vm_cpu_mode cpu_mode;
+	int cs_d, error, fault;
+
+	vcpu = &vm->vcpu[vcpuid];
+	vme = &vcpu->exitinfo;
+
+	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
+	    __func__, vme->inst_length));
+
+	gla = vme->u.inst_emul.gla;
+	gpa = vme->u.inst_emul.gpa;
+	cs_base = vme->u.inst_emul.cs_base;
+	cs_d = vme->u.inst_emul.cs_d;
+	vie = &vme->u.inst_emul.vie;
+	paging = &vme->u.inst_emul.paging;
+	cpu_mode = paging->cpu_mode;
+
+	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);
+
+	/* Fetch, decode and emulate the faulting instruction */
+	if (vie->num_valid == 0) {
+		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
+		    cs_base, VIE_INST_SIZE, vie, &fault);
+	} else {
+		/*
+		 * The instruction bytes have already been copied into 'vie'
+		 */
+		error = fault = 0;
+	}
+	if (error || fault)
+		return (error);
+
+	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
+		VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
+		    vme->rip + cs_base);
+		*retu = true;	    /* dump instruction bytes in userspace */
+		return (0);
+	}
+
+	/*
+	 * Update 'nextrip' based on the length of the emulated instruction.
+	 */
+	vme->inst_length = vie->num_processed;
+	vcpu->nextrip += vie->num_processed;
+	VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
+	    "decoding", vcpu->nextrip);
+ 
+	/* return to userland unless this is an in-kernel emulated device */
+	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
+		mread = lapic_mmio_read;
+		mwrite = lapic_mmio_write;
+	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
+		mread = vioapic_mmio_read;
+		mwrite = vioapic_mmio_write;
+	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
+		mread = vhpet_mmio_read;
+		mwrite = vhpet_mmio_write;
+	} else {
+		*retu = true;
+		return (0);
+	}
+
+	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
+	    mread, mwrite, retu);
+
+	return (error);
+}
+
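The tail of vm_handle_inst_emul() chooses an in-kernel handler purely by which fixed guest-physical window the faulting address falls into (local APIC page, I/O APIC, HPET); anything else is bounced to userspace. A compact sketch of that range-based dispatch, where the window bases and sizes are placeholders rather than the hypervisor's constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	APIC_BASE	0xfee00000UL	/* placeholder windows for the sketch */
#define	APIC_SIZE	0x1000UL
#define	IOAPIC_BASE	0xfec00000UL
#define	IOAPIC_SIZE	0x1000UL

typedef int (*mmio_handler_t)(uint64_t gpa, uint64_t *val, int size, bool write);

static int lapic_mmio(uint64_t gpa, uint64_t *val, int size, bool write)  { return (0); }
static int ioapic_mmio(uint64_t gpa, uint64_t *val, int size, bool write) { return (0); }

/* Return the in-kernel handler, or NULL to punt the access to userspace. */
static mmio_handler_t
lookup_mmio(uint64_t gpa)
{
	if (gpa >= APIC_BASE && gpa < APIC_BASE + APIC_SIZE)
		return (lapic_mmio);
	if (gpa >= IOAPIC_BASE && gpa < IOAPIC_BASE + IOAPIC_SIZE)
		return (ioapic_mmio);
	return (NULL);
}

int
main(void)
{
	printf("apic window handled in kernel: %d\n",
	    lookup_mmio(0xfee00030UL) != NULL);
	printf("other gpa goes to userspace:   %d\n",
	    lookup_mmio(0x1000UL) == NULL);
	return (0);
}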
+static int
+vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
+{
+	int i, done;
+	struct vcpu *vcpu;
+
+	done = 0;
+	vcpu = &vm->vcpu[vcpuid];
+
+	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);
+
+	/*
+	 * Wait until all 'active_cpus' have suspended themselves.
+	 *
+	 * Since a VM may be suspended at any time, including when one or
+	 * more vcpus are doing a rendezvous, we need to call the rendezvous
+	 * handler while we are waiting to prevent a deadlock.
+	 */
+	vcpu_lock(vcpu);
+	while (1) {
+		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
+			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
+			break;
+		}
+
+		if (vm->rendezvous_func == NULL) {
+			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
+			vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
+			msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
+			vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
+		} else {
+			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
+			vcpu_unlock(vcpu);
+			vm_handle_rendezvous(vm, vcpuid);
+			vcpu_lock(vcpu);
+		}
+	}
+	vcpu_unlock(vcpu);
+
+	/*
+	 * Wakeup the other sleeping vcpus and return to userspace.
+	 */
+	for (i = 0; i < VM_MAXCPU; i++) {
+		if (CPU_ISSET(i, &vm->suspended_cpus)) {
+			vcpu_notify_event(vm, i, false);
+		}
+	}
+
+	*retu = true;
+	return (0);
+}
+
+static int
+vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
+{
+	struct vcpu *vcpu = &vm->vcpu[vcpuid];
+
+	vcpu_lock(vcpu);
+	KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
+	vcpu->reqidle = 0;
+	vcpu_unlock(vcpu);
+	*retu = true;
+	return (0);
+}
+
+int
+vm_suspend(struct vm *vm, enum vm_suspend_how how)
+{
+	int i;
+
+	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
+		return (EINVAL);
+
+	if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
+		VM_CTR2(vm, "virtual machine already suspended %d/%d",
+		    vm->suspend, how);
+		return (EALREADY);
+	}
+
+	VM_CTR1(vm, "virtual machine successfully suspended %d", how);
+
+	/*
+	 * Notify all active vcpus that they are now suspended.
+	 */
+	for (i = 0; i < VM_MAXCPU; i++) {
+		if (CPU_ISSET(i, &vm->active_cpus))
+			vcpu_notify_event(vm, i, false);
+	}
+
+	return (0);
+}
+
+void
+vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
+{
+	struct vm_exit *vmexit;
+
+	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
+	    ("vm_exit_suspended: invalid suspend type %d", vm->suspend));
+
+	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit->rip = rip;
+	vmexit->inst_length = 0;
+	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
+	vmexit->u.suspended.how = vm->suspend;
+}
+
+void
+vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
+{
+	struct vm_exit *vmexit;
+
+	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));
+
+	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit->rip = rip;
+	vmexit->inst_length = 0;
+	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
+	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
+}
+
+void
+vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
+{
+	struct vm_exit *vmexit;
+
+	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit->rip = rip;
+	vmexit->inst_length = 0;
+	vmexit->exitcode = VM_EXITCODE_REQIDLE;
+	vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
+}
+
+void
+vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
+{
+	struct vm_exit *vmexit;
+
+	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit->rip = rip;
+	vmexit->inst_length = 0;
+	vmexit->exitcode = VM_EXITCODE_BOGUS;
+	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
+}
+
+int
+vm_run(struct vm *vm, struct vm_run *vmrun)
+{
+	struct vm_eventinfo evinfo;
+	int error, vcpuid;
+	struct vcpu *vcpu;
+	struct pcb *pcb;
+	uint64_t tscval;
+	struct vm_exit *vme;
+	bool retu, intr_disabled;
+	pmap_t pmap;
+
+	vcpuid = vmrun->cpuid;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
+		return (EINVAL);
+
+	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
+		return (EINVAL);
+
+	pmap = vmspace_pmap(vm->vmspace);
+	vcpu = &vm->vcpu[vcpuid];
+	vme = &vcpu->exitinfo;
+	evinfo.rptr = &vm->rendezvous_func;
+	evinfo.sptr = &vm->suspend;
+	evinfo.iptr = &vcpu->reqidle;
+restart:
+	critical_enter();
+
+	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
+	    ("vm_run: absurd pm_active"));
+
+	tscval = rdtsc();
+
+	pcb = PCPU_GET(curpcb);
+	set_pcb_flags(pcb, PCB_FULL_IRET);
+
+	restore_guest_fpustate(vcpu);
+
+	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
+	error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
+	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
+
+	save_guest_fpustate(vcpu);
+
+	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
+
+	critical_exit();
+
+	if (error == 0) {
+		retu = false;
+		vcpu->nextrip = vme->rip + vme->inst_length;
+		switch (vme->exitcode) {
+		case VM_EXITCODE_REQIDLE:
+			error = vm_handle_reqidle(vm, vcpuid, &retu);
+			break;
+		case VM_EXITCODE_SUSPENDED:
+			error = vm_handle_suspend(vm, vcpuid, &retu);
+			break;
+		case VM_EXITCODE_IOAPIC_EOI:
+			vioapic_process_eoi(vm, vcpuid,
+			    vme->u.ioapic_eoi.vector);
+			break;
+		case VM_EXITCODE_RENDEZVOUS:
+			vm_handle_rendezvous(vm, vcpuid);
+			error = 0;
+			break;
+		case VM_EXITCODE_HLT:
+			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
+			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
+			break;
+		case VM_EXITCODE_PAGING:
+			error = vm_handle_paging(vm, vcpuid, &retu);
+			break;
+		case VM_EXITCODE_INST_EMUL:
+			error = vm_handle_inst_emul(vm, vcpuid, &retu);
+			break;
+		case VM_EXITCODE_INOUT:
+		case VM_EXITCODE_INOUT_STR:
+			error = vm_handle_inout(vm, vcpuid, vme, &retu);
+			break;
+		case VM_EXITCODE_MONITOR:
+		case VM_EXITCODE_MWAIT:
+			vm_inject_ud(vm, vcpuid);
+			break;
+		default:
+			retu = true;	/* handled in userland */
+			break;
+		}
+	}
+
+	if (error == 0 && retu == false)
+		goto restart;
+
+	VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
+
+	/* copy the exit information */
+	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
+	return (error);
+}
+
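vm_run() is structured as a loop: enter the guest, classify the exit code, handle in the kernel whatever can be handled and re-enter immediately, and set 'retu' only for exits that need the userspace process to act. A skeleton of that control flow with made-up exit codes:

#include <stdbool.h>
#include <stdio.h>

enum exitcode { EXIT_PAGING, EXIT_HLT, EXIT_UNKNOWN };

static int step;

/* Pretend guest execution: one nested page fault, then a halt. */
static enum exitcode
run_guest(void)
{
	return (step++ == 0 ? EXIT_PAGING : EXIT_HLT);
}

int
main(void)
{
	bool retu = false;	/* "return to userland", as in vm_run() */

	while (!retu) {
		switch (run_guest()) {
		case EXIT_PAGING:
			printf("paging exit handled in kernel, re-entering guest\n");
			break;			/* loop and run the guest again */
		case EXIT_HLT:
			printf("hlt exit, returning to userland\n");
			retu = true;
			break;
		default:
			retu = true;		/* unknown exits always go to userland */
			break;
		}
	}
	return (0);
}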
+int
+vm_restart_instruction(void *arg, int vcpuid)
+{
+	struct vm *vm;
+	struct vcpu *vcpu;
+	enum vcpu_state state;
+	uint64_t rip;
+	int error;
+
+	vm = arg;
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	vcpu = &vm->vcpu[vcpuid];
+	state = vcpu_get_state(vm, vcpuid, NULL);
+	if (state == VCPU_RUNNING) {
+		/*
+		 * When a vcpu is "running" the next instruction is determined
+		 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
+		 * Thus setting 'inst_length' to zero will cause the current
+		 * instruction to be restarted.
+		 */
+		vcpu->exitinfo.inst_length = 0;
+		VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
+		    "setting inst_length to zero", vcpu->exitinfo.rip);
+	} else if (state == VCPU_FROZEN) {
+		/*
+		 * When a vcpu is "frozen" it is outside the critical section
+		 * around VMRUN() and 'nextrip' points to the next instruction.
+		 * Thus instruction restart is achieved by setting 'nextrip'
+		 * to the vcpu's %rip.
+		 */
+		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
+		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
+		VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
+		    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
+		vcpu->nextrip = rip;
+	} else {
+		panic("%s: invalid state %d", __func__, state);
+	}
+	return (0);
+}
+
+int
+vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
+{
+	struct vcpu *vcpu;
+	int type, vector;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	if (info & VM_INTINFO_VALID) {
+		type = info & VM_INTINFO_TYPE;
+		vector = info & 0xff;
+		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
+			return (EINVAL);
+		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
+			return (EINVAL);
+		if (info & VM_INTINFO_RSVD)
+			return (EINVAL);
+	} else {
+		info = 0;
+	}
+	VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
+	vcpu->exitintinfo = info;
+	return (0);
+}
+
+enum exc_class {
+	EXC_BENIGN,
+	EXC_CONTRIBUTORY,
+	EXC_PAGEFAULT
+};
+
+#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */
+
+static enum exc_class
+exception_class(uint64_t info)
+{
+	int type, vector;
+
+	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
+	type = info & VM_INTINFO_TYPE;
+	vector = info & 0xff;
+
+	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
+	switch (type) {
+	case VM_INTINFO_HWINTR:
+	case VM_INTINFO_SWINTR:
+	case VM_INTINFO_NMI:
+		return (EXC_BENIGN);
+	default:
+		/*
+		 * Hardware exception.
+		 *
+		 * SVM and VT-x use identical type values to represent NMI,
+		 * hardware interrupt and software interrupt.
+		 *
+		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
+		 * for exceptions except #BP and #OF. #BP and #OF use a type
+		 * value of '5' or '6'. Therefore we don't check for explicit
+		 * values of 'type' to classify 'intinfo' into a hardware
+		 * exception.
+		 */
+		break;
+	}
+
+	switch (vector) {
+	case IDT_PF:
+	case IDT_VE:
+		return (EXC_PAGEFAULT);
+	case IDT_DE:
+	case IDT_TS:
+	case IDT_NP:
+	case IDT_SS:
+	case IDT_GP:
+		return (EXC_CONTRIBUTORY);
+	default:
+		return (EXC_BENIGN);
+	}
+}
+
+static int
+nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
+    uint64_t *retinfo)
+{
+	enum exc_class exc1, exc2;
+	int type1, vector1;
+
+	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
+	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));
+
+	/*
+	 * If an exception occurs while attempting to call the double-fault
+	 * handler the processor enters shutdown mode (aka triple fault).
+	 */
+	type1 = info1 & VM_INTINFO_TYPE;
+	vector1 = info1 & 0xff;
+	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
+		VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
+		    info1, info2);
+		vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
+		*retinfo = 0;
+		return (0);
+	}
+
+	/*
+	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
+	 */
+	exc1 = exception_class(info1);
+	exc2 = exception_class(info2);
+	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
+	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
+		/* Convert nested fault into a double fault. */
+		*retinfo = IDT_DF;
+		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
+		*retinfo |= VM_INTINFO_DEL_ERRCODE;
+	} else {
+		/* Handle exceptions serially */
+		*retinfo = info2;
+	}
+	return (1);
+}
+
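nested_fault() encodes the SDM double-fault rules: a contributory exception on top of another contributory one, or anything non-benign on top of a page fault, collapses into #DF, while a fault raised during #DF delivery suspends the VM as a triple fault; every other combination is delivered serially. A worked sketch of the classification covering only a subset of vectors:

#include <stdio.h>

enum exc_class { EXC_BENIGN, EXC_CONTRIBUTORY, EXC_PAGEFAULT };

#define	IDT_DF	8
#define	IDT_TS	10
#define	IDT_GP	13
#define	IDT_PF	14

static enum exc_class
classify(int vector)
{
	switch (vector) {
	case IDT_PF:
		return (EXC_PAGEFAULT);
	case IDT_TS:
	case IDT_GP:	/* plus #DE, #NP, #SS in the full table */
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}

/* Vector to deliver when exception v2 is raised while delivering v1. */
static int
combine(int v1, int v2)
{
	enum exc_class c1 = classify(v1), c2 = classify(v2);

	if ((c1 == EXC_CONTRIBUTORY && c2 == EXC_CONTRIBUTORY) ||
	    (c1 == EXC_PAGEFAULT && c2 != EXC_BENIGN))
		return (IDT_DF);	/* double fault */
	return (v2);			/* deliver serially */
}

int
main(void)
{
	printf("#GP then #TS -> %d (double fault)\n", combine(IDT_GP, IDT_TS));
	printf("#PF then #GP -> %d (double fault)\n", combine(IDT_PF, IDT_GP));
	printf("#GP then #PF -> %d (serial)\n", combine(IDT_GP, IDT_PF));
	return (0);
}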
+static uint64_t
+vcpu_exception_intinfo(struct vcpu *vcpu)
+{
+	uint64_t info = 0;
+
+	if (vcpu->exception_pending) {
+		info = vcpu->exc_vector & 0xff;
+		info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
+		if (vcpu->exc_errcode_valid) {
+			info |= VM_INTINFO_DEL_ERRCODE;
+			info |= (uint64_t)vcpu->exc_errcode << 32;
+		}
+	}
+	return (info);
+}
+
+int
+vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
+{
+	struct vcpu *vcpu;
+	uint64_t info1, info2;
+	int valid;
+
+	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	info1 = vcpu->exitintinfo;
+	vcpu->exitintinfo = 0;
+
+	info2 = 0;
+	if (vcpu->exception_pending) {
+		info2 = vcpu_exception_intinfo(vcpu);
+		vcpu->exception_pending = 0;
+		VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
+		    vcpu->exc_vector, info2);
+	}
+
+	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
+		valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
+	} else if (info1 & VM_INTINFO_VALID) {
+		*retinfo = info1;
+		valid = 1;
+	} else if (info2 & VM_INTINFO_VALID) {
+		*retinfo = info2;
+		valid = 1;
+	} else {
+		valid = 0;
+	}
+
+	if (valid) {
+		VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
+		    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
+	}
+
+	return (valid);
+}
+
+int
+vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
+{
+	struct vcpu *vcpu;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	vcpu = &vm->vcpu[vcpuid];
+	*info1 = vcpu->exitintinfo;
+	*info2 = vcpu_exception_intinfo(vcpu);
+	return (0);
+}
+
+int
+vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
+    uint32_t errcode, int restart_instruction)
+{
+	struct vcpu *vcpu;
+	uint64_t regval;
+	int error;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (vector < 0 || vector >= 32)
+		return (EINVAL);
+
+	/*
+	 * A double fault exception should never be injected directly into
+	 * the guest. It is a derived exception that results from specific
+	 * combinations of nested faults.
+	 */
+	if (vector == IDT_DF)
+		return (EINVAL);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	if (vcpu->exception_pending) {
+		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
+		    "pending exception %d", vector, vcpu->exc_vector);
+		return (EBUSY);
+	}
+
+	if (errcode_valid) {
+		/*
+		 * Exceptions don't deliver an error code in real mode.
+		 */
+		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
+		KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
+		if (!(regval & CR0_PE))
+			errcode_valid = 0;
+	}
+
+	/*
+	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
+	 *
+	 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
+	 * one instruction or incurs an exception.
+	 */
+	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
+	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
+	    __func__, error));
+
+	if (restart_instruction)
+		vm_restart_instruction(vm, vcpuid);
+
+	vcpu->exception_pending = 1;
+	vcpu->exc_vector = vector;
+	vcpu->exc_errcode = errcode;
+	vcpu->exc_errcode_valid = errcode_valid;
+	VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
+	return (0);
+}
+
+void
+vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
+    int errcode)
+{
+	struct vm *vm;
+	int error, restart_instruction;
+
+	vm = vmarg;
+	restart_instruction = 1;
+
+	error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
+	    errcode, restart_instruction);
+	KASSERT(error == 0, ("vm_inject_exception error %d", error));
+}
+
+void
+vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
+{
+	struct vm *vm;
+	int error;
+
+	vm = vmarg;
+	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
+	    error_code, cr2);
+
+	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
+	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
+
+	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
+}
+
+static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
+
+int
+vm_inject_nmi(struct vm *vm, int vcpuid)
+{
+	struct vcpu *vcpu;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	vcpu->nmi_pending = 1;
+	vcpu_notify_event(vm, vcpuid, false);
+	return (0);
+}
+
+int
+vm_nmi_pending(struct vm *vm, int vcpuid)
+{
+	struct vcpu *vcpu;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	return (vcpu->nmi_pending);
+}
+
+void
+vm_nmi_clear(struct vm *vm, int vcpuid)
+{
+	struct vcpu *vcpu;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	if (vcpu->nmi_pending == 0)
+		panic("vm_nmi_clear: inconsistent nmi_pending state");
+
+	vcpu->nmi_pending = 0;
+	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
+}
+
+static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
+
+int
+vm_inject_extint(struct vm *vm, int vcpuid)
+{
+	struct vcpu *vcpu;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	vcpu->extint_pending = 1;
+	vcpu_notify_event(vm, vcpuid, false);
+	return (0);
+}
+
+int
+vm_extint_pending(struct vm *vm, int vcpuid)
+{
+	struct vcpu *vcpu;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	return (vcpu->extint_pending);
+}
+
+void
+vm_extint_clear(struct vm *vm, int vcpuid)
+{
+	struct vcpu *vcpu;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	if (vcpu->extint_pending == 0)
+		panic("vm_extint_clear: inconsistent extint_pending state");
+
+	vcpu->extint_pending = 0;
+	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
+}
+
+int
+vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
+{
+	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (type < 0 || type >= VM_CAP_MAX)
+		return (EINVAL);
+
+	return (VMGETCAP(vm->cookie, vcpu, type, retval));
+}
+
+int
+vm_set_capability(struct vm *vm, int vcpu, int type, int val)
+{
+	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (type < 0 || type >= VM_CAP_MAX)
+		return (EINVAL);
+
+	return (VMSETCAP(vm->cookie, vcpu, type, val));
+}
+
+struct vlapic *
+vm_lapic(struct vm *vm, int cpu)
+{
+	return (vm->vcpu[cpu].vlapic);
+}
+
+struct vioapic *
+vm_ioapic(struct vm *vm)
+{
+
+	return (vm->vioapic);
+}
+
+struct vhpet *
+vm_hpet(struct vm *vm)
+{
+
+	return (vm->vhpet);
+}
+
+boolean_t
+vmm_is_pptdev(int bus, int slot, int func)
+{
+	int found, i, n;
+	int b, s, f;
+	char *val, *cp, *cp2;
+
+	/*
+	 * XXX
+	 * The length of an environment variable is limited to 128 bytes which
+	 * puts an upper limit on the number of passthru devices that may be
+	 * specified using a single environment variable.
+	 *
+	 * Work around this by scanning multiple environment variable
+	 * names instead of a single one - yuck!
+	 */
+	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };
+
+	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
+	found = 0;
+	for (i = 0; names[i] != NULL && !found; i++) {
+		cp = val = getenv(names[i]);
+		while (cp != NULL && *cp != '\0') {
+			if ((cp2 = strchr(cp, ' ')) != NULL)
+				*cp2 = '\0';
+
+			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
+			if (n == 3 && bus == b && slot == s && func == f) {
+				found = 1;
+				break;
+			}
+		
+			if (cp2 != NULL)
+				*cp2++ = ' ';
+
+			cp = cp2;
+		}
+		freeenv(val);
+	}
+	return (found);
+}
+
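vmm_is_pptdev() matches a device against space-separated bus/slot/function triplets taken from the 'pptdevs', 'pptdevs2' and 'pptdevs3' kernel environment variables (for example pptdevs="1/2/3 4/5/6"). A userland sketch of the same tokenize-and-sscanf matching, using strdup/strtok_r in place of the in-place splitting done on the kernel getenv() copy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return 1 if bus/slot/func appears in a list like "1/2/3 4/5/6 7/8/9". */
static int
pptdev_listed(const char *list, int bus, int slot, int func)
{
	char *copy, *tok, *last;
	int b, s, f, found;

	found = 0;
	copy = strdup(list);		/* tokenizing modifies the buffer */
	for (tok = strtok_r(copy, " ", &last); tok != NULL;
	    tok = strtok_r(NULL, " ", &last)) {
		if (sscanf(tok, "%d/%d/%d", &b, &s, &f) == 3 &&
		    b == bus && s == slot && f == func) {
			found = 1;
			break;
		}
	}
	free(copy);
	return (found);
}

int
main(void)
{
	const char *pptdevs = "1/2/3 4/5/6";	/* what the tunable would hold */

	printf("%d %d\n", pptdev_listed(pptdevs, 4, 5, 6),
	    pptdev_listed(pptdevs, 7, 8, 9));	/* prints "1 0" */
	return (0);
}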
+void *
+vm_iommu_domain(struct vm *vm)
+{
+
+	return (vm->iommu);
+}
+
+int
+vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
+    bool from_idle)
+{
+	int error;
+	struct vcpu *vcpu;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	vcpu_lock(vcpu);
+	error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
+	vcpu_unlock(vcpu);
+
+	return (error);
+}
+
+enum vcpu_state
+vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
+{
+	struct vcpu *vcpu;
+	enum vcpu_state state;
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);
+
+	vcpu = &vm->vcpu[vcpuid];
+
+	vcpu_lock(vcpu);
+	state = vcpu->state;
+	if (hostcpu != NULL)
+		*hostcpu = vcpu->hostcpu;
+	vcpu_unlock(vcpu);
+
+	return (state);
+}
+
+int
+vm_activate_cpu(struct vm *vm, int vcpuid)
+{
+
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (CPU_ISSET(vcpuid, &vm->active_cpus))
+		return (EBUSY);
+
+	VCPU_CTR0(vm, vcpuid, "activated");
+	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
+	return (0);
+}
+
+cpuset_t
+vm_active_cpus(struct vm *vm)
+{
+
+	return (vm->active_cpus);
+}
+
+cpuset_t
+vm_suspended_cpus(struct vm *vm)
+{
+
+	return (vm->suspended_cpus);
+}
+
+void *
+vcpu_stats(struct vm *vm, int vcpuid)
+{
+
+	return (vm->vcpu[vcpuid].stats);
+}
+
+int
+vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
+{
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	*state = vm->vcpu[vcpuid].x2apic_state;
+
+	return (0);
+}
+
+int
+vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
+{
+	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (state >= X2APIC_STATE_LAST)
+		return (EINVAL);
+
+	vm->vcpu[vcpuid].x2apic_state = state;
+
+	vlapic_set_x2apic_state(vm, vcpuid, state);
+
+	return (0);
+}
+
+/*
+ * This function is called to ensure that a vcpu "sees" a pending event
+ * as soon as possible:
+ * - If the vcpu thread is sleeping then it is woken up.
+ * - If the vcpu is running on a different host_cpu then an IPI will be directed
+ *   to the host_cpu to cause the vcpu to trap into the hypervisor.
+ */
+static void
+vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
+{
+	int hostcpu;
+
+	hostcpu = vcpu->hostcpu;
+	if (vcpu->state == VCPU_RUNNING) {
+		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
+		if (hostcpu != curcpu) {
+			if (lapic_intr) {
+				vlapic_post_intr(vcpu->vlapic, hostcpu,
+				    vmm_ipinum);
+			} else {
+				ipi_cpu(hostcpu, vmm_ipinum);
+			}
+		} else {
+			/*
+			 * If the 'vcpu' is running on 'curcpu' then it must
+			 * be sending a notification to itself (e.g. SELF_IPI).
+			 * The pending event will be picked up when the vcpu
+			 * transitions back to guest context.
+			 */
+		}
+	} else {
+		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
+		    "with hostcpu %d", vcpu->state, hostcpu));
+		if (vcpu->state == VCPU_SLEEPING)
+			wakeup_one(vcpu);
+	}
+}
+
+void
+vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
+{
+	struct vcpu *vcpu = &vm->vcpu[vcpuid];
+
+	vcpu_lock(vcpu);
+	vcpu_notify_event_locked(vcpu, lapic_intr);
+	vcpu_unlock(vcpu);
+}
+
+struct vmspace *
+vm_get_vmspace(struct vm *vm)
+{
+
+	return (vm->vmspace);
+}
+
+int
+vm_apicid2vcpuid(struct vm *vm, int apicid)
+{
+	/*
+	 * XXX apic id is assumed to be numerically identical to vcpu id
+	 */
+	return (apicid);
+}
+
+void
+vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
+    vm_rendezvous_func_t func, void *arg)
+{
+	int i;
+
+	/*
+	 * Enforce that this function is called without any locks
+	 */
+	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
+	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
+	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
+
+restart:
+	mtx_lock(&vm->rendezvous_mtx);
+	if (vm->rendezvous_func != NULL) {
+		/*
+		 * If a rendezvous is already in progress then we need to
+		 * call the rendezvous handler in case this 'vcpuid' is one
+		 * of the targets of the rendezvous.
+		 */
+		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
+		mtx_unlock(&vm->rendezvous_mtx);
+		vm_handle_rendezvous(vm, vcpuid);
+		goto restart;
+	}
+	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
+	    "rendezvous is still in progress"));
+
+	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
+	vm->rendezvous_req_cpus = dest;
+	CPU_ZERO(&vm->rendezvous_done_cpus);
+	vm->rendezvous_arg = arg;
+	vm_set_rendezvous_func(vm, func);
+	mtx_unlock(&vm->rendezvous_mtx);
+
+	/*
+	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
+	 * vcpus so they handle the rendezvous as soon as possible.
+	 */
+	for (i = 0; i < VM_MAXCPU; i++) {
+		if (CPU_ISSET(i, &dest))
+			vcpu_notify_event(vm, i, false);
+	}
+
+	vm_handle_rendezvous(vm, vcpuid);
+}
+
+struct vatpic *
+vm_atpic(struct vm *vm)
+{
+	return (vm->vatpic);
+}
+
+struct vatpit *
+vm_atpit(struct vm *vm)
+{
+	return (vm->vatpit);
+}
+
+struct vpmtmr *
+vm_pmtmr(struct vm *vm)
+{
+
+	return (vm->vpmtmr);
+}
+
+struct vrtc *
+vm_rtc(struct vm *vm)
+{
+
+	return (vm->vrtc);
+}
+
+enum vm_reg_name
+vm_segment_name(int seg)
+{
+	static enum vm_reg_name seg_names[] = {
+		VM_REG_GUEST_ES,
+		VM_REG_GUEST_CS,
+		VM_REG_GUEST_SS,
+		VM_REG_GUEST_DS,
+		VM_REG_GUEST_FS,
+		VM_REG_GUEST_GS
+	};
+
+	KASSERT(seg >= 0 && seg < nitems(seg_names),
+	    ("%s: invalid segment encoding %d", __func__, seg));
+	return (seg_names[seg]);
+}
+
+void
+vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
+    int num_copyinfo)
+{
+	int idx;
+
+	for (idx = 0; idx < num_copyinfo; idx++) {
+		if (copyinfo[idx].cookie != NULL)
+			vm_gpa_release(copyinfo[idx].cookie);
+	}
+	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
+}
+
+int
+vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
+    int num_copyinfo, int *fault)
+{
+	int error, idx, nused;
+	size_t n, off, remaining;
+	void *hva, *cookie;
+	uint64_t gpa;
+
+	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);
+
+	nused = 0;
+	remaining = len;
+	while (remaining > 0) {
+		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
+		error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
+		if (error || *fault)
+			return (error);
+		off = gpa & PAGE_MASK;
+		n = min(remaining, PAGE_SIZE - off);
+		copyinfo[nused].gpa = gpa;
+		copyinfo[nused].len = n;
+		remaining -= n;
+		gla += n;
+		nused++;
+	}
+
+	for (idx = 0; idx < nused; idx++) {
+		hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
+		    copyinfo[idx].len, prot, &cookie);
+		if (hva == NULL)
+			break;
+		copyinfo[idx].hva = hva;
+		copyinfo[idx].cookie = cookie;
+	}
+
+	if (idx != nused) {
+		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
+		return (EFAULT);
+	} else {
+		*fault = 0;
+		return (0);
+	}
+}
+
+void
+vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
+    size_t len)
+{
+	char *dst;
+	int idx;
+
+	dst = kaddr;
+	idx = 0;
+	while (len > 0) {
+		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
+		len -= copyinfo[idx].len;
+		dst += copyinfo[idx].len;
+		idx++;
+	}
+}
+
+void
+vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
+    struct vm_copyinfo *copyinfo, size_t len)
+{
+	const char *src;
+	int idx;
+
+	src = kaddr;
+	idx = 0;
+	while (len > 0) {
+		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
+		len -= copyinfo[idx].len;
+		src += copyinfo[idx].len;
+		idx++;
+	}
+}
+
+/*
+ * Return the amount of in-use and wired memory for the VM. Since
+ * these are global stats, only return the values for vCPU 0.
+ */
+VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
+VMM_STAT_DECLARE(VMM_MEM_WIRED);
+
+static void
+vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
+{
+
+	if (vcpu == 0) {
+		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
+		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
+	}
+}
+
+static void
+vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
+{
+
+	if (vcpu == 0) {
+		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
+		    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
+	}
+}
+
+VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
+VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);


Property changes on: trunk/sys/amd64/vmm/vmm.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_dev.c
===================================================================
--- trunk/sys/amd64/vmm/vmm_dev.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_dev.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,984 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_dev.c 312990 2017-01-30 14:34:04Z avg $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_dev.c 312990 2017-01-30 14:34:04Z avg $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/sysctl.h>
+#include <sys/libkern.h>
+#include <sys/ioccom.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+
+#include <machine/vmparam.h>
+#include <machine/vmm.h>
+#include <machine/vmm_instruction_emul.h>
+#include <machine/vmm_dev.h>
+
+#include "vmm_lapic.h"
+#include "vmm_stat.h"
+#include "vmm_mem.h"
+#include "io/ppt.h"
+#include "io/vatpic.h"
+#include "io/vioapic.h"
+#include "io/vhpet.h"
+#include "io/vrtc.h"
+
+struct devmem_softc {
+	int	segid;
+	char	*name;
+	struct cdev *cdev;
+	struct vmmdev_softc *sc;
+	SLIST_ENTRY(devmem_softc) link;
+};
+
+struct vmmdev_softc {
+	struct vm	*vm;		/* vm instance cookie */
+	struct cdev	*cdev;
+	SLIST_ENTRY(vmmdev_softc) link;
+	SLIST_HEAD(, devmem_softc) devmem;
+	int		flags;
+};
+#define	VSC_LINKED		0x01
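+/* VSC_LINKED is set once the softc has been inserted into the global list. */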
+
+static SLIST_HEAD(, vmmdev_softc) head;
+
+static struct mtx vmmdev_mtx;
+
+static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");
+
+SYSCTL_DECL(_hw_vmm);
+
+static int devmem_create_cdev(const char *vmname, int id, char *devmem);
+static void devmem_destroy(void *arg);
+
+static int
+vcpu_lock_one(struct vmmdev_softc *sc, int vcpu)
+{
+	int error;
+
+	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+		return (EINVAL);
+
+	error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
+	return (error);
+}
+
+static void
+vcpu_unlock_one(struct vmmdev_softc *sc, int vcpu)
+{
+	enum vcpu_state state;
+
+	state = vcpu_get_state(sc->vm, vcpu, NULL);
+	if (state != VCPU_FROZEN) {
+		panic("vcpu %s(%d) has invalid state %d", vm_name(sc->vm),
+		    vcpu, state);
+	}
+
+	vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
+}
+
+static int
+vcpu_lock_all(struct vmmdev_softc *sc)
+{
+	int error, vcpu;
+
+	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
+		error = vcpu_lock_one(sc, vcpu);
+		if (error)
+			break;
+	}
+
+	if (error) {
+		while (--vcpu >= 0)
+			vcpu_unlock_one(sc, vcpu);
+	}
+
+	return (error);
+}
+
+static void
+vcpu_unlock_all(struct vmmdev_softc *sc)
+{
+	int vcpu;
+
+	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++)
+		vcpu_unlock_one(sc, vcpu);
+}
+
+static struct vmmdev_softc *
+vmmdev_lookup(const char *name)
+{
+	struct vmmdev_softc *sc;
+
+#ifdef notyet	/* XXX kernel is not compiled with invariants */
+	mtx_assert(&vmmdev_mtx, MA_OWNED);
+#endif
+
+	SLIST_FOREACH(sc, &head, link) {
+		if (strcmp(name, vm_name(sc->vm)) == 0)
+			break;
+	}
+
+	return (sc);
+}
+
+static struct vmmdev_softc *
+vmmdev_lookup2(struct cdev *cdev)
+{
+
+	return (cdev->si_drv1);
+}
+
+static int
+vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
+{
+	int error, off, c, prot;
+	vm_paddr_t gpa;
+	void *hpa, *cookie;
+	struct vmmdev_softc *sc;
+
+	sc = vmmdev_lookup2(cdev);
+	if (sc == NULL)
+		return (ENXIO);
+
+	/*
+	 * Get a read lock on the guest memory map by freezing any vcpu.
+	 */
+	error = vcpu_lock_one(sc, VM_MAXCPU - 1);
+	if (error)
+		return (error);
+
+	prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
+	while (uio->uio_resid > 0 && error == 0) {
+		gpa = uio->uio_offset;
+		off = gpa & PAGE_MASK;
+		c = min(uio->uio_resid, PAGE_SIZE - off);
+
+		/*
+		 * The VM has a hole in its physical memory map. If we want to
+		 * use 'dd' to inspect memory beyond the hole we need to
+		 * provide bogus data for memory that lies in the hole.
+		 *
+		 * Since this device does not support lseek(2), dd(1) will
+		 * read(2) blocks of data to simulate the lseek(2).
+		 */
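+		/*
+		 * Illustrative (not part of this interface): a command such
+		 * as "dd if=/dev/vmm/<vmname> bs=4k | hexdump -C" therefore
+		 * reads zeroes for unbacked ranges like the PCI hole, while
+		 * writes to such ranges fail with EFAULT.
+		 */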
+		hpa = vm_gpa_hold(sc->vm, VM_MAXCPU - 1, gpa, c, prot, &cookie);
+		if (hpa == NULL) {
+			if (uio->uio_rw == UIO_READ)
+				error = uiomove(__DECONST(void *, zero_region),
+				    c, uio);
+			else
+				error = EFAULT;
+		} else {
+			error = uiomove(hpa, c, uio);
+			vm_gpa_release(cookie);
+		}
+	}
+	vcpu_unlock_one(sc, VM_MAXCPU - 1);
+	return (error);
+}
+
+CTASSERT(sizeof(((struct vm_memseg *)0)->name) >= SPECNAMELEN + 1);
+
+static int
+get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg)
+{
+	struct devmem_softc *dsc;
+	int error;
+	bool sysmem;
+
+	error = vm_get_memseg(sc->vm, mseg->segid, &mseg->len, &sysmem, NULL);
+	if (error || mseg->len == 0)
+		return (error);
+
+	if (!sysmem) {
+		SLIST_FOREACH(dsc, &sc->devmem, link) {
+			if (dsc->segid == mseg->segid)
+				break;
+		}
+		KASSERT(dsc != NULL, ("%s: devmem segment %d not found",
+		    __func__, mseg->segid));
+		error = copystr(dsc->name, mseg->name, SPECNAMELEN + 1, NULL);
+	} else {
+		bzero(mseg->name, sizeof(mseg->name));
+	}
+
+	return (error);
+}
+
+static int
+alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg)
+{
+	char *name;
+	int error;
+	bool sysmem;
+
+	error = 0;
+	name = NULL;
+	sysmem = true;
+
+	if (VM_MEMSEG_NAME(mseg)) {
+		sysmem = false;
+		name = malloc(SPECNAMELEN + 1, M_VMMDEV, M_WAITOK);
+		error = copystr(mseg->name, name, SPECNAMELEN + 1, 0);
+		if (error)
+			goto done;
+	}
+
+	error = vm_alloc_memseg(sc->vm, mseg->segid, mseg->len, sysmem);
+	if (error)
+		goto done;
+
+	if (VM_MEMSEG_NAME(mseg)) {
+		error = devmem_create_cdev(vm_name(sc->vm), mseg->segid, name);
+		if (error)
+			vm_free_memseg(sc->vm, mseg->segid);
+		else
+			name = NULL;	/* freed when 'cdev' is destroyed */
+	}
+done:
+	free(name, M_VMMDEV);
+	return (error);
+}
+
+static int
+vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
+	     struct thread *td)
+{
+	int error, vcpu, state_changed, size;
+	cpuset_t *cpuset;
+	struct vmmdev_softc *sc;
+	struct vm_register *vmreg;
+	struct vm_seg_desc *vmsegdesc;
+	struct vm_run *vmrun;
+	struct vm_exception *vmexc;
+	struct vm_lapic_irq *vmirq;
+	struct vm_lapic_msi *vmmsi;
+	struct vm_ioapic_irq *ioapic_irq;
+	struct vm_isa_irq *isa_irq;
+	struct vm_isa_irq_trigger *isa_irq_trigger;
+	struct vm_capability *vmcap;
+	struct vm_pptdev *pptdev;
+	struct vm_pptdev_mmio *pptmmio;
+	struct vm_pptdev_msi *pptmsi;
+	struct vm_pptdev_msix *pptmsix;
+	struct vm_nmi *vmnmi;
+	struct vm_stats *vmstats;
+	struct vm_stat_desc *statdesc;
+	struct vm_x2apic *x2apic;
+	struct vm_gpa_pte *gpapte;
+	struct vm_suspend *vmsuspend;
+	struct vm_gla2gpa *gg;
+	struct vm_activate_cpu *vac;
+	struct vm_cpuset *vm_cpuset;
+	struct vm_intinfo *vmii;
+	struct vm_rtc_time *rtctime;
+	struct vm_rtc_data *rtcdata;
+	struct vm_memmap *mm;
+
+	sc = vmmdev_lookup2(cdev);
+	if (sc == NULL)
+		return (ENXIO);
+
+	error = 0;
+	vcpu = -1;
+	state_changed = 0;
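+	/*
+	 * state_changed tracks the locking done below: 0 means no vcpu is
+	 * frozen, 1 means only the vcpu in 'vcpu' is frozen, 2 means all
+	 * vcpus are frozen.
+	 */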
+
+	/*
+	 * Some VMM ioctls can operate only on vcpus that are not running.
+	 */
+	switch (cmd) {
+	case VM_RUN:
+	case VM_GET_REGISTER:
+	case VM_SET_REGISTER:
+	case VM_GET_SEGMENT_DESCRIPTOR:
+	case VM_SET_SEGMENT_DESCRIPTOR:
+	case VM_INJECT_EXCEPTION:
+	case VM_GET_CAPABILITY:
+	case VM_SET_CAPABILITY:
+	case VM_PPTDEV_MSI:
+	case VM_PPTDEV_MSIX:
+	case VM_SET_X2APIC_STATE:
+	case VM_GLA2GPA:
+	case VM_ACTIVATE_CPU:
+	case VM_SET_INTINFO:
+	case VM_GET_INTINFO:
+	case VM_RESTART_INSTRUCTION:
+		/*
+		 * XXX fragile, handle with care
+		 * Assumes that the first field of the ioctl data is the vcpu.
+		 */
+		vcpu = *(int *)data;
+		error = vcpu_lock_one(sc, vcpu);
+		if (error)
+			goto done;
+		state_changed = 1;
+		break;
+
+	case VM_MAP_PPTDEV_MMIO:
+	case VM_BIND_PPTDEV:
+	case VM_UNBIND_PPTDEV:
+	case VM_ALLOC_MEMSEG:
+	case VM_MMAP_MEMSEG:
+	case VM_REINIT:
+		/*
+		 * ioctls that operate on the entire virtual machine must
+		 * prevent all vcpus from running.
+		 */
+		error = vcpu_lock_all(sc);
+		if (error)
+			goto done;
+		state_changed = 2;
+		break;
+
+	case VM_GET_MEMSEG:
+	case VM_MMAP_GETNEXT:
+		/*
+		 * Lock a vcpu to make sure that the memory map cannot be
+		 * modified while it is being inspected.
+		 */
+		vcpu = VM_MAXCPU - 1;
+		error = vcpu_lock_one(sc, vcpu);
+		if (error)
+			goto done;
+		state_changed = 1;
+		break;
+
+	default:
+		break;
+	}
+
+	switch(cmd) {
+	case VM_RUN:
+		vmrun = (struct vm_run *)data;
+		error = vm_run(sc->vm, vmrun);
+		break;
+	case VM_SUSPEND:
+		vmsuspend = (struct vm_suspend *)data;
+		error = vm_suspend(sc->vm, vmsuspend->how);
+		break;
+	case VM_REINIT:
+		error = vm_reinit(sc->vm);
+		break;
+	case VM_STAT_DESC: {
+		statdesc = (struct vm_stat_desc *)data;
+		error = vmm_stat_desc_copy(statdesc->index,
+					statdesc->desc, sizeof(statdesc->desc));
+		break;
+	}
+	case VM_STATS: {
+		CTASSERT(MAX_VM_STATS >= MAX_VMM_STAT_ELEMS);
+		vmstats = (struct vm_stats *)data;
+		getmicrotime(&vmstats->tv);
+		error = vmm_stat_copy(sc->vm, vmstats->cpuid,
+				      &vmstats->num_entries, vmstats->statbuf);
+		break;
+	}
+	case VM_PPTDEV_MSI:
+		pptmsi = (struct vm_pptdev_msi *)data;
+		error = ppt_setup_msi(sc->vm, pptmsi->vcpu,
+				      pptmsi->bus, pptmsi->slot, pptmsi->func,
+				      pptmsi->addr, pptmsi->msg,
+				      pptmsi->numvec);
+		break;
+	case VM_PPTDEV_MSIX:
+		pptmsix = (struct vm_pptdev_msix *)data;
+		error = ppt_setup_msix(sc->vm, pptmsix->vcpu,
+				       pptmsix->bus, pptmsix->slot, 
+				       pptmsix->func, pptmsix->idx,
+				       pptmsix->addr, pptmsix->msg,
+				       pptmsix->vector_control);
+		break;
+	case VM_MAP_PPTDEV_MMIO:
+		pptmmio = (struct vm_pptdev_mmio *)data;
+		error = ppt_map_mmio(sc->vm, pptmmio->bus, pptmmio->slot,
+				     pptmmio->func, pptmmio->gpa, pptmmio->len,
+				     pptmmio->hpa);
+		break;
+	case VM_BIND_PPTDEV:
+		pptdev = (struct vm_pptdev *)data;
+		error = vm_assign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
+					 pptdev->func);
+		break;
+	case VM_UNBIND_PPTDEV:
+		pptdev = (struct vm_pptdev *)data;
+		error = vm_unassign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
+					   pptdev->func);
+		break;
+	case VM_INJECT_EXCEPTION:
+		vmexc = (struct vm_exception *)data;
+		error = vm_inject_exception(sc->vm, vmexc->cpuid,
+		    vmexc->vector, vmexc->error_code_valid, vmexc->error_code,
+		    vmexc->restart_instruction);
+		break;
+	case VM_INJECT_NMI:
+		vmnmi = (struct vm_nmi *)data;
+		error = vm_inject_nmi(sc->vm, vmnmi->cpuid);
+		break;
+	case VM_LAPIC_IRQ:
+		vmirq = (struct vm_lapic_irq *)data;
+		error = lapic_intr_edge(sc->vm, vmirq->cpuid, vmirq->vector);
+		break;
+	case VM_LAPIC_LOCAL_IRQ:
+		vmirq = (struct vm_lapic_irq *)data;
+		error = lapic_set_local_intr(sc->vm, vmirq->cpuid,
+		    vmirq->vector);
+		break;
+	case VM_LAPIC_MSI:
+		vmmsi = (struct vm_lapic_msi *)data;
+		error = lapic_intr_msi(sc->vm, vmmsi->addr, vmmsi->msg);
+		break;
+	case VM_IOAPIC_ASSERT_IRQ:
+		ioapic_irq = (struct vm_ioapic_irq *)data;
+		error = vioapic_assert_irq(sc->vm, ioapic_irq->irq);
+		break;
+	case VM_IOAPIC_DEASSERT_IRQ:
+		ioapic_irq = (struct vm_ioapic_irq *)data;
+		error = vioapic_deassert_irq(sc->vm, ioapic_irq->irq);
+		break;
+	case VM_IOAPIC_PULSE_IRQ:
+		ioapic_irq = (struct vm_ioapic_irq *)data;
+		error = vioapic_pulse_irq(sc->vm, ioapic_irq->irq);
+		break;
+	case VM_IOAPIC_PINCOUNT:
+		*(int *)data = vioapic_pincount(sc->vm);
+		break;
+	case VM_ISA_ASSERT_IRQ:
+		isa_irq = (struct vm_isa_irq *)data;
+		error = vatpic_assert_irq(sc->vm, isa_irq->atpic_irq);
+		if (error == 0 && isa_irq->ioapic_irq != -1)
+			error = vioapic_assert_irq(sc->vm,
+			    isa_irq->ioapic_irq);
+		break;
+	case VM_ISA_DEASSERT_IRQ:
+		isa_irq = (struct vm_isa_irq *)data;
+		error = vatpic_deassert_irq(sc->vm, isa_irq->atpic_irq);
+		if (error == 0 && isa_irq->ioapic_irq != -1)
+			error = vioapic_deassert_irq(sc->vm,
+			    isa_irq->ioapic_irq);
+		break;
+	case VM_ISA_PULSE_IRQ:
+		isa_irq = (struct vm_isa_irq *)data;
+		error = vatpic_pulse_irq(sc->vm, isa_irq->atpic_irq);
+		if (error == 0 && isa_irq->ioapic_irq != -1)
+			error = vioapic_pulse_irq(sc->vm, isa_irq->ioapic_irq);
+		break;
+	case VM_ISA_SET_IRQ_TRIGGER:
+		isa_irq_trigger = (struct vm_isa_irq_trigger *)data;
+		error = vatpic_set_irq_trigger(sc->vm,
+		    isa_irq_trigger->atpic_irq, isa_irq_trigger->trigger);
+		break;
+	case VM_MMAP_GETNEXT:
+		mm = (struct vm_memmap *)data;
+		error = vm_mmap_getnext(sc->vm, &mm->gpa, &mm->segid,
+		    &mm->segoff, &mm->len, &mm->prot, &mm->flags);
+		break;
+	case VM_MMAP_MEMSEG:
+		mm = (struct vm_memmap *)data;
+		error = vm_mmap_memseg(sc->vm, mm->gpa, mm->segid, mm->segoff,
+		    mm->len, mm->prot, mm->flags);
+		break;
+	case VM_ALLOC_MEMSEG:
+		error = alloc_memseg(sc, (struct vm_memseg *)data);
+		break;
+	case VM_GET_MEMSEG:
+		error = get_memseg(sc, (struct vm_memseg *)data);
+		break;
+	case VM_GET_REGISTER:
+		vmreg = (struct vm_register *)data;
+		error = vm_get_register(sc->vm, vmreg->cpuid, vmreg->regnum,
+					&vmreg->regval);
+		break;
+	case VM_SET_REGISTER:
+		vmreg = (struct vm_register *)data;
+		error = vm_set_register(sc->vm, vmreg->cpuid, vmreg->regnum,
+					vmreg->regval);
+		break;
+	case VM_SET_SEGMENT_DESCRIPTOR:
+		vmsegdesc = (struct vm_seg_desc *)data;
+		error = vm_set_seg_desc(sc->vm, vmsegdesc->cpuid,
+					vmsegdesc->regnum,
+					&vmsegdesc->desc);
+		break;
+	case VM_GET_SEGMENT_DESCRIPTOR:
+		vmsegdesc = (struct vm_seg_desc *)data;
+		error = vm_get_seg_desc(sc->vm, vmsegdesc->cpuid,
+					vmsegdesc->regnum,
+					&vmsegdesc->desc);
+		break;
+	case VM_GET_CAPABILITY:
+		vmcap = (struct vm_capability *)data;
+		error = vm_get_capability(sc->vm, vmcap->cpuid,
+					  vmcap->captype,
+					  &vmcap->capval);
+		break;
+	case VM_SET_CAPABILITY:
+		vmcap = (struct vm_capability *)data;
+		error = vm_set_capability(sc->vm, vmcap->cpuid,
+					  vmcap->captype,
+					  vmcap->capval);
+		break;
+	case VM_SET_X2APIC_STATE:
+		x2apic = (struct vm_x2apic *)data;
+		error = vm_set_x2apic_state(sc->vm,
+					    x2apic->cpuid, x2apic->state);
+		break;
+	case VM_GET_X2APIC_STATE:
+		x2apic = (struct vm_x2apic *)data;
+		error = vm_get_x2apic_state(sc->vm,
+					    x2apic->cpuid, &x2apic->state);
+		break;
+	case VM_GET_GPA_PMAP:
+		gpapte = (struct vm_gpa_pte *)data;
+		pmap_get_mapping(vmspace_pmap(vm_get_vmspace(sc->vm)),
+				 gpapte->gpa, gpapte->pte, &gpapte->ptenum);
+		error = 0;
+		break;
+	case VM_GET_HPET_CAPABILITIES:
+		error = vhpet_getcap((struct vm_hpet_cap *)data);
+		break;
+	case VM_GLA2GPA: {
+		CTASSERT(PROT_READ == VM_PROT_READ);
+		CTASSERT(PROT_WRITE == VM_PROT_WRITE);
+		CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
+		gg = (struct vm_gla2gpa *)data;
+		error = vm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
+		    gg->prot, &gg->gpa, &gg->fault);
+		KASSERT(error == 0 || error == EFAULT,
+		    ("%s: vm_gla2gpa unknown error %d", __func__, error));
+		break;
+	}
+	case VM_ACTIVATE_CPU:
+		vac = (struct vm_activate_cpu *)data;
+		error = vm_activate_cpu(sc->vm, vac->vcpuid);
+		break;
+	case VM_GET_CPUS:
+		error = 0;
+		vm_cpuset = (struct vm_cpuset *)data;
+		size = vm_cpuset->cpusetsize;
+		if (size < sizeof(cpuset_t) || size > CPU_MAXSIZE / NBBY) {
+			error = ERANGE;
+			break;
+		}
+		cpuset = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
+		if (vm_cpuset->which == VM_ACTIVE_CPUS)
+			*cpuset = vm_active_cpus(sc->vm);
+		else if (vm_cpuset->which == VM_SUSPENDED_CPUS)
+			*cpuset = vm_suspended_cpus(sc->vm);
+		else
+			error = EINVAL;
+		if (error == 0)
+			error = copyout(cpuset, vm_cpuset->cpus, size);
+		free(cpuset, M_TEMP);
+		break;
+	case VM_SET_INTINFO:
+		vmii = (struct vm_intinfo *)data;
+		error = vm_exit_intinfo(sc->vm, vmii->vcpuid, vmii->info1);
+		break;
+	case VM_GET_INTINFO:
+		vmii = (struct vm_intinfo *)data;
+		error = vm_get_intinfo(sc->vm, vmii->vcpuid, &vmii->info1,
+		    &vmii->info2);
+		break;
+	case VM_RTC_WRITE:
+		rtcdata = (struct vm_rtc_data *)data;
+		error = vrtc_nvram_write(sc->vm, rtcdata->offset,
+		    rtcdata->value);
+		break;
+	case VM_RTC_READ:
+		rtcdata = (struct vm_rtc_data *)data;
+		error = vrtc_nvram_read(sc->vm, rtcdata->offset,
+		    &rtcdata->value);
+		break;
+	case VM_RTC_SETTIME:
+		rtctime = (struct vm_rtc_time *)data;
+		error = vrtc_set_time(sc->vm, rtctime->secs);
+		break;
+	case VM_RTC_GETTIME:
+		error = 0;
+		rtctime = (struct vm_rtc_time *)data;
+		rtctime->secs = vrtc_get_time(sc->vm);
+		break;
+	case VM_RESTART_INSTRUCTION:
+		error = vm_restart_instruction(sc->vm, vcpu);
+		break;
+	default:
+		error = ENOTTY;
+		break;
+	}
+
+	if (state_changed == 1)
+		vcpu_unlock_one(sc, vcpu);
+	else if (state_changed == 2)
+		vcpu_unlock_all(sc);
+
+done:
+	/* Make sure that no handler returns a bogus value like ERESTART */
+	KASSERT(error >= 0, ("vmmdev_ioctl: invalid error return %d", error));
+	return (error);
+}
+
+static int
+vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t mapsize,
+    struct vm_object **objp, int nprot)
+{
+	struct vmmdev_softc *sc;
+	vm_paddr_t gpa;
+	size_t len;
+	vm_ooffset_t segoff, first, last;
+	int error, found, segid;
+	bool sysmem;
+
+	first = *offset;
+	last = first + mapsize;
+	if ((nprot & PROT_EXEC) || first < 0 || first >= last)
+		return (EINVAL);
+
+	sc = vmmdev_lookup2(cdev);
+	if (sc == NULL) {
+		/* virtual machine is in the process of being created */
+		return (EINVAL);
+	}
+
+	/*
+	 * Get a read lock on the guest memory map by freezing any vcpu.
+	 */
+	error = vcpu_lock_one(sc, VM_MAXCPU - 1);
+	if (error)
+		return (error);
+
+	gpa = 0;
+	found = 0;
+	while (!found) {
+		error = vm_mmap_getnext(sc->vm, &gpa, &segid, &segoff, &len,
+		    NULL, NULL);
+		if (error)
+			break;
+
+		if (first >= gpa && last <= gpa + len)
+			found = 1;
+		else
+			gpa += len;
+	}
+
+	if (found) {
+		error = vm_get_memseg(sc->vm, segid, &len, &sysmem, objp);
+		KASSERT(error == 0 && *objp != NULL,
+		    ("%s: invalid memory segment %d", __func__, segid));
+		if (sysmem) {
+			vm_object_reference(*objp);
+			*offset = segoff + (first - gpa);
+		} else {
+			error = EINVAL;
+		}
+	}
+	vcpu_unlock_one(sc, VM_MAXCPU - 1);
+	return (error);
+}
+
+static void
+vmmdev_destroy(void *arg)
+{
+	struct vmmdev_softc *sc = arg;
+	struct devmem_softc *dsc;
+	int error;
+
+	error = vcpu_lock_all(sc);
+	KASSERT(error == 0, ("%s: error %d freezing vcpus", __func__, error));
+
+	while ((dsc = SLIST_FIRST(&sc->devmem)) != NULL) {
+		KASSERT(dsc->cdev == NULL, ("%s: devmem not free", __func__));
+		SLIST_REMOVE_HEAD(&sc->devmem, link);
+		free(dsc->name, M_VMMDEV);
+		free(dsc, M_VMMDEV);
+	}
+
+	if (sc->cdev != NULL)
+		destroy_dev(sc->cdev);
+
+	if (sc->vm != NULL)
+		vm_destroy(sc->vm);
+
+	if ((sc->flags & VSC_LINKED) != 0) {
+		mtx_lock(&vmmdev_mtx);
+		SLIST_REMOVE(&head, sc, vmmdev_softc, link);
+		mtx_unlock(&vmmdev_mtx);
+	}
+
+	free(sc, M_VMMDEV);
+}
+
+static int
+sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
+{
+	int error;
+	char buf[VM_MAX_NAMELEN];
+	struct devmem_softc *dsc;
+	struct vmmdev_softc *sc;
+	struct cdev *cdev;
+
+	strlcpy(buf, "beavis", sizeof(buf));
+	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+
+	mtx_lock(&vmmdev_mtx);
+	sc = vmmdev_lookup(buf);
+	if (sc == NULL || sc->cdev == NULL) {
+		mtx_unlock(&vmmdev_mtx);
+		return (EINVAL);
+	}
+
+	/*
+	 * The 'cdev' will be destroyed asynchronously when 'si_threadcount'
+	 * goes down to 0, so we should not destroy it again in the callback.
+	 *
+	 * Setting 'sc->cdev' to NULL is also used to indicate that the VM
+	 * is scheduled for destruction.
+	 */
+	cdev = sc->cdev;
+	sc->cdev = NULL;
+	mtx_unlock(&vmmdev_mtx);
+
+	/*
+	 * Schedule all cdevs to be destroyed:
+	 *
+	 * - any new operations on the 'cdev' will return an error (ENXIO).
+	 *
+	 * - when the 'si_threadcount' dwindles down to zero the 'cdev' will
+	 *   be destroyed and the callback will be invoked in a taskqueue
+	 *   context.
+	 *
+	 * - the 'devmem' cdevs are destroyed before the virtual machine 'cdev'
+	 */
+	SLIST_FOREACH(dsc, &sc->devmem, link) {
+		KASSERT(dsc->cdev != NULL, ("devmem cdev already destroyed"));
+		destroy_dev_sched_cb(dsc->cdev, devmem_destroy, dsc);
+	}
+	destroy_dev_sched_cb(cdev, vmmdev_destroy, sc);
+	return (0);
+}
+SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy, CTLTYPE_STRING | CTLFLAG_RW,
+	    NULL, 0, sysctl_vmm_destroy, "A", NULL);
+
+static struct cdevsw vmmdevsw = {
+	.d_name		= "vmmdev",
+	.d_version	= D_VERSION,
+	.d_ioctl	= vmmdev_ioctl,
+	.d_mmap_single	= vmmdev_mmap_single,
+	.d_read		= vmmdev_rw,
+	.d_write	= vmmdev_rw,
+};
+
+static int
+sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
+{
+	int error;
+	struct vm *vm;
+	struct cdev *cdev;
+	struct vmmdev_softc *sc, *sc2;
+	char buf[VM_MAX_NAMELEN];
+
+	strlcpy(buf, "beavis", sizeof(buf));
+	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+
+	mtx_lock(&vmmdev_mtx);
+	sc = vmmdev_lookup(buf);
+	mtx_unlock(&vmmdev_mtx);
+	if (sc != NULL)
+		return (EEXIST);
+
+	error = vm_create(buf, &vm);
+	if (error != 0)
+		return (error);
+
+	sc = malloc(sizeof(struct vmmdev_softc), M_VMMDEV, M_WAITOK | M_ZERO);
+	sc->vm = vm;
+	SLIST_INIT(&sc->devmem);
+
+	/*
+	 * Look up the name again just in case somebody sneaked in when we
+	 * dropped the lock.
+	 */
+	mtx_lock(&vmmdev_mtx);
+	sc2 = vmmdev_lookup(buf);
+	if (sc2 == NULL) {
+		SLIST_INSERT_HEAD(&head, sc, link);
+		sc->flags |= VSC_LINKED;
+	}
+	mtx_unlock(&vmmdev_mtx);
+
+	if (sc2 != NULL) {
+		vmmdev_destroy(sc);
+		return (EEXIST);
+	}
+
+	error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &vmmdevsw, NULL,
+			   UID_ROOT, GID_WHEEL, 0600, "vmm/%s", buf);
+	if (error != 0) {
+		vmmdev_destroy(sc);
+		return (error);
+	}
+
+	mtx_lock(&vmmdev_mtx);
+	sc->cdev = cdev;
+	sc->cdev->si_drv1 = sc;
+	mtx_unlock(&vmmdev_mtx);
+
+	return (0);
+}
+SYSCTL_PROC(_hw_vmm, OID_AUTO, create, CTLTYPE_STRING | CTLFLAG_RW,
+	    NULL, 0, sysctl_vmm_create, "A", NULL);
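+
+/*
+ * Illustrative usage from userland (the VM name "testvm" is hypothetical):
+ *
+ *	sysctl hw.vmm.create=testvm	# creates /dev/vmm/testvm
+ *	sysctl hw.vmm.destroy=testvm	# schedules the VM for destruction
+ */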
+
+void
+vmmdev_init(void)
+{
+	mtx_init(&vmmdev_mtx, "vmm device mutex", NULL, MTX_DEF);
+}
+
+int
+vmmdev_cleanup(void)
+{
+	int error;
+
+	if (SLIST_EMPTY(&head))
+		error = 0;
+	else
+		error = EBUSY;
+
+	return (error);
+}
+
+static int
+devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t len,
+    struct vm_object **objp, int nprot)
+{
+	struct devmem_softc *dsc;
+	vm_ooffset_t first, last;
+	size_t seglen;
+	int error;
+	bool sysmem;
+
+	dsc = cdev->si_drv1;
+	if (dsc == NULL) {
+		/* 'cdev' has been created but is not ready for use */
+		return (ENXIO);
+	}
+
+	first = *offset;
+	last = *offset + len;
+	if ((nprot & PROT_EXEC) || first < 0 || first >= last)
+		return (EINVAL);
+
+	error = vcpu_lock_one(dsc->sc, VM_MAXCPU - 1);
+	if (error)
+		return (error);
+
+	error = vm_get_memseg(dsc->sc->vm, dsc->segid, &seglen, &sysmem, objp);
+	KASSERT(error == 0 && !sysmem && *objp != NULL,
+	    ("%s: invalid devmem segment %d", __func__, dsc->segid));
+
+	vcpu_unlock_one(dsc->sc, VM_MAXCPU - 1);
+
+	if (seglen >= last) {
+		vm_object_reference(*objp);
+		return (0);
+	} else {
+		return (EINVAL);
+	}
+}
+
+static struct cdevsw devmemsw = {
+	.d_name		= "devmem",
+	.d_version	= D_VERSION,
+	.d_mmap_single	= devmem_mmap_single,
+};
+
+static int
+devmem_create_cdev(const char *vmname, int segid, char *devname)
+{
+	struct devmem_softc *dsc;
+	struct vmmdev_softc *sc;
+	struct cdev *cdev;
+	int error;
+
+	error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &devmemsw, NULL,
+	    UID_ROOT, GID_WHEEL, 0600, "vmm.io/%s.%s", vmname, devname);
+	if (error)
+		return (error);
+
+	dsc = malloc(sizeof(struct devmem_softc), M_VMMDEV, M_WAITOK | M_ZERO);
+
+	mtx_lock(&vmmdev_mtx);
+	sc = vmmdev_lookup(vmname);
+	KASSERT(sc != NULL, ("%s: vm %s softc not found", __func__, vmname));
+	if (sc->cdev == NULL) {
+		/* virtual machine is being created or destroyed */
+		mtx_unlock(&vmmdev_mtx);
+		free(dsc, M_VMMDEV);
+		destroy_dev_sched_cb(cdev, NULL, 0);
+		return (ENODEV);
+	}
+
+	dsc->segid = segid;
+	dsc->name = devname;
+	dsc->cdev = cdev;
+	dsc->sc = sc;
+	SLIST_INSERT_HEAD(&sc->devmem, dsc, link);
+	mtx_unlock(&vmmdev_mtx);
+
+	/* The 'cdev' is ready for use after 'si_drv1' is initialized */
+	cdev->si_drv1 = dsc;
+	return (0);
+}
+
+static void
+devmem_destroy(void *arg)
+{
+	struct devmem_softc *dsc = arg;
+
+	KASSERT(dsc->cdev, ("%s: devmem cdev already destroyed", __func__));
+	dsc->cdev = NULL;
+	dsc->sc = NULL;
+}


Property changes on: trunk/sys/amd64/vmm/vmm_dev.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_host.c
===================================================================
--- trunk/sys/amd64/vmm/vmm_host.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_host.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,162 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2012 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_host.c 267427 2014-06-12 19:58:12Z jhb $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_host.c 267427 2014-06-12 19:58:12Z jhb $");
+
+#include <sys/param.h>
+#include <sys/pcpu.h>
+
+#include <machine/cpufunc.h>
+#include <machine/segments.h>
+#include <machine/specialreg.h>
+
+#include "vmm_host.h"
+
+static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4,
+	vmm_host_xcr0;
+static struct xsave_limits vmm_xsave_limits;
+
+void
+vmm_host_state_init(void)
+{
+	int regs[4];
+
+	vmm_host_efer = rdmsr(MSR_EFER);
+	vmm_host_pat = rdmsr(MSR_PAT);
+
+	/*
+	 * We always want CR0.TS to be set when the processor does a VM exit.
+	 *
+	 * With emulation turned on unconditionally after a VM exit, we are
+	 * able to trap inadvertent use of the FPU until the guest FPU state
+	 * has been safely squirreled away.
+	 */
+	vmm_host_cr0 = rcr0() | CR0_TS;
+
+	vmm_host_cr4 = rcr4();
+
+	/*
+	 * Only permit a guest to use XSAVE if the host is using
+	 * XSAVE.  Only permit a guest to use XSAVE features supported
+	 * by the host.  This ensures that the FPU state used by the
+	 * guest is always a subset of the saved guest FPU state.
+	 *
+	 * In addition, only permit known XSAVE features whose dependency
+	 * rules on other features are understood, so that xsetbv can be
+	 * emulated properly.
+	 */
+	if (vmm_host_cr4 & CR4_XSAVE) {
+		vmm_xsave_limits.xsave_enabled = 1;
+		vmm_host_xcr0 = rxcr(0);
+		vmm_xsave_limits.xcr0_allowed = vmm_host_xcr0 &
+		    (XFEATURE_AVX | XFEATURE_MPX | XFEATURE_AVX512);
+
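+		/*
+		 * CPUID leaf 0xD, sub-leaf 0: %ebx (regs[1]) reports the
+		 * XSAVE area size needed for the feature set currently
+		 * enabled in XCR0 on the host.
+		 */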
+		cpuid_count(0xd, 0x0, regs);
+		vmm_xsave_limits.xsave_max_size = regs[1];
+	}
+}
+
+uint64_t
+vmm_get_host_pat(void)
+{
+
+	return (vmm_host_pat);
+}
+
+uint64_t
+vmm_get_host_efer(void)
+{
+
+	return (vmm_host_efer);
+}
+
+uint64_t
+vmm_get_host_cr0(void)
+{
+
+	return (vmm_host_cr0);
+}
+
+uint64_t
+vmm_get_host_cr4(void)
+{
+
+	return (vmm_host_cr4);
+}
+
+uint64_t
+vmm_get_host_xcr0(void)
+{
+
+	return (vmm_host_xcr0);
+}
+
+uint64_t
+vmm_get_host_datasel(void)
+{
+
+	return (GSEL(GDATA_SEL, SEL_KPL));
+
+}
+
+uint64_t
+vmm_get_host_codesel(void)
+{
+
+	return (GSEL(GCODE_SEL, SEL_KPL));
+}
+
+uint64_t
+vmm_get_host_tsssel(void)
+{
+
+	return (GSEL(GPROC0_SEL, SEL_KPL));
+}
+
+uint64_t
+vmm_get_host_fsbase(void)
+{
+
+	return (0);
+}
+
+uint64_t
+vmm_get_host_idtrbase(void)
+{
+
+	return (r_idt.rd_base);
+}
+
+const struct xsave_limits *
+vmm_get_xsave_limits(void)
+{
+
+	return (&vmm_xsave_limits);
+}


Property changes on: trunk/sys/amd64/vmm/vmm_host.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_host.h
===================================================================
--- trunk/sys/amd64/vmm/vmm_host.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_host.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,84 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2012 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_host.h 267427 2014-06-12 19:58:12Z jhb $
+ */
+
+#ifndef	_VMM_HOST_H_
+#define	_VMM_HOST_H_
+
+#ifndef	_KERNEL
+#error "no user-servicable parts inside"
+#endif
+
+struct xsave_limits {
+	int		xsave_enabled;
+	uint64_t	xcr0_allowed;
+	uint32_t	xsave_max_size;
+};
+
+void vmm_host_state_init(void);
+
+uint64_t vmm_get_host_pat(void);
+uint64_t vmm_get_host_efer(void);
+uint64_t vmm_get_host_cr0(void);
+uint64_t vmm_get_host_cr4(void);
+uint64_t vmm_get_host_xcr0(void);
+uint64_t vmm_get_host_datasel(void);
+uint64_t vmm_get_host_codesel(void);
+uint64_t vmm_get_host_tsssel(void);
+uint64_t vmm_get_host_fsbase(void);
+uint64_t vmm_get_host_idtrbase(void);
+const struct xsave_limits *vmm_get_xsave_limits(void);
+
+/*
+ * Inline access to host state that is used on every VM entry
+ */
+static __inline uint64_t
+vmm_get_host_trbase(void)
+{
+
+	return ((uint64_t)PCPU_GET(tssp));
+}
+
+static __inline uint64_t
+vmm_get_host_gdtrbase(void)
+{
+
+	return ((uint64_t)&gdt[NGDT * curcpu]);
+}
+
+struct pcpu;
+extern struct pcpu __pcpu[];
+
+static __inline uint64_t
+vmm_get_host_gsbase(void)
+{
+
+	return ((uint64_t)&__pcpu[curcpu]);
+}
+
+#endif


Property changes on: trunk/sys/amd64/vmm/vmm_host.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_instruction_emul.c
===================================================================
--- trunk/sys/amd64/vmm/vmm_instruction_emul.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_instruction_emul.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,2447 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2012 Sandvine, Inc.
+ * Copyright (c) 2012 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_instruction_emul.c 295124 2016-02-01 14:56:11Z grehan $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_instruction_emul.c 295124 2016-02-01 14:56:11Z grehan $");
+
+#ifdef _KERNEL
+#include <sys/param.h>
+#include <sys/pcpu.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/vmparam.h>
+#include <machine/vmm.h>
+#else	/* !_KERNEL */
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/_iovec.h>
+
+#include <machine/vmm.h>
+
+#include <assert.h>
+#include <vmmapi.h>
+#define	KASSERT(exp,msg)	assert((exp))
+#endif	/* _KERNEL */
+
+#include <machine/vmm_instruction_emul.h>
+#include <x86/psl.h>
+#include <x86/specialreg.h>
+
+/* struct vie_op.op_type */
+enum {
+	VIE_OP_TYPE_NONE = 0,
+	VIE_OP_TYPE_MOV,
+	VIE_OP_TYPE_MOVSX,
+	VIE_OP_TYPE_MOVZX,
+	VIE_OP_TYPE_AND,
+	VIE_OP_TYPE_OR,
+	VIE_OP_TYPE_SUB,
+	VIE_OP_TYPE_TWO_BYTE,
+	VIE_OP_TYPE_PUSH,
+	VIE_OP_TYPE_CMP,
+	VIE_OP_TYPE_POP,
+	VIE_OP_TYPE_MOVS,
+	VIE_OP_TYPE_GROUP1,
+	VIE_OP_TYPE_STOS,
+	VIE_OP_TYPE_BITTEST,
+	VIE_OP_TYPE_LAST
+};
+
+/* struct vie_op.op_flags */
+#define	VIE_OP_F_IMM		(1 << 0)  /* 16/32-bit immediate operand */
+#define	VIE_OP_F_IMM8		(1 << 1)  /* 8-bit immediate operand */
+#define	VIE_OP_F_MOFFSET	(1 << 2)  /* 16/32/64-bit immediate moffset */
+#define	VIE_OP_F_NO_MODRM	(1 << 3)
+#define	VIE_OP_F_NO_GLA_VERIFICATION (1 << 4)
+
+static const struct vie_op two_byte_opcodes[256] = {
+	[0xB6] = {
+		.op_byte = 0xB6,
+		.op_type = VIE_OP_TYPE_MOVZX,
+	},
+	[0xB7] = {
+		.op_byte = 0xB7,
+		.op_type = VIE_OP_TYPE_MOVZX,
+	},
+	[0xBA] = {
+		.op_byte = 0xBA,
+		.op_type = VIE_OP_TYPE_BITTEST,
+		.op_flags = VIE_OP_F_IMM8,
+	},
+	[0xBE] = {
+		.op_byte = 0xBE,
+		.op_type = VIE_OP_TYPE_MOVSX,
+	},
+};
+
+static const struct vie_op one_byte_opcodes[256] = {
+	[0x0F] = {
+		.op_byte = 0x0F,
+		.op_type = VIE_OP_TYPE_TWO_BYTE
+	},
+	[0x2B] = {
+		.op_byte = 0x2B,
+		.op_type = VIE_OP_TYPE_SUB,
+	},
+	[0x39] = {
+		.op_byte = 0x39,
+		.op_type = VIE_OP_TYPE_CMP,
+	},
+	[0x3B] = {
+		.op_byte = 0x3B,
+		.op_type = VIE_OP_TYPE_CMP,
+	},
+	[0x88] = {
+		.op_byte = 0x88,
+		.op_type = VIE_OP_TYPE_MOV,
+	},
+	[0x89] = {
+		.op_byte = 0x89,
+		.op_type = VIE_OP_TYPE_MOV,
+	},
+	[0x8A] = {
+		.op_byte = 0x8A,
+		.op_type = VIE_OP_TYPE_MOV,
+	},
+	[0x8B] = {
+		.op_byte = 0x8B,
+		.op_type = VIE_OP_TYPE_MOV,
+	},
+	[0xA1] = {
+		.op_byte = 0xA1,
+		.op_type = VIE_OP_TYPE_MOV,
+		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
+	},
+	[0xA3] = {
+		.op_byte = 0xA3,
+		.op_type = VIE_OP_TYPE_MOV,
+		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
+	},
+	[0xA4] = {
+		.op_byte = 0xA4,
+		.op_type = VIE_OP_TYPE_MOVS,
+		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
+	},
+	[0xA5] = {
+		.op_byte = 0xA5,
+		.op_type = VIE_OP_TYPE_MOVS,
+		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
+	},
+	[0xAA] = {
+		.op_byte = 0xAA,
+		.op_type = VIE_OP_TYPE_STOS,
+		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
+	},
+	[0xAB] = {
+		.op_byte = 0xAB,
+		.op_type = VIE_OP_TYPE_STOS,
+		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
+	},
+	[0xC6] = {
+		/* XXX Group 11 extended opcode - not just MOV */
+		.op_byte = 0xC6,
+		.op_type = VIE_OP_TYPE_MOV,
+		.op_flags = VIE_OP_F_IMM8,
+	},
+	[0xC7] = {
+		.op_byte = 0xC7,
+		.op_type = VIE_OP_TYPE_MOV,
+		.op_flags = VIE_OP_F_IMM,
+	},
+	[0x23] = {
+		.op_byte = 0x23,
+		.op_type = VIE_OP_TYPE_AND,
+	},
+	[0x80] = {
+		/* Group 1 extended opcode */
+		.op_byte = 0x80,
+		.op_type = VIE_OP_TYPE_GROUP1,
+		.op_flags = VIE_OP_F_IMM8,
+	},
+	[0x81] = {
+		/* Group 1 extended opcode */
+		.op_byte = 0x81,
+		.op_type = VIE_OP_TYPE_GROUP1,
+		.op_flags = VIE_OP_F_IMM,
+	},
+	[0x83] = {
+		/* Group 1 extended opcode */
+		.op_byte = 0x83,
+		.op_type = VIE_OP_TYPE_GROUP1,
+		.op_flags = VIE_OP_F_IMM8,
+	},
+	[0x8F] = {
+		/* XXX Group 1A extended opcode - not just POP */
+		.op_byte = 0x8F,
+		.op_type = VIE_OP_TYPE_POP,
+	},
+	[0xFF] = {
+		/* XXX Group 5 extended opcode - not just PUSH */
+		.op_byte = 0xFF,
+		.op_type = VIE_OP_TYPE_PUSH,
+	}
+};
+
+/* struct vie.mod */
+#define	VIE_MOD_INDIRECT		0
+#define	VIE_MOD_INDIRECT_DISP8		1
+#define	VIE_MOD_INDIRECT_DISP32		2
+#define	VIE_MOD_DIRECT			3
+
+/* struct vie.rm */
+#define	VIE_RM_SIB			4
+#define	VIE_RM_DISP32			5
+
+#define	GB				(1024 * 1024 * 1024)
+
+static enum vm_reg_name gpr_map[16] = {
+	VM_REG_GUEST_RAX,
+	VM_REG_GUEST_RCX,
+	VM_REG_GUEST_RDX,
+	VM_REG_GUEST_RBX,
+	VM_REG_GUEST_RSP,
+	VM_REG_GUEST_RBP,
+	VM_REG_GUEST_RSI,
+	VM_REG_GUEST_RDI,
+	VM_REG_GUEST_R8,
+	VM_REG_GUEST_R9,
+	VM_REG_GUEST_R10,
+	VM_REG_GUEST_R11,
+	VM_REG_GUEST_R12,
+	VM_REG_GUEST_R13,
+	VM_REG_GUEST_R14,
+	VM_REG_GUEST_R15
+};
+
+static uint64_t size2mask[] = {
+	[1] = 0xff,
+	[2] = 0xffff,
+	[4] = 0xffffffff,
+	[8] = 0xffffffffffffffff,
+};
+
+static int
+vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
+{
+	int error;
+
+	error = vm_get_register(vm, vcpuid, reg, rval);
+
+	return (error);
+}
+
+static void
+vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)
+{
+	*lhbr = 0;
+	*reg = gpr_map[vie->reg];
+
+	/*
+	 * 64-bit mode imposes limitations on accessing legacy high byte
+	 * registers (lhbr).
+	 *
+	 * The legacy high-byte registers cannot be addressed if the REX
+	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
+	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
+	 *
+	 * If the REX prefix is not present then the values 4, 5, 6 and 7
+	 * of the 'ModRM:reg' field address the legacy high-byte registers,
+	 * %ah, %ch, %dh and %bh respectively.
+	 */
+	if (!vie->rex_present) {
+		if (vie->reg & 0x4) {
+			*lhbr = 1;
+			*reg = gpr_map[vie->reg & 0x3];
+		}
+	}
+}
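+
+/*
+ * Example of the mapping above: with no REX prefix a ModRM:reg value of 4
+ * selects %ah (bits 15:8 of %rax via gpr_map[0]), whereas the same encoding
+ * with a REX prefix selects %spl via gpr_map[4].
+ */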
+
+static int
+vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
+{
+	uint64_t val;
+	int error, lhbr;
+	enum vm_reg_name reg;
+
+	vie_calc_bytereg(vie, &reg, &lhbr);
+	error = vm_get_register(vm, vcpuid, reg, &val);
+
+	/*
+	 * To obtain the value of a legacy high byte register shift the
+	 * base register right by 8 bits (%ah = %rax >> 8).
+	 */
+	if (lhbr)
+		*rval = val >> 8;
+	else
+		*rval = val;
+	return (error);
+}
+
+static int
+vie_write_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t byte)
+{
+	uint64_t origval, val, mask;
+	int error, lhbr;
+	enum vm_reg_name reg;
+
+	vie_calc_bytereg(vie, &reg, &lhbr);
+	error = vm_get_register(vm, vcpuid, reg, &origval);
+	if (error == 0) {
+		val = byte;
+		mask = 0xff;
+		if (lhbr) {
+			/*
+			 * Shift left by 8 to store 'byte' in a legacy high
+			 * byte register.
+			 */
+			val <<= 8;
+			mask <<= 8;
+		}
+		val |= origval & ~mask;
+		error = vm_set_register(vm, vcpuid, reg, val);
+	}
+	return (error);
+}
+
+int
+vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
+		    uint64_t val, int size)
+{
+	int error;
+	uint64_t origval;
+
+	switch (size) {
+	case 1:
+	case 2:
+		error = vie_read_register(vm, vcpuid, reg, &origval);
+		if (error)
+			return (error);
+		val &= size2mask[size];
+		val |= origval & ~size2mask[size];
+		break;
+	case 4:
+		val &= 0xffffffffUL;
+		break;
+	case 8:
+		break;
+	default:
+		return (EINVAL);
+	}
+
+	error = vm_set_register(vm, vcpuid, reg, val);
+	return (error);
+}
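+
+/*
+ * The update semantics above mirror x86-64 register behavior: 1- and 2-byte
+ * writes preserve the upper bits of the destination, a 4-byte write
+ * zero-extends into the full 64-bit register and an 8-byte write replaces
+ * the register outright.
+ */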
+
+#define	RFLAGS_STATUS_BITS    (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)
+
+/*
+ * Return the status flags that would result from doing (x - y).
+ */
+#define	GETCC(sz)							\
+static u_long								\
+getcc##sz(uint##sz##_t x, uint##sz##_t y)				\
+{									\
+	u_long rflags;							\
+									\
+	__asm __volatile("sub %2,%1; pushfq; popq %0" :			\
+	    "=r" (rflags), "+r" (x) : "m" (y));				\
+	return (rflags);						\
+} struct __hack
+
+GETCC(8);
+GETCC(16);
+GETCC(32);
+GETCC(64);
+
+static u_long
+getcc(int opsize, uint64_t x, uint64_t y)
+{
+	KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
+	    ("getcc: invalid operand size %d", opsize));
+
+	if (opsize == 1)
+		return (getcc8(x, y));
+	else if (opsize == 2)
+		return (getcc16(x, y));
+	else if (opsize == 4)
+		return (getcc32(x, y));
+	else
+		return (getcc64(x, y));
+}
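+
+/*
+ * Illustrative example: getcc(4, 1, 2) performs a 32-bit "sub" of 2 from 1,
+ * so the returned host %rflags has PSL_C and PSL_N set and PSL_Z clear;
+ * callers are expected to mask the result with RFLAGS_STATUS_BITS.
+ */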
+
+static int
+emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
+{
+	int error, size;
+	enum vm_reg_name reg;
+	uint8_t byte;
+	uint64_t val;
+
+	size = vie->opsize;
+	error = EINVAL;
+
+	switch (vie->op.op_byte) {
+	case 0x88:
+		/*
+		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
+		 * 88/r:	mov r/m8, r8
+		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
+		 */
+		size = 1;	/* override for byte operation */
+		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
+		if (error == 0)
+			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
+		break;
+	case 0x89:
+		/*
+		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
+		 * 89/r:	mov r/m16, r16
+		 * 89/r:	mov r/m32, r32
+		 * REX.W + 89/r	mov r/m64, r64
+		 */
+		reg = gpr_map[vie->reg];
+		error = vie_read_register(vm, vcpuid, reg, &val);
+		if (error == 0) {
+			val &= size2mask[size];
+			error = memwrite(vm, vcpuid, gpa, val, size, arg);
+		}
+		break;
+	case 0x8A:
+		/*
+		 * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
+		 * 8A/r:	mov r8, r/m8
+		 * REX + 8A/r:	mov r8, r/m8
+		 */
+		size = 1;	/* override for byte operation */
+		error = memread(vm, vcpuid, gpa, &val, size, arg);
+		if (error == 0)
+			error = vie_write_bytereg(vm, vcpuid, vie, val);
+		break;
+	case 0x8B:
+		/*
+		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
+		 * 8B/r:	mov r16, r/m16
+		 * 8B/r:	mov r32, r/m32
+		 * REX.W 8B/r:	mov r64, r/m64
+		 */
+		error = memread(vm, vcpuid, gpa, &val, size, arg);
+		if (error == 0) {
+			reg = gpr_map[vie->reg];
+			error = vie_update_register(vm, vcpuid, reg, val, size);
+		}
+		break;
+	case 0xA1:
+		/*
+		 * MOV from seg:moffset to AX/EAX/RAX
+		 * A1:		mov AX, moffs16
+		 * A1:		mov EAX, moffs32
+		 * REX.W + A1:	mov RAX, moffs64
+		 */
+		error = memread(vm, vcpuid, gpa, &val, size, arg);
+		if (error == 0) {
+			reg = VM_REG_GUEST_RAX;
+			error = vie_update_register(vm, vcpuid, reg, val, size);
+		}
+		break;
+	case 0xA3:
+		/*
+		 * MOV from AX/EAX/RAX to seg:moffset
+		 * A3:		mov moffs16, AX
+		 * A3:		mov moffs32, EAX 
+		 * REX.W + A3:	mov moffs64, RAX
+		 */
+		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
+		if (error == 0) {
+			val &= size2mask[size];
+			error = memwrite(vm, vcpuid, gpa, val, size, arg);
+		}
+		break;
+	case 0xC6:
+		/*
+		 * MOV from imm8 to mem (ModRM:r/m)
+		 * C6/0		mov r/m8, imm8
+		 * REX + C6/0	mov r/m8, imm8
+		 */
+		size = 1;	/* override for byte operation */
+		error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);
+		break;
+	case 0xC7:
+		/*
+		 * MOV from imm16/imm32 to mem (ModRM:r/m)
+		 * C7/0		mov r/m16, imm16
+		 * C7/0		mov r/m32, imm32
+		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
+		 */
+		val = vie->immediate & size2mask[size];
+		error = memwrite(vm, vcpuid, gpa, val, size, arg);
+		break;
+	default:
+		break;
+	}
+
+	return (error);
+}
+
+static int
+emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+	     mem_region_read_t memread, mem_region_write_t memwrite,
+	     void *arg)
+{
+	int error, size;
+	enum vm_reg_name reg;
+	uint64_t val;
+
+	size = vie->opsize;
+	error = EINVAL;
+
+	switch (vie->op.op_byte) {
+	case 0xB6:
+		/*
+		 * MOV and zero extend byte from mem (ModRM:r/m) to
+		 * reg (ModRM:reg).
+		 *
+		 * 0F B6/r		movzx r16, r/m8
+		 * 0F B6/r		movzx r32, r/m8
+		 * REX.W + 0F B6/r	movzx r64, r/m8
+		 */
+
+		/* get the first operand */
+		error = memread(vm, vcpuid, gpa, &val, 1, arg);
+		if (error)
+			break;
+
+		/* get the second operand */
+		reg = gpr_map[vie->reg];
+
+		/* zero-extend byte */
+		val = (uint8_t)val;
+
+		/* write the result */
+		error = vie_update_register(vm, vcpuid, reg, val, size);
+		break;
+	case 0xB7:
+		/*
+		 * MOV and zero extend word from mem (ModRM:r/m) to
+		 * reg (ModRM:reg).
+		 *
+		 * 0F B7/r		movzx r32, r/m16
+		 * REX.W + 0F B7/r	movzx r64, r/m16
+		 */
+		error = memread(vm, vcpuid, gpa, &val, 2, arg);
+		if (error)
+			return (error);
+
+		reg = gpr_map[vie->reg];
+
+		/* zero-extend word */
+		val = (uint16_t)val;
+
+		error = vie_update_register(vm, vcpuid, reg, val, size);
+		break;
+	case 0xBE:
+		/*
+		 * MOV and sign extend byte from mem (ModRM:r/m) to
+		 * reg (ModRM:reg).
+		 *
+		 * 0F BE/r		movsx r16, r/m8
+		 * 0F BE/r		movsx r32, r/m8
+		 * REX.W + 0F BE/r	movsx r64, r/m8
+		 */
+
+		/* get the first operand */
+		error = memread(vm, vcpuid, gpa, &val, 1, arg);
+		if (error)
+			break;
+
+		/* get the second operand */
+		reg = gpr_map[vie->reg];
+
+		/* sign extend byte */
+		val = (int8_t)val;
+
+		/* write the result */
+		error = vie_update_register(vm, vcpuid, reg, val, size);
+		break;
+	default:
+		break;
+	}
+	return (error);
+}
+
+/*
+ * Helper function to calculate and validate a linear address.
+ */
+static int
+get_gla(void *vm, int vcpuid, struct vie *vie, struct vm_guest_paging *paging,
+    int opsize, int addrsize, int prot, enum vm_reg_name seg,
+    enum vm_reg_name gpr, uint64_t *gla, int *fault)
+{
+	struct seg_desc desc;
+	uint64_t cr0, val, rflags;
+	int error;
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
+	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
+
+	error = vm_get_seg_desc(vm, vcpuid, seg, &desc);
+	KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
+	    __func__, error, seg));
+
+	error = vie_read_register(vm, vcpuid, gpr, &val);
+	KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
+	    error, gpr));
+
+	if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
+	    addrsize, prot, gla)) {
+		if (seg == VM_REG_GUEST_SS)
+			vm_inject_ss(vm, vcpuid, 0);
+		else
+			vm_inject_gp(vm, vcpuid);
+		goto guest_fault;
+	}
+
+	if (vie_canonical_check(paging->cpu_mode, *gla)) {
+		if (seg == VM_REG_GUEST_SS)
+			vm_inject_ss(vm, vcpuid, 0);
+		else
+			vm_inject_gp(vm, vcpuid);
+		goto guest_fault;
+	}
+
+	if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
+		vm_inject_ac(vm, vcpuid, 0);
+		goto guest_fault;
+	}
+
+	*fault = 0;
+	return (0);
+
+guest_fault:
+	*fault = 1;
+	return (0);
+}
+
+static int
+emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+    struct vm_guest_paging *paging, mem_region_read_t memread,
+    mem_region_write_t memwrite, void *arg)
+{
+#ifdef _KERNEL
+	struct vm_copyinfo copyinfo[2];
+#else
+	struct iovec copyinfo[2];
+#endif
+	uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val;
+	uint64_t rcx, rdi, rsi, rflags;
+	int error, fault, opsize, seg, repeat;
+
+	opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;
+	val = 0;
+	error = 0;
+
+	/*
+	 * XXX although the MOVS instruction is only supposed to be used with
+	 * the "rep" prefix, some guests like FreeBSD will use "repnz" instead.
+	 *
+	 * Empirically the "repnz" prefix has identical behavior to "rep"
+	 * and the zero flag does not make a difference.
+	 */
+	repeat = vie->repz_present | vie->repnz_present;
+
+	if (repeat) {
+		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
+		KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
+
+		/*
+		 * The count register is %rcx, %ecx or %cx depending on the
+		 * address size of the instruction.
+		 */
+		if ((rcx & vie_size2mask(vie->addrsize)) == 0) {
+			error = 0;
+			goto done;
+		}
+	}
+
+	/*
+	 *	Source		Destination	Comments
+	 *	--------------------------------------------
+	 * (1)  memory		memory		n/a
+	 * (2)  memory		mmio		emulated
+	 * (3)  mmio		memory		emulated
+	 * (4)  mmio		mmio		emulated
+	 *
+	 * At this point we don't have sufficient information to distinguish
+	 * between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
+	 * out because it will succeed only when operating on regular memory.
+	 *
+	 * XXX the emulation doesn't properly handle the case where 'gpa'
+	 * is straddling the boundary between the normal memory and MMIO.
+	 */
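+	/*
+	 * Illustrative example (not part of the original code): a guest
+	 * "rep movsl" with %rsi pointing into normal RAM and %rdi into an
+	 * emulated device BAR is case (2); vm_copy_setup() on the source
+	 * succeeds, so the data is read with vm_copyin() and pushed out
+	 * through the memwrite() callback below.
+	 */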
+
+	seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
+	error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
+	    PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault);
+	if (error || fault)
+		goto done;
+
+	error = vm_copy_setup(vm, vcpuid, paging, srcaddr, opsize, PROT_READ,
+	    copyinfo, nitems(copyinfo), &fault);
+	if (error == 0) {
+		if (fault)
+			goto done;	/* Resume guest to handle fault */
+
+		/*
+		 * case (2): read from system memory and write to mmio.
+		 */
+		vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
+		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+		error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
+		if (error)
+			goto done;
+	} else {
+		/*
+		 * 'vm_copy_setup()' is expected to fail for cases (3) and (4)
+		 * if 'srcaddr' is in the mmio space.
+		 */
+
+		error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
+		    PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr,
+		    &fault);
+		if (error || fault)
+			goto done;
+
+		error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
+		    PROT_WRITE, copyinfo, nitems(copyinfo), &fault);
+		if (error == 0) {
+			if (fault)
+				goto done;    /* Resume guest to handle fault */
+
+			/*
+			 * case (3): read from MMIO and write to system memory.
+			 *
+			 * A MMIO read can have side-effects so we
+			 * commit to it only after vm_copy_setup() is
+			 * successful. If a page-fault needs to be
+			 * injected into the guest then it will happen
+			 * before the MMIO read is attempted.
+			 */
+			error = memread(vm, vcpuid, gpa, &val, opsize, arg);
+			if (error)
+				goto done;
+
+			vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
+			vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+		} else {
+			/*
+			 * Case (4): read from and write to mmio.
+			 *
+			 * Commit to the MMIO read/write (with potential
+			 * side-effects) only after we are sure that the
+			 * instruction is not going to be restarted due
+			 * to address translation faults.
+			 */
+			error = vm_gla2gpa(vm, vcpuid, paging, srcaddr,
+			    PROT_READ, &srcgpa, &fault);
+			if (error || fault)
+				goto done;
+
+			error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
+			   PROT_WRITE, &dstgpa, &fault);
+			if (error || fault)
+				goto done;
+
+			error = memread(vm, vcpuid, srcgpa, &val, opsize, arg);
+			if (error)
+				goto done;
+
+			error = memwrite(vm, vcpuid, dstgpa, val, opsize, arg);
+			if (error)
+				goto done;
+		}
+	}
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
+	KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
+	KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
+
+	if (rflags & PSL_D) {
+		rsi -= opsize;
+		rdi -= opsize;
+	} else {
+		rsi += opsize;
+		rdi += opsize;
+	}
+
+	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSI, rsi,
+	    vie->addrsize);
+	KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));
+
+	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
+	    vie->addrsize);
+	KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
+
+	if (repeat) {
+		rcx = rcx - 1;
+		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
+		    rcx, vie->addrsize);
+		KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
+
+		/*
+		 * Repeat the instruction if the count register is not zero.
+		 */
+		if ((rcx & vie_size2mask(vie->addrsize)) != 0)
+			vm_restart_instruction(vm, vcpuid);
+	}
+done:
+	KASSERT(error == 0 || error == EFAULT, ("%s: unexpected error %d",
+	    __func__, error));
+	return (error);
+}
+
+static int
+emulate_stos(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+    struct vm_guest_paging *paging, mem_region_read_t memread,
+    mem_region_write_t memwrite, void *arg)
+{
+	int error, opsize, repeat;
+	uint64_t val;
+	uint64_t rcx, rdi, rflags;
+
+	opsize = (vie->op.op_byte == 0xAA) ? 1 : vie->opsize;
+	repeat = vie->repz_present | vie->repnz_present;
+
+	if (repeat) {
+		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
+		KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
+
+		/*
+		 * The count register is %rcx, %ecx or %cx depending on the
+		 * address size of the instruction.
+		 */
+		if ((rcx & vie_size2mask(vie->addrsize)) == 0)
+			return (0);
+	}
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
+	KASSERT(!error, ("%s: error %d getting rax", __func__, error));
+
+	error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
+	if (error)
+		return (error);
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
+	KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
+
+	if (rflags & PSL_D)
+		rdi -= opsize;
+	else
+		rdi += opsize;
+
+	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
+	    vie->addrsize);
+	KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
+
+	if (repeat) {
+		rcx = rcx - 1;
+		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
+		    rcx, vie->addrsize);
+		KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
+
+		/*
+		 * Repeat the instruction if the count register is not zero.
+		 */
+		if ((rcx & vie_size2mask(vie->addrsize)) != 0)
+			vm_restart_instruction(vm, vcpuid);
+	}
+
+	return (0);
+}
+
+static int
+emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
+{
+	int error, size;
+	enum vm_reg_name reg;
+	uint64_t result, rflags, rflags2, val1, val2;
+
+	size = vie->opsize;
+	error = EINVAL;
+
+	switch (vie->op.op_byte) {
+	case 0x23:
+		/*
+		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
+		 * result in reg.
+		 *
+		 * 23/r		and r16, r/m16
+		 * 23/r		and r32, r/m32
+		 * REX.W + 23/r	and r64, r/m64
+		 */
+
+		/* get the first operand */
+		reg = gpr_map[vie->reg];
+		error = vie_read_register(vm, vcpuid, reg, &val1);
+		if (error)
+			break;
+
+		/* get the second operand */
+		error = memread(vm, vcpuid, gpa, &val2, size, arg);
+		if (error)
+			break;
+
+		/* perform the operation and write the result */
+		result = val1 & val2;
+		error = vie_update_register(vm, vcpuid, reg, result, size);
+		break;
+	case 0x81:
+	case 0x83:
+		/*
+		 * AND mem (ModRM:r/m) with immediate and store the
+		 * result in mem.
+		 *
+		 * 81 /4		and r/m16, imm16
+		 * 81 /4		and r/m32, imm32
+		 * REX.W + 81 /4	and r/m64, imm32 sign-extended to 64
+		 *
+		 * 83 /4		and r/m16, imm8 sign-extended to 16
+		 * 83 /4		and r/m32, imm8 sign-extended to 32
+		 * REX.W + 83/4		and r/m64, imm8 sign-extended to 64
+		 */
+
+		/* get the first operand */
+		error = memread(vm, vcpuid, gpa, &val1, size, arg);
+		if (error)
+			break;
+
+		/*
+		 * perform the operation with the pre-fetched immediate
+		 * operand and write the result
+		 */
+		result = val1 & vie->immediate;
+		error = memwrite(vm, vcpuid, gpa, result, size, arg);
+		break;
+	default:
+		break;
+	}
+	if (error)
+		return (error);
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+	if (error)
+		return (error);
+
+	/*
+	 * OF and CF are cleared; the SF, ZF and PF flags are set according
+	 * to the result; AF is undefined.
+	 *
+	 * The updated status flags are obtained by subtracting 0 from 'result'.
+	 */
+	rflags2 = getcc(size, result, 0);
+	rflags &= ~RFLAGS_STATUS_BITS;
+	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
+
+	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
+	return (error);
+}
+
+static int
+emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
+{
+	int error, size;
+	uint64_t val1, result, rflags, rflags2;
+
+	size = vie->opsize;
+	error = EINVAL;
+
+	switch (vie->op.op_byte) {
+	case 0x81:
+	case 0x83:
+		/*
+		 * OR mem (ModRM:r/m) with immediate and store the
+		 * result in mem.
+		 *
+		 * 81 /1		or r/m16, imm16
+		 * 81 /1		or r/m32, imm32
+		 * REX.W + 81 /1	or r/m64, imm32 sign-extended to 64
+		 *
+		 * 83 /1		or r/m16, imm8 sign-extended to 16
+		 * 83 /1		or r/m32, imm8 sign-extended to 32
+		 * REX.W + 83/1		or r/m64, imm8 sign-extended to 64
+		 */
+
+		/* get the first operand */
+		error = memread(vm, vcpuid, gpa, &val1, size, arg);
+		if (error)
+			break;
+
+		/*
+		 * perform the operation with the pre-fetched immediate
+		 * operand and write the result
+		 */
+		result = val1 | vie->immediate;
+		error = memwrite(vm, vcpuid, gpa, result, size, arg);
+		break;
+	default:
+		break;
+	}
+	if (error)
+		return (error);
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+	if (error)
+		return (error);
+
+	/*
+	 * OF and CF are cleared; the SF, ZF and PF flags are set according
+	 * to the result; AF is undefined.
+	 *
+	 * The updated status flags are obtained by subtracting 0 from 'result'.
+	 */
+	rflags2 = getcc(size, result, 0);
+	rflags &= ~RFLAGS_STATUS_BITS;
+	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
+
+	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
+	return (error);
+}
+
+static int
+emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
+{
+	int error, size;
+	uint64_t regop, memop, op1, op2, rflags, rflags2;
+	enum vm_reg_name reg;
+
+	size = vie->opsize;
+	switch (vie->op.op_byte) {
+	case 0x39:
+	case 0x3B:
+		/*
+		 * 39/r		CMP r/m16, r16
+		 * 39/r		CMP r/m32, r32
+		 * REX.W 39/r	CMP r/m64, r64
+		 *
+		 * 3B/r		CMP r16, r/m16
+		 * 3B/r		CMP r32, r/m32
+		 * REX.W + 3B/r	CMP r64, r/m64
+		 *
+		 * Compare the first operand with the second operand and
+		 * set status flags in EFLAGS register. The comparison is
+		 * performed by subtracting the second operand from the first
+		 * operand and then setting the status flags.
+		 */
+
+		/* Get the register operand */
+		reg = gpr_map[vie->reg];
+		error = vie_read_register(vm, vcpuid, reg, &regop);
+		if (error)
+			return (error);
+
+		/* Get the memory operand */
+		error = memread(vm, vcpuid, gpa, &memop, size, arg);
+		if (error)
+			return (error);
+
+		if (vie->op.op_byte == 0x3B) {
+			op1 = regop;
+			op2 = memop;
+		} else {
+			op1 = memop;
+			op2 = regop;
+		}
+		rflags2 = getcc(size, op1, op2);
+		break;
+	case 0x80:
+	case 0x81:
+	case 0x83:
+		/*
+		 * 80 /7		cmp r/m8, imm8
+		 * REX + 80 /7		cmp r/m8, imm8
+		 *
+		 * 81 /7		cmp r/m16, imm16
+		 * 81 /7		cmp r/m32, imm32
+		 * REX.W + 81 /7	cmp r/m64, imm32 sign-extended to 64
+		 *
+		 * 83 /7		cmp r/m16, imm8 sign-extended to 16
+		 * 83 /7		cmp r/m32, imm8 sign-extended to 32
+		 * REX.W + 83 /7	cmp r/m64, imm8 sign-extended to 64
+		 *
+		 * Compare mem (ModRM:r/m) with immediate and set
+		 * status flags according to the results.  The
+		 * comparison is performed by subtracting the
+		 * immediate from the first operand and then setting
+		 * the status flags.
+		 *
+		 */
+		if (vie->op.op_byte == 0x80)
+			size = 1;
+
+		/* get the first operand */
+		error = memread(vm, vcpuid, gpa, &op1, size, arg);
+		if (error)
+			return (error);
+
+		rflags2 = getcc(size, op1, vie->immediate);
+		break;
+	default:
+		return (EINVAL);
+	}
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+	if (error)
+		return (error);
+	rflags &= ~RFLAGS_STATUS_BITS;
+	rflags |= rflags2 & RFLAGS_STATUS_BITS;
+
+	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
+	return (error);
+}
+
+static int
+emulate_sub(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
+{
+	int error, size;
+	uint64_t nval, rflags, rflags2, val1, val2;
+	enum vm_reg_name reg;
+
+	size = vie->opsize;
+	error = EINVAL;
+
+	switch (vie->op.op_byte) {
+	case 0x2B:
+		/*
+		 * SUB r/m from r and store the result in r
+		 * 
+		 * 2B/r            SUB r16, r/m16
+		 * 2B/r            SUB r32, r/m32
+		 * REX.W + 2B/r    SUB r64, r/m64
+		 */
+
+		/* get the first operand */
+		reg = gpr_map[vie->reg];
+		error = vie_read_register(vm, vcpuid, reg, &val1);
+		if (error)
+			break;
+
+		/* get the second operand */
+		error = memread(vm, vcpuid, gpa, &val2, size, arg);
+		if (error)
+			break;
+
+		/* perform the operation and write the result */
+		nval = val1 - val2;
+		error = vie_update_register(vm, vcpuid, reg, nval, size);
+		break;
+	default:
+		break;
+	}
+
+	if (!error) {
+		rflags2 = getcc(size, val1, val2);
+		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
+		    &rflags);
+		if (error)
+			return (error);
+
+		rflags &= ~RFLAGS_STATUS_BITS;
+		rflags |= rflags2 & RFLAGS_STATUS_BITS;
+		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
+		    rflags, 8);
+	}
+
+	return (error);
+}
+
+static int
+emulate_stack_op(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
+    struct vm_guest_paging *paging, mem_region_read_t memread,
+    mem_region_write_t memwrite, void *arg)
+{
+#ifdef _KERNEL
+	struct vm_copyinfo copyinfo[2];
+#else
+	struct iovec copyinfo[2];
+#endif
+	struct seg_desc ss_desc;
+	uint64_t cr0, rflags, rsp, stack_gla, val;
+	int error, fault, size, stackaddrsize, pushop;
+
+	val = 0;
+	size = vie->opsize;
+	pushop = (vie->op.op_type == VIE_OP_TYPE_PUSH) ? 1 : 0;
+
+	/*
+	 * From "Address-Size Attributes for Stack Accesses", Intel SDM, Vol 1
+	 */
+	if (paging->cpu_mode == CPU_MODE_REAL) {
+		stackaddrsize = 2;
+	} else if (paging->cpu_mode == CPU_MODE_64BIT) {
+		/*
+		 * "Stack Manipulation Instructions in 64-bit Mode", SDM, Vol 3
+		 * - Stack pointer size is always 64-bits.
+		 * - PUSH/POP of 32-bit values is not possible in 64-bit mode.
+		 * - 16-bit PUSH/POP is supported by using the operand size
+		 *   override prefix (66H).
+		 */
+		stackaddrsize = 8;
+		size = vie->opsize_override ? 2 : 8;
+	} else {
+		/*
+		 * In protected or compatibility mode the 'B' flag in the
+		 * stack-segment descriptor determines the size of the
+		 * stack pointer.
+		 */
+		error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_SS, &ss_desc);
+		KASSERT(error == 0, ("%s: error %d getting SS descriptor",
+		    __func__, error));
+		if (SEG_DESC_DEF32(ss_desc.access))
+			stackaddrsize = 4;
+		else
+			stackaddrsize = 2;
+	}
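+	/*
+	 * Worked example: a 16-bit PUSH selected via the 66H operand-size
+	 * override in 64-bit mode uses size = 2 while stackaddrsize stays 8,
+	 * so %rsp is adjusted by 2 but still updated as a 64-bit register.
+	 */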
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
+	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSP, &rsp);
+	KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));
+	if (pushop) {
+		rsp -= size;
+	}
+
+	if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
+	    rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
+	    &stack_gla)) {
+		vm_inject_ss(vm, vcpuid, 0);
+		return (0);
+	}
+
+	if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
+		vm_inject_ss(vm, vcpuid, 0);
+		return (0);
+	}
+
+	if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
+		vm_inject_ac(vm, vcpuid, 0);
+		return (0);
+	}
+
+	error = vm_copy_setup(vm, vcpuid, paging, stack_gla, size,
+	    pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo),
+	    &fault);
+	if (error || fault)
+		return (error);
+
+	if (pushop) {
+		error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
+		if (error == 0)
+			vm_copyout(vm, vcpuid, &val, copyinfo, size);
+	} else {
+		vm_copyin(vm, vcpuid, copyinfo, &val, size);
+		error = memwrite(vm, vcpuid, mmio_gpa, val, size, arg);
+		rsp += size;
+	}
+	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+
+	if (error == 0) {
+		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
+		    stackaddrsize);
+		KASSERT(error == 0, ("error %d updating rsp", error));
+	}
+	return (error);
+}
+
+static int
+emulate_push(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
+    struct vm_guest_paging *paging, mem_region_read_t memread,
+    mem_region_write_t memwrite, void *arg)
+{
+	int error;
+
+	/*
+	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
+	 *
+	 * PUSH is part of the group 5 extended opcodes and is identified
+	 * by ModRM:reg = b110.
+	 */
+	if ((vie->reg & 7) != 6)
+		return (EINVAL);
+
+	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
+	    memwrite, arg);
+	return (error);
+}
+
+static int
+emulate_pop(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
+    struct vm_guest_paging *paging, mem_region_read_t memread,
+    mem_region_write_t memwrite, void *arg)
+{
+	int error;
+
+	/*
+	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
+	 *
+	 * POP is part of the group 1A extended opcodes and is identified
+	 * by ModRM:reg = b000.
+	 */
+	if ((vie->reg & 7) != 0)
+		return (EINVAL);
+
+	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
+	    memwrite, arg);
+	return (error);
+}
+
+static int
+emulate_group1(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+    struct vm_guest_paging *paging, mem_region_read_t memread,
+    mem_region_write_t memwrite, void *memarg)
+{
+	int error;
+
+	switch (vie->reg & 7) {
+	case 0x1:	/* OR */
+		error = emulate_or(vm, vcpuid, gpa, vie,
+		    memread, memwrite, memarg);
+		break;
+	case 0x4:	/* AND */
+		error = emulate_and(vm, vcpuid, gpa, vie,
+		    memread, memwrite, memarg);
+		break;
+	case 0x7:	/* CMP */
+		error = emulate_cmp(vm, vcpuid, gpa, vie,
+		    memread, memwrite, memarg);
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	return (error);
+}
+
+static int
+emulate_bittest(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+    mem_region_read_t memread, mem_region_write_t memwrite, void *memarg)
+{
+	uint64_t val, rflags;
+	int error, bitmask, bitoff;
+
+	/*
+	 * 0F BA is a Group 8 extended opcode.
+	 *
+	 * Currently we only emulate the 'Bit Test' instruction which is
+	 * identified by a ModR/M:reg encoding of 100b.
+	 */
+	if ((vie->reg & 7) != 4)
+		return (EINVAL);
+
+	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
+
+	error = memread(vm, vcpuid, gpa, &val, vie->opsize, memarg);
+	if (error)
+		return (error);
+
+	/*
+	 * Intel SDM, Vol 2, Table 3-2:
+	 * "Range of Bit Positions Specified by Bit Offset Operands"
+	 */
+	bitmask = vie->opsize * 8 - 1;
+	bitoff = vie->immediate & bitmask;
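+	/*
+	 * For example, with a 2-byte operand size the bitmask is 15, so an
+	 * immediate of 17 selects bit position 1.
+	 */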
+
+	/* Copy the bit into the Carry flag in %rflags */
+	if (val & (1UL << bitoff))
+		rflags |= PSL_C;
+	else
+		rflags &= ~PSL_C;
+
+	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
+	KASSERT(error == 0, ("%s: error %d updating rflags", __func__, error));
+
+	return (0);
+}
+
+int
+vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+    struct vm_guest_paging *paging, mem_region_read_t memread,
+    mem_region_write_t memwrite, void *memarg)
+{
+	int error;
+
+	if (!vie->decoded)
+		return (EINVAL);
+
+	switch (vie->op.op_type) {
+	case VIE_OP_TYPE_GROUP1:
+		error = emulate_group1(vm, vcpuid, gpa, vie, paging, memread,
+		    memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_POP:
+		error = emulate_pop(vm, vcpuid, gpa, vie, paging, memread,
+		    memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_PUSH:
+		error = emulate_push(vm, vcpuid, gpa, vie, paging, memread,
+		    memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_CMP:
+		error = emulate_cmp(vm, vcpuid, gpa, vie,
+				    memread, memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_MOV:
+		error = emulate_mov(vm, vcpuid, gpa, vie,
+				    memread, memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_MOVSX:
+	case VIE_OP_TYPE_MOVZX:
+		error = emulate_movx(vm, vcpuid, gpa, vie,
+				     memread, memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_MOVS:
+		error = emulate_movs(vm, vcpuid, gpa, vie, paging, memread,
+		    memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_STOS:
+		error = emulate_stos(vm, vcpuid, gpa, vie, paging, memread,
+		    memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_AND:
+		error = emulate_and(vm, vcpuid, gpa, vie,
+				    memread, memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_OR:
+		error = emulate_or(vm, vcpuid, gpa, vie,
+				    memread, memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_SUB:
+		error = emulate_sub(vm, vcpuid, gpa, vie,
+				    memread, memwrite, memarg);
+		break;
+	case VIE_OP_TYPE_BITTEST:
+		error = emulate_bittest(vm, vcpuid, gpa, vie,
+		    memread, memwrite, memarg);
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	return (error);
+}
+
+int
+vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)
+{
+	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
+	    ("%s: invalid size %d", __func__, size));
+	KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));
+
+	if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
+		return (0);
+
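+	/*
+	 * Example: with CPL 3 and both CR0.AM and RFLAGS.AC set, a 4-byte
+	 * access to a gla ending in 0x2 is misaligned and leads to #AC.
+	 */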
+	return ((gla & (size - 1)) ? 1 : 0);
+}
+
+int
+vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
+{
+	uint64_t mask;
+
+	if (cpu_mode != CPU_MODE_64BIT)
+		return (0);
+
+	/*
+	 * The value of bit 47 in the 'gla' must be replicated in the
+	 * most significant 16 bits.
+	 */
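+	/*
+	 * For instance, 0x00007fffffffffff and 0xffff800000000000 are
+	 * canonical, while 0x0000800000000000 is not.
+	 */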
+	mask = ~((1UL << 48) - 1);
+	if (gla & (1UL << 47))
+		return ((gla & mask) != mask);
+	else
+		return ((gla & mask) != 0);
+}
+
+uint64_t
+vie_size2mask(int size)
+{
+	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
+	    ("vie_size2mask: invalid size %d", size));
+	return (size2mask[size]);
+}
+
+int
+vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
+    struct seg_desc *desc, uint64_t offset, int length, int addrsize,
+    int prot, uint64_t *gla)
+{
+	uint64_t firstoff, low_limit, high_limit, segbase;
+	int glasize, type;
+
+	KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
+	    ("%s: invalid segment %d", __func__, seg));
+	KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
+	    ("%s: invalid operand size %d", __func__, length));
+	KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
+	    ("%s: invalid prot %#x", __func__, prot));
+
+	firstoff = offset;
+	if (cpu_mode == CPU_MODE_64BIT) {
+		KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
+		    "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
+		glasize = 8;
+	} else {
+		KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
+		    "size %d for cpu mode %d", __func__, addrsize, cpu_mode));
+		glasize = 4;
+		/*
+		 * If the segment selector is loaded with a NULL selector
+		 * then the descriptor is unusable and attempting to use
+		 * it results in a #GP(0).
+		 */
+		if (SEG_DESC_UNUSABLE(desc->access))
+			return (-1);
+
+		/* 
+		 * The processor generates a #NP exception when a segment
+		 * register is loaded with a selector that points to a
+		 * descriptor that is not present. If this was the case then
+		 * it would have been checked before the VM-exit.
+		 */
+		KASSERT(SEG_DESC_PRESENT(desc->access),
+		    ("segment %d not present: %#x", seg, desc->access));
+
+		/*
+		 * The descriptor type must indicate a code/data segment.
+		 */
+		type = SEG_DESC_TYPE(desc->access);
+		KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
+		    "descriptor type %#x", seg, type));
+
+		if (prot & PROT_READ) {
+			/* #GP on a read access to an exec-only code segment */
+			if ((type & 0xA) == 0x8)
+				return (-1);
+		}
+
+		if (prot & PROT_WRITE) {
+			/*
+			 * #GP on a write access to a code segment or a
+			 * read-only data segment.
+			 */
+			if (type & 0x8)			/* code segment */
+				return (-1);
+
+			if ((type & 0xA) == 0)		/* read-only data seg */
+				return (-1);
+		}
+
+		/*
+		 * 'desc->limit' is fully expanded taking granularity into
+		 * account.
+		 */
+		if ((type & 0xC) == 0x4) {
+			/* expand-down data segment */
+			low_limit = desc->limit + 1;
+			high_limit = SEG_DESC_DEF32(desc->access) ?
+			    0xffffffff : 0xffff;
+		} else {
+			/* code segment or expand-up data segment */
+			low_limit = 0;
+			high_limit = desc->limit;
+		}
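+		/*
+		 * Example: an expand-down data segment with a limit of
+		 * 0x0fff and DEF32 set accepts offsets 0x1000 through
+		 * 0xffffffff; offsets at or below the limit fault.
+		 */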
+
+		while (length > 0) {
+			offset &= vie_size2mask(addrsize);
+			if (offset < low_limit || offset > high_limit)
+				return (-1);
+			offset++;
+			length--;
+		}
+	}
+
+	/*
+	 * In 64-bit mode all segments except %fs and %gs have a segment
+	 * base address of 0.
+	 */
+	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
+	    seg != VM_REG_GUEST_GS) {
+		segbase = 0;
+	} else {
+		segbase = desc->base;
+	}
+
+	/*
+	 * Truncate 'firstoff' to the effective address size before adding
+	 * it to the segment base.
+	 */
+	firstoff &= vie_size2mask(addrsize);
+	*gla = (segbase + firstoff) & vie_size2mask(glasize);
+	return (0);
+}
+
+#ifdef _KERNEL
+void
+vie_init(struct vie *vie, const char *inst_bytes, int inst_length)
+{
+	KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE,
+	    ("%s: invalid instruction length (%d)", __func__, inst_length));
+
+	bzero(vie, sizeof(struct vie));
+
+	vie->base_register = VM_REG_LAST;
+	vie->index_register = VM_REG_LAST;
+	vie->segment_register = VM_REG_LAST;
+
+	if (inst_length) {
+		bcopy(inst_bytes, vie->inst, inst_length);
+		vie->num_valid = inst_length;
+	}
+}
+
+static int
+pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
+{
+	int error_code = 0;
+
+	if (pte & PG_V)
+		error_code |= PGEX_P;
+	if (prot & VM_PROT_WRITE)
+		error_code |= PGEX_W;
+	if (usermode)
+		error_code |= PGEX_U;
+	if (rsvd)
+		error_code |= PGEX_RSV;
+	if (prot & VM_PROT_EXECUTE)
+		error_code |= PGEX_I;
+
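+	/*
+	 * E.g. a user-mode write that hit a present, read-only mapping
+	 * yields PGEX_P | PGEX_W | PGEX_U.
+	 */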
+	return (error_code);
+}
+
+static void
+ptp_release(void **cookie)
+{
+	if (*cookie != NULL) {
+		vm_gpa_release(*cookie);
+		*cookie = NULL;
+	}
+}
+
+static void *
+ptp_hold(struct vm *vm, int vcpu, vm_paddr_t ptpphys, size_t len, void **cookie)
+{
+	void *ptr;
+
+	ptp_release(cookie);
+	ptr = vm_gpa_hold(vm, vcpu, ptpphys, len, VM_PROT_RW, cookie);
+	return (ptr);
+}
+
+int
+vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+    uint64_t gla, int prot, uint64_t *gpa, int *guest_fault)
+{
+	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
+	u_int retries;
+	uint64_t *ptpbase, ptpphys, pte, pgsize;
+	uint32_t *ptpbase32, pte32;
+	void *cookie;
+
+	*guest_fault = 0;
+
+	usermode = (paging->cpl == 3 ? 1 : 0);
+	writable = prot & VM_PROT_WRITE;
+	cookie = NULL;
+	retval = 0;
+	retries = 0;
+restart:
+	ptpphys = paging->cr3;		/* root of the page tables */
+	ptp_release(&cookie);
+	if (retries++ > 0)
+		maybe_yield();
+
+	if (vie_canonical_check(paging->cpu_mode, gla)) {
+		/*
+		 * XXX assuming a non-stack reference otherwise a stack fault
+		 * should be generated.
+		 */
+		vm_inject_gp(vm, vcpuid);
+		goto fault;
+	}
+
+	if (paging->paging_mode == PAGING_MODE_FLAT) {
+		*gpa = gla;
+		goto done;
+	}
+
+	if (paging->paging_mode == PAGING_MODE_32) {
+		nlevels = 2;
+		while (--nlevels >= 0) {
+			/* Zero out the lower 12 bits. */
+			ptpphys &= ~0xfff;
+
+			ptpbase32 = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE,
+			    &cookie);
+
+			if (ptpbase32 == NULL)
+				goto error;
+
+			ptpshift = PAGE_SHIFT + nlevels * 10;
+			ptpindex = (gla >> ptpshift) & 0x3FF;
+			pgsize = 1UL << ptpshift;
+
+			pte32 = ptpbase32[ptpindex];
+
+			if ((pte32 & PG_V) == 0 ||
+			    (usermode && (pte32 & PG_U) == 0) ||
+			    (writable && (pte32 & PG_RW) == 0)) {
+				pfcode = pf_error_code(usermode, prot, 0,
+				    pte32);
+				vm_inject_pf(vm, vcpuid, pfcode, gla);
+				goto fault;
+			}
+
+			/*
+			 * Emulate the x86 MMU's management of the accessed
+			 * and dirty flags. While the accessed flag is set
+			 * at every level of the page table, the dirty flag
+			 * is only set at the last level providing the guest
+			 * physical address.
+			 */
+			if ((pte32 & PG_A) == 0) {
+				if (atomic_cmpset_32(&ptpbase32[ptpindex],
+				    pte32, pte32 | PG_A) == 0) {
+					goto restart;
+				}
+			}
+
+			/* XXX must be ignored if CR4.PSE=0 */
+			if (nlevels > 0 && (pte32 & PG_PS) != 0)
+				break;
+
+			ptpphys = pte32;
+		}
+
+		/* Set the dirty bit in the page table entry if necessary */
+		if (writable && (pte32 & PG_M) == 0) {
+			if (atomic_cmpset_32(&ptpbase32[ptpindex],
+			    pte32, pte32 | PG_M) == 0) {
+				goto restart;
+			}
+		}
+
+		/* Zero out the lower 'ptpshift' bits */
+		pte32 >>= ptpshift; pte32 <<= ptpshift;
+		*gpa = pte32 | (gla & (pgsize - 1));
+		goto done;
+	}
+
+	if (paging->paging_mode == PAGING_MODE_PAE) {
+		/* Zero out the lower 5 bits and the upper 32 bits */
+		ptpphys &= 0xffffffe0UL;
+
+		ptpbase = ptp_hold(vm, vcpuid, ptpphys, sizeof(*ptpbase) * 4,
+		    &cookie);
+		if (ptpbase == NULL)
+			goto error;
+
+		ptpindex = (gla >> 30) & 0x3;
+
+		pte = ptpbase[ptpindex];
+
+		if ((pte & PG_V) == 0) {
+			pfcode = pf_error_code(usermode, prot, 0, pte);
+			vm_inject_pf(vm, vcpuid, pfcode, gla);
+			goto fault;
+		}
+
+		ptpphys = pte;
+
+		nlevels = 2;
+	} else
+		nlevels = 4;
+	while (--nlevels >= 0) {
+		/* Zero out the lower 12 bits and the upper 12 bits */
+		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;
+
+		ptpbase = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE, &cookie);
+		if (ptpbase == NULL)
+			goto error;
+
+		ptpshift = PAGE_SHIFT + nlevels * 9;
+		ptpindex = (gla >> ptpshift) & 0x1FF;
+		pgsize = 1UL << ptpshift;
+
+		pte = ptpbase[ptpindex];
+
+		if ((pte & PG_V) == 0 ||
+		    (usermode && (pte & PG_U) == 0) ||
+		    (writable && (pte & PG_RW) == 0)) {
+			pfcode = pf_error_code(usermode, prot, 0, pte);
+			vm_inject_pf(vm, vcpuid, pfcode, gla);
+			goto fault;
+		}
+
+		/* Set the accessed bit in the page table entry */
+		if ((pte & PG_A) == 0) {
+			if (atomic_cmpset_64(&ptpbase[ptpindex],
+			    pte, pte | PG_A) == 0) {
+				goto restart;
+			}
+		}
+
+		if (nlevels > 0 && (pte & PG_PS) != 0) {
+			if (pgsize > 1 * GB) {
+				pfcode = pf_error_code(usermode, prot, 1, pte);
+				vm_inject_pf(vm, vcpuid, pfcode, gla);
+				goto fault;
+			}
+			break;
+		}
+
+		ptpphys = pte;
+	}
+
+	/* Set the dirty bit in the page table entry if necessary */
+	if (writable && (pte & PG_M) == 0) {
+		if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)
+			goto restart;
+	}
+
+	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
+	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
+	*gpa = pte | (gla & (pgsize - 1));
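+	/*
+	 * For a 2MB superpage, for example, ptpshift is 21 and the low
+	 * 21 bits of the gla are carried over into the gpa.
+	 */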
+done:
+	ptp_release(&cookie);
+	KASSERT(retval == 0 || retval == EFAULT, ("%s: unexpected retval %d",
+	    __func__, retval));
+	return (retval);
+error:
+	retval = EFAULT;
+	goto done;
+fault:
+	*guest_fault = 1;
+	goto done;
+}
+
+int
+vmm_fetch_instruction(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+    uint64_t rip, int inst_length, struct vie *vie, int *faultptr)
+{
+	struct vm_copyinfo copyinfo[2];
+	int error, prot;
+
+	if (inst_length > VIE_INST_SIZE)
+		panic("vmm_fetch_instruction: invalid length %d", inst_length);
+
+	prot = PROT_READ | PROT_EXEC;
+	error = vm_copy_setup(vm, vcpuid, paging, rip, inst_length, prot,
+	    copyinfo, nitems(copyinfo), faultptr);
+	if (error || *faultptr)
+		return (error);
+
+	vm_copyin(vm, vcpuid, copyinfo, vie->inst, inst_length);
+	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+	vie->num_valid = inst_length;
+	return (0);
+}
+
+static int
+vie_peek(struct vie *vie, uint8_t *x)
+{
+
+	if (vie->num_processed < vie->num_valid) {
+		*x = vie->inst[vie->num_processed];
+		return (0);
+	} else
+		return (-1);
+}
+
+static void
+vie_advance(struct vie *vie)
+{
+
+	vie->num_processed++;
+}
+
+static bool
+segment_override(uint8_t x, int *seg)
+{
+
+	switch (x) {
+	case 0x2E:
+		*seg = VM_REG_GUEST_CS;
+		break;
+	case 0x36:
+		*seg = VM_REG_GUEST_SS;
+		break;
+	case 0x3E:
+		*seg = VM_REG_GUEST_DS;
+		break;
+	case 0x26:
+		*seg = VM_REG_GUEST_ES;
+		break;
+	case 0x64:
+		*seg = VM_REG_GUEST_FS;
+		break;
+	case 0x65:
+		*seg = VM_REG_GUEST_GS;
+		break;
+	default:
+		return (false);
+	}
+	return (true);
+}
+
+static int
+decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
+{
+	uint8_t x;
+
+	while (1) {
+		if (vie_peek(vie, &x))
+			return (-1);
+
+		if (x == 0x66)
+			vie->opsize_override = 1;
+		else if (x == 0x67)
+			vie->addrsize_override = 1;
+		else if (x == 0xF3)
+			vie->repz_present = 1;
+		else if (x == 0xF2)
+			vie->repnz_present = 1;
+		else if (segment_override(x, &vie->segment_register))
+			vie->segment_override = 1;
+		else
+			break;
+
+		vie_advance(vie);
+	}
+
+	/*
+	 * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:
+	 * - Only one REX prefix is allowed per instruction.
+	 * - The REX prefix must immediately precede the opcode byte or the
+	 *   escape opcode byte.
+	 * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3)
+	 *   the mandatory prefix must come before the REX prefix.
+	 */
+	if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) {
+		vie->rex_present = 1;
+		vie->rex_w = x & 0x8 ? 1 : 0;
+		vie->rex_r = x & 0x4 ? 1 : 0;
+		vie->rex_x = x & 0x2 ? 1 : 0;
+		vie->rex_b = x & 0x1 ? 1 : 0;
+		vie_advance(vie);
+	}
+
+	/*
+	 * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1
+	 */
+	if (cpu_mode == CPU_MODE_64BIT) {
+		/*
+		 * Default address size is 64-bits and default operand size
+		 * is 32-bits.
+		 */
+		vie->addrsize = vie->addrsize_override ? 4 : 8;
+		if (vie->rex_w)
+			vie->opsize = 8;
+		else if (vie->opsize_override)
+			vie->opsize = 2;
+		else
+			vie->opsize = 4;
+	} else if (cs_d) {
+		/* Default address and operand sizes are 32-bits */
+		vie->addrsize = vie->addrsize_override ? 2 : 4;
+		vie->opsize = vie->opsize_override ? 2 : 4;
+	} else {
+		/* Default address and operand sizes are 16-bits */
+		vie->addrsize = vie->addrsize_override ? 4 : 2;
+		vie->opsize = vie->opsize_override ? 4 : 2;
+	}
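+	/*
+	 * Example: in 64-bit mode a bare MOV uses addrsize 8 and opsize 4,
+	 * a 66H prefix shrinks the operand size to 2, and REX.W widens it
+	 * to 8 (REX.W takes precedence over 66H).
+	 */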
+	return (0);
+}
+
+static int
+decode_two_byte_opcode(struct vie *vie)
+{
+	uint8_t x;
+
+	if (vie_peek(vie, &x))
+		return (-1);
+
+	vie->op = two_byte_opcodes[x];
+
+	if (vie->op.op_type == VIE_OP_TYPE_NONE)
+		return (-1);
+
+	vie_advance(vie);
+	return (0);
+}
+
+static int
+decode_opcode(struct vie *vie)
+{
+	uint8_t x;
+
+	if (vie_peek(vie, &x))
+		return (-1);
+
+	vie->op = one_byte_opcodes[x];
+
+	if (vie->op.op_type == VIE_OP_TYPE_NONE)
+		return (-1);
+
+	vie_advance(vie);
+
+	if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
+		return (decode_two_byte_opcode(vie));
+
+	return (0);
+}
+
+static int
+decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
+{
+	uint8_t x;
+
+	if (vie->op.op_flags & VIE_OP_F_NO_MODRM)
+		return (0);
+
+	if (cpu_mode == CPU_MODE_REAL)
+		return (-1);
+
+	if (vie_peek(vie, &x))
+		return (-1);
+
+	vie->mod = (x >> 6) & 0x3;
+	vie->rm =  (x >> 0) & 0x7;
+	vie->reg = (x >> 3) & 0x7;
+
+	/*
+	 * A direct addressing mode makes no sense in the context of an EPT
+	 * fault. There has to be a memory access involved to cause the
+	 * EPT fault.
+	 */
+	if (vie->mod == VIE_MOD_DIRECT)
+		return (-1);
+
+	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
+	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
+		/*
+		 * Table 2-5: Special Cases of REX Encodings
+		 *
+		 * mod=0, r/m=5 is used in the compatibility mode to
+		 * indicate a disp32 without a base register.
+		 *
+		 * mod!=3, r/m=4 is used in the compatibility mode to
+		 * indicate that the SIB byte is present.
+		 *
+		 * The 'b' bit in the REX prefix is don't care in
+		 * this case.
+		 */
+	} else {
+		vie->rm |= (vie->rex_b << 3);
+	}
+
+	vie->reg |= (vie->rex_r << 3);
+
+	/* SIB */
+	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
+		goto done;
+
+	vie->base_register = gpr_map[vie->rm];
+
+	switch (vie->mod) {
+	case VIE_MOD_INDIRECT_DISP8:
+		vie->disp_bytes = 1;
+		break;
+	case VIE_MOD_INDIRECT_DISP32:
+		vie->disp_bytes = 4;
+		break;
+	case VIE_MOD_INDIRECT:
+		if (vie->rm == VIE_RM_DISP32) {
+			vie->disp_bytes = 4;
+			/*
+			 * Table 2-7. RIP-Relative Addressing
+			 *
+			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
+			 * whereas in compatibility mode it just implies disp32.
+			 */
+
+			if (cpu_mode == CPU_MODE_64BIT)
+				vie->base_register = VM_REG_GUEST_RIP;
+			else
+				vie->base_register = VM_REG_LAST;
+		}
+		break;
+	}
+
+done:
+	vie_advance(vie);
+
+	return (0);
+}
+
+static int
+decode_sib(struct vie *vie)
+{
+	uint8_t x;
+
+	/* Proceed only if SIB byte is present */
+	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
+		return (0);
+
+	if (vie_peek(vie, &x))
+		return (-1);
+
+	/* De-construct the SIB byte */
+	vie->ss = (x >> 6) & 0x3;
+	vie->index = (x >> 3) & 0x7;
+	vie->base = (x >> 0) & 0x7;
+
+	/* Apply the REX prefix modifiers */
+	vie->index |= vie->rex_x << 3;
+	vie->base |= vie->rex_b << 3;
+
+	switch (vie->mod) {
+	case VIE_MOD_INDIRECT_DISP8:
+		vie->disp_bytes = 1;
+		break;
+	case VIE_MOD_INDIRECT_DISP32:
+		vie->disp_bytes = 4;
+		break;
+	}
+
+	if (vie->mod == VIE_MOD_INDIRECT &&
+	    (vie->base == 5 || vie->base == 13)) {
+		/*
+		 * Special case when base register is unused if mod = 0
+		 * and base = %rbp or %r13.
+		 *
+		 * Documented in:
+		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
+		 * Table 2-5: Special Cases of REX Encodings
+		 */
+		vie->disp_bytes = 4;
+	} else {
+		vie->base_register = gpr_map[vie->base];
+	}
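+	/*
+	 * Example: mod=00 with a SIB byte of 0x25 (index=100b, base=101b)
+	 * encodes a bare disp32 with no base and no index register.
+	 */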
+
+	/*
+	 * All encodings of 'index' are valid except for %rsp (4).
+	 *
+	 * Documented in:
+	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
+	 * Table 2-5: Special Cases of REX Encodings
+	 */
+	if (vie->index != 4)
+		vie->index_register = gpr_map[vie->index];
+
+	/* 'scale' makes sense only in the context of an index register */
+	if (vie->index_register < VM_REG_LAST)
+		vie->scale = 1 << vie->ss;
+
+	vie_advance(vie);
+
+	return (0);
+}
+
+static int
+decode_displacement(struct vie *vie)
+{
+	int n, i;
+	uint8_t x;
+
+	union {
+		char	buf[4];
+		int8_t	signed8;
+		int32_t	signed32;
+	} u;
+
+	if ((n = vie->disp_bytes) == 0)
+		return (0);
+
+	if (n != 1 && n != 4)
+		panic("decode_displacement: invalid disp_bytes %d", n);
+
+	for (i = 0; i < n; i++) {
+		if (vie_peek(vie, &x))
+			return (-1);
+
+		u.buf[i] = x;
+		vie_advance(vie);
+	}
+
+	if (n == 1)
+		vie->displacement = u.signed8;		/* sign-extended */
+	else
+		vie->displacement = u.signed32;		/* sign-extended */
+
+	return (0);
+}
+
+static int
+decode_immediate(struct vie *vie)
+{
+	int i, n;
+	uint8_t x;
+	union {
+		char	buf[4];
+		int8_t	signed8;
+		int16_t	signed16;
+		int32_t	signed32;
+	} u;
+
+	/* Figure out immediate operand size (if any) */
+	if (vie->op.op_flags & VIE_OP_F_IMM) {
+		/*
+		 * Section 2.2.1.5 "Immediates", Intel SDM:
+		 * In 64-bit mode the typical size of immediate operands
+		 * remains 32-bits. When the operand size is 64-bits, the
+		 * processor sign-extends all immediates to 64-bits prior
+		 * to their use.
+		 */
+		if (vie->opsize == 4 || vie->opsize == 8)
+			vie->imm_bytes = 4;
+		else
+			vie->imm_bytes = 2;
+	} else if (vie->op.op_flags & VIE_OP_F_IMM8) {
+		vie->imm_bytes = 1;
+	}
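+	/*
+	 * Example: the 83 /4 form of AND carries an imm8, so a byte of
+	 * 0xf0 decodes to an immediate of 0xfffffffffffffff0 after the
+	 * sign-extension below.
+	 */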
+
+	if ((n = vie->imm_bytes) == 0)
+		return (0);
+
+	KASSERT(n == 1 || n == 2 || n == 4,
+	    ("%s: invalid number of immediate bytes: %d", __func__, n));
+
+	for (i = 0; i < n; i++) {
+		if (vie_peek(vie, &x))
+			return (-1);
+
+		u.buf[i] = x;
+		vie_advance(vie);
+	}
+
+	/* sign-extend the immediate value before use */
+	if (n == 1)
+		vie->immediate = u.signed8;
+	else if (n == 2)
+		vie->immediate = u.signed16;
+	else
+		vie->immediate = u.signed32;
+
+	return (0);
+}
+
+static int
+decode_moffset(struct vie *vie)
+{
+	int i, n;
+	uint8_t x;
+	union {
+		char	buf[8];
+		uint64_t u64;
+	} u;
+
+	if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0)
+		return (0);
+
+	/*
+	 * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
+	 * The memory offset size follows the address-size of the instruction.
+	 */
+	n = vie->addrsize;
+	KASSERT(n == 2 || n == 4 || n == 8, ("invalid moffset bytes: %d", n));
+
+	u.u64 = 0;
+	for (i = 0; i < n; i++) {
+		if (vie_peek(vie, &x))
+			return (-1);
+
+		u.buf[i] = x;
+		vie_advance(vie);
+	}
+	vie->displacement = u.u64;
+	return (0);
+}
+
+/*
+ * Verify that the 'guest linear address' provided as collateral of the nested
+ * page table fault matches with our instruction decoding.
+ * page table fault matches our instruction decoding.
+static int
+verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie,
+    enum vm_cpu_mode cpu_mode)
+{
+	int error;
+	uint64_t base, segbase, idx, gla2;
+	enum vm_reg_name seg;
+	struct seg_desc desc;
+
+	/* Skip 'gla' verification */
+	if (gla == VIE_INVALID_GLA)
+		return (0);
+
+	base = 0;
+	if (vie->base_register != VM_REG_LAST) {
+		error = vm_get_register(vm, cpuid, vie->base_register, &base);
+		if (error) {
+			printf("verify_gla: error %d getting base reg %d\n",
+				error, vie->base_register);
+			return (-1);
+		}
+
+		/*
+		 * RIP-relative addressing starts from the following
+		 * instruction
+		 */
+		if (vie->base_register == VM_REG_GUEST_RIP)
+			base += vie->num_processed;
+	}
+
+	idx = 0;
+	if (vie->index_register != VM_REG_LAST) {
+		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
+		if (error) {
+			printf("verify_gla: error %d getting index reg %d\n",
+				error, vie->index_register);
+			return (-1);
+		}
+	}
+
+	/*
+	 * From "Specifying a Segment Selector", Intel SDM, Vol 1
+	 *
+	 * In 64-bit mode, segmentation is generally (but not
+	 * completely) disabled.  The exceptions are the FS and GS
+	 * segments.
+	 *
+	 * In legacy IA-32 mode, when the ESP or EBP register is used
+	 * as the base, the SS segment is the default segment.  For
+	 * other data references, except those relative to the stack
+	 * or to a string destination, the DS segment is the default.
+	 * These defaults can be overridden to allow other segments to
+	 * be accessed.
+	 */
+	if (vie->segment_override)
+		seg = vie->segment_register;
+	else if (vie->base_register == VM_REG_GUEST_RSP ||
+	    vie->base_register == VM_REG_GUEST_RBP)
+		seg = VM_REG_GUEST_SS;
+	else
+		seg = VM_REG_GUEST_DS;
+	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
+	    seg != VM_REG_GUEST_GS) {
+		segbase = 0;
+	} else {
+		error = vm_get_seg_desc(vm, cpuid, seg, &desc);
+		if (error) {
+			printf("verify_gla: error %d getting segment"
+			       " descriptor %d\n", error,
+			       vie->segment_register);
+			return (-1);
+		}
+		segbase = desc.base;
+	}
+
+	gla2 = segbase + base + vie->scale * idx + vie->displacement;
+	gla2 &= size2mask[vie->addrsize];
+	if (gla != gla2) {
+		printf("verify_gla mismatch: segbase(0x%0lx), "
+		       "base(0x%0lx), scale(%d), index(0x%0lx), "
+		       "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
+		       segbase, base, vie->scale, idx, vie->displacement,
+		       gla, gla2);
+		return (-1);
+	}
+
+	return (0);
+}
+
+int
+vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
+		       enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
+{
+
+	if (decode_prefixes(vie, cpu_mode, cs_d))
+		return (-1);
+
+	if (decode_opcode(vie))
+		return (-1);
+
+	if (decode_modrm(vie, cpu_mode))
+		return (-1);
+
+	if (decode_sib(vie))
+		return (-1);
+
+	if (decode_displacement(vie))
+		return (-1);
+
+	if (decode_immediate(vie))
+		return (-1);
+
+	if (decode_moffset(vie))
+		return (-1);
+
+	if ((vie->op.op_flags & VIE_OP_F_NO_GLA_VERIFICATION) == 0) {
+		if (verify_gla(vm, cpuid, gla, vie, cpu_mode))
+			return (-1);
+	}
+
+	vie->decoded = 1;	/* success */
+
+	return (0);
+}
+#endif	/* _KERNEL */


Property changes on: trunk/sys/amd64/vmm/vmm_instruction_emul.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_ioport.c
===================================================================
--- trunk/sys/amd64/vmm/vmm_ioport.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_ioport.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,177 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_ioport.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/vmm.h>
+#include <machine/vmm_instruction_emul.h>
+
+#include "vatpic.h"
+#include "vatpit.h"
+#include "vpmtmr.h"
+#include "vrtc.h"
+#include "vmm_ioport.h"
+#include "vmm_ktr.h"
+
+#define	MAX_IOPORTS		1280
+
+ioport_handler_func_t ioport_handler[MAX_IOPORTS] = {
+	[TIMER_MODE] = vatpit_handler,
+	[TIMER_CNTR0] = vatpit_handler,
+	[TIMER_CNTR1] = vatpit_handler,
+	[TIMER_CNTR2] = vatpit_handler,
+	[NMISC_PORT] = vatpit_nmisc_handler,
+	[IO_ICU1] = vatpic_master_handler,
+	[IO_ICU1 + ICU_IMR_OFFSET] = vatpic_master_handler,
+	[IO_ICU2] = vatpic_slave_handler,
+	[IO_ICU2 + ICU_IMR_OFFSET] = vatpic_slave_handler,
+	[IO_ELCR1] = vatpic_elc_handler,
+	[IO_ELCR2] = vatpic_elc_handler,
+	[IO_PMTMR] = vpmtmr_handler,
+	[IO_RTC] = vrtc_addr_handler,
+	[IO_RTC + 1] = vrtc_data_handler,
+};
+
+#ifdef KTR
+static const char *
+inout_instruction(struct vm_exit *vmexit)
+{
+	int index;
+
+	static const char *iodesc[] = {
+		"outb", "outw", "outl",
+		"inb", "inw", "inl",
+		"outsb", "outsw", "outsd",
+		"insb", "insw", "insd",
+	};
+
+	switch (vmexit->u.inout.bytes) {
+	case 1:
+		index = 0;
+		break;
+	case 2:
+		index = 1;
+		break;
+	default:
+		index = 2;
+		break;
+	}
+
+	if (vmexit->u.inout.in)
+		index += 3;
+
+	if (vmexit->u.inout.string)
+		index += 6;
+
+	KASSERT(index < nitems(iodesc), ("%s: invalid index %d",
+	    __func__, index));
+
+	return (iodesc[index]);
+}
+#endif	/* KTR */
+
+static int
+emulate_inout_port(struct vm *vm, int vcpuid, struct vm_exit *vmexit,
+    bool *retu)
+{
+	ioport_handler_func_t handler;
+	uint32_t mask, val;
+	int error;
+
+	/*
+	 * If there is no handler for the I/O port then punt to userspace.
+	 */
+	if (vmexit->u.inout.port >= MAX_IOPORTS ||
+	    (handler = ioport_handler[vmexit->u.inout.port]) == NULL) {
+		*retu = true;
+		return (0);
+	}
+
+	mask = vie_size2mask(vmexit->u.inout.bytes);
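+	/* e.g. bytes = 1, 2 or 4 gives a mask of 0xff, 0xffff or 0xffffffff */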
+
+	if (!vmexit->u.inout.in) {
+		val = vmexit->u.inout.eax & mask;
+	}
+
+	error = (*handler)(vm, vcpuid, vmexit->u.inout.in,
+	    vmexit->u.inout.port, vmexit->u.inout.bytes, &val);
+	if (error) {
+		/*
+		 * The value returned by this function is also the return value
+		 * of vm_run(). This needs to be a positive number otherwise it
+		 * can be interpreted as a "pseudo-error" like ERESTART.
+		 *
+		 * Enforce this by mapping all errors to EIO.
+		 */
+		return (EIO);
+	}
+
+	if (vmexit->u.inout.in) {
+		vmexit->u.inout.eax &= ~mask;
+		vmexit->u.inout.eax |= val & mask;
+		error = vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX,
+		    vmexit->u.inout.eax);
+		KASSERT(error == 0, ("%s: error %d setting guest "
+		    "rax register", __func__, error));
+	}
+	*retu = false;
+	return (0);
+}
+
+static int
+emulate_inout_str(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
+{
+	*retu = true;
+	return (0);	/* Return to userspace to finish emulation */
+}
+
+int
+vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
+{
+	int bytes, error;
+
+	bytes = vmexit->u.inout.bytes;
+	KASSERT(bytes == 1 || bytes == 2 || bytes == 4,
+	    ("vm_handle_inout: invalid operand size %d", bytes));
+
+	if (vmexit->u.inout.string)
+		error = emulate_inout_str(vm, vcpuid, vmexit, retu);
+	else
+		error = emulate_inout_port(vm, vcpuid, vmexit, retu);
+
+	VCPU_CTR4(vm, vcpuid, "%s%s 0x%04x: %s",
+	    vmexit->u.inout.rep ? "rep " : "",
+	    inout_instruction(vmexit),
+	    vmexit->u.inout.port,
+	    error ? "error" : (*retu ? "userspace" : "handled"));
+
+	return (error);
+}


Property changes on: trunk/sys/amd64/vmm/vmm_ioport.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_ioport.h
===================================================================
--- trunk/sys/amd64/vmm/vmm_ioport.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_ioport.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,38 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_ioport.h 276429 2014-12-30 22:22:46Z neel $
+ */
+
+#ifndef	_VMM_IOPORT_H_
+#define	_VMM_IOPORT_H_
+
+typedef int (*ioport_handler_func_t)(struct vm *vm, int vcpuid,
+    bool in, int port, int bytes, uint32_t *val);
+
+int vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vme, bool *retu);
+
+#endif	/* _VMM_IOPORT_H_ */


Property changes on: trunk/sys/amd64/vmm/vmm_ioport.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_ipi.c
===================================================================
--- trunk/sys/amd64/vmm/vmm_ipi.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_ipi.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,94 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_ipi.c 266339 2014-05-17 19:11:08Z jhb $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_ipi.c 266339 2014-05-17 19:11:08Z jhb $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/bus.h>
+
+#include <machine/intr_machdep.h>
+#include <machine/apicvar.h>
+#include <machine/segments.h>
+#include <machine/md_var.h>
+
+#include <machine/vmm.h>
+#include "vmm_ipi.h"
+
+extern inthand_t IDTVEC(rsvd), IDTVEC(justreturn);
+
+CTASSERT(APIC_SPURIOUS_INT == 255);
+
+int
+vmm_ipi_alloc(void)
+{
+	int idx;
+	uintptr_t func;
+	struct gate_descriptor *ip;
+
+	/*
+	 * Search backwards from the highest IDT vector available for use
+	 * as our IPI vector. We install the 'justreturn' handler at that
+	 * vector and use it to interrupt the vcpus.
+	 *
+	 * We do this because the IPI_AST is heavyweight and saves all
+	 * registers in the trapframe. This is overkill for our use case
+	 * which is simply to EOI the interrupt and return.
+	 */
+	idx = APIC_SPURIOUS_INT;
+	while (--idx >= APIC_IPI_INTS) {
+		ip = &idt[idx];
+		func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
+		if (func == (uintptr_t)&IDTVEC(rsvd)) {
+			setidt(idx, IDTVEC(justreturn), SDT_SYSIGT,
+			       SEL_KPL, 0);
+			return (idx);
+		}
+	}
+	return (0);
+}
+
+void
+vmm_ipi_free(int ipinum)
+{
+	uintptr_t func;
+	struct gate_descriptor *ip;
+
+	KASSERT(ipinum >= APIC_IPI_INTS && ipinum < APIC_SPURIOUS_INT,
+	    ("invalid ipi %d", ipinum));
+
+	ip = &idt[ipinum];
+	func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
+	KASSERT(func == (uintptr_t)&IDTVEC(justreturn),
+	    ("invalid ipi %d", ipinum));
+
+	setidt(ipinum, IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
+}


Property changes on: trunk/sys/amd64/vmm/vmm_ipi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
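
The vector scan in vmm_ipi_alloc() above rebuilds each IDT handler address from the two offset fields of the amd64 gate descriptor (bits 15:0 in gd_looffset, the remaining bits in gd_hioffset) and compares it against the reserved-vector stub. A minimal userland sketch of that split-and-reassemble step, using a simplified hypothetical descriptor layout rather than the real struct gate_descriptor from machine/segments.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, trimmed-down stand-in for the amd64 gate descriptor:
     * only the two offset fields that the allocation scan consults. */
    struct fake_gate {
            uint64_t looffset : 16;         /* bits 15:0 of the handler address */
            uint64_t hioffset : 48;         /* bits 63:16 of the handler address */
    };

    int
    main(void)
    {
            uint64_t handler = 0xffffffff80123456ULL;       /* made-up address */
            struct fake_gate g;
            uint64_t func;

            /* Split the address the way setidt() stores it ... */
            g.looffset = handler & 0xffff;
            g.hioffset = handler >> 16;

            /* ... and reassemble it the way the scan compares it. */
            func = ((uint64_t)g.hioffset << 16) | g.looffset;

            printf("reassembled %#jx, match=%d\n", (uintmax_t)func,
                func == handler);
            return (0);
    }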
Added: trunk/sys/amd64/vmm/vmm_ipi.h
===================================================================
--- trunk/sys/amd64/vmm/vmm_ipi.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_ipi.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,36 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_ipi.h 266339 2014-05-17 19:11:08Z jhb $
+ */
+
+#ifndef _VMM_IPI_H_
+#define _VMM_IPI_H_
+
+int	vmm_ipi_alloc(void);
+void	vmm_ipi_free(int num);
+
+#endif


Property changes on: trunk/sys/amd64/vmm/vmm_ipi.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_ktr.h
===================================================================
--- trunk/sys/amd64/vmm/vmm_ktr.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_ktr.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,70 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_ktr.h 268976 2014-07-22 04:39:16Z jhb $
+ */
+
+#ifndef _VMM_KTR_H_
+#define	_VMM_KTR_H_
+
+#include <sys/ktr.h>
+#include <sys/pcpu.h>
+
+#ifndef KTR_VMM
+#define	KTR_VMM	KTR_GEN
+#endif
+
+#define	VCPU_CTR0(vm, vcpuid, format)					\
+CTR2(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid))
+
+#define	VCPU_CTR1(vm, vcpuid, format, p1)				\
+CTR3(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1))
+
+#define	VCPU_CTR2(vm, vcpuid, format, p1, p2)				\
+CTR4(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2))
+
+#define	VCPU_CTR3(vm, vcpuid, format, p1, p2, p3)			\
+CTR5(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2), (p3))
+
+#define	VCPU_CTR4(vm, vcpuid, format, p1, p2, p3, p4)			\
+CTR6(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid),		\
+    (p1), (p2), (p3), (p4))
+
+#define	VM_CTR0(vm, format)						\
+CTR1(KTR_VMM, "vm %s: " format, vm_name((vm)))
+
+#define	VM_CTR1(vm, format, p1)						\
+CTR2(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1))
+
+#define	VM_CTR2(vm, format, p1, p2)					\
+CTR3(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2))
+
+#define	VM_CTR3(vm, format, p1, p2, p3)					\
+CTR4(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3))
+
+#define	VM_CTR4(vm, format, p1, p2, p3, p4)				\
+CTR5(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3), (p4))
+#endif


Property changes on: trunk/sys/amd64/vmm/vmm_ktr.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
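
The VCPU_CTR*()/VM_CTR*() wrappers above only prepend a "vm name[vcpuid]:" prefix and forward to the fixed-arity ktr(9) CTR*() macros, so a VCPU_CTR1() call becomes a CTR3() record. A small userland sketch with CTR3() mocked as printf and vm_name() replaced by a plain string, just to make the expansion visible (the mock is an assumption for illustration; in the kernel CTR3() writes to the ktr trace buffer):

    #include <stdio.h>

    /* Userland mock of the ktr(9) CTR3() macro; the class argument is ignored. */
    #define KTR_VMM         0
    #define CTR3(m, format, p1, p2, p3)     printf(format "\n", (p1), (p2), (p3))

    /* Same shape as the wrapper in vmm_ktr.h, with vm_name(vm) replaced by a
     * plain string parameter for this sketch. */
    #define VCPU_CTR1(name, vcpuid, format, p1)                             \
            CTR3(KTR_VMM, "vm %s[%d]: " format, (name), (vcpuid), (p1))

    int
    main(void)
    {
            /* Prints: vm testvm[0]: cpuid 0x1 */
            VCPU_CTR1("testvm", 0, "cpuid %#x", 1u);
            return (0);
    }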
Added: trunk/sys/amd64/vmm/vmm_lapic.c
===================================================================
--- trunk/sys/amd64/vmm/vmm_lapic.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_lapic.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,249 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_lapic.c 284899 2015-06-28 01:21:55Z neel $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_lapic.c 284899 2015-06-28 01:21:55Z neel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/smp.h>
+
+#include <x86/specialreg.h>
+#include <x86/apicreg.h>
+
+#include <machine/vmm.h>
+#include "vmm_ipi.h"
+#include "vmm_ktr.h"
+#include "vmm_lapic.h"
+#include "vlapic.h"
+
+/*
+ * Some MSI message definitions
+ */
+#define	MSI_X86_ADDR_MASK	0xfff00000
+#define	MSI_X86_ADDR_BASE	0xfee00000
+#define	MSI_X86_ADDR_RH		0x00000008	/* Redirection Hint */
+#define	MSI_X86_ADDR_LOG	0x00000004	/* Destination Mode */
+
+int
+lapic_set_intr(struct vm *vm, int cpu, int vector, bool level)
+{
+	struct vlapic *vlapic;
+
+	if (cpu < 0 || cpu >= VM_MAXCPU)
+		return (EINVAL);
+
+	/*
+	 * According to section "Maskable Hardware Interrupts" in Intel SDM
+	 * vectors 16 through 255 can be delivered through the local APIC.
+	 */
+	if (vector < 16 || vector > 255)
+		return (EINVAL);
+
+	vlapic = vm_lapic(vm, cpu);
+	if (vlapic_set_intr_ready(vlapic, vector, level))
+		vcpu_notify_event(vm, cpu, true);
+	return (0);
+}
+
+int
+lapic_set_local_intr(struct vm *vm, int cpu, int vector)
+{
+	struct vlapic *vlapic;
+	cpuset_t dmask;
+	int error;
+
+	if (cpu < -1 || cpu >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (cpu == -1)
+		dmask = vm_active_cpus(vm);
+	else
+		CPU_SETOF(cpu, &dmask);
+	error = 0;
+	while ((cpu = CPU_FFS(&dmask)) != 0) {
+		cpu--;
+		CPU_CLR(cpu, &dmask);
+		vlapic = vm_lapic(vm, cpu);
+		error = vlapic_trigger_lvt(vlapic, vector);
+		if (error)
+			break;
+	}
+
+	return (error);
+}
+
+int
+lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
+{
+	int delmode, vec;
+	uint32_t dest;
+	bool phys;
+
+	VM_CTR2(vm, "lapic MSI addr: %#lx msg: %#lx", addr, msg);
+
+	if ((addr & MSI_X86_ADDR_MASK) != MSI_X86_ADDR_BASE) {
+		VM_CTR1(vm, "lapic MSI invalid addr %#lx", addr);
+		return (-1);
+	}
+
+	/*
+	 * Extract the x86-specific fields from the MSI addr/msg
+	 * params according to the Intel Arch spec, Vol3 Ch 10.
+	 *
+	 * The PCI specification does not support level triggered
+	 * MSI/MSI-X so ignore trigger level in 'msg'.
+	 *
+	 * The 'dest' is interpreted as a logical APIC ID if both
+	 * the Redirection Hint and Destination Mode are '1' and
+	 * physical otherwise.
+	 */
+	dest = (addr >> 12) & 0xff;
+	phys = ((addr & (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG)) !=
+	    (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG));
+	delmode = msg & APIC_DELMODE_MASK;
+	vec = msg & 0xff;
+
+	VM_CTR3(vm, "lapic MSI %s dest %#x, vec %d",
+	    phys ? "physical" : "logical", dest, vec);
+
+	vlapic_deliver_intr(vm, LAPIC_TRIG_EDGE, dest, phys, delmode, vec);
+	return (0);
+}
+
+static boolean_t
+x2apic_msr(u_int msr)
+{
+	if (msr >= 0x800 && msr <= 0xBFF)
+		return (TRUE);
+	else
+		return (FALSE);
+}
+
+static u_int
+x2apic_msr_to_regoff(u_int msr)
+{
+
+	return ((msr - 0x800) << 4);
+}
+
+boolean_t
+lapic_msr(u_int msr)
+{
+
+	if (x2apic_msr(msr) || (msr == MSR_APICBASE))
+		return (TRUE);
+	else
+		return (FALSE);
+}
+
+int
+lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval, bool *retu)
+{
+	int error;
+	u_int offset;
+	struct vlapic *vlapic;
+
+	vlapic = vm_lapic(vm, cpu);
+
+	if (msr == MSR_APICBASE) {
+		*rval = vlapic_get_apicbase(vlapic);
+		error = 0;
+	} else {
+		offset = x2apic_msr_to_regoff(msr);
+		error = vlapic_read(vlapic, 0, offset, rval, retu);
+	}
+
+	return (error);
+}
+
+int
+lapic_wrmsr(struct vm *vm, int cpu, u_int msr, uint64_t val, bool *retu)
+{
+	int error;
+	u_int offset;
+	struct vlapic *vlapic;
+
+	vlapic = vm_lapic(vm, cpu);
+
+	if (msr == MSR_APICBASE) {
+		error = vlapic_set_apicbase(vlapic, val);
+	} else {
+		offset = x2apic_msr_to_regoff(msr);
+		error = vlapic_write(vlapic, 0, offset, val, retu);
+	}
+
+	return (error);
+}
+
+int
+lapic_mmio_write(void *vm, int cpu, uint64_t gpa, uint64_t wval, int size,
+		 void *arg)
+{
+	int error;
+	uint64_t off;
+	struct vlapic *vlapic;
+
+	off = gpa - DEFAULT_APIC_BASE;
+
+	/*
+	 * Memory mapped local apic accesses must be 4 bytes wide and
+	 * aligned on a 16-byte boundary.
+	 */
+	if (size != 4 || off & 0xf)
+		return (EINVAL);
+
+	vlapic = vm_lapic(vm, cpu);
+	error = vlapic_write(vlapic, 1, off, wval, arg);
+	return (error);
+}
+
+int
+lapic_mmio_read(void *vm, int cpu, uint64_t gpa, uint64_t *rval, int size,
+		void *arg)
+{
+	int error;
+	uint64_t off;
+	struct vlapic *vlapic;
+
+	off = gpa - DEFAULT_APIC_BASE;
+
+	/*
+	 * Memory mapped local apic accesses should be aligned on a
+	 * 16-byte boundary.  They are also supposed to be 4 bytes
+	 * wide, but not all OSes follow that suggestion.
+	 */
+	off &= ~3;
+	if (off & 0xf)
+		return (EINVAL);
+
+	vlapic = vm_lapic(vm, cpu);
+	error = vlapic_read(vlapic, 1, off, rval, arg);
+	return (error);
+}


Property changes on: trunk/sys/amd64/vmm/vmm_lapic.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
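
lapic_intr_msi() above parses the MSI address/data pair exactly as its comment describes: the destination ID comes from address bits 19:12, physical vs. logical addressing from the RH/DM bits, and the delivery mode and vector from the data word. A standalone sketch of the same field extraction (the MSI_X86_* constants are copied from the file; APIC_DELMODE_MASK is assumed to be the usual 0x700 encoding from apicreg.h):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSI_X86_ADDR_MASK       0xfff00000
    #define MSI_X86_ADDR_BASE       0xfee00000
    #define MSI_X86_ADDR_RH         0x00000008      /* Redirection Hint */
    #define MSI_X86_ADDR_LOG        0x00000004      /* Destination Mode */
    #define APIC_DELMODE_MASK       0x00000700      /* assumed, from apicreg.h */

    int
    main(void)
    {
            /* Example programming: physical destination 1, fixed delivery, vector 0x41. */
            uint64_t addr = 0xfee01000ULL;
            uint64_t msg = 0x0041ULL;
            uint32_t dest;
            unsigned int delmode, vec;
            bool phys;

            if ((addr & MSI_X86_ADDR_MASK) != MSI_X86_ADDR_BASE) {
                    printf("not an x86 MSI address\n");
                    return (1);
            }

            /* Same field extraction as lapic_intr_msi(). */
            dest = (addr >> 12) & 0xff;
            phys = (addr & (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG)) !=
                (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG);
            delmode = msg & APIC_DELMODE_MASK;
            vec = msg & 0xff;

            printf("%s dest %#x delmode %#x vec %u\n",
                phys ? "physical" : "logical", dest, delmode, vec);
            return (0);
    }

The x2APIC MSR mapping used by lapic_rdmsr()/lapic_wrmsr() is the usual one as well: for example, MSR 0x808 (the TPR) maps to register offset (0x808 - 0x800) << 4 = 0x80.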
Added: trunk/sys/amd64/vmm/vmm_lapic.h
===================================================================
--- trunk/sys/amd64/vmm/vmm_lapic.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_lapic.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,76 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_lapic.h 266339 2014-05-17 19:11:08Z jhb $
+ */
+
+#ifndef _VMM_LAPIC_H_
+#define	_VMM_LAPIC_H_
+
+struct vm;
+
+boolean_t lapic_msr(u_int num);
+int	lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval,
+	    bool *retu);
+int	lapic_wrmsr(struct vm *vm, int cpu, u_int msr, uint64_t wval,
+	    bool *retu);
+
+int	lapic_mmio_read(void *vm, int cpu, uint64_t gpa,
+			uint64_t *rval, int size, void *arg);
+int	lapic_mmio_write(void *vm, int cpu, uint64_t gpa,
+			 uint64_t wval, int size, void *arg);
+
+/*
+ * Signals to the LAPIC that an interrupt at 'vector' needs to be generated
+ * to the 'cpu'; the state is recorded in the IRR.
+ */
+int	lapic_set_intr(struct vm *vm, int cpu, int vector, bool trig);
+
+#define	LAPIC_TRIG_LEVEL	true
+#define	LAPIC_TRIG_EDGE		false
+static __inline int
+lapic_intr_level(struct vm *vm, int cpu, int vector)
+{
+
+	return (lapic_set_intr(vm, cpu, vector, LAPIC_TRIG_LEVEL));
+}
+
+static __inline int
+lapic_intr_edge(struct vm *vm, int cpu, int vector)
+{
+
+	return (lapic_set_intr(vm, cpu, vector, LAPIC_TRIG_EDGE));
+}
+
+/*
+ * Triggers the LAPIC local interrupt (LVT) 'vector' on 'cpu'.  'cpu' can
+ * be set to -1 to trigger the interrupt on all CPUs.
+ */
+int	lapic_set_local_intr(struct vm *vm, int cpu, int vector);
+
+int	lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg);
+
+#endif


Property changes on: trunk/sys/amd64/vmm/vmm_lapic.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_mem.c
===================================================================
--- trunk/sys/amd64/vmm/vmm_mem.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_mem.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,123 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_mem.c 295124 2016-02-01 14:56:11Z grehan $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_mem.c 295124 2016-02-01 14:56:11Z grehan $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/sglist.h>
+#include <sys/lock.h>
+#include <sys/rwlock.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+
+#include <machine/md_var.h>
+
+#include "vmm_mem.h"
+
+int
+vmm_mem_init(void)
+{
+
+	return (0);
+}
+
+vm_object_t
+vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
+	       vm_paddr_t hpa)
+{
+	int error;
+	vm_object_t obj;
+	struct sglist *sg;
+
+	sg = sglist_alloc(1, M_WAITOK);
+	error = sglist_append_phys(sg, hpa, len);
+	KASSERT(error == 0, ("error %d appending physaddr to sglist", error));
+
+	obj = vm_pager_allocate(OBJT_SG, sg, len, VM_PROT_RW, 0, NULL);
+	if (obj != NULL) {
+		/*
+		 * VT-x ignores the MTRR settings when figuring out the
+		 * memory type for translations obtained through EPT.
+		 *
+		 * Therefore we explicitly force the pages provided by
+		 * this object to be mapped as uncacheable.
+		 */
+		VM_OBJECT_WLOCK(obj);
+		error = vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
+		VM_OBJECT_WUNLOCK(obj);
+		if (error != KERN_SUCCESS) {
+			panic("vmm_mmio_alloc: vm_object_set_memattr error %d",
+				error);
+		}
+		error = vm_map_find(&vmspace->vm_map, obj, 0, &gpa, len, 0,
+				    VMFS_NO_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
+		if (error != KERN_SUCCESS) {
+			vm_object_deallocate(obj);
+			obj = NULL;
+		}
+	}
+
+	/*
+	 * Drop the reference on the sglist.
+	 *
+	 * If the scatter/gather object was successfully allocated then it
+	 * has incremented the reference count on the sglist. Dropping the
+	 * initial reference count ensures that the sglist will be freed
+	 * when the object is deallocated.
+	 * 
+	 * If the object could not be allocated then we end up freeing the
+	 * sglist.
+	 */
+	sglist_free(sg);
+
+	return (obj);
+}
+
+void
+vmm_mmio_free(struct vmspace *vmspace, vm_paddr_t gpa, size_t len)
+{
+
+	vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
+}
+
+vm_paddr_t
+vmm_mem_maxaddr(void)
+{
+
+	return (ptoa(Maxmem));
+}


Property changes on: trunk/sys/amd64/vmm/vmm_mem.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_mem.h
===================================================================
--- trunk/sys/amd64/vmm/vmm_mem.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_mem.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,42 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_mem.h 295124 2016-02-01 14:56:11Z grehan $
+ */
+
+#ifndef	_VMM_MEM_H_
+#define	_VMM_MEM_H_
+
+struct vmspace;
+struct vm_object;
+
+int		vmm_mem_init(void);
+struct vm_object *vmm_mmio_alloc(struct vmspace *, vm_paddr_t gpa, size_t len,
+				 vm_paddr_t hpa);
+void		vmm_mmio_free(struct vmspace *, vm_paddr_t gpa, size_t size);
+vm_paddr_t	vmm_mem_maxaddr(void);
+
+#endif


Property changes on: trunk/sys/amd64/vmm/vmm_mem.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_stat.c
===================================================================
--- trunk/sys/amd64/vmm/vmm_stat.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_stat.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,171 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_stat.c 284900 2015-06-28 03:22:26Z neel $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_stat.c 284900 2015-06-28 03:22:26Z neel $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <machine/vmm.h>
+#include "vmm_util.h"
+#include "vmm_stat.h"
+
+/*
+ * 'vst_num_elems' is the total number of addressable statistic elements
+ * 'vst_num_types' is the number of unique statistic types
+ *
+ * It is always true that 'vst_num_elems' is greater than or equal to
+ * 'vst_num_types'. This is because a stat type may represent more than
+ * one element (e.g. VMM_STAT_ARRAY).
+ */
+static int vst_num_elems, vst_num_types;
+static struct vmm_stat_type *vsttab[MAX_VMM_STAT_ELEMS];
+
+static MALLOC_DEFINE(M_VMM_STAT, "vmm stat", "vmm stat");
+
+#define	vst_size	((size_t)vst_num_elems * sizeof(uint64_t))
+
+void
+vmm_stat_register(void *arg)
+{
+	struct vmm_stat_type *vst = arg;
+
+	/* We require all stats to identify themselves with a description */
+	if (vst->desc == NULL)
+		return;
+
+	if (vst->scope == VMM_STAT_SCOPE_INTEL && !vmm_is_intel())
+		return;
+
+	if (vst->scope == VMM_STAT_SCOPE_AMD && !vmm_is_amd())
+		return;
+
+	if (vst_num_elems + vst->nelems >= MAX_VMM_STAT_ELEMS) {
+		printf("Cannot accomodate vmm stat type \"%s\"!\n", vst->desc);
+		return;
+	}
+
+	vst->index = vst_num_elems;
+	vst_num_elems += vst->nelems;
+
+	vsttab[vst_num_types++] = vst;
+}
+
+int
+vmm_stat_copy(struct vm *vm, int vcpu, int *num_stats, uint64_t *buf)
+{
+	struct vmm_stat_type *vst;
+	uint64_t *stats;
+	int i;
+
+	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+		return (EINVAL);
+
+	/* Let stats functions update their counters */
+	for (i = 0; i < vst_num_types; i++) {
+		vst = vsttab[i];
+		if (vst->func != NULL)
+			(*vst->func)(vm, vcpu, vst);
+	}
+
+	/* Copy over the stats */
+	stats = vcpu_stats(vm, vcpu);
+	for (i = 0; i < vst_num_elems; i++)
+		buf[i] = stats[i];
+	*num_stats = vst_num_elems;
+	return (0);
+}
+
+void *
+vmm_stat_alloc(void)
+{
+
+	return (malloc(vst_size, M_VMM_STAT, M_WAITOK));
+}
+
+void
+vmm_stat_init(void *vp)
+{
+
+	bzero(vp, vst_size);
+}
+
+void
+vmm_stat_free(void *vp)
+{
+	free(vp, M_VMM_STAT);
+}
+
+int
+vmm_stat_desc_copy(int index, char *buf, int bufsize)
+{
+	int i;
+	struct vmm_stat_type *vst;
+
+	for (i = 0; i < vst_num_types; i++) {
+		vst = vsttab[i];
+		if (index >= vst->index && index < vst->index + vst->nelems) {
+			if (vst->nelems > 1) {
+				snprintf(buf, bufsize, "%s[%d]",
+					 vst->desc, index - vst->index);
+			} else {
+				strlcpy(buf, vst->desc, bufsize);
+			}
+			return (0);	/* found it */
+		}
+	}
+
+	return (EINVAL);
+}
+
+/* global statistics */
+VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
+VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
+VMM_STAT(VMEXIT_EXTINT, "vm exits due to external interrupt");
+VMM_STAT(VMEXIT_HLT, "number of times hlt was intercepted");
+VMM_STAT(VMEXIT_CR_ACCESS, "number of times %cr access was intercepted");
+VMM_STAT(VMEXIT_RDMSR, "number of times rdmsr was intercepted");
+VMM_STAT(VMEXIT_WRMSR, "number of times wrmsr was intercepted");
+VMM_STAT(VMEXIT_MTRAP, "number of monitor trap exits");
+VMM_STAT(VMEXIT_PAUSE, "number of times pause was intercepted");
+VMM_STAT(VMEXIT_INTR_WINDOW, "vm exits due to interrupt window opening");
+VMM_STAT(VMEXIT_NMI_WINDOW, "vm exits due to nmi window opening");
+VMM_STAT(VMEXIT_INOUT, "number of times in/out was intercepted");
+VMM_STAT(VMEXIT_CPUID, "number of times cpuid was intercepted");
+VMM_STAT(VMEXIT_NESTED_FAULT, "vm exits due to nested page fault");
+VMM_STAT(VMEXIT_INST_EMUL, "vm exits for instruction emulation");
+VMM_STAT(VMEXIT_UNKNOWN, "number of vm exits for unknown reason");
+VMM_STAT(VMEXIT_ASTPENDING, "number of times astpending at exit");
+VMM_STAT(VMEXIT_REQIDLE, "number of times idle requested at exit");
+VMM_STAT(VMEXIT_USERSPACE, "number of vm exits handled in userspace");
+VMM_STAT(VMEXIT_RENDEZVOUS, "number of times rendezvous pending at exit");
+VMM_STAT(VMEXIT_EXCEPTION, "number of vm exits due to exceptions");


Property changes on: trunk/sys/amd64/vmm/vmm_stat.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
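
vmm_stat_register() above lays every statistic out in one flat per-vcpu array of uint64_t: each type records the index of its first slot, and array types consume 'nelems' consecutive slots, which is why vst_num_elems can exceed vst_num_types. A trimmed userland sketch of that index-assignment scheme (the struct is a hypothetical cut-down copy of struct vmm_stat_type):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ELEMS       64

    /* Cut-down stand-in for struct vmm_stat_type. */
    struct stat_type {
            int             index;          /* first slot in the flat buffer */
            int             nelems;         /* 1 for scalars, >1 for arrays */
            const char      *desc;
    };

    static int num_elems;                   /* mirrors vst_num_elems */
    static uint64_t stats[MAX_ELEMS];       /* one such buffer per vcpu in the kernel */

    static void
    stat_register(struct stat_type *st)
    {
            if (num_elems + st->nelems >= MAX_ELEMS)
                    return;                 /* cannot accommodate this type */
            st->index = num_elems;
            num_elems += st->nelems;
    }

    int
    main(void)
    {
            struct stat_type exits = { -1, 1, "total number of vm exits" };
            struct stat_type ipis = { -1, 8, "hypothetical per-vector array" };

            stat_register(&exits);
            stat_register(&ipis);

            /* A scalar increments its own slot; array entry i lands at index + i. */
            stats[exits.index]++;
            stats[ipis.index + 3]++;

            printf("'%s' -> slot %d, '%s'[3] -> slot %d, %d slots in use\n",
                exits.desc, exits.index, ipis.desc, ipis.index + 3, num_elems);
            return (0);
    }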
Added: trunk/sys/amd64/vmm/vmm_stat.h
===================================================================
--- trunk/sys/amd64/vmm/vmm_stat.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_stat.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,162 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_stat.h 284900 2015-06-28 03:22:26Z neel $
+ */
+
+#ifndef _VMM_STAT_H_
+#define	_VMM_STAT_H_
+
+struct vm;
+
+#define	MAX_VMM_STAT_ELEMS	64		/* arbitrary */
+
+enum vmm_stat_scope {
+	VMM_STAT_SCOPE_ANY,
+	VMM_STAT_SCOPE_INTEL,		/* Intel VMX specific statistic */
+	VMM_STAT_SCOPE_AMD,		/* AMD SVM specific statistic */
+};
+
+struct vmm_stat_type;
+typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
+    struct vmm_stat_type *stat);
+
+struct vmm_stat_type {
+	int	index;			/* position in the stats buffer */
+	int	nelems;			/* standalone or array */
+	const char *desc;		/* description of statistic */
+	vmm_stat_func_t func;
+	enum vmm_stat_scope scope;
+};
+
+void	vmm_stat_register(void *arg);
+
+#define	VMM_STAT_FDEFINE(type, nelems, desc, func, scope)		\
+	struct vmm_stat_type type[1] = {				\
+		{ -1, nelems, desc, func, scope }			\
+	};								\
+	SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
+
+#define VMM_STAT_DEFINE(type, nelems, desc, scope) 			\
+	VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)
+
+#define	VMM_STAT_DECLARE(type)						\
+	extern struct vmm_stat_type type[1]
+
+#define	VMM_STAT(type, desc)		\
+	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_ANY)
+#define	VMM_STAT_INTEL(type, desc)	\
+	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_INTEL)
+#define	VMM_STAT_AMD(type, desc)	\
+	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)
+
+#define	VMM_STAT_FUNC(type, desc, func)	\
+	VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)
+
+#define	VMM_STAT_ARRAY(type, nelems, desc)	\
+	VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
+
+void	*vmm_stat_alloc(void);
+void	vmm_stat_init(void *vp);
+void 	vmm_stat_free(void *vp);
+
+/*
+ * 'buf' must be large enough to hold 'MAX_VMM_STAT_ELEMS' entries
+ */
+int	vmm_stat_copy(struct vm *vm, int vcpu, int *num_stats, uint64_t *buf);
+int	vmm_stat_desc_copy(int index, char *buf, int buflen);
+
+static void __inline
+vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
+		    int statidx, uint64_t x)
+{
+#ifdef VMM_KEEP_STATS
+	uint64_t *stats;
+	
+	stats = vcpu_stats(vm, vcpu);
+
+	if (vst->index >= 0 && statidx < vst->nelems)
+		stats[vst->index + statidx] += x;
+#endif
+}
+
+static void __inline
+vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
+		   int statidx, uint64_t val)
+{
+#ifdef VMM_KEEP_STATS
+	uint64_t *stats;
+	
+	stats = vcpu_stats(vm, vcpu);
+
+	if (vst->index >= 0 && statidx < vst->nelems)
+		stats[vst->index + statidx] = val;
+#endif
+}
+		   
+static void __inline
+vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
+{
+
+#ifdef VMM_KEEP_STATS
+	vmm_stat_array_incr(vm, vcpu, vst, 0, x);
+#endif
+}
+
+static void __inline
+vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
+{
+
+#ifdef VMM_KEEP_STATS
+	vmm_stat_array_set(vm, vcpu, vst, 0, val);
+#endif
+}
+
+VMM_STAT_DECLARE(VCPU_MIGRATIONS);
+VMM_STAT_DECLARE(VMEXIT_COUNT);
+VMM_STAT_DECLARE(VMEXIT_EXTINT);
+VMM_STAT_DECLARE(VMEXIT_HLT);
+VMM_STAT_DECLARE(VMEXIT_CR_ACCESS);
+VMM_STAT_DECLARE(VMEXIT_RDMSR);
+VMM_STAT_DECLARE(VMEXIT_WRMSR);
+VMM_STAT_DECLARE(VMEXIT_MTRAP);
+VMM_STAT_DECLARE(VMEXIT_PAUSE);
+VMM_STAT_DECLARE(VMEXIT_INTR_WINDOW);
+VMM_STAT_DECLARE(VMEXIT_NMI_WINDOW);
+VMM_STAT_DECLARE(VMEXIT_INOUT);
+VMM_STAT_DECLARE(VMEXIT_CPUID);
+VMM_STAT_DECLARE(VMEXIT_NESTED_FAULT);
+VMM_STAT_DECLARE(VMEXIT_INST_EMUL);
+VMM_STAT_DECLARE(VMEXIT_UNKNOWN);
+VMM_STAT_DECLARE(VMEXIT_ASTPENDING);
+VMM_STAT_DECLARE(VMEXIT_USERSPACE);
+VMM_STAT_DECLARE(VMEXIT_RENDEZVOUS);
+VMM_STAT_DECLARE(VMEXIT_EXCEPTION);
+VMM_STAT_DECLARE(VMEXIT_REQIDLE);
+#endif


Property changes on: trunk/sys/amd64/vmm/vmm_stat.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_support.S
===================================================================
--- trunk/sys/amd64/vmm/vmm_support.S	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_support.S	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,43 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_support.S 245678 2013-01-20 03:42:49Z neel $
+ */
+
+#define	LOCORE
+
+#include <machine/asmacros.h>
+
+#define	LA_EOI	0xB0
+
+	.text
+	SUPERALIGN_TEXT
+IDTVEC(justreturn)
+	pushq	%rax
+	movq	lapic, %rax
+	movl	$0, LA_EOI(%rax)
+	popq	%rax
+	iretq


Property changes on: trunk/sys/amd64/vmm/vmm_support.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/vmm_util.c
===================================================================
--- trunk/sys/amd64/vmm/vmm_util.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_util.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,112 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_util.c 245678 2013-01-20 03:42:49Z neel $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_util.c 245678 2013-01-20 03:42:49Z neel $");
+
+#include <sys/param.h>
+#include <sys/libkern.h>
+
+#include <machine/md_var.h>
+
+#include "vmm_util.h"
+
+boolean_t
+vmm_is_intel(void)
+{
+
+	if (strcmp(cpu_vendor, "GenuineIntel") == 0)
+		return (TRUE);
+	else
+		return (FALSE);
+}
+
+boolean_t
+vmm_is_amd(void)
+{
+	if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+		return (TRUE);
+	else
+		return (FALSE);
+}
+
+boolean_t
+vmm_supports_1G_pages(void)
+{
+	unsigned int regs[4];
+
+	/*
+	 * CPUID.80000001:EDX[bit 26] = 1 indicates support for 1GB pages
+	 *
+	 * Both Intel and AMD support this bit.
+	 */
+	if (cpu_exthigh >= 0x80000001) {
+		do_cpuid(0x80000001, regs);
+		if (regs[3] & (1 << 26))
+			return (TRUE);
+	}
+	return (FALSE);
+}
+
+#include <sys/proc.h>
+#include <machine/frame.h>
+#define	DUMP_REG(x)	printf(#x "\t\t0x%016lx\n", (long)(tf->tf_ ## x))
+#define	DUMP_SEG(x)	printf(#x "\t\t0x%04x\n", (unsigned)(tf->tf_ ## x))
+void
+dump_trapframe(struct trapframe *tf)
+{
+	DUMP_REG(rdi);
+	DUMP_REG(rsi);
+	DUMP_REG(rdx);
+	DUMP_REG(rcx);
+	DUMP_REG(r8);
+	DUMP_REG(r9);
+	DUMP_REG(rax);
+	DUMP_REG(rbx);
+	DUMP_REG(rbp);
+	DUMP_REG(r10);
+	DUMP_REG(r11);
+	DUMP_REG(r12);
+	DUMP_REG(r13);
+	DUMP_REG(r14);
+	DUMP_REG(r15);
+	DUMP_REG(trapno);
+	DUMP_REG(addr);
+	DUMP_REG(flags);
+	DUMP_REG(err);
+	DUMP_REG(rip);
+	DUMP_REG(rflags);
+	DUMP_REG(rsp);
+	DUMP_SEG(cs);
+	DUMP_SEG(ss);
+	DUMP_SEG(fs);
+	DUMP_SEG(gs);
+	DUMP_SEG(es);
+	DUMP_SEG(ds);
+}


Property changes on: trunk/sys/amd64/vmm/vmm_util.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
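
vmm_supports_1G_pages() above keys off CPUID.80000001H:EDX bit 26 (Page1GB). The same probe can be made from userland with the compiler's cpuid helper; a minimal sketch assuming GCC or Clang on x86-64, where __get_cpuid() is provided by <cpuid.h>:

    #include <cpuid.h>              /* GCC/Clang x86 intrinsic header */
    #include <stdio.h>

    int
    main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* __get_cpuid() returns 0 if the requested leaf is unavailable. */
            if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) == 0) {
                    printf("extended leaf 0x80000001 not available\n");
                    return (1);
            }

            /* EDX[26] = Page1GB on both Intel and AMD. */
            printf("1GB pages %ssupported\n", (edx & (1u << 26)) ? "" : "not ");
            return (0);
    }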
Added: trunk/sys/amd64/vmm/vmm_util.h
===================================================================
--- trunk/sys/amd64/vmm/vmm_util.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/vmm_util.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,41 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/vmm_util.h 245678 2013-01-20 03:42:49Z neel $
+ */
+
+#ifndef _VMM_UTIL_H_
+#define	_VMM_UTIL_H_
+
+struct trapframe;
+
+boolean_t	vmm_is_intel(void);
+boolean_t	vmm_is_amd(void);
+boolean_t	vmm_supports_1G_pages(void);
+
+void		dump_trapframe(struct trapframe *tf);
+
+#endif


Property changes on: trunk/sys/amd64/vmm/vmm_util.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/amd64/vmm/x86.c
===================================================================
--- trunk/sys/amd64/vmm/x86.c	                        (rev 0)
+++ trunk/sys/amd64/vmm/x86.c	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,525 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/x86.c 315928 2017-03-25 05:09:03Z grehan $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/x86.c 315928 2017-03-25 05:09:03Z grehan $");
+
+#include <sys/param.h>
+#include <sys/pcpu.h>
+#include <sys/systm.h>
+#include <sys/sysctl.h>
+
+#include <machine/clock.h>
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+#include <machine/segments.h>
+#include <machine/specialreg.h>
+
+#include <machine/vmm.h>
+
+#include "vmm_host.h"
+#include "vmm_ktr.h"
+#include "vmm_util.h"
+#include "x86.h"
+
+SYSCTL_DECL(_hw_vmm);
+static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD, 0, NULL);
+
+#define	CPUID_VM_HIGH		0x40000000
+
+static const char bhyve_id[12] = "bhyve bhyve ";
+
+static uint64_t bhyve_xcpuids;
+SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
+    "Number of times an unknown cpuid leaf was accessed");
+
+/*
+ * The default CPU topology is a single thread per package.
+ */
+static u_int threads_per_core = 1;
+SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, threads_per_core, CTLFLAG_RDTUN,
+    &threads_per_core, 0, NULL);
+
+static u_int cores_per_package = 1;
+SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, cores_per_package, CTLFLAG_RDTUN,
+    &cores_per_package, 0, NULL);
+
+static int cpuid_leaf_b = 1;
+SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
+    &cpuid_leaf_b, 0, NULL);
+
+/*
+ * Round up to the next power of two, if necessary, and then take log2.
+ * Returns -1 if argument is zero.
+ */
+static __inline int
+log2(u_int x)
+{
+
+	return (fls(x << (1 - powerof2(x))) - 1);
+}
+
+int
+x86_emulate_cpuid(struct vm *vm, int vcpu_id,
+		  uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+{
+	const struct xsave_limits *limits;
+	uint64_t cr4;
+	int error, enable_invpcid, level, width, x2apic_id;
+	unsigned int func, regs[4], logical_cpus;
+	enum x2apic_state x2apic_state;
+
+	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);
+
+	/*
+	 * Requests for invalid CPUID levels should map to the highest
+	 * available level instead.
+	 */
+	if (cpu_exthigh != 0 && *eax >= 0x80000000) {
+		if (*eax > cpu_exthigh)
+			*eax = cpu_exthigh;
+	} else if (*eax >= 0x40000000) {
+		if (*eax > CPUID_VM_HIGH)
+			*eax = CPUID_VM_HIGH;
+	} else if (*eax > cpu_high) {
+		*eax = cpu_high;
+	}
+
+	func = *eax;
+
+	/*
+	 * In general the approach used for CPU topology is to
+	 * advertise a flat topology where all CPUs are packages with
+	 * no multi-core or SMT.
+	 */
+	switch (func) {
+		/*
+		 * Pass these through to the guest
+		 */
+		case CPUID_0000_0000:
+		case CPUID_0000_0002:
+		case CPUID_0000_0003:
+		case CPUID_8000_0000:
+		case CPUID_8000_0002:
+		case CPUID_8000_0003:
+		case CPUID_8000_0004:
+		case CPUID_8000_0006:
+			cpuid_count(*eax, *ecx, regs);
+			break;
+		case CPUID_8000_0008:
+			cpuid_count(*eax, *ecx, regs);
+			if (vmm_is_amd()) {
+				/*
+				 * XXX this might appear silly because AMD
+				 * cpus don't have threads.
+				 *
+				 * However this matches the logical cpus as
+				 * advertised by leaf 0x1 and will work even
+				 * if the 'threads_per_core' tunable is set
+				 * incorrectly on an AMD host.
+				 */
+				logical_cpus = threads_per_core *
+				    cores_per_package;
+				regs[2] = logical_cpus - 1;
+			}
+			break;
+
+		case CPUID_8000_0001:
+			cpuid_count(*eax, *ecx, regs);
+
+			/*
+			 * Hide SVM and Topology Extension features from guest.
+			 */
+			regs[2] &= ~(AMDID2_SVM | AMDID2_TOPOLOGY);
+
+			/*
+			 * Don't advertise extended performance counter MSRs
+			 * to the guest.
+			 */
+			regs[2] &= ~AMDID2_PCXC;
+			regs[2] &= ~AMDID2_PNXC;
+			regs[2] &= ~AMDID2_PTSCEL2I;
+
+			/*
+			 * Don't advertise Instruction Based Sampling feature.
+			 */
+			regs[2] &= ~AMDID2_IBS;
+
+			/* NodeID MSR not available */
+			regs[2] &= ~AMDID2_NODE_ID;
+
+			/* Don't advertise the OS visible workaround feature */
+			regs[2] &= ~AMDID2_OSVW;
+
+			/* Hide mwaitx/monitorx capability from the guest */
+			regs[2] &= ~AMDID2_MWAITX;
+
+			/*
+			 * Hide rdtscp/ia32_tsc_aux until we know how
+			 * to deal with them.
+			 */
+			regs[3] &= ~AMDID_RDTSCP;
+			break;
+
+		case CPUID_8000_0007:
+			/*
+			 * AMD uses this leaf to advertise the processor's
+			 * power monitoring and RAS capabilities. These
+			 * features are hardware-specific and exposing
+			 * them to a guest doesn't make a lot of sense.
+			 *
+			 * Intel uses this leaf only to advertise the
+			 * "Invariant TSC" feature with all other bits
+			 * being reserved (set to zero).
+			 */
+			regs[0] = 0;
+			regs[1] = 0;
+			regs[2] = 0;
+			regs[3] = 0;
+
+			/*
+			 * "Invariant TSC" can be advertised to the guest if:
+			 * - host TSC frequency is invariant
+			 * - host TSCs are synchronized across physical cpus
+			 *
+			 * XXX This still falls short because the vcpu
+			 * can observe the TSC moving backwards as it
+			 * migrates across physical cpus. But at least
+			 * it should discourage the guest from using the
+			 * TSC to keep track of time.
+			 */
+			if (tsc_is_invariant && smp_tsc)
+				regs[3] |= AMDPM_TSC_INVARIANT;
+			break;
+
+		case CPUID_0000_0001:
+			do_cpuid(1, regs);
+
+			error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
+			if (error) {
+				panic("x86_emulate_cpuid: error %d "
+				      "fetching x2apic state", error);
+			}
+
+			/*
+			 * Override the APIC ID only in ebx
+			 */
+			regs[1] &= ~(CPUID_LOCAL_APIC_ID);
+			regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);
+
+			/*
+			 * Don't expose VMX, SpeedStep (EST), TM2 or SMX capability.
+			 * Advertise x2APIC capability and Hypervisor guest.
+			 */
+			regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
+			regs[2] &= ~(CPUID2_SMX);
+
+			regs[2] |= CPUID2_HV;
+
+			if (x2apic_state != X2APIC_DISABLED)
+				regs[2] |= CPUID2_X2APIC;
+			else
+				regs[2] &= ~CPUID2_X2APIC;
+
+			/*
+			 * Only advertise CPUID2_XSAVE in the guest if
+			 * the host is using XSAVE.
+			 */
+			if (!(regs[2] & CPUID2_OSXSAVE))
+				regs[2] &= ~CPUID2_XSAVE;
+
+			/*
+			 * If CPUID2_XSAVE is being advertised and the
+			 * guest has set CR4_XSAVE, set
+			 * CPUID2_OSXSAVE.
+			 */
+			regs[2] &= ~CPUID2_OSXSAVE;
+			if (regs[2] & CPUID2_XSAVE) {
+				error = vm_get_register(vm, vcpu_id,
+				    VM_REG_GUEST_CR4, &cr4);
+				if (error)
+					panic("x86_emulate_cpuid: error %d "
+					      "fetching %%cr4", error);
+				if (cr4 & CR4_XSAVE)
+					regs[2] |= CPUID2_OSXSAVE;
+			}
+
+			/*
+			 * Hide monitor/mwait until we know how to deal with
+			 * these instructions.
+			 */
+			regs[2] &= ~CPUID2_MON;
+
+			/*
+			 * Hide the performance and debug features.
+			 */
+			regs[2] &= ~CPUID2_PDCM;
+
+			/*
+			 * No TSC deadline support in the APIC yet
+			 */
+			regs[2] &= ~CPUID2_TSCDLT;
+
+			/*
+			 * Hide thermal monitoring
+			 */
+			regs[3] &= ~(CPUID_ACPI | CPUID_TM);
+
+			/*
+			 * Hide the debug store capability.
+			 */
+			regs[3] &= ~CPUID_DS;
+
+			/*
+			 * Advertise the Machine Check and MTRR capability.
+			 *
+			 * Some guest OSes (e.g. Windows) will not boot if
+			 * these features are absent.
+			 */
+			regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);
+
+			logical_cpus = threads_per_core * cores_per_package;
+			regs[1] &= ~CPUID_HTT_CORES;
+			regs[1] |= (logical_cpus & 0xff) << 16;
+			regs[3] |= CPUID_HTT;
+			break;
+
+		case CPUID_0000_0004:
+			cpuid_count(*eax, *ecx, regs);
+
+			if (regs[0] || regs[1] || regs[2] || regs[3]) {
+				regs[0] &= 0x3ff;
+				regs[0] |= (cores_per_package - 1) << 26;
+				/*
+				 * Cache topology:
+				 * - L1 and L2 are shared only by the logical
+				 *   processors in a single core.
+				 * - L3 and above are shared by all logical
+				 *   processors in the package.
+				 */
+				logical_cpus = threads_per_core;
+				level = (regs[0] >> 5) & 0x7;
+				if (level >= 3)
+					logical_cpus *= cores_per_package;
+				regs[0] |= (logical_cpus - 1) << 14;
+			}
+			break;
+
+		case CPUID_0000_0007:
+			regs[0] = 0;
+			regs[1] = 0;
+			regs[2] = 0;
+			regs[3] = 0;
+
+			/* leaf 0 */
+			if (*ecx == 0) {
+				cpuid_count(*eax, *ecx, regs);
+
+				/* Only leaf 0 is supported */
+				regs[0] = 0;
+
+				/*
+				 * Expose known-safe features.
+				 */
+				regs[1] &= (CPUID_STDEXT_FSGSBASE |
+				    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
+				    CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2 |
+				    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
+				    CPUID_STDEXT_AVX512F |
+				    CPUID_STDEXT_AVX512PF |
+				    CPUID_STDEXT_AVX512ER |
+				    CPUID_STDEXT_AVX512CD);
+				regs[2] = 0;
+				regs[3] = 0;
+
+				/* Advertise INVPCID if it is enabled. */
+				error = vm_get_capability(vm, vcpu_id,
+				    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
+				if (error == 0 && enable_invpcid)
+					regs[1] |= CPUID_STDEXT_INVPCID;
+			}
+			break;
+
+		case CPUID_0000_0006:
+			regs[0] = CPUTPM1_ARAT;
+			regs[1] = 0;
+			regs[2] = 0;
+			regs[3] = 0;
+			break;
+
+		case CPUID_0000_000A:
+			/*
+			 * Handle the access, but report 0 for
+			 * all options
+			 */
+			regs[0] = 0;
+			regs[1] = 0;
+			regs[2] = 0;
+			regs[3] = 0;
+			break;
+
+		case CPUID_0000_000B:
+			/*
+			 * Processor topology enumeration
+			 */
+			if (*ecx == 0) {
+				logical_cpus = threads_per_core;
+				width = log2(logical_cpus);
+				level = CPUID_TYPE_SMT;
+				x2apic_id = vcpu_id;
+			}
+
+			if (*ecx == 1) {
+				logical_cpus = threads_per_core *
+				    cores_per_package;
+				width = log2(logical_cpus);
+				level = CPUID_TYPE_CORE;
+				x2apic_id = vcpu_id;
+			}
+
+			if (!cpuid_leaf_b || *ecx >= 2) {
+				width = 0;
+				logical_cpus = 0;
+				level = 0;
+				x2apic_id = 0;
+			}
+
+			regs[0] = width & 0x1f;
+			regs[1] = logical_cpus & 0xffff;
+			regs[2] = (level << 8) | (*ecx & 0xff);
+			regs[3] = x2apic_id;
+			break;
+
+		case CPUID_0000_000D:
+			limits = vmm_get_xsave_limits();
+			if (!limits->xsave_enabled) {
+				regs[0] = 0;
+				regs[1] = 0;
+				regs[2] = 0;
+				regs[3] = 0;
+				break;
+			}
+
+			cpuid_count(*eax, *ecx, regs);
+			switch (*ecx) {
+			case 0:
+				/*
+				 * Only permit the guest to use bits
+				 * that are active in the host in
+				 * %xcr0.  Also, claim that the
+				 * maximum save area size is
+				 * equivalent to the host's current
+				 * save area size.  Since this runs
+				 * "inside" of vmrun(), it runs with
+				 * the guest's xcr0, so the current
+				 * save area size is correct as-is.
+				 */
+				regs[0] &= limits->xcr0_allowed;
+				regs[2] = limits->xsave_max_size;
+				regs[3] &= (limits->xcr0_allowed >> 32);
+				break;
+			case 1:
+				/* Only permit XSAVEOPT. */
+				regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
+				regs[1] = 0;
+				regs[2] = 0;
+				regs[3] = 0;
+				break;
+			default:
+				/*
+				 * If the leaf is for a permitted feature,
+				 * pass through as-is, otherwise return
+				 * all zeroes.
+				 */
+				if (!(limits->xcr0_allowed & (1ul << *ecx))) {
+					regs[0] = 0;
+					regs[1] = 0;
+					regs[2] = 0;
+					regs[3] = 0;
+				}
+				break;
+			}
+			break;
+
+		case 0x40000000:
+			regs[0] = CPUID_VM_HIGH;
+			bcopy(bhyve_id, &regs[1], 4);
+			bcopy(bhyve_id + 4, &regs[2], 4);
+			bcopy(bhyve_id + 8, &regs[3], 4);
+			break;
+
+		default:
+			/*
+			 * The leaf value has already been clamped so
+			 * simply pass this through, keeping count of
+			 * how many unhandled leaf values have been seen.
+			 */
+			atomic_add_long(&bhyve_xcpuids, 1);
+			cpuid_count(*eax, *ecx, regs);
+			break;
+	}
+
+	*eax = regs[0];
+	*ebx = regs[1];
+	*ecx = regs[2];
+	*edx = regs[3];
+
+	return (1);
+}
+
+bool
+vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap)
+{
+	bool rv;
+
+	KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpuid_capability %d",
+	    __func__, cap));
+
+	/*
+	 * Simply pass through the capabilities of the host CPU for now.
+	 */
+	rv = false;
+	switch (cap) {
+	case VCC_NO_EXECUTE:
+		if (amd_feature & AMDID_NX)
+			rv = true;
+		break;
+	case VCC_FFXSR:
+		if (amd_feature & AMDID_FFXSR)
+			rv = true;
+		break;
+	case VCC_TCE:
+		if (amd_feature2 & AMDID2_TCE)
+			rv = true;
+		break;
+	default:
+		panic("%s: unknown vm_cpuid_capability %d", __func__, cap);
+	}
+	return (rv);
+}


Property changes on: trunk/sys/amd64/vmm/x86.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
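
The CPUID_0000_000B and 0x40000000 cases in x86.c above encode the vCPU topology
and the hypervisor signature into the guest-visible registers. The following is a
minimal guest-side sketch of how that encoding would be decoded; it is illustrative
only and not part of this commit -- the cpuid() wrapper is a local helper, and the
signature string depends on the bhyve_id bytes defined earlier in x86.c.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Local helper, not a bhyve interface: issue CPUID with a sub-leaf in %ecx. */
static void
cpuid(uint32_t leaf, uint32_t subleaf, uint32_t regs[4])
{
	__asm__ __volatile__("cpuid"
	    : "=a" (regs[0]), "=b" (regs[1]), "=c" (regs[2]), "=d" (regs[3])
	    : "a" (leaf), "c" (subleaf));
}

int
main(void)
{
	uint32_t regs[4];
	char sig[13] = { 0 };

	/* Leaf 0xB, sub-leaf 0: the SMT level built in the CPUID_0000_000B case. */
	cpuid(0xB, 0, regs);
	printf("x2APIC shift width: %u\n", regs[0] & 0x1f);        /* EAX[4:0] */
	printf("logical CPUs at this level: %u\n", regs[1] & 0xffff);
	printf("level type: %u\n", (regs[2] >> 8) & 0xff);         /* 1=SMT, 2=core */
	printf("x2APIC id: %u\n", regs[3]);                        /* vcpu_id above */

	/* Hypervisor vendor leaf: EBX/ECX/EDX carry the bhyve_id signature. */
	cpuid(0x40000000, 0, regs);
	memcpy(sig, &regs[1], 4);
	memcpy(sig + 4, &regs[2], 4);
	memcpy(sig + 8, &regs[3], 4);
	printf("max hypervisor leaf 0x%x, signature \"%s\"\n", regs[0], sig);

	return (0);
}
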
Added: trunk/sys/amd64/vmm/x86.h
===================================================================
--- trunk/sys/amd64/vmm/x86.h	                        (rev 0)
+++ trunk/sys/amd64/vmm/x86.h	2018-06-01 22:54:07 UTC (rev 10172)
@@ -0,0 +1,79 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/amd64/vmm/x86.h 284900 2015-06-28 03:22:26Z neel $
+ */
+
+#ifndef _X86_H_
+#define	_X86_H_
+
+#define CPUID_0000_0000 (0x0)
+#define CPUID_0000_0001	(0x1)
+#define CPUID_0000_0002 (0x2)
+#define CPUID_0000_0003 (0x3)
+#define CPUID_0000_0004 (0x4)
+#define CPUID_0000_0006 (0x6)
+#define CPUID_0000_0007 (0x7)
+#define	CPUID_0000_000A	(0xA)
+#define	CPUID_0000_000B	(0xB)
+#define	CPUID_0000_000D	(0xD)
+#define CPUID_8000_0000	(0x80000000)
+#define CPUID_8000_0001	(0x80000001)
+#define CPUID_8000_0002	(0x80000002)
+#define CPUID_8000_0003	(0x80000003)
+#define CPUID_8000_0004	(0x80000004)
+#define CPUID_8000_0006	(0x80000006)
+#define CPUID_8000_0007	(0x80000007)
+#define CPUID_8000_0008	(0x80000008)
+
+/*
+ * CPUID instruction Fn0000_0001:
+ */
+#define CPUID_0000_0001_APICID_MASK			(0xff<<24)
+#define CPUID_0000_0001_APICID_SHIFT			24
+
+/*
+ * CPUID instruction Fn0000_0001 ECX
+ */
+#define CPUID_0000_0001_FEAT0_VMX	(1<<5)
+
+int x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint32_t *eax, uint32_t *ebx,
+		      uint32_t *ecx, uint32_t *edx);
+
+enum vm_cpuid_capability {
+	VCC_NONE,
+	VCC_NO_EXECUTE,
+	VCC_FFXSR,
+	VCC_TCE,
+	VCC_LAST
+};
+
+/*
+ * Return 'true' if the capability 'cap' is enabled in this virtual cpu
+ * and 'false' otherwise.
+ */
+bool vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap);
+#endif


Property changes on: trunk/sys/amd64/vmm/x86.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
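
The declarations in x86.h are consumed by the VMX and SVM exit handlers added
elsewhere in this commit. The sketch below is a simplified, hypothetical caller --
the handle_cpuid_exit() and guest_may_enable_nxe() names and the register plumbing
are invented for illustration, and the real call sites live in intel/vmx.c and
amd/svm.c. It assumes the usual kernel headers and x86.h are already included: on
a CPUID exit the guest's %eax..%edx are passed by reference and rewritten in place,
and vm_cpuid_capability() gates host features before they are exposed to the guest.

/*
 * Hypothetical CPUID exit handler; names and plumbing are illustrative,
 * not the actual bhyve backend code.
 */
static int
handle_cpuid_exit(struct vm *vm, int vcpuid, uint64_t *rax, uint64_t *rbx,
    uint64_t *rcx, uint64_t *rdx)
{
	uint32_t eax, ebx, ecx, edx;
	int handled;

	eax = *rax;
	ebx = *rbx;
	ecx = *rcx;
	edx = *rdx;

	handled = x86_emulate_cpuid(vm, vcpuid, &eax, &ebx, &ecx, &edx);

	/* CPUID clears the upper 32 bits of the general purpose registers. */
	*rax = eax;
	*rbx = ebx;
	*rcx = ecx;
	*rdx = edx;

	return (handled);
}

/*
 * Example: only let the guest enable a control bit (here EFER.NXE) when
 * the corresponding host capability is present.
 */
static bool
guest_may_enable_nxe(struct vm *vm, int vcpuid)
{
	return (vm_cpuid_capability(vm, vcpuid, VCC_NO_EXECUTE));
}
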

