[Midnightbsd-cvs] src [12306] trunk/sys/xen/interface: sync with FreeBSD 11-stable
laffer1 at midnightbsd.org
Sat Feb 8 14:28:09 EST 2020
Revision: 12306
http://svnweb.midnightbsd.org/src/?rev=12306
Author: laffer1
Date: 2020-02-08 14:28:08 -0500 (Sat, 08 Feb 2020)
Log Message:
-----------
sync with FreeBSD 11-stable
Modified Paths:
--------------
trunk/sys/xen/interface/arch-arm.h
trunk/sys/xen/interface/callback.h
trunk/sys/xen/interface/dom0_ops.h
trunk/sys/xen/interface/domctl.h
trunk/sys/xen/interface/elfnote.h
trunk/sys/xen/interface/event_channel.h
trunk/sys/xen/interface/features.h
trunk/sys/xen/interface/grant_table.h
trunk/sys/xen/interface/kexec.h
trunk/sys/xen/interface/memory.h
trunk/sys/xen/interface/nmi.h
trunk/sys/xen/interface/physdev.h
trunk/sys/xen/interface/platform.h
trunk/sys/xen/interface/sched.h
trunk/sys/xen/interface/sysctl.h
trunk/sys/xen/interface/tmem.h
trunk/sys/xen/interface/trace.h
trunk/sys/xen/interface/vcpu.h
trunk/sys/xen/interface/version.h
trunk/sys/xen/interface/xen-compat.h
trunk/sys/xen/interface/xen.h
trunk/sys/xen/interface/xenoprof.h
Added Paths:
-----------
trunk/sys/xen/interface/errno.h
trunk/sys/xen/interface/gcov.h
trunk/sys/xen/interface/pmu.h
trunk/sys/xen/interface/vm_event.h
Modified: trunk/sys/xen/interface/arch-arm.h
===================================================================
--- trunk/sys/xen/interface/arch-arm.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/arch-arm.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -28,94 +28,254 @@
#ifndef __XEN_PUBLIC_ARCH_ARM_H__
#define __XEN_PUBLIC_ARCH_ARM_H__
-/* hypercall calling convention
- * ----------------------------
+/*
+ * `incontents 50 arm_abi Hypercall Calling Convention
*
* A hypercall is issued using the ARM HVC instruction.
*
* A hypercall can take up to 5 arguments. These are passed in
- * registers, the first argument in r0, the second argument in r1, the
- * third in r2, the forth in r3 and the fifth in r4.
+ * registers, the first argument in x0/r0 (for arm64/arm32 guests
+ * respectively irrespective of whether the underlying hypervisor is
+ * 32- or 64-bit), the second argument in x1/r1, the third in x2/r2,
+ * the fourth in x3/r3 and the fifth in x4/r4.
*
- * The hypercall number is passed in r12.
+ * The hypercall number is passed in r12 (arm) or x16 (arm64). In both
+ * cases the relevant ARM procedure calling convention specifies this
+ * is an inter-procedure-call scratch register (e.g. for use in linker
+ * stubs). This use does not conflict with use during a hypercall.
*
* The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG.
*
- * The return value is in r0.
+ * The return value is in x0/r0.
*
- * The hypercall will clobber r12 and the argument registers used by
- * that hypercall (except r0 which is the return value) i.e. a 2
- * argument hypercall will clobber r1 and a 4 argument hypercall will
- * clobber r1, r2 and r3.
+ * The hypercall will clobber x16/r12 and the argument registers used
+ * by that hypercall (except r0 which is the return value) i.e. in
+ * addition to x16/r12 a 2 argument hypercall will clobber x1/r1 and a
+ * 4 argument hypercall will clobber x1/r1, x2/r2 and x3/r3.
*
+ * Parameter structs passed to hypercalls are laid out according to
+ * the Procedure Call Standard for the ARM Architecture (AAPCS, AKA
+ * EABI) and Procedure Call Standard for the ARM 64-bit Architecture
+ * (AAPCS64). Where there is a conflict the 64-bit standard should be
+ * used regardless of guest type. Structures which are passed as
+ * hypercall arguments are always little endian.
+ *
+ * All memory which is shared with other entities in the system
+ * (including the hypervisor and other guests) must reside in memory
+ * which is mapped as Normal Inner-cacheable. This applies to:
+ * - hypercall arguments passed via a pointer to guest memory.
+ * - memory shared via the grant table mechanism (including PV I/O
+ * rings etc).
+ * - memory shared with the hypervisor (struct shared_info, struct
+ * vcpu_info, the grant table, etc).
+ *
+ * Any Inner cache allocation strategy (Write-Back, Write-Through etc)
+ * is acceptable. There is no restriction on the Outer-cacheability.
*/
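A minimal sketch of this convention for an arm64 guest (an assumption for
illustration; the wrapper name xen_hypercall2 is hypothetical and not part of
this header):

    #include <stdint.h>

    /* Two-argument hypercall per the arm64 convention described above. */
    static inline int64_t xen_hypercall2(uint64_t op, uint64_t a0, uint64_t a1)
    {
        register uint64_t r_op __asm__("x16") = op;  /* hypercall number */
        register uint64_t r_a0 __asm__("x0")  = a0;  /* arg 1, also return value */
        register uint64_t r_a1 __asm__("x1")  = a1;  /* arg 2, clobbered */

        __asm__ __volatile__("hvc #0xEA1"            /* XEN_HYPERCALL_TAG */
                             : "+r" (r_a0), "+r" (r_a1), "+r" (r_op)
                             :
                             : "memory");
        return (int64_t)r_a0;
    }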
+/*
+ * `incontents 55 arm_hcall Supported Hypercalls
+ *
+ * Xen on ARM makes extensive use of hardware facilities and therefore
+ * only a subset of the potential hypercalls are required.
+ *
+ * Since ARM uses second stage paging any machine/physical addresses
+ * passed to hypercalls are Guest Physical Addresses (Intermediate
+ * Physical Addresses) unless otherwise noted.
+ *
+ * The following hypercalls (and sub operations) are supported on the
+ * ARM platform. Other hypercalls should be considered
+ * unavailable/unsupported.
+ *
+ * HYPERVISOR_memory_op
+ * All generic sub-operations
+ *
+ * HYPERVISOR_domctl
+ * All generic sub-operations, with the exception of:
+ * * XEN_DOMCTL_irq_permission (not yet implemented)
+ *
+ * HYPERVISOR_sched_op
+ * All generic sub-operations, with the exception of:
+ * * SCHEDOP_block -- prefer wfi hardware instruction
+ *
+ * HYPERVISOR_console_io
+ * All generic sub-operations
+ *
+ * HYPERVISOR_xen_version
+ * All generic sub-operations
+ *
+ * HYPERVISOR_event_channel_op
+ * All generic sub-operations
+ *
+ * HYPERVISOR_physdev_op
+ * No sub-operations are currently supported
+ *
+ * HYPERVISOR_sysctl
+ * All generic sub-operations, with the exception of:
+ * * XEN_SYSCTL_page_offline_op
+ * * XEN_SYSCTL_get_pmstat
+ * * XEN_SYSCTL_pm_op
+ *
+ * HYPERVISOR_hvm_op
+ * Exactly these sub-operations are supported:
+ * * HVMOP_set_param
+ * * HVMOP_get_param
+ *
+ * HYPERVISOR_grant_table_op
+ * All generic sub-operations
+ *
+ * HYPERVISOR_vcpu_op
+ * Exactly these sub-operations are supported:
+ * * VCPUOP_register_vcpu_info
+ * * VCPUOP_register_runstate_memory_area
+ *
+ *
+ * Other notes on the ARM ABI:
+ *
+ * - struct start_info is not exported to ARM guests.
+ *
+ * - struct shared_info is mapped by ARM guests using the
+ * HYPERVISOR_memory_op sub-op XENMEM_add_to_physmap, passing
+ * XENMAPSPACE_shared_info as space parameter.
+ *
+ * - All the per-cpu struct vcpu_info are mapped by ARM guests using the
+ * HYPERVISOR_vcpu_op sub-op VCPUOP_register_vcpu_info, including cpu0
+ * struct vcpu_info.
+ *
+ * - The grant table is mapped using the HYPERVISOR_memory_op sub-op
+ * XENMEM_add_to_physmap, passing XENMAPSPACE_grant_table as space
+ * parameter. The memory range specified under the Xen compatible
+ * hypervisor node on device tree can be used as target gpfn for the
+ * mapping.
+ *
+ * - Xenstore is initialized by using the two hvm_params
+ * HVM_PARAM_STORE_PFN and HVM_PARAM_STORE_EVTCHN. They can be read
+ * with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
+ *
+ * - The paravirtualized console is initialized by using the two
+ * hvm_params HVM_PARAM_CONSOLE_PFN and HVM_PARAM_CONSOLE_EVTCHN. They
+ * can be read with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
+ *
+ * - Event channel notifications are delivered using the percpu GIC
+ * interrupt specified under the Xen compatible hypervisor node on
+ * device tree.
+ *
+ * - The device tree Xen compatible node is fully described under Linux
+ * at Documentation/devicetree/bindings/arm/xen.txt.
+ */
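For illustration, mapping struct shared_info as described above might look
roughly like this (field names follow memory.h; the HYPERVISOR_memory_op()
guest wrapper and the choice of free_gpfn are assumptions):

    static void map_shared_info(xen_pfn_t free_gpfn)
    {
        struct xen_add_to_physmap xatp = {
            .domid = DOMID_SELF,
            .space = XENMAPSPACE_shared_info,
            .idx   = 0,
            .gpfn  = free_gpfn,  /* free guest frame chosen by the kernel */
        };

        /* HYPERVISOR_memory_op() is the guest's hypercall wrapper (assumed). */
        HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
    }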
+
#define XEN_HYPERCALL_TAG 0XEA1
+#define int64_aligned_t int64_t __attribute__((aligned(8)))
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
#ifndef __ASSEMBLY__
-#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
- typedef struct { type *p; } __guest_handle_ ## name
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef union { type *p; unsigned long q; } \
+ __guest_handle_ ## name; \
+ typedef union { type *p; uint64_aligned_t q; } \
+ __guest_handle_64_ ## name;
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
+ * in a struct in memory. On ARM it is always 8 bytes in size and
+ * 8-byte aligned.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
+ * hypercall argument. It is 4 bytes on aarch32 and 8 bytes on aarch64.
+ */
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
-#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
-#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
+#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name
+#define set_xen_guest_handle_raw(hnd, val) \
+ do { \
+ typeof(&(hnd)) _sxghr_tmp = &(hnd); \
+ _sxghr_tmp->q = 0; \
+ _sxghr_tmp->p = val; \
+ } while ( 0 )
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
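A hypothetical usage sketch of these handle macros (not taken from this
header):

    DEFINE_XEN_GUEST_HANDLE(uint64_t);  /* emits __guest_handle_uint64_t etc. */

    static void fill_handle(uint64_t *buf, XEN_GUEST_HANDLE(uint64_t) *out)
    {
        /* Zeroes the full 64-bit field first, then stores the pointer. */
        set_xen_guest_handle(*out, buf);
    }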
-struct cpu_user_regs
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
+# define __DECL_REG(n64, n32) union { \
+ uint64_t n64; \
+ uint32_t n32; \
+ }
+#else
+/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). */
+#define __DECL_REG(n64, n32) uint64_t n64
+#endif
+
+struct vcpu_guest_core_regs
{
- uint32_t r0;
- uint32_t r1;
- uint32_t r2;
- uint32_t r3;
- uint32_t r4;
- uint32_t r5;
- uint32_t r6;
- uint32_t r7;
- uint32_t r8;
- uint32_t r9;
- uint32_t r10;
- union {
- uint32_t r11;
- uint32_t fp;
- };
- uint32_t r12;
+ /* Aarch64 Aarch32 */
+ __DECL_REG(x0, r0_usr);
+ __DECL_REG(x1, r1_usr);
+ __DECL_REG(x2, r2_usr);
+ __DECL_REG(x3, r3_usr);
+ __DECL_REG(x4, r4_usr);
+ __DECL_REG(x5, r5_usr);
+ __DECL_REG(x6, r6_usr);
+ __DECL_REG(x7, r7_usr);
+ __DECL_REG(x8, r8_usr);
+ __DECL_REG(x9, r9_usr);
+ __DECL_REG(x10, r10_usr);
+ __DECL_REG(x11, r11_usr);
+ __DECL_REG(x12, r12_usr);
- uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */
+ __DECL_REG(x13, sp_usr);
+ __DECL_REG(x14, lr_usr);
- /* r14 - LR: is the same physical register as LR_usr */
- union {
- uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */
- uint32_t lr_usr;
- };
+ __DECL_REG(x15, __unused_sp_hyp);
- uint32_t pc; /* Return IP */
- uint32_t cpsr; /* Return mode */
- uint32_t pad0; /* Doubleword-align the kernel half of the frame */
+ __DECL_REG(x16, lr_irq);
+ __DECL_REG(x17, sp_irq);
- /* Outer guest frame only from here on... */
+ __DECL_REG(x18, lr_svc);
+ __DECL_REG(x19, sp_svc);
- uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq;
+ __DECL_REG(x20, lr_abt);
+ __DECL_REG(x21, sp_abt);
- uint32_t sp_usr; /* LR_usr is the same register as LR, see above */
+ __DECL_REG(x22, lr_und);
+ __DECL_REG(x23, sp_und);
- uint32_t sp_svc, sp_abt, sp_und, sp_irq, sp_fiq;
- uint32_t lr_svc, lr_abt, lr_und, lr_irq, lr_fiq;
+ __DECL_REG(x24, r8_fiq);
+ __DECL_REG(x25, r9_fiq);
+ __DECL_REG(x26, r10_fiq);
+ __DECL_REG(x27, r11_fiq);
+ __DECL_REG(x28, r12_fiq);
- uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq;
+ __DECL_REG(x29, sp_fiq);
+ __DECL_REG(x30, lr_fiq);
- uint32_t pad1; /* Doubleword-align the user half of the frame */
+ /* Return address and mode */
+ __DECL_REG(pc64, pc32); /* ELR_EL2 */
+ uint32_t cpsr; /* SPSR_EL2 */
+
+ union {
+ uint32_t spsr_el1; /* AArch64 */
+ uint32_t spsr_svc; /* AArch32 */
+ };
+
+ /* AArch32 guests only */
+ uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt;
+
+ /* AArch64 guests only */
+ uint64_t sp_el0;
+ uint64_t sp_el1, elr_el1;
};
-typedef struct cpu_user_regs cpu_user_regs_t;
-DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t);
+#undef __DECL_REG
+
typedef uint64_t xen_pfn_t;
#define PRI_xen_pfn PRIx64
@@ -123,30 +283,77 @@
/* Only one. All other VCPUS must use VCPUOP_register_vcpu_info */
#define XEN_LEGACY_MAX_VCPUS 1
-typedef uint32_t xen_ulong_t;
+typedef uint64_t xen_ulong_t;
+#define PRI_xen_ulong PRIx64
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
struct vcpu_guest_context {
- struct cpu_user_regs user_regs; /* User-level CPU registers */
+#define _VGCF_online 0
+#define VGCF_online (1<<_VGCF_online)
+ uint32_t flags; /* VGCF_* */
+ struct vcpu_guest_core_regs user_regs; /* Core CPU registers */
+
uint32_t sctlr;
- uint32_t ttbr0, ttbr1, ttbcr;
+ uint64_t ttbcr, ttbr0, ttbr1;
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
-struct arch_vcpu_info { };
+/*
+ * struct xen_arch_domainconfig's ABI is covered by
+ * XEN_DOMCTL_INTERFACE_VERSION.
+ */
+#define XEN_DOMCTL_CONFIG_GIC_NATIVE 0
+#define XEN_DOMCTL_CONFIG_GIC_V2 1
+#define XEN_DOMCTL_CONFIG_GIC_V3 2
+struct xen_arch_domainconfig {
+ /* IN/OUT */
+ uint8_t gic_version;
+ /* IN */
+ uint32_t nr_spis;
+ /*
+ * OUT
+ * Based on the property clock-frequency in the DT timer node.
+ * The property may be present when the bootloader/firmware doesn't
+ * correctly set CNTFRQ, which holds the timer frequency.
+ *
+ * As it's not possible to trap this register, we have to replicate
+ * the value in the guest DT.
+ *
+ * = 0 => property not present
+ * > 0 => Value of the property
+ *
+ */
+ uint32_t clock_frequency;
+};
+#endif /* __XEN__ || __XEN_TOOLS__ */
+
+struct arch_vcpu_info {
+};
typedef struct arch_vcpu_info arch_vcpu_info_t;
-struct arch_shared_info { };
+struct arch_shared_info {
+};
typedef struct arch_shared_info arch_shared_info_t;
typedef uint64_t xen_callback_t;
-#endif /* ifndef __ASSEMBLY __ */
+#endif
-/* PSR bits (CPSR, SPSR)*/
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
-/* 0-4: Mode */
-#define PSR_MODE_MASK 0x1f
+/* PSR bits (CPSR, SPSR) */
+
+#define PSR_THUMB (1<<5) /* Thumb Mode enable */
+#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */
+#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */
+#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */
+#define PSR_BIG_ENDIAN (1<<9) /* arm32: Big Endian Mode */
+#define PSR_DBG_MASK (1<<9) /* arm64: Debug Exception mask */
+#define PSR_IT_MASK (0x0600fc00) /* Thumb If-Then Mask */
+#define PSR_JAZELLE (1<<24) /* Jazelle Mode */
+
+/* 32 bit modes */
#define PSR_MODE_USR 0x10
#define PSR_MODE_FIQ 0x11
#define PSR_MODE_IRQ 0x12
@@ -157,19 +364,102 @@
#define PSR_MODE_UND 0x1b
#define PSR_MODE_SYS 0x1f
-#define PSR_THUMB (1<<5) /* Thumb Mode enable */
-#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */
-#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */
-#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */
-#define PSR_BIG_ENDIAN (1<<9) /* Big Endian Mode */
-#define PSR_JAZELLE (1<<24) /* Jazelle Mode */
+/* 64 bit modes */
+#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */
+#define PSR_MODE_EL3h 0x0d
+#define PSR_MODE_EL3t 0x0c
+#define PSR_MODE_EL2h 0x09
+#define PSR_MODE_EL2t 0x08
+#define PSR_MODE_EL1h 0x05
+#define PSR_MODE_EL1t 0x04
+#define PSR_MODE_EL0t 0x00
+#define PSR_GUEST32_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC)
+#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h)
+
+#define SCTLR_GUEST_INIT 0x00c50078
+
+/*
+ * Virtual machine platform (memory layout, interrupts)
+ *
+ * These are defined for consistency between the tools and the
+ * hypervisor. Guests must not rely on these hardcoded values but
+ * should instead use the FDT.
+ */
+
+/* Physical Address Space */
+
+/*
+ * vGIC mappings: Only one set of mapping is used by the guest.
+ * Therefore they can overlap.
+ */
+
+/* vGIC v2 mappings */
+#define GUEST_GICD_BASE 0x03001000ULL
+#define GUEST_GICD_SIZE 0x00001000ULL
+#define GUEST_GICC_BASE 0x03002000ULL
+#define GUEST_GICC_SIZE 0x00000100ULL
+
+/* vGIC v3 mappings */
+#define GUEST_GICV3_GICD_BASE 0x03001000ULL
+#define GUEST_GICV3_GICD_SIZE 0x00010000ULL
+
+#define GUEST_GICV3_RDIST_STRIDE 0x20000ULL
+#define GUEST_GICV3_RDIST_REGIONS 1
+
+#define GUEST_GICV3_GICR0_BASE 0x03020000ULL /* vCPU0 - vCPU127 */
+#define GUEST_GICV3_GICR0_SIZE 0x01000000ULL
+
+/*
+ * 16MB == 4096 pages reserved for guest to use as a region to map its
+ * grant table in.
+ */
+#define GUEST_GNTTAB_BASE 0x38000000ULL
+#define GUEST_GNTTAB_SIZE 0x01000000ULL
+
+#define GUEST_MAGIC_BASE 0x39000000ULL
+#define GUEST_MAGIC_SIZE 0x01000000ULL
+
+#define GUEST_RAM_BANKS 2
+
+#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */
+#define GUEST_RAM0_SIZE 0xc0000000ULL
+
+#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */
+#define GUEST_RAM1_SIZE 0xfe00000000ULL
+
+#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */
+/* Largest amount of actual RAM, not including holes */
+#define GUEST_RAM_MAX (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE)
+/* Suitable for e.g. const uint64_t ramfoo[] = GUEST_RAM_BANK_FOOS; */
+#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE }
+#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }
+
+/* Interrupts */
+#define GUEST_TIMER_VIRT_PPI 27
+#define GUEST_TIMER_PHYS_S_PPI 29
+#define GUEST_TIMER_PHYS_NS_PPI 30
+#define GUEST_EVTCHN_PPI 31
+
+/* PSCI functions */
+#define PSCI_cpu_suspend 0
+#define PSCI_cpu_off 1
+#define PSCI_cpu_on 2
+#define PSCI_migrate 3
+
+#endif
+
+#ifndef __ASSEMBLY__
+/* Stub definition of PMU structure */
+typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t;
+#endif
+
#endif /* __XEN_PUBLIC_ARCH_ARM_H__ */
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/callback.h
===================================================================
--- trunk/sys/xen/interface/callback.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/callback.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -37,7 +37,7 @@
* @extra_args == Operation-specific extra arguments (NULL if none).
*/
-/* ia64, x86: Callback for event delivery. */
+/* x86: Callback for event delivery. */
#define CALLBACKTYPE_event 0
/* x86: Failsafe callback when guest state cannot be restored by Xen. */
@@ -114,7 +114,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/dom0_ops.h
===================================================================
--- trunk/sys/xen/interface/dom0_ops.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/dom0_ops.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -113,7 +113,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/domctl.h
===================================================================
--- trunk/sys/xen/interface/domctl.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/domctl.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -35,8 +35,10 @@
#include "xen.h"
#include "grant_table.h"
+#include "hvm/save.h"
+#include "memory.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000008
+#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000b
/*
* NB. xen_domctl.domain is an IN/OUT parameter for this operation.
@@ -47,7 +49,7 @@
/* IN parameters */
uint32_t ssidref;
xen_domain_handle_t handle;
- /* Is this an HVM guest (as opposed to a PV guest)? */
+ /* Is this an HVM guest (as opposed to a PVH or PV guest)? */
#define _XEN_DOMCTL_CDF_hvm_guest 0
#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
/* Use hardware-assisted paging if available? */
@@ -59,7 +61,11 @@
/* Disable out-of-sync shadow page tables? */
#define _XEN_DOMCTL_CDF_oos_off 3
#define XEN_DOMCTL_CDF_oos_off (1U<<_XEN_DOMCTL_CDF_oos_off)
+ /* Is this a PVH guest (as opposed to an HVM or PV guest)? */
+#define _XEN_DOMCTL_CDF_pvh_guest 4
+#define XEN_DOMCTL_CDF_pvh_guest (1U<<_XEN_DOMCTL_CDF_pvh_guest)
uint32_t flags;
+ struct xen_arch_domainconfig config;
};
typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
@@ -89,6 +95,9 @@
/* Being debugged. */
#define _XEN_DOMINF_debugged 6
#define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged)
+/* domain is PVH */
+#define _XEN_DOMINF_pvh_guest 7
+#define XEN_DOMINF_pvh_guest (1U<<_XEN_DOMINF_pvh_guest)
/* XEN_DOMINF_shutdown guest-supplied code. */
#define XEN_DOMINF_shutdownmask 255
#define XEN_DOMINF_shutdownshift 16
@@ -95,11 +104,13 @@
uint32_t flags; /* XEN_DOMINF_* */
uint64_aligned_t tot_pages;
uint64_aligned_t max_pages;
+ uint64_aligned_t outstanding_pages;
uint64_aligned_t shr_pages;
uint64_aligned_t paged_pages;
uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
uint64_aligned_t cpu_time;
uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
+#define XEN_INVALID_MAX_VCPU_ID (~0U) /* Domain has no vcpus? */
uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
uint32_t ssidref;
xen_domain_handle_t handle;
@@ -136,30 +147,9 @@
#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
#define XEN_DOMCTL_PFINFO_XALLOC (0xeU<<28) /* allocate-only page */
-#define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28)
+#define XEN_DOMCTL_PFINFO_BROKEN (0xdU<<28) /* broken page */
#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
-struct xen_domctl_getpageframeinfo {
- /* IN variables. */
- uint64_aligned_t gmfn; /* GMFN to query */
- /* OUT variables. */
- /* Is the page PINNED to a type? */
- uint32_t type; /* see above type defs */
-};
-typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
-
-
-/* XEN_DOMCTL_getpageframeinfo2 */
-struct xen_domctl_getpageframeinfo2 {
- /* IN variables. */
- uint64_aligned_t num;
- /* IN/OUT variables. */
- XEN_GUEST_HANDLE_64(uint32) array;
-};
-typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
-
/* XEN_DOMCTL_getpageframeinfo3 */
struct xen_domctl_getpageframeinfo3 {
/* IN variables. */
@@ -279,12 +269,47 @@
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
+/* Get/set the NUMA node(s) with which the guest has affinity with. */
+/* XEN_DOMCTL_setnodeaffinity */
+/* XEN_DOMCTL_getnodeaffinity */
+struct xen_domctl_nodeaffinity {
+ struct xenctl_bitmap nodemap;/* IN */
+};
+typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
+
+
/* Get/set which physical cpus a vcpu can execute on. */
/* XEN_DOMCTL_setvcpuaffinity */
/* XEN_DOMCTL_getvcpuaffinity */
struct xen_domctl_vcpuaffinity {
- uint32_t vcpu; /* IN */
- struct xenctl_cpumap cpumap; /* IN/OUT */
+ /* IN variables. */
+ uint32_t vcpu;
+ /* Set/get the hard affinity for vcpu */
+#define _XEN_VCPUAFFINITY_HARD 0
+#define XEN_VCPUAFFINITY_HARD (1U<<_XEN_VCPUAFFINITY_HARD)
+ /* Set/get the soft affinity for vcpu */
+#define _XEN_VCPUAFFINITY_SOFT 1
+#define XEN_VCPUAFFINITY_SOFT (1U<<_XEN_VCPUAFFINITY_SOFT)
+ uint32_t flags;
+ /*
+ * IN/OUT variables.
+ *
+ * Both are IN/OUT for XEN_DOMCTL_setvcpuaffinity, in which case they
+ * contain effective hard or/and soft affinity. That is, upon successful
+ * return, cpumap_soft, contains the intersection of the soft affinity,
+ * hard affinity and the cpupool's online CPUs for the domain (if
+ * XEN_VCPUAFFINITY_SOFT was set in flags). cpumap_hard contains the
+ * intersection between hard affinity and the cpupool's online CPUs (if
+ * XEN_VCPUAFFINITY_HARD was set in flags).
+ *
+ * Both are OUT-only for XEN_DOMCTL_getvcpuaffinity, in which case they
+ * contain the plain hard and/or soft affinity masks that were set during
+ * previous successful calls to XEN_DOMCTL_setvcpuaffinity (or the
+ * default values), without intersecting or altering them in any way.
+ */
+ struct xenctl_bitmap cpumap_hard;
+ struct xenctl_bitmap cpumap_soft;
};
typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
@@ -300,10 +325,12 @@
/* XEN_DOMCTL_scheduler_op */
/* Scheduler types. */
-#define XEN_SCHEDULER_SEDF 4
+/* #define XEN_SCHEDULER_SEDF 4 (Removed) */
#define XEN_SCHEDULER_CREDIT 5
#define XEN_SCHEDULER_CREDIT2 6
#define XEN_SCHEDULER_ARINC653 7
+#define XEN_SCHEDULER_RTDS 8
+
/* Set or get info? */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1
@@ -311,13 +338,6 @@
uint32_t sched_id; /* XEN_SCHEDULER_* */
uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
union {
- struct xen_domctl_sched_sedf {
- uint64_aligned_t period;
- uint64_aligned_t slice;
- uint64_aligned_t latency;
- uint32_t extratime;
- uint32_t weight;
- } sedf;
struct xen_domctl_sched_credit {
uint16_t weight;
uint16_t cap;
@@ -325,6 +345,10 @@
struct xen_domctl_sched_credit2 {
uint16_t weight;
} credit2;
+ struct xen_domctl_sched_rtds {
+ uint32_t period;
+ uint32_t budget;
+ } rtds;
} u;
};
typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
@@ -384,29 +408,9 @@
DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
-/* XEN_DOMCTL_arch_setup */
-#define _XEN_DOMAINSETUP_hvm_guest 0
-#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest)
-#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */
-#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query)
-#define _XEN_DOMAINSETUP_sioemu_guest 2
-#define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest)
-typedef struct xen_domctl_arch_setup {
- uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */
-#ifdef __ia64__
- uint64_aligned_t bp; /* mpaddr of boot param area */
- uint64_aligned_t maxmem; /* Highest memory address for MDT. */
- uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */
- uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */
- int8_t vhpt_size_log2; /* Log2 of VHPT size. */
-#endif
-} xen_domctl_arch_setup_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
-
-
/* XEN_DOMCTL_settimeoffset */
struct xen_domctl_settimeoffset {
- int32_t time_offset_seconds; /* applied to domain wallclock time */
+ int64_aligned_t time_offset_seconds; /* applied to domain wallclock time */
};
typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
@@ -430,14 +434,6 @@
DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
-/* XEN_DOMCTL_real_mode_area */
-struct xen_domctl_real_mode_area {
- uint32_t log; /* log2 of Real Mode Area size */
-};
-typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
-
-
/* XEN_DOMCTL_sendtrigger */
#define XEN_DOMCTL_SENDTRIGGER_NMI 0
#define XEN_DOMCTL_SENDTRIGGER_RESET 1
@@ -452,12 +448,33 @@
DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
-/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
+/* Assign a device to a guest. Sets up IOMMU structures. */
/* XEN_DOMCTL_assign_device */
/* XEN_DOMCTL_test_assign_device */
-/* XEN_DOMCTL_deassign_device */
+/*
+ * XEN_DOMCTL_deassign_device: The behavior of this DOMCTL differs
+ * between the different type of device:
+ * - PCI device (XEN_DOMCTL_DEV_PCI) will be reassigned to DOM0
+ * - DT device (XEN_DOMCTL_DEV_DT) will be left unassigned. DOM0
+ * will have to call XEN_DOMCTL_assign_device in order to use the
+ * device.
+ */
+#define XEN_DOMCTL_DEV_PCI 0
+#define XEN_DOMCTL_DEV_DT 1
struct xen_domctl_assign_device {
- uint32_t machine_sbdf; /* machine PCI ID of assigned device */
+ uint32_t dev; /* XEN_DOMCTL_DEV_* */
+ union {
+ struct {
+ uint32_t machine_sbdf; /* machine PCI ID of assigned device */
+ } pci;
+ struct {
+ uint32_t size; /* Length of the path */
+ XEN_GUEST_HANDLE_64(char) path; /* path to the device tree node */
+ } dt;
+ } u;
+ /* IN */
+#define XEN_DOMCTL_DEV_RDM_RELAXED 1
+ uint32_t flag; /* flag of assigned device */
};
typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
@@ -481,6 +498,7 @@
PT_IRQ_TYPE_ISA,
PT_IRQ_TYPE_MSI,
PT_IRQ_TYPE_MSI_TRANSLATE,
+ PT_IRQ_TYPE_SPI, /* ARM: valid range 32-1019 */
} pt_irq_type_t;
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
@@ -501,6 +519,9 @@
uint32_t gflags;
uint64_aligned_t gtable;
} msi;
+ struct {
+ uint16_t spi;
+ } spi;
} u;
};
typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
@@ -508,6 +529,7 @@
/* Bind machine I/O address range -> HVM address range. */
+/* If this returns -E2BIG lower nr_mfns value. */
/* XEN_DOMCTL_memory_mapping */
#define DPCI_ADD_MAPPING 1
#define DPCI_REMOVE_MAPPING 0
@@ -545,6 +567,7 @@
#define XEN_DOMCTL_MEM_CACHEATTR_WP 5
#define XEN_DOMCTL_MEM_CACHEATTR_WB 6
#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
+#define XEN_DOMCTL_DELETE_MEM_CACHEATTR (~(uint32_t)0)
struct xen_domctl_pin_mem_cacheattr {
uint64_aligned_t start, end;
uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
@@ -572,28 +595,20 @@
uint16_t sysenter_callback_cs;
uint8_t syscall32_disables_events;
uint8_t sysenter_disables_events;
- uint64_aligned_t mcg_cap;
+#if defined(__GNUC__)
+ union {
+ uint64_aligned_t mcg_cap;
+ struct hvm_vmce_vcpu vmce;
+ };
+#else
+ struct hvm_vmce_vcpu vmce;
#endif
+#endif
};
typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
/*
- * Set optimizaton features for a domain
- */
-/* XEN_DOMCTL_set_opt_feature */
-struct xen_domctl_set_opt_feature {
-#if defined(__ia64__)
- struct xen_ia64_opt_feature optf;
-#else
- /* Make struct non-empty: do not depend on this field name! */
- uint64_t dummy;
-#endif
-};
-typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t);
-
-/*
* Set the target domain for a domain
*/
/* XEN_DOMCTL_set_target */
@@ -617,6 +632,22 @@
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
#endif
+/*
+ * Arranges that if the domain suspends (specifically, if it shuts
+ * down with code SHUTDOWN_suspend), this event channel will be
+ * notified.
+ *
+ * This is _instead of_ the usual notification to the global
+ * VIRQ_DOM_EXC. (In most systems that pirq is owned by xenstored.)
+ *
+ * Only one subscription per domain is possible. Last subscriber
+ * wins; others are silently displaced.
+ *
+ * NB that contrary to the rather general name, it only applies to
+ * domain shutdown with code suspend. Shutdown for other reasons
+ * (including crash), and domain death, are notified to VIRQ_DOM_EXC
+ * regardless.
+ */
/* XEN_DOMCTL_subscribe */
struct xen_domctl_subscribe {
uint32_t port; /* IN */
@@ -665,18 +696,13 @@
/* XEN_DOMCTL_gettscinfo */
/* XEN_DOMCTL_settscinfo */
-struct xen_guest_tsc_info {
+typedef struct xen_domctl_tsc_info {
+ /* IN/OUT */
uint32_t tsc_mode;
uint32_t gtsc_khz;
uint32_t incarnation;
uint32_t pad;
uint64_aligned_t elapsed_nsec;
-};
-typedef struct xen_guest_tsc_info xen_guest_tsc_info_t;
-DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t);
-typedef struct xen_domctl_tsc_info {
- XEN_GUEST_HANDLE_64(xen_guest_tsc_info_t) out_info; /* OUT */
- xen_guest_tsc_info_t info; /* IN */
} xen_domctl_tsc_info_t;
/* XEN_DOMCTL_gdbsx_guestmemio guest mem io */
@@ -706,12 +732,23 @@
};
/*
- * Memory event operations
+ * VM event operations
*/
-/* XEN_DOMCTL_mem_event_op */
+/* XEN_DOMCTL_vm_event_op */
/*
+ * There are currently three rings available for VM events:
+ * sharing, monitor and paging. This hypercall allows one to
+ * control these rings (enable/disable), as well as to signal
+ * to the hypervisor to pull responses (resume) from the given
+ * ring.
+ */
+#define XEN_VM_EVENT_ENABLE 0
+#define XEN_VM_EVENT_DISABLE 1
+#define XEN_VM_EVENT_RESUME 2
+
+/*
* Domain memory paging
* Page memory in and out.
* Domctl interface to set up and tear down the
@@ -718,7 +755,7 @@
* pager<->hypervisor interface. Use XENMEM_paging_op*
* to perform per-page operations.
*
- * The XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE domctl returns several
+ * The XEN_VM_EVENT_PAGING_ENABLE domctl returns several
* non-standard error codes to indicate why paging could not be enabled:
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EMLINK - guest has iommu passthrough enabled
@@ -725,35 +762,32 @@
* EXDEV - guest has PoD enabled
* EBUSY - guest has or had paging enabled, ring buffer still active
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1
+#define XEN_DOMCTL_VM_EVENT_OP_PAGING 1
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1
-
/*
- * Access permissions.
+ * Monitor helper.
*
* As with paging, use the domctl for teardown/setup of the
* helper<->hypervisor interface.
*
- * There are HVM hypercalls to set the per-page access permissions of every
- * page in a domain. When one of these permissions--independent, read,
- * write, and execute--is violated, the VCPU is paused and a memory event
- * is sent with what happened. (See public/mem_event.h) .
+ * The monitor interface can be used to register for various VM events. For
+ * example, there are HVM hypercalls to set the per-page access permissions
+ * of every page in a domain. When one of these permissions--independent,
+ * read, write, and execute--is violated, the VCPU is paused and a memory event
+ * is sent with what happened. The memory event handler can then resume the
+ * VCPU and redo the access with a XEN_VM_EVENT_RESUME option.
*
- * The memory event handler can then resume the VCPU and redo the access
- * with a XENMEM_access_op_resume hypercall.
+ * See public/vm_event.h for the list of available events that can be
+ * subscribed to via the monitor interface.
*
- * The XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE domctl returns several
+ * The XEN_VM_EVENT_MONITOR_* domctls return
* non-standard error codes to indicate why access could not be enabled:
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EBUSY - guest has or had access enabled, ring buffer still active
+ *
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2
+#define XEN_DOMCTL_VM_EVENT_OP_MONITOR 2
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
-
/*
* Sharing ENOMEM helper.
*
@@ -767,21 +801,18 @@
* Note that sharing can be turned on (as per the domctl below)
* *without* this ring being setup.
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3
+#define XEN_DOMCTL_VM_EVENT_OP_SHARING 3
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE 1
-
/* Use for teardown/setup of helper<->hypervisor interface for paging,
* access and sharing.*/
-struct xen_domctl_mem_event_op {
- uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
- uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
+struct xen_domctl_vm_event_op {
+ uint32_t op; /* XEN_VM_EVENT_* */
+ uint32_t mode; /* XEN_DOMCTL_VM_EVENT_OP_* */
uint32_t port; /* OUT: event channel for ring */
};
-typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
+typedef struct xen_domctl_vm_event_op xen_domctl_vm_event_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vm_event_op_t);
/*
* Memory sharing operations
@@ -822,7 +853,7 @@
/* IN: VCPU that this call applies to. */
uint32_t vcpu;
/*
- * SET: xfeature support mask of struct (IN)
+ * SET: Ignored.
* GET: xfeature support mask of struct (IN/OUT)
* xfeature mask is served as identifications of the saving format
* so that compatible CPUs can have a check on format to decide
@@ -850,6 +881,189 @@
typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
+struct xen_domctl_set_broken_page_p2m {
+ uint64_aligned_t pfn;
+};
+typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
+
+/*
+ * XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port
+ * number the guest may use. Use this to limit the amount of resources
+ * (global mapping space, xenheap) a guest may use for event channels.
+ */
+struct xen_domctl_set_max_evtchn {
+ uint32_t max_port;
+};
+typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
+
+/*
+ * ARM: Clean and invalidate caches associated with given region of
+ * guest memory.
+ */
+struct xen_domctl_cacheflush {
+ /* IN: page range to flush. */
+ xen_pfn_t start_pfn, nr_pfns;
+};
+typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
+
+#if defined(__i386__) || defined(__x86_64__)
+struct xen_domctl_vcpu_msr {
+ uint32_t index;
+ uint32_t reserved;
+ uint64_aligned_t value;
+};
+typedef struct xen_domctl_vcpu_msr xen_domctl_vcpu_msr_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msr_t);
+
+/*
+ * XEN_DOMCTL_set_vcpu_msrs / XEN_DOMCTL_get_vcpu_msrs.
+ *
+ * Input:
+ * - A NULL 'msrs' guest handle is a request for the maximum 'msr_count'.
+ * - Otherwise, 'msr_count' is the number of entries in 'msrs'.
+ *
+ * Output for get:
+ * - If 'msr_count' is less than the number Xen needs to write, -ENOBUFS shall
+ * be returned and 'msr_count' updated to reflect the intended number.
+ * - On success, 'msr_count' shall indicate the number of MSRs written, which
+ * may be less than the maximum if some are not currently used by the vcpu.
+ *
+ * Output for set:
+ * - If Xen encounters an error with a specific MSR, -EINVAL shall be returned
+ * and 'msr_count' shall be set to the offending index, to aid debugging.
+ */
+struct xen_domctl_vcpu_msrs {
+ uint32_t vcpu; /* IN */
+ uint32_t msr_count; /* IN/OUT */
+ XEN_GUEST_HANDLE_64(xen_domctl_vcpu_msr_t) msrs; /* IN/OUT */
+};
+typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
+#endif
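For illustration, the NULL-handle query described above could be driven
roughly as follows (do_domctl() is a hypothetical privileged-call wrapper,
not part of this interface):

    static int get_vcpu_msr_count(domid_t domid, uint32_t *max_out)
    {
        struct xen_domctl dc = {
            .cmd               = XEN_DOMCTL_get_vcpu_msrs,
            .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
            .domain            = domid,
        };

        dc.u.vcpu_msrs.vcpu = 0;
        set_xen_guest_handle_raw(dc.u.vcpu_msrs.msrs, NULL); /* NULL => max count */
        if (do_domctl(&dc))
            return -1;
        *max_out = dc.u.vcpu_msrs.msr_count; /* entries to allocate for the real call */
        return 0;
    }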
+
+/* XEN_DOMCTL_setvnumainfo: specifies a virtual NUMA topology for the guest */
+struct xen_domctl_vnuma {
+ /* IN: number of vNUMA nodes to setup. Shall be greater than 0 */
+ uint32_t nr_vnodes;
+ /* IN: number of memory ranges to setup */
+ uint32_t nr_vmemranges;
+ /*
+ * IN: number of vCPUs of the domain (used as size of the vcpu_to_vnode
+ * array declared below). Shall be equal to the domain's max_vcpus.
+ */
+ uint32_t nr_vcpus;
+ uint32_t pad; /* must be zero */
+
+ /*
+ * IN: array for specifying the distances of the vNUMA nodes
+ * between each other. Shall have nr_vnodes*nr_vnodes elements.
+ */
+ XEN_GUEST_HANDLE_64(uint) vdistance;
+ /*
+ * IN: array for specifying to what vNUMA node each vCPU belongs.
+ * Shall have nr_vcpus elements.
+ */
+ XEN_GUEST_HANDLE_64(uint) vcpu_to_vnode;
+ /*
+ * IN: array for specifying on what physical NUMA node each vNUMA
+ * node is placed. Shall have nr_vnodes elements.
+ */
+ XEN_GUEST_HANDLE_64(uint) vnode_to_pnode;
+ /*
+ * IN: array for specifying the memory ranges. Shall have
+ * nr_vmemranges elements.
+ */
+ XEN_GUEST_HANDLE_64(xen_vmemrange_t) vmemrange;
+};
+typedef struct xen_domctl_vnuma xen_domctl_vnuma_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vnuma_t);
+
+struct xen_domctl_psr_cmt_op {
+#define XEN_DOMCTL_PSR_CMT_OP_DETACH 0
+#define XEN_DOMCTL_PSR_CMT_OP_ATTACH 1
+#define XEN_DOMCTL_PSR_CMT_OP_QUERY_RMID 2
+ uint32_t cmd;
+ uint32_t data;
+};
+typedef struct xen_domctl_psr_cmt_op xen_domctl_psr_cmt_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
+
+/* XEN_DOMCTL_MONITOR_*
+ *
+ * Enable/disable monitoring various VM events.
+ * This domctl configures what events will be reported to helper apps
+ * via the ring buffer "MONITOR". The ring has to be first enabled
+ * with the domctl XEN_DOMCTL_VM_EVENT_OP_MONITOR.
+ *
+ * GET_CAPABILITIES can be used to determine which of these features is
+ * available on a given platform.
+ *
+ * NOTICE: mem_access events are also delivered via the "MONITOR" ring buffer;
+ * however, enabling/disabling those events is performed with the use of
+ * memory_op hypercalls!
+ */
+#define XEN_DOMCTL_MONITOR_OP_ENABLE 0
+#define XEN_DOMCTL_MONITOR_OP_DISABLE 1
+#define XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES 2
+
+#define XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG 0
+#define XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR 1
+#define XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP 2
+#define XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT 3
+#define XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST 4
+
+struct xen_domctl_monitor_op {
+ uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
+
+ /*
+ * When used with ENABLE/DISABLE this has to be set to
+ * the requested XEN_DOMCTL_MONITOR_EVENT_* value.
+ * With GET_CAPABILITIES this field returns a bitmap of
+ * events supported by the platform, in the format
+ * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
+ */
+ uint32_t event;
+
+ /*
+ * Further options when issuing XEN_DOMCTL_MONITOR_OP_ENABLE.
+ */
+ union {
+ struct {
+ /* Which control register */
+ uint8_t index;
+ /* Pause vCPU until response */
+ uint8_t sync;
+ /* Send event only on a change of value */
+ uint8_t onchangeonly;
+ } mov_to_cr;
+
+ struct {
+ /* Enable the capture of an extended set of MSRs */
+ uint8_t extended_capture;
+ } mov_to_msr;
+
+ struct {
+ /* Pause vCPU until response */
+ uint8_t sync;
+ } guest_request;
+ } u;
+};
+typedef struct xen_domctl_monitor_op xen_domctl_monitor_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_monitor_op_t);
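A sketch of the ENABLE pattern described above (again using a hypothetical
do_domctl() wrapper; the monitor ring must already have been set up via
XEN_DOMCTL_VM_EVENT_OP_MONITOR):

    static int enable_singlestep_monitor(domid_t domid)
    {
        struct xen_domctl dc = {
            .cmd               = XEN_DOMCTL_monitor_op,
            .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
            .domain            = domid,
        };

        dc.u.monitor_op.op    = XEN_DOMCTL_MONITOR_OP_ENABLE;
        dc.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP;
        return do_domctl(&dc);
    }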
+
+struct xen_domctl_psr_cat_op {
+#define XEN_DOMCTL_PSR_CAT_OP_SET_L3_CBM 0
+#define XEN_DOMCTL_PSR_CAT_OP_GET_L3_CBM 1
+ uint32_t cmd; /* IN: XEN_DOMCTL_PSR_CAT_OP_* */
+ uint32_t target; /* IN */
+ uint64_t data; /* IN/OUT */
+};
+typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -858,8 +1072,8 @@
#define XEN_DOMCTL_unpausedomain 4
#define XEN_DOMCTL_getdomaininfo 5
#define XEN_DOMCTL_getmemlist 6
-#define XEN_DOMCTL_getpageframeinfo 7
-#define XEN_DOMCTL_getpageframeinfo2 8
+/* #define XEN_DOMCTL_getpageframeinfo 7 Obsolete - use getpageframeinfo3 */
+/* #define XEN_DOMCTL_getpageframeinfo2 8 Obsolete - use getpageframeinfo3 */
#define XEN_DOMCTL_setvcpuaffinity 9
#define XEN_DOMCTL_shadow_op 10
#define XEN_DOMCTL_max_mem 11
@@ -874,10 +1088,10 @@
#define XEN_DOMCTL_iomem_permission 20
#define XEN_DOMCTL_ioport_permission 21
#define XEN_DOMCTL_hypercall_init 22
-#define XEN_DOMCTL_arch_setup 23
+#define XEN_DOMCTL_arch_setup 23 /* Obsolete IA64 only */
#define XEN_DOMCTL_settimeoffset 24
#define XEN_DOMCTL_getvcpuaffinity 25
-#define XEN_DOMCTL_real_mode_area 26
+#define XEN_DOMCTL_real_mode_area 26 /* Obsolete PPC only */
#define XEN_DOMCTL_resumedomain 27
#define XEN_DOMCTL_sendtrigger 28
#define XEN_DOMCTL_subscribe 29
@@ -892,7 +1106,7 @@
#define XEN_DOMCTL_pin_mem_cacheattr 41
#define XEN_DOMCTL_set_ext_vcpucontext 42
#define XEN_DOMCTL_get_ext_vcpucontext 43
-#define XEN_DOMCTL_set_opt_feature 44
+#define XEN_DOMCTL_set_opt_feature 44 /* Obsolete IA64 only */
#define XEN_DOMCTL_test_assign_device 45
#define XEN_DOMCTL_set_target 46
#define XEN_DOMCTL_deassign_device 47
@@ -904,7 +1118,7 @@
#define XEN_DOMCTL_suppress_spurious_page_faults 53
#define XEN_DOMCTL_debug_op 54
#define XEN_DOMCTL_gethvmcontext_partial 55
-#define XEN_DOMCTL_mem_event_op 56
+#define XEN_DOMCTL_vm_event_op 56
#define XEN_DOMCTL_mem_sharing_op 57
#define XEN_DOMCTL_disable_migrate 58
#define XEN_DOMCTL_gettscinfo 59
@@ -915,6 +1129,17 @@
#define XEN_DOMCTL_set_access_required 64
#define XEN_DOMCTL_audit_p2m 65
#define XEN_DOMCTL_set_virq_handler 66
+#define XEN_DOMCTL_set_broken_page_p2m 67
+#define XEN_DOMCTL_setnodeaffinity 68
+#define XEN_DOMCTL_getnodeaffinity 69
+#define XEN_DOMCTL_set_max_evtchn 70
+#define XEN_DOMCTL_cacheflush 71
+#define XEN_DOMCTL_get_vcpu_msrs 72
+#define XEN_DOMCTL_set_vcpu_msrs 73
+#define XEN_DOMCTL_setvnumainfo 74
+#define XEN_DOMCTL_psr_cmt_op 75
+#define XEN_DOMCTL_monitor_op 77
+#define XEN_DOMCTL_psr_cat_op 78
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -925,9 +1150,8 @@
struct xen_domctl_createdomain createdomain;
struct xen_domctl_getdomaininfo getdomaininfo;
struct xen_domctl_getmemlist getmemlist;
- struct xen_domctl_getpageframeinfo getpageframeinfo;
- struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
struct xen_domctl_getpageframeinfo3 getpageframeinfo3;
+ struct xen_domctl_nodeaffinity nodeaffinity;
struct xen_domctl_vcpuaffinity vcpuaffinity;
struct xen_domctl_shadow_op shadow_op;
struct xen_domctl_max_mem max_mem;
@@ -941,11 +1165,9 @@
struct xen_domctl_iomem_permission iomem_permission;
struct xen_domctl_ioport_permission ioport_permission;
struct xen_domctl_hypercall_init hypercall_init;
- struct xen_domctl_arch_setup arch_setup;
struct xen_domctl_settimeoffset settimeoffset;
struct xen_domctl_disable_migrate disable_migrate;
struct xen_domctl_tsc_info tsc_info;
- struct xen_domctl_real_mode_area real_mode_area;
struct xen_domctl_hvmcontext hvmcontext;
struct xen_domctl_hvmcontext_partial hvmcontext_partial;
struct xen_domctl_address_size address_size;
@@ -957,22 +1179,29 @@
struct xen_domctl_ioport_mapping ioport_mapping;
struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
struct xen_domctl_ext_vcpucontext ext_vcpucontext;
- struct xen_domctl_set_opt_feature set_opt_feature;
struct xen_domctl_set_target set_target;
struct xen_domctl_subscribe subscribe;
struct xen_domctl_debug_op debug_op;
- struct xen_domctl_mem_event_op mem_event_op;
+ struct xen_domctl_vm_event_op vm_event_op;
struct xen_domctl_mem_sharing_op mem_sharing_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
struct xen_domctl_vcpuextstate vcpuextstate;
+ struct xen_domctl_vcpu_msrs vcpu_msrs;
#endif
struct xen_domctl_set_access_required access_required;
struct xen_domctl_audit_p2m audit_p2m;
struct xen_domctl_set_virq_handler set_virq_handler;
+ struct xen_domctl_set_max_evtchn set_max_evtchn;
struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
+ struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
+ struct xen_domctl_cacheflush cacheflush;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
+ struct xen_domctl_vnuma vnuma;
+ struct xen_domctl_psr_cmt_op psr_cmt_op;
+ struct xen_domctl_monitor_op monitor_op;
+ struct xen_domctl_psr_cat_op psr_cat_op;
uint8_t pad[128];
} u;
};
@@ -984,7 +1213,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/elfnote.h
===================================================================
--- trunk/sys/xen/interface/elfnote.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/elfnote.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -29,6 +29,8 @@
#define __XEN_PUBLIC_ELFNOTE_H__
/*
+ * `incontents 200 elfnotes ELF notes
+ *
* The notes should live in a PT_NOTE segment and have "Xen" in the
* name field.
*
@@ -37,6 +39,9 @@
*
* LEGACY indicated the fields in the legacy __xen_guest string which
* this a note type replaces.
+ *
+ * String values (for non-legacy) are NULL terminated ASCII, also known
+ * as ASCIZ type.
*/
/*
@@ -67,8 +72,8 @@
#define XEN_ELFNOTE_VIRT_BASE 3
/*
- * The offset of the ELF paddr field from the acutal required
- * psuedo-physical address (numeric).
+ * The offset of the ELF paddr field from the actual required
+ * pseudo-physical address (numeric).
*
* This is used to maintain backwards compatibility with older kernels
* which wrote __PAGE_OFFSET into that field. This field defaults to 0
@@ -159,6 +164,9 @@
/*
* Whether or not the guest supports cooperative suspend cancellation.
+ * This is a numeric value.
+ *
+ * Default is 0
*/
#define XEN_ELFNOTE_SUSPEND_CANCEL 14
@@ -256,7 +264,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Added: trunk/sys/xen/interface/errno.h
===================================================================
--- trunk/sys/xen/interface/errno.h (rev 0)
+++ trunk/sys/xen/interface/errno.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -0,0 +1,96 @@
+/* $MidnightBSD$ */
+#ifndef __XEN_PUBLIC_ERRNO_H__
+
+#ifndef __ASSEMBLY__
+
+#define XEN_ERRNO(name, value) XEN_##name = value,
+enum xen_errno {
+
+#else /* !__ASSEMBLY__ */
+
+#define XEN_ERRNO(name, value) .equ XEN_##name, value
+
+#endif /* __ASSEMBLY__ */
+
+/* ` enum neg_errnoval { [ -Efoo for each Efoo in the list below ] } */
+/* ` enum errnoval { */
+
+#endif /* __XEN_PUBLIC_ERRNO_H__ */
+
+#ifdef XEN_ERRNO
+
+/*
+ * Values originating from x86 Linux. Please consider using respective
+ * values when adding new definitions here.
+ *
+ * The set of identifiers to be added here shouldn't extend beyond what
+ * POSIX mandates (see e.g.
+ * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html)
+ * with the exception that we support some optional (XSR) values
+ * specified there (but no new ones should be added).
+ */
+
+XEN_ERRNO(EPERM, 1) /* Operation not permitted */
+XEN_ERRNO(ENOENT, 2) /* No such file or directory */
+XEN_ERRNO(ESRCH, 3) /* No such process */
+#ifdef __XEN__ /* Internal only, should never be exposed to the guest. */
+XEN_ERRNO(EINTR, 4) /* Interrupted system call */
+#endif
+XEN_ERRNO(EIO, 5) /* I/O error */
+XEN_ERRNO(ENXIO, 6) /* No such device or address */
+XEN_ERRNO(E2BIG, 7) /* Arg list too long */
+XEN_ERRNO(ENOEXEC, 8) /* Exec format error */
+XEN_ERRNO(EBADF, 9) /* Bad file number */
+XEN_ERRNO(ECHILD, 10) /* No child processes */
+XEN_ERRNO(EAGAIN, 11) /* Try again */
+XEN_ERRNO(ENOMEM, 12) /* Out of memory */
+XEN_ERRNO(EACCES, 13) /* Permission denied */
+XEN_ERRNO(EFAULT, 14) /* Bad address */
+XEN_ERRNO(EBUSY, 16) /* Device or resource busy */
+XEN_ERRNO(EEXIST, 17) /* File exists */
+XEN_ERRNO(EXDEV, 18) /* Cross-device link */
+XEN_ERRNO(ENODEV, 19) /* No such device */
+XEN_ERRNO(EINVAL, 22) /* Invalid argument */
+XEN_ERRNO(ENFILE, 23) /* File table overflow */
+XEN_ERRNO(EMFILE, 24) /* Too many open files */
+XEN_ERRNO(ENOSPC, 28) /* No space left on device */
+XEN_ERRNO(EMLINK, 31) /* Too many links */
+XEN_ERRNO(EDOM, 33) /* Math argument out of domain of func */
+XEN_ERRNO(ERANGE, 34) /* Math result not representable */
+XEN_ERRNO(EDEADLK, 35) /* Resource deadlock would occur */
+XEN_ERRNO(ENAMETOOLONG, 36) /* File name too long */
+XEN_ERRNO(ENOLCK, 37) /* No record locks available */
+XEN_ERRNO(ENOSYS, 38) /* Function not implemented */
+XEN_ERRNO(ENODATA, 61) /* No data available */
+XEN_ERRNO(ETIME, 62) /* Timer expired */
+XEN_ERRNO(EBADMSG, 74) /* Not a data message */
+XEN_ERRNO(EOVERFLOW, 75) /* Value too large for defined data type */
+XEN_ERRNO(EILSEQ, 84) /* Illegal byte sequence */
+#ifdef __XEN__ /* Internal only, should never be exposed to the guest. */
+XEN_ERRNO(ERESTART, 85) /* Interrupted system call should be restarted */
+#endif
+XEN_ERRNO(ENOTSOCK, 88) /* Socket operation on non-socket */
+XEN_ERRNO(EOPNOTSUPP, 95) /* Operation not supported on transport endpoint */
+XEN_ERRNO(EADDRINUSE, 98) /* Address already in use */
+XEN_ERRNO(EADDRNOTAVAIL, 99) /* Cannot assign requested address */
+XEN_ERRNO(ENOBUFS, 105) /* No buffer space available */
+XEN_ERRNO(EISCONN, 106) /* Transport endpoint is already connected */
+XEN_ERRNO(ENOTCONN, 107) /* Transport endpoint is not connected */
+XEN_ERRNO(ETIMEDOUT, 110) /* Connection timed out */
+
+#undef XEN_ERRNO
+#endif /* XEN_ERRNO */
+
+#ifndef __XEN_PUBLIC_ERRNO_H__
+#define __XEN_PUBLIC_ERRNO_H__
+
+/* ` } */
+
+#ifndef __ASSEMBLY__
+};
+#endif
+
+#define XEN_EWOULDBLOCK XEN_EAGAIN /* Operation would block */
+#define XEN_EDEADLOCK XEN_EDEADLK /* Resource deadlock would occur */
+
+#endif /* __XEN_PUBLIC_ERRNO_H__ */
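For illustration (the include path and the host mapping are assumptions), the
XEN_ERRNO() x-macro above lets a consumer rebuild the list for its own
purposes, e.g. a Xen-to-host errno translation table:

    #include <errno.h>                 /* host errno values */
    #include <xen/interface/errno.h>   /* first include defines the XEN_* enum */

    static const int xen_to_host_errno[] = {
    #define XEN_ERRNO(name, value) [value] = name,
    #include <xen/interface/errno.h>   /* second include expands only the list */
    };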
Property changes on: trunk/sys/xen/interface/errno.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Modified: trunk/sys/xen/interface/event_channel.h
===================================================================
--- trunk/sys/xen/interface/event_channel.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/event_channel.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -72,13 +72,13 @@
#define EVTCHNOP_bind_vcpu 8
#define EVTCHNOP_unmask 9
#define EVTCHNOP_reset 10
+#define EVTCHNOP_init_control 11
+#define EVTCHNOP_expand_array 12
+#define EVTCHNOP_set_priority 13
/* ` } */
-#ifndef __XEN_EVTCHN_PORT_DEFINED__
typedef uint32_t evtchn_port_t;
DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
-#define __XEN_EVTCHN_PORT_DEFINED__ 1
-#endif
/*
* EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
@@ -102,6 +102,17 @@
* a port that is unbound and marked as accepting bindings from the calling
* domain. A fresh port is allocated in the calling domain and returned as
* <local_port>.
+ *
+ * In case the peer domain has already tried to set our event channel
+ * pending, before it was bound, EVTCHNOP_bind_interdomain always sets
+ * the local event channel pending.
+ *
+ * The usual pattern of use, in the guest's upcall (or subsequent
+ * handler) is as follows: (Re-enable the event channel for subsequent
+ * signalling and then) check for the existence of whatever condition
+ * is being waited for by other means, and take whatever action is
+ * needed (if any).
+ *
* NOTES:
* 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
*/
@@ -254,6 +265,10 @@
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
+ * 3. Destroys all control blocks and event array, resets event channel
+ * operations to 2-level ABI if called with <dom> == DOMID_SELF and FIFO
+ * ABI was used. Guests should not bind events during EVTCHNOP_reset call
+ * as these events are likely to be lost.
*/
struct evtchn_reset {
/* IN parameters. */
@@ -262,6 +277,43 @@
typedef struct evtchn_reset evtchn_reset_t;
/*
+ * EVTCHNOP_init_control: initialize the control block for the FIFO ABI.
+ *
+ * Note: any events that are currently pending will not be resent and
+ * will be lost. Guests should call this before binding any event to
+ * avoid losing any events.
+ */
+struct evtchn_init_control {
+ /* IN parameters. */
+ uint64_t control_gfn;
+ uint32_t offset;
+ uint32_t vcpu;
+ /* OUT parameters. */
+ uint8_t link_bits;
+ uint8_t _pad[7];
+};
+typedef struct evtchn_init_control evtchn_init_control_t;
+
+/*
+ * EVTCHNOP_expand_array: add an additional page to the event array.
+ */
+struct evtchn_expand_array {
+ /* IN parameters. */
+ uint64_t array_gfn;
+};
+typedef struct evtchn_expand_array evtchn_expand_array_t;
+
+/*
+ * EVTCHNOP_set_priority: set the priority for an event channel.
+ */
+struct evtchn_set_priority {
+ /* IN parameters. */
+ uint32_t port;
+ uint32_t priority;
+};
+typedef struct evtchn_set_priority evtchn_set_priority_t;
+
+/*
* ` enum neg_errnoval
* ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op)
* `
@@ -285,12 +337,48 @@
typedef struct evtchn_op evtchn_op_t;
DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
+/*
+ * 2-level ABI
+ */
+
+#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
+
+/*
+ * FIFO ABI
+ */
+
+/* Events may have priorities from 0 (highest) to 15 (lowest). */
+#define EVTCHN_FIFO_PRIORITY_MAX 0
+#define EVTCHN_FIFO_PRIORITY_DEFAULT 7
+#define EVTCHN_FIFO_PRIORITY_MIN 15
+
+#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1)
+
+typedef uint32_t event_word_t;
+
+#define EVTCHN_FIFO_PENDING 31
+#define EVTCHN_FIFO_MASKED 30
+#define EVTCHN_FIFO_LINKED 29
+#define EVTCHN_FIFO_BUSY 28
+
+#define EVTCHN_FIFO_LINK_BITS 17
+#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1)
+
+#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS)
+
+struct evtchn_fifo_control_block {
+ uint32_t ready;
+ uint32_t _rsvd;
+ uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
+};
+typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t;
+
#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/features.h
===================================================================
--- trunk/sys/xen/interface/features.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/features.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -29,6 +29,20 @@
#define __XEN_PUBLIC_FEATURES_H__
/*
+ * `incontents 200 elfnotes_features XEN_ELFNOTE_FEATURES
+ *
+ * The list of all the features the guest supports. They are set by
+ * parsing the XEN_ELFNOTE_FEATURES and XEN_ELFNOTE_SUPPORTED_FEATURES
+ * string. The format is the feature names (as given here without the
+ * "XENFEAT_" prefix) separated by '|' characters.
+ * If a feature is required for the kernel to function then the feature name
+ * must be preceded by a '!' character.
+ *
+ * Note that if XEN_ELFNOTE_SUPPORTED_FEATURES is used, then
+ * XENFEAT_dom0 MUST be set if the guest is to be booted as dom0.
+ */
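For example (illustrative, using feature names that follow the convention above), a kernel that cannot run without writable page tables but optionally supports auto-translation would embed the string:

    !writable_page_tables|auto_translated_physmap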
+
+/*
* If set, the guest does not need to write-protect its pagetables, and can
* update them via direct writes.
*/
@@ -81,6 +95,14 @@
/* operation as Dom0 is supported */
#define XENFEAT_dom0 11
+/* Xen also maps grant references at pfn = mfn.
+ * This feature flag is deprecated and should not be used.
+#define XENFEAT_grant_map_identity 12
+ */
+
+/* Guest can use XENMEMF_vnode to specify virtual node for memory op. */
+#define XENFEAT_memory_op_vnode_supported 13
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
@@ -88,7 +110,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Added: trunk/sys/xen/interface/gcov.h
===================================================================
--- trunk/sys/xen/interface/gcov.h (rev 0)
+++ trunk/sys/xen/interface/gcov.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -0,0 +1,116 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+ * gcov.h
+ *
+ * Coverage structures exported by Xen.
+ * Structure is different from Gcc one.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2013, Citrix Systems R&D Ltd.
+ */
+
+#ifndef __XEN_PUBLIC_GCOV_H__
+#define __XEN_PUBLIC_GCOV_H__ __XEN_PUBLIC_GCOV_H__
+
+#define XENCOV_COUNTERS 5
+#define XENCOV_TAG_BASE 0x58544300u
+#define XENCOV_TAG_FILE (XENCOV_TAG_BASE+0x46u)
+#define XENCOV_TAG_FUNC (XENCOV_TAG_BASE+0x66u)
+#define XENCOV_TAG_COUNTER(n) (XENCOV_TAG_BASE+0x30u+((n)&0xfu))
+#define XENCOV_TAG_END (XENCOV_TAG_BASE+0x2eu)
+#define XENCOV_IS_TAG_COUNTER(n) \
+ ((n) >= XENCOV_TAG_COUNTER(0) && (n) < XENCOV_TAG_COUNTER(XENCOV_COUNTERS))
+#define XENCOV_COUNTER_NUM(n) ((n)-XENCOV_TAG_COUNTER(0))
+
+/*
+ * The main structure for the blob is
+ * BLOB := FILE.. END
+ * FILE := TAG_FILE VERSION STAMP FILENAME COUNTERS FUNCTIONS
+ * FILENAME := LEN characters
+ * characters are padded to 32 bit
+ * LEN := 32 bit value
+ * COUNTERS := TAG_COUNTER(n) NUM COUNTER..
+ * NUM := 32 bit value
+ * COUNTER := 64 bit value
+ * FUNCTIONS := TAG_FUNC NUM FUNCTION..
+ * FUNCTION := IDENT CHECKSUM NUM_COUNTERS
+ *
+ * All tagged structures are aligned to 8 bytes
+ */
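As a sketch of consuming such a blob (the "blob" buffer is an assumed caller-provided copy of the coverage data; record-length bookkeeping is omitted), the tag macros above are used like this:

    const uint32_t tag = *(const uint32_t *)blob;

    if (tag == XENCOV_TAG_FILE) {
        const struct xencov_file *f = (const struct xencov_file *)blob;
        /* f->filename holds f->fn_len bytes, padded to a 32-bit boundary */
    } else if (XENCOV_IS_TAG_COUNTER(tag)) {
        /* XENCOV_COUNTER_NUM(tag) selects the counter group, 0..XENCOV_COUNTERS-1 */
    } else if (tag == XENCOV_TAG_END) {
        /* end of blob */
    }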
+
+/**
+ * File information
+ * Prefixed with XENCOV_TAG_FILE and a string with filename
+ * Aligned to 8 bytes
+ */
+struct xencov_file
+{
+ uint32_t tag; /* XENCOV_TAG_FILE */
+ uint32_t version;
+ uint32_t stamp;
+ uint32_t fn_len;
+ char filename[1];
+};
+
+
+/**
+ * Counters information
+ * Prefixed with XENCOV_TAG_COUNTER(n) where n is 0..(XENCOV_COUNTERS-1)
+ * Aligned to 8 bytes
+ */
+struct xencov_counter
+{
+ uint32_t tag; /* XENCOV_TAG_COUNTER(n) */
+ uint32_t num;
+ uint64_t values[1];
+};
+
+/**
+ * Information for each function
+ * The number of counters equals the number of counter structures seen before
+ */
+struct xencov_function
+{
+ uint32_t ident;
+ uint32_t checksum;
+ uint32_t num_counters[1];
+};
+
+/**
+ * Information for all functions
+ * Aligned to 8 bytes
+ */
+struct xencov_functions
+{
+ uint32_t tag; /* XENCOV_TAG_FUNC */
+ uint32_t num;
+ struct xencov_function xencov_function[1];
+};
+
+/**
+ * Terminator
+ */
+struct xencov_end
+{
+ uint32_t tag; /* XENCOV_TAG_END */
+};
+
+#endif /* __XEN_PUBLIC_GCOV_H__ */
+
Property changes on: trunk/sys/xen/interface/gcov.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Modified: trunk/sys/xen/interface/grant_table.h
===================================================================
--- trunk/sys/xen/interface/grant_table.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/grant_table.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -135,8 +135,10 @@
/* The domain being granted foreign privileges. [GST] */
domid_t domid;
/*
- * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
- * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
+ * GTF_permit_access: GFN that @domid is allowed to map and access. [GST]
+ * GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST]
+ * GTF_transfer_completed: MFN whose ownership transferred by @domid
+ * (non-translated guests only). [XEN]
*/
uint32_t frame;
};
@@ -310,6 +312,7 @@
#define GNTTABOP_get_status_frames 9
#define GNTTABOP_get_version 10
#define GNTTABOP_swap_grant_ref 11
+#define GNTTABOP_cache_flush 12
#endif /* __XEN_INTERFACE_VERSION__ */
/* ` } */
@@ -321,7 +324,7 @@
/*
* GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
* by devices and/or host CPUs. If successful, <handle> is a tracking number
- * that must be presented later to destroy the mapping(s). On error, <handle>
+ * that must be presented later to destroy the mapping(s). On error, <status>
* is a negative status code.
* NOTES:
* 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
@@ -386,7 +389,11 @@
uint32_t nr_frames;
/* OUT parameters. */
int16_t status; /* => enum grant_status */
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
XEN_GUEST_HANDLE(ulong) frame_list;
+#else
+ XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
+#endif
};
typedef struct gnttab_setup_table gnttab_setup_table_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
@@ -446,12 +453,10 @@
#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
#define _GNTCOPY_dest_gref (1)
#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
-#define _GNTCOPY_can_fail (2)
-#define GNTCOPY_can_fail (1<<_GNTCOPY_can_fail)
struct gnttab_copy {
/* IN parameters. */
- struct {
+ struct gnttab_copy_ptr {
union {
grant_ref_t ref;
xen_pfn_t gmfn;
@@ -573,6 +578,25 @@
typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t);
+/*
+ * Issue one or more cache maintenance operations on a portion of a
+ * page granted to the calling domain by a foreign domain.
+ */
+struct gnttab_cache_flush {
+ union {
+ uint64_t dev_bus_addr;
+ grant_ref_t ref;
+ } a;
+ uint16_t offset; /* offset from start of grant */
+ uint16_t length; /* size within the grant */
+#define GNTTAB_CACHE_CLEAN (1<<0)
+#define GNTTAB_CACHE_INVAL (1<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
+ uint32_t op;
+};
+typedef struct gnttab_cache_flush gnttab_cache_flush_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_cache_flush_t);
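A minimal caller-side sketch (HYPERVISOR_grant_table_op() and "ref" are assumed names from the guest, not from this header): cleaning the first 256 bytes of a page granted to us by reference:

    struct gnttab_cache_flush flush = {
        .a.ref  = ref,                          /* grant reference from the granter */
        .offset = 0,
        .length = 256,
        .op     = GNTTAB_CACHE_CLEAN | GNTTAB_CACHE_SOURCE_GREF,
    };
    int rc = HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &flush, 1);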
+
#endif /* __XEN_INTERFACE_VERSION__ */
/*
@@ -653,7 +677,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/kexec.h
===================================================================
--- trunk/sys/xen/interface/kexec.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/kexec.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -98,9 +98,6 @@
#if defined(__i386__) || defined(__x86_64__)
unsigned long page_list[KEXEC_XEN_NO_PAGES];
#endif
-#if defined(__ia64__)
- unsigned long reboot_code_buffer;
-#endif
unsigned long indirection_page;
unsigned long start_address;
} xen_kexec_image_t;
@@ -109,6 +106,20 @@
* Perform kexec having previously loaded a kexec or kdump kernel
* as appropriate.
* type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
+ *
+ * Control is transferred to the image entry point with the host in
+ * the following state.
+ *
+ * - The image may be executed on any PCPU and all other PCPUs are
+ * stopped.
+ *
+ * - Local interrupts are disabled.
+ *
+ * - Register values are undefined.
+ *
+ * - The image segments have writeable 1:1 virtual to machine
+ * mappings. The location of any page tables is undefined and these
+ * page table frames are not mapped.
*/
#define KEXEC_CMD_kexec 0
typedef struct xen_kexec_exec {
@@ -120,12 +131,12 @@
* type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
* image == relocation information for kexec (ignored for unload) [in]
*/
-#define KEXEC_CMD_kexec_load 1
-#define KEXEC_CMD_kexec_unload 2
-typedef struct xen_kexec_load {
+#define KEXEC_CMD_kexec_load_v1 1 /* obsolete since 0x00040400 */
+#define KEXEC_CMD_kexec_unload_v1 2 /* obsolete since 0x00040400 */
+typedef struct xen_kexec_load_v1 {
int type;
xen_kexec_image_t image;
-} xen_kexec_load_t;
+} xen_kexec_load_v1_t;
#define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */
#define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */
@@ -135,7 +146,7 @@
* to Xen it exists in a separate EFI
* region on ia64, and thus needs to be
* inserted into iomem_machine separately */
-#define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of
+#define KEXEC_RANGE_MA_BOOT_PARAM 4 /* Obsolete: machine address and size of
* the ia64_boot_param */
#define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of
* of the EFI Memory Map */
@@ -156,12 +167,82 @@
unsigned long start;
} xen_kexec_range_t;
+#if __XEN_INTERFACE_VERSION__ >= 0x00040400
+/*
+ * A contiguous chunk of a kexec image and its destination machine
+ * address.
+ */
+typedef struct xen_kexec_segment {
+ union {
+ XEN_GUEST_HANDLE(const_void) h;
+ uint64_t _pad;
+ } buf;
+ uint64_t buf_size;
+ uint64_t dest_maddr;
+ uint64_t dest_size;
+} xen_kexec_segment_t;
+DEFINE_XEN_GUEST_HANDLE(xen_kexec_segment_t);
+
+/*
+ * Load a kexec image into memory.
+ *
+ * For KEXEC_TYPE_DEFAULT images, the segments may be anywhere in RAM.
+ * The image is relocated prior to being executed.
+ *
+ * For KEXEC_TYPE_CRASH images, each segment of the image must reside
+ * in the memory region reserved for kexec (KEXEC_RANGE_MA_CRASH) and
+ * the entry point must be within the image. The caller is responsible
+ * for ensuring that multiple images do not overlap.
+ *
+ * All image segments will be loaded to their destination machine
+ * addresses prior to being executed. The trailing portion of any
+ * segments with a source buffer (from dest_maddr + buf_size to
+ * dest_maddr + dest_size) will be zeroed.
+ *
+ * Segments with no source buffer will be accessible to the image when
+ * it is executed.
+ */
+
+#define KEXEC_CMD_kexec_load 4
+typedef struct xen_kexec_load {
+ uint8_t type; /* One of KEXEC_TYPE_* */
+ uint8_t _pad;
+ uint16_t arch; /* ELF machine type (EM_*). */
+ uint32_t nr_segments;
+ union {
+ XEN_GUEST_HANDLE(xen_kexec_segment_t) h;
+ uint64_t _pad;
+ } segments;
+ uint64_t entry_maddr; /* image entry point machine address. */
+} xen_kexec_load_t;
+DEFINE_XEN_GUEST_HANDLE(xen_kexec_load_t);
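For orientation, loading a single-segment crash image might look roughly like the following sketch (the buffer, addresses, EM_* constant and the HYPERVISOR_kexec_op() wrapper are all assumptions about the calling code, not part of this header):

    xen_kexec_segment_t seg = {
        .buf_size   = image_size,
        .dest_maddr = crash_area_maddr,         /* inside KEXEC_RANGE_MA_CRASH */
        .dest_size  = image_size,
    };
    set_xen_guest_handle(seg.buf.h, image_buf);

    xen_kexec_load_t load = {
        .type        = KEXEC_TYPE_CRASH,
        .arch        = EM_X86_64,               /* ELF machine type of the image */
        .nr_segments = 1,
        .entry_maddr = crash_area_maddr + entry_offset,
    };
    set_xen_guest_handle(load.segments.h, &seg);
    rc = HYPERVISOR_kexec_op(KEXEC_CMD_kexec_load, &load);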
+
+/*
+ * Unload a kexec image.
+ *
+ * Type must be one of KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
+ */
+#define KEXEC_CMD_kexec_unload 5
+typedef struct xen_kexec_unload {
+ uint8_t type;
+} xen_kexec_unload_t;
+DEFINE_XEN_GUEST_HANDLE(xen_kexec_unload_t);
+
+#else /* __XEN_INTERFACE_VERSION__ < 0x00040400 */
+
+#define KEXEC_CMD_kexec_load KEXEC_CMD_kexec_load_v1
+#define KEXEC_CMD_kexec_unload KEXEC_CMD_kexec_unload_v1
+#define xen_kexec_load xen_kexec_load_v1
+#define xen_kexec_load_t xen_kexec_load_v1_t
+
+#endif
+
#endif /* _XEN_PUBLIC_KEXEC_H */
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/memory.h
===================================================================
--- trunk/sys/xen/interface/memory.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/memory.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -29,6 +29,7 @@
#define __XEN_PUBLIC_MEMORY_H__
#include "xen.h"
+#include "physdev.h"
/*
* Increase or decrease the specified domain's memory reservation. Returns the
@@ -56,6 +57,8 @@
/* Flag to request allocation only from the node specified */
#define XENMEMF_exact_node_request (1<<17)
#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
+/* Flag to indicate the node specified is virtual node */
+#define XENMEMF_vnode (1<<18)
#endif
struct xen_memory_reservation {
@@ -69,6 +72,8 @@
* IN: GPFN bases of extents to populate with memory
* OUT: GMFN bases of extents that were allocated
* (NB. This command also updates the mach_to_phys translation table)
+ * XENMEM_claim_pages:
+ * IN: must be zero
*/
XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
@@ -186,6 +191,15 @@
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
/*
+ * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
+ *
+ * For a non compat caller, this functions similarly to
+ * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility
+ * m2p table.
+ */
+#define XENMEM_machphys_compat_mfn_list 25
+
+/*
* Returns the location in virtual address space of the machine_to_phys
* mapping table. Architectures which do not have a m2p table, or which do not
* map it by default into guest address space, do not implement this command.
@@ -199,6 +213,16 @@
typedef struct xen_machphys_mapping xen_machphys_mapping_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
+/* Source mapping space. */
+/* ` enum phys_map_space { */
+#define XENMAPSPACE_shared_info 0 /* shared info page */
+#define XENMAPSPACE_grant_table 1 /* grant table page */
+#define XENMAPSPACE_gmfn 2 /* GMFN */
+#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
+#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
+ * XENMEM_add_to_physmap_batch only. */
+/* ` } */
+
/*
* Sets the GPFN at which a particular page appears in the specified guest's
* pseudophysical address space.
@@ -212,24 +236,52 @@
/* Number of pages to go through for gmfn_range */
uint16_t size;
- /* Source mapping space. */
-#define XENMAPSPACE_shared_info 0 /* shared info page */
-#define XENMAPSPACE_grant_table 1 /* grant table page */
-#define XENMAPSPACE_gmfn 2 /* GMFN */
-#define XENMAPSPACE_gmfn_range 3 /* GMFN range */
- unsigned int space;
+ unsigned int space; /* => enum phys_map_space */
#define XENMAPIDX_grant_table_status 0x80000000
- /* Index into source mapping space. */
+ /* Index into space being mapped. */
xen_ulong_t idx;
- /* GPFN where the source mapping page should appear. */
+ /* GPFN in domid where the source mapping page should appear. */
xen_pfn_t gpfn;
};
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
+/* A batched version of add_to_physmap. */
+#define XENMEM_add_to_physmap_batch 23
+struct xen_add_to_physmap_batch {
+ /* IN */
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+ uint16_t space; /* => enum phys_map_space */
+
+ /* Number of pages to go through */
+ uint16_t size;
+ domid_t foreign_domid; /* IFF gmfn_foreign */
+
+ /* Indexes into space being mapped. */
+ XEN_GUEST_HANDLE(xen_ulong_t) idxs;
+
+ /* GPFN in domid where the source mapping page should appear. */
+ XEN_GUEST_HANDLE(xen_pfn_t) gpfns;
+
+ /* OUT */
+
+ /* Per index error code. */
+ XEN_GUEST_HANDLE(int) errs;
+};
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
+#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
+#define xen_add_to_physmap_range xen_add_to_physmap_batch
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
+#endif
+
/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
@@ -324,13 +376,9 @@
#define XENMEM_paging_op_evict 1
#define XENMEM_paging_op_prep 2
-#define XENMEM_access_op 21
-#define XENMEM_access_op_resume 0
-
-struct xen_mem_event_op {
- uint8_t op; /* XENMEM_*_op_* */
+struct xen_mem_paging_op {
+ uint8_t op; /* XENMEM_paging_op_* */
domid_t domain;
-
/* PAGING_PREP IN: buffer to immediately fill page in */
uint64_aligned_t buffer;
@@ -337,19 +385,69 @@
/* Other OPs */
uint64_aligned_t gfn; /* IN: gfn of page being operated on */
};
-typedef struct xen_mem_event_op xen_mem_event_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
+typedef struct xen_mem_paging_op xen_mem_paging_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
+#define XENMEM_access_op 21
+#define XENMEM_access_op_set_access 0
+#define XENMEM_access_op_get_access 1
+#define XENMEM_access_op_enable_emulate 2
+#define XENMEM_access_op_disable_emulate 3
+
+typedef enum {
+ XENMEM_access_n,
+ XENMEM_access_r,
+ XENMEM_access_w,
+ XENMEM_access_rw,
+ XENMEM_access_x,
+ XENMEM_access_rx,
+ XENMEM_access_wx,
+ XENMEM_access_rwx,
+ /*
+ * Page starts off as r-x, but automatically
+ * changes to r-w on a write
+ */
+ XENMEM_access_rx2rw,
+ /*
+ * Log access: starts off as n, automatically
+ * goes to rwx, generating an event without
+ * pausing the vcpu
+ */
+ XENMEM_access_n2rwx,
+ /* Take the domain default */
+ XENMEM_access_default
+} xenmem_access_t;
+
+struct xen_mem_access_op {
+ /* XENMEM_access_op_* */
+ uint8_t op;
+ /* xenmem_access_t */
+ uint8_t access;
+ domid_t domid;
+ /*
+ * Number of pages for set op
+ * Ignored on setting default access and other ops
+ */
+ uint32_t nr;
+ /*
+ * First pfn for set op
+ * pfn for get op
+ * ~0ull is used to set and get the default access for pages
+ */
+ uint64_aligned_t pfn;
+};
+typedef struct xen_mem_access_op xen_mem_access_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
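Illustrative use (a sketch; HYPERVISOR_memory_op(), "domid" and "gfn" are caller-side assumptions): marking one frame of a guest read-only so that write attempts generate events:

    xen_mem_access_op_t mao = {
        .op     = XENMEM_access_op_set_access,
        .access = XENMEM_access_r,
        .domid  = domid,
        .nr     = 1,
        .pfn    = gfn,
    };
    int rc = HYPERVISOR_memory_op(XENMEM_access_op, &mao);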
+
#define XENMEM_sharing_op 22
#define XENMEM_sharing_op_nominate_gfn 0
#define XENMEM_sharing_op_nominate_gref 1
#define XENMEM_sharing_op_share 2
-#define XENMEM_sharing_op_resume 3
-#define XENMEM_sharing_op_debug_gfn 4
-#define XENMEM_sharing_op_debug_mfn 5
-#define XENMEM_sharing_op_debug_gref 6
-#define XENMEM_sharing_op_add_physmap 7
-#define XENMEM_sharing_op_audit 8
+#define XENMEM_sharing_op_debug_gfn 3
+#define XENMEM_sharing_op_debug_mfn 4
+#define XENMEM_sharing_op_debug_gref 5
+#define XENMEM_sharing_op_add_physmap 6
+#define XENMEM_sharing_op_audit 7
#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
@@ -398,14 +496,127 @@
typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
+/*
+ * Attempt to stake a claim for a domain on a quantity of pages
+ * of system RAM, but _not_ assign specific pageframes. Only
+ * arithmetic is performed so the hypercall is very fast and need
+ * not be preemptible, thus sidestepping time-of-check-time-of-use
+ * races for memory allocation. Returns 0 if the hypervisor page
+ * allocator has atomically and successfully claimed the requested
+ * number of pages, else non-zero.
+ *
+ * Any domain may have only one active claim. When sufficient memory
+ * has been allocated to resolve the claim, the claim silently expires.
+ * Claiming zero pages effectively resets any outstanding claim and
+ * is always successful.
+ *
+ * Note that a valid claim may be staked even after memory has been
+ * allocated for a domain. In this case, the claim is not incremental,
+ * i.e. if the domain's tot_pages is 3, and a claim is staked for 10,
+ * only 7 additional pages are claimed.
+ *
+ * Caller must be privileged or the hypercall fails.
+ */
+#define XENMEM_claim_pages 24
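A toolstack-side sketch of the behaviour described above (HYPERVISOR_memory_op() and "domid" are assumptions about the calling code): claiming 65536 pages for a domain about to be populated:

    struct xen_memory_reservation claim = {
        .nr_extents   = 65536,                  /* pages being claimed */
        .extent_order = 0,
        .domid        = domid,
        /* extent_start is left zero: it must be zero for XENMEM_claim_pages */
    };
    int rc = HYPERVISOR_memory_op(XENMEM_claim_pages, &claim);
    /* rc == 0: the claim is staked; non-zero: insufficient unclaimed memory. */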
+
+/*
+ * XENMEM_claim_pages flags - there are no flags at this time.
+ * The zero value is appropriate.
+ */
+
+/*
+ * With some legacy devices, certain guest-physical addresses cannot safely
+ * be used for other purposes, e.g. to map guest RAM. This hypercall
+ * enumerates those regions so the toolstack can avoid using them.
+ */
+#define XENMEM_reserved_device_memory_map 27
+struct xen_reserved_device_memory {
+ xen_pfn_t start_pfn;
+ xen_ulong_t nr_pages;
+};
+typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
+
+struct xen_reserved_device_memory_map {
+#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
+ /* IN */
+ uint32_t flags;
+ /*
+ * IN/OUT
+ *
+ * Gets set to the required number of entries when too low,
+ * signaled by error code -ERANGE.
+ */
+ unsigned int nr_entries;
+ /* OUT */
+ XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
+ /* IN */
+ union {
+ struct physdev_pci_device pci;
+ } dev;
+};
+typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+/*
+ * XENMEM_get_vnumainfo used by guest to get
+ * vNUMA topology from hypervisor.
+ */
+#define XENMEM_get_vnumainfo 26
+
+/* vNUMA node memory ranges */
+struct xen_vmemrange {
+ uint64_t start, end;
+ unsigned int flags;
+ unsigned int nid;
+};
+typedef struct xen_vmemrange xen_vmemrange_t;
+DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);
+
+/*
+ * vNUMA topology specifies vNUMA node number, distance table,
+ * memory ranges and vcpu mapping provided for guests.
+ * The XENMEM_get_vnumainfo hypercall expects the guest to set
+ * nr_vnodes, nr_vmemranges and nr_vcpus to the sizes of the buffers it
+ * provides. After filling the guest structures, nr_vnodes, nr_vmemranges
+ * and nr_vcpus are copied back to the guest. If the supplied values were
+ * too small, the expected values of nr_vnodes, nr_vmemranges and nr_vcpus
+ * are returned to the guest instead.
+ */
+struct xen_vnuma_topology_info {
+ /* IN */
+ domid_t domid;
+ uint16_t pad;
+ /* IN/OUT */
+ unsigned int nr_vnodes;
+ unsigned int nr_vcpus;
+ unsigned int nr_vmemranges;
+ /* OUT */
+ union {
+ XEN_GUEST_HANDLE(uint) h;
+ uint64_t pad;
+ } vdistance;
+ union {
+ XEN_GUEST_HANDLE(uint) h;
+ uint64_t pad;
+ } vcpu_to_vnode;
+ union {
+ XEN_GUEST_HANDLE(xen_vmemrange_t) h;
+ uint64_t pad;
+ } vmemrange;
+};
+typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
+DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
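The intended two-step usage is roughly as follows (a sketch; the wrapper name and buffer management are assumptions about the guest code):

    struct xen_vnuma_topology_info topo = { .domid = DOMID_SELF };

    /* With all handles left NULL only the counts are filled in. */
    int rc = HYPERVISOR_memory_op(XENMEM_get_vnumainfo, &topo);

    /* Allocate vdistance (nr_vnodes * nr_vnodes entries), vcpu_to_vnode
     * (nr_vcpus) and vmemrange (nr_vmemranges) buffers, attach them with
     * set_xen_guest_handle(), then repeat the call to get the topology. */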
+
+/* Next available subop number is 28 */
+
#endif /* __XEN_PUBLIC_MEMORY_H__ */
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/nmi.h
===================================================================
--- trunk/sys/xen/interface/nmi.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/nmi.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -37,9 +37,14 @@
/* I/O-check error reported via ISA port 0x61, bit 6. */
#define _XEN_NMIREASON_io_error 0
#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
+ /* PCI SERR reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_pci_serr 1
+#define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr)
+#if __XEN_INTERFACE_VERSION__ < 0x00040300 /* legacy alias of the above */
/* Parity error reported via ISA port 0x61, bit 7. */
#define _XEN_NMIREASON_parity_error 1
#define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error)
+#endif
/* Unknown hardware-generated NMI. */
#define _XEN_NMIREASON_unknown 2
#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
@@ -73,7 +78,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/physdev.h
===================================================================
--- trunk/sys/xen/interface/physdev.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/physdev.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -17,6 +17,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Keir Fraser
*/
#ifndef __XEN_PUBLIC_PHYSDEV_H__
@@ -152,6 +154,7 @@
#define MAP_PIRQ_TYPE_GSI 0x1
#define MAP_PIRQ_TYPE_UNKNOWN 0x2
#define MAP_PIRQ_TYPE_MSI_SEG 0x3
+#define MAP_PIRQ_TYPE_MULTI_MSI 0x4
#define PHYSDEVOP_map_pirq 13
struct physdev_map_pirq {
@@ -158,15 +161,15 @@
domid_t domid;
/* IN */
int type;
- /* IN */
+ /* IN (ignored for ..._MULTI_MSI) */
int index;
/* IN or OUT */
int pirq;
- /* IN - high 16 bits hold segment for MAP_PIRQ_TYPE_MSI_SEG */
+ /* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */
int bus;
/* IN */
int devfn;
- /* IN */
+ /* IN (also OUT for ..._MULTI_MSI) */
int entry_nr;
/* IN */
uint64_t table_base;
@@ -293,6 +296,11 @@
uint8_t bus;
uint8_t devfn;
} physfn;
+ /*
+ * Optional parameters array.
+ * First element ([0]) is PXM domain associated with the device (if
+ * XEN_PCI_DEV_PXM is set)
+ */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
uint32_t optarr[];
#elif defined(__GNUC__)
@@ -304,6 +312,12 @@
#define PHYSDEVOP_pci_device_remove 26
#define PHYSDEVOP_restore_msi_ext 27
+/*
+ * Dom0 should use these two to announce MMIO resources assigned to
+ * MSI-X capable devices won't (prepare) or may (release) change.
+ */
+#define PHYSDEVOP_prepare_msix 30
+#define PHYSDEVOP_release_msix 31
struct physdev_pci_device {
/* IN */
uint16_t seg;
@@ -313,6 +327,24 @@
typedef struct physdev_pci_device physdev_pci_device_t;
DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
+#define PHYSDEVOP_DBGP_RESET_PREPARE 1
+#define PHYSDEVOP_DBGP_RESET_DONE 2
+
+#define PHYSDEVOP_DBGP_BUS_UNKNOWN 0
+#define PHYSDEVOP_DBGP_BUS_PCI 1
+
+#define PHYSDEVOP_dbgp_op 29
+struct physdev_dbgp_op {
+ /* IN */
+ uint8_t op;
+ uint8_t bus;
+ union {
+ struct physdev_pci_device pci;
+ } u;
+};
+typedef struct physdev_dbgp_op physdev_dbgp_op_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_dbgp_op_t);
+
/*
* Notify that some PIRQ-bound event channels have been unmasked.
* ** This command is obsolete since interface version 0x00030202 and is **
@@ -320,9 +352,11 @@
*/
#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
/*
* These all-capitals physdev operation names are superseded by the new names
- * (defined above) since interface version 0x00030202.
+ * (defined above) since interface version 0x00030202. The guard above was
+ * added post-4.5 only though and hence shouldn't check for 0x00030202.
*/
#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
@@ -333,6 +367,7 @@
#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
+#endif
#if __XEN_INTERFACE_VERSION__ < 0x00040200
#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v1
@@ -345,7 +380,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/platform.h
===================================================================
--- trunk/sys/xen/interface/platform.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/platform.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -36,13 +36,28 @@
* Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
* 1 January, 1970 if the current system time was <system_time>.
*/
-#define XENPF_settime 17
-struct xenpf_settime {
+#define XENPF_settime32 17
+struct xenpf_settime32 {
/* IN variables. */
uint32_t secs;
uint32_t nsecs;
uint64_t system_time;
};
+#define XENPF_settime64 62
+struct xenpf_settime64 {
+ /* IN variables. */
+ uint64_t secs;
+ uint32_t nsecs;
+ uint32_t mbz;
+ uint64_t system_time;
+};
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
+#define XENPF_settime XENPF_settime32
+#define xenpf_settime xenpf_settime32
+#else
+#define XENPF_settime XENPF_settime64
+#define xenpf_settime xenpf_settime64
+#endif
typedef struct xenpf_settime xenpf_settime_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
@@ -127,6 +142,26 @@
#define XEN_EFI_query_variable_info 9
#define XEN_EFI_query_capsule_capabilities 10
#define XEN_EFI_update_capsule 11
+
+struct xenpf_efi_time {
+ uint16_t year;
+ uint8_t month;
+ uint8_t day;
+ uint8_t hour;
+ uint8_t min;
+ uint8_t sec;
+ uint32_t ns;
+ int16_t tz;
+ uint8_t daylight;
+};
+
+struct xenpf_efi_guid {
+ uint32_t data1;
+ uint16_t data2;
+ uint16_t data3;
+ uint8_t data4[8];
+};
+
struct xenpf_efi_runtime_call {
uint32_t function;
/*
@@ -135,21 +170,11 @@
* where it holds the single returned value.
*/
uint32_t misc;
- unsigned long status;
+ xen_ulong_t status;
union {
#define XEN_EFI_GET_TIME_SET_CLEARS_NS 0x00000001
struct {
- struct xenpf_efi_time {
- uint16_t year;
- uint8_t month;
- uint8_t day;
- uint8_t hour;
- uint8_t min;
- uint8_t sec;
- uint32_t ns;
- int16_t tz;
- uint8_t daylight;
- } time;
+ struct xenpf_efi_time time;
uint32_t resolution;
uint32_t accuracy;
} get_time;
@@ -169,22 +194,18 @@
#define XEN_EFI_VARIABLE_RUNTIME_ACCESS 0x00000004
struct {
XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */
- unsigned long size;
+ xen_ulong_t size;
XEN_GUEST_HANDLE(void) data;
- struct xenpf_efi_guid {
- uint32_t data1;
- uint16_t data2;
- uint16_t data3;
- uint8_t data4[8];
- } vendor_guid;
+ struct xenpf_efi_guid vendor_guid;
} get_variable, set_variable;
struct {
- unsigned long size;
+ xen_ulong_t size;
XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */
struct xenpf_efi_guid vendor_guid;
} get_next_variable_name;
+#define XEN_EFI_VARINFO_BOOT_SNAPSHOT 0x00000001
struct {
uint32_t attr;
uint64_t max_store_size;
@@ -194,14 +215,14 @@
struct {
XEN_GUEST_HANDLE(void) capsule_header_array;
- unsigned long capsule_count;
+ xen_ulong_t capsule_count;
uint64_t max_capsule_size;
- unsigned int reset_type;
+ uint32_t reset_type;
} query_capsule_capabilities;
struct {
XEN_GUEST_HANDLE(void) capsule_header_array;
- unsigned long capsule_count;
+ xen_ulong_t capsule_count;
uint64_t sg_list; /* machine address */
} update_capsule;
} u;
@@ -219,6 +240,8 @@
#define XEN_FW_EFI_VENDOR 2
#define XEN_FW_EFI_MEM_INFO 3
#define XEN_FW_EFI_RT_VERSION 4
+#define XEN_FW_EFI_PCI_ROM 5
+#define XEN_FW_KBD_SHIFT_FLAGS 5
struct xenpf_firmware_info {
/* IN variables. */
uint32_t type;
@@ -266,7 +289,21 @@
uint64_t attr;
uint32_t type;
} mem;
+ struct {
+ /* IN variables */
+ uint16_t segment;
+ uint8_t bus;
+ uint8_t devfn;
+ uint16_t vendor;
+ uint16_t devid;
+ /* OUT variables */
+ uint64_t address;
+ xen_ulong_t size;
+ } pci_rom;
} efi_info; /* XEN_FW_EFI_INFO */
+
+ /* Int16, Fn02: Get keyboard shift flags. */
+ uint8_t kbd_shift_flags; /* XEN_FW_KBD_SHIFT_FLAGS */
} u;
};
typedef struct xenpf_firmware_info xenpf_firmware_info_t;
@@ -275,10 +312,16 @@
#define XENPF_enter_acpi_sleep 51
struct xenpf_enter_acpi_sleep {
/* IN variables */
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
uint16_t pm1a_cnt_val; /* PM1a control value. */
uint16_t pm1b_cnt_val; /* PM1b control value. */
+#else
+ uint16_t val_a; /* PM1a control / sleep type A. */
+ uint16_t val_b; /* PM1b control / sleep type B. */
+#endif
uint32_t sleep_state; /* Which state to enter (Sn). */
- uint32_t flags; /* Must be zero. */
+#define XENPF_ACPI_SLEEP_EXTENDED 0x00000001
+ uint32_t flags; /* XENPF_ACPI_SLEEP_*. */
};
typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t);
@@ -506,6 +549,67 @@
DEFINE_XEN_GUEST_HANDLE(xenpf_core_parking_t);
/*
+ * Access generic platform resources (e.g., MSRs, port I/O, etc.)
+ * in a unified way. Batched resource operations in one call are supported and
+ * they are always non-preemptible and executed in their original order.
+ * The batch itself returns a negative integer for general errors, or a
+ * non-negative integer for the number of successful operations. For the latter
+ * case, the @ret in the failed entry (if any) indicates the exact error.
+ */
+#define XENPF_resource_op 61
+
+#define XEN_RESOURCE_OP_MSR_READ 0
+#define XEN_RESOURCE_OP_MSR_WRITE 1
+
+/*
+ * Specially handled MSRs:
+ * - MSR_IA32_TSC
+ * READ: Returns the scaled system time (ns) instead of the raw timestamp. In
+ * multiple entry case, if other MSR read is followed by a MSR_IA32_TSC
+ * read, then both reads are guaranteed to be performed atomically (with
+ * IRQ disabled). The return time indicates the point of reading that MSR.
+ * WRITE: Not supported.
+ */
+
+struct xenpf_resource_entry {
+ union {
+ uint32_t cmd; /* IN: XEN_RESOURCE_OP_* */
+ int32_t ret; /* OUT: return value for failed entry */
+ } u;
+ uint32_t rsvd; /* IN: padding and must be zero */
+ uint64_t idx; /* IN: resource address to access */
+ uint64_t val; /* IN/OUT: resource value to set/get */
+};
+typedef struct xenpf_resource_entry xenpf_resource_entry_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_resource_entry_t);
+
+struct xenpf_resource_op {
+ uint32_t nr_entries; /* number of resource entries */
+ uint32_t cpu; /* which cpu to run */
+ XEN_GUEST_HANDLE(xenpf_resource_entry_t) entries;
+};
+typedef struct xenpf_resource_op xenpf_resource_op_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_resource_op_t);
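For example (a sketch only; HYPERVISOR_platform_op() is the usual wrapper and the MSR index is merely illustrative), a single batched MSR read on CPU 0:

    xenpf_resource_entry_t entry = {
        .u.cmd = XEN_RESOURCE_OP_MSR_READ,
        .idx   = 0x10,                          /* IA32_TIME_STAMP_COUNTER */
    };
    struct xen_platform_op op = {
        .cmd               = XENPF_resource_op,
        .interface_version = XENPF_INTERFACE_VERSION,
        .u.resource_op     = { .nr_entries = 1, .cpu = 0 },
    };
    set_xen_guest_handle(op.u.resource_op.entries, &entry);

    if (HYPERVISOR_platform_op(&op) == 1) {
        /* entry.val holds the scaled system time, per the MSR_IA32_TSC note */
    }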
+
+#define XENPF_get_symbol 63
+struct xenpf_symdata {
+ /* IN/OUT variables */
+ uint32_t namelen; /* IN: size of name buffer */
+ /* OUT: strlen(name) of hypervisor symbol (may be */
+ /* larger than what's been copied to guest) */
+ uint32_t symnum; /* IN: Symbol to read */
+ /* OUT: Next available symbol. If same as IN then */
+ /* we reached the end */
+
+ /* OUT variables */
+ XEN_GUEST_HANDLE(char) name;
+ uint64_t address;
+ char type;
+};
+typedef struct xenpf_symdata xenpf_symdata_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_symdata_t);
+
+/*
* ` enum neg_errnoval
* ` HYPERVISOR_platform_op(const struct xen_platform_op*);
*/
@@ -514,6 +618,8 @@
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
union {
struct xenpf_settime settime;
+ struct xenpf_settime32 settime32;
+ struct xenpf_settime64 settime64;
struct xenpf_add_memtype add_memtype;
struct xenpf_del_memtype del_memtype;
struct xenpf_read_memtype read_memtype;
@@ -531,6 +637,8 @@
struct xenpf_cpu_hotadd cpu_add;
struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking;
+ struct xenpf_resource_op resource_op;
+ struct xenpf_symdata symdata;
uint8_t pad[128];
} u;
};
@@ -542,7 +650,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Added: trunk/sys/xen/interface/pmu.h
===================================================================
--- trunk/sys/xen/interface/pmu.h (rev 0)
+++ trunk/sys/xen/interface/pmu.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -0,0 +1,134 @@
+/* $MidnightBSD$ */
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef __XEN_PUBLIC_PMU_H__
+#define __XEN_PUBLIC_PMU_H__
+
+#include "xen.h"
+#if defined(__i386__) || defined(__x86_64__)
+#include "arch-x86/pmu.h"
+#elif defined (__arm__) || defined (__aarch64__)
+#include "arch-arm.h"
+#else
+#error "Unsupported architecture"
+#endif
+
+#define XENPMU_VER_MAJ 0
+#define XENPMU_VER_MIN 1
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_xenpmu_op(enum xenpmu_op cmd, struct xenpmu_params *args);
+ *
+ * @cmd == XENPMU_* (PMU operation)
+ * @args == struct xenpmu_params
+ */
+/* ` enum xenpmu_op { */
+#define XENPMU_mode_get 0 /* Also used for getting PMU version */
+#define XENPMU_mode_set 1
+#define XENPMU_feature_get 2
+#define XENPMU_feature_set 3
+#define XENPMU_init 4
+#define XENPMU_finish 5
+#define XENPMU_lvtpc_set 6
+#define XENPMU_flush 7 /* Write cached MSR values to HW */
+/* ` } */
+
+/* Parameters structure for HYPERVISOR_xenpmu_op call */
+struct xen_pmu_params {
+ /* IN/OUT parameters */
+ struct {
+ uint32_t maj;
+ uint32_t min;
+ } version;
+ uint64_t val;
+
+ /* IN parameters */
+ uint32_t vcpu;
+ uint32_t pad;
+};
+typedef struct xen_pmu_params xen_pmu_params_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t);
+
+/* PMU modes:
+ * - XENPMU_MODE_OFF: No PMU virtualization
+ * - XENPMU_MODE_SELF: Guests can profile themselves
+ * - XENPMU_MODE_HV: Guests can profile themselves, dom0 profiles
+ * itself and Xen
+ * - XENPMU_MODE_ALL: Only dom0 has access to VPMU and it profiles
+ * everyone: itself, the hypervisor and the guests.
+ */
+#define XENPMU_MODE_OFF 0
+#define XENPMU_MODE_SELF (1<<0)
+#define XENPMU_MODE_HV (1<<1)
+#define XENPMU_MODE_ALL (1<<2)
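A guest that wants to profile itself might enable this roughly as follows (a sketch; HYPERVISOR_xenpmu_op() is the assumed wrapper name):

    xen_pmu_params_t p = {
        .version = { .maj = XENPMU_VER_MAJ, .min = XENPMU_VER_MIN },
        .val     = XENPMU_MODE_SELF,
    };
    int rc = HYPERVISOR_xenpmu_op(XENPMU_mode_set, &p);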
+
+/*
+ * PMU features:
+ * - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
+ */
+#define XENPMU_FEATURE_INTEL_BTS 1
+
+/*
+ * Shared PMU data between hypervisor and PV(H) domains.
+ *
+ * The hypervisor fills out this structure during PMU interrupt and sends an
+ * interrupt to appropriate VCPU.
+ * Architecture-independent fields of xen_pmu_data are WO for the hypervisor
+ * and RO for the guest but some fields in xen_pmu_arch can be writable
+ * by both the hypervisor and the guest (see arch-$arch/pmu.h).
+ */
+struct xen_pmu_data {
+ /* Interrupted VCPU */
+ uint32_t vcpu_id;
+
+ /*
+ * Physical processor on which the interrupt occurred. On non-privileged
+ * guests set to vcpu_id;
+ */
+ uint32_t pcpu_id;
+
+ /*
+ * Domain that was interrupted. On non-privileged guests set to DOMID_SELF.
+ * On privileged guests can be DOMID_SELF, DOMID_XEN, or, when in
+ * XENPMU_MODE_ALL mode, domain ID of another domain.
+ */
+ domid_t domain_id;
+
+ uint8_t pad[6];
+
+ /* Architecture-specific information */
+ struct xen_pmu_arch pmu;
+};
+
+#endif /* __XEN_PUBLIC_PMU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
Property changes on: trunk/sys/xen/interface/pmu.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Modified: trunk/sys/xen/interface/sched.h
===================================================================
--- trunk/sys/xen/interface/sched.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/sched.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -1,9 +1,9 @@
/* $MidnightBSD$ */
/******************************************************************************
* sched.h
- *
+ *
* Scheduler state interactions
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@@ -31,11 +31,21 @@
#include "event_channel.h"
/*
+ * `incontents 150 sched Guest Scheduler Operations
+ *
+ * The SCHEDOP interface provides mechanisms for a guest to interact
+ * with the scheduler, including yield, blocking and shutting itself
+ * down.
+ */
+
+/*
* The prototype for this hypercall is:
- * long sched_op(int cmd, void *arg)
+ * ` long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...)
+ *
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == Operation-specific extra argument(s), as described below.
- *
+ * ... == Additional Operation-specific extra arguments, described below.
+ *
* Versions of Xen prior to 3.0.2 provided only the following legacy version
* of this hypercall, supporting only the commands yield, block and shutdown:
* long sched_op(int cmd, unsigned long arg)
@@ -42,9 +52,12 @@
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
* == SHUTDOWN_* code (SCHEDOP_shutdown)
- * This legacy version is available to new guests as sched_op_compat().
+ *
+ * This legacy version is available to new guests as:
+ * ` long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg)
*/
+/* ` enum sched_op { // SCHEDOP_* => struct sched_* */
/*
* Voluntarily yield the CPU.
* @arg == NULL.
@@ -62,53 +75,44 @@
/*
* Halt execution of this domain (all VCPUs) and notify the system controller.
- * @arg == pointer to sched_shutdown structure.
+ * @arg == pointer to sched_shutdown_t structure.
+ *
+ * If the sched_shutdown_t reason is SHUTDOWN_suspend then
+ * x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN
+ * of the guest's start info page. RDX/EDX is the third hypercall
+ * argument.
+ *
+ * In addition, when the reason is SHUTDOWN_suspend this hypercall
+ * returns 1 if suspend was cancelled or the domain was merely
+ * checkpointed, and 0 if it is resuming in a new domain.
*/
#define SCHEDOP_shutdown 2
-struct sched_shutdown {
- unsigned int reason; /* SHUTDOWN_* */
-};
-typedef struct sched_shutdown sched_shutdown_t;
-DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
/*
* Poll a set of event-channel ports. Return when one or more are pending. An
* optional timeout may be specified.
- * @arg == pointer to sched_poll structure.
+ * @arg == pointer to sched_poll_t structure.
*/
#define SCHEDOP_poll 3
-struct sched_poll {
- XEN_GUEST_HANDLE(evtchn_port_t) ports;
- unsigned int nr_ports;
- uint64_t timeout;
-};
-typedef struct sched_poll sched_poll_t;
-DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
/*
* Declare a shutdown for another domain. The main use of this function is
* in interpreting shutdown requests and reasons for fully-virtualized
* domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
- * @arg == pointer to sched_remote_shutdown structure.
+ * @arg == pointer to sched_remote_shutdown_t structure.
*/
#define SCHEDOP_remote_shutdown 4
-struct sched_remote_shutdown {
- domid_t domain_id; /* Remote domain ID */
- unsigned int reason; /* SHUTDOWN_xxx reason */
-};
-typedef struct sched_remote_shutdown sched_remote_shutdown_t;
-DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
/*
* Latch a shutdown code, so that when the domain later shuts down it
* reports this code to the control tools.
- * @arg == as for SCHEDOP_shutdown.
+ * @arg == sched_shutdown_t, as for SCHEDOP_shutdown.
*/
#define SCHEDOP_shutdown_code 5
/*
* Setup, poke and destroy a domain watchdog timer.
- * @arg == pointer to sched_watchdog structure.
+ * @arg == pointer to sched_watchdog_t structure.
* With id == 0, setup a domain watchdog timer to cause domain shutdown
* after timeout, returns watchdog id.
* With id != 0 and timeout == 0, destroy domain watchdog timer.
@@ -115,6 +119,29 @@
* With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
*/
#define SCHEDOP_watchdog 6
+/* ` } */
+
+struct sched_shutdown {
+ unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+typedef struct sched_shutdown sched_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
+
+struct sched_poll {
+ XEN_GUEST_HANDLE(evtchn_port_t) ports;
+ unsigned int nr_ports;
+ uint64_t timeout;
+};
+typedef struct sched_poll sched_poll_t;
+DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
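For example (a sketch; "my_port", "now_ns" and the HYPERVISOR_sched_op() wrapper are assumed caller-side names), polling one port with a 10 ms timeout:

    evtchn_port_t port = my_port;
    struct sched_poll poll = {
        .nr_ports = 1,
        .timeout  = now_ns + 10000000ULL,       /* absolute system time, in ns */
    };
    set_xen_guest_handle(poll.ports, &port);
    int rc = HYPERVISOR_sched_op(SCHEDOP_poll, &poll);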
+
+struct sched_remote_shutdown {
+ domid_t domain_id; /* Remote domain ID */
+ unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+typedef struct sched_remote_shutdown sched_remote_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
+
struct sched_watchdog {
uint32_t id; /* watchdog ID */
uint32_t timeout; /* timeout */
@@ -127,11 +154,14 @@
* software to determine the appropriate action. For the most part, Xen does
* not care about the shutdown code.
*/
+/* ` enum sched_shutdown_reason { */
#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
+#define SHUTDOWN_MAX 4 /* Maximum valid shutdown reason. */
+/* ` } */
#endif /* __XEN_PUBLIC_SCHED_H__ */
@@ -138,7 +168,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/sysctl.h
===================================================================
--- trunk/sys/xen/interface/sysctl.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/sysctl.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -34,8 +34,10 @@
#include "xen.h"
#include "domctl.h"
+#include "physdev.h"
+#include "tmem.h"
-#define XEN_SYSCTL_INTERFACE_VERSION 0x00000009
+#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000C
/*
* Read console content from Xen buffer ring.
@@ -72,7 +74,7 @@
#define XEN_SYSCTL_TBUFOP_disable 5
uint32_t cmd;
/* IN/OUT variables */
- struct xenctl_cpumap cpu_mask;
+ struct xenctl_bitmap cpu_mask;
uint32_t evt_mask;
/* OUT variables */
uint64_aligned_t buffer_mfn;
@@ -102,6 +104,7 @@
uint64_aligned_t total_pages;
uint64_aligned_t free_pages;
uint64_aligned_t scrub_pages;
+ uint64_aligned_t outstanding_pages;
uint32_t hw_cap[8];
/* XEN_SYSCTL_PHYSCAP_??? */
@@ -226,13 +229,17 @@
uint64_aligned_t idle_time; /* idle time from boot */
XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */
XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
- uint64_aligned_t pc2;
- uint64_aligned_t pc3;
- uint64_aligned_t pc6;
- uint64_aligned_t pc7;
- uint64_aligned_t cc3;
- uint64_aligned_t cc6;
- uint64_aligned_t cc7;
+ uint32_t nr_pc; /* entry nr in pc[] */
+ uint32_t nr_cc; /* entry nr in cc[] */
+ /*
+ * These two arrays may (and generally will) have unused slots; slots not
+ * having a corresponding hardware register will not be written by the
+ * hypervisor. It is therefore up to the caller to put a suitable sentinel
+ * into all slots before invoking the function.
+ * Indexing is 1-biased (PC1/CC1 being at index 0).
+ */
+ XEN_GUEST_HANDLE_64(uint64) pc;
+ XEN_GUEST_HANDLE_64(uint64) cc;
};
struct xen_sysctl_get_pmstat {
@@ -458,61 +465,76 @@
typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
-/* XEN_SYSCTL_topologyinfo */
-#define INVALID_TOPOLOGY_ID (~0U)
-struct xen_sysctl_topologyinfo {
- /*
- * IN: maximum addressable entry in the caller-provided arrays.
- * OUT: largest cpu identifier in the system.
- * If OUT is greater than IN then the arrays are truncated!
- * If OUT is leass than IN then the array tails are not written by sysctl.
- */
- uint32_t max_cpu_index;
+/* XEN_SYSCTL_cputopoinfo */
+#define XEN_INVALID_CORE_ID (~0U)
+#define XEN_INVALID_SOCKET_ID (~0U)
+#define XEN_INVALID_NODE_ID (~0U)
- /*
- * If not NULL, these arrays are filled with core/socket/node identifier
- * for each cpu.
- * If a cpu has no core/socket/node information (e.g., cpu not present)
- * then the sentinel value ~0u is written to each array.
- * The number of array elements written by the sysctl is:
- * min(@max_cpu_index_IN, @max_cpu_index_OUT)+1
- */
- XEN_GUEST_HANDLE_64(uint32) cpu_to_core;
- XEN_GUEST_HANDLE_64(uint32) cpu_to_socket;
- XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
+struct xen_sysctl_cputopo {
+ uint32_t core;
+ uint32_t socket;
+ uint32_t node;
};
-typedef struct xen_sysctl_topologyinfo xen_sysctl_topologyinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
+typedef struct xen_sysctl_cputopo xen_sysctl_cputopo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopo_t);
+/*
+ * IN:
+ * - a NULL 'cputopo' handle is a request for maximum 'num_cpus'.
+ * - otherwise it's the number of entries in 'cputopo'
+ *
+ * OUT:
+ * - If 'num_cpus' is less than the number Xen wants to write but the
+ * handle is not a NULL one, partial data gets returned and 'num_cpus' gets
+ * updated to reflect the intended number.
+ * - Otherwise, 'num_cpus' shall indicate the number of entries written, which
+ * may be less than the input value.
+ */
+struct xen_sysctl_cputopoinfo {
+ uint32_t num_cpus;
+ XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
+};
+typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
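Caller-side sketch of the protocol described above (do_sysctl() and MAX_CPUS are hypothetical toolstack names, not part of this header):

    xen_sysctl_cputopo_t topo[MAX_CPUS];
    struct xen_sysctl sysctl = {
        .cmd               = XEN_SYSCTL_cputopoinfo,
        .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
    };

    sysctl.u.cputopoinfo.num_cpus = MAX_CPUS;
    set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, topo);
    if (do_sysctl(&sysctl) == 0) {
        /* topo[0 .. num_cpus-1] now hold core/socket/node for each CPU,
         * with num_cpus updated to the number of entries written. */
    }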
+
/* XEN_SYSCTL_numainfo */
-#define INVALID_NUMAINFO_ID (~0U)
+#define XEN_INVALID_MEM_SZ (~0U)
+#define XEN_INVALID_NODE_DIST (~0U)
+
+struct xen_sysctl_meminfo {
+ uint64_t memsize;
+ uint64_t memfree;
+};
+typedef struct xen_sysctl_meminfo xen_sysctl_meminfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_meminfo_t);
+
+/*
+ * IN:
+ * - Both 'meminfo' and 'distance' handles being null is a request
+ * for maximum value of 'num_nodes'.
+ * - Otherwise it's the number of entries in 'meminfo' and the square
+ * root of the number of entries in 'distance' (when the corresponding
+ * handle is non-null)
+ *
+ * OUT:
+ * - If 'num_nodes' is less than the number Xen wants to write but either
+ * handle is not a NULL one, partial data gets returned and 'num_nodes'
+ * gets updated to reflect the intended number.
+ * - Otherwise, 'num_nodes' shall indicate the number of entries written, which
+ * may be less than the input value.
+ */
+
struct xen_sysctl_numainfo {
- /*
- * IN: maximum addressable entry in the caller-provided arrays.
- * OUT: largest node identifier in the system.
- * If OUT is greater than IN then the arrays are truncated!
- */
- uint32_t max_node_index;
+ uint32_t num_nodes;
- /* NB. Entries are 0 if node is not present. */
- XEN_GUEST_HANDLE_64(uint64) node_to_memsize;
- XEN_GUEST_HANDLE_64(uint64) node_to_memfree;
+ XEN_GUEST_HANDLE_64(xen_sysctl_meminfo_t) meminfo;
/*
- * Array, of size (max_node_index+1)^2, listing memory access distances
- * between nodes. If an entry has no node distance information (e.g., node
- * not present) then the value ~0u is written.
- *
- * Note that the array rows must be indexed by multiplying by the minimum
- * of the caller-provided max_node_index and the returned value of
- * max_node_index. That is, if the largest node index in the system is
- * smaller than the caller can handle, a smaller 2-d array is constructed
- * within the space provided by the caller. When this occurs, trailing
- * space provided by the caller is not modified. If the largest node index
- * in the system is larger than the caller can handle, then a 2-d array of
- * the maximum size handleable by the caller is constructed.
+ * Distance between nodes 'i' and 'j' is stored in index 'i*N + j',
+ * where N is the number of nodes that will be returned in 'num_nodes'
+ * (i.e. not 'num_nodes' provided by the caller)
*/
- XEN_GUEST_HANDLE_64(uint32) node_to_node_distance;
+ XEN_GUEST_HANDLE_64(uint32) distance;
};
typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
@@ -533,7 +555,7 @@
uint32_t domid; /* IN: M */
uint32_t cpu; /* IN: AR */
uint32_t n_dom; /* OUT: I */
- struct xenctl_cpumap cpumap; /* OUT: IF */
+ struct xenctl_bitmap cpumap; /* OUT: IF */
};
typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
@@ -597,6 +619,152 @@
typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
+/* XEN_SYSCTL_coverage_op */
+/*
+ * Get total size of information, to help allocate
+ * the buffer. The pointer points to a 32 bit value.
+ */
+#define XEN_SYSCTL_COVERAGE_get_total_size 0
+
+/*
+ * Read coverage information in a single run
+ * You must use a tool to split them.
+ */
+#define XEN_SYSCTL_COVERAGE_read 1
+
+/*
+ * Reset all the coverage counters to 0
+ * No parameters.
+ */
+#define XEN_SYSCTL_COVERAGE_reset 2
+
+/*
+ * Like XEN_SYSCTL_COVERAGE_read but reset also
+ * counters to 0 in a single call.
+ */
+#define XEN_SYSCTL_COVERAGE_read_and_reset 3
+
+struct xen_sysctl_coverage_op {
+ uint32_t cmd; /* XEN_SYSCTL_COVERAGE_* */
+ union {
+ uint32_t total_size; /* OUT */
+ XEN_GUEST_HANDLE_64(uint8) raw_info; /* OUT */
+ } u;
+};
+typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t);
+
+#define XEN_SYSCTL_PSR_CMT_get_total_rmid 0
+#define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor 1
+/* The L3 cache size is returned in KB unit */
+#define XEN_SYSCTL_PSR_CMT_get_l3_cache_size 2
+#define XEN_SYSCTL_PSR_CMT_enabled 3
+#define XEN_SYSCTL_PSR_CMT_get_l3_event_mask 4
+struct xen_sysctl_psr_cmt_op {
+ uint32_t cmd; /* IN: XEN_SYSCTL_PSR_CMT_* */
+ uint32_t flags; /* padding variable, may be extended for future use */
+ union {
+ uint64_t data; /* OUT */
+ struct {
+ uint32_t cpu; /* IN */
+ uint32_t rsvd;
+ } l3_cache;
+ } u;
+};
+typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
+
+/* XEN_SYSCTL_pcitopoinfo */
+#define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
+struct xen_sysctl_pcitopoinfo {
+ /*
+ * IN: Number of elements in 'devs' and 'nodes' arrays.
+ * OUT: Number of processed elements of those arrays.
+ */
+ uint32_t num_devs;
+
+ /* IN: list of devices for which node IDs are requested. */
+ XEN_GUEST_HANDLE_64(physdev_pci_device_t) devs;
+
+ /*
+ * OUT: node identifier for each device.
+ * If information for a particular device is not available then
+ * corresponding entry will be set to XEN_INVALID_NODE_ID. If
+ * device is not known to the hypervisor then XEN_INVALID_DEV
+ * will be provided.
+ */
+ XEN_GUEST_HANDLE_64(uint32) nodes;
+};
+typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);
+
+#define XEN_SYSCTL_PSR_CAT_get_l3_info 0
+struct xen_sysctl_psr_cat_op {
+ uint32_t cmd; /* IN: XEN_SYSCTL_PSR_CAT_* */
+ uint32_t target; /* IN */
+ union {
+ struct {
+ uint32_t cbm_len; /* OUT: CBM length */
+ uint32_t cos_max; /* OUT: Maximum COS */
+ } l3_info;
+ } u;
+};
+typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
+
+#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
+
+#define XEN_SYSCTL_TMEM_OP_THAW 0
+#define XEN_SYSCTL_TMEM_OP_FREEZE 1
+#define XEN_SYSCTL_TMEM_OP_FLUSH 2
+#define XEN_SYSCTL_TMEM_OP_DESTROY 3
+#define XEN_SYSCTL_TMEM_OP_LIST 4
+#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT 5
+#define XEN_SYSCTL_TMEM_OP_SET_CAP 6
+#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS 7
+#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB 8
+#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN 10
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION 11
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS 12
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP 14
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS 15
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS 16
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES 17
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID 18
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE 19
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV 20
+#define XEN_SYSCTL_TMEM_OP_SAVE_END 21
+#define XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN 30
+#define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE 32
+#define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE 33
+
+/*
+ * XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_[PAGE|INV] override the 'buf' in
+ * xen_sysctl_tmem_op with this structure - sometimes with an extra
+ * page tacked on.
+ */
+struct tmem_handle {
+ uint32_t pool_id;
+ uint32_t index;
+ xen_tmem_oid_t oid;
+};
+
+struct xen_sysctl_tmem_op {
+ uint32_t cmd; /* IN: XEN_SYSCTL_TMEM_OP_* . */
+ int32_t pool_id; /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
+ uint32_t cli_id; /* IN: client id, 0 for XEN_SYSCTL_TMEM_QUERY_FREEABLE_MB
+ for all others can be the domain id or
+ XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all. */
+ uint32_t arg1; /* IN: If not applicable to command use 0. */
+ uint32_t arg2; /* IN: If not applicable to command use 0. */
+ uint32_t pad; /* Padding so structure is the same under 32 and 64. */
+ xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
+ XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
+};
+typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
+
struct xen_sysctl {
uint32_t cmd;
#define XEN_SYSCTL_readconsole 1
@@ -613,16 +781,22 @@
#define XEN_SYSCTL_pm_op 12
#define XEN_SYSCTL_page_offline_op 14
#define XEN_SYSCTL_lockprof_op 15
-#define XEN_SYSCTL_topologyinfo 16
+#define XEN_SYSCTL_cputopoinfo 16
#define XEN_SYSCTL_numainfo 17
#define XEN_SYSCTL_cpupool_op 18
#define XEN_SYSCTL_scheduler_op 19
+#define XEN_SYSCTL_coverage_op 20
+#define XEN_SYSCTL_psr_cmt_op 21
+#define XEN_SYSCTL_pcitopoinfo 22
+#define XEN_SYSCTL_psr_cat_op 23
+#define XEN_SYSCTL_tmem_op 24
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
struct xen_sysctl_readconsole readconsole;
struct xen_sysctl_tbuf_op tbuf_op;
struct xen_sysctl_physinfo physinfo;
- struct xen_sysctl_topologyinfo topologyinfo;
+ struct xen_sysctl_cputopoinfo cputopoinfo;
+ struct xen_sysctl_pcitopoinfo pcitopoinfo;
struct xen_sysctl_numainfo numainfo;
struct xen_sysctl_sched_id sched_id;
struct xen_sysctl_perfc_op perfc_op;
@@ -637,6 +811,10 @@
struct xen_sysctl_lockprof_op lockprof_op;
struct xen_sysctl_cpupool_op cpupool_op;
struct xen_sysctl_scheduler_op scheduler_op;
+ struct xen_sysctl_coverage_op coverage_op;
+ struct xen_sysctl_psr_cmt_op psr_cmt_op;
+ struct xen_sysctl_psr_cat_op psr_cat_op;
+ struct xen_sysctl_tmem_op tmem_op;
uint8_t pad[128];
} u;
};
@@ -648,7 +826,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
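
A rough usage sketch for the coverage interface added above: a privileged caller first asks for the size of the gcov blob, then reads it. The issue_sysctl() wrapper and the XEN_SYSCTL_COVERAGE_get_total_size sub-op (value 0, defined just before the quoted hunk) are illustrative assumptions, not definitions introduced by this patch:

/* Sketch only: read the hypervisor's gcov data via XEN_SYSCTL_coverage_op.
 * issue_sysctl() stands in for whatever HYPERVISOR_sysctl wrapper the
 * toolstack provides; set_xen_guest_handle() comes from the arch headers. */
static uint8_t *read_coverage_blob(uint32_t *size_out)
{
    struct xen_sysctl sysctl;
    uint32_t size;
    uint8_t *buf;

    memset(&sysctl, 0, sizeof(sysctl));
    sysctl.cmd = XEN_SYSCTL_coverage_op;
    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;

    /* Pass 1: ask how large the coverage data currently is. */
    sysctl.u.coverage_op.cmd = XEN_SYSCTL_COVERAGE_get_total_size; /* assumed sub-op 0 */
    if (issue_sysctl(&sysctl) != 0)
        return NULL;
    size = sysctl.u.coverage_op.u.total_size;   /* union member, save before reuse */

    buf = malloc(size);
    if (buf == NULL)
        return NULL;

    /* Pass 2: fetch the raw counters into the buffer just allocated. */
    sysctl.u.coverage_op.cmd = XEN_SYSCTL_COVERAGE_read;
    set_xen_guest_handle(sysctl.u.coverage_op.u.raw_info, buf);
    if (issue_sysctl(&sysctl) != 0) {
        free(buf);
        return NULL;
    }

    *size_out = size;
    return buf;
}
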
Modified: trunk/sys/xen/interface/tmem.h
===================================================================
--- trunk/sys/xen/interface/tmem.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/tmem.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -34,48 +34,28 @@
#define TMEM_SPEC_VERSION 1
/* Commands to HYPERVISOR_tmem_op() */
-#define TMEM_CONTROL 0
+#ifdef __XEN__
+#define TMEM_CONTROL 0 /* Now called XEN_SYSCTL_tmem_op */
+#else
+#undef TMEM_CONTROL
+#endif
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
-#define TMEM_NEW_PAGE 3
#define TMEM_PUT_PAGE 4
#define TMEM_GET_PAGE 5
#define TMEM_FLUSH_PAGE 6
#define TMEM_FLUSH_OBJECT 7
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
+#define TMEM_NEW_PAGE 3
#define TMEM_READ 8
#define TMEM_WRITE 9
#define TMEM_XCHG 10
+#endif
/* Privileged commands to HYPERVISOR_tmem_op() */
-#define TMEM_AUTH 101
+#define TMEM_AUTH 101
#define TMEM_RESTORE_NEW 102
-/* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */
-#define TMEMC_THAW 0
-#define TMEMC_FREEZE 1
-#define TMEMC_FLUSH 2
-#define TMEMC_DESTROY 3
-#define TMEMC_LIST 4
-#define TMEMC_SET_WEIGHT 5
-#define TMEMC_SET_CAP 6
-#define TMEMC_SET_COMPRESS 7
-#define TMEMC_QUERY_FREEABLE_MB 8
-#define TMEMC_SAVE_BEGIN 10
-#define TMEMC_SAVE_GET_VERSION 11
-#define TMEMC_SAVE_GET_MAXPOOLS 12
-#define TMEMC_SAVE_GET_CLIENT_WEIGHT 13
-#define TMEMC_SAVE_GET_CLIENT_CAP 14
-#define TMEMC_SAVE_GET_CLIENT_FLAGS 15
-#define TMEMC_SAVE_GET_POOL_FLAGS 16
-#define TMEMC_SAVE_GET_POOL_NPAGES 17
-#define TMEMC_SAVE_GET_POOL_UUID 18
-#define TMEMC_SAVE_GET_NEXT_PAGE 19
-#define TMEMC_SAVE_GET_NEXT_INV 20
-#define TMEMC_SAVE_END 21
-#define TMEMC_RESTORE_BEGIN 30
-#define TMEMC_RESTORE_PUT_PAGE 32
-#define TMEMC_RESTORE_FLUSH_PAGE 33
-
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2
@@ -94,9 +74,16 @@
#define EFROZEN 1000
#define EEMPTY 1001
+struct xen_tmem_oid {
+ uint64_t oid[3];
+};
+typedef struct xen_tmem_oid xen_tmem_oid_t;
+DEFINE_XEN_GUEST_HANDLE(xen_tmem_oid_t);
#ifndef __ASSEMBLY__
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
typedef xen_pfn_t tmem_cli_mfn_t;
+#endif
typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t;
struct tmem_op {
uint32_t cmd;
@@ -107,33 +94,22 @@
uint32_t flags;
uint32_t arg1;
} creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
- struct {
- uint32_t subop;
- uint32_t cli_id;
- uint32_t arg1;
- uint32_t arg2;
- uint64_t oid[3];
- tmem_cli_va_t buf;
- } ctrl; /* for cmd == TMEM_CONTROL */
struct {
-
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
uint64_t oid[3];
+#else
+ xen_tmem_oid_t oid;
+#endif
uint32_t index;
uint32_t tmem_offset;
uint32_t pfn_offset;
uint32_t len;
- tmem_cli_mfn_t cmfn; /* client machine page frame */
+ xen_pfn_t cmfn; /* client machine page frame */
} gen; /* for all other cmd ("generic") */
} u;
};
typedef struct tmem_op tmem_op_t;
DEFINE_XEN_GUEST_HANDLE(tmem_op_t);
-
-struct tmem_handle {
- uint32_t pool_id;
- uint32_t index;
- uint64_t oid[3];
-};
#endif
#endif /* __XEN_PUBLIC_TMEM_H__ */
@@ -141,7 +117,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
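
For illustration, this is roughly how a guest fills the generic arm of tmem_op now that the OID is the structured xen_tmem_oid_t and cmfn is a plain xen_pfn_t. The pool_id field sits in the part of the struct elided by the hunk above, and the hypercall wrapper is OS-specific, so this is a sketch only:

/* Sketch only: request a page back from a previously created tmem pool. */
static void tmem_get_page_sketch(int32_t pool, uint64_t object,
                                 uint32_t index, xen_pfn_t frame)
{
    struct tmem_op op;

    memset(&op, 0, sizeof(op));
    op.cmd = TMEM_GET_PAGE;
    op.pool_id = pool;               /* returned earlier by TMEM_NEW_POOL */
    op.u.gen.oid.oid[0] = object;    /* xen_tmem_oid_t layout, interface >= 0x00040600 */
    op.u.gen.index = index;
    op.u.gen.cmfn = frame;           /* guest frame to copy the page into */
    /* HYPERVISOR_tmem_op(&op) would be issued here by the OS's hypercall glue. */
}
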
Modified: trunk/sys/xen/interface/trace.h
===================================================================
--- trunk/sys/xen/interface/trace.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/trace.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -51,13 +51,41 @@
#define TRC_SUBCLS_SHIFT 12
/* trace subclasses for SVM */
-#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
-#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
+#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
+#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
+#define TRC_HVM_EMUL 0x00084000 /* emulated devices */
#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
#define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */
#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */
+/*
+ * The highest 3 bits of the last 12 bits of TRC_SCHED_CLASS above are
+ * reserved for encoding what scheduler produced the information. The
+ * actual event is encoded in the last 9 bits.
+ *
+ * This means we have 8 scheduling IDs available (which means at most 8
+ * schedulers generating events) and, in each scheduler, up to 512
+ * different events.
+ */
+#define TRC_SCHED_ID_BITS 3
+#define TRC_SCHED_ID_SHIFT (TRC_SUBCLS_SHIFT - TRC_SCHED_ID_BITS)
+#define TRC_SCHED_ID_MASK (((1UL<<TRC_SCHED_ID_BITS) - 1) << TRC_SCHED_ID_SHIFT)
+#define TRC_SCHED_EVT_MASK (~(TRC_SCHED_ID_MASK))
+
+/* Per-scheduler IDs, to identify scheduler specific events */
+#define TRC_SCHED_CSCHED 0
+#define TRC_SCHED_CSCHED2 1
+/* #define XEN_SCHEDULER_SEDF 2 (Removed) */
+#define TRC_SCHED_ARINC653 3
+#define TRC_SCHED_RTDS 4
+
+/* Per-scheduler tracing */
+#define TRC_SCHED_CLASS_EVT(_c, _e) \
+ ( ( TRC_SCHED_CLASS | \
+ ((TRC_SCHED_##_c << TRC_SCHED_ID_SHIFT) & TRC_SCHED_ID_MASK) ) + \
+ (_e & TRC_SCHED_EVT_MASK) )
+
/* Trace classes for Hardware */
#define TRC_HW_PM 0x00801000 /* Power management traces */
#define TRC_HW_IRQ 0x00802000 /* Traces relating to the handling of IRQs */
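
As an example of the encoding described above, a scheduler mints its private trace IDs by combining its scheduler ID with a 9-bit event number. The event names below are illustrative; the real ones live in the individual scheduler sources rather than in this header:

/* With TRC_SCHED_ID_SHIFT == 9, RTDS (ID 4) events land in the range
 * 0x00022800-0x000229ff, i.e. TRC_SCHED_CLASS plus (4 << 9) plus the event. */
#define TRC_RTDS_TICKLE        TRC_SCHED_CLASS_EVT(RTDS, 1)    /* 0x00022801 */
#define TRC_RTDS_RUNQ_PICK     TRC_SCHED_CLASS_EVT(RTDS, 2)    /* 0x00022802 */
#define TRC_CSCHED2_RUNQ_POS   TRC_SCHED_CLASS_EVT(CSCHED2, 1) /* ID 1: 0x00022201 */
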
@@ -95,21 +123,52 @@
#define TRC_MEM_POD_ZERO_RECLAIM (TRC_MEM + 17)
#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
+#define TRC_PV_ENTRY 0x00201000 /* Hypervisor entry points for PV guests. */
+#define TRC_PV_SUBCALL 0x00202000 /* Sub-call in a multicall hypercall */
-#define TRC_PV_HYPERCALL (TRC_PV + 1)
-#define TRC_PV_TRAP (TRC_PV + 3)
-#define TRC_PV_PAGE_FAULT (TRC_PV + 4)
-#define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5)
-#define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6)
-#define TRC_PV_EMULATE_4GB (TRC_PV + 7)
-#define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8)
-#define TRC_PV_PAGING_FIXUP (TRC_PV + 9)
-#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10)
-#define TRC_PV_PTWR_EMULATION (TRC_PV + 11)
-#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12)
- /* Indicates that addresses in trace record are 64 bits */
-#define TRC_64_FLAG (0x100)
+#define TRC_PV_HYPERCALL (TRC_PV_ENTRY + 1)
+#define TRC_PV_TRAP (TRC_PV_ENTRY + 3)
+#define TRC_PV_PAGE_FAULT (TRC_PV_ENTRY + 4)
+#define TRC_PV_FORCED_INVALID_OP (TRC_PV_ENTRY + 5)
+#define TRC_PV_EMULATE_PRIVOP (TRC_PV_ENTRY + 6)
+#define TRC_PV_EMULATE_4GB (TRC_PV_ENTRY + 7)
+#define TRC_PV_MATH_STATE_RESTORE (TRC_PV_ENTRY + 8)
+#define TRC_PV_PAGING_FIXUP (TRC_PV_ENTRY + 9)
+#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV_ENTRY + 10)
+#define TRC_PV_PTWR_EMULATION (TRC_PV_ENTRY + 11)
+#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV_ENTRY + 12)
+#define TRC_PV_HYPERCALL_V2 (TRC_PV_ENTRY + 13)
+#define TRC_PV_HYPERCALL_SUBCALL (TRC_PV_SUBCALL + 14)
+/*
+ * TRC_PV_HYPERCALL_V2 format
+ *
+ * Only some of the hypercall arguments are recorded. Bit fields A0 to
+ * A5 in the first extra word are set if the argument is present and
+ * the arguments themselves are packed sequentially in the following
+ * words.
+ *
+ * The TRC_64_FLAG bit is not set for these events (even if there are
+ * 64-bit arguments in the record).
+ *
+ * Word
+ * 0 bit 31 30|29 28|27 26|25 24|23 22|21 20|19 ... 0
+ * A5 |A4 |A3 |A2 |A1 |A0 |Hypercall op
+ * 1 First 32 bit (or low word of first 64 bit) arg in record
+ * 2 Second 32 bit (or high word of first 64 bit) arg in record
+ * ...
+ *
+ * A0-A5 bitfield values:
+ *
+ * 00b Argument not present
+ * 01b 32-bit argument present
+ * 10b 64-bit argument present
+ * 11b Reserved
+ */
+#define TRC_PV_HYPERCALL_V2_ARG_32(i) (0x1 << (20 + 2*(i)))
+#define TRC_PV_HYPERCALL_V2_ARG_64(i) (0x2 << (20 + 2*(i)))
+#define TRC_PV_HYPERCALL_V2_ARG_MASK (0xfff00000)
+
#define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1)
#define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2)
#define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3)
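
A small decoding sketch for the TRC_PV_HYPERCALL_V2 layout documented above; it assumes this header is included for the TRC_PV_HYPERCALL_V2_ARG_* macros and that the extra words of the trace record have already been extracted into an array (word 0 first):

#include <stdio.h>
#include <stdint.h>

/* Sketch only: walk the A0-A5 bitfields and print the recorded arguments. */
static void decode_hypercall_v2(const uint32_t *extra)
{
    uint32_t word0 = extra[0];
    unsigned int pos = 1;                            /* first argument word */
    unsigned int i;

    printf("hypercall op %u\n", word0 & 0x000fffff); /* bits 19..0 */

    for (i = 0; i < 6; i++) {
        if (word0 & TRC_PV_HYPERCALL_V2_ARG_32(i)) {
            printf("  arg%u = 0x%08x\n", i, extra[pos]);
            pos += 1;
        } else if (word0 & TRC_PV_HYPERCALL_V2_ARG_64(i)) {
            uint64_t arg = extra[pos] | ((uint64_t)extra[pos + 1] << 32);
            printf("  arg%u = 0x%016llx\n", i, (unsigned long long)arg);
            pos += 2;                                /* low word first, then high */
        }
        /* 00b: argument not present, nothing recorded for it. */
    }
}
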
@@ -173,6 +232,25 @@
#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)
+/* Trace events for emulated devices */
+#define TRC_HVM_EMUL_HPET_START_TIMER (TRC_HVM_EMUL + 0x1)
+#define TRC_HVM_EMUL_PIT_START_TIMER (TRC_HVM_EMUL + 0x2)
+#define TRC_HVM_EMUL_RTC_START_TIMER (TRC_HVM_EMUL + 0x3)
+#define TRC_HVM_EMUL_LAPIC_START_TIMER (TRC_HVM_EMUL + 0x4)
+#define TRC_HVM_EMUL_HPET_STOP_TIMER (TRC_HVM_EMUL + 0x5)
+#define TRC_HVM_EMUL_PIT_STOP_TIMER (TRC_HVM_EMUL + 0x6)
+#define TRC_HVM_EMUL_RTC_STOP_TIMER (TRC_HVM_EMUL + 0x7)
+#define TRC_HVM_EMUL_LAPIC_STOP_TIMER (TRC_HVM_EMUL + 0x8)
+#define TRC_HVM_EMUL_PIT_TIMER_CB (TRC_HVM_EMUL + 0x9)
+#define TRC_HVM_EMUL_LAPIC_TIMER_CB (TRC_HVM_EMUL + 0xA)
+#define TRC_HVM_EMUL_PIC_INT_OUTPUT (TRC_HVM_EMUL + 0xB)
+#define TRC_HVM_EMUL_PIC_KICK (TRC_HVM_EMUL + 0xC)
+#define TRC_HVM_EMUL_PIC_INTACK (TRC_HVM_EMUL + 0xD)
+#define TRC_HVM_EMUL_PIC_POSEDGE (TRC_HVM_EMUL + 0xE)
+#define TRC_HVM_EMUL_PIC_NEGEDGE (TRC_HVM_EMUL + 0xF)
+#define TRC_HVM_EMUL_PIC_PEND_IRQ_CALL (TRC_HVM_EMUL + 0x10)
+#define TRC_HVM_EMUL_LAPIC_PIC_INTR (TRC_HVM_EMUL + 0x11)
+
/* trace events for per class */
#define TRC_PM_FREQ_CHANGE (TRC_HW_PM + 0x01)
#define TRC_PM_IDLE_ENTRY (TRC_HW_PM + 0x02)
@@ -188,6 +266,14 @@
#define TRC_HW_IRQ_UNMAPPED_VECTOR (TRC_HW_IRQ + 0x7)
#define TRC_HW_IRQ_HANDLED (TRC_HW_IRQ + 0x8)
+/*
+ * Event Flags
+ *
+ * Some events (e.g, TRC_PV_TRAP and TRC_HVM_IOMEM_READ) have multiple
+ * record formats. These event flags distinguish between the
+ * different formats.
+ */
+#define TRC_64_FLAG 0x100 /* Addresses are 64 bits (instead of 32 bits) */
/* This structure represents a single trace buffer record. */
struct t_rec {
@@ -238,7 +324,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/vcpu.h
===================================================================
--- trunk/sys/xen/interface/vcpu.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/vcpu.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -32,7 +32,7 @@
/*
* Prototype for this hypercall is:
- * int vcpu_op(int cmd, int vcpuid, void *extra_args)
+ * long vcpu_op(int cmd, unsigned int vcpuid, void *extra_args)
* @cmd == VCPUOP_??? (VCPU operation).
* @vcpuid == VCPU to operate on.
* @extra_args == Operation-specific extra arguments (NULL if none).
@@ -233,7 +233,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Modified: trunk/sys/xen/interface/version.h
===================================================================
--- trunk/sys/xen/interface/version.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/version.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -29,6 +29,8 @@
#ifndef __XEN_PUBLIC_VERSION_H__
#define __XEN_PUBLIC_VERSION_H__
+#include "xen.h"
+
/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
/* arg == NULL; returns major:minor (16:16). */
@@ -59,7 +61,7 @@
#define XENVER_platform_parameters 5
struct xen_platform_parameters {
- unsigned long virt_start;
+ xen_ulong_t virt_start;
};
typedef struct xen_platform_parameters xen_platform_parameters_t;
@@ -87,7 +89,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
Added: trunk/sys/xen/interface/vm_event.h
===================================================================
--- trunk/sys/xen/interface/vm_event.h (rev 0)
+++ trunk/sys/xen/interface/vm_event.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -0,0 +1,270 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+ * vm_event.h
+ *
+ * Memory event common structures.
+ *
+ * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _XEN_PUBLIC_VM_EVENT_H
+#define _XEN_PUBLIC_VM_EVENT_H
+
+#include "xen.h"
+
+#define VM_EVENT_INTERFACE_VERSION 0x00000001
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#include "io/ring.h"
+
+/*
+ * Memory event flags
+ */
+
+/*
+ * VCPU_PAUSED in a request signals that the vCPU triggering the event has been
+ * paused
+ * VCPU_PAUSED in a response signals to unpause the vCPU
+ */
+#define VM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
+/* Flags to aid debugging vm_event */
+#define VM_EVENT_FLAG_FOREIGN (1 << 1)
+/*
+ * The following flags can be set in response to a mem_access event.
+ *
+ * Emulate the fault-causing instruction (if set in the event response flags).
+ * This will allow the guest to continue execution without lifting the page
+ * access restrictions.
+ */
+#define VM_EVENT_FLAG_EMULATE (1 << 2)
+/*
+ * Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
+ * potentially having side effects (like memory mapped or port I/O) disabled.
+ */
+#define VM_EVENT_FLAG_EMULATE_NOWRITE (1 << 3)
+/*
+ * Toggle singlestepping on vm_event response.
+ * Requires the vCPU to be paused already (synchronous events only).
+ */
+#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP (1 << 4)
+/*
+ * Data is being sent back to the hypervisor in the event response, to be
+ * returned by the read function when emulating an instruction.
+ * This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
+ * and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
+ * (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
+ * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
+ */
+#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
+ /*
+ * Deny completion of the operation that triggered the event.
+ * Currently only useful for MSR, CR0, CR3 and CR4 write events.
+ */
+#define VM_EVENT_FLAG_DENY (1 << 6)
+/*
+ * This flag can be set in a request or a response
+ *
+ * On a request, indicates that the event occurred in the alternate p2m specified by
+ * the altp2m_idx request field.
+ *
+ * On a response, indicates that the VCPU should resume in the alternate p2m specified
+ * by the altp2m_idx response field if possible.
+ */
+#define VM_EVENT_FLAG_ALTERNATE_P2M (1 << 7)
+
+/*
+ * Reasons for the vm event request
+ */
+
+/* Default case */
+#define VM_EVENT_REASON_UNKNOWN 0
+/* Memory access violation */
+#define VM_EVENT_REASON_MEM_ACCESS 1
+/* Memory sharing event */
+#define VM_EVENT_REASON_MEM_SHARING 2
+/* Memory paging event */
+#define VM_EVENT_REASON_MEM_PAGING 3
+/* A control register was updated */
+#define VM_EVENT_REASON_WRITE_CTRLREG 4
+/* An MSR was updated. */
+#define VM_EVENT_REASON_MOV_TO_MSR 5
+/* Debug operation executed (e.g. int3) */
+#define VM_EVENT_REASON_SOFTWARE_BREAKPOINT 6
+/* Single-step (e.g. MTF) */
+#define VM_EVENT_REASON_SINGLESTEP 7
+/* An event has been requested via HVMOP_guest_request_vm_event. */
+#define VM_EVENT_REASON_GUEST_REQUEST 8
+
+/* Supported values for the vm_event_write_ctrlreg index. */
+#define VM_EVENT_X86_CR0 0
+#define VM_EVENT_X86_CR3 1
+#define VM_EVENT_X86_CR4 2
+#define VM_EVENT_X86_XCR0 3
+
+/*
+ * Using a custom struct (not hvm_hw_cpu) so as to not fill
+ * the vm_event ring buffer too quickly.
+ */
+struct vm_event_regs_x86 {
+ uint64_t rax;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rbx;
+ uint64_t rsp;
+ uint64_t rbp;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t rflags;
+ uint64_t dr7;
+ uint64_t rip;
+ uint64_t cr0;
+ uint64_t cr2;
+ uint64_t cr3;
+ uint64_t cr4;
+ uint64_t sysenter_cs;
+ uint64_t sysenter_esp;
+ uint64_t sysenter_eip;
+ uint64_t msr_efer;
+ uint64_t msr_star;
+ uint64_t msr_lstar;
+ uint64_t fs_base;
+ uint64_t gs_base;
+ uint32_t cs_arbytes;
+ uint32_t _pad;
+};
+
+/*
+ * mem_access flag definitions
+ *
+ * These flags are set only as part of a mem_event request.
+ *
+ * R/W/X: Defines the type of violation that has triggered the event
+ * Multiple types can be set in a single violation!
+ * GLA_VALID: If the gla field holds a guest VA associated with the event
+ * FAULT_WITH_GLA: If the violation was triggered by accessing gla
+ * FAULT_IN_GPT: If the violation was triggered during translating gla
+ */
+#define MEM_ACCESS_R (1 << 0)
+#define MEM_ACCESS_W (1 << 1)
+#define MEM_ACCESS_X (1 << 2)
+#define MEM_ACCESS_RWX (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
+#define MEM_ACCESS_RW (MEM_ACCESS_R | MEM_ACCESS_W)
+#define MEM_ACCESS_RX (MEM_ACCESS_R | MEM_ACCESS_X)
+#define MEM_ACCESS_WX (MEM_ACCESS_W | MEM_ACCESS_X)
+#define MEM_ACCESS_GLA_VALID (1 << 3)
+#define MEM_ACCESS_FAULT_WITH_GLA (1 << 4)
+#define MEM_ACCESS_FAULT_IN_GPT (1 << 5)
+
+struct vm_event_mem_access {
+ uint64_t gfn;
+ uint64_t offset;
+ uint64_t gla; /* if flags has MEM_ACCESS_GLA_VALID set */
+ uint32_t flags; /* MEM_ACCESS_* */
+ uint32_t _pad;
+};
+
+struct vm_event_write_ctrlreg {
+ uint32_t index;
+ uint32_t _pad;
+ uint64_t new_value;
+ uint64_t old_value;
+};
+
+struct vm_event_debug {
+ uint64_t gfn;
+};
+
+struct vm_event_mov_to_msr {
+ uint64_t msr;
+ uint64_t value;
+};
+
+#define MEM_PAGING_DROP_PAGE (1 << 0)
+#define MEM_PAGING_EVICT_FAIL (1 << 1)
+
+struct vm_event_paging {
+ uint64_t gfn;
+ uint32_t p2mt;
+ uint32_t flags;
+};
+
+struct vm_event_sharing {
+ uint64_t gfn;
+ uint32_t p2mt;
+ uint32_t _pad;
+};
+
+struct vm_event_emul_read_data {
+ uint32_t size;
+ /* The struct is used in a union with vm_event_regs_x86. */
+ uint8_t data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
+};
+
+typedef struct vm_event_st {
+ uint32_t version; /* VM_EVENT_INTERFACE_VERSION */
+ uint32_t flags; /* VM_EVENT_FLAG_* */
+ uint32_t reason; /* VM_EVENT_REASON_* */
+ uint32_t vcpu_id;
+ uint16_t altp2m_idx; /* may be used during request and response */
+ uint16_t _pad[3];
+
+ union {
+ struct vm_event_paging mem_paging;
+ struct vm_event_sharing mem_sharing;
+ struct vm_event_mem_access mem_access;
+ struct vm_event_write_ctrlreg write_ctrlreg;
+ struct vm_event_mov_to_msr mov_to_msr;
+ struct vm_event_debug software_breakpoint;
+ struct vm_event_debug singlestep;
+ } u;
+
+ union {
+ union {
+ struct vm_event_regs_x86 x86;
+ } regs;
+
+ struct vm_event_emul_read_data emul_read_data;
+ } data;
+} vm_event_request_t, vm_event_response_t;
+
+DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+#endif /* _XEN_PUBLIC_VM_EVENT_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
Property changes on: trunk/sys/xen/interface/vm_event.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
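
For illustration, a monitor application answering a mem_access request from the new vm_event interface would fill a response along these lines before pushing it back on the vm_event ring; the ring plumbing built by DEFINE_RING_TYPES is omitted and this sketch is not part of the patch:

/* Sketch only: emulate the faulting instruction and resume the paused vCPU. */
static void answer_mem_access(const vm_event_request_t *req,
                              vm_event_response_t *rsp)
{
    memset(rsp, 0, sizeof(*rsp));
    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason  = req->reason;                      /* VM_EVENT_REASON_MEM_ACCESS */
    rsp->u.mem_access.gfn = req->u.mem_access.gfn;

    /* VCPU_PAUSED in a response means "unpause"; EMULATE lets the guest make
     * progress without relaxing the page access restrictions. */
    rsp->flags = VM_EVENT_FLAG_VCPU_PAUSED | VM_EVENT_FLAG_EMULATE;
}
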
Modified: trunk/sys/xen/interface/xen-compat.h
===================================================================
--- trunk/sys/xen/interface/xen-compat.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/xen-compat.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -28,14 +28,17 @@
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040200
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040600
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
#elif !defined(__XEN_INTERFACE_VERSION__)
-/* Guests which do not specify a version get the legacy interface. */
-#define __XEN_INTERFACE_VERSION__ 0x00000000
+/*
+ * The interface version is not set if and only if xen/xen-os.h is not
+ * included.
+ */
+#error "Please include xen/xen-os.h"
#endif
#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
Modified: trunk/sys/xen/interface/xen.h
===================================================================
--- trunk/sys/xen/interface/xen.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/xen.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -32,9 +32,7 @@
#if defined(__i386__) || defined(__x86_64__)
#include "arch-x86/xen.h"
-#elif defined(__ia64__)
-#include "arch-ia64.h"
-#elif defined(__arm__)
+#elif defined(__arm__) || defined (__aarch64__)
#include "arch-arm.h"
#else
#error "Unsupported architecture"
@@ -46,12 +44,15 @@
__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
DEFINE_XEN_GUEST_HANDLE(int);
__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
DEFINE_XEN_GUEST_HANDLE(long);
__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+#endif
DEFINE_XEN_GUEST_HANDLE(void);
DEFINE_XEN_GUEST_HANDLE(uint64_t);
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
+DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#endif
/*
@@ -101,6 +102,7 @@
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
+#define __HYPERVISOR_xenpmu_op 40
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
@@ -160,6 +162,7 @@
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */
#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
+#define VIRQ_XENPMU 13 /* V. PMC interrupt */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@@ -277,15 +280,15 @@
* refer to Intel SDM 10.12. The PAT allows to set the caching attributes of
* pages instead of using MTRRs.
*
- * The PAT MSR is as follow (it is a 64-bit value, each entry is 8 bits):
- * PAT4 PAT0
- * +---+----+----+----+-----+----+----+
- * WC | WC | WB | UC | UC- | WC | WB | <= Linux
- * +---+----+----+----+-----+----+----+
- * WC | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
- * +---+----+----+----+-----+----+----+
- * WC | WP | WC | UC | UC- | WT | WB | <= Xen
- * +---+----+----+----+-----+----+----+
+ * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
+ * PAT4 PAT0
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
+ * +-----+-----+----+----+----+-----+----+----+
+ * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen
+ * +-----+-----+----+----+----+-----+----+----+
*
* The lookup of this index table translates to looking up
* Bit 7, Bit 4, and Bit 3 of val entry:
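
A small illustration of the bit lookup mentioned above (not part of the patch): for a 4K page-table entry the PAT index is formed from bit 7 (PAT), bit 4 (PCD) and bit 3 (PWT), so index 0 selects the right-most column (WB) of the tables shown.

#include <stdint.h>

/* Sketch only: compute which PAT entry a 4K page-table entry selects. */
static unsigned int pte_pat_index(uint64_t pte)
{
    return (unsigned int)((((pte >> 7) & 1) << 2) |   /* PAT */
                          (((pte >> 4) & 1) << 1) |   /* PCD */
                          ((pte >> 3) & 1));          /* PWT */
}
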
@@ -319,41 +322,47 @@
/*
* MMU EXTENDED OPERATIONS
- *
- * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ *
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmuext_op(mmuext_op_t uops[],
+ * ` unsigned int count,
+ * ` unsigned int *pdone,
+ * ` unsigned int foreigndom)
+ */
+/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
* A foreigndom (FD) can be specified (or DOMID_SELF for none).
* Where the FD has some effect, it is described below.
- *
+ *
* cmd: MMUEXT_(UN)PIN_*_TABLE
* mfn: Machine frame number to be (un)pinned as a p.t. page.
* The frame must belong to the FD, if one is specified.
- *
+ *
* cmd: MMUEXT_NEW_BASEPTR
* mfn: Machine frame number of new page-table base to install in MMU.
- *
+ *
* cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
* mfn: Machine frame number of new page-table base to install in MMU
* when in user space.
- *
+ *
* cmd: MMUEXT_TLB_FLUSH_LOCAL
* No additional arguments. Flushes local TLB.
- *
+ *
* cmd: MMUEXT_INVLPG_LOCAL
* linear_addr: Linear address to be flushed from the local TLB.
- *
+ *
* cmd: MMUEXT_TLB_FLUSH_MULTI
* vcpumask: Pointer to bitmap of VCPUs to be flushed.
- *
+ *
* cmd: MMUEXT_INVLPG_MULTI
* linear_addr: Linear address to be flushed.
* vcpumask: Pointer to bitmap of VCPUs to be flushed.
- *
+ *
* cmd: MMUEXT_TLB_FLUSH_ALL
* No additional arguments. Flushes all VCPUs' TLBs.
- *
+ *
* cmd: MMUEXT_INVLPG_ALL
* linear_addr: Linear address to be flushed from all VCPUs' TLBs.
- *
+ *
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
*
@@ -360,7 +369,7 @@
* cmd: MMUEXT_FLUSH_CACHE_GLOBAL
* No additional arguments. Writes back and flushes cache contents
* on all CPUs in the system.
- *
+ *
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
* nr_ents: Number of entries in LDT.
@@ -375,6 +384,7 @@
* cmd: MMUEXT_[UN]MARK_SUPER
* mfn: Machine frame number of head of superpage to be [un]marked.
*/
+/* ` enum mmuext_cmd { */
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
#define MMUEXT_PIN_L3_TABLE 2
@@ -395,10 +405,11 @@
#define MMUEXT_FLUSH_CACHE_GLOBAL 18
#define MMUEXT_MARK_SUPER 19
#define MMUEXT_UNMARK_SUPER 20
+/* ` } */
#ifndef __ASSEMBLY__
struct mmuext_op {
- unsigned int cmd;
+ unsigned int cmd; /* => enum mmuext_cmd */
union {
/* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
* CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
@@ -423,9 +434,24 @@
DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
#endif
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping(unsigned long va, u64 val,
+ * ` enum uvm_flags flags)
+ * `
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, u64 val,
+ * ` enum uvm_flags flags,
+ * ` domid_t domid)
+ * `
+ * ` @va: The virtual address whose mapping we want to change
+ * ` @val: The new page table entry, must contain a machine address
+ * ` @flags: Control TLB flushes
+ */
/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
+/* ` enum uvm_flags { */
#define UVMF_NONE (0UL<<0) /* No flushing at all. */
#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
@@ -433,6 +459,7 @@
#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
+/* ` } */
/*
* Commands to HYPERVISOR_console_io().
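
A brief sketch of how the UVMF_* flags above combine in practice. The HYPERVISOR_update_va_mapping() wrapper follows the calling convention annotated above but is provided by the OS's hypercall glue, so its exact prototype here is an assumption:

/* Sketch only: replace the PTE backing 'va' and flush just that entry from
 * the local TLB. UVMF_MULTI would instead require OR-ing in a pointer to a
 * vCPU bitmap, as noted in the comments above. */
static long set_pte_local(unsigned long va, uint64_t new_pte)
{
    return HYPERVISOR_update_va_mapping(va, new_pte,
                                        UVMF_INVLPG | UVMF_LOCAL);
}

/* Same update, but flush every vCPU's entire TLB afterwards. */
static long set_pte_flush_all(unsigned long va, uint64_t new_pte)
{
    return HYPERVISOR_update_va_mapping(va, new_pte,
                                        UVMF_TLB_FLUSH | UVMF_ALL);
}
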
@@ -462,7 +489,21 @@
/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3
+/*
+ * x86/64 guests: strictly hide M2P from user mode.
+ * This allows the guest to control respective hypervisor behavior:
+ * - when not set, L4 tables get created with the respective slot blank,
+ * and whenever the L4 table gets used as a kernel one the missing
+ * mapping gets inserted,
+ * - when set, L4 tables get created with the respective slot initialized
+ * as before, and whenever the L4 table gets used as a user one the
+ * mapping gets zapped.
+ */
+#define VMASST_TYPE_m2p_strict 32
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
#define MAX_VMASST_TYPE 3
+#endif
#ifndef __ASSEMBLY__
@@ -515,21 +556,28 @@
DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
/*
- * Send an array of these to HYPERVISOR_multicall().
- * NB. The fields are natural register size for this architecture.
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_multicall(multicall_entry_t call_list[],
+ * ` uint32_t nr_calls);
+ *
+ * NB. The fields are logically the natural register size for this
+ * architecture. In cases where xen_ulong_t is larger than this then
+ * any unused bits in the upper portion must be zero.
*/
struct multicall_entry {
- unsigned long op, result;
- unsigned long args[6];
+ xen_ulong_t op, result;
+ xen_ulong_t args[6];
};
typedef struct multicall_entry multicall_entry_t;
DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
/*
- * Event channel endpoints per domain:
+ * Event channel endpoints per domain (when using the 2-level ABI):
* 1024 if a long is 32 bits; 4096 if a long is 64 bits.
*/
-#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
+#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS
+#endif
struct vcpu_time_info {
/*
@@ -585,8 +633,12 @@
* to block: this avoids wakeup-waiting races.
*/
uint8_t evtchn_upcall_pending;
+#ifdef XEN_HAVE_PV_UPCALL_MASK
uint8_t evtchn_upcall_mask;
- unsigned long evtchn_pending_sel;
+#else /* XEN_HAVE_PV_UPCALL_MASK */
+ uint8_t pad0;
+#endif /* XEN_HAVE_PV_UPCALL_MASK */
+ xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch;
struct vcpu_time_info time;
}; /* 64 bytes (x86) */
@@ -595,6 +647,7 @@
#endif
/*
+ * `incontents 200 startofday_shared Start-of-day shared data structure
* Xen/kernel shared data -- pointer provided in start_info.
*
* This structure is defined to be both smaller than a page, and the
@@ -636,8 +689,8 @@
* per-vcpu selector word to be set. Each bit in the selector covers a
* 'C long' in the PENDING bitfield array.
*/
- unsigned long evtchn_pending[sizeof(unsigned long) * 8];
- unsigned long evtchn_mask[sizeof(unsigned long) * 8];
+ xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
+ xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
/*
* Wallclock time: updated only by control software. Guests should base
@@ -646,6 +699,12 @@
uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
+#if !defined(__i386__)
+ uint32_t wc_sec_hi;
+# define xen_wc_sec_hi wc_sec_hi
+#elif !defined(__XEN__) && !defined(__XEN_TOOLS__)
+# define xen_wc_sec_hi arch.wc_sec_hi
+#endif
struct arch_shared_info arch;
@@ -655,30 +714,43 @@
#endif
/*
- * Start-of-day memory layout:
+ * `incontents 200 startofday Start-of-day memory layout
+ *
* 1. The domain is started within contiguous virtual-memory region.
* 2. The contiguous region ends on an aligned 4MB boundary.
* 3. This the order of bootstrap elements in the initial virtual region:
* a. relocated kernel image
* b. initial ram disk [mod_start, mod_len]
+ * (may be omitted)
* c. list of allocated page frames [mfn_list, nr_pages]
* (unless relocated due to XEN_ELFNOTE_INIT_P2M)
* d. start_info_t structure [register ESI (x86)]
- * e. bootstrap page tables [pt_base, CR3 (x86)]
- * f. bootstrap stack [register ESP (x86)]
+ * in case of dom0 this page contains the console info, too
+ * e. unless dom0: xenstore ring page
+ * f. unless dom0: console ring page
+ * g. bootstrap page tables [pt_base and CR3 (x86)]
+ * h. bootstrap stack [register ESP (x86)]
* 4. Bootstrap elements are packed together, but each is 4kB-aligned.
- * 5. The initial ram disk may be omitted.
- * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
+ * 5. The list of page frames forms a contiguous 'pseudo-physical' memory
* layout for the domain. In particular, the bootstrap virtual-memory
* region is a 1:1 mapping to the first section of the pseudo-physical map.
- * 7. All bootstrap elements are mapped read-writable for the guest OS. The
+ * 6. All bootstrap elements are mapped read-writable for the guest OS. The
* only exception is the bootstrap page table, which is mapped read-only.
- * 8. There is guaranteed to be at least 512kB padding after the final
+ * 7. There is guaranteed to be at least 512kB padding after the final
* bootstrap element. If necessary, the bootstrap virtual region is
* extended by an extra 4MB to ensure this.
+ *
+ * Note: Prior to 25833:bb85bbccb1c9. ("x86/32-on-64 adjust Dom0 initial page
+ * table layout") a bug caused the pt_base (3.g above) and cr3 to not point
+ * to the start of the guest page tables (it was offset by two pages).
+ * This only manifested itself on 32-on-64 dom0 kernels and not 32-on-64 domU
+ * or 64-bit kernels of any colour. The page tables for a 32-on-64 dom0 got
+ * allocated in the order: 'first L1','first L2', 'first L3', so the offset
+ * to the page table base is by two pages back. The initial domain if it is
+ * 32-bit and runs under a 64-bit hypervisor should _NOT_ use two of the
+ * pages preceding pt_base and mark them as reserved/unused.
*/
-
-#define MAX_GUEST_CMDLINE 1024
+#ifdef XEN_HAVE_PV_GUEST_ENTRY
struct start_info {
/* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
char magic[32]; /* "xen-<version>-<platform>". */
@@ -705,6 +777,7 @@
/* (PFN of pre-loaded module if */
/* SIF_MOD_START_PFN set in flags). */
unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
+#define MAX_GUEST_CMDLINE 1024
int8_t cmd_line[MAX_GUEST_CMDLINE];
/* The pfn range here covers both page table and p->m table frames. */
unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */
@@ -717,6 +790,7 @@
#define console_mfn console.domU.mfn
#define console_evtchn console.domU.evtchn
#endif
+#endif /* XEN_HAVE_PV_GUEST_ENTRY */
/* These flags are passed in the 'flags' field of start_info_t. */
#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
@@ -723,6 +797,8 @@
#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
+ /* P->M making the 3 level tree obsolete? */
#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
/*
@@ -750,7 +826,14 @@
/* Unused, must be zero */
uint32_t pad;
};
-
+/*
+ * `incontents 200 startofday_dom0_console Dom0_console
+ *
+ * The console structure in start_info.console.dom0
+ *
+ * This structure includes a variety of information required to
+ * have a working VGA/VESA console.
+ */
typedef struct dom0_vga_console_info {
uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
#define XEN_VGATYPE_TEXT_MODE_3 0x03
@@ -815,6 +898,9 @@
/* Default definitions for macros used by domctl/sysctl. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#ifndef int64_aligned_t
+#define int64_aligned_t int64_t
+#endif
#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif
@@ -823,9 +909,9 @@
#endif
#ifndef __ASSEMBLY__
-struct xenctl_cpumap {
+struct xenctl_bitmap {
XEN_GUEST_HANDLE_64(uint8) bitmap;
- uint32_t nr_cpus;
+ uint32_t nr_bits;
};
#endif
@@ -836,7 +922,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
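
To illustrate the multicall convention annotated earlier in this file (entry fields are now xen_ulong_t, with each call's status written back per entry), a one-entry batch looks roughly like this. HYPERVISOR_multicall() is the OS-provided wrapper and the mmuext batch it wraps is only an example:

/* Sketch only: queue one mmuext_op hypercall through HYPERVISOR_multicall.
 * Real callers queue several entries to amortise the trap into Xen. */
static long multicall_sketch(struct mmuext_op *ops, unsigned int nr)
{
    multicall_entry_t calls[1];
    long rc;

    memset(calls, 0, sizeof(calls));
    calls[0].op      = __HYPERVISOR_mmuext_op;
    calls[0].args[0] = (xen_ulong_t)(unsigned long)ops;  /* uops[] */
    calls[0].args[1] = nr;                                /* count */
    calls[0].args[2] = 0;                                 /* pdone == NULL */
    calls[0].args[3] = DOMID_SELF;                        /* foreigndom */

    rc = HYPERVISOR_multicall(calls, 1);
    /* On success each entry's 'result' field holds that call's return value. */
    return rc ? rc : (long)calls[0].result;
}
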
Modified: trunk/sys/xen/interface/xenoprof.h
===================================================================
--- trunk/sys/xen/interface/xenoprof.h 2020-02-08 19:27:58 UTC (rev 12305)
+++ trunk/sys/xen/interface/xenoprof.h 2020-02-08 19:28:08 UTC (rev 12306)
@@ -145,7 +145,7 @@
/*
* Local variables:
* mode: C
- * c-set-style: "BSD"
+ * c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil