[Midnightbsd-cvs] src [9990] trunk/sys/ia64: sync with freebsd

laffer1 at midnightbsd.org laffer1 at midnightbsd.org
Sat May 26 18:00:13 EDT 2018


Revision: 9990
          http://svnweb.midnightbsd.org/src/?rev=9990
Author:   laffer1
Date:     2018-05-26 18:00:12 -0400 (Sat, 26 May 2018)
Log Message:
-----------
sync with freebsd

Added Paths:
-----------
    trunk/sys/ia64/
    trunk/sys/ia64/acpica/
    trunk/sys/ia64/acpica/OsdEnvironment.c
    trunk/sys/ia64/acpica/acpi_machdep.c
    trunk/sys/ia64/acpica/acpi_wakeup.c
    trunk/sys/ia64/acpica/madt.c
    trunk/sys/ia64/compile/
    trunk/sys/ia64/conf/
    trunk/sys/ia64/conf/DEFAULTS
    trunk/sys/ia64/conf/GENERIC
    trunk/sys/ia64/conf/GENERIC.hints
    trunk/sys/ia64/conf/Makefile
    trunk/sys/ia64/conf/NOTES
    trunk/sys/ia64/disasm/
    trunk/sys/ia64/disasm/disasm.h
    trunk/sys/ia64/disasm/disasm_decode.c
    trunk/sys/ia64/disasm/disasm_extract.c
    trunk/sys/ia64/disasm/disasm_format.c
    trunk/sys/ia64/disasm/disasm_int.h
    trunk/sys/ia64/ia32/
    trunk/sys/ia64/ia32/ia32_misc.c
    trunk/sys/ia64/ia32/ia32_reg.c
    trunk/sys/ia64/ia32/ia32_signal.c
    trunk/sys/ia64/ia32/ia32_trap.c
    trunk/sys/ia64/ia64/
    trunk/sys/ia64/ia64/autoconf.c
    trunk/sys/ia64/ia64/bus_machdep.c
    trunk/sys/ia64/ia64/busdma_machdep.c
    trunk/sys/ia64/ia64/clock.c
    trunk/sys/ia64/ia64/context.S
    trunk/sys/ia64/ia64/db_machdep.c
    trunk/sys/ia64/ia64/dump_machdep.c
    trunk/sys/ia64/ia64/efi.c
    trunk/sys/ia64/ia64/elf_machdep.c
    trunk/sys/ia64/ia64/emulate.c
    trunk/sys/ia64/ia64/exception.S
    trunk/sys/ia64/ia64/gdb_machdep.c
    trunk/sys/ia64/ia64/genassym.c
    trunk/sys/ia64/ia64/highfp.c
    trunk/sys/ia64/ia64/in_cksum.c
    trunk/sys/ia64/ia64/interrupt.c
    trunk/sys/ia64/ia64/iodev_machdep.c
    trunk/sys/ia64/ia64/locore.S
    trunk/sys/ia64/ia64/machdep.c
    trunk/sys/ia64/ia64/mca.c
    trunk/sys/ia64/ia64/mem.c
    trunk/sys/ia64/ia64/mp_locore.S
    trunk/sys/ia64/ia64/mp_machdep.c
    trunk/sys/ia64/ia64/nexus.c
    trunk/sys/ia64/ia64/pal.S
    trunk/sys/ia64/ia64/physical.S
    trunk/sys/ia64/ia64/physmem.c
    trunk/sys/ia64/ia64/pmap.c
    trunk/sys/ia64/ia64/ptrace_machdep.c
    trunk/sys/ia64/ia64/sal.c
    trunk/sys/ia64/ia64/sapic.c
    trunk/sys/ia64/ia64/setjmp.S
    trunk/sys/ia64/ia64/stack_machdep.c
    trunk/sys/ia64/ia64/support.S
    trunk/sys/ia64/ia64/sys_machdep.c
    trunk/sys/ia64/ia64/syscall.S
    trunk/sys/ia64/ia64/trap.c
    trunk/sys/ia64/ia64/uio_machdep.c
    trunk/sys/ia64/ia64/uma_machdep.c
    trunk/sys/ia64/ia64/unaligned.c
    trunk/sys/ia64/ia64/unwind.c
    trunk/sys/ia64/ia64/vm_machdep.c
    trunk/sys/ia64/ia64/xtrace.c
    trunk/sys/ia64/include/
    trunk/sys/ia64/include/_align.h
    trunk/sys/ia64/include/_bus.h
    trunk/sys/ia64/include/_inttypes.h
    trunk/sys/ia64/include/_limits.h
    trunk/sys/ia64/include/_regset.h
    trunk/sys/ia64/include/_stdint.h
    trunk/sys/ia64/include/_types.h
    trunk/sys/ia64/include/acpica_machdep.h
    trunk/sys/ia64/include/asm.h
    trunk/sys/ia64/include/atomic.h
    trunk/sys/ia64/include/bootinfo.h
    trunk/sys/ia64/include/bus.h
    trunk/sys/ia64/include/bus_dma.h
    trunk/sys/ia64/include/clock.h
    trunk/sys/ia64/include/counter.h
    trunk/sys/ia64/include/cpu.h
    trunk/sys/ia64/include/cpufunc.h
    trunk/sys/ia64/include/db_machdep.h
    trunk/sys/ia64/include/dig64.h
    trunk/sys/ia64/include/elf.h
    trunk/sys/ia64/include/endian.h
    trunk/sys/ia64/include/exec.h
    trunk/sys/ia64/include/float.h
    trunk/sys/ia64/include/floatingpoint.h
    trunk/sys/ia64/include/fpu.h
    trunk/sys/ia64/include/frame.h
    trunk/sys/ia64/include/gdb_machdep.h
    trunk/sys/ia64/include/ia64_cpu.h
    trunk/sys/ia64/include/ieee.h
    trunk/sys/ia64/include/ieeefp.h
    trunk/sys/ia64/include/in_cksum.h
    trunk/sys/ia64/include/intr.h
    trunk/sys/ia64/include/intrcnt.h
    trunk/sys/ia64/include/iodev.h
    trunk/sys/ia64/include/kdb.h
    trunk/sys/ia64/include/limits.h
    trunk/sys/ia64/include/mca.h
    trunk/sys/ia64/include/md_var.h
    trunk/sys/ia64/include/memdev.h
    trunk/sys/ia64/include/pal.h
    trunk/sys/ia64/include/param.h
    trunk/sys/ia64/include/pc/
    trunk/sys/ia64/include/pc/display.h
    trunk/sys/ia64/include/pcb.h
    trunk/sys/ia64/include/pci_cfgreg.h
    trunk/sys/ia64/include/pcpu.h
    trunk/sys/ia64/include/pmap.h
    trunk/sys/ia64/include/pmc_mdep.h
    trunk/sys/ia64/include/proc.h
    trunk/sys/ia64/include/profile.h
    trunk/sys/ia64/include/pte.h
    trunk/sys/ia64/include/ptrace.h
    trunk/sys/ia64/include/reg.h
    trunk/sys/ia64/include/reloc.h
    trunk/sys/ia64/include/resource.h
    trunk/sys/ia64/include/runq.h
    trunk/sys/ia64/include/sal.h
    trunk/sys/ia64/include/setjmp.h
    trunk/sys/ia64/include/sf_buf.h
    trunk/sys/ia64/include/sigframe.h
    trunk/sys/ia64/include/signal.h
    trunk/sys/ia64/include/smp.h
    trunk/sys/ia64/include/stdarg.h
    trunk/sys/ia64/include/sysarch.h
    trunk/sys/ia64/include/ucontext.h
    trunk/sys/ia64/include/unwind.h
    trunk/sys/ia64/include/varargs.h
    trunk/sys/ia64/include/vdso.h
    trunk/sys/ia64/include/vm.h
    trunk/sys/ia64/include/vmparam.h
    trunk/sys/ia64/isa/
    trunk/sys/ia64/isa/isa.c
    trunk/sys/ia64/isa/isa_dma.c
    trunk/sys/ia64/pci/
    trunk/sys/ia64/pci/pci_cfgreg.c

Added: trunk/sys/ia64/acpica/OsdEnvironment.c
===================================================================
--- trunk/sys/ia64/acpica/OsdEnvironment.c	                        (rev 0)
+++ trunk/sys/ia64/acpica/OsdEnvironment.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,78 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000,2001 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/acpica/OsdEnvironment.c 270296 2014-08-21 19:51:07Z emaste $");
+
+#include <sys/types.h>
+#include <sys/efi.h>
+#include <sys/sysctl.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+static u_long acpi_root_phys;
+
+SYSCTL_ULONG(_machdep, OID_AUTO, acpi_root, CTLFLAG_RD, &acpi_root_phys, 0,
+    "The physical address of the RSDP");
+
+ACPI_STATUS
+AcpiOsInitialize(void)
+{
+
+	return (AE_OK);
+}
+
+ACPI_STATUS
+AcpiOsTerminate(void)
+{
+
+	return (AE_OK);
+}
+
+static u_long
+acpi_get_root_from_efi(void)
+{
+	static struct uuid acpi_root_uuid = EFI_TABLE_ACPI20;
+	void *acpi_root;
+
+	acpi_root = efi_get_table(&acpi_root_uuid);
+	if (acpi_root != NULL)
+		return (IA64_RR_MASK((uintptr_t)acpi_root));
+
+	return (0);
+}
+
+ACPI_PHYSICAL_ADDRESS
+AcpiOsGetRootPointer(void)
+{
+
+	if (acpi_root_phys == 0)
+		acpi_root_phys = acpi_get_root_from_efi();
+
+	return (acpi_root_phys);
+}


Property changes on: trunk/sys/ia64/acpica/OsdEnvironment.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/acpica/acpi_machdep.c
===================================================================
--- trunk/sys/ia64/acpica/acpi_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/acpica/acpi_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,102 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/acpica/acpi_machdep.c 222769 2011-06-06 19:06:15Z marcel $
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <machine/md_var.h>
+#include <machine/pal.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/actables.h>
+#include <dev/acpica/acpivar.h>
+
+int
+acpi_machdep_init(device_t dev)
+{
+	struct	acpi_softc *sc;
+
+        sc = device_get_softc(dev);
+
+	acpi_install_wakeup_handler(sc);
+
+	return (0);
+}
+
+int
+acpi_machdep_quirks(int *quirks)
+{
+	return (0);
+}
+
+void
+acpi_cpu_c1()
+{
+#ifdef INVARIANTS
+	register_t ie;
+
+	ie = intr_disable();
+	KASSERT(ie == 0, ("%s called with interrupts enabled\n", __func__));
+#endif
+	ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
+	ia64_enable_intr();
+}
+
+void *
+acpi_find_table(const char *sig)
+{
+	ACPI_PHYSICAL_ADDRESS rsdp_ptr;
+	ACPI_TABLE_RSDP *rsdp;
+	ACPI_TABLE_XSDT *xsdt;
+	ACPI_TABLE_HEADER *table;
+	UINT64 addr;
+	u_int i, count;
+
+	if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0)
+		return (NULL);
+
+	rsdp = (ACPI_TABLE_RSDP *)IA64_PHYS_TO_RR7(rsdp_ptr);
+	xsdt = (ACPI_TABLE_XSDT *)IA64_PHYS_TO_RR7(rsdp->XsdtPhysicalAddress);
+
+	count = (UINT64 *)((char *)xsdt + xsdt->Header.Length) -
+	    xsdt->TableOffsetEntry;
+
+	for (i = 0; i < count; i++) {
+		addr = xsdt->TableOffsetEntry[i];
+		table = (ACPI_TABLE_HEADER *)IA64_PHYS_TO_RR7(addr);
+
+		if (strncmp(table->Signature, sig, ACPI_NAME_SIZE) != 0)
+			continue;
+		if (ACPI_FAILURE(AcpiTbChecksum((void *)table, table->Length)))
+			continue;
+
+		return (table);
+	}
+
+	return (NULL);
+}


Property changes on: trunk/sys/ia64/acpica/acpi_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/acpica/acpi_wakeup.c
===================================================================
--- trunk/sys/ia64/acpica/acpi_wakeup.c	                        (rev 0)
+++ trunk/sys/ia64/acpica/acpi_wakeup.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,53 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/acpica/acpi_wakeup.c 236409 2012-06-01 17:07:52Z jkim $
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+int
+acpi_sleep_machdep(struct acpi_softc *sc, int state)
+{
+	return (0);
+}
+
+int
+acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result,
+    int intr_enabled)
+{
+	return (0);
+}
+
+void
+acpi_install_wakeup_handler(struct acpi_softc *sc)
+{
+}


Property changes on: trunk/sys/ia64/acpica/acpi_wakeup.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/acpica/madt.c
===================================================================
--- trunk/sys/ia64/acpica/madt.c	                        (rev 0)
+++ trunk/sys/ia64/acpica/madt.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,248 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/acpica/madt.c 203883 2010-02-14 16:56:24Z marcel $
+ */
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/actables.h>
+
+#include <machine/md_var.h>
+
+struct sapic *sapic_create(int, int, u_int64_t);
+
+static void
+print_entry(ACPI_SUBTABLE_HEADER *entry)
+{
+
+	switch (entry->Type) {
+	case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE: {
+		ACPI_MADT_INTERRUPT_OVERRIDE *iso =
+		    (ACPI_MADT_INTERRUPT_OVERRIDE *)entry;
+		printf("\tInterrupt source override entry\n");
+		printf("\t\tBus=%u, Source=%u, Irq=0x%x\n", iso->Bus,
+		    iso->SourceIrq, iso->GlobalIrq);
+		break;
+	}
+
+	case ACPI_MADT_TYPE_IO_APIC:
+		printf("\tI/O APIC entry\n");
+		break;
+
+	case ACPI_MADT_TYPE_IO_SAPIC: {
+		ACPI_MADT_IO_SAPIC *sapic = (ACPI_MADT_IO_SAPIC *)entry;
+		printf("\tI/O SAPIC entry\n");
+		printf("\t\tId=0x%x, InterruptBase=0x%x, Address=0x%lx\n",
+		    sapic->Id, sapic->GlobalIrqBase, sapic->Address);
+		break;
+	}
+
+	case ACPI_MADT_TYPE_LOCAL_APIC_NMI:
+		printf("\tLocal APIC NMI entry\n");
+		break;
+
+	case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE: {
+		ACPI_MADT_LOCAL_APIC_OVERRIDE *lapic =
+		    (ACPI_MADT_LOCAL_APIC_OVERRIDE *)entry;
+		printf("\tLocal APIC override entry\n");
+		printf("\t\tLocal APIC address=0x%jx\n", lapic->Address);
+		break;
+	}
+
+	case ACPI_MADT_TYPE_LOCAL_SAPIC: {
+		ACPI_MADT_LOCAL_SAPIC *sapic = (ACPI_MADT_LOCAL_SAPIC *)entry;
+		printf("\tLocal SAPIC entry\n");
+		printf("\t\tProcessorId=0x%x, Id=0x%x, Eid=0x%x",
+		    sapic->ProcessorId, sapic->Id, sapic->Eid);
+		if (!(sapic->LapicFlags & ACPI_MADT_ENABLED))
+			printf(" (disabled)");
+		printf("\n");
+		break;
+	}
+
+	case ACPI_MADT_TYPE_NMI_SOURCE:
+		printf("\tNMI entry\n");
+		break;
+
+	case ACPI_MADT_TYPE_INTERRUPT_SOURCE: {
+		ACPI_MADT_INTERRUPT_SOURCE *pis =
+		    (ACPI_MADT_INTERRUPT_SOURCE *)entry;
+		printf("\tPlatform interrupt entry\n");
+		printf("\t\tPolarity=%u, TriggerMode=%u, Id=0x%x, "
+		    "Eid=0x%x, Vector=0x%x, Irq=%d\n",
+		    pis->IntiFlags & ACPI_MADT_POLARITY_MASK,
+		    (pis->IntiFlags & ACPI_MADT_TRIGGER_MASK) >> 2,
+		    pis->Id, pis->Eid, pis->IoSapicVector, pis->GlobalIrq);
+		break;
+	}
+
+	case ACPI_MADT_TYPE_LOCAL_APIC:
+		printf("\tLocal APIC entry\n");
+		break;
+
+	default:
+		printf("\tUnknown type %d entry\n", entry->Type);
+		break;
+	}
+}
+
+void
+ia64_probe_sapics(void)
+{
+	ACPI_PHYSICAL_ADDRESS rsdp_ptr;
+	ACPI_SUBTABLE_HEADER *entry;
+	ACPI_TABLE_MADT *table;
+	ACPI_TABLE_RSDP *rsdp;
+	ACPI_TABLE_XSDT *xsdt;
+	char *end, *p;
+	int t, tables;
+
+	if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0)
+		return;
+
+	rsdp = (ACPI_TABLE_RSDP *)IA64_PHYS_TO_RR7(rsdp_ptr);
+	xsdt = (ACPI_TABLE_XSDT *)IA64_PHYS_TO_RR7(rsdp->XsdtPhysicalAddress);
+
+	tables = (UINT64 *)((char *)xsdt + xsdt->Header.Length) -
+	    xsdt->TableOffsetEntry;
+
+	for (t = 0; t < tables; t++) {
+		table = (ACPI_TABLE_MADT *)
+		    IA64_PHYS_TO_RR7(xsdt->TableOffsetEntry[t]);
+
+		if (bootverbose)
+			printf("Table '%c%c%c%c' at %p\n",
+			    table->Header.Signature[0],
+			    table->Header.Signature[1],
+			    table->Header.Signature[2],
+			    table->Header.Signature[3], table);
+
+		if (strncmp(table->Header.Signature, ACPI_SIG_MADT,
+		    ACPI_NAME_SIZE) != 0 ||
+		    ACPI_FAILURE(AcpiTbChecksum((void *)table,
+		    table->Header.Length)))
+			continue;
+
+		/* Save the address of the processor interrupt block. */
+		if (bootverbose)
+			printf("\tLocal APIC address=0x%x\n", table->Address);
+		ia64_lapic_addr = table->Address;
+
+		end = (char *)table + table->Header.Length;
+		p = (char *)(table + 1);
+		while (p < end) {
+			entry = (ACPI_SUBTABLE_HEADER *)p;
+
+			if (bootverbose)
+				print_entry(entry);
+
+			switch (entry->Type) {
+			case ACPI_MADT_TYPE_IO_SAPIC: {
+				ACPI_MADT_IO_SAPIC *sapic =
+				    (ACPI_MADT_IO_SAPIC *)entry;
+				sapic_create(sapic->Id, sapic->GlobalIrqBase,
+				    sapic->Address);
+				break;
+			}
+
+			case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE: {
+				ACPI_MADT_LOCAL_APIC_OVERRIDE *lapic =
+				    (ACPI_MADT_LOCAL_APIC_OVERRIDE *)entry;
+				ia64_lapic_addr = lapic->Address;
+				break;
+			}
+
+#ifdef SMP
+			case ACPI_MADT_TYPE_LOCAL_SAPIC: {
+				ACPI_MADT_LOCAL_SAPIC *sapic =
+				    (ACPI_MADT_LOCAL_SAPIC *)entry;
+				if (sapic->LapicFlags & ACPI_MADT_ENABLED)
+					cpu_mp_add(sapic->ProcessorId,
+					    sapic->Id, sapic->Eid);
+				break;
+			}
+#endif
+
+			default:
+				break;
+			}
+
+			p += entry->Length;
+		}
+	}
+}
+
+/*
+ * Count the number of local SAPIC entries in the APIC table. Every enabled
+ * entry corresponds to a processor.
+ */
+int
+ia64_count_cpus(void)
+{
+	ACPI_PHYSICAL_ADDRESS rsdp_ptr;
+	ACPI_MADT_LOCAL_SAPIC *entry;
+	ACPI_TABLE_MADT *table;
+	ACPI_TABLE_RSDP *rsdp;
+	ACPI_TABLE_XSDT *xsdt;
+	char *end, *p;
+	int cpus, t, tables;
+
+	if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0)
+		return (0);
+
+	rsdp = (ACPI_TABLE_RSDP *)IA64_PHYS_TO_RR7(rsdp_ptr);
+	xsdt = (ACPI_TABLE_XSDT *)IA64_PHYS_TO_RR7(rsdp->XsdtPhysicalAddress);
+
+	tables = (UINT64 *)((char *)xsdt + xsdt->Header.Length) -
+	    xsdt->TableOffsetEntry;
+
+	cpus = 0;
+
+	for (t = 0; t < tables; t++) {
+		table = (ACPI_TABLE_MADT *)
+		    IA64_PHYS_TO_RR7(xsdt->TableOffsetEntry[t]);
+
+		if (strncmp(table->Header.Signature, ACPI_SIG_MADT,
+		    ACPI_NAME_SIZE) != 0 ||
+		    ACPI_FAILURE(AcpiTbChecksum((void *)table,
+			table->Header.Length)))
+			continue;
+
+		end = (char *)table + table->Header.Length;
+		p = (char *)(table + 1);
+		while (p < end) {
+			entry = (ACPI_MADT_LOCAL_SAPIC *)p;
+
+			if (entry->Header.Type == ACPI_MADT_TYPE_LOCAL_SAPIC &&
+			    (entry->LapicFlags & ACPI_MADT_ENABLED))
+				cpus++;
+
+			p += entry->Header.Length;
+		}
+	}
+
+	return (cpus);
+}


Property changes on: trunk/sys/ia64/acpica/madt.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/conf/DEFAULTS
===================================================================
--- trunk/sys/ia64/conf/DEFAULTS	                        (rev 0)
+++ trunk/sys/ia64/conf/DEFAULTS	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,22 @@
+#
+# DEFAULTS -- Default kernel configuration file for FreeBSD/ia64
+#
+# $FreeBSD: stable/10/sys/ia64/conf/DEFAULTS 232619 2012-03-06 20:01:25Z attilio $
+
+machine		ia64
+
+# Bus support.
+device		acpi		# ACPI support
+
+# Pseudo devices.
+device		io		# I/O & EFI runtime device
+device		mem		# Memory and kernel memory devices
+
+# UART chips on this platform
+device		uart_ns8250
+
+options 	GEOM_PART_BSD
+options 	GEOM_PART_GPT
+options 	GEOM_PART_MBR
+
+options 	NEW_PCIB


Property changes on: trunk/sys/ia64/conf/DEFAULTS
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/ia64/conf/GENERIC
===================================================================
--- trunk/sys/ia64/conf/GENERIC	                        (rev 0)
+++ trunk/sys/ia64/conf/GENERIC	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,202 @@
+#
+# GENERIC -- Generic kernel configuration file for FreeBSD/ia64
+#
+# For more information on this file, please read the handbook section on
+# Kernel Configuration Files:
+#
+#    http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html
+#
+# The handbook is also available locally in /usr/share/doc/handbook
+# if you've installed the doc distribution, otherwise always see the
+# FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the
+# latest information.
+#
+# An exhaustive list of options and more detailed explanations of the
+# device lines is also present in the ../../conf/NOTES and NOTES files.
+# If you are in doubt as to the purpose or necessity of a line, check
+# first in NOTES.
+#
+# For hardware specific information check HARDWARE.TXT
+#
+# $FreeBSD: stable/10/sys/ia64/conf/GENERIC 268813 2014-07-17 22:31:46Z imp $
+
+cpu		ITANIUM2
+ident		GENERIC
+
+makeoptions	DEBUG=-g	# Build kernel with gdb(1) debug symbols
+
+options 	AUDIT		# Security event auditing
+options 	CAPABILITY_MODE	# Capsicum capability mode
+options 	CAPABILITIES	# Capsicum capabilities
+options 	CD9660		# ISO 9660 Filesystem
+options 	COMPAT_FREEBSD7	# Compatible with FreeBSD7
+options 	FFS		# Berkeley Fast Filesystem
+options 	GEOM_LABEL	# Provides labelization
+options 	INCLUDE_CONFIG_FILE     # Include this file in kernel
+options 	INET		# InterNETworking
+options 	INET6		# IPv6 communications protocols
+options 	KTRACE		# ktrace(1) syscall trace support
+options 	MAC		# TrustedBSD MAC Framework
+options 	MD_ROOT		# MD usable as root device
+options 	MSDOSFS		# MSDOS Filesystem
+options 	NFSCL		# New Network Filesystem Client
+options 	NFSLOCKD	# Network Lock Manager
+options 	NFSD		# New Network Filesystem Server
+options 	NFS_ROOT	# NFS usable as root device
+options 	P1003_1B_SEMAPHORES	# POSIX-style semaphores
+options 	PREEMPTION	# Enable kernel thread preemption
+options 	PRINTF_BUFR_SIZE=128  # Printf buffering to limit interspersion
+options 	PROCDESC	# Support for process descriptors
+options 	PROCFS		# Process filesystem (/proc)
+options 	PSEUDOFS	# Pseudo-filesystem framework
+options 	SCHED_ULE	# ULE scheduler
+options 	SCSI_DELAY=5000	# Delay (in ms) before probing SCSI
+options 	SCTP		# Stream Control Transmission Protocol
+options 	SMP		# Symmetric Multi-Processor support
+options 	SOFTUPDATES	# Enable FFS soft updates support
+options 	STACK		# stack(9) support
+options 	SYSVMSG		# SYSV-style message queues
+options 	SYSVSEM		# SYSV-style semaphores
+options 	SYSVSHM		# SYSV-style shared memory
+options 	UFS_ACL		# Support for access control lists
+options 	UFS_DIRHASH	# Hash-based directory lookup scheme
+options 	UFS_GJOURNAL	# Enable gjournal-based UFS journaling
+options 	QUOTA		# Enable disk quotas for UFS
+options 	_KPOSIX_PRIORITY_SCHEDULING	# Posix P1003_1B RT extensions
+
+# Debugging support.  Always need this:
+options 	KDB		# Enable kernel debugger support.
+options 	KDB_TRACE	# Print a stack trace for a panic.
+
+# Various "busses"
+device		miibus		# MII bus support (Ethernet)
+device		pci		# PCI bus support
+device		scbus		# SCSI bus (required for ATA/SCSI)
+device		usb		# USB Bus (required for USB)
+
+# ATA controllers
+device		ahci		# AHCI-compatible SATA controllers
+device		ata		# Legacy ATA/SATA controllers
+device		mvs		# Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA
+device		siis		# SiliconImage SiI3124/SiI3132/SiI3531 SATA
+
+# SCSI Controllers
+device		ahc		# AHA2940 and AIC7xxx devices
+device		ahd		# AHA39320/29320 and AIC79xx devices
+device		hptiop		# Highpoint RocketRaid 3xxx series
+device		isp		# Qlogic family
+device		mpt		# LSI-Logic MPT-Fusion
+device		mps		# LSI-Logic MPT-Fusion 2
+device		mpr		# LSI-Logic MPT-Fusion 3
+device		sym		# NCR/Symbios Logic
+
+# RAID controllers interfaced to the SCSI subsystem
+device		amr		# AMI MegaRAID
+device		ciss		# Compaq Smart RAID 5*
+device		dpt		# DPT Smartcache III, IV
+device		iir		# Intel Integrated RAID
+device		ips		# IBM (Adaptec) ServeRAID
+device		mly		# Mylex AcceleRAID/eXtremeRAID
+device		twa		# 3ware 9000 series PATA/SATA RAID
+
+# ATA/SCSI peripherals
+device		cd		# CD-ROM, DVD-ROM etc.
+device		ch		# Media changer
+device		da		# Direct Access (ie disk)
+device		pass		# Passthrough (direct ATA/SCSI access)
+device		sa		# Sequential Access (ie tape)
+device		ses		# Enclosure Services (SES and SAF-TE)
+#device		ctl		# CAM Target Layer
+
+# RAID controllers
+device		aac		# Adaptec FSA RAID
+device		aacp		# SCSI passthrough for aac (requires CAM)
+device		aacraid		# Adaptec by PMC RAID
+device		ida		# Compaq Smart RAID
+device		mlx		# Mylex DAC960 family
+
+# USB host controllers and peripherals
+options 	USB_DEBUG	# enable debug msgs
+device		ehci		# EHCI host controller
+device		ohci		# OHCI PCI->USB interface
+device		uhci		# UHCI PCI->USB interface
+device		uhid		# Human Interface Devices
+device		ukbd		# Keyboard
+device		ulpt		# Printer
+device		umass		# Disks/Mass storage (need scbus & da)
+device		ums		# Mouse
+
+# PCI Ethernet NICs.
+device		de		# DEC/Intel DC21x4x (``Tulip'')
+device		em		# Intel PRO/1000 Gigabit Ethernet Family
+device		igb		# Intel PRO/1000 PCIE Server Gigabit Family
+device		ixgbe		# Intel PRO/10GbE PCIE Ethernet Family
+device		txp		# 3Com 3cR990 (``Typhoon'')
+
+# PCI Ethernet NICs that use the common MII bus controller code.
+device		ae		# Attansic/Atheros L2 FastEthernet
+device		age		# Attansic/Atheros L1 Gigabit Ethernet
+device		alc		# Atheros AR8131/AR8132 Ethernet
+device		ale		# Atheros AR8121/AR8113/AR8114 Ethernet
+device		bce		# Broadcom BCM5706/BCM5708 Gigabit Ethernet
+device		bfe		# Broadcom BCM440x 10/100 Ethernet
+device		bge		# Broadcom BCM570xx Gigabit Ethernet
+device		et		# Agere ET1310 10/100/Gigabit Ethernet
+device		jme		# JMicron JMC250 Gigabit/JMC260 Fast Ethernet
+device		msk		# Marvell/SysKonnect Yukon II Gigabit Ethernet
+device		nge		# NatSemi DP83820 gigabit Ethernet
+device		fxp		# Intel EtherExpress PRO/100B (82557, 82558)
+device		re		# RealTek 8139C+/8169/8169S/8110S
+device		sf		# Adaptec AIC-6915 (``Starfire'')
+device		sk		# SysKonnect SK-984x & SK-982x gigabit Ethernet
+device		ste		# Sundance ST201 (D-Link DFE-550TX)
+device		stge		# Sundance/Tamarack TC9021 gigabit Ethernet
+device		tx		# SMC EtherPower II (83c170 ``EPIC'')
+device		vge		# VIA VT612x gigabit Ethernet
+device		xl		# 3Com 3c90x ("Boomerang", "Cyclone")
+
+# USB Ethernet
+device		aue		# ADMtek USB Ethernet
+device		axe		# ASIX Electronics USB Ethernet
+device		cdce		# Generic USB over Ethernet
+device		cue		# CATC USB Ethernet
+device		kue		# Kawasaki LSI USB Ethernet
+device		rue		# RealTek RTL8150 USB Ethernet
+device		udav		# Davicom DM9601E USB
+
+# USB Serial
+device		uark		# Technologies ARK3116 based serial adapters
+device		ubsa		# Belkin F5U103 and compatible serial adapters
+device		uftdi		# For FTDI usb serial adapters
+device		uipaq		# Some WinCE based devices
+device		uplcom		# Prolific PL-2303 serial adapters
+device		uslcom		# SI Labs CP2101/CP2102 serial adapters
+device		uvisor		# Visor and Palm devices
+device		uvscom		# USB serial support for DDI pocket's PHS
+
+# Wireless NIC cards.
+# The wlan(4) module assumes this, so just define it so it
+# at least correctly loads.
+options 	IEEE80211_SUPPORT_MESH
+
+# The ath(4) and ath_hal(4) code requires this.  The module currently
+# builds everything including AR5416 (and later 11n NIC) support.
+options 	AH_SUPPORT_AR5416
+
+# Various (pseudo) devices
+device		ether		# Ethernet support
+device		faith		# IPv6-to-IPv4 relaying (translation)
+device		gif		# IPv6 and IPv4 tunneling
+device		loop		# Network loopback
+device		md		# Memory "disks"
+device		puc		# Multi I/O cards and multi-channel UARTs
+device		random		# Entropy device
+device		tun		# Packet tunnel.
+device		uart		# Serial port (UART)
+device		vlan		# 802.1Q VLAN support
+device		firmware	# firmware assist module
+
+# The `bpf' device enables the Berkeley Packet Filter.
+# Be aware of the administrative consequences of enabling this!
+# Note that 'bpf' is required for DHCP.
+device		bpf		# Berkeley packet filter


Property changes on: trunk/sys/ia64/conf/GENERIC
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+conf/DEFAULTS
\ No newline at end of property
Added: trunk/sys/ia64/conf/GENERIC.hints
===================================================================
--- trunk/sys/ia64/conf/GENERIC.hints	                        (rev 0)
+++ trunk/sys/ia64/conf/GENERIC.hints	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,2 @@
+# $FreeBSD: stable/10/sys/ia64/conf/GENERIC.hints 137708 2004-11-14 23:42:48Z marcel $
+hw.uart.console="io:0x3f8"


Property changes on: trunk/sys/ia64/conf/GENERIC.hints
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+conf/DEFAULTS
\ No newline at end of property
Added: trunk/sys/ia64/conf/Makefile
===================================================================
--- trunk/sys/ia64/conf/Makefile	                        (rev 0)
+++ trunk/sys/ia64/conf/Makefile	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,6 @@
+# $MidnightBSD$
+# $FreeBSD: stable/10/sys/ia64/conf/Makefile 201813 2010-01-08 18:57:31Z bz $
+
+TARGET=ia64
+
+.include "${.CURDIR}/../../conf/makeLINT.mk"


Property changes on: trunk/sys/ia64/conf/Makefile
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/conf/NOTES
===================================================================
--- trunk/sys/ia64/conf/NOTES	                        (rev 0)
+++ trunk/sys/ia64/conf/NOTES	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,105 @@
+# $FreeBSD: stable/10/sys/ia64/conf/NOTES 239376 2012-08-18 22:59:06Z marcel $
+#
+# This file contains machine dependent kernel configuration notes.  For
+# machine independent notes, look in /sys/conf/NOTES.
+
+# directive: cpu
+# You must specify at least one CPU (the one you intend to run on). Deleting
+# the support for CPUs you don't need to use may make parts of the system run
+# faster. There's currently no special code for the different CPUs. Note also
+# that the cpu declares the family. We may need to add support for specifying
+# particular models.
+cpu		ITANIUM
+cpu		ITANIUM2
+
+# option: COMPAT_FREEBSD32
+# This option enables the support for execution of i386 (32-bit) programs on
+# ia64. It is based on the ia32 emulation in the processor.
+options 	COMPAT_FREEBSD32
+
+# option: LOG2_ID_PAGE_SIZE
+# Specify the log2 size of the identity (direct) mappings in regions 6 and 7
+# of the virtual address space.
+options 	LOG2_ID_PAGE_SIZE=27		# 128M
+
+# option: LOG2_PAGE_SIZE
+# Specify the log2 size of the page to be used for virtual memory management.
+# The page size being equal to 1<<LOG2_PAGE_SIZE.
+options 	LOG2_PAGE_SIZE=14		# 16K
+
+# option: UWX_TRACE_ENABLE
+# Build the unwinder with tracing support. This option is used to debug the
+# unwinder itself and the glue around it.
+options 	UWX_TRACE_ENABLE
+
+# MI options
+options 	ACPI_DEBUG
+options 	KSTACK_PAGES=3
+
+device		acpi
+
+device		agp
+device		isa
+device		pci
+
+# PS/2 mouse
+device		psm
+hint.psm.0.at="atkbdc"
+hint.psm.0.irq="12"
+
+# Options for psm:
+options 	PSM_HOOKRESUME		#hook the system resume event, useful
+					#for some laptops
+options 	PSM_RESETAFTERSUSPEND	#reset the device at the resume event
+
+# The keyboard controller; it controls the keyboard and the PS/2 mouse.
+device		atkbdc
+hint.atkbdc.0.at="isa"
+hint.atkbdc.0.port="0x060"
+
+# The AT keyboard
+device		atkbd
+hint.atkbd.0.at="atkbdc"
+hint.atkbd.0.irq="1"
+
+# Options for atkbd:
+options 	ATKBD_DFLT_KEYMAP	# specify the built-in keymap
+makeoptions	ATKBD_DFLT_KEYMAP=jp.106
+
+# `flags' for atkbd:
+#       0x01    Force detection of keyboard, else we always assume a keyboard
+#       0x02    Don't reset keyboard, useful for some newer ThinkPads
+#	0x03	Force detection and avoid reset, might help with certain
+#		dockingstations
+#       0x04    Old-style (XT) keyboard support, useful for older ThinkPads
+
+# Video card driver for VGA adapters.
+device		vga
+hint.vga.0.at="isa"
+
+# Options for vga:
+# Try the following option if the mouse pointer is not drawn correctly
+# or font does not seem to be loaded properly.  May cause flicker on
+# some systems.
+options 	VGA_ALT_SEQACCESS
+
+# If you can dispense with some vga driver features, you may want to
+# use the following options to save some memory.
+#options 	VGA_NO_FONT_LOADING	# don't save/load font
+#options 	VGA_NO_MODE_CHANGE	# don't change video modes
+
+# Older video cards may require this option for proper operation.
+options 	VGA_SLOW_IOACCESS	# do byte-wide i/o's to TS and GDC regs
+
+# The following option probably won't work with the LCD displays.
+options 	VGA_WIDTH90		# support 90 column modes
+
+# Debugging.
+options 	VGA_DEBUG
+
+# AGP debugging.
+options 	AGP_DEBUG
+
+# The following devices are not supported.
+nodevice	fdc
+nooption	FDC_DEBUG


Property changes on: trunk/sys/ia64/conf/NOTES
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+conf/DEFAULTS
\ No newline at end of property
Added: trunk/sys/ia64/disasm/disasm.h
===================================================================
--- trunk/sys/ia64/disasm/disasm.h	                        (rev 0)
+++ trunk/sys/ia64/disasm/disasm.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,329 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000-2006 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/disasm/disasm.h 172689 2007-10-16 02:49:40Z marcel $
+ */
+
+#ifndef _DISASM_H_
+#define	_DISASM_H_
+
+#ifndef _DISASM_INT_H_
+#define	ASM_ADDITIONAL_OPCODES		ASM_OP_NUMBER_OF_OPCODES
+#endif
+
+/* Application registers. */
+#define	AR_K0		0
+#define	AR_K1		1
+#define	AR_K2		2
+#define	AR_K3		3
+#define	AR_K4		4
+#define	AR_K5		5
+#define	AR_K6		6
+#define	AR_K7		7
+#define	AR_RSC		16
+#define	AR_BSP		17
+#define	AR_BSPSTORE	18
+#define	AR_RNAT		19
+#define	AR_FCR		21
+#define	AR_EFLAG	24
+#define	AR_CSD		25
+#define	AR_SSD		26
+#define	AR_CFLG		27
+#define	AR_FSR		28
+#define	AR_FIR		29
+#define	AR_FDR		30
+#define	AR_CCV		32
+#define	AR_UNAT		36
+#define	AR_FPSR		40
+#define	AR_ITC		44
+#define	AR_PFS		64
+#define	AR_LC		65
+#define	AR_EC		66
+
+/* Control registers. */
+#define	CR_DCR		0
+#define	CR_ITM		1
+#define	CR_IVA		2
+#define	CR_PTA		8
+#define	CR_IPSR		16
+#define	CR_ISR		17
+#define	CR_IIP		19
+#define	CR_IFA		20
+#define	CR_ITIR		21
+#define	CR_IIPA		22
+#define	CR_IFS		23
+#define	CR_IIM		24
+#define	CR_IHA		25
+#define	CR_LID		64
+#define	CR_IVR		65
+#define	CR_TPR		66
+#define	CR_EOI		67
+#define	CR_IRR0		68
+#define	CR_IRR1		69
+#define	CR_IRR2		70
+#define	CR_IRR3		71
+#define	CR_ITV		72
+#define	CR_PMV		73
+#define	CR_CMCV		74
+#define	CR_LRR0		80
+#define	CR_LRR1		81
+
+enum asm_cmpltr_class {
+	ASM_CC_NONE,
+	ASM_CC_ACLR,
+	ASM_CC_BSW, ASM_CC_BTYPE, ASM_CC_BWH,
+	ASM_CC_CHK, ASM_CC_CLRRRB, ASM_CC_CREL, ASM_CC_CTYPE,
+	ASM_CC_DEP, ASM_CC_DH,
+	ASM_CC_FC, ASM_CC_FCREL, ASM_CC_FCTYPE, ASM_CC_FCVT, ASM_CC_FLDTYPE,
+	ASM_CC_FMERGE, ASM_CC_FREL, ASM_CC_FSWAP,
+	ASM_CC_GETF,
+	ASM_CC_IH, ASM_CC_INVALA, ASM_CC_IPWH, ASM_CC_ITC, ASM_CC_ITR,
+	ASM_CC_LDHINT, ASM_CC_LDTYPE, ASM_CC_LFETCH, ASM_CC_LFHINT,
+	ASM_CC_LFTYPE, ASM_CC_LR,
+	ASM_CC_MF, ASM_CC_MOV, ASM_CC_MWH,
+	ASM_CC_PAVG, ASM_CC_PC, ASM_CC_PH, ASM_CC_PREL, ASM_CC_PRTYPE,
+	ASM_CC_PTC, ASM_CC_PTR, ASM_CC_PVEC,
+	ASM_CC_SAT, ASM_CC_SEM, ASM_CC_SETF, ASM_CC_SF, ASM_CC_SRLZ,
+	ASM_CC_STHINT, ASM_CC_STTYPE, ASM_CC_SYNC,
+	ASM_CC_RW,
+	ASM_CC_TREL, ASM_CC_TRUNC,
+	ASM_CC_UNIT, ASM_CC_UNPACK, ASM_CC_UNS,
+	ASM_CC_VMSW,
+	ASM_CC_XMA
+};
+
+enum asm_cmpltr_type {
+	ASM_CT_NONE,
+	ASM_CT_COND = ASM_CT_NONE,
+
+	ASM_CT_0, ASM_CT_1,
+	ASM_CT_A, ASM_CT_ACQ, ASM_CT_AND,
+	ASM_CT_B, ASM_CT_BIAS,
+	ASM_CT_C_CLR, ASM_CT_C_CLR_ACQ, ASM_CT_C_NC, ASM_CT_CALL,
+	ASM_CT_CEXIT, ASM_CT_CLOOP, ASM_CT_CLR, ASM_CT_CTOP,
+	ASM_CT_D, ASM_CT_DC_DC, ASM_CT_DC_NT, ASM_CT_DPNT, ASM_CT_DPTK,
+	ASM_CT_E, ASM_CT_EQ, ASM_CT_EXCL, ASM_CT_EXIT, ASM_CT_EXP,
+	ASM_CT_F, ASM_CT_FAULT, ASM_CT_FEW, ASM_CT_FILL, ASM_CT_FX, ASM_CT_FXU,
+	ASM_CT_G, ASM_CT_GA, ASM_CT_GE, ASM_CT_GT,
+	ASM_CT_H, ASM_CT_HU,
+	ASM_CT_I, ASM_CT_IA, ASM_CT_IMP,
+	ASM_CT_L, ASM_CT_LE, ASM_CT_LOOP, ASM_CT_LR, ASM_CT_LT, ASM_CT_LTU,
+	ASM_CT_M, ASM_CT_MANY,
+	ASM_CT_NC, ASM_CT_NE, ASM_CT_NEQ, ASM_CT_NL, ASM_CT_NLE, ASM_CT_NLT,
+	ASM_CT_NM, ASM_CT_NR, ASM_CT_NS, ASM_CT_NT_DC, ASM_CT_NT_NT,
+	ASM_CT_NT_TK, ASM_CT_NT1, ASM_CT_NT2, ASM_CT_NTA, ASM_CT_NZ,
+	ASM_CT_OR, ASM_CT_OR_ANDCM, ASM_CT_ORD,
+	ASM_CT_PR,
+	ASM_CT_R, ASM_CT_RAZ, ASM_CT_REL, ASM_CT_RET, ASM_CT_RW,
+	ASM_CT_S, ASM_CT_S0, ASM_CT_S1, ASM_CT_S2, ASM_CT_S3, ASM_CT_SA,
+	ASM_CT_SE, ASM_CT_SIG, ASM_CT_SPILL, ASM_CT_SPNT, ASM_CT_SPTK,
+	ASM_CT_SSS,
+	ASM_CT_TK_DC, ASM_CT_TK_NT, ASM_CT_TK_TK, ASM_CT_TRUNC,
+	ASM_CT_U, ASM_CT_UNC, ASM_CT_UNORD, ASM_CT_USS, ASM_CT_UUS, ASM_CT_UUU,
+	ASM_CT_W, ASM_CT_WEXIT, ASM_CT_WTOP,
+	ASM_CT_X, ASM_CT_XF,
+	ASM_CT_Z,
+};
+
+/* Completer. */
+struct asm_cmpltr {
+	enum asm_cmpltr_class	c_class;
+	enum asm_cmpltr_type	c_type;
+};
+
+/* Operand types. */
+enum asm_oper_type {
+	ASM_OPER_NONE,
+	ASM_OPER_AREG,		/* = ar# */
+	ASM_OPER_BREG,		/* = b# */
+	ASM_OPER_CPUID,		/* = cpuid[r#] */
+	ASM_OPER_CREG,		/* = cr# */
+	ASM_OPER_DBR,		/* = dbr[r#] */
+	ASM_OPER_DISP,		/* IP relative displacement. */
+	ASM_OPER_DTR,		/* = dtr[r#] */
+	ASM_OPER_FREG,		/* = f# */
+	ASM_OPER_GREG,		/* = r# */
+	ASM_OPER_IBR,		/* = ibr[r#] */
+	ASM_OPER_IMM,		/* Immediate */
+	ASM_OPER_IP,		/* = ip */
+	ASM_OPER_ITR,		/* = itr[r#] */
+	ASM_OPER_MEM,		/* = [r#] */
+	ASM_OPER_MSR,		/* = msr[r#] */
+	ASM_OPER_PKR,		/* = pkr[r#] */
+	ASM_OPER_PMC,		/* = pmc[r#] */
+	ASM_OPER_PMD,		/* = pmd[r#] */
+	ASM_OPER_PR,		/* = pr */
+	ASM_OPER_PR_ROT,	/* = pr.rot */
+	ASM_OPER_PREG,		/* = p# */
+	ASM_OPER_PSR,		/* = psr */
+	ASM_OPER_PSR_L,		/* = psr.l */
+	ASM_OPER_PSR_UM,	/* = psr.um */
+	ASM_OPER_RR		/* = rr[r#] */
+};
+
+/* Operand */
+struct asm_oper {
+	enum asm_oper_type	o_type;
+	uint64_t		o_value;
+};
+
+/* Instruction formats. */
+enum asm_fmt {
+	ASM_FMT_NONE,
+	ASM_FMT_A = 0x0100,
+	ASM_FMT_A1,  ASM_FMT_A2,  ASM_FMT_A3,  ASM_FMT_A4,
+	ASM_FMT_A5,  ASM_FMT_A6,  ASM_FMT_A7,  ASM_FMT_A8,
+	ASM_FMT_A9,  ASM_FMT_A10,
+	ASM_FMT_B = 0x0200,
+	ASM_FMT_B1,  ASM_FMT_B2,  ASM_FMT_B3,  ASM_FMT_B4,
+	ASM_FMT_B5,  ASM_FMT_B6,  ASM_FMT_B7,  ASM_FMT_B8,
+	ASM_FMT_B9,
+	ASM_FMT_F = 0x0300,
+	ASM_FMT_F1,  ASM_FMT_F2,  ASM_FMT_F3,  ASM_FMT_F4,
+	ASM_FMT_F5,  ASM_FMT_F6,  ASM_FMT_F7,  ASM_FMT_F8,
+	ASM_FMT_F9,  ASM_FMT_F10, ASM_FMT_F11, ASM_FMT_F12,
+	ASM_FMT_F13, ASM_FMT_F14, ASM_FMT_F15, ASM_FMT_F16,
+	ASM_FMT_I = 0x0400,
+	ASM_FMT_I1,  ASM_FMT_I2,  ASM_FMT_I3,  ASM_FMT_I4,
+	ASM_FMT_I5,  ASM_FMT_I6,  ASM_FMT_I7,  ASM_FMT_I8,
+	ASM_FMT_I9,  ASM_FMT_I10, ASM_FMT_I11, ASM_FMT_I12,
+	ASM_FMT_I13, ASM_FMT_I14, ASM_FMT_I15, ASM_FMT_I16,
+	ASM_FMT_I17, ASM_FMT_I18, ASM_FMT_I19, ASM_FMT_I20,
+	ASM_FMT_I21, ASM_FMT_I22, ASM_FMT_I23, ASM_FMT_I24,
+	ASM_FMT_I25, ASM_FMT_I26, ASM_FMT_I27, ASM_FMT_I28,
+	ASM_FMT_I29, ASM_FMT_I30,
+	ASM_FMT_M = 0x0500,
+	ASM_FMT_M1,  ASM_FMT_M2,  ASM_FMT_M3,  ASM_FMT_M4,
+	ASM_FMT_M5,  ASM_FMT_M6,  ASM_FMT_M7,  ASM_FMT_M8,
+	ASM_FMT_M9,  ASM_FMT_M10, ASM_FMT_M11, ASM_FMT_M12,
+	ASM_FMT_M13, ASM_FMT_M14, ASM_FMT_M15, ASM_FMT_M16,
+	ASM_FMT_M17, ASM_FMT_M18, ASM_FMT_M19, ASM_FMT_M20,
+	ASM_FMT_M21, ASM_FMT_M22, ASM_FMT_M23, ASM_FMT_M24,
+	ASM_FMT_M25, ASM_FMT_M26, ASM_FMT_M27, ASM_FMT_M28,
+	ASM_FMT_M29, ASM_FMT_M30, ASM_FMT_M31, ASM_FMT_M32,
+	ASM_FMT_M33, ASM_FMT_M34, ASM_FMT_M35, ASM_FMT_M36,
+	ASM_FMT_M37, ASM_FMT_M38, ASM_FMT_M39, ASM_FMT_M40,
+	ASM_FMT_M41, ASM_FMT_M42, ASM_FMT_M43, ASM_FMT_M44,
+	ASM_FMT_M45, ASM_FMT_M46, ASM_FMT_M47, ASM_FMT_M48,
+	ASM_FMT_X = 0x0600,
+	ASM_FMT_X1,  ASM_FMT_X2,  ASM_FMT_X3,  ASM_FMT_X4,
+	ASM_FMT_X5
+};
+
+/* Instruction opcodes. */
+enum asm_op {
+	ASM_OP_NONE,
+	ASM_OP_ADD, ASM_OP_ADDL, ASM_OP_ADDP4, ASM_OP_ADDS, ASM_OP_ALLOC,
+	ASM_OP_AND, ASM_OP_ANDCM,
+	ASM_OP_BR, ASM_OP_BREAK, ASM_OP_BRL, ASM_OP_BRP, ASM_OP_BSW,
+	ASM_OP_CHK, ASM_OP_CLRRRB, ASM_OP_CMP, ASM_OP_CMP4, ASM_OP_CMP8XCHG16,
+	ASM_OP_CMPXCHG1, ASM_OP_CMPXCHG2, ASM_OP_CMPXCHG4, ASM_OP_CMPXCHG8,
+	ASM_OP_COVER, ASM_OP_CZX1, ASM_OP_CZX2,
+	ASM_OP_DEP,
+	ASM_OP_EPC, ASM_OP_EXTR,
+	ASM_OP_FAMAX, ASM_OP_FAMIN, ASM_OP_FAND, ASM_OP_FANDCM, ASM_OP_FC,
+	ASM_OP_FCHKF, ASM_OP_FCLASS, ASM_OP_FCLRF, ASM_OP_FCMP, ASM_OP_FCVT,
+	ASM_OP_FETCHADD4, ASM_OP_FETCHADD8, ASM_OP_FLUSHRS, ASM_OP_FMA,
+	ASM_OP_FMAX, ASM_OP_FMERGE, ASM_OP_FMIN, ASM_OP_FMIX, ASM_OP_FMS,
+	ASM_OP_FNMA, ASM_OP_FOR, ASM_OP_FPACK, ASM_OP_FPAMAX, ASM_OP_FPAMIN,
+	ASM_OP_FPCMP, ASM_OP_FPCVT, ASM_OP_FPMA, ASM_OP_FPMAX, ASM_OP_FPMERGE,
+	ASM_OP_FPMIN, ASM_OP_FPMS, ASM_OP_FPNMA, ASM_OP_FPRCPA,
+	ASM_OP_FPRSQRTA, ASM_OP_FRCPA, ASM_OP_FRSQRTA, ASM_OP_FSELECT,
+	ASM_OP_FSETC, ASM_OP_FSWAP, ASM_OP_FSXT, ASM_OP_FWB, ASM_OP_FXOR,
+	ASM_OP_GETF,
+	ASM_OP_HINT,
+	ASM_OP_INVALA, ASM_OP_ITC, ASM_OP_ITR,
+	ASM_OP_LD1, ASM_OP_LD16, ASM_OP_LD2, ASM_OP_LD4, ASM_OP_LD8,
+	ASM_OP_LDF, ASM_OP_LDF8, ASM_OP_LDFD, ASM_OP_LDFE, ASM_OP_LDFP8,
+	ASM_OP_LDFPD, ASM_OP_LDFPS, ASM_OP_LDFS, ASM_OP_LFETCH, ASM_OP_LOADRS,
+	ASM_OP_MF, ASM_OP_MIX1, ASM_OP_MIX2, ASM_OP_MIX4, ASM_OP_MOV,
+	ASM_OP_MOVL, ASM_OP_MUX1, ASM_OP_MUX2,
+	ASM_OP_NOP,
+	ASM_OP_OR,
+	ASM_OP_PACK2, ASM_OP_PACK4, ASM_OP_PADD1, ASM_OP_PADD2, ASM_OP_PADD4,
+	ASM_OP_PAVG1, ASM_OP_PAVG2, ASM_OP_PAVGSUB1, ASM_OP_PAVGSUB2,
+	ASM_OP_PCMP1, ASM_OP_PCMP2, ASM_OP_PCMP4, ASM_OP_PMAX1, ASM_OP_PMAX2,
+	ASM_OP_PMIN1, ASM_OP_PMIN2, ASM_OP_PMPY2, ASM_OP_PMPYSHR2,
+	ASM_OP_POPCNT, ASM_OP_PROBE, ASM_OP_PSAD1, ASM_OP_PSHL2, ASM_OP_PSHL4,
+	ASM_OP_PSHLADD2, ASM_OP_PSHR2, ASM_OP_PSHR4, ASM_OP_PSHRADD2,
+	ASM_OP_PSUB1, ASM_OP_PSUB2, ASM_OP_PSUB4, ASM_OP_PTC, ASM_OP_PTR,
+	ASM_OP_RFI, ASM_OP_RSM, ASM_OP_RUM,
+	ASM_OP_SETF, ASM_OP_SHL, ASM_OP_SHLADD, ASM_OP_SHLADDP4, ASM_OP_SHR,
+	ASM_OP_SHRP, ASM_OP_SRLZ, ASM_OP_SSM, ASM_OP_ST1, ASM_OP_ST16,
+	ASM_OP_ST2, ASM_OP_ST4, ASM_OP_ST8, ASM_OP_STF, ASM_OP_STF8,
+	ASM_OP_STFD, ASM_OP_STFE, ASM_OP_STFS, ASM_OP_SUB, ASM_OP_SUM,
+	ASM_OP_SXT1, ASM_OP_SXT2, ASM_OP_SXT4, ASM_OP_SYNC,
+	ASM_OP_TAK, ASM_OP_TBIT, ASM_OP_TF, ASM_OP_THASH, ASM_OP_TNAT,
+	ASM_OP_TPA, ASM_OP_TTAG,
+	ASM_OP_UNPACK1, ASM_OP_UNPACK2, ASM_OP_UNPACK4,
+	ASM_OP_VMSW,
+	ASM_OP_XCHG1, ASM_OP_XCHG2, ASM_OP_XCHG4, ASM_OP_XCHG8, ASM_OP_XMA,
+	ASM_OP_XOR,
+	ASM_OP_ZXT1, ASM_OP_ZXT2, ASM_OP_ZXT4,
+	/* Additional opcodes used only internally. */
+	ASM_ADDITIONAL_OPCODES
+};
+
+/* Instruction. */
+struct asm_inst {
+	uint64_t		i_bits;
+	struct asm_oper		i_oper[7];
+	struct asm_cmpltr	i_cmpltr[5];
+	enum asm_fmt		i_format;
+	enum asm_op		i_op;
+	int			i_ncmpltrs;
+	int			i_srcidx;
+};
+
+struct asm_bundle {
+	const char		*b_templ;
+	struct asm_inst		b_inst[3];
+};
+
+/* Functional units. */
+enum asm_unit {
+	ASM_UNIT_NONE,
+	ASM_UNIT_A = 0x0100,	/* A unit. */
+	ASM_UNIT_B = 0x0200,	/* B unit. */
+	ASM_UNIT_F = 0x0300,	/* F unit. */
+	ASM_UNIT_I = 0x0400,	/* I unit. */
+	ASM_UNIT_M = 0x0500,	/* M unit. */
+	ASM_UNIT_X = 0x0600	/* X unit. */
+};
+
+#ifdef _DISASM_INT_H_
+int asm_extract(enum asm_op, enum asm_fmt, uint64_t, struct asm_bundle *, int);
+#endif
+
+int asm_decode(uint64_t, struct asm_bundle *);
+
+void asm_completer(const struct asm_cmpltr *, char *);
+void asm_mnemonic(const enum asm_op, char *);
+void asm_operand(const struct asm_oper *, char *, uint64_t);
+void asm_print_bundle(const struct asm_bundle *, uint64_t);
+void asm_print_inst(const struct asm_bundle *, int, uint64_t);
+
+#endif /* _DISASM_H_ */


Property changes on: trunk/sys/ia64/disasm/disasm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/disasm/disasm_decode.c
===================================================================
--- trunk/sys/ia64/disasm/disasm_decode.c	                        (rev 0)
+++ trunk/sys/ia64/disasm/disasm_decode.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,2566 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000-2006 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/disasm/disasm_decode.c 159916 2006-06-24 19:21:11Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <ia64/disasm/disasm_int.h>
+#include <ia64/disasm/disasm.h>
+
+/*
+ * Template names.
+ */
+static const char *asm_templname[] = {
+	"MII", "MII;", "MI;I", "MI;I;", "MLX", "MLX;", 0, 0,
+	"MMI", "MMI;", "M;MI", "M;MI;", "MFI", "MFI;", "MMF", "MMF;",
+	"MIB", "MIB;", "MBB", "MBB;", 0, 0, "BBB", "BBB;",
+	"MMB", "MMB;", 0, 0, "MFB", "MFB;", 0, 0
+};
+
+/*
+ * Decode A-unit instructions.
+ */
+static int
+asm_decodeA(uint64_t bits, struct asm_bundle *b, int slot)
+{
+	enum asm_fmt fmt;
+	enum asm_op op;
+
+	fmt = ASM_FMT_NONE, op = ASM_OP_NONE;
+	switch((int)OPCODE(bits)) {
+	case 0x8:
+		switch (FIELD(bits, 34, 2)) { /* x2a */
+		case 0x0:
+			if (FIELD(bits, 33, 1) == 0) { /* ve */
+				switch (FIELD(bits, 29, 4)) { /* x4 */
+				case 0x0:
+					if (FIELD(bits, 27, 2) <= 1) /* x2b */
+						op = ASM_OP_ADD,
+						    fmt = ASM_FMT_A1;
+					break;
+				case 0x1:
+					if (FIELD(bits, 27, 2) <= 1) /* x2b */
+						op = ASM_OP_SUB,
+						    fmt = ASM_FMT_A1;
+					break;
+				case 0x2:
+					if (FIELD(bits, 27, 2) == 0) /* x2b */
+						op = ASM_OP_ADDP4,
+						    fmt = ASM_FMT_A1;
+					break;
+				case 0x3:
+					switch (FIELD(bits, 27, 2)) { /* x2b */
+					case 0x0:
+						op = ASM_OP_AND,
+						    fmt = ASM_FMT_A1;
+						break;
+					case 0x1:
+						op = ASM_OP_ANDCM,
+						    fmt = ASM_FMT_A1;
+						break;
+					case 0x2:
+						op = ASM_OP_OR,
+						    fmt = ASM_FMT_A1;
+						break;
+					case 0x3:
+						op = ASM_OP_XOR,
+						    fmt = ASM_FMT_A1;
+						break;
+					}
+					break;
+				case 0xB:
+					switch (FIELD(bits, 27, 2)) { /* x2b */
+					case 0x0:
+						op = ASM_OP_AND,
+						    fmt = ASM_FMT_A3;
+						break;
+					case 0x1:
+						op = ASM_OP_ANDCM,
+						    fmt = ASM_FMT_A3;
+						break;
+					case 0x2:
+						op = ASM_OP_OR,
+						    fmt = ASM_FMT_A3;
+						break;
+					case 0x3:
+						op = ASM_OP_XOR,
+						    fmt = ASM_FMT_A3;
+						break;
+					}
+					break;
+				case 0x4:
+					op = ASM_OP_SHLADD, fmt = ASM_FMT_A2;
+					break;
+				case 0x6:
+					op = ASM_OP_SHLADDP4, fmt = ASM_FMT_A2;
+					break;
+				case 0x9:
+					if (FIELD(bits, 27, 2) == 1) /* x2b */
+						op = ASM_OP_SUB,
+						    fmt = ASM_FMT_A3;
+					break;
+				}
+			}
+			break;
+		case 0x1:
+			switch (FIELD(bits, 29, 8)) { /* za + x2a + zb + x4 */
+			case 0x20:
+				switch (FIELD(bits, 27, 2)) { /* x2b */
+				case 0x0:
+					op = ASM_OP_PADD1_, fmt = ASM_FMT_A9;
+					break;
+				case 0x1:
+					op = ASM_OP_PADD1_SSS,
+					    fmt = ASM_FMT_A9;
+					break;
+				case 0x2:
+					op = ASM_OP_PADD1_UUU,
+					    fmt = ASM_FMT_A9;
+					break;
+				case 0x3:
+					op = ASM_OP_PADD1_UUS,
+					    fmt = ASM_FMT_A9;
+					break;
+				}
+				break;
+			case 0x21:
+				switch (FIELD(bits, 27, 2)) { /* x2b */
+				case 0x0:
+					op = ASM_OP_PSUB1_, fmt = ASM_FMT_A9;
+					break;
+				case 0x1:
+					op = ASM_OP_PSUB1_SSS,
+					    fmt = ASM_FMT_A9;
+					break;
+				case 0x2:
+					op = ASM_OP_PSUB1_UUU,
+					    fmt = ASM_FMT_A9;
+					break;
+				case 0x3:
+					op = ASM_OP_PSUB1_UUS,
+					    fmt = ASM_FMT_A9;
+					break;
+				}
+				break;
+			case 0x22:
+				switch (FIELD(bits, 27, 2)) { /* x2b */
+				case 0x2:
+					op = ASM_OP_PAVG1_, fmt = ASM_FMT_A9;
+					break;
+				case 0x3:
+					op = ASM_OP_PAVG1_RAZ,
+					    fmt = ASM_FMT_A9;
+					break;
+				}
+				break;
+			case 0x23:
+				if (FIELD(bits, 27, 2) == 2) /* x2b */
+					op = ASM_OP_PAVGSUB1, fmt = ASM_FMT_A9;
+				break;
+			case 0x29:
+				switch (FIELD(bits, 27, 2)) { /* x2b */
+				case 0x0:
+					op = ASM_OP_PCMP1_EQ, fmt = ASM_FMT_A9;
+					break;
+				case 0x1:
+					op = ASM_OP_PCMP1_GT, fmt = ASM_FMT_A9;
+					break;
+				}
+				break;
+			case 0x30:
+				switch (FIELD(bits, 27, 2)) { /* x2b */
+				case 0x0:
+					op = ASM_OP_PADD2_, fmt = ASM_FMT_A9;
+					break;
+				case 0x1:
+					op = ASM_OP_PADD2_SSS,
+					    fmt = ASM_FMT_A9;
+					break;
+				case 0x2:
+					op = ASM_OP_PADD2_UUU,
+					    fmt = ASM_FMT_A9;
+					break;
+				case 0x3:
+					op = ASM_OP_PADD2_UUS,
+					    fmt = ASM_FMT_A9;
+					break;
+				}
+				break;
+			case 0x31:
+				switch (FIELD(bits, 27, 2)) { /* x2b */
+				case 0x0:
+					op = ASM_OP_PSUB2_, fmt = ASM_FMT_A9;
+					break;
+				case 0x1:
+					op = ASM_OP_PSUB2_SSS,
+					    fmt = ASM_FMT_A9;
+					break;
+				case 0x2:
+					op = ASM_OP_PSUB2_UUU,
+					    fmt = ASM_FMT_A9;
+					break;
+				case 0x3:
+					op = ASM_OP_PSUB2_UUS,
+					    fmt = ASM_FMT_A9;
+					break;
+				}
+				break;
+			case 0x32:
+				switch (FIELD(bits, 27, 2)) { /* x2b */
+				case 0x2:
+					op = ASM_OP_PAVG2_, fmt = ASM_FMT_A9;
+					break;
+				case 0x3:
+					op = ASM_OP_PAVG2_RAZ,
+					    fmt = ASM_FMT_A9;
+					break;
+				}
+				break;
+			case 0x33:
+				if (FIELD(bits, 27, 2) == 2) /* x2b */
+					op = ASM_OP_PAVGSUB2, fmt = ASM_FMT_A9;
+				break;
+			case 0x34:
+				op = ASM_OP_PSHLADD2, fmt = ASM_FMT_A10;
+				break;
+			case 0x36:
+				op = ASM_OP_PSHRADD2, fmt = ASM_FMT_A10;
+				break;
+			case 0x39:
+				switch (FIELD(bits, 27, 2)) { /* x2b */
+				case 0x0:
+					op = ASM_OP_PCMP2_EQ, fmt = ASM_FMT_A9;
+					break;
+				case 0x1:
+					op = ASM_OP_PCMP2_GT, fmt = ASM_FMT_A9;
+					break;
+				}
+				break;
+			case 0xA0:
+				if (FIELD(bits, 27, 2) == 0) /* x2b */
+					op = ASM_OP_PADD4, fmt = ASM_FMT_A9;
+				break;
+			case 0xA1:
+				if (FIELD(bits, 27, 2) == 0) /* x2b */
+					op = ASM_OP_PSUB4, fmt = ASM_FMT_A9;
+				break;
+			case 0xA9:
+				switch (FIELD(bits, 27, 2)) { /* x2b */
+				case 0x0:
+					op = ASM_OP_PCMP4_EQ, fmt = ASM_FMT_A9;
+					break;
+				case 0x1:
+					op = ASM_OP_PCMP4_GT, fmt = ASM_FMT_A9;
+					break;
+				}
+				break;
+			}
+			break;
+		case 0x2:
+			if (FIELD(bits, 33, 1) == 0) /* ve */
+				op = ASM_OP_ADDS, fmt = ASM_FMT_A4;
+			break;
+		case 0x3:
+			if (FIELD(bits, 33, 1) == 0) /* ve */
+				op = ASM_OP_ADDP4, fmt = ASM_FMT_A4;
+			break;
+		}
+		break;
+	case 0x9:
+		op = ASM_OP_ADDL, fmt = ASM_FMT_A5;
+		break;
+	case 0xC: case 0xD: case 0xE:
+		if (FIELD(bits, 12, 1) == 0) { /* c */
+			switch (FIELD(bits, 33, 8)) { /* maj + tb + x2 + ta */
+			case 0xC0:
+				op = ASM_OP_CMP_LT, fmt = ASM_FMT_A6;
+				break;
+			case 0xC1:
+				op = ASM_OP_CMP_EQ_AND, fmt = ASM_FMT_A6;
+				break;
+			case 0xC2:
+				op = ASM_OP_CMP4_LT, fmt = ASM_FMT_A6;
+				break;
+			case 0xC3:
+				op = ASM_OP_CMP4_EQ_AND, fmt = ASM_FMT_A6;
+				break;
+			case 0xC4: case 0xCC:
+				op = ASM_OP_CMP_LT, fmt = ASM_FMT_A8;
+				break;
+			case 0xC5: case 0xCD:
+				op = ASM_OP_CMP_EQ_AND, fmt = ASM_FMT_A8;
+				break;
+			case 0xC6: case 0xCE:
+				op = ASM_OP_CMP4_LT, fmt = ASM_FMT_A8;
+				break;
+			case 0xC7: case 0xCF:
+				op = ASM_OP_CMP4_EQ_AND, fmt = ASM_FMT_A8;
+				break;
+			case 0xC8:
+				op = ASM_OP_CMP_GT_AND, fmt = ASM_FMT_A7;
+				break;
+			case 0xC9:
+				op = ASM_OP_CMP_GE_AND, fmt = ASM_FMT_A7;
+				break;
+			case 0xCA:
+				op = ASM_OP_CMP4_GT_AND, fmt = ASM_FMT_A7;
+				break;
+			case 0xCB:
+				op = ASM_OP_CMP4_GE_AND, fmt = ASM_FMT_A7;
+				break;
+			case 0xD0:
+				op = ASM_OP_CMP_LTU, fmt = ASM_FMT_A6;
+				break;
+			case 0xD1:
+				op = ASM_OP_CMP_EQ_OR, fmt = ASM_FMT_A6;
+				break;
+			case 0xD2:
+				op = ASM_OP_CMP4_LTU, fmt = ASM_FMT_A6;
+				break;
+			case 0xD3:
+				op = ASM_OP_CMP4_EQ_OR, fmt = ASM_FMT_A6;
+				break;
+			case 0xD4: case 0xDC:
+				op = ASM_OP_CMP_LTU, fmt = ASM_FMT_A8;
+				break;
+			case 0xD5: case 0xDD:
+				op = ASM_OP_CMP_EQ_OR, fmt = ASM_FMT_A8;
+				break;
+			case 0xD6: case 0xDE:
+				op = ASM_OP_CMP4_LTU, fmt = ASM_FMT_A8;
+				break;
+			case 0xD7: case 0xDF:
+				op = ASM_OP_CMP4_EQ_OR, fmt = ASM_FMT_A8;
+				break;
+			case 0xD8:
+				op = ASM_OP_CMP_GT_OR, fmt = ASM_FMT_A7;
+				break;
+			case 0xD9:
+				op = ASM_OP_CMP_GE_OR, fmt = ASM_FMT_A7;
+				break;
+			case 0xDA:
+				op = ASM_OP_CMP4_GT_OR, fmt = ASM_FMT_A7;
+				break;
+			case 0xDB:
+				op = ASM_OP_CMP4_GE_OR, fmt = ASM_FMT_A7;
+				break;
+			case 0xE0:
+				op = ASM_OP_CMP_EQ, fmt = ASM_FMT_A6;
+				break;
+			case 0xE1:
+				op = ASM_OP_CMP_EQ_OR_ANDCM, fmt = ASM_FMT_A6;
+				break;
+			case 0xE2:
+				op = ASM_OP_CMP4_EQ, fmt = ASM_FMT_A6;
+				break;
+			case 0xE3:
+				op = ASM_OP_CMP4_EQ_OR_ANDCM, fmt = ASM_FMT_A6;
+				break;
+			case 0xE4: case 0xEC:
+				op = ASM_OP_CMP_EQ, fmt = ASM_FMT_A8;
+				break;
+			case 0xE5: case 0xED:
+				op = ASM_OP_CMP_EQ_OR_ANDCM, fmt = ASM_FMT_A8;
+				break;
+			case 0xE6: case 0xEE:
+				op = ASM_OP_CMP4_EQ, fmt = ASM_FMT_A8;
+				break;
+			case 0xE7: case 0xEF:
+				op = ASM_OP_CMP4_EQ_OR_ANDCM, fmt = ASM_FMT_A8;
+				break;
+			case 0xE8:
+				op = ASM_OP_CMP_GT_OR_ANDCM, fmt = ASM_FMT_A7;
+				break;
+			case 0xE9:
+				op = ASM_OP_CMP_GE_OR_ANDCM, fmt = ASM_FMT_A7;
+				break;
+			case 0xEA:
+				op = ASM_OP_CMP4_GT_OR_ANDCM, fmt = ASM_FMT_A7;
+				break;
+			case 0xEB:
+				op = ASM_OP_CMP4_GE_OR_ANDCM, fmt = ASM_FMT_A7;
+				break;
+			}
+		} else {
+			switch (FIELD(bits, 33, 8)) { /* maj + tb + x2 + ta */
+			case 0xC0:
+				op = ASM_OP_CMP_LT_UNC, fmt = ASM_FMT_A6;
+				break;
+			case 0xC1:
+				op = ASM_OP_CMP_NE_AND, fmt = ASM_FMT_A6;
+				break;
+			case 0xC2:
+				op = ASM_OP_CMP4_LT_UNC, fmt = ASM_FMT_A6;
+				break;
+			case 0xC3:
+				op = ASM_OP_CMP4_NE_AND, fmt = ASM_FMT_A6;
+				break;
+			case 0xC4: case 0xCC:
+				op = ASM_OP_CMP_LT_UNC, fmt = ASM_FMT_A8;
+				break;
+			case 0xC5: case 0xCD:
+				op = ASM_OP_CMP_NE_AND, fmt = ASM_FMT_A8;
+				break;
+			case 0xC6: case 0xCE:
+				op = ASM_OP_CMP4_LT_UNC, fmt = ASM_FMT_A8;
+				break;
+			case 0xC7: case 0xCF:
+				op = ASM_OP_CMP4_NE_AND, fmt = ASM_FMT_A8;
+				break;
+			case 0xC8:
+				op = ASM_OP_CMP_LE_AND, fmt = ASM_FMT_A7;
+				break;
+			case 0xC9:
+				op = ASM_OP_CMP_LT_AND, fmt = ASM_FMT_A7;
+				break;
+			case 0xCA:
+				op = ASM_OP_CMP4_LE_AND, fmt = ASM_FMT_A7;
+				break;
+			case 0xCB:
+				op = ASM_OP_CMP4_LT_AND, fmt = ASM_FMT_A7;
+				break;
+			case 0xD0:
+				op = ASM_OP_CMP_LTU_UNC, fmt = ASM_FMT_A6;
+				break;
+			case 0xD1:
+				op = ASM_OP_CMP_NE_OR, fmt = ASM_FMT_A6;
+				break;
+			case 0xD2:
+				op = ASM_OP_CMP4_LTU_UNC, fmt = ASM_FMT_A6;
+				break;
+			case 0xD3:
+				op = ASM_OP_CMP4_NE_OR, fmt = ASM_FMT_A6;
+				break;
+			case 0xD4: case 0xDC:
+				op = ASM_OP_CMP_LTU_UNC, fmt = ASM_FMT_A8;
+				break;
+			case 0xD5: case 0xDD:
+				op = ASM_OP_CMP_NE_OR, fmt = ASM_FMT_A8;
+				break;
+			case 0xD6: case 0xDE:
+				op = ASM_OP_CMP4_LTU_UNC, fmt = ASM_FMT_A8;
+				break;
+			case 0xD7: case 0xDF:
+				op = ASM_OP_CMP4_NE_OR, fmt = ASM_FMT_A8;
+				break;
+			case 0xD8:
+				op = ASM_OP_CMP_LE_OR, fmt = ASM_FMT_A7;
+				break;
+			case 0xD9:
+				op = ASM_OP_CMP_LT_OR, fmt = ASM_FMT_A7;
+				break;
+			case 0xDA:
+				op = ASM_OP_CMP4_LE_OR, fmt = ASM_FMT_A7;
+				break;
+			case 0xDB:
+				op = ASM_OP_CMP4_LT_OR, fmt = ASM_FMT_A7;
+				break;
+			case 0xE0:
+				op = ASM_OP_CMP_EQ_UNC, fmt = ASM_FMT_A6;
+				break;
+			case 0xE1:
+				op = ASM_OP_CMP_NE_OR_ANDCM, fmt = ASM_FMT_A6;
+				break;
+			case 0xE2:
+				op = ASM_OP_CMP4_EQ_UNC, fmt = ASM_FMT_A6;
+				break;
+			case 0xE3:
+				op = ASM_OP_CMP4_NE_OR_ANDCM, fmt = ASM_FMT_A6;
+				break;
+			case 0xE4: case 0xEC:
+				op = ASM_OP_CMP_EQ_UNC, fmt = ASM_FMT_A8;
+				break;
+			case 0xE5: case 0xED:
+				op = ASM_OP_CMP_NE_OR_ANDCM, fmt = ASM_FMT_A8;
+				break;
+			case 0xE6: case 0xEE:
+				op = ASM_OP_CMP4_EQ_UNC, fmt = ASM_FMT_A8;
+				break;
+			case 0xE7: case 0xEF:
+				op = ASM_OP_CMP4_NE_OR_ANDCM, fmt = ASM_FMT_A8;
+				break;
+			case 0xE8:
+				op = ASM_OP_CMP_LE_OR_ANDCM, fmt = ASM_FMT_A7;
+				break;
+			case 0xE9:
+				op = ASM_OP_CMP_LT_OR_ANDCM, fmt = ASM_FMT_A7;
+				break;
+			case 0xEA:
+				op = ASM_OP_CMP4_LE_OR_ANDCM, fmt = ASM_FMT_A7;
+				break;
+			case 0xEB:
+				op = ASM_OP_CMP4_LT_OR_ANDCM, fmt = ASM_FMT_A7;
+				break;
+			}
+		}
+		break;
+	}
+
+	if (op != ASM_OP_NONE)
+		return (asm_extract(op, fmt, bits, b, slot));
+	return (0);
+}
+
+/*
+ * Decode B-unit (branch unit) instructions.
+ *
+ * 'ip' identifies the bundle and 'slot' selects the instruction within
+ * it (extracted via SLOT()).  The major opcode and its extension fields
+ * (x6, btype) are matched to an (op, fmt) pair which asm_extract() then
+ * uses to pull the operands out of the encoding.  Returns the non-zero
+ * result of asm_extract() on success, or 0 when the encoding does not
+ * correspond to a known instruction.
+ */
+static int
+asm_decodeB(uint64_t ip, struct asm_bundle *b, int slot)
+{
+	uint64_t bits;
+	enum asm_fmt fmt;
+	enum asm_op op;
+
+	bits = SLOT(ip, slot);
+	fmt = ASM_FMT_NONE, op = ASM_OP_NONE;
+
+	switch((int)OPCODE(bits)) {
+	case 0x0:
+		/* Miscellaneous ops (break, cover, rfi, bsw, epc, ...). */
+		switch (FIELD(bits, 27, 6)) { /* x6 */
+		case 0x0:
+			op = ASM_OP_BREAK_B, fmt = ASM_FMT_B9;
+			break;
+		case 0x2:
+			op = ASM_OP_COVER, fmt = ASM_FMT_B8;
+			break;
+		case 0x4:
+			op = ASM_OP_CLRRRB_, fmt = ASM_FMT_B8;
+			break;
+		case 0x5:
+			op = ASM_OP_CLRRRB_PR, fmt = ASM_FMT_B8;
+			break;
+		case 0x8:
+			op = ASM_OP_RFI, fmt = ASM_FMT_B8;
+			break;
+		case 0xC:
+			op = ASM_OP_BSW_0, fmt = ASM_FMT_B8;
+			break;
+		case 0xD:
+			op = ASM_OP_BSW_1, fmt = ASM_FMT_B8;
+			break;
+		case 0x10:
+			op = ASM_OP_EPC, fmt = ASM_FMT_B8;
+			break;
+		case 0x18:
+			op = ASM_OP_VMSW_0, fmt = ASM_FMT_B8;
+			break;
+		case 0x19:
+			op = ASM_OP_VMSW_1, fmt = ASM_FMT_B8;
+			break;
+		case 0x20:
+			/* Indirect branch; btype selects the variant. */
+			switch (FIELD(bits, 6, 3)) { /* btype */
+			case 0x0:
+				op = ASM_OP_BR_COND, fmt = ASM_FMT_B4;
+				break;
+			case 0x1:
+				op = ASM_OP_BR_IA, fmt = ASM_FMT_B4;
+				break;
+			}
+			break;
+		case 0x21:
+			if (FIELD(bits, 6, 3) == 4) /* btype */
+				op = ASM_OP_BR_RET, fmt = ASM_FMT_B4;
+			break;
+		}
+		break;
+	case 0x1:
+		/* Indirect call. */
+		op = ASM_OP_BR_CALL, fmt = ASM_FMT_B5;
+		break;
+	case 0x2:
+		/* Branch-unit nop/hint and branch predict ops. */
+		switch (FIELD(bits, 27, 6)) { /* x6 */
+		case 0x0:
+			op = ASM_OP_NOP_B, fmt = ASM_FMT_B9;
+			break;
+		case 0x1:
+			op = ASM_OP_HINT_B, fmt = ASM_FMT_B9;
+			break;
+		case 0x10:
+			op = ASM_OP_BRP_, fmt = ASM_FMT_B7;
+			break;
+		case 0x11:
+			op = ASM_OP_BRP_RET, fmt = ASM_FMT_B7;
+			break;
+		}
+		break;
+	case 0x4:
+		/* IP-relative branch; btype selects the variant. */
+		switch (FIELD(bits, 6, 3)) { /* btype */
+		case 0x0:
+			op = ASM_OP_BR_COND, fmt = ASM_FMT_B1;
+			break;
+		case 0x2:
+			op = ASM_OP_BR_WEXIT, fmt = ASM_FMT_B1;
+			break;
+		case 0x3:
+			op = ASM_OP_BR_WTOP, fmt = ASM_FMT_B1;
+			break;
+		case 0x5:
+			op = ASM_OP_BR_CLOOP, fmt = ASM_FMT_B2;
+			break;
+		case 0x6:
+			op = ASM_OP_BR_CEXIT, fmt = ASM_FMT_B2;
+			break;
+		case 0x7:
+			op = ASM_OP_BR_CTOP, fmt = ASM_FMT_B2;
+			break;
+		}
+		break;
+	case 0x5:
+		/* IP-relative call. */
+		op = ASM_OP_BR_CALL, fmt = ASM_FMT_B3;
+		break;
+	case 0x7:
+		op = ASM_OP_BRP_, fmt = ASM_FMT_B6;
+		break;
+	}
+
+	/* Unmatched encodings fall through with op == ASM_OP_NONE. */
+	if (op != ASM_OP_NONE)
+		return (asm_extract(op, fmt, bits, b, slot));
+	return (0);
+}
+
+/*
+ * Decode F-unit (floating-point unit) instructions.
+ *
+ * 'ip' identifies the bundle and 'slot' selects the instruction within
+ * it (extracted via SLOT()).  The major opcode plus the x/x2/x6/q
+ * extension fields are matched to an (op, fmt) pair which asm_extract()
+ * then uses to pull the operands out of the encoding.  Returns the
+ * non-zero result of asm_extract() on success, or 0 when the encoding
+ * does not correspond to a known instruction.
+ */
+static int
+asm_decodeF(uint64_t ip, struct asm_bundle *b, int slot)
+{
+	uint64_t bits;
+	enum asm_fmt fmt;
+	enum asm_op op;
+
+	bits = SLOT(ip, slot);
+	fmt = ASM_FMT_NONE, op = ASM_OP_NONE;
+
+	switch((int)OPCODE(bits)) {
+	case 0x0:
+		/* Misc FP ops (merge, min/max, convert, logical, ...). */
+		if (FIELD(bits, 33, 1) == 0) { /* x */
+			switch (FIELD(bits, 27, 6)) { /* x6 */
+			case 0x0:
+				op = ASM_OP_BREAK_F, fmt = ASM_FMT_F15;
+				break;
+			case 0x1:
+				if (FIELD(bits, 26, 1) == 0) /* y */
+					op = ASM_OP_NOP_F, fmt = ASM_FMT_F16;
+				else  
+					op = ASM_OP_HINT_F, fmt = ASM_FMT_F16;
+				break;
+			case 0x4:
+				op = ASM_OP_FSETC, fmt = ASM_FMT_F12;
+				break;
+			case 0x5:
+				op = ASM_OP_FCLRF, fmt = ASM_FMT_F13;
+				break;
+			case 0x8:
+				op = ASM_OP_FCHKF, fmt = ASM_FMT_F14;
+				break;
+			case 0x10:
+				op = ASM_OP_FMERGE_S, fmt = ASM_FMT_F9;
+				break;
+			case 0x11:
+				op = ASM_OP_FMERGE_NS, fmt = ASM_FMT_F9;
+				break;
+			case 0x12:
+				op = ASM_OP_FMERGE_SE, fmt = ASM_FMT_F9;
+				break;
+			case 0x14:
+				op = ASM_OP_FMIN, fmt = ASM_FMT_F8;
+				break;
+			case 0x15:
+				op = ASM_OP_FMAX, fmt = ASM_FMT_F8;
+				break;
+			case 0x16:
+				op = ASM_OP_FAMIN, fmt = ASM_FMT_F8;
+				break;
+			case 0x17:
+				op = ASM_OP_FAMAX, fmt = ASM_FMT_F8;
+				break;
+			case 0x18:
+				op = ASM_OP_FCVT_FX, fmt = ASM_FMT_F10;
+				break;
+			case 0x19:
+				op = ASM_OP_FCVT_FXU, fmt = ASM_FMT_F10;
+				break;
+			case 0x1A:
+				op = ASM_OP_FCVT_FX_TRUNC, fmt = ASM_FMT_F10;
+				break;
+			case 0x1B:
+				op = ASM_OP_FCVT_FXU_TRUNC, fmt = ASM_FMT_F10;
+				break;
+			case 0x1C:
+				op = ASM_OP_FCVT_XF, fmt = ASM_FMT_F11;
+				break;
+			case 0x28:
+				op = ASM_OP_FPACK, fmt = ASM_FMT_F9;
+				break;
+			case 0x2C:
+				op = ASM_OP_FAND, fmt = ASM_FMT_F9;
+				break;
+			case 0x2D:
+				op = ASM_OP_FANDCM, fmt = ASM_FMT_F9;
+				break;
+			case 0x2E:
+				op = ASM_OP_FOR, fmt = ASM_FMT_F9;
+				break;
+			case 0x2F:
+				op = ASM_OP_FXOR, fmt = ASM_FMT_F9;
+				break;
+			case 0x34:
+				op = ASM_OP_FSWAP_, fmt = ASM_FMT_F9;
+				break;
+			case 0x35:
+				op = ASM_OP_FSWAP_NL, fmt = ASM_FMT_F9;
+				break;
+			case 0x36:
+				op = ASM_OP_FSWAP_NR, fmt = ASM_FMT_F9;
+				break;
+			case 0x39:
+				op = ASM_OP_FMIX_LR, fmt = ASM_FMT_F9;
+				break;
+			case 0x3A:
+				op = ASM_OP_FMIX_R, fmt = ASM_FMT_F9;
+				break;
+			case 0x3B:
+				op = ASM_OP_FMIX_L, fmt = ASM_FMT_F9;
+				break;
+			case 0x3C:
+				op = ASM_OP_FSXT_R, fmt = ASM_FMT_F9;
+				break;
+			case 0x3D:
+				op = ASM_OP_FSXT_L, fmt = ASM_FMT_F9;
+				break;
+			}
+		} else {
+			/* Reciprocal approximations. */
+			if (FIELD(bits, 36, 1) == 0) /* q */
+				op = ASM_OP_FRCPA, fmt = ASM_FMT_F6;
+			else
+				op = ASM_OP_FRSQRTA, fmt = ASM_FMT_F7;
+		}
+		break;
+	case 0x1:
+		/* Parallel-FP (fp*) counterparts of the opcode 0 group. */
+		if (FIELD(bits, 33, 1) == 0) { /* x */
+			switch (FIELD(bits, 27, 6)) { /* x6 */
+			case 0x10:
+				op = ASM_OP_FPMERGE_S, fmt = ASM_FMT_F9;
+				break;
+			case 0x11:
+				op = ASM_OP_FPMERGE_NS, fmt = ASM_FMT_F9;
+				break;
+			case 0x12:
+				op = ASM_OP_FPMERGE_SE, fmt = ASM_FMT_F9;
+				break;
+			case 0x14:
+				op = ASM_OP_FPMIN, fmt = ASM_FMT_F8;
+				break;
+			case 0x15:
+				op = ASM_OP_FPMAX, fmt = ASM_FMT_F8;
+				break;
+			case 0x16:
+				op = ASM_OP_FPAMIN, fmt = ASM_FMT_F8;
+				break;
+			case 0x17:
+				op = ASM_OP_FPAMAX, fmt = ASM_FMT_F8;
+				break;
+			case 0x18:
+				op = ASM_OP_FPCVT_FX, fmt = ASM_FMT_F10;
+				break;
+			case 0x19:
+				op = ASM_OP_FPCVT_FXU, fmt = ASM_FMT_F10;
+				break;
+			case 0x1A:
+				op = ASM_OP_FPCVT_FX_TRUNC, fmt = ASM_FMT_F10;
+				break;
+			case 0x1B:
+				op = ASM_OP_FPCVT_FXU_TRUNC, fmt = ASM_FMT_F10;
+				break;
+			case 0x30:
+				op = ASM_OP_FPCMP_EQ, fmt = ASM_FMT_F8;
+				break;
+			case 0x31:
+				op = ASM_OP_FPCMP_LT, fmt = ASM_FMT_F8;
+				break;
+			case 0x32:
+				op = ASM_OP_FPCMP_LE, fmt = ASM_FMT_F8;
+				break;
+			case 0x33:
+				op = ASM_OP_FPCMP_UNORD, fmt = ASM_FMT_F8;
+				break;
+			case 0x34:
+				op = ASM_OP_FPCMP_NEQ, fmt = ASM_FMT_F8;
+				break;
+			case 0x35:
+				op = ASM_OP_FPCMP_NLT, fmt = ASM_FMT_F8;
+				break;
+			case 0x36:
+				op = ASM_OP_FPCMP_NLE, fmt = ASM_FMT_F8;
+				break;
+			case 0x37:
+				op = ASM_OP_FPCMP_ORD, fmt = ASM_FMT_F8;
+				break;
+			}
+		} else {
+			if (FIELD(bits, 36, 1) == 0) /* q */
+				op = ASM_OP_FPRCPA, fmt = ASM_FMT_F6;
+			else
+				op = ASM_OP_FPRSQRTA, fmt = ASM_FMT_F7;
+		}
+		break;
+	case 0x4:
+		op = ASM_OP_FCMP, fmt = ASM_FMT_F4;
+		break;
+	case 0x5:
+		op = ASM_OP_FCLASS_M, fmt = ASM_FMT_F5;
+		break;
+	/* Opcodes 0x8-0xD: fused multiply-add family (fma/fms/fnma). */
+	case 0x8:
+		if (FIELD(bits, 36, 1) == 0) /* x */
+			op = ASM_OP_FMA_, fmt = ASM_FMT_F1;
+		else
+			op = ASM_OP_FMA_S, fmt = ASM_FMT_F1;
+		break;
+	case 0x9:
+		if (FIELD(bits, 36, 1) == 0) /* x */
+			op = ASM_OP_FMA_D, fmt = ASM_FMT_F1;
+		else
+			op = ASM_OP_FPMA, fmt = ASM_FMT_F1;
+		break;
+	case 0xA:
+		if (FIELD(bits, 36, 1) == 0) /* x */
+			op = ASM_OP_FMS_, fmt = ASM_FMT_F1;
+		else
+			op = ASM_OP_FMS_S, fmt = ASM_FMT_F1;
+		break;
+	case 0xB:
+		if (FIELD(bits, 36, 1) == 0) /* x */
+			op = ASM_OP_FMS_D, fmt = ASM_FMT_F1;
+		else
+			op = ASM_OP_FPMS, fmt = ASM_FMT_F1;
+		break;
+	case 0xC:
+		if (FIELD(bits, 36, 1) == 0) /* x */
+			op = ASM_OP_FNMA_, fmt = ASM_FMT_F1;
+		else
+			op = ASM_OP_FNMA_S, fmt = ASM_FMT_F1;
+		break;
+	case 0xD:
+		if (FIELD(bits, 36, 1) == 0) /* x */
+			op = ASM_OP_FNMA_D, fmt = ASM_FMT_F1;
+		else
+			op = ASM_OP_FPNMA, fmt = ASM_FMT_F1;
+		break;
+	case 0xE:
+		/* Fixed-point multiply-add (xma) or fselect. */
+		if (FIELD(bits, 36, 1) == 1) { /* x */
+			switch (FIELD(bits, 34, 2)) { /* x2 */
+			case 0x0:
+				op = ASM_OP_XMA_L, fmt = ASM_FMT_F2;
+				break;
+			case 0x2:
+				op = ASM_OP_XMA_HU, fmt = ASM_FMT_F2;
+				break;
+			case 0x3:
+				op = ASM_OP_XMA_H, fmt = ASM_FMT_F2;
+				break;
+			}
+		} else
+			op = ASM_OP_FSELECT, fmt = ASM_FMT_F3;
+		break;
+	}
+
+	/* Unmatched encodings fall through with op == ASM_OP_NONE. */
+	if (op != ASM_OP_NONE)
+		return (asm_extract(op, fmt, bits, b, slot));
+	return (0);
+}
+
+/*
+ * Decode I-unit (integer unit) instructions.
+ *
+ * 'ip' identifies the bundle and 'slot' selects the instruction within
+ * it (extracted via SLOT()).  Major opcodes 8 and above are the ALU
+ * encodings shared with the A-unit and are delegated to asm_decodeA()
+ * (note: asm_decodeA() takes the already-extracted instruction bits,
+ * not the ip).  The remaining opcodes plus their extension fields are
+ * matched to an (op, fmt) pair which asm_extract() then uses to pull
+ * the operands out of the encoding.  Returns the non-zero result of
+ * asm_extract() on success, or 0 when the encoding does not correspond
+ * to a known instruction.
+ */
+static int
+asm_decodeI(uint64_t ip, struct asm_bundle *b, int slot)
+{
+	uint64_t bits;
+	enum asm_fmt fmt;
+	enum asm_op op;
+
+	bits = SLOT(ip, slot);
+	if ((int)OPCODE(bits) >= 8)
+		return (asm_decodeA(bits, b, slot));
+	fmt = ASM_FMT_NONE, op = ASM_OP_NONE;
+
+	switch((int)OPCODE(bits)) {
+	case 0x0:
+		/* Misc ops: break/nop/hint, sign/zero extend, moves. */
+		switch (FIELD(bits, 33, 3)) { /* x3 */
+		case 0x0:
+			switch (FIELD(bits, 27, 6)) { /* x6 */
+			case 0x0:
+				op = ASM_OP_BREAK_I, fmt = ASM_FMT_I19;
+				break;
+			case 0x1:
+				if (FIELD(bits, 26, 1) == 0) /* y */
+					op = ASM_OP_NOP_I, fmt = ASM_FMT_I18;
+				else
+					op = ASM_OP_HINT_I, fmt = ASM_FMT_I18;
+				break;
+			case 0xA:
+				op = ASM_OP_MOV_I, fmt = ASM_FMT_I27;
+				break;
+			case 0x10:
+				op = ASM_OP_ZXT1, fmt = ASM_FMT_I29;
+				break;
+			case 0x11:
+				op = ASM_OP_ZXT2, fmt = ASM_FMT_I29;
+				break;
+			case 0x12:
+				op = ASM_OP_ZXT4, fmt = ASM_FMT_I29;
+				break;
+			case 0x14:
+				op = ASM_OP_SXT1, fmt = ASM_FMT_I29;
+				break;
+			case 0x15:
+				op = ASM_OP_SXT2, fmt = ASM_FMT_I29;
+				break;
+			case 0x16:
+				op = ASM_OP_SXT4, fmt = ASM_FMT_I29;
+				break;
+			case 0x18:
+				op = ASM_OP_CZX1_L, fmt = ASM_FMT_I29;
+				break;
+			case 0x19:
+				op = ASM_OP_CZX2_L, fmt = ASM_FMT_I29;
+				break;
+			case 0x1C:
+				op = ASM_OP_CZX1_R, fmt = ASM_FMT_I29;
+				break;
+			case 0x1D:
+				op = ASM_OP_CZX2_R, fmt = ASM_FMT_I29;
+				break;
+			case 0x2A:
+				op = ASM_OP_MOV_I, fmt = ASM_FMT_I26;
+				break;
+			case 0x30:
+				op = ASM_OP_MOV_IP, fmt = ASM_FMT_I25;
+				break;
+			case 0x31:
+				op = ASM_OP_MOV_, fmt = ASM_FMT_I22;
+				break;
+			case 0x32:
+				op = ASM_OP_MOV_I, fmt = ASM_FMT_I28;
+				break;
+			case 0x33:
+				op = ASM_OP_MOV_PR, fmt = ASM_FMT_I25;
+				break;
+			}
+			break;
+		case 0x1:
+			op = ASM_OP_CHK_S_I, fmt = ASM_FMT_I20;
+			break;
+		case 0x2:
+			op = ASM_OP_MOV_, fmt = ASM_FMT_I24;
+			break;
+		case 0x3:
+			op = ASM_OP_MOV_, fmt = ASM_FMT_I23;
+			break;
+		case 0x7:
+			if (FIELD(bits, 22, 1) == 0) /* x */
+				op = ASM_OP_MOV_, fmt = ASM_FMT_I21;
+			else
+				op = ASM_OP_MOV_RET, fmt = ASM_FMT_I21;
+			break;
+		}
+		break;
+	case 0x4:
+		op = ASM_OP_DEP_, fmt = ASM_FMT_I15;
+		break;
+	case 0x5:
+		/* Test bit / test NaT ops and deposit/extract/shift pair. */
+		switch (FIELD(bits, 33, 3)) { /* x + x2 */
+		case 0x0:
+			if (FIELD(bits, 36, 1) == 0) { /* tb */
+				switch (FIELD(bits, 12, 2)) { /* c + y */
+				case 0x0:
+					op = ASM_OP_TBIT_Z, fmt = ASM_FMT_I16;
+					break;
+				case 0x1:
+					op = ASM_OP_TBIT_Z_UNC,
+					    fmt = ASM_FMT_I16;
+					break;
+				case 0x2:
+					if (FIELD(bits, 19, 1) == 0) /* x */
+						op = ASM_OP_TNAT_Z,
+						    fmt = ASM_FMT_I17;
+					else
+						op = ASM_OP_TF_Z,
+						    fmt = ASM_FMT_I30;
+					break;
+				case 0x3:
+					if (FIELD(bits, 19, 1) == 0) /* x */
+						op = ASM_OP_TNAT_Z_UNC,
+						    fmt = ASM_FMT_I17;
+					else
+						op = ASM_OP_TF_Z_UNC,
+						    fmt = ASM_FMT_I30;
+					break;
+				}
+			} else {
+				switch (FIELD(bits, 12, 2)) { /* c + y */
+				case 0x0:
+					op = ASM_OP_TBIT_Z_AND,
+					    fmt = ASM_FMT_I16;
+					break;
+				case 0x1:
+					op = ASM_OP_TBIT_NZ_AND,
+					    fmt = ASM_FMT_I16;
+					break;
+				case 0x2:
+					if (FIELD(bits, 19, 1) == 0) /* x */
+						op = ASM_OP_TNAT_Z_AND,
+						    fmt = ASM_FMT_I17;
+					else
+						op = ASM_OP_TF_Z_AND,
+						    fmt = ASM_FMT_I30;
+					break;
+				case 0x3:
+					if (FIELD(bits, 19, 1) == 0) /* x */
+						op = ASM_OP_TNAT_NZ_AND,
+						    fmt = ASM_FMT_I17;
+					else
+						op = ASM_OP_TF_NZ_AND,
+						    fmt = ASM_FMT_I30;
+					break;
+				}
+			}
+			break;
+		case 0x1:
+			if (FIELD(bits, 36, 1) == 0) { /* tb */
+				switch (FIELD(bits, 12, 2)) { /* c + y */
+				case 0x0:
+					op = ASM_OP_TBIT_Z_OR,
+					    fmt = ASM_FMT_I16;
+					break;
+				case 0x1:
+					op = ASM_OP_TBIT_NZ_OR,
+					    fmt = ASM_FMT_I16;
+					break;
+				case 0x2:
+					if (FIELD(bits, 19, 1) == 0) /* x */
+						op = ASM_OP_TNAT_Z_OR,
+						    fmt = ASM_FMT_I17;
+					else
+						op = ASM_OP_TF_Z_OR,
+						    fmt = ASM_FMT_I30;
+					break;
+				case 0x3:
+					if (FIELD(bits, 19, 1) == 0) /* x */
+						op = ASM_OP_TNAT_NZ_OR,
+						    fmt = ASM_FMT_I17;
+					else
+						op = ASM_OP_TF_NZ_OR,
+						    fmt = ASM_FMT_I30;
+					break;
+				}
+			} else {
+				switch (FIELD(bits, 12, 2)) { /* c + y */
+				case 0x0:
+					op = ASM_OP_TBIT_Z_OR_ANDCM,
+					    fmt = ASM_FMT_I16;
+					break;
+				case 0x1:
+					op = ASM_OP_TBIT_NZ_OR_ANDCM,
+					    fmt = ASM_FMT_I16;
+					break;
+				case 0x2:
+					if (FIELD(bits, 19, 1) == 0) /* x */
+						op = ASM_OP_TNAT_Z_OR_ANDCM,
+						    fmt = ASM_FMT_I17;
+					else
+						op = ASM_OP_TF_Z_OR_ANDCM,
+						    fmt = ASM_FMT_I30;
+					break;
+				case 0x3:
+					if (FIELD(bits, 19, 1) == 0) /* x */
+						op = ASM_OP_TNAT_NZ_OR_ANDCM,
+						    fmt = ASM_FMT_I17;
+					else
+						op = ASM_OP_TF_NZ_OR_ANDCM,
+						    fmt = ASM_FMT_I30;
+					break;
+				}
+			}
+			break;
+		case 0x2:
+			op = ASM_OP_EXTR, fmt = ASM_FMT_I11;
+			break;
+		case 0x3:
+			if (FIELD(bits, 26, 1) == 0) /* y */
+				op = ASM_OP_DEP_Z, fmt = ASM_FMT_I12;
+			else
+				op = ASM_OP_DEP_Z, fmt = ASM_FMT_I13;
+			break;
+		case 0x6:
+			op = ASM_OP_SHRP, fmt = ASM_FMT_I10;
+			break;
+		case 0x7:
+			op = ASM_OP_DEP_, fmt = ASM_FMT_I14;
+			break;
+		}
+		break;
+	case 0x7:
+		/* Parallel (multimedia) and shift operations. */
+		switch (FIELD(bits, 32, 5)) { /* ve + zb + x2a + za */
+		case 0x2:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x0:
+				op = ASM_OP_PSHR2_U, fmt = ASM_FMT_I5;
+				break;
+			case 0x1: case 0x5: case 0x9: case 0xD:
+				op = ASM_OP_PMPYSHR2_U, fmt = ASM_FMT_I1;
+				break;
+			case 0x2:
+				op = ASM_OP_PSHR2_, fmt = ASM_FMT_I5;
+				break;
+			case 0x3: case 0x7: case 0xB: case 0xF:
+				op = ASM_OP_PMPYSHR2_, fmt = ASM_FMT_I1;
+				break;
+			case 0x4:
+				op = ASM_OP_PSHL2, fmt = ASM_FMT_I7;
+				break;
+			}
+			break;
+		case 0x6:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x1:
+				op = ASM_OP_PSHR2_U, fmt = ASM_FMT_I6;
+				break;
+			case 0x3:
+				op = ASM_OP_PSHR2_, fmt = ASM_FMT_I6;
+				break;
+			case 0x9:
+				op = ASM_OP_POPCNT, fmt = ASM_FMT_I9;
+				break;
+			}
+			break;
+		case 0x8:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x1:
+				op = ASM_OP_PMIN1_U, fmt = ASM_FMT_I2;
+				break;
+			case 0x4:
+				op = ASM_OP_UNPACK1_H, fmt = ASM_FMT_I2;
+				break;
+			case 0x5:
+				op = ASM_OP_PMAX1_U, fmt = ASM_FMT_I2;
+				break;
+			case 0x6:
+				op = ASM_OP_UNPACK1_L, fmt = ASM_FMT_I2;
+				break;
+			case 0x8:
+				op = ASM_OP_MIX1_R, fmt = ASM_FMT_I2;
+				break;
+			case 0xA:
+				op = ASM_OP_MIX1_L, fmt = ASM_FMT_I2;
+				break;
+			case 0xB:
+				op = ASM_OP_PSAD1, fmt = ASM_FMT_I2;
+				break;
+			}
+			break;
+		case 0xA:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x0:
+				op = ASM_OP_PACK2_USS, fmt = ASM_FMT_I2;
+				break;
+			case 0x2:
+				op = ASM_OP_PACK2_SSS, fmt = ASM_FMT_I2;
+				break;
+			case 0x3:
+				op = ASM_OP_PMIN2, fmt = ASM_FMT_I2;
+				break;
+			case 0x4:
+				op = ASM_OP_UNPACK2_H, fmt = ASM_FMT_I2;
+				break;
+			case 0x6:
+				op = ASM_OP_UNPACK2_L, fmt = ASM_FMT_I2;
+				break;
+			case 0x7:
+				op = ASM_OP_PMAX2, fmt = ASM_FMT_I2;
+				break;
+			case 0x8:
+				op = ASM_OP_MIX2_R, fmt = ASM_FMT_I2;
+				break;
+			case 0xA:
+				op = ASM_OP_MIX2_L, fmt = ASM_FMT_I2;
+				break;
+			case 0xD:
+				op = ASM_OP_PMPY2_R, fmt = ASM_FMT_I2;
+				break;
+			case 0xF:
+				op = ASM_OP_PMPY2_L, fmt = ASM_FMT_I2;
+				break;
+			}
+			break;
+		case 0xC:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0xA:
+				op = ASM_OP_MUX1, fmt = ASM_FMT_I3;
+				break;
+			}
+			break;
+		case 0xE:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x5:
+				op = ASM_OP_PSHL2, fmt = ASM_FMT_I8;
+				break;
+			case 0xA:
+				op = ASM_OP_MUX2, fmt = ASM_FMT_I4;
+				break;
+			}
+			break;
+		case 0x10:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x0:
+				op = ASM_OP_PSHR4_U, fmt = ASM_FMT_I5;
+				break;
+			case 0x2:
+				op = ASM_OP_PSHR4_, fmt = ASM_FMT_I5;
+				break;
+			case 0x4:
+				op = ASM_OP_PSHL4, fmt = ASM_FMT_I7;
+				break;
+			}
+			break;
+		case 0x12:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x0:
+				op = ASM_OP_SHR_U, fmt = ASM_FMT_I5;
+				break;
+			case 0x2:
+				op = ASM_OP_SHR_, fmt = ASM_FMT_I5;
+				break;
+			case 0x4:
+				op = ASM_OP_SHL, fmt = ASM_FMT_I7;
+				break;
+			}
+			break;
+		case 0x14:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x1:
+				op = ASM_OP_PSHR4_U, fmt = ASM_FMT_I6;
+				break;
+			case 0x3:
+				op = ASM_OP_PSHR4_, fmt = ASM_FMT_I6;
+				break;
+			}
+			break;
+		case 0x18:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x2:
+				op = ASM_OP_PACK4_SSS, fmt = ASM_FMT_I2;
+				break;
+			case 0x4:
+				op = ASM_OP_UNPACK4_H, fmt = ASM_FMT_I2;
+				break;
+			case 0x6:
+				op = ASM_OP_UNPACK4_L, fmt = ASM_FMT_I2;
+				break;
+			case 0x8:
+				op = ASM_OP_MIX4_R, fmt = ASM_FMT_I2;
+				break;
+			case 0xA:
+				op = ASM_OP_MIX4_L, fmt = ASM_FMT_I2;
+				break;
+			}
+			break;
+		case 0x1C:
+			switch (FIELD(bits, 28, 4)) { /* x2b + x2c */
+			case 0x5:
+				op = ASM_OP_PSHL4, fmt = ASM_FMT_I8;
+				break;
+			}
+			break;
+		}
+		break;
+	}
+
+	/* Unmatched encodings fall through with op == ASM_OP_NONE. */
+	if (op != ASM_OP_NONE)
+		return (asm_extract(op, fmt, bits, b, slot));
+	return (0);
+}
+
+/*
+ * Decode M-unit instructions.
+ */
+static int
+asm_decodeM(uint64_t ip, struct asm_bundle *b, int slot)
+{
+	uint64_t bits;
+	enum asm_fmt fmt;
+	enum asm_op op;
+
+	bits = SLOT(ip, slot);
+	if ((int)OPCODE(bits) >= 8)
+		return (asm_decodeA(bits, b, slot));
+	fmt = ASM_FMT_NONE, op = ASM_OP_NONE;
+
+	switch((int)OPCODE(bits)) {
+	case 0x0:
+		switch (FIELD(bits, 33, 3)) { /* x3 */
+		case 0x0:
+			switch (FIELD(bits, 27, 6)) { /* x6 (x4 + x2) */
+			case 0x0:
+				op = ASM_OP_BREAK_M, fmt = ASM_FMT_M37;
+				break;
+			case 0x1:
+				if (FIELD(bits, 26, 1) == 0) /* y */
+					op = ASM_OP_NOP_M, fmt = ASM_FMT_M48;
+				else
+					op = ASM_OP_HINT_M, fmt = ASM_FMT_M48;
+				break;
+			case 0x4: case 0x14: case 0x24: case 0x34:
+				op = ASM_OP_SUM, fmt = ASM_FMT_M44;
+				break;
+			case 0x5: case 0x15: case 0x25: case 0x35:
+				op = ASM_OP_RUM, fmt = ASM_FMT_M44;
+				break;
+			case 0x6: case 0x16: case 0x26: case 0x36:
+				op = ASM_OP_SSM, fmt = ASM_FMT_M44;
+				break;
+			case 0x7: case 0x17: case 0x27: case 0x37:
+				op = ASM_OP_RSM, fmt = ASM_FMT_M44;
+				break;
+			case 0xA:
+				op = ASM_OP_LOADRS, fmt = ASM_FMT_M25;
+				break;
+			case 0xC:
+				op = ASM_OP_FLUSHRS, fmt = ASM_FMT_M25;
+				break;
+			case 0x10:
+				op = ASM_OP_INVALA_, fmt = ASM_FMT_M24;
+				break;
+			case 0x12:
+				op = ASM_OP_INVALA_E, fmt = ASM_FMT_M26;
+				break;
+			case 0x13:
+				op = ASM_OP_INVALA_E, fmt = ASM_FMT_M27;
+				break;
+			case 0x20:
+				op = ASM_OP_FWB, fmt = ASM_FMT_M24;
+				break;
+			case 0x22:
+				op = ASM_OP_MF_, fmt = ASM_FMT_M24;
+				break;
+			case 0x23:
+				op = ASM_OP_MF_A, fmt = ASM_FMT_M24;
+				break;
+			case 0x28:
+				op = ASM_OP_MOV_M, fmt = ASM_FMT_M30;
+				break;
+			case 0x30:
+				op = ASM_OP_SRLZ_D, fmt = ASM_FMT_M24;
+				break;
+			case 0x31:
+				op = ASM_OP_SRLZ_I, fmt = ASM_FMT_M24;
+				break;
+			case 0x33:
+				op = ASM_OP_SYNC_I, fmt = ASM_FMT_M24;
+				break;
+			}
+			break;
+		case 0x4:
+			op = ASM_OP_CHK_A_NC, fmt = ASM_FMT_M22;
+			break;
+		case 0x5:
+			op = ASM_OP_CHK_A_CLR, fmt = ASM_FMT_M22;
+			break;
+		case 0x6:
+			op = ASM_OP_CHK_A_NC, fmt = ASM_FMT_M23;
+			break;
+		case 0x7:
+			op = ASM_OP_CHK_A_CLR, fmt = ASM_FMT_M23;
+			break;
+		}
+		break;
+	case 0x1:
+		switch (FIELD(bits, 33, 3)) { /* x3 */
+		case 0x0:
+			switch (FIELD(bits, 27, 6)) { /* x6 (x4 + x2) */
+			case 0x0:
+				op = ASM_OP_MOV_RR, fmt = ASM_FMT_M42;
+				break;
+			case 0x1:
+				op = ASM_OP_MOV_DBR, fmt = ASM_FMT_M42;
+				break;
+			case 0x2:
+				op = ASM_OP_MOV_IBR, fmt = ASM_FMT_M42;
+				break;
+			case 0x3:
+				op = ASM_OP_MOV_PKR, fmt = ASM_FMT_M42;
+				break;
+			case 0x4:
+				op = ASM_OP_MOV_PMC, fmt = ASM_FMT_M42;
+				break;
+			case 0x5:
+				op = ASM_OP_MOV_PMD, fmt = ASM_FMT_M42;
+				break;
+			case 0x6:
+				op = ASM_OP_MOV_MSR, fmt = ASM_FMT_M42;
+				break;
+			case 0x9:
+				op = ASM_OP_PTC_L, fmt = ASM_FMT_M45;
+				break;
+			case 0xA:
+				op = ASM_OP_PTC_G, fmt = ASM_FMT_M45;
+				break;
+			case 0xB:
+				op = ASM_OP_PTC_GA, fmt = ASM_FMT_M45;
+				break;
+			case 0xC:
+				op = ASM_OP_PTR_D, fmt = ASM_FMT_M45;
+				break;
+			case 0xD:
+				op = ASM_OP_PTR_I, fmt = ASM_FMT_M45;
+				break;
+			case 0xE:
+				op = ASM_OP_ITR_D, fmt = ASM_FMT_M42;
+				break;
+			case 0xF:
+				op = ASM_OP_ITR_I, fmt = ASM_FMT_M42;
+				break;
+			case 0x10:
+				op = ASM_OP_MOV_RR, fmt = ASM_FMT_M43;
+				break;
+			case 0x11:
+				op = ASM_OP_MOV_DBR, fmt = ASM_FMT_M43;
+				break;
+			case 0x12:
+				op = ASM_OP_MOV_IBR, fmt = ASM_FMT_M43;
+				break;
+			case 0x13:
+				op = ASM_OP_MOV_PKR, fmt = ASM_FMT_M43;
+				break;
+			case 0x14:
+				op = ASM_OP_MOV_PMC, fmt = ASM_FMT_M43;
+				break;
+			case 0x15:
+				op = ASM_OP_MOV_PMD, fmt = ASM_FMT_M43;
+				break;
+			case 0x16:
+				op = ASM_OP_MOV_MSR, fmt = ASM_FMT_M43;
+				break;
+			case 0x17:
+				op = ASM_OP_MOV_CPUID, fmt = ASM_FMT_M43;
+				break;
+			case 0x18:
+				op = ASM_OP_PROBE_R, fmt = ASM_FMT_M39;
+				break;
+			case 0x19:
+				op = ASM_OP_PROBE_W, fmt = ASM_FMT_M39;
+				break;
+			case 0x1A:
+				op = ASM_OP_THASH, fmt = ASM_FMT_M46;
+				break;
+			case 0x1B:
+				op = ASM_OP_TTAG, fmt = ASM_FMT_M46;
+				break;
+			case 0x1E:
+				op = ASM_OP_TPA, fmt = ASM_FMT_M46;
+				break;
+			case 0x1F:
+				op = ASM_OP_TAK, fmt = ASM_FMT_M46;
+				break;
+			case 0x21:
+				op = ASM_OP_MOV_PSR_UM, fmt = ASM_FMT_M36;
+				break;
+			case 0x22:
+				op = ASM_OP_MOV_M, fmt = ASM_FMT_M31;
+				break;
+			case 0x24:
+				op = ASM_OP_MOV_, fmt = ASM_FMT_M33;
+				break;
+			case 0x25:
+				op = ASM_OP_MOV_PSR, fmt = ASM_FMT_M36;
+				break;
+			case 0x29:
+				op = ASM_OP_MOV_PSR_UM, fmt = ASM_FMT_M35;
+				break;
+			case 0x2A:
+				op = ASM_OP_MOV_M, fmt = ASM_FMT_M29;
+				break;
+			case 0x2C:
+				op = ASM_OP_MOV_, fmt = ASM_FMT_M32;
+				break;
+			case 0x2D:
+				op = ASM_OP_MOV_PSR_L, fmt = ASM_FMT_M35;
+				break;
+			case 0x2E:
+				op = ASM_OP_ITC_D, fmt = ASM_FMT_M41;
+				break;
+			case 0x2F:
+				op = ASM_OP_ITC_I, fmt = ASM_FMT_M41;
+				break;
+			case 0x30:
+				if (FIELD(bits, 36, 1) == 0) /* x */
+					op = ASM_OP_FC_, fmt = ASM_FMT_M28;
+				else
+					op = ASM_OP_FC_I, fmt = ASM_FMT_M28;
+				break;
+			case 0x31:
+				op = ASM_OP_PROBE_RW_FAULT, fmt = ASM_FMT_M40;
+				break;
+			case 0x32:
+				op = ASM_OP_PROBE_R_FAULT, fmt = ASM_FMT_M40;
+				break;
+			case 0x33:
+				op = ASM_OP_PROBE_W_FAULT, fmt = ASM_FMT_M40;
+				break;
+			case 0x34:
+				op = ASM_OP_PTC_E, fmt = ASM_FMT_M47;
+				break;
+			case 0x38:
+				op = ASM_OP_PROBE_R, fmt = ASM_FMT_M38;
+				break;
+			case 0x39:
+				op = ASM_OP_PROBE_W, fmt = ASM_FMT_M38;
+				break;
+			}
+			break;
+		case 0x1:
+			op = ASM_OP_CHK_S_M, fmt = ASM_FMT_M20;
+			break;
+		case 0x3:
+			op = ASM_OP_CHK_S, fmt = ASM_FMT_M21;
+			break;
+		case 0x6:
+			op = ASM_OP_ALLOC, fmt = ASM_FMT_M34;
+			break;
+		}
+		break;
+	case 0x4:
+		if (FIELD(bits, 27, 1) == 0) { /* x */
+			switch (FIELD(bits, 30, 7)) { /* x6 + m */
+			case 0x0:
+				op = ASM_OP_LD1_, fmt = ASM_FMT_M1;
+				break;
+			case 0x1:
+				op = ASM_OP_LD2_, fmt = ASM_FMT_M1;
+				break;
+			case 0x2:
+				op = ASM_OP_LD4_, fmt = ASM_FMT_M1;
+				break;
+			case 0x3:
+				op = ASM_OP_LD8_, fmt = ASM_FMT_M1;
+				break;
+			case 0x4:
+				op = ASM_OP_LD1_S, fmt = ASM_FMT_M1;
+				break;
+			case 0x5:
+				op = ASM_OP_LD2_S, fmt = ASM_FMT_M1;
+				break;
+			case 0x6:
+				op = ASM_OP_LD4_S, fmt = ASM_FMT_M1;
+				break;
+			case 0x7:
+				op = ASM_OP_LD8_S, fmt = ASM_FMT_M1;
+				break;
+			case 0x8:
+				op = ASM_OP_LD1_A, fmt = ASM_FMT_M1;
+				break;
+			case 0x9:
+				op = ASM_OP_LD2_A, fmt = ASM_FMT_M1;
+				break;
+			case 0xA:
+				op = ASM_OP_LD4_A, fmt = ASM_FMT_M1;
+				break;
+			case 0xB:
+				op = ASM_OP_LD8_A, fmt = ASM_FMT_M1;
+				break;
+			case 0xC:
+				op = ASM_OP_LD1_SA, fmt = ASM_FMT_M1;
+				break;
+			case 0xD:
+				op = ASM_OP_LD2_SA, fmt = ASM_FMT_M1;
+				break;
+			case 0xE:
+				op = ASM_OP_LD4_SA, fmt = ASM_FMT_M1;
+				break;
+			case 0xF:
+				op = ASM_OP_LD8_SA, fmt = ASM_FMT_M1;
+				break;
+			case 0x10:
+				op = ASM_OP_LD1_BIAS, fmt = ASM_FMT_M1;
+				break;
+			case 0x11:
+				op = ASM_OP_LD2_BIAS, fmt = ASM_FMT_M1;
+				break;
+			case 0x12:
+				op = ASM_OP_LD4_BIAS, fmt = ASM_FMT_M1;
+				break;
+			case 0x13:
+				op = ASM_OP_LD8_BIAS, fmt = ASM_FMT_M1;
+				break;
+			case 0x14:
+				op = ASM_OP_LD1_ACQ, fmt = ASM_FMT_M1;
+				break;
+			case 0x15:
+				op = ASM_OP_LD2_ACQ, fmt = ASM_FMT_M1;
+				break;
+			case 0x16:
+				op = ASM_OP_LD4_ACQ, fmt = ASM_FMT_M1;
+				break;
+			case 0x17:
+				op = ASM_OP_LD8_ACQ, fmt = ASM_FMT_M1;
+				break;
+			case 0x1B:
+				op = ASM_OP_LD8_FILL, fmt = ASM_FMT_M1;
+				break;
+			case 0x20:
+				op = ASM_OP_LD1_C_CLR, fmt = ASM_FMT_M1;
+				break;
+			case 0x21:
+				op = ASM_OP_LD2_C_CLR, fmt = ASM_FMT_M1;
+				break;
+			case 0x22:
+				op = ASM_OP_LD4_C_CLR, fmt = ASM_FMT_M1;
+				break;
+			case 0x23:
+				op = ASM_OP_LD8_C_CLR, fmt = ASM_FMT_M1;
+				break;
+			case 0x24:
+				op = ASM_OP_LD1_C_NC, fmt = ASM_FMT_M1;
+				break;
+			case 0x25:
+				op = ASM_OP_LD2_C_NC, fmt = ASM_FMT_M1;
+				break;
+			case 0x26:
+				op = ASM_OP_LD4_C_NC, fmt = ASM_FMT_M1;
+				break;
+			case 0x27:
+				op = ASM_OP_LD8_C_NC, fmt = ASM_FMT_M1;
+				break;
+			case 0x28:
+				op = ASM_OP_LD1_C_CLR_ACQ, fmt = ASM_FMT_M1;
+				break;
+			case 0x29:
+				op = ASM_OP_LD2_C_CLR_ACQ, fmt = ASM_FMT_M1;
+				break;
+			case 0x2A:
+				op = ASM_OP_LD4_C_CLR_ACQ, fmt = ASM_FMT_M1;
+				break;
+			case 0x2B:
+				op = ASM_OP_LD8_C_CLR_ACQ, fmt = ASM_FMT_M1;
+				break;
+			case 0x30:
+				op = ASM_OP_ST1_, fmt = ASM_FMT_M4;
+				break;
+			case 0x31:
+				op = ASM_OP_ST2_, fmt = ASM_FMT_M4;
+				break;
+			case 0x32:
+				op = ASM_OP_ST4_, fmt = ASM_FMT_M4;
+				break;
+			case 0x33:
+				op = ASM_OP_ST8_, fmt = ASM_FMT_M4;
+				break;
+			case 0x34:
+				op = ASM_OP_ST1_REL, fmt = ASM_FMT_M4;
+				break;
+			case 0x35:
+				op = ASM_OP_ST2_REL, fmt = ASM_FMT_M4;
+				break;
+			case 0x36:
+				op = ASM_OP_ST4_REL, fmt = ASM_FMT_M4;
+				break;
+			case 0x37:
+				op = ASM_OP_ST8_REL, fmt = ASM_FMT_M4;
+				break;
+			case 0x3B:
+				op = ASM_OP_ST8_SPILL, fmt = ASM_FMT_M4;
+				break;
+			case 0x40:
+				op = ASM_OP_LD1_, fmt = ASM_FMT_M2;
+				break;
+			case 0x41:
+				op = ASM_OP_LD2_, fmt = ASM_FMT_M2;
+				break;
+			case 0x42:
+				op = ASM_OP_LD4_, fmt = ASM_FMT_M2;
+				break;
+			case 0x43:
+				op = ASM_OP_LD8_, fmt = ASM_FMT_M2;
+				break;
+			case 0x44:
+				op = ASM_OP_LD1_S, fmt = ASM_FMT_M2;
+				break;
+			case 0x45:
+				op = ASM_OP_LD2_S, fmt = ASM_FMT_M2;
+				break;
+			case 0x46:
+				op = ASM_OP_LD4_S, fmt = ASM_FMT_M2;
+				break;
+			case 0x47:
+				op = ASM_OP_LD8_S, fmt = ASM_FMT_M2;
+				break;
+			case 0x48:
+				op = ASM_OP_LD1_A, fmt = ASM_FMT_M2;
+				break;
+			case 0x49:
+				op = ASM_OP_LD2_A, fmt = ASM_FMT_M2;
+				break;
+			case 0x4A:
+				op = ASM_OP_LD4_A, fmt = ASM_FMT_M2;
+				break;
+			case 0x4B:
+				op = ASM_OP_LD8_A, fmt = ASM_FMT_M2;
+				break;
+			case 0x4C:
+				op = ASM_OP_LD1_SA, fmt = ASM_FMT_M2;
+				break;
+			case 0x4D:
+				op = ASM_OP_LD2_SA, fmt = ASM_FMT_M2;
+				break;
+			case 0x4E:
+				op = ASM_OP_LD4_SA, fmt = ASM_FMT_M2;
+				break;
+			case 0x4F:
+				op = ASM_OP_LD8_SA, fmt = ASM_FMT_M2;
+				break;
+			case 0x50:
+				op = ASM_OP_LD1_BIAS, fmt = ASM_FMT_M2;
+				break;
+			case 0x51:
+				op = ASM_OP_LD2_BIAS, fmt = ASM_FMT_M2;
+				break;
+			case 0x52:
+				op = ASM_OP_LD4_BIAS, fmt = ASM_FMT_M2;
+				break;
+			case 0x53:
+				op = ASM_OP_LD8_BIAS, fmt = ASM_FMT_M2;
+				break;
+			case 0x54:
+				op = ASM_OP_LD1_ACQ, fmt = ASM_FMT_M2;
+				break;
+			case 0x55:
+				op = ASM_OP_LD2_ACQ, fmt = ASM_FMT_M2;
+				break;
+			case 0x56:
+				op = ASM_OP_LD4_ACQ, fmt = ASM_FMT_M2;
+				break;
+			case 0x57:
+				op = ASM_OP_LD8_ACQ, fmt = ASM_FMT_M2;
+				break;
+			case 0x5B:
+				op = ASM_OP_LD8_FILL, fmt = ASM_FMT_M2;
+				break;
+			case 0x60:
+				op = ASM_OP_LD1_C_CLR, fmt = ASM_FMT_M2;
+				break;
+			case 0x61:
+				op = ASM_OP_LD2_C_CLR, fmt = ASM_FMT_M2;
+				break;
+			case 0x62:
+				op = ASM_OP_LD4_C_CLR, fmt = ASM_FMT_M2;
+				break;
+			case 0x63:
+				op = ASM_OP_LD8_C_CLR, fmt = ASM_FMT_M2;
+				break;
+			case 0x64:
+				op = ASM_OP_LD1_C_NC, fmt = ASM_FMT_M2;
+				break;
+			case 0x65:
+				op = ASM_OP_LD2_C_NC, fmt = ASM_FMT_M2;
+				break;
+			case 0x66:
+				op = ASM_OP_LD4_C_NC, fmt = ASM_FMT_M2;
+				break;
+			case 0x67:
+				op = ASM_OP_LD8_C_NC, fmt = ASM_FMT_M2;
+				break;
+			case 0x68:
+				op = ASM_OP_LD1_C_CLR_ACQ, fmt = ASM_FMT_M2;
+				break;
+			case 0x69:
+				op = ASM_OP_LD2_C_CLR_ACQ, fmt = ASM_FMT_M2;
+				break;
+			case 0x6A:
+				op = ASM_OP_LD4_C_CLR_ACQ, fmt = ASM_FMT_M2;
+				break;
+			case 0x6B:
+				op = ASM_OP_LD8_C_CLR_ACQ, fmt = ASM_FMT_M2;
+				break;
+			}
+		} else {
+			switch (FIELD(bits, 30, 7)) { /* x6 + m */
+			case 0x0:
+				op = ASM_OP_CMPXCHG1_ACQ, fmt = ASM_FMT_M16;
+				break;
+			case 0x1:
+				op = ASM_OP_CMPXCHG2_ACQ, fmt = ASM_FMT_M16;
+				break;
+			case 0x2:
+				op = ASM_OP_CMPXCHG4_ACQ, fmt = ASM_FMT_M16;
+				break;
+			case 0x3:
+				op = ASM_OP_CMPXCHG8_ACQ, fmt = ASM_FMT_M16;
+				break;
+			case 0x4:
+				op = ASM_OP_CMPXCHG1_REL, fmt = ASM_FMT_M16;
+				break;
+			case 0x5:
+				op = ASM_OP_CMPXCHG2_REL, fmt = ASM_FMT_M16;
+				break;
+			case 0x6:
+				op = ASM_OP_CMPXCHG4_REL, fmt = ASM_FMT_M16;
+				break;
+			case 0x7:
+				op = ASM_OP_CMPXCHG8_REL, fmt = ASM_FMT_M16;
+				break;
+			case 0x8:
+				op = ASM_OP_XCHG1, fmt = ASM_FMT_M16;
+				break;
+			case 0x9:
+				op = ASM_OP_XCHG2, fmt = ASM_FMT_M16;
+				break;
+			case 0xA:
+				op = ASM_OP_XCHG4, fmt = ASM_FMT_M16;
+				break;
+			case 0xB:
+				op = ASM_OP_XCHG8, fmt = ASM_FMT_M16;
+				break;
+			case 0x12:
+				op = ASM_OP_FETCHADD4_ACQ, fmt = ASM_FMT_M17;
+				break;
+			case 0x13:
+				op = ASM_OP_FETCHADD8_ACQ, fmt = ASM_FMT_M17;
+				break;
+			case 0x16:
+				op = ASM_OP_FETCHADD4_REL, fmt = ASM_FMT_M17;
+				break;
+			case 0x17:
+				op = ASM_OP_FETCHADD8_REL, fmt = ASM_FMT_M17;
+				break;
+			case 0x1C:
+				op = ASM_OP_GETF_SIG, fmt = ASM_FMT_M19;
+				break;
+			case 0x1D:
+				op = ASM_OP_GETF_EXP, fmt = ASM_FMT_M19;
+				break;
+			case 0x1E:
+				op = ASM_OP_GETF_S, fmt = ASM_FMT_M19;
+				break;
+			case 0x1F:
+				op = ASM_OP_GETF_D, fmt = ASM_FMT_M19;
+				break;
+			case 0x20:
+				op = ASM_OP_CMP8XCHG16_ACQ, fmt = ASM_FMT_M16;
+				break;
+			case 0x24:
+				op = ASM_OP_CMP8XCHG16_REL, fmt = ASM_FMT_M16;
+				break;
+			case 0x28:
+				op = ASM_OP_LD16_, fmt = ASM_FMT_M1;
+				break;
+			case 0x2C:
+				op = ASM_OP_LD16_ACQ, fmt = ASM_FMT_M1;
+				break;
+			case 0x30:
+				op = ASM_OP_ST16_, fmt = ASM_FMT_M4;
+				break;
+			case 0x34:
+				op = ASM_OP_ST16_REL, fmt = ASM_FMT_M4;
+				break;
+			}
+		}
+		break;
+	case 0x5:
+		switch (FIELD(bits, 30, 6)) { /* x6 */
+		case 0x0:
+			op = ASM_OP_LD1_, fmt = ASM_FMT_M3;
+			break;
+		case 0x1:
+			op = ASM_OP_LD2_, fmt = ASM_FMT_M3;
+			break;
+		case 0x2:
+			op = ASM_OP_LD4_, fmt = ASM_FMT_M3;
+			break;
+		case 0x3:
+			op = ASM_OP_LD8_, fmt = ASM_FMT_M3;
+			break;
+		case 0x4:
+			op = ASM_OP_LD1_S, fmt = ASM_FMT_M3;
+			break;
+		case 0x5:
+			op = ASM_OP_LD2_S, fmt = ASM_FMT_M3;
+			break;
+		case 0x6:
+			op = ASM_OP_LD4_S, fmt = ASM_FMT_M3;
+			break;
+		case 0x7:
+			op = ASM_OP_LD8_S, fmt = ASM_FMT_M3;
+			break;
+		case 0x8:
+			op = ASM_OP_LD1_A, fmt = ASM_FMT_M3;
+			break;
+		case 0x9:
+			op = ASM_OP_LD2_A, fmt = ASM_FMT_M3;
+			break;
+		case 0xA:
+			op = ASM_OP_LD4_A, fmt = ASM_FMT_M3;
+			break;
+		case 0xB:
+			op = ASM_OP_LD8_A, fmt = ASM_FMT_M3;
+			break;
+		case 0xC:
+			op = ASM_OP_LD1_SA, fmt = ASM_FMT_M3;
+			break;
+		case 0xD:
+			op = ASM_OP_LD2_SA, fmt = ASM_FMT_M3;
+			break;
+		case 0xE:
+			op = ASM_OP_LD4_SA, fmt = ASM_FMT_M3;
+			break;
+		case 0xF:
+			op = ASM_OP_LD8_SA, fmt = ASM_FMT_M3;
+			break;
+		case 0x10:
+			op = ASM_OP_LD1_BIAS, fmt = ASM_FMT_M3;
+			break;
+		case 0x11:
+			op = ASM_OP_LD2_BIAS, fmt = ASM_FMT_M3;
+			break;
+		case 0x12:
+			op = ASM_OP_LD4_BIAS, fmt = ASM_FMT_M3;
+			break;
+		case 0x13:
+			op = ASM_OP_LD8_BIAS, fmt = ASM_FMT_M3;
+			break;
+		case 0x14:
+			op = ASM_OP_LD1_ACQ, fmt = ASM_FMT_M3;
+			break;
+		case 0x15:
+			op = ASM_OP_LD2_ACQ, fmt = ASM_FMT_M3;
+			break;
+		case 0x16:
+			op = ASM_OP_LD4_ACQ, fmt = ASM_FMT_M3;
+			break;
+		case 0x17:
+			op = ASM_OP_LD8_ACQ, fmt = ASM_FMT_M3;
+			break;
+		case 0x1B:
+			op = ASM_OP_LD8_FILL, fmt = ASM_FMT_M3;
+			break;
+		case 0x20:
+			op = ASM_OP_LD1_C_CLR, fmt = ASM_FMT_M3;
+			break;
+		case 0x21:
+			op = ASM_OP_LD2_C_CLR, fmt = ASM_FMT_M3;
+			break;
+		case 0x22:
+			op = ASM_OP_LD4_C_CLR, fmt = ASM_FMT_M3;
+			break;
+		case 0x23:
+			op = ASM_OP_LD8_C_CLR, fmt = ASM_FMT_M3;
+			break;
+		case 0x24:
+			op = ASM_OP_LD1_C_NC, fmt = ASM_FMT_M3;
+			break;
+		case 0x25:
+			op = ASM_OP_LD2_C_NC, fmt = ASM_FMT_M3;
+			break;
+		case 0x26:
+			op = ASM_OP_LD4_C_NC, fmt = ASM_FMT_M3;
+			break;
+		case 0x27:
+			op = ASM_OP_LD8_C_NC, fmt = ASM_FMT_M3;
+			break;
+		case 0x28:
+			op = ASM_OP_LD1_C_CLR_ACQ, fmt = ASM_FMT_M3;
+			break;
+		case 0x29:
+			op = ASM_OP_LD2_C_CLR_ACQ, fmt = ASM_FMT_M3;
+			break;
+		case 0x2A:
+			op = ASM_OP_LD4_C_CLR_ACQ, fmt = ASM_FMT_M3;
+			break;
+		case 0x2B:
+			op = ASM_OP_LD8_C_CLR_ACQ, fmt = ASM_FMT_M3;
+			break;
+		case 0x30:
+			op = ASM_OP_ST1_, fmt = ASM_FMT_M5;
+			break;
+		case 0x31:
+			op = ASM_OP_ST2_, fmt = ASM_FMT_M5;
+			break;
+		case 0x32:
+			op = ASM_OP_ST4_, fmt = ASM_FMT_M5;
+			break;
+		case 0x33:
+			op = ASM_OP_ST8_, fmt = ASM_FMT_M5;
+			break;
+		case 0x34:
+			op = ASM_OP_ST1_REL, fmt = ASM_FMT_M5;
+			break;
+		case 0x35:
+			op = ASM_OP_ST2_REL, fmt = ASM_FMT_M5;
+			break;
+		case 0x36:
+			op = ASM_OP_ST4_REL, fmt = ASM_FMT_M5;
+			break;
+		case 0x37:
+			op = ASM_OP_ST8_REL, fmt = ASM_FMT_M5;
+			break;
+		case 0x3B:
+			op = ASM_OP_ST8_SPILL, fmt = ASM_FMT_M5;
+			break;
+		}
+		break;
+	case 0x6:
+		if (FIELD(bits, 27, 1) == 0) { /* x */
+			switch (FIELD(bits, 30, 7)) { /* x6 + m */
+			case 0x0:
+				op = ASM_OP_LDFE_, fmt = ASM_FMT_M6;
+				break;
+			case 0x1:
+				op = ASM_OP_LDF8_, fmt = ASM_FMT_M6;
+				break;
+			case 0x2:
+				op = ASM_OP_LDFS_, fmt = ASM_FMT_M6;
+				break;
+			case 0x3:
+				op = ASM_OP_LDFD_, fmt = ASM_FMT_M6;
+				break;
+			case 0x4:
+				op = ASM_OP_LDFE_S, fmt = ASM_FMT_M6;
+				break;
+			case 0x5:
+				op = ASM_OP_LDF8_S, fmt = ASM_FMT_M6;
+				break;
+			case 0x6:
+				op = ASM_OP_LDFS_S, fmt = ASM_FMT_M6;
+				break;
+			case 0x7:
+				op = ASM_OP_LDFD_S, fmt = ASM_FMT_M6;
+				break;
+			case 0x8:
+				op = ASM_OP_LDFE_A, fmt = ASM_FMT_M6;
+				break;
+			case 0x9:
+				op = ASM_OP_LDF8_A, fmt = ASM_FMT_M6;
+				break;
+			case 0xA:
+				op = ASM_OP_LDFS_A, fmt = ASM_FMT_M6;
+				break;
+			case 0xB:
+				op = ASM_OP_LDFD_A, fmt = ASM_FMT_M6;
+				break;
+			case 0xC:
+				op = ASM_OP_LDFE_SA, fmt = ASM_FMT_M6;
+				break;
+			case 0xD:
+				op = ASM_OP_LDF8_SA, fmt = ASM_FMT_M6;
+				break;
+			case 0xE:
+				op = ASM_OP_LDFS_SA, fmt = ASM_FMT_M6;
+				break;
+			case 0xF:
+				op = ASM_OP_LDFD_SA, fmt = ASM_FMT_M6;
+				break;
+			case 0x1B:
+				op = ASM_OP_LDF_FILL, fmt = ASM_FMT_M6;
+				break;
+			case 0x20:
+				op = ASM_OP_LDFE_C_CLR, fmt = ASM_FMT_M6;
+				break;
+			case 0x21:
+				op = ASM_OP_LDF8_C_CLR, fmt = ASM_FMT_M6;
+				break;
+			case 0x22:
+				op = ASM_OP_LDFS_C_CLR, fmt = ASM_FMT_M6;
+				break;
+			case 0x23:
+				op = ASM_OP_LDFD_C_CLR, fmt = ASM_FMT_M6;
+				break;
+			case 0x24:
+				op = ASM_OP_LDFE_C_NC, fmt = ASM_FMT_M6;
+				break;
+			case 0x25:
+				op = ASM_OP_LDF8_C_NC, fmt = ASM_FMT_M6;
+				break;
+			case 0x26:
+				op = ASM_OP_LDFS_C_NC, fmt = ASM_FMT_M6;
+				break;
+			case 0x27:
+				op = ASM_OP_LDFD_C_NC, fmt = ASM_FMT_M6;
+				break;
+			case 0x2C:
+				op = ASM_OP_LFETCH_, fmt = ASM_FMT_M13;
+				break;
+			case 0x2D:
+				op = ASM_OP_LFETCH_EXCL, fmt = ASM_FMT_M13;
+				break;
+			case 0x2E:
+				op = ASM_OP_LFETCH_FAULT, fmt = ASM_FMT_M13;
+				break;
+			case 0x2F:
+				op = ASM_OP_LFETCH_FAULT_EXCL,
+				    fmt = ASM_FMT_M13;
+				break;
+			case 0x30:
+				op = ASM_OP_STFE, fmt = ASM_FMT_M9;
+				break;
+			case 0x31:
+				op = ASM_OP_STF8, fmt = ASM_FMT_M9;
+				break;
+			case 0x32:
+				op = ASM_OP_STFS, fmt = ASM_FMT_M9;
+				break;
+			case 0x33:
+				op = ASM_OP_STFD, fmt = ASM_FMT_M9;
+				break;
+			case 0x3B:
+				op = ASM_OP_STF_SPILL, fmt = ASM_FMT_M9;
+				break;
+			case 0x40:
+				op = ASM_OP_LDFE_, fmt = ASM_FMT_M7;
+				break;
+			case 0x41:
+				op = ASM_OP_LDF8_, fmt = ASM_FMT_M7;
+				break;
+			case 0x42:
+				op = ASM_OP_LDFS_, fmt = ASM_FMT_M7;
+				break;
+			case 0x43:
+				op = ASM_OP_LDFD_, fmt = ASM_FMT_M7;
+				break;
+			case 0x44:
+				op = ASM_OP_LDFE_S, fmt = ASM_FMT_M7;
+				break;
+			case 0x45:
+				op = ASM_OP_LDF8_S, fmt = ASM_FMT_M7;
+				break;
+			case 0x46:
+				op = ASM_OP_LDFS_S, fmt = ASM_FMT_M7;
+				break;
+			case 0x47:
+				op = ASM_OP_LDFD_S, fmt = ASM_FMT_M7;
+				break;
+			case 0x48:
+				op = ASM_OP_LDFE_A, fmt = ASM_FMT_M7;
+				break;
+			case 0x49:
+				op = ASM_OP_LDF8_A, fmt = ASM_FMT_M7;
+				break;
+			case 0x4A:
+				op = ASM_OP_LDFS_A, fmt = ASM_FMT_M7;
+				break;
+			case 0x4B:
+				op = ASM_OP_LDFD_A, fmt = ASM_FMT_M7;
+				break;
+			case 0x4C:
+				op = ASM_OP_LDFE_SA, fmt = ASM_FMT_M7;
+				break;
+			case 0x4D:
+				op = ASM_OP_LDF8_SA, fmt = ASM_FMT_M7;
+				break;
+			case 0x4E:
+				op = ASM_OP_LDFS_SA, fmt = ASM_FMT_M7;
+				break;
+			case 0x4F:
+				op = ASM_OP_LDFD_SA, fmt = ASM_FMT_M7;
+				break;
+			case 0x5B:
+				op = ASM_OP_LDF_FILL, fmt = ASM_FMT_M7;
+				break;
+			case 0x60:
+				op = ASM_OP_LDFE_C_CLR, fmt = ASM_FMT_M7;
+				break;
+			case 0x61:
+				op = ASM_OP_LDF8_C_CLR, fmt = ASM_FMT_M7;
+				break;
+			case 0x62:
+				op = ASM_OP_LDFS_C_CLR, fmt = ASM_FMT_M7;
+				break;
+			case 0x63:
+				op = ASM_OP_LDFD_C_CLR, fmt = ASM_FMT_M7;
+				break;
+			case 0x64:
+				op = ASM_OP_LDFE_C_NC, fmt = ASM_FMT_M7;
+				break;
+			case 0x65:
+				op = ASM_OP_LDF8_C_NC, fmt = ASM_FMT_M7;
+				break;
+			case 0x66:
+				op = ASM_OP_LDFS_C_NC, fmt = ASM_FMT_M7;
+				break;
+			case 0x67:
+				op = ASM_OP_LDFD_C_NC, fmt = ASM_FMT_M7;
+				break;
+			case 0x6C:
+				op = ASM_OP_LFETCH_, fmt = ASM_FMT_M14;
+				break;
+			case 0x6D:
+				op = ASM_OP_LFETCH_EXCL, fmt = ASM_FMT_M14;
+				break;
+			case 0x6E:
+				op = ASM_OP_LFETCH_FAULT, fmt = ASM_FMT_M14;
+				break;
+			case 0x6F:
+				op = ASM_OP_LFETCH_FAULT_EXCL,
+				    fmt = ASM_FMT_M14;
+				break;
+			}
+		} else {
+			switch (FIELD(bits, 30, 7)) { /* x6 + m */
+			case 0x1:
+				op = ASM_OP_LDFP8_, fmt = ASM_FMT_M11;
+				break;
+			case 0x2:
+				op = ASM_OP_LDFPS_, fmt = ASM_FMT_M11;
+				break;
+			case 0x3:
+				op = ASM_OP_LDFPD_, fmt = ASM_FMT_M11;
+				break;
+			case 0x5:
+				op = ASM_OP_LDFP8_S, fmt = ASM_FMT_M11;
+				break;
+			case 0x6:
+				op = ASM_OP_LDFPS_S, fmt = ASM_FMT_M11;
+				break;
+			case 0x7:
+				op = ASM_OP_LDFPD_S, fmt = ASM_FMT_M11;
+				break;
+			case 0x9:
+				op = ASM_OP_LDFP8_A, fmt = ASM_FMT_M11;
+				break;
+			case 0xA:
+				op = ASM_OP_LDFPS_A, fmt = ASM_FMT_M11;
+				break;
+			case 0xB:
+				op = ASM_OP_LDFPD_A, fmt = ASM_FMT_M11;
+				break;
+			case 0xD:
+				op = ASM_OP_LDFP8_SA, fmt = ASM_FMT_M11;
+				break;
+			case 0xE:
+				op = ASM_OP_LDFPS_SA, fmt = ASM_FMT_M11;
+				break;
+			case 0xF:
+				op = ASM_OP_LDFPD_SA, fmt = ASM_FMT_M11;
+				break;
+			case 0x1C:
+				op = ASM_OP_SETF_SIG, fmt = ASM_FMT_M18;
+				break;
+			case 0x1D:
+				op = ASM_OP_SETF_EXP, fmt = ASM_FMT_M18;
+				break;
+			case 0x1E:
+				op = ASM_OP_SETF_S, fmt = ASM_FMT_M18;
+				break;
+			case 0x1F:
+				op = ASM_OP_SETF_D, fmt = ASM_FMT_M18;
+				break;
+			case 0x21:
+				op = ASM_OP_LDFP8_C_CLR, fmt = ASM_FMT_M11;
+				break;
+			case 0x22:
+				op = ASM_OP_LDFPS_C_CLR, fmt = ASM_FMT_M11;
+				break;
+			case 0x23:
+				op = ASM_OP_LDFPD_C_CLR, fmt = ASM_FMT_M11;
+				break;
+			case 0x25:
+				op = ASM_OP_LDFP8_C_NC, fmt = ASM_FMT_M11;
+				break;
+			case 0x26:
+				op = ASM_OP_LDFPS_C_NC, fmt = ASM_FMT_M11;
+				break;
+			case 0x27:
+				op = ASM_OP_LDFPD_C_NC, fmt = ASM_FMT_M11;
+				break;
+			case 0x41:
+				op = ASM_OP_LDFP8_, fmt = ASM_FMT_M12;
+				break;
+			case 0x42:
+				op = ASM_OP_LDFPS_, fmt = ASM_FMT_M12;
+				break;
+			case 0x43:
+				op = ASM_OP_LDFPD_, fmt = ASM_FMT_M12;
+				break;
+			case 0x45:
+				op = ASM_OP_LDFP8_S, fmt = ASM_FMT_M12;
+				break;
+			case 0x46:
+				op = ASM_OP_LDFPS_S, fmt = ASM_FMT_M12;
+				break;
+			case 0x47:
+				op = ASM_OP_LDFPD_S, fmt = ASM_FMT_M12;
+				break;
+			case 0x49:
+				op = ASM_OP_LDFP8_A, fmt = ASM_FMT_M12;
+				break;
+			case 0x4A:
+				op = ASM_OP_LDFPS_A, fmt = ASM_FMT_M12;
+				break;
+			case 0x4B:
+				op = ASM_OP_LDFPD_A, fmt = ASM_FMT_M12;
+				break;
+			case 0x4D:
+				op = ASM_OP_LDFP8_SA, fmt = ASM_FMT_M12;
+				break;
+			case 0x4E:
+				op = ASM_OP_LDFPS_SA, fmt = ASM_FMT_M12;
+				break;
+			case 0x4F:
+				op = ASM_OP_LDFPD_SA, fmt = ASM_FMT_M12;
+				break;
+			case 0x61:
+				op = ASM_OP_LDFP8_C_CLR, fmt = ASM_FMT_M12;
+				break;
+			case 0x62:
+				op = ASM_OP_LDFPS_C_CLR, fmt = ASM_FMT_M12;
+				break;
+			case 0x63:
+				op = ASM_OP_LDFPD_C_CLR, fmt = ASM_FMT_M12;
+				break;
+			case 0x65:
+				op = ASM_OP_LDFP8_C_NC, fmt = ASM_FMT_M12;
+				break;
+			case 0x66:
+				op = ASM_OP_LDFPS_C_NC, fmt = ASM_FMT_M12;
+				break;
+			case 0x67:
+				op = ASM_OP_LDFPD_C_NC, fmt = ASM_FMT_M12;
+				break;
+			}
+		}
+		break;
+	case 0x7:
+		switch (FIELD(bits, 30, 6)) { /* x6 */
+		case 0x0:
+			op = ASM_OP_LDFE_, fmt = ASM_FMT_M8;
+			break;
+		case 0x1:
+			op = ASM_OP_LDF8_, fmt = ASM_FMT_M8;
+			break;
+		case 0x2:
+			op = ASM_OP_LDFS_, fmt = ASM_FMT_M8;
+			break;
+		case 0x3:
+			op = ASM_OP_LDFD_, fmt = ASM_FMT_M8;
+			break;
+		case 0x4:
+			op = ASM_OP_LDFE_S, fmt = ASM_FMT_M8;
+			break;
+		case 0x5:
+			op = ASM_OP_LDF8_S, fmt = ASM_FMT_M8;
+			break;
+		case 0x6:
+			op = ASM_OP_LDFS_S, fmt = ASM_FMT_M8;
+			break;
+		case 0x7:
+			op = ASM_OP_LDFD_S, fmt = ASM_FMT_M8;
+			break;
+		case 0x8:
+			op = ASM_OP_LDFE_A, fmt = ASM_FMT_M8;
+			break;
+		case 0x9:
+			op = ASM_OP_LDF8_A, fmt = ASM_FMT_M8;
+			break;
+		case 0xA:
+			op = ASM_OP_LDFS_A, fmt = ASM_FMT_M8;
+			break;
+		case 0xB:
+			op = ASM_OP_LDFD_A, fmt = ASM_FMT_M8;
+			break;
+		case 0xC:
+			op = ASM_OP_LDFE_SA, fmt = ASM_FMT_M8;
+			break;
+		case 0xD:
+			op = ASM_OP_LDF8_SA, fmt = ASM_FMT_M8;
+			break;
+		case 0xE:
+			op = ASM_OP_LDFS_SA, fmt = ASM_FMT_M8;
+			break;
+		case 0xF:
+			op = ASM_OP_LDFD_SA, fmt = ASM_FMT_M8;
+			break;
+		case 0x1B:
+			op = ASM_OP_LDF_FILL, fmt = ASM_FMT_M8;
+			break;
+		case 0x20:
+			op = ASM_OP_LDFE_C_CLR, fmt = ASM_FMT_M8;
+			break;
+		case 0x21:
+			op = ASM_OP_LDF8_C_CLR, fmt = ASM_FMT_M8;
+			break;
+		case 0x22:
+			op = ASM_OP_LDFS_C_CLR, fmt = ASM_FMT_M8;
+			break;
+		case 0x23:
+			op = ASM_OP_LDFD_C_CLR, fmt = ASM_FMT_M8;
+			break;
+		case 0x24:
+			op = ASM_OP_LDFE_C_NC, fmt = ASM_FMT_M8;
+			break;
+		case 0x25:
+			op = ASM_OP_LDF8_C_NC, fmt = ASM_FMT_M8;
+			break;
+		case 0x26:
+			op = ASM_OP_LDFS_C_NC, fmt = ASM_FMT_M8;
+			break;
+		case 0x27:
+			op = ASM_OP_LDFD_C_NC, fmt = ASM_FMT_M8;
+			break;
+		case 0x2C:
+			op = ASM_OP_LFETCH_, fmt = ASM_FMT_M15;
+			break;
+		case 0x2D:
+			op = ASM_OP_LFETCH_EXCL, fmt = ASM_FMT_M15;
+			break;
+		case 0x2E:
+			op = ASM_OP_LFETCH_FAULT, fmt = ASM_FMT_M15;
+			break;
+		case 0x2F:
+			op = ASM_OP_LFETCH_FAULT_EXCL, fmt = ASM_FMT_M15;
+			break;
+		case 0x30:
+			op = ASM_OP_STFE, fmt = ASM_FMT_M10;
+			break;
+		case 0x31:
+			op = ASM_OP_STF8, fmt = ASM_FMT_M10;
+			break;
+		case 0x32:
+			op = ASM_OP_STFS, fmt = ASM_FMT_M10;
+			break;
+		case 0x33:
+			op = ASM_OP_STFD, fmt = ASM_FMT_M10;
+			break;
+		case 0x3B:
+			op = ASM_OP_STF_SPILL, fmt = ASM_FMT_M10;
+			break;
+		}
+		break;
+	}
+
+	if (op != ASM_OP_NONE)
+		return (asm_extract(op, fmt, bits, b, slot));
+	return (0);
+}
+
+/*
+ * Decode X-unit instructions.
+ */
+static int
+asm_decodeX(uint64_t ip, struct asm_bundle *b, int slot)
+{
+	uint64_t bits;
+	enum asm_fmt fmt;
+	enum asm_op op;
+
+	KASSERT(slot == 2, ("foo"));
+	bits = SLOT(ip, slot);
+	fmt = ASM_FMT_NONE, op = ASM_OP_NONE;
+	/* Initialize slot 1 (slot - 1) */
+	b->b_inst[slot - 1].i_format = ASM_FMT_NONE;
+	b->b_inst[slot - 1].i_bits = SLOT(ip, slot - 1);
+
+	switch((int)OPCODE(bits)) {
+	case 0x0:
+		if (FIELD(bits, 33, 3) == 0) { /* x3 */
+			switch (FIELD(bits, 27, 6)) { /* x6 */
+			case 0x0:
+				op = ASM_OP_BREAK_X, fmt = ASM_FMT_X1;
+				break;
+			case 0x1:
+				if (FIELD(bits, 26, 1) == 0) /* y */
+					op = ASM_OP_NOP_X, fmt = ASM_FMT_X5;
+				else
+					op = ASM_OP_HINT_X, fmt = ASM_FMT_X5;
+				break;
+			}
+		}
+		break;
+	case 0x6:
+		if (FIELD(bits, 20, 1) == 0)
+			op = ASM_OP_MOVL, fmt = ASM_FMT_X2;
+		break;
+	case 0xC:
+		if (FIELD(bits, 6, 3) == 0) /* btype */
+			op = ASM_OP_BRL_COND, fmt = ASM_FMT_X3;
+		break;
+	case 0xD:
+		op = ASM_OP_BRL_CALL, fmt = ASM_FMT_X4;
+		break;
+	}
+
+	if (op != ASM_OP_NONE)
+		return (asm_extract(op, fmt, bits, b, slot));
+	return (0);
+}
+
+int
+asm_decode(uint64_t ip, struct asm_bundle *b)
+{
+	const char *tp;
+	unsigned int slot;
+	int ok;
+
+	memset(b, 0, sizeof(*b));
+
+	b->b_templ = asm_templname[TMPL(ip)];
+	if (b->b_templ == 0)
+		return (0);
+
+	slot = 0;
+	tp = b->b_templ;
+
+	ok = 1;
+	while (ok && *tp != 0) {
+		switch (*tp++) {
+		case 'B':
+			ok = asm_decodeB(ip, b, slot++);
+			break;
+		case 'F':
+			ok = asm_decodeF(ip, b, slot++);
+			break;
+		case 'I':
+			ok = asm_decodeI(ip, b, slot++);
+			break;
+		case 'L':
+			ok = (slot++ == 1) ? 1 : 0;
+			break;
+		case 'M':
+			ok = asm_decodeM(ip, b, slot++);
+			break;
+		case 'X':
+			ok = asm_decodeX(ip, b, slot++);
+			break;
+		case ';':
+			ok = 1;
+			break;
+		default:
+			ok = 0;
+			break;
+		}
+	}
+	return (ok);
+}


Property changes on: trunk/sys/ia64/disasm/disasm_decode.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/disasm/disasm_extract.c
===================================================================
--- trunk/sys/ia64/disasm/disasm_extract.c	                        (rev 0)
+++ trunk/sys/ia64/disasm/disasm_extract.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,2610 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000-2006 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/disasm/disasm_extract.c 159916 2006-06-24 19:21:11Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/stdarg.h>
+
+#include <ia64/disasm/disasm_int.h>
+#include <ia64/disasm/disasm.h>
+
/*
 * Pack a bit-field descriptor: the field offset in the high bits and
 * the field length in the low 8 bits.  All macro arguments are fully
 * parenthesized so that expression arguments (e.g. FRAG(o + 1, l))
 * expand correctly (CERT PRE01-C); the original left them bare.
 */
#define FRAG(o,l)	((int)(((o) << 8) | ((l) & 0xff)))
#define FRAG_OFS(f)	((f) >> 8)
#define FRAG_LEN(f)	((f) & 0xff)
+
+/*
+ * Support functions.
+ */
+static void
+asm_cmpltr_add(struct asm_inst *i, enum asm_cmpltr_class c,
+    enum asm_cmpltr_type t)
+{
+
+	i->i_cmpltr[i->i_ncmpltrs].c_class = c;
+	i->i_cmpltr[i->i_ncmpltrs].c_type = t;
+	i->i_ncmpltrs++;
+	KASSERT(i->i_ncmpltrs < 6, ("foo"));
+}
+
+static void
+asm_hint(struct asm_inst *i, enum asm_cmpltr_class c)
+{
+
+	switch (FIELD(i->i_bits, 28, 2)) { /* hint */
+	case 0:
+		asm_cmpltr_add(i, c, ASM_CT_NONE);
+		break;
+	case 1:
+		asm_cmpltr_add(i, c, ASM_CT_NT1);
+		break;
+	case 2:
+		asm_cmpltr_add(i, c, ASM_CT_NT2);
+		break;
+	case 3:
+		asm_cmpltr_add(i, c, ASM_CT_NTA);
+		break;
+	}
+}
+
+static void
+asm_sf(struct asm_inst *i)
+{
+
+	switch (FIELD(i->i_bits, 34, 2)) {
+	case 0:
+		asm_cmpltr_add(i, ASM_CC_SF, ASM_CT_S0);
+		break;
+	case 1:
+		asm_cmpltr_add(i, ASM_CC_SF, ASM_CT_S1);
+		break;
+	case 2:
+		asm_cmpltr_add(i, ASM_CC_SF, ASM_CT_S2);
+		break;
+	case 3:
+		asm_cmpltr_add(i, ASM_CC_SF, ASM_CT_S3);
+		break;
+	}
+}
+
+static void
+asm_brhint(struct asm_inst *i)
+{
+	uint64_t bits = i->i_bits;
+
+	switch (FIELD(bits, 33, 2)) { /* bwh */
+	case 0:
+		asm_cmpltr_add(i, ASM_CC_BWH, ASM_CT_SPTK);
+		break;
+	case 1:
+		asm_cmpltr_add(i, ASM_CC_BWH, ASM_CT_SPNT);
+		break;
+	case 2:
+		asm_cmpltr_add(i, ASM_CC_BWH, ASM_CT_DPTK);
+		break;
+	case 3:
+		asm_cmpltr_add(i, ASM_CC_BWH, ASM_CT_DPNT);
+		break;
+	}
+
+	if (FIELD(bits, 12, 1)) /* ph */
+		asm_cmpltr_add(i, ASM_CC_PH, ASM_CT_MANY);
+	else
+		asm_cmpltr_add(i, ASM_CC_PH, ASM_CT_FEW);
+
+	if (FIELD(bits, 35, 1)) /* dh */
+		asm_cmpltr_add(i, ASM_CC_DH, ASM_CT_CLR);
+	else
+		asm_cmpltr_add(i, ASM_CC_DH, ASM_CT_NONE);
+}
+
+static void
+asm_brphint(struct asm_inst *i)
+{
+	uint64_t bits = i->i_bits;
+
+	switch (FIELD(bits, 3, 2)) { /* ipwh, indwh */
+	case 0:
+		asm_cmpltr_add(i, ASM_CC_IPWH, ASM_CT_SPTK);
+		break;
+	case 1:
+		asm_cmpltr_add(i, ASM_CC_IPWH, ASM_CT_LOOP);
+		break;
+	case 2:
+		asm_cmpltr_add(i, ASM_CC_IPWH, ASM_CT_DPTK);
+		break;
+	case 3:
+		asm_cmpltr_add(i, ASM_CC_IPWH, ASM_CT_EXIT);
+		break;
+	}
+
+	if (FIELD(bits, 5, 1)) /* ph */
+		asm_cmpltr_add(i, ASM_CC_PH, ASM_CT_MANY);
+	else
+		asm_cmpltr_add(i, ASM_CC_PH, ASM_CT_FEW);
+
+	switch (FIELD(bits, 0, 3)) { /* pvec */
+	case 0:
+		asm_cmpltr_add(i, ASM_CC_PVEC, ASM_CT_DC_DC);
+		break;
+	case 1:
+		asm_cmpltr_add(i, ASM_CC_PVEC, ASM_CT_DC_NT);
+		break;
+	case 2:
+		asm_cmpltr_add(i, ASM_CC_PVEC, ASM_CT_TK_DC);
+		break;
+	case 3:
+		asm_cmpltr_add(i, ASM_CC_PVEC, ASM_CT_TK_TK);
+		break;
+	case 4:
+		asm_cmpltr_add(i, ASM_CC_PVEC, ASM_CT_TK_NT);
+		break;
+	case 5:
+		asm_cmpltr_add(i, ASM_CC_PVEC, ASM_CT_NT_DC);
+		break;
+	case 6:
+		asm_cmpltr_add(i, ASM_CC_PVEC, ASM_CT_NT_TK);
+		break;
+	case 7:
+		asm_cmpltr_add(i, ASM_CC_PVEC, ASM_CT_NT_NT);
+		break;
+	}
+
+	if (FIELD(bits, 35, 1)) /* ih */
+		asm_cmpltr_add(i, ASM_CC_IH, ASM_CT_IMP);
+	else
+		asm_cmpltr_add(i, ASM_CC_IH, ASM_CT_NONE);
+}
+
+static enum asm_oper_type
+asm_normalize(struct asm_inst *i, enum asm_op op)
+{
+	enum asm_oper_type ot = ASM_OPER_NONE;
+
+	switch (op) {
+	case ASM_OP_BR_CALL:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_CALL);
+		op = ASM_OP_BR;
+		break;
+	case ASM_OP_BR_CEXIT:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_CEXIT);
+		op = ASM_OP_BR;
+		break;
+	case ASM_OP_BR_CLOOP:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_CLOOP);
+		op = ASM_OP_BR;
+		break;
+	case ASM_OP_BR_COND:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_COND);
+		op = ASM_OP_BR;
+		break;
+	case ASM_OP_BR_CTOP:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_CTOP);
+		op = ASM_OP_BR;
+		break;
+	case ASM_OP_BR_IA:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_IA);
+		op = ASM_OP_BR;
+		break;
+	case ASM_OP_BR_RET:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_RET);
+		op = ASM_OP_BR;
+		break;
+	case ASM_OP_BR_WEXIT:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_WEXIT);
+		op = ASM_OP_BR;
+		break;
+	case ASM_OP_BR_WTOP:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_WTOP);
+		op = ASM_OP_BR;
+		break;
+	case ASM_OP_BREAK_B:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_B);
+		op = ASM_OP_BREAK;
+		break;
+	case ASM_OP_BREAK_F:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_F);
+		op = ASM_OP_BREAK;
+		break;
+	case ASM_OP_BREAK_I:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_I);
+		op = ASM_OP_BREAK;
+		break;
+	case ASM_OP_BREAK_M:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_M);
+		op = ASM_OP_BREAK;
+		break;
+	case ASM_OP_BREAK_X:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_X);
+		op = ASM_OP_BREAK;
+		break;
+	case ASM_OP_BRL_COND:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_COND);
+		op = ASM_OP_BRL;
+		break;
+	case ASM_OP_BRL_CALL:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_CALL);
+		op = ASM_OP_BRL;
+		break;
+	case ASM_OP_BRP_:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_NONE);
+		op = ASM_OP_BRP;
+		break;
+	case ASM_OP_BRP_RET:
+		asm_cmpltr_add(i, ASM_CC_BTYPE, ASM_CT_RET);
+		op = ASM_OP_BRP;
+		break;
+	case ASM_OP_BSW_0:
+		asm_cmpltr_add(i, ASM_CC_BSW, ASM_CT_0);
+		op = ASM_OP_BSW;
+		break;
+	case ASM_OP_BSW_1:
+		asm_cmpltr_add(i, ASM_CC_BSW, ASM_CT_1);
+		op = ASM_OP_BSW;
+		break;
+	case ASM_OP_CHK_A_CLR:
+		asm_cmpltr_add(i, ASM_CC_CHK, ASM_CT_A);
+		asm_cmpltr_add(i, ASM_CC_ACLR, ASM_CT_CLR);
+		op = ASM_OP_CHK;
+		break;
+	case ASM_OP_CHK_A_NC:
+		asm_cmpltr_add(i, ASM_CC_CHK, ASM_CT_A);
+		asm_cmpltr_add(i, ASM_CC_ACLR, ASM_CT_NC);
+		op = ASM_OP_CHK;
+		break;
+	case ASM_OP_CHK_S:
+		asm_cmpltr_add(i, ASM_CC_CHK, ASM_CT_S);
+		op = ASM_OP_CHK;
+		break;
+	case ASM_OP_CHK_S_I:
+		asm_cmpltr_add(i, ASM_CC_CHK, ASM_CT_S);
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_I);
+		op = ASM_OP_CHK;
+		break;
+	case ASM_OP_CHK_S_M:
+		asm_cmpltr_add(i, ASM_CC_CHK, ASM_CT_S);
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_M);
+		op = ASM_OP_CHK;
+		break;
+	case ASM_OP_CLRRRB_:
+		asm_cmpltr_add(i, ASM_CC_CLRRRB, ASM_CT_NONE);
+		op = ASM_OP_CLRRRB;
+		break;
+	case ASM_OP_CLRRRB_PR:
+		asm_cmpltr_add(i, ASM_CC_CLRRRB, ASM_CT_PR);
+		op = ASM_OP_CLRRRB;
+		break;
+	case ASM_OP_CMP_EQ:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_NONE);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_EQ_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_EQ_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_EQ_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_EQ_UNC:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_UNC);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_GE_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_GE_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_GE_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_GT_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_GT_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_GT_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LE_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LE_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LE_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LT:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_NONE);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LT_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LT_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LT_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LT_UNC:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_UNC);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LTU:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LTU);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_NONE);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_LTU_UNC:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LTU);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_UNC);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_NE_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_NE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_NE_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_NE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP_NE_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_NE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP;
+		break;
+	case ASM_OP_CMP4_EQ:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_NONE);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_EQ_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_EQ_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_EQ_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_EQ_UNC:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_EQ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_UNC);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_GE_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_GE_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_GE_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_GT_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_GT_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_GT_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_GT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LE_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LE_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LE_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LT:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_NONE);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LT_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LT_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LT_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LT_UNC:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LT);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_UNC);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LTU:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LTU);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_NONE);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_LTU_UNC:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_LTU);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_UNC);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_NE_AND:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_NE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_NE_OR:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_NE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP4_NE_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_CREL, ASM_CT_NE);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_CMP4;
+		break;
+	case ASM_OP_CMP8XCHG16_ACQ:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_ACQ);
+		op = ASM_OP_CMP8XCHG16;
+		break;
+	case ASM_OP_CMP8XCHG16_REL:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_REL);
+		op = ASM_OP_CMP8XCHG16;
+		break;
+	case ASM_OP_CMPXCHG1_ACQ:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_ACQ);
+		op = ASM_OP_CMPXCHG1;
+		break;
+	case ASM_OP_CMPXCHG1_REL:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_REL);
+		op = ASM_OP_CMPXCHG1;
+		break;
+	case ASM_OP_CMPXCHG2_ACQ:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_ACQ);
+		op = ASM_OP_CMPXCHG2;
+		break;
+	case ASM_OP_CMPXCHG2_REL:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_REL);
+		op = ASM_OP_CMPXCHG2;
+		break;
+	case ASM_OP_CMPXCHG4_ACQ:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_ACQ);
+		op = ASM_OP_CMPXCHG4;
+		break;
+	case ASM_OP_CMPXCHG4_REL:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_REL);
+		op = ASM_OP_CMPXCHG4;
+		break;
+	case ASM_OP_CMPXCHG8_ACQ:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_ACQ);
+		op = ASM_OP_CMPXCHG8;
+		break;
+	case ASM_OP_CMPXCHG8_REL:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_REL);
+		op = ASM_OP_CMPXCHG8;
+		break;
+	case ASM_OP_CZX1_L:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_L);
+		op = ASM_OP_CZX1;
+		break;
+	case ASM_OP_CZX1_R:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_R);
+		op = ASM_OP_CZX1;
+		break;
+	case ASM_OP_CZX2_L:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_L);
+		op = ASM_OP_CZX2;
+		break;
+	case ASM_OP_CZX2_R:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_R);
+		op = ASM_OP_CZX2;
+		break;
+	case ASM_OP_DEP_:
+		asm_cmpltr_add(i, ASM_CC_DEP, ASM_CT_NONE);
+		op = ASM_OP_DEP;
+		break;
+	case ASM_OP_DEP_Z:
+		asm_cmpltr_add(i, ASM_CC_DEP, ASM_CT_Z);
+		op = ASM_OP_DEP;
+		break;
+	case ASM_OP_FC_:
+		asm_cmpltr_add(i, ASM_CC_FC, ASM_CT_NONE);
+		op = ASM_OP_FC;
+		break;
+	case ASM_OP_FC_I:
+		asm_cmpltr_add(i, ASM_CC_FC, ASM_CT_I);
+		op = ASM_OP_FC;
+		break;
+	case ASM_OP_FCLASS_M:
+		asm_cmpltr_add(i, ASM_CC_FCREL, ASM_CT_M);
+		op = ASM_OP_FCLASS;
+		break;
+	case ASM_OP_FCVT_FX:
+		asm_cmpltr_add(i, ASM_CC_FCVT, ASM_CT_FX);
+		asm_cmpltr_add(i, ASM_CC_TRUNC, ASM_CT_NONE);
+		op = ASM_OP_FCVT;
+		break;
+	case ASM_OP_FCVT_FX_TRUNC:
+		asm_cmpltr_add(i, ASM_CC_FCVT, ASM_CT_FX);
+		asm_cmpltr_add(i, ASM_CC_TRUNC, ASM_CT_TRUNC);
+		op = ASM_OP_FCVT;
+		break;
+	case ASM_OP_FCVT_FXU:
+		asm_cmpltr_add(i, ASM_CC_FCVT, ASM_CT_FXU);
+		asm_cmpltr_add(i, ASM_CC_TRUNC, ASM_CT_NONE);
+		op = ASM_OP_FCVT;
+		break;
+	case ASM_OP_FCVT_FXU_TRUNC:
+		asm_cmpltr_add(i, ASM_CC_FCVT, ASM_CT_FXU);
+		asm_cmpltr_add(i, ASM_CC_TRUNC, ASM_CT_TRUNC);
+		op = ASM_OP_FCVT;
+		break;
+	case ASM_OP_FCVT_XF:
+		asm_cmpltr_add(i, ASM_CC_FCVT, ASM_CT_XF);
+		asm_cmpltr_add(i, ASM_CC_TRUNC, ASM_CT_NONE);
+		op = ASM_OP_FCVT;
+		break;
+	case ASM_OP_FETCHADD4_ACQ:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_ACQ);
+		op = ASM_OP_FETCHADD4;
+		break;
+	case ASM_OP_FETCHADD4_REL:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_REL);
+		op = ASM_OP_FETCHADD4;
+		break;
+	case ASM_OP_FETCHADD8_ACQ:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_ACQ);
+		op = ASM_OP_FETCHADD8;
+		break;
+	case ASM_OP_FETCHADD8_REL:
+		asm_cmpltr_add(i, ASM_CC_SEM, ASM_CT_REL);
+		op = ASM_OP_FETCHADD8;
+		break;
+	case ASM_OP_FMA_:
+		asm_cmpltr_add(i, ASM_CC_PC, ASM_CT_NONE);
+		op = ASM_OP_FMA;
+		break;
+	case ASM_OP_FMA_D:
+		asm_cmpltr_add(i, ASM_CC_PC, ASM_CT_D);
+		op = ASM_OP_FMA;
+		break;
+	case ASM_OP_FMA_S:
+		asm_cmpltr_add(i, ASM_CC_PC, ASM_CT_S);
+		op = ASM_OP_FMA;
+		break;
+	case ASM_OP_FMERGE_NS:
+		asm_cmpltr_add(i, ASM_CC_FMERGE, ASM_CT_NS);
+		op = ASM_OP_FMERGE;
+		break;
+	case ASM_OP_FMERGE_S:
+		asm_cmpltr_add(i, ASM_CC_FMERGE, ASM_CT_S);
+		op = ASM_OP_FMERGE;
+		break;
+	case ASM_OP_FMERGE_SE:
+		asm_cmpltr_add(i, ASM_CC_FMERGE, ASM_CT_SE);
+		op = ASM_OP_FMERGE;
+		break;
+	case ASM_OP_FMIX_L:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_L);
+		op = ASM_OP_FMIX;
+		break;
+	case ASM_OP_FMIX_LR:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_LR);
+		op = ASM_OP_FMIX;
+		break;
+	case ASM_OP_FMIX_R:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_R);
+		op = ASM_OP_FMIX;
+		break;
+	case ASM_OP_FMS_:
+		asm_cmpltr_add(i, ASM_CC_PC, ASM_CT_NONE);
+		op = ASM_OP_FMS;
+		break;
+	case ASM_OP_FMS_D:
+		asm_cmpltr_add(i, ASM_CC_PC, ASM_CT_D);
+		op = ASM_OP_FMS;
+		break;
+	case ASM_OP_FMS_S:
+		asm_cmpltr_add(i, ASM_CC_PC, ASM_CT_S);
+		op = ASM_OP_FMS;
+		break;
+	case ASM_OP_FNMA_:
+		asm_cmpltr_add(i, ASM_CC_PC, ASM_CT_NONE);
+		op = ASM_OP_FNMA;
+		break;
+	case ASM_OP_FNMA_D:
+		asm_cmpltr_add(i, ASM_CC_PC, ASM_CT_D);
+		op = ASM_OP_FNMA;
+		break;
+	case ASM_OP_FNMA_S:
+		asm_cmpltr_add(i, ASM_CC_PC, ASM_CT_S);
+		op = ASM_OP_FNMA;
+		break;
+	case ASM_OP_FPCMP_EQ:
+		asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_EQ);
+		op = ASM_OP_FPCMP;
+		break;
+	case ASM_OP_FPCMP_LE:
+		asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_LE);
+		op = ASM_OP_FPCMP;
+		break;
+	case ASM_OP_FPCMP_LT:
+		asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_LT);
+		op = ASM_OP_FPCMP;
+		break;
+	case ASM_OP_FPCMP_NEQ:
+		asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_NEQ);
+		op = ASM_OP_FPCMP;
+		break;
+	case ASM_OP_FPCMP_NLE:
+		asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_NLE);
+		op = ASM_OP_FPCMP;
+		break;
+	case ASM_OP_FPCMP_NLT:
+		asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_NLT);
+		op = ASM_OP_FPCMP;
+		break;
+	case ASM_OP_FPCMP_ORD:
+		asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_ORD);
+		op = ASM_OP_FPCMP;
+		break;
+	case ASM_OP_FPCMP_UNORD:
+		asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_UNORD);
+		op = ASM_OP_FPCMP;
+		break;
+	case ASM_OP_FPCVT_FX:
+		asm_cmpltr_add(i, ASM_CC_FCVT, ASM_CT_FX);
+		asm_cmpltr_add(i, ASM_CC_TRUNC, ASM_CT_NONE);
+		op = ASM_OP_FPCVT;
+		break;
+	case ASM_OP_FPCVT_FX_TRUNC:
+		asm_cmpltr_add(i, ASM_CC_FCVT, ASM_CT_FX);
+		asm_cmpltr_add(i, ASM_CC_TRUNC, ASM_CT_TRUNC);
+		op = ASM_OP_FPCVT;
+		break;
+	case ASM_OP_FPCVT_FXU:
+		asm_cmpltr_add(i, ASM_CC_FCVT, ASM_CT_FXU);
+		asm_cmpltr_add(i, ASM_CC_TRUNC, ASM_CT_NONE);
+		op = ASM_OP_FPCVT;
+		break;
+	case ASM_OP_FPCVT_FXU_TRUNC:
+		asm_cmpltr_add(i, ASM_CC_FCVT, ASM_CT_FXU);
+		asm_cmpltr_add(i, ASM_CC_TRUNC, ASM_CT_TRUNC);
+		op = ASM_OP_FPCVT;
+		break;
+	case ASM_OP_FPMERGE_NS:
+		asm_cmpltr_add(i, ASM_CC_FMERGE, ASM_CT_NS);
+		op = ASM_OP_FPMERGE;
+		break;
+	case ASM_OP_FPMERGE_S:
+		asm_cmpltr_add(i, ASM_CC_FMERGE, ASM_CT_S);
+		op = ASM_OP_FPMERGE;
+		break;
+	case ASM_OP_FPMERGE_SE:
+		asm_cmpltr_add(i, ASM_CC_FMERGE, ASM_CT_SE);
+		op = ASM_OP_FPMERGE;
+		break;
+	case ASM_OP_FSWAP_:
+		asm_cmpltr_add(i, ASM_CC_FSWAP, ASM_CT_NONE);
+		op = ASM_OP_FSWAP;
+		break;
+	case ASM_OP_FSWAP_NL:
+		asm_cmpltr_add(i, ASM_CC_FSWAP, ASM_CT_NL);
+		op = ASM_OP_FSWAP;
+		break;
+	case ASM_OP_FSWAP_NR:
+		asm_cmpltr_add(i, ASM_CC_FSWAP, ASM_CT_NR);
+		op = ASM_OP_FSWAP;
+		break;
+	case ASM_OP_FSXT_L:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_L);
+		op = ASM_OP_FSXT;
+		break;
+	case ASM_OP_FSXT_R:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_R);
+		op = ASM_OP_FSXT;
+		break;
+	case ASM_OP_GETF_D:
+		asm_cmpltr_add(i, ASM_CC_GETF, ASM_CT_D);
+		op = ASM_OP_GETF;
+		break;
+	case ASM_OP_GETF_EXP:
+		asm_cmpltr_add(i, ASM_CC_GETF, ASM_CT_EXP);
+		op = ASM_OP_GETF;
+		break;
+	case ASM_OP_GETF_S:
+		asm_cmpltr_add(i, ASM_CC_GETF, ASM_CT_S);
+		op = ASM_OP_GETF;
+		break;
+	case ASM_OP_GETF_SIG:
+		asm_cmpltr_add(i, ASM_CC_GETF, ASM_CT_SIG);
+		op = ASM_OP_GETF;
+		break;
+	case ASM_OP_HINT_B:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_B);
+		op = ASM_OP_HINT;
+		break;
+	case ASM_OP_HINT_F:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_F);
+		op = ASM_OP_HINT;
+		break;
+	case ASM_OP_HINT_I:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_I);
+		op = ASM_OP_HINT;
+		break;
+	case ASM_OP_HINT_M:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_M);
+		op = ASM_OP_HINT;
+		break;
+	case ASM_OP_HINT_X:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_X);
+		op = ASM_OP_HINT;
+		break;
+	case ASM_OP_INVALA_:
+		asm_cmpltr_add(i, ASM_CC_INVALA, ASM_CT_NONE);
+		op = ASM_OP_INVALA;
+		break;
+	case ASM_OP_INVALA_E:
+		asm_cmpltr_add(i, ASM_CC_INVALA, ASM_CT_E);
+		op = ASM_OP_INVALA;
+		break;
+	case ASM_OP_ITC_D:
+		asm_cmpltr_add(i, ASM_CC_ITC, ASM_CT_D);
+		op = ASM_OP_ITC;
+		break;
+	case ASM_OP_ITC_I:
+		asm_cmpltr_add(i, ASM_CC_ITC, ASM_CT_I);
+		op = ASM_OP_ITC;
+		break;
+	case ASM_OP_ITR_D:
+		asm_cmpltr_add(i, ASM_CC_ITR, ASM_CT_D);
+		ot = ASM_OPER_DTR;
+		op = ASM_OP_ITR;
+		break;
+	case ASM_OP_ITR_I:
+		asm_cmpltr_add(i, ASM_CC_ITR, ASM_CT_I);
+		ot = ASM_OPER_ITR;
+		op = ASM_OP_ITR;
+		break;
+	case ASM_OP_LD1_:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LD1;
+		break;
+	case ASM_OP_LD1_A:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_A);
+		op = ASM_OP_LD1;
+		break;
+	case ASM_OP_LD1_ACQ:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_ACQ);
+		op = ASM_OP_LD1;
+		break;
+	case ASM_OP_LD1_BIAS:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_BIAS);
+		op = ASM_OP_LD1;
+		break;
+	case ASM_OP_LD1_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LD1;
+		break;
+	case ASM_OP_LD1_C_CLR_ACQ:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_CLR_ACQ);
+		op = ASM_OP_LD1;
+		break;
+	case ASM_OP_LD1_C_NC:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LD1;
+		break;
+	case ASM_OP_LD1_S:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_S);
+		op = ASM_OP_LD1;
+		break;
+	case ASM_OP_LD1_SA: 
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_SA);
+		op = ASM_OP_LD1;
+		break;
+	case ASM_OP_LD16_:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LD16;
+		break;
+	case ASM_OP_LD16_ACQ:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_ACQ);
+		op = ASM_OP_LD16;
+		break;
+	case ASM_OP_LD2_:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LD2;
+		break;
+	case ASM_OP_LD2_A:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_A);
+		op = ASM_OP_LD2;
+		break;
+	case ASM_OP_LD2_ACQ:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_ACQ);
+		op = ASM_OP_LD2;
+		break;
+	case ASM_OP_LD2_BIAS:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_BIAS);
+		op = ASM_OP_LD2;
+		break;
+	case ASM_OP_LD2_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LD2;
+		break;
+	case ASM_OP_LD2_C_CLR_ACQ:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_CLR_ACQ);
+		op = ASM_OP_LD2;
+		break;
+	case ASM_OP_LD2_C_NC:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LD2;
+		break;
+	case ASM_OP_LD2_S:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_S);
+		op = ASM_OP_LD2;
+		break;
+	case ASM_OP_LD2_SA: 
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_SA);
+		op = ASM_OP_LD2;
+		break;
+	case ASM_OP_LD4_:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LD4;
+		break;
+	case ASM_OP_LD4_A:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_A);
+		op = ASM_OP_LD4;
+		break;
+	case ASM_OP_LD4_ACQ:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_ACQ);
+		op = ASM_OP_LD4;
+		break;
+	case ASM_OP_LD4_BIAS:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_BIAS);
+		op = ASM_OP_LD4;
+		break;
+	case ASM_OP_LD4_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LD4;
+		break;
+	case ASM_OP_LD4_C_CLR_ACQ:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_CLR_ACQ);
+		op = ASM_OP_LD4;
+		break;
+	case ASM_OP_LD4_C_NC:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LD4;
+		break;
+	case ASM_OP_LD4_S:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_S);
+		op = ASM_OP_LD4;
+		break;
+	case ASM_OP_LD4_SA: 
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_SA);
+		op = ASM_OP_LD4;
+		break;
+	case ASM_OP_LD8_:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LD8_A:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_A);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LD8_ACQ:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_ACQ);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LD8_BIAS:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_BIAS);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LD8_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LD8_C_CLR_ACQ:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_CLR_ACQ);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LD8_C_NC:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LD8_FILL:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_FILL);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LD8_S:
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_S);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LD8_SA: 
+		asm_cmpltr_add(i, ASM_CC_LDTYPE, ASM_CT_SA);
+		op = ASM_OP_LD8;
+		break;
+	case ASM_OP_LDF_FILL:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_FILL);
+		op = ASM_OP_LDF;
+		break;
+	case ASM_OP_LDF8_:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LDF8;
+		break;
+	case ASM_OP_LDF8_A:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_A);
+		op = ASM_OP_LDF8;
+		break;
+	case ASM_OP_LDF8_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LDF8;
+		break;
+	case ASM_OP_LDF8_C_NC:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LDF8;
+		break;
+	case ASM_OP_LDF8_S:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_S);
+		op = ASM_OP_LDF8;
+		break;
+	case ASM_OP_LDF8_SA:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_SA);
+		op = ASM_OP_LDF8;
+		break;
+	case ASM_OP_LDFD_:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LDFD;
+		break;
+	case ASM_OP_LDFD_A:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_A);
+		op = ASM_OP_LDFD;
+		break;
+	case ASM_OP_LDFD_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LDFD;
+		break;
+	case ASM_OP_LDFD_C_NC:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LDFD;
+		break;
+	case ASM_OP_LDFD_S:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_S);
+		op = ASM_OP_LDFD;
+		break;
+	case ASM_OP_LDFD_SA:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_SA);
+		op = ASM_OP_LDFD;
+		break;
+	case ASM_OP_LDFE_:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LDFE;
+		break;
+	case ASM_OP_LDFE_A:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_A);
+		op = ASM_OP_LDFE;
+		break;
+	case ASM_OP_LDFE_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LDFE;
+		break;
+	case ASM_OP_LDFE_C_NC:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LDFE;
+		break;
+	case ASM_OP_LDFE_S:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_S);
+		op = ASM_OP_LDFE;
+		break;
+	case ASM_OP_LDFE_SA:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_SA);
+		op = ASM_OP_LDFE;
+		break;
+	case ASM_OP_LDFP8_:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LDFP8;
+		break;
+	case ASM_OP_LDFP8_A:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_A);
+		op = ASM_OP_LDFP8;
+		break;
+	case ASM_OP_LDFP8_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LDFP8;
+		break;
+	case ASM_OP_LDFP8_C_NC:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LDFP8;
+		break;
+	case ASM_OP_LDFP8_S:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_S);
+		op = ASM_OP_LDFP8;
+		break;
+	case ASM_OP_LDFP8_SA:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_SA);
+		op = ASM_OP_LDFP8;
+		break;
+	case ASM_OP_LDFPD_:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LDFPD;
+		break;
+	case ASM_OP_LDFPD_A:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_A);
+		op = ASM_OP_LDFPD;
+		break;
+	case ASM_OP_LDFPD_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LDFPD;
+		break;
+	case ASM_OP_LDFPD_C_NC:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LDFPD;
+		break;
+	case ASM_OP_LDFPD_S:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_S);
+		op = ASM_OP_LDFPD;
+		break;
+	case ASM_OP_LDFPD_SA:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_SA);
+		op = ASM_OP_LDFPD;
+		break;
+	case ASM_OP_LDFPS_:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LDFPS;
+		break;
+	case ASM_OP_LDFPS_A:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_A);
+		op = ASM_OP_LDFPS;
+		break;
+	case ASM_OP_LDFPS_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LDFPS;
+		break;
+	case ASM_OP_LDFPS_C_NC:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LDFPS;
+		break;
+	case ASM_OP_LDFPS_S:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_S);
+		op = ASM_OP_LDFPS;
+		break;
+	case ASM_OP_LDFPS_SA:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_SA);
+		op = ASM_OP_LDFPS;
+		break;
+	case ASM_OP_LDFS_:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_NONE);
+		op = ASM_OP_LDFS;
+		break;
+	case ASM_OP_LDFS_A:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_A);
+		op = ASM_OP_LDFS;
+		break;
+	case ASM_OP_LDFS_C_CLR:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_CLR);
+		op = ASM_OP_LDFS;
+		break;
+	case ASM_OP_LDFS_C_NC:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_C_NC);
+		op = ASM_OP_LDFS;
+		break;
+	case ASM_OP_LDFS_S:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_S);
+		op = ASM_OP_LDFS;
+		break;
+	case ASM_OP_LDFS_SA:
+		asm_cmpltr_add(i, ASM_CC_FLDTYPE, ASM_CT_SA);
+		op = ASM_OP_LDFS;
+		break;
+	case ASM_OP_LFETCH_:
+		asm_cmpltr_add(i, ASM_CC_LFTYPE, ASM_CT_NONE);
+		asm_cmpltr_add(i, ASM_CC_LFETCH, ASM_CT_NONE);
+		op = ASM_OP_LFETCH;
+		break;
+	case ASM_OP_LFETCH_EXCL:
+		asm_cmpltr_add(i, ASM_CC_LFTYPE, ASM_CT_NONE);
+		asm_cmpltr_add(i, ASM_CC_LFETCH, ASM_CT_EXCL);
+		op = ASM_OP_LFETCH;
+		break;
+	case ASM_OP_LFETCH_FAULT:
+		asm_cmpltr_add(i, ASM_CC_LFTYPE, ASM_CT_FAULT);
+		asm_cmpltr_add(i, ASM_CC_LFETCH, ASM_CT_NONE);
+		op = ASM_OP_LFETCH;
+		break;
+	case ASM_OP_LFETCH_FAULT_EXCL:
+		asm_cmpltr_add(i, ASM_CC_LFTYPE, ASM_CT_FAULT);
+		asm_cmpltr_add(i, ASM_CC_LFETCH, ASM_CT_EXCL);
+		op = ASM_OP_LFETCH;
+		break;
+	case ASM_OP_MF_:
+		asm_cmpltr_add(i, ASM_CC_MF, ASM_CT_NONE);
+		op = ASM_OP_MF;
+		break;
+	case ASM_OP_MF_A:
+		asm_cmpltr_add(i, ASM_CC_MF, ASM_CT_A);
+		op = ASM_OP_MF;
+		break;
+	case ASM_OP_MIX1_L:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_L);
+		op = ASM_OP_MIX1;
+		break;
+	case ASM_OP_MIX1_R:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_R);
+		op = ASM_OP_MIX1;
+		break;
+	case ASM_OP_MIX2_L:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_L);
+		op = ASM_OP_MIX2;
+		break;
+	case ASM_OP_MIX2_R:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_R);
+		op = ASM_OP_MIX2;
+		break;
+	case ASM_OP_MIX4_L:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_L);
+		op = ASM_OP_MIX4;
+		break;
+	case ASM_OP_MIX4_R:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_R);
+		op = ASM_OP_MIX4;
+		break;
+	case ASM_OP_MOV_:
+		asm_cmpltr_add(i, ASM_CC_MOV, ASM_CT_NONE);
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_I:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_I);
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_M:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_M);
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_RET:
+		asm_cmpltr_add(i, ASM_CC_MOV, ASM_CT_RET);
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_CPUID:
+		ot = ASM_OPER_CPUID;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_DBR:
+		ot = ASM_OPER_DBR;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_IBR:
+		ot = ASM_OPER_IBR;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_IP:
+		ot = ASM_OPER_IP;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_MSR:
+		ot = ASM_OPER_MSR;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_PKR:
+		ot = ASM_OPER_PKR;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_PMC:
+		ot = ASM_OPER_PMC;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_PMD:
+		ot = ASM_OPER_PMD;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_PR:
+		ot = ASM_OPER_PR;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_PSR:
+		ot = ASM_OPER_PSR;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_PSR_L:
+		ot = ASM_OPER_PSR_L;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_PSR_UM:
+		ot = ASM_OPER_PSR_UM;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_MOV_RR:
+		ot = ASM_OPER_RR;
+		op = ASM_OP_MOV;
+		break;
+	case ASM_OP_NOP_B:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_B);
+		op = ASM_OP_NOP;
+		break;
+	case ASM_OP_NOP_F:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_F);
+		op = ASM_OP_NOP;
+		break;
+	case ASM_OP_NOP_I:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_I);
+		op = ASM_OP_NOP;
+		break;
+	case ASM_OP_NOP_M:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_M);
+		op = ASM_OP_NOP;
+		break;
+	case ASM_OP_NOP_X:
+		asm_cmpltr_add(i, ASM_CC_UNIT, ASM_CT_X);
+		op = ASM_OP_NOP;
+		break;
+	case ASM_OP_PACK2_SSS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_SSS);
+		op = ASM_OP_PACK2;
+		break;
+	case ASM_OP_PACK2_USS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_USS);
+		op = ASM_OP_PACK2;
+		break;
+	case ASM_OP_PACK4_SSS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_SSS);
+		op = ASM_OP_PACK4;
+		break;
+	case ASM_OP_PADD1_:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_NONE);
+		op = ASM_OP_PADD1;
+		break;
+	case ASM_OP_PADD1_SSS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_SSS);
+		op = ASM_OP_PADD1;
+		break;
+	case ASM_OP_PADD1_UUS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_UUS);
+		op = ASM_OP_PADD1;
+		break;
+	case ASM_OP_PADD1_UUU:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_UUU);
+		op = ASM_OP_PADD1;
+		break;
+	case ASM_OP_PADD2_:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_NONE);
+		op = ASM_OP_PADD2;
+		break;
+	case ASM_OP_PADD2_SSS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_SSS);
+		op = ASM_OP_PADD2;
+		break;
+	case ASM_OP_PADD2_UUS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_UUS);
+		op = ASM_OP_PADD2;
+		break;
+	case ASM_OP_PADD2_UUU:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_UUU);
+		op = ASM_OP_PADD2;
+		break;
+	case ASM_OP_PAVG1_:
+		asm_cmpltr_add(i, ASM_CC_PAVG, ASM_CT_NONE);
+		op = ASM_OP_PAVG1;
+		break;
+	case ASM_OP_PAVG1_RAZ:
+		asm_cmpltr_add(i, ASM_CC_PAVG, ASM_CT_RAZ);
+		op = ASM_OP_PAVG1;
+		break;
+	case ASM_OP_PAVG2_:
+		asm_cmpltr_add(i, ASM_CC_PAVG, ASM_CT_NONE);
+		op = ASM_OP_PAVG2;
+		break;
+	case ASM_OP_PAVG2_RAZ:
+		asm_cmpltr_add(i, ASM_CC_PAVG, ASM_CT_RAZ);
+		op = ASM_OP_PAVG2;
+		break;
+	case ASM_OP_PCMP1_EQ:
+		asm_cmpltr_add(i, ASM_CC_PREL, ASM_CT_EQ);
+		op = ASM_OP_PCMP1;
+		break;
+	case ASM_OP_PCMP1_GT:
+		asm_cmpltr_add(i, ASM_CC_PREL, ASM_CT_GT);
+		op = ASM_OP_PCMP1;
+		break;
+	case ASM_OP_PCMP2_EQ:
+		asm_cmpltr_add(i, ASM_CC_PREL, ASM_CT_EQ);
+		op = ASM_OP_PCMP2;
+		break;
+	case ASM_OP_PCMP2_GT:
+		asm_cmpltr_add(i, ASM_CC_PREL, ASM_CT_GT);
+		op = ASM_OP_PCMP2;
+		break;
+	case ASM_OP_PCMP4_EQ:
+		asm_cmpltr_add(i, ASM_CC_PREL, ASM_CT_EQ);
+		op = ASM_OP_PCMP4;
+		break;
+	case ASM_OP_PCMP4_GT:
+		asm_cmpltr_add(i, ASM_CC_PREL, ASM_CT_GT);
+		op = ASM_OP_PCMP4;
+		break;
+	case ASM_OP_PMAX1_U:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_U);
+		op = ASM_OP_PMAX1;
+		break;
+	case ASM_OP_PMIN1_U:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_U);
+		op = ASM_OP_PMIN1;
+		break;
+	case ASM_OP_PMPY2_L:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_L);
+		op = ASM_OP_PMPY2;
+		break;
+	case ASM_OP_PMPY2_R:
+		asm_cmpltr_add(i, ASM_CC_LR, ASM_CT_R);
+		op = ASM_OP_PMPY2;
+		break;
+	case ASM_OP_PMPYSHR2_:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_NONE);
+		op = ASM_OP_PMPYSHR2;
+		break;
+	case ASM_OP_PMPYSHR2_U:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_U);
+		op = ASM_OP_PMPYSHR2;
+		break;
+	case ASM_OP_PROBE_R:
+		asm_cmpltr_add(i, ASM_CC_RW, ASM_CT_R);
+		asm_cmpltr_add(i, ASM_CC_PRTYPE, ASM_CT_NONE);
+		op = ASM_OP_PROBE;
+		break;
+	case ASM_OP_PROBE_R_FAULT:
+		asm_cmpltr_add(i, ASM_CC_RW, ASM_CT_R);
+		asm_cmpltr_add(i, ASM_CC_PRTYPE, ASM_CT_FAULT);
+		op = ASM_OP_PROBE;
+		break;
+	case ASM_OP_PROBE_RW_FAULT:
+		asm_cmpltr_add(i, ASM_CC_RW, ASM_CT_RW);
+		asm_cmpltr_add(i, ASM_CC_PRTYPE, ASM_CT_FAULT);
+		op = ASM_OP_PROBE;
+		break;
+	case ASM_OP_PROBE_W:
+		asm_cmpltr_add(i, ASM_CC_RW, ASM_CT_W);
+		asm_cmpltr_add(i, ASM_CC_PRTYPE, ASM_CT_NONE);
+		op = ASM_OP_PROBE;
+		break;
+	case ASM_OP_PROBE_W_FAULT:
+		asm_cmpltr_add(i, ASM_CC_RW, ASM_CT_W);
+		asm_cmpltr_add(i, ASM_CC_PRTYPE, ASM_CT_FAULT);
+		op = ASM_OP_PROBE;
+		break;
+	case ASM_OP_PSHR2_:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_NONE);
+		op = ASM_OP_PSHR2;
+		break;
+	case ASM_OP_PSHR2_U:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_U);
+		op = ASM_OP_PSHR2;
+		break;
+	case ASM_OP_PSHR4_:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_NONE);
+		op = ASM_OP_PSHR4;
+		break;
+	case ASM_OP_PSHR4_U:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_U);
+		op = ASM_OP_PSHR4;
+		break;
+	case ASM_OP_PSUB1_:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_NONE);
+		op = ASM_OP_PSUB1;
+		break;
+	case ASM_OP_PSUB1_SSS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_SSS);
+		op = ASM_OP_PSUB1;
+		break;
+	case ASM_OP_PSUB1_UUS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_UUS);
+		op = ASM_OP_PSUB1;
+		break;
+	case ASM_OP_PSUB1_UUU:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_UUU);
+		op = ASM_OP_PSUB1;
+		break;
+	case ASM_OP_PSUB2_:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_NONE);
+		op = ASM_OP_PSUB2;
+		break;
+	case ASM_OP_PSUB2_SSS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_SSS);
+		op = ASM_OP_PSUB2;
+		break;
+	case ASM_OP_PSUB2_UUS:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_UUS);
+		op = ASM_OP_PSUB2;
+		break;
+	case ASM_OP_PSUB2_UUU:
+		asm_cmpltr_add(i, ASM_CC_SAT, ASM_CT_UUU);
+		op = ASM_OP_PSUB2;
+		break;
+	case ASM_OP_PTC_E:
+		asm_cmpltr_add(i, ASM_CC_PTC, ASM_CT_E);
+		op = ASM_OP_PTC;
+		break;
+	case ASM_OP_PTC_G:
+		asm_cmpltr_add(i, ASM_CC_PTC, ASM_CT_G);
+		op = ASM_OP_PTC;
+		break;
+	case ASM_OP_PTC_GA:
+		asm_cmpltr_add(i, ASM_CC_PTC, ASM_CT_GA);
+		op = ASM_OP_PTC;
+		break;
+	case ASM_OP_PTC_L:
+		asm_cmpltr_add(i, ASM_CC_PTC, ASM_CT_L);
+		op = ASM_OP_PTC;
+		break;
+	case ASM_OP_PTR_D:
+		asm_cmpltr_add(i, ASM_CC_PTR, ASM_CT_D);
+		op = ASM_OP_PTR;
+		break;
+	case ASM_OP_PTR_I:
+		asm_cmpltr_add(i, ASM_CC_PTR, ASM_CT_I);
+		op = ASM_OP_PTR;
+		break;
+	case ASM_OP_SETF_D:
+		asm_cmpltr_add(i, ASM_CC_SETF, ASM_CT_D);
+		op = ASM_OP_SETF;
+		break;
+	case ASM_OP_SETF_EXP:
+		asm_cmpltr_add(i, ASM_CC_SETF, ASM_CT_EXP);
+		op = ASM_OP_SETF;
+		break;
+	case ASM_OP_SETF_S:
+		asm_cmpltr_add(i, ASM_CC_SETF, ASM_CT_S);
+		op = ASM_OP_SETF;
+		break;
+	case ASM_OP_SETF_SIG:
+		asm_cmpltr_add(i, ASM_CC_SETF, ASM_CT_SIG);
+		op = ASM_OP_SETF;
+		break;
+	case ASM_OP_SHR_:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_NONE);
+		op = ASM_OP_SHR;
+		break;
+	case ASM_OP_SHR_U:
+		asm_cmpltr_add(i, ASM_CC_UNS, ASM_CT_U);
+		op = ASM_OP_SHR;
+		break;
+	case ASM_OP_SRLZ_D:
+		asm_cmpltr_add(i, ASM_CC_SRLZ, ASM_CT_D);
+		op = ASM_OP_SRLZ;
+		break;
+	case ASM_OP_SRLZ_I:
+		asm_cmpltr_add(i, ASM_CC_SRLZ, ASM_CT_I);
+		op = ASM_OP_SRLZ;
+		break;
+	case ASM_OP_ST1_:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_NONE);
+		op = ASM_OP_ST1;
+		break;
+	case ASM_OP_ST1_REL:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_REL);
+		op = ASM_OP_ST1;
+		break;
+	case ASM_OP_ST16_:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_NONE);
+		op = ASM_OP_ST16;
+		break;
+	case ASM_OP_ST16_REL:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_REL);
+		op = ASM_OP_ST16;
+		break;
+	case ASM_OP_ST2_:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_NONE);
+		op = ASM_OP_ST2;
+		break;
+	case ASM_OP_ST2_REL:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_REL);
+		op = ASM_OP_ST2;
+		break;
+	case ASM_OP_ST4_:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_NONE);
+		op = ASM_OP_ST4;
+		break;
+	case ASM_OP_ST4_REL:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_REL);
+		op = ASM_OP_ST4;
+		break;
+	case ASM_OP_ST8_:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_NONE);
+		op = ASM_OP_ST8;
+		break;
+	case ASM_OP_ST8_REL:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_REL);
+		op = ASM_OP_ST8;
+		break;
+	case ASM_OP_ST8_SPILL:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_SPILL);
+		op = ASM_OP_ST8;
+		break;
+	case ASM_OP_STF_SPILL:
+		asm_cmpltr_add(i, ASM_CC_STTYPE, ASM_CT_SPILL);
+		op = ASM_OP_STF;
+		break;
+	case ASM_OP_SYNC_I:
+		asm_cmpltr_add(i, ASM_CC_SYNC, ASM_CT_I);
+		op = ASM_OP_SYNC;
+		break;
+	case ASM_OP_TBIT_NZ_AND:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_NZ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_TBIT;
+		break;
+	case ASM_OP_TBIT_NZ_OR:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_NZ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_TBIT;
+		break;
+	case ASM_OP_TBIT_NZ_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_NZ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_TBIT;
+		break;
+	case ASM_OP_TBIT_Z:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_NONE);
+		op = ASM_OP_TBIT;
+		break;
+	case ASM_OP_TBIT_Z_AND:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_TBIT;
+		break;
+	case ASM_OP_TBIT_Z_OR:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_TBIT;
+		break;
+	case ASM_OP_TBIT_Z_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_TBIT;
+		break;
+	case ASM_OP_TBIT_Z_UNC:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_UNC);
+		op = ASM_OP_TBIT;
+		break;
+	case ASM_OP_TF_NZ_AND:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_NZ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_TF;
+		break;
+	case ASM_OP_TF_NZ_OR:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_NZ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_TF;
+		break;
+	case ASM_OP_TF_NZ_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_NZ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_TF;
+		break;
+	case ASM_OP_TF_Z:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_NONE);
+		op = ASM_OP_TF;
+		break;
+	case ASM_OP_TF_Z_AND:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_TF;
+		break;
+	case ASM_OP_TF_Z_OR:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_TF;
+		break;
+	case ASM_OP_TF_Z_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_TF;
+		break;
+	case ASM_OP_TF_Z_UNC:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_UNC);
+		op = ASM_OP_TF;
+		break;
+	case ASM_OP_TNAT_NZ_AND:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_NZ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_TNAT;
+		break;
+	case ASM_OP_TNAT_NZ_OR:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_NZ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_TNAT;
+		break;
+	case ASM_OP_TNAT_NZ_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_NZ);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_TNAT;
+		break;
+	case ASM_OP_TNAT_Z:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_NONE);
+		op = ASM_OP_TNAT;
+		break;
+	case ASM_OP_TNAT_Z_AND:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_AND);
+		op = ASM_OP_TNAT;
+		break;
+	case ASM_OP_TNAT_Z_OR:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR);
+		op = ASM_OP_TNAT;
+		break;
+	case ASM_OP_TNAT_Z_OR_ANDCM:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_OR_ANDCM);
+		op = ASM_OP_TNAT;
+		break;
+	case ASM_OP_TNAT_Z_UNC:
+		asm_cmpltr_add(i, ASM_CC_TREL, ASM_CT_Z);
+		asm_cmpltr_add(i, ASM_CC_CTYPE, ASM_CT_UNC);
+		op = ASM_OP_TNAT;
+		break;
+	case ASM_OP_UNPACK1_H:
+		asm_cmpltr_add(i, ASM_CC_UNPACK, ASM_CT_H);
+		op = ASM_OP_UNPACK1;
+		break;
+	case ASM_OP_UNPACK1_L:
+		asm_cmpltr_add(i, ASM_CC_UNPACK, ASM_CT_L);
+		op = ASM_OP_UNPACK1;
+		break;
+	case ASM_OP_UNPACK2_H:
+		asm_cmpltr_add(i, ASM_CC_UNPACK, ASM_CT_H);
+		op = ASM_OP_UNPACK2;
+		break;
+	case ASM_OP_UNPACK2_L:
+		asm_cmpltr_add(i, ASM_CC_UNPACK, ASM_CT_L);
+		op = ASM_OP_UNPACK2;
+		break;
+	case ASM_OP_UNPACK4_H:
+		asm_cmpltr_add(i, ASM_CC_UNPACK, ASM_CT_H);
+		op = ASM_OP_UNPACK4;
+		break;
+	case ASM_OP_UNPACK4_L:
+		asm_cmpltr_add(i, ASM_CC_UNPACK, ASM_CT_L);
+		op = ASM_OP_UNPACK4;
+		break;
+	case ASM_OP_VMSW_0:
+		asm_cmpltr_add(i, ASM_CC_VMSW, ASM_CT_0);
+		op = ASM_OP_VMSW;
+		break;
+	case ASM_OP_VMSW_1:
+		asm_cmpltr_add(i, ASM_CC_VMSW, ASM_CT_1);
+		op = ASM_OP_VMSW;
+		break;
+	case ASM_OP_XMA_H:
+		asm_cmpltr_add(i, ASM_CC_XMA, ASM_CT_H);
+		op = ASM_OP_XMA;
+		break;
+	case ASM_OP_XMA_HU:
+		asm_cmpltr_add(i, ASM_CC_XMA, ASM_CT_HU);
+		op = ASM_OP_XMA;
+		break;
+	case ASM_OP_XMA_L:
+		asm_cmpltr_add(i, ASM_CC_XMA, ASM_CT_L);
+		op = ASM_OP_XMA;
+		break;
+	default:
+		KASSERT(op < ASM_OP_NUMBER_OF_OPCODES, ("foo"));
+		break;
+	}
+	i->i_op = op;
+	return (ot);
+}
+
+static __inline void
+op_imm(struct asm_inst *i, int op, uint64_t val)
+{
+	i->i_oper[op].o_type = ASM_OPER_IMM;
+	i->i_oper[op].o_value = val;
+}
+
+static __inline void
+op_type(struct asm_inst *i, int op, enum asm_oper_type ot)
+{
+	i->i_oper[op].o_type = ot;
+}
+
+static __inline void
+op_value(struct asm_inst *i, int op, uint64_t val)
+{
+	i->i_oper[op].o_value = val;
+}
+
+static __inline void
+operand(struct asm_inst *i, int op, enum asm_oper_type ot, uint64_t bits,
+    int o, int l)
+{
+	i->i_oper[op].o_type = ot;
+	i->i_oper[op].o_value = FIELD(bits, o, l);
+}
+
+static uint64_t
+imm(uint64_t bits, int sign, int o, int l)
+{
+	uint64_t val = FIELD(bits, o, l);
+
+	if (sign && (val & (1LL << (l - 1))) != 0)
+		val |= -1LL << l;
+	return (val);
+}
+
+static void
+s_imm(struct asm_inst *i, int op, uint64_t bits, int o, int l)
+{
+	i->i_oper[op].o_type = ASM_OPER_IMM;
+	i->i_oper[op].o_value = imm(bits, 1, o, l);
+}
+
+static void
+u_imm(struct asm_inst *i, int op, uint64_t bits, int o, int l)
+{
+	i->i_oper[op].o_type = ASM_OPER_IMM;
+	i->i_oper[op].o_value = imm(bits, 0, o, l);
+}
+
+static uint64_t
+vimm(uint64_t bits, int sign, va_list ap)
+{
+	uint64_t val = 0;
+	int len = 0;
+	int frag;
+
+	while ((frag = va_arg(ap, int)) != 0) {
+		val |= (uint64_t)FIELD(bits, FRAG_OFS(frag), FRAG_LEN(frag))
+		    << len;
+		len += FRAG_LEN(frag);
+	}
+	if (sign && (val & (1LL << (len - 1))) != 0)
+		val |= -1LL << len;
+	return (val);
+}
+
+static void
+s_immf(struct asm_inst *i, int op, uint64_t bits, ...)
+{
+	va_list ap;
+	va_start(ap, bits);
+	i->i_oper[op].o_type = ASM_OPER_IMM;
+	i->i_oper[op].o_value = vimm(bits, 1, ap);
+	va_end(ap);
+}
+
+static void
+u_immf(struct asm_inst *i, int op, uint64_t bits, ...)
+{
+	va_list ap;
+	va_start(ap, bits);
+	i->i_oper[op].o_type = ASM_OPER_IMM;
+	i->i_oper[op].o_value = vimm(bits, 0, ap);
+	va_end(ap);
+}
+
+static void
+disp(struct asm_inst *i, int op, uint64_t bits, ...)
+{
+	va_list ap;
+	va_start(ap, bits);
+	i->i_oper[op].o_type = ASM_OPER_DISP;
+	i->i_oper[op].o_value = vimm(bits, 1, ap) << 4;
+	va_end(ap);
+}
+
+/*
+ * Replace the bits of *dst from position 'dl' upward with the sl-bit
+ * field of 'src' at offset 'so'.  The mask is built with an unsigned
+ * shift: '1LL << dl' is undefined behavior for dl == 63 (used by the
+ * ASM_FMT_X2 caller) because it shifts into the sign bit.
+ */
+static __inline void
+combine(uint64_t *dst, int dl, uint64_t src, int sl, int so)
+{
+	*dst = (*dst & ((1ULL << dl) - 1ULL)) |
+	    ((uint64_t)_FLD64(src, so, sl) << dl);
+}
+
+/*
+ * Extract the operands of the instruction in the given slot of bundle
+ * 'b'.  'op' is the decoded opcode, 'fmt' the instruction format and
+ * 'bits' the raw slot contents.  Returns 1 on success and 0 when the
+ * bit pattern is not a valid encoding for the format.
+ */
+int
+asm_extract(enum asm_op op, enum asm_fmt fmt, uint64_t bits,
+    struct asm_bundle *b, int slot)
+{
+	struct asm_inst *i = b->b_inst + slot;
+	enum asm_oper_type ot;
+
+	KASSERT(op != ASM_OP_NONE, ("foo"));
+	i->i_bits = bits;
+	i->i_format = fmt;
+	/* Operand 0 is the qualifying predicate; sources start at 2. */
+	i->i_srcidx = 2;
+
+	ot = asm_normalize(i, op);
+
+	/* All formats except B6 and B7 have a qualifying predicate. */
+	if (fmt != ASM_FMT_B6 && fmt != ASM_FMT_B7)
+		operand(i, 0, ASM_OPER_PREG, bits, 0, 6);
+
+	switch (fmt) {
+	case ASM_FMT_A1:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		if ((op == ASM_OP_ADD && FIELD(bits, 27, 2) == 1) ||
+		    (op == ASM_OP_SUB && FIELD(bits, 27, 2) == 0))
+			op_imm(i, 4, 1LL);
+		break;
+	case ASM_FMT_A2:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		op_imm(i, 3, 1LL + FIELD(bits, 27, 2));
+		operand(i, 4, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_A3:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		s_immf(i, 2, bits, FRAG(13,7), FRAG(36,1), 0);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_A4:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		s_immf(i, 2, bits, FRAG(13,7), FRAG(27,6), FRAG(36,1), 0);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_A5:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		s_immf(i, 2, bits, FRAG(13,7), FRAG(27,9), FRAG(22,5),
+		    FRAG(36,1), 0);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 2);
+		break;
+	case ASM_FMT_A6: /* 2 dst */
+		operand(i, 1, ASM_OPER_PREG, bits, 6, 6);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		operand(i, 3, ASM_OPER_GREG, bits, 13, 7);
+		operand(i, 4, ASM_OPER_GREG, bits, 20, 7);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_A7: /* 2 dst */
+		if (FIELD(bits, 13, 7) != 0)
+			return (0);
+		operand(i, 1, ASM_OPER_PREG, bits, 6, 6);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		operand(i, 3, ASM_OPER_GREG, bits, 13, 7);
+		operand(i, 4, ASM_OPER_GREG, bits, 20, 7);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_A8: /* 2 dst */
+		operand(i, 1, ASM_OPER_PREG, bits, 6, 6);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		s_immf(i, 3, bits, FRAG(13,7), FRAG(36,1), 0);
+		operand(i, 4, ASM_OPER_GREG, bits, 20, 7);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_A9:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_A10:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		op_imm(i, 3, 1LL + FIELD(bits, 27, 2));
+		operand(i, 4, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_B1: /* 0 dst */
+		asm_brhint(i);
+		disp(i, 1, bits, FRAG(13,20), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_B2: /* 0 dst */
+		if (FIELD(bits, 0, 6) != 0)
+			return (0);
+		asm_brhint(i);
+		disp(i, 1, bits, FRAG(13,20), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_B3:
+		asm_brhint(i);
+		operand(i, 1, ASM_OPER_BREG, bits, 6, 3);
+		disp(i, 2, bits, FRAG(13,20), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_B4: /* 0 dst */
+		asm_brhint(i);
+		operand(i, 1, ASM_OPER_BREG, bits, 13, 3);
+		break;
+	case ASM_FMT_B5:
+#if 0
+		if (FIELD(bits, 32, 1) == 0)
+			return (0);
+#endif
+		asm_brhint(i);
+		operand(i, 1, ASM_OPER_BREG, bits, 6, 3);
+		operand(i, 2, ASM_OPER_BREG, bits, 13, 3);
+		break;
+	case ASM_FMT_B6: /* 0 dst */
+		asm_brphint(i);
+		disp(i, 1, bits, FRAG(13,20), FRAG(36,1), 0);
+		disp(i, 2, bits, FRAG(6,7), FRAG(33,2), 0);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_B7: /* 0 dst */
+		asm_brphint(i);
+		operand(i, 1, ASM_OPER_BREG, bits, 13, 3);
+		disp(i, 2, bits, FRAG(6,7), FRAG(33,2), 0);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_B8:
+		/* no operands */
+		break;
+	case ASM_FMT_B9: /* 0 dst */
+		u_immf(i, 1, bits, FRAG(6,20), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_F1:
+		asm_sf(i);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_FREG, bits, 20, 7);
+		operand(i, 4, ASM_OPER_FREG, bits, 27, 7);
+		break;
+	case ASM_FMT_F2:
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_FREG, bits, 20, 7);
+		operand(i, 4, ASM_OPER_FREG, bits, 27, 7);
+		break;
+	case ASM_FMT_F3:
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_FREG, bits, 20, 7);
+		operand(i, 4, ASM_OPER_FREG, bits, 27, 7);
+		break;
+	case ASM_FMT_F4: /* 2 dst */
+		if (FIELD(bits, 33, 1)) { /* ra */
+			if (FIELD(bits, 36, 1)) /* rb */
+				asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_UNORD);
+			else
+				asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_LE);
+		} else {
+			if (FIELD(bits, 36, 1)) /* rb */
+				asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_LT);
+			else
+				asm_cmpltr_add(i, ASM_CC_FREL, ASM_CT_EQ);
+		}
+		if (FIELD(bits, 12, 1)) /* ta */
+			asm_cmpltr_add(i, ASM_CC_FCTYPE, ASM_CT_UNC);
+		else
+			asm_cmpltr_add(i, ASM_CC_FCTYPE, ASM_CT_NONE);
+		asm_sf(i);
+		operand(i, 1, ASM_OPER_PREG, bits, 6, 6);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		operand(i, 3, ASM_OPER_FREG, bits, 13, 7);
+		operand(i, 4, ASM_OPER_FREG, bits, 20, 7);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_F5: /* 2 dst */
+		operand(i, 1, ASM_OPER_PREG, bits, 6, 6);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		operand(i, 3, ASM_OPER_FREG, bits, 13, 7);
+		u_immf(i, 4, bits, FRAG(33,2), FRAG(20,7), 0);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_F6: /* 2 dst */
+		asm_sf(i);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		operand(i, 3, ASM_OPER_FREG, bits, 13, 7);
+		operand(i, 4, ASM_OPER_FREG, bits, 20, 7);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_F7: /* 2 dst */
+		asm_sf(i);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		operand(i, 3, ASM_OPER_FREG, bits, 20, 7);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_F8:
+		asm_sf(i);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_FREG, bits, 20, 7);
+		break;
+	case ASM_FMT_F9:
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_FREG, bits, 20, 7);
+		break;
+	case ASM_FMT_F10:
+		asm_sf(i);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		break;
+	case ASM_FMT_F11:
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		break;
+	case ASM_FMT_F12: /* 0 dst */
+		asm_sf(i);
+		u_imm(i, 1, bits, 13, 7);
+		u_imm(i, 2, bits, 20, 7);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_F13:
+		asm_sf(i);
+		/* no operands */
+		break;
+	case ASM_FMT_F14: /* 0 dst */
+		asm_sf(i);
+		disp(i, 1, bits, FRAG(6,20), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_F15: /* 0 dst */
+		u_imm(i, 1, bits, 6, 20);
+		break;
+	case ASM_FMT_F16: /* 0 dst */
+		u_imm(i, 1, bits, 6, 20);
+		break;
+	case ASM_FMT_I1:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		switch (FIELD(bits, 30, 2)) {
+		case 0:	op_imm(i, 4, 0LL); break;
+		case 1: op_imm(i, 4, 7LL); break;
+		case 2: op_imm(i, 4, 15LL); break;
+		case 3: op_imm(i, 4, 16LL); break;
+		}
+		break;
+	case ASM_FMT_I2:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_I3:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		u_imm(i, 3, bits, 20, 4);
+		break;
+	case ASM_FMT_I4:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		u_imm(i, 3, bits, 20, 8);
+		break;
+	case ASM_FMT_I5:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 20, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_I6:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 20, 7);
+		u_imm(i, 3, bits, 14, 5);
+		break;
+	case ASM_FMT_I7:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_I8:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		op_imm(i, 3, 31LL - FIELD(bits, 20, 5));
+		break;
+	case ASM_FMT_I9:
+		if (FIELD(bits, 13, 7) != 0)
+			return (0);
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_I10:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		u_imm(i, 4, bits, 27, 6);
+		break;
+	case ASM_FMT_I11:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 20, 7);
+		u_imm(i, 3, bits, 14, 6);
+		op_imm(i, 4, 1LL + FIELD(bits, 27, 6));
+		break;
+	case ASM_FMT_I12:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		op_imm(i, 3, 63LL - FIELD(bits, 20, 6));
+		op_imm(i, 4, 1LL + FIELD(bits, 27, 6));
+		break;
+	case ASM_FMT_I13:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		s_immf(i, 2, bits, FRAG(13,7), FRAG(36,1), 0);
+		op_imm(i, 3, 63LL - FIELD(bits, 20, 6));
+		op_imm(i, 4, 1LL + FIELD(bits, 27, 6));
+		break;
+	case ASM_FMT_I14:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		s_imm(i, 2, bits, 36, 1);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		op_imm(i, 4, 63LL - FIELD(bits, 14, 6));
+		op_imm(i, 5, 1LL + FIELD(bits, 27, 6));
+		break;
+	case ASM_FMT_I15:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		op_imm(i, 4, 63LL - FIELD(bits, 31, 6));
+		op_imm(i, 5, 1LL + FIELD(bits, 27, 4));
+		break;
+	case ASM_FMT_I16: /* 2 dst */
+		operand(i, 1, ASM_OPER_PREG, bits, 6, 6);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		u_imm(i, 4, bits, 14, 6);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_I17: /* 2 dst */
+		operand(i, 1, ASM_OPER_PREG, bits, 6, 6);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		operand(i, 3, ASM_OPER_GREG, bits, 20, 7);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_I18:
+		u_immf(i, 1, bits, FRAG(6,20), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_I19:
+		u_immf(i, 1, bits, FRAG(6,20), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_I20: /* 0 dst */
+		operand(i, 1, ASM_OPER_GREG, bits, 13, 7);
+		disp(i, 2, bits, FRAG(6,7), FRAG(20,13), FRAG(36,1), 0);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_I21:
+		switch (FIELD(bits, 20, 2)) { /* wh */
+		case 0:	asm_cmpltr_add(i, ASM_CC_MWH, ASM_CT_SPTK); break;
+		case 1:	asm_cmpltr_add(i, ASM_CC_MWH, ASM_CT_NONE); break;
+		case 2:	asm_cmpltr_add(i, ASM_CC_MWH, ASM_CT_DPTK); break;
+		case 3:	return (0);
+		}
+		if (FIELD(bits, 23, 1)) /* ih */
+			asm_cmpltr_add(i, ASM_CC_IH, ASM_CT_IMP);
+		else
+			asm_cmpltr_add(i, ASM_CC_IH, ASM_CT_NONE);
+		operand(i, 1, ASM_OPER_BREG, bits, 6, 3);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		disp(i, 3, bits, FRAG(24,9), 0);
+		break;
+	case ASM_FMT_I22:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_BREG, bits, 13, 3);
+		break;
+	case ASM_FMT_I23:
+		op_type(i, 1, ASM_OPER_PR);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		u_immf(i, 3, bits, FRAG(6,7), FRAG(24,8), FRAG(36,1), 0);
+		i->i_oper[3].o_value <<= 1;
+		break;
+	case ASM_FMT_I24:
+		op_type(i, 1, ASM_OPER_PR_ROT);
+		s_immf(i, 2, bits, FRAG(6,27), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_I25:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		op_type(i, 2, ot);
+		break;
+	case ASM_FMT_I26:
+		operand(i, 1, ASM_OPER_AREG, bits, 20, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_I27:
+		operand(i, 1, ASM_OPER_AREG, bits, 20, 7);
+		s_immf(i, 2, bits, FRAG(13,7), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_I28:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_AREG, bits, 20, 7);
+		break;
+	case ASM_FMT_I29:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_I30: /* 2 dst */
+		operand(i, 1, ASM_OPER_PREG, bits, 6, 6);
+		operand(i, 2, ASM_OPER_PREG, bits, 27, 6);
+		op_imm(i, 3, 32LL + FIELD(bits, 14, 5));
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_M1:
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		if (i->i_op == ASM_OP_LD16) {
+			op_type(i, 2, ASM_OPER_AREG);
+			op_value(i, 2, AR_CSD);
+			i->i_srcidx++;
+		}
+		operand(i, i->i_srcidx, ASM_OPER_MEM, bits, 20, 7);
+		break;
+	case ASM_FMT_M2:
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_MEM, bits, 20, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M3:
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_MEM, bits, 20, 7);
+		s_immf(i, 3, bits, FRAG(13,7), FRAG(27,1), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_M4:
+		asm_hint(i, ASM_CC_STHINT);
+		operand(i, 1, ASM_OPER_MEM, bits, 20, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		if (i->i_op == ASM_OP_ST16) {
+			op_type(i, 3, ASM_OPER_AREG);
+			op_value(i, 3, AR_CSD);
+		}
+		break;
+	case ASM_FMT_M5:
+		asm_hint(i, ASM_CC_STHINT);
+		operand(i, 1, ASM_OPER_MEM, bits, 20, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		s_immf(i, 3, bits, FRAG(6,7), FRAG(27,1), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_M6:
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_MEM, bits, 20, 7);
+		break;
+	case ASM_FMT_M7:
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_MEM, bits, 20, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M8:
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_MEM, bits, 20, 7);
+		s_immf(i, 3, bits, FRAG(13,7), FRAG(27,1), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_M9:
+		asm_hint(i, ASM_CC_STHINT);
+		operand(i, 1, ASM_OPER_MEM, bits, 20, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M10:
+		asm_hint(i, ASM_CC_STHINT);
+		operand(i, 1, ASM_OPER_MEM, bits, 20, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		s_immf(i, 3, bits, FRAG(6,7), FRAG(27,1), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_M11: /* 2 dst */
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_MEM, bits, 20, 7);
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_M12: /* 2 dst */
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		operand(i, 3, ASM_OPER_MEM, bits, 20, 7);
+		op_imm(i, 4, 8LL << FIELD(bits, 30, 1));
+		i->i_srcidx++;
+		break;
+	case ASM_FMT_M13:
+		asm_hint(i, ASM_CC_LFHINT);
+		operand(i, 1, ASM_OPER_MEM, bits, 20, 7);
+		break;
+	case ASM_FMT_M14: /* 0 dst */
+		asm_hint(i, ASM_CC_LFHINT);
+		operand(i, 1, ASM_OPER_MEM, bits, 20, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_M15: /* 0 dst */
+		asm_hint(i, ASM_CC_LFHINT);
+		operand(i, 1, ASM_OPER_MEM, bits, 20, 7);
+		s_immf(i, 2, bits, FRAG(13,7), FRAG(27,1), FRAG(36,1), 0);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_M16:
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_MEM, bits, 20, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 13, 7);
+		if (i->i_op == ASM_OP_CMP8XCHG16) {
+			op_type(i, 4, ASM_OPER_AREG);
+			op_value(i, 4, AR_CSD);
+			op_type(i, 5, ASM_OPER_AREG);
+			op_value(i, 5, AR_CCV);
+		} else {
+			if (FIELD(bits, 30, 6) < 8) {
+				op_type(i, 4, ASM_OPER_AREG);
+				op_value(i, 4, AR_CCV);
+			}
+		}
+		break;
+	case ASM_FMT_M17:
+		asm_hint(i, ASM_CC_LDHINT);
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_MEM, bits, 20, 7);
+		switch (FIELD(bits, 13, 2)) {
+		case 0: op_imm(i, 3, 1LL << 4); break;
+		case 1: op_imm(i, 3, 1LL << 3); break;
+		case 2:	op_imm(i, 3, 1LL << 2); break;
+		case 3: op_imm(i, 3, 1LL); break;
+		}
+		if (FIELD(bits, 15, 1))
+			i->i_oper[3].o_value *= -1LL;
+		break;
+	case ASM_FMT_M18:
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M19:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_FREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M20: /* 0 dst */
+		operand(i, 1, ASM_OPER_GREG, bits, 13, 7);
+		disp(i, 2, bits, FRAG(6,7), FRAG(20,13), FRAG(36,1), 0);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_M21: /* 0 dst */
+		operand(i, 1, ASM_OPER_FREG, bits, 13, 7);
+		disp(i, 2, bits, FRAG(6,7), FRAG(20,13), FRAG(36,1), 0);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_M22: /* 0 dst */
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		disp(i, 2, bits, FRAG(13,20), FRAG(36,1), 0);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_M23: /* 0 dst */
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		disp(i, 2, bits, FRAG(13,20), FRAG(36,1), 0);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_M24:
+		/* no operands */
+		break;
+	case ASM_FMT_M25:
+		if (FIELD(bits, 0, 6) != 0)
+			return (0);
+		/* no operands */
+		break;
+	case ASM_FMT_M26:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		break;
+	case ASM_FMT_M27:
+		operand(i, 1, ASM_OPER_FREG, bits, 6, 7);
+		break;
+	case ASM_FMT_M28:
+		operand(i, 1, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_M29:
+		operand(i, 1, ASM_OPER_AREG, bits, 20, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M30:
+		operand(i, 1, ASM_OPER_AREG, bits, 20, 7);
+		s_immf(i, 2, bits, FRAG(13,7), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_M31:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_AREG, bits, 20, 7);
+		break;
+	case ASM_FMT_M32:
+		operand(i, 1, ASM_OPER_CREG, bits, 20, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M33:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_CREG, bits, 20, 7);
+		break;
+	case ASM_FMT_M34: {
+		/* alloc: decode the frame layout (locals, outputs, rotating). */
+		uint64_t loc, out;
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		op_type(i, 2, ASM_OPER_AREG);
+		op_value(i, 2, AR_PFS);
+		loc = FIELD(bits, 20, 7);
+		out = FIELD(bits, 13, 7) - loc;
+		op_imm(i, 3, 0);
+		op_imm(i, 4, loc);
+		op_imm(i, 5, out);
+		op_imm(i, 6, (uint64_t)FIELD(bits, 27, 4) << 3);
+		break;
+	}
+	case ASM_FMT_M35:
+		if (FIELD(bits, 27, 6) == 0x2D)
+			op_type(i, 1, ASM_OPER_PSR_L);
+		else
+			op_type(i, 1, ASM_OPER_PSR_UM);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M36:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		if (FIELD(bits, 27, 6) == 0x25)
+			op_type(i, 2, ASM_OPER_PSR);
+		else
+			op_type(i, 2, ASM_OPER_PSR_UM);
+		break;
+	case ASM_FMT_M37:
+		u_immf(i, 1, bits, FRAG(6,20), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_M38:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 20, 7);
+		operand(i, 3, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M39:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 20, 7);
+		u_imm(i, 3, bits, 13, 2);
+		break;
+	case ASM_FMT_M40: /* 0 dst */
+		operand(i, 1, ASM_OPER_GREG, bits, 20, 7);
+		u_imm(i, 2, bits, 13, 2);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_M41:
+		operand(i, 1, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M42:
+		operand(i, 1, ot, bits, 20, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		break;
+	case ASM_FMT_M43:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ot, bits, 20, 7);
+		break;
+	case ASM_FMT_M44:
+		u_immf(i, 1, bits, FRAG(6,21), FRAG(31,2), FRAG(36,1), 0);
+		break;
+	case ASM_FMT_M45: /* 0 dst */
+		operand(i, 1, ASM_OPER_GREG, bits, 20, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 13, 7);
+		i->i_srcidx--;
+		break;
+	case ASM_FMT_M46:
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		operand(i, 2, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_M47:
+		operand(i, 1, ASM_OPER_GREG, bits, 20, 7);
+		break;
+	case ASM_FMT_M48:
+		u_immf(i, 1, bits, FRAG(6,20), FRAG(36,1), 0);
+		break;
+	/* X-unit formats: the immediate is extended with bits from slot 1. */
+	case ASM_FMT_X1:
+		KASSERT(slot == 2, ("foo"));
+		u_immf(i, 1, bits, FRAG(6,20), FRAG(36,1), 0);
+		combine(&i->i_oper[1].o_value, 21, b->b_inst[1].i_bits, 41, 0);
+		break;
+	case ASM_FMT_X2:
+		KASSERT(slot == 2, ("foo"));
+		operand(i, 1, ASM_OPER_GREG, bits, 6, 7);
+		u_immf(i, 2, bits, FRAG(13,7), FRAG(27,9), FRAG(22,5),
+		    FRAG(21,1), 0);
+		combine(&i->i_oper[2].o_value, 22, b->b_inst[1].i_bits, 41, 0);
+		combine(&i->i_oper[2].o_value, 63, bits, 1, 36);
+		break;
+	case ASM_FMT_X3:
+		KASSERT(slot == 2, ("foo"));
+		asm_brhint(i);
+		u_imm(i, 1, bits, 13, 20);
+		combine(&i->i_oper[1].o_value, 20, b->b_inst[1].i_bits, 39, 2);
+		combine(&i->i_oper[1].o_value, 59, bits, 1, 36);
+		i->i_oper[1].o_value <<= 4;
+		i->i_oper[1].o_type = ASM_OPER_DISP;
+		break;
+	case ASM_FMT_X4:
+		KASSERT(slot == 2, ("foo"));
+		asm_brhint(i);
+		operand(i, 1, ASM_OPER_BREG, bits, 6, 3);
+		u_imm(i, 2, bits, 13, 20);
+		combine(&i->i_oper[2].o_value, 20, b->b_inst[1].i_bits, 39, 2);
+		combine(&i->i_oper[2].o_value, 59, bits, 1, 36);
+		i->i_oper[2].o_value <<= 4;
+		i->i_oper[2].o_type = ASM_OPER_DISP;
+		break;
+	case ASM_FMT_X5:
+		KASSERT(slot == 2, ("foo"));
+		u_immf(i, 1, bits, FRAG(6,20), FRAG(36,1), 0);
+		combine(&i->i_oper[1].o_value, 21, b->b_inst[1].i_bits, 41, 0);
+		break;
+	default:
+		KASSERT(fmt == ASM_FMT_NONE, ("foo"));
+		return (0);
+	}
+
+	return (1);
+}


Property changes on: trunk/sys/ia64/disasm/disasm_extract.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/disasm/disasm_format.c
===================================================================
--- trunk/sys/ia64/disasm/disasm_format.c	                        (rev 0)
+++ trunk/sys/ia64/disasm/disasm_format.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,347 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000-2006 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/disasm/disasm_format.c 159916 2006-06-24 19:21:11Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <ia64/disasm/disasm_int.h>
+#include <ia64/disasm/disasm.h>
+
+/*
+ * Mnemonics (keep in sync with enum asm_op).
+ * Index 0 is NULL and is used as the fallback for opcodes that have no
+ * printable mnemonic; see asm_mnemonic().
+ */
+static const char *asm_mnemonics[] = {
+	NULL,
+	"add", "addl", "addp4", "adds", "alloc", "and", "andcm",
+	"br", "break", "brl", "brp", "bsw",
+	"chk", "clrrrb", "cmp", "cmp4", "cmp8xchg16", "cmpxchg1", "cmpxchg2",
+	"cmpxchg4", "cmpxchg8", "cover", "czx1", "czx2",
+	"dep",
+	"epc", "extr",
+	"famax", "famin", "fand", "fandcm", "fc", "fchkf", "fclass", "fclrf",
+	"fcmp", "fcvt", "fetchadd4", "fetchadd8", "flushrs", "fma", "fmax",
+	"fmerge", "fmin", "fmix", "fms", "fnma", "for", "fpack", "fpamax",
+	"fpamin", "fpcmp", "fpcvt", "fpma", "fpmax", "fpmerge", "fpmin",
+	"fpms", "fpnma", "fprcpa", "fprsqrta", "frcpa", "frsqrta", "fselect",
+	"fsetc", "fswap", "fsxt", "fwb", "fxor",
+	"getf",
+	"hint",
+	"invala", "itc", "itr",
+	"ld1", "ld16", "ld2", "ld4", "ld8", "ldf", "ldf8", "ldfd", "ldfe",
+	"ldfp8", "ldfpd", "ldfps", "ldfs", "lfetch", "loadrs",
+	"mf", "mix1", "mix2", "mix4", "mov", "movl", "mux1", "mux2",
+	"nop",
+	"or",
+	"pack2", "pack4", "padd1", "padd2", "padd4", "pavg1", "pavg2",
+	"pavgsub1", "pavgsub2", "pcmp1", "pcmp2", "pcmp4", "pmax1", "pmax2",
+	"pmin1", "pmin2", "pmpy2", "pmpyshr2", "popcnt", "probe", "psad1",
+	"pshl2", "pshl4", "pshladd2", "pshr2", "pshr4", "pshradd2", "psub1",
+	"psub2", "psub4", "ptc", "ptr",
+	"rfi", "rsm", "rum",
+	"setf", "shl", "shladd", "shladdp4", "shr", "shrp", "srlz", "ssm",
+	"st1", "st16", "st2", "st4", "st8", "stf", "stf8", "stfd", "stfe",
+	"stfs", "sub", "sum", "sxt1", "sxt2", "sxt4", "sync",
+	"tak", "tbit", "tf", "thash", "tnat", "tpa", "ttag",
+	"unpack1", "unpack2", "unpack4",
+	"vmsw",
+	"xchg1", "xchg2", "xchg4", "xchg8", "xma", "xor",
+	"zxt1", "zxt2", "zxt4"
+};
+
+/*
+ * Completers (keep in sync with enum asm_cmpltr_type).
+ * Index 0 is the empty completer; each entry includes its leading dot.
+ */
+static const char *asm_completers[] = {
+	"",
+	".0", ".1",
+	".a", ".acq", ".and",
+	".b", ".bias",
+	".c.clr", ".c.clr.acq", ".c.nc", ".call", ".cexit", ".cloop", ".clr",
+	".ctop",
+	".d", ".dc.dc", ".dc.nt", ".dpnt", ".dptk",
+	".e", ".eq", ".excl", ".exit", ".exp",
+	".f", ".fault", ".few", ".fill", ".fx", ".fxu",
+	".g", ".ga", ".ge", ".gt",
+	".h", ".hu",
+	".i", ".ia", ".imp",
+	".l", ".le", ".loop", ".lr", ".lt", ".ltu",
+	".m", ".many",
+	".nc", ".ne", ".neq", ".nl", ".nle", ".nlt", ".nm", ".nr", ".ns",
+	".nt.dc", ".nt.nt", ".nt.tk", ".nt1", ".nt2", ".nta", ".nz",
+	".or", ".or.andcm", ".ord",
+	".pr",
+	".r", ".raz", ".rel", ".ret", ".rw",
+	".s", ".s0", ".s1", ".s2", ".s3", ".sa", ".se", ".sig", ".spill",
+	".spnt", ".sptk", ".sss",
+	".tk.dc", ".tk.nt", ".tk.tk", ".trunc",
+	".u", ".unc", ".unord", ".uss", ".uus", ".uuu",
+	".w", ".wexit", ".wtop",
+	".x", ".xf",
+	".z"
+};
+
+/* Copy the textual form of completer 'c' (including its dot) into buf. */
+void
+asm_completer(const struct asm_cmpltr *c, char *buf)
+{
+	const char *name;
+
+	name = asm_completers[c->c_type];
+	strcpy(buf, name);
+}
+
+/*
+ * Copy the mnemonic for opcode 'op' into buf.  Opcodes outside the
+ * printable range map to table index 0, which holds NULL; guard against
+ * it so we don't hand NULL to strcpy (the table's slot 0 is NULL).
+ */
+void
+asm_mnemonic(enum asm_op op, char *buf)
+{
+	const char *mnem;
+
+	mnem = asm_mnemonics[(op < ASM_OP_INTERNAL_OPCODES) ? op : 0];
+	strcpy(buf, (mnem != NULL) ? mnem : "");
+}
+
+/*
+ * Format operand 'o' as text into 'buf'.  'ip' is the address of the
+ * bundle and is added to displacement operands to print an absolute
+ * target address.  Registers with an architected alias (ar.*, cr.*,
+ * rp, gp, sp, tp) are printed by name; everything else falls through
+ * to the numeric form.
+ */
+void
+asm_operand(const struct asm_oper *o, char *buf, uint64_t ip)
+{
+	const char *n;
+
+	n = NULL;
+	switch (o->o_type) {
+	case ASM_OPER_AREG:
+		switch ((int)o->o_value) {
+		case AR_K0: n = "k0"; break;
+		case AR_K1: n = "k1"; break;
+		case AR_K2: n = "k2"; break;
+		case AR_K3: n = "k3"; break;
+		case AR_K4: n = "k4"; break;
+		case AR_K5: n = "k5"; break;
+		case AR_K6: n = "k6"; break;
+		case AR_K7: n = "k7"; break;
+		case AR_RSC: n = "rsc"; break;
+		case AR_BSP: n = "bsp"; break;
+		case AR_BSPSTORE: n = "bspstore"; break;
+		case AR_RNAT: n = "rnat"; break;
+		case AR_FCR: n = "fcr"; break;
+		case AR_EFLAG: n = "eflag"; break;
+		case AR_CSD: n = "csd"; break;
+		case AR_SSD: n = "ssd"; break;
+		case AR_CFLG: n = "cflg"; break;
+		case AR_FSR: n = "fsr"; break;
+		case AR_FIR: n = "fir"; break;
+		case AR_FDR: n = "fdr"; break;
+		case AR_CCV: n = "ccv"; break;
+		case AR_UNAT: n = "unat"; break;
+		case AR_FPSR: n = "fpsr"; break;
+		case AR_ITC: n = "itc"; break;
+		case AR_PFS: n = "pfs"; break;
+		case AR_LC: n = "lc"; break;
+		case AR_EC: n = "ec"; break;
+		default:
+			/* Unnamed application register. */
+			sprintf(buf, "ar%d", (int)o->o_value);
+			return;
+		}
+		sprintf(buf, "ar.%s", n);
+		return;
+	case ASM_OPER_BREG:
+		/* b0 is the return pointer. */
+		if (o->o_value != 0)
+			sprintf(buf, "b%d", (int)o->o_value);
+		else
+			strcpy(buf, "rp");
+		return;
+	case ASM_OPER_CPUID:
+		n = "cpuid";
+		break;
+	case ASM_OPER_CREG:
+		switch ((int)o->o_value) {
+		case CR_DCR: n = "dcr"; break;
+		case CR_ITM: n = "itm"; break;
+		case CR_IVA: n = "iva"; break;
+		case CR_PTA: n = "pta"; break;
+		case CR_IPSR: n = "ipsr"; break;
+		case CR_ISR: n = "isr"; break;
+		case CR_IIP: n = "iip"; break;
+		case CR_IFA: n = "ifa"; break;
+		case CR_ITIR: n = "itir"; break;
+		case CR_IIPA: n = "iipa"; break;
+		case CR_IFS: n = "ifs"; break;
+		case CR_IIM: n = "iim"; break;
+		case CR_IHA: n = "iha"; break;
+		case CR_LID: n = "lid"; break;
+		case CR_IVR: n = "ivr"; break;
+		case CR_TPR: n = "tpr"; break;
+		case CR_EOI: n = "eoi"; break;
+		case CR_IRR0: n = "irr0"; break;
+		case CR_IRR1: n = "irr1"; break;
+		case CR_IRR2: n = "irr2"; break;
+		case CR_IRR3: n = "irr3"; break;
+		case CR_ITV: n = "itv"; break;
+		case CR_PMV: n = "pmv"; break;
+		case CR_CMCV: n = "cmcv"; break;
+		case CR_LRR0: n = "lrr0"; break;
+		case CR_LRR1: n = "lrr1"; break;
+		default:
+			/* Unnamed control register. */
+			sprintf(buf, "cr%d", (int)o->o_value);
+			return;
+		}
+		sprintf(buf, "cr.%s", n);
+		return;
+	case ASM_OPER_DBR:
+		n = "dbr";
+		break;
+	case ASM_OPER_DISP:
+		/* Displacements are relative to the bundle address. */
+		sprintf(buf, "%lx", ip + o->o_value);
+		return;
+	case ASM_OPER_DTR:
+		n = "dtr";
+		break;
+	case ASM_OPER_FREG:
+		sprintf(buf, "f%d", (int)o->o_value);
+		return;
+	case ASM_OPER_GREG:
+		break;
+	case ASM_OPER_IBR:
+		n = "ibr";
+		break;
+	case ASM_OPER_IMM:
+		sprintf(buf, "0x%lx", o->o_value);
+		return;
+	case ASM_OPER_IP:
+		strcpy(buf, "ip");
+		return;
+	case ASM_OPER_ITR:
+		n = "itr";
+		break;
+	case ASM_OPER_MEM:
+		n = "";
+		break;
+	case ASM_OPER_MSR:
+		n = "msr";
+		break;
+	case ASM_OPER_PKR:
+		n = "pkr";
+		break;
+	case ASM_OPER_PMC:
+		n = "pmc";
+		break;
+	case ASM_OPER_PMD:
+		n = "pmd";
+		break;
+	case ASM_OPER_PR:
+		strcpy(buf, "pr");
+                return;
+	case ASM_OPER_PR_ROT:
+		strcpy(buf, "pr.rot");
+		return;
+	case ASM_OPER_PREG:
+		sprintf(buf, "p%d", (int)o->o_value);
+		return;
+	case ASM_OPER_PSR:
+		strcpy(buf, "psr");
+		return;
+	case ASM_OPER_PSR_L:
+		strcpy(buf, "psr.l");
+		return;
+	case ASM_OPER_PSR_UM:
+		strcpy(buf, "psr.um");
+		return;
+	case ASM_OPER_RR:
+		n = "rr";
+		break;
+	case ASM_OPER_NONE:
+		KASSERT(0, ("foo"));
+		break;
+	}
+	/*
+	 * Fall-through cases: plain general registers (n == NULL) and
+	 * indirect register files / memory operands, which print a
+	 * general register inside "name[...]" brackets.
+	 */
+	if (n != NULL)
+		buf += sprintf(buf, "%s[", n);
+	switch ((int)o->o_value) {
+	case 1:	strcpy(buf, "gp"); buf += 2; break;
+	case 12: strcpy(buf, "sp"); buf += 2; break;
+	case 13: strcpy(buf, "tp"); buf += 2; break;
+	default: buf += sprintf(buf, "r%d", (int)o->o_value); break;
+	}
+	if (n != NULL)
+		strcpy(buf, "]");
+}
+
+/* Print all three instruction slots of the bundle at address 'ip'. */
+void
+asm_print_bundle(const struct asm_bundle *b, uint64_t ip)
+{
+	int slot;
+
+	for (slot = 0; slot < 3; slot++)
+		asm_print_inst(b, slot, ip);
+}
+
+/*
+ * Print the instruction in the given slot of bundle 'b' at address
+ * 'ip': address + slot letter, qualifying predicate, mnemonic with
+ * completers, and the operand list with " = " separating destinations
+ * from sources.  'L' slots and empty (ASM_OP_NONE) slots are skipped.
+ *
+ * Buffers that were filled by the formatting helpers are printed via
+ * "%s" instead of being passed as the format string (CERT FIO30-C).
+ */
+void
+asm_print_inst(const struct asm_bundle *b, int slot, uint64_t ip)
+{
+	char buf[32];
+	const struct asm_inst *i;
+	const char *tmpl;
+	int n, w;
+
+	tmpl = b->b_templ + slot;
+	if (*tmpl == ';' || (slot == 2 && b->b_templ[1] == ';'))
+		tmpl++;
+	i = b->b_inst + slot;
+	if (*tmpl == 'L' || i->i_op == ASM_OP_NONE)
+		return;
+
+	/* Address + slot. */
+	printf("%lx[%c] ", ip + slot, *tmpl);
+
+	/* Predicate. */
+	if (i->i_oper[0].o_value != 0) {
+		asm_operand(i->i_oper+0, buf, ip);
+		w = printf("(%s)", buf);
+	} else
+		w = 0;
+	while (w++ < 8)
+		printf(" ");
+
+	/* Mnemonic & completers. */
+	asm_mnemonic(i->i_op, buf);
+	w = printf("%s", buf);
+	n = 0;
+	while (n < i->i_ncmpltrs) {
+		asm_completer(i->i_cmpltr + n, buf);
+		w += printf("%s", buf);
+		n++;
+	}
+	while (w++ < 15)
+		printf(" ");
+	printf(" ");
+
+	/* Operands. */
+	n = 1;
+	while (n < 7 && i->i_oper[n].o_type != ASM_OPER_NONE) {
+		if (n > 1) {
+			if (n == i->i_srcidx)
+				printf(" = ");
+			else
+				printf(", ");
+		}
+		asm_operand(i->i_oper + n, buf, ip);
+		printf("%s", buf);
+		n++;
+	}
+	printf("\n");
+}


Property changes on: trunk/sys/ia64/disasm/disasm_format.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/disasm/disasm_int.h
===================================================================
--- trunk/sys/ia64/disasm/disasm_int.h	                        (rev 0)
+++ trunk/sys/ia64/disasm/disasm_int.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,223 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000-2006 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/disasm/disasm_int.h 159916 2006-06-24 19:21:11Z marcel $
+ */
+
+#ifndef _DISASM_INT_H_
+#define	_DISASM_INT_H_
+
+#ifdef _DISASM_H_
+#error	Include disasm_int.h before disasm.h
+#endif
+
+/*
+ * Instruction bundle specifics.
+ *
+ * An ia64 bundle is 128 bits: a 5-bit template number followed by
+ * three 41-bit instruction slots.
+ */
+#define	TMPL_BITS	5
+#define	SLOT_BITS	41
+#define	SLOT_COUNT	3
+
+/* Bundle size in bits (128) and bytes (16). */
+#define	BUNDLE_SIZE	(SLOT_COUNT * SLOT_BITS + TMPL_BITS)
+#define	BUNDLE_BYTES	((BUNDLE_SIZE+7) >> 3)
+#define	TMPL_MASK	((1 << TMPL_BITS) - 1)
+#define	SLOT_MASK	((1ULL << SLOT_BITS) - 1ULL)
+/* Template number: the low 5 bits of the bundle's first byte. */
+#define	TMPL(p)		(*(const uint8_t*)(p) & TMPL_MASK)
+#define	_U32(p,i)	((uint64_t)(((const uint32_t*)(p))[i]))
+#define	_SLOT(p,i)	(_U32(p,i) | (_U32(p,(i)+1)<<32))
+/*
+ * Extract slot i (0..2) as a 41-bit value.  Slot i starts at bit
+ * 5 + 41*i of the bundle; _SLOT() loads the two 32-bit words that
+ * contain it, so the remaining shift is that offset modulo 32, which
+ * equals TMPL_BITS + 9*i (5, 14, 23) — written here as
+ * TMPL_BITS + (i<<3) + i.
+ */
+#define	SLOT(p,i)	((_SLOT(p,i) >> (TMPL_BITS+((i)<<3)+(i))) & SLOT_MASK)
+
+/*
+ * Instruction specifics: field extraction from a 41-bit slot value.
+ */
+#define	_FLD64(i,o,l)	((i >> o) & ((1LL << l) - 1LL))
+#define	FIELD(i,o,l)	((uint32_t)_FLD64(i,o,l))
+/* Major opcode: bits 40:37 of the slot. */
+#define	OPCODE(i)	FIELD(i, 37, 4)
+#define	QP_BITS		6
+/* Qualifying predicate: bits 5:0 of the slot. */
+#define	QP(i)		FIELD(i, 0, QP_BITS)
+#define	REG_BITS	7
+/* The r'th 7-bit register field (r = 1..), following the predicate. */
+#define	REG(i,r)	FIELD(i, ((r) - 1) * REG_BITS + QP_BITS, REG_BITS)
+
+/*
+ * Opcodes used internally as sentinels to denote either a lack of more
+ * specific information or to preserve the additional state/information
+ * we already have and need to pass around for later use.
+ */
+#define	ASM_ADDITIONAL_OPCODES						\
+	ASM_OP_INTERNAL_OPCODES,					\
+	ASM_OP_BR_CALL, ASM_OP_BR_CEXIT, ASM_OP_BR_CLOOP,		\
+	ASM_OP_BR_COND, ASM_OP_BR_CTOP, ASM_OP_BR_IA, ASM_OP_BR_RET,	\
+	ASM_OP_BR_WEXIT, ASM_OP_BR_WTOP,				\
+	ASM_OP_BREAK_B, ASM_OP_BREAK_F, ASM_OP_BREAK_I, ASM_OP_BREAK_M,	\
+	ASM_OP_BREAK_X,							\
+	ASM_OP_BRL_COND, ASM_OP_BRL_CALL,				\
+	ASM_OP_BRP_, ASM_OP_BRP_RET,					\
+	ASM_OP_BSW_0, ASM_OP_BSW_1,					\
+	ASM_OP_CHK_A_CLR, ASM_OP_CHK_A_NC, ASM_OP_CHK_S,		\
+	ASM_OP_CHK_S_I, ASM_OP_CHK_S_M,					\
+	ASM_OP_CLRRRB_, ASM_OP_CLRRRB_PR,				\
+	ASM_OP_CMP_EQ, ASM_OP_CMP_EQ_AND, ASM_OP_CMP_EQ_OR,		\
+	ASM_OP_CMP_EQ_OR_ANDCM, ASM_OP_CMP_EQ_UNC, ASM_OP_CMP_GE_AND,	\
+	ASM_OP_CMP_GE_OR, ASM_OP_CMP_GE_OR_ANDCM, ASM_OP_CMP_GT_AND,	\
+	ASM_OP_CMP_GT_OR, ASM_OP_CMP_GT_OR_ANDCM, ASM_OP_CMP_LE_AND,	\
+	ASM_OP_CMP_LE_OR, ASM_OP_CMP_LE_OR_ANDCM, ASM_OP_CMP_LT,	\
+	ASM_OP_CMP_LT_AND, ASM_OP_CMP_LT_OR, ASM_OP_CMP_LT_OR_ANDCM,	\
+	ASM_OP_CMP_LT_UNC, ASM_OP_CMP_LTU, ASM_OP_CMP_LTU_UNC,		\
+	ASM_OP_CMP_NE_AND, ASM_OP_CMP_NE_OR, ASM_OP_CMP_NE_OR_ANDCM,	\
+	ASM_OP_CMP4_EQ, ASM_OP_CMP4_EQ_AND, ASM_OP_CMP4_EQ_OR,		\
+	ASM_OP_CMP4_EQ_OR_ANDCM, ASM_OP_CMP4_EQ_UNC, ASM_OP_CMP4_GE_AND,\
+	ASM_OP_CMP4_GE_OR, ASM_OP_CMP4_GE_OR_ANDCM, ASM_OP_CMP4_GT_AND,	\
+	ASM_OP_CMP4_GT_OR, ASM_OP_CMP4_GT_OR_ANDCM, ASM_OP_CMP4_LE_AND,	\
+	ASM_OP_CMP4_LE_OR, ASM_OP_CMP4_LE_OR_ANDCM, ASM_OP_CMP4_LT,	\
+	ASM_OP_CMP4_LT_AND, ASM_OP_CMP4_LT_OR, ASM_OP_CMP4_LT_OR_ANDCM,	\
+	ASM_OP_CMP4_LT_UNC, ASM_OP_CMP4_LTU, ASM_OP_CMP4_LTU_UNC,	\
+	ASM_OP_CMP4_NE_AND, ASM_OP_CMP4_NE_OR, ASM_OP_CMP4_NE_OR_ANDCM,	\
+	ASM_OP_CMP8XCHG16_ACQ, ASM_OP_CMP8XCHG16_REL,			\
+	ASM_OP_CMPXCHG1_ACQ, ASM_OP_CMPXCHG1_REL,			\
+	ASM_OP_CMPXCHG2_ACQ, ASM_OP_CMPXCHG2_REL,			\
+	ASM_OP_CMPXCHG4_ACQ, ASM_OP_CMPXCHG4_REL,			\
+	ASM_OP_CMPXCHG8_ACQ, ASM_OP_CMPXCHG8_REL,			\
+	ASM_OP_CZX1_L, ASM_OP_CZX1_R,					\
+	ASM_OP_CZX2_L, ASM_OP_CZX2_R,					\
+	ASM_OP_DEP_, ASM_OP_DEP_Z,					\
+	ASM_OP_FC_, ASM_OP_FC_I,					\
+	ASM_OP_FCLASS_M,						\
+	ASM_OP_FCVT_FX, ASM_OP_FCVT_FX_TRUNC, ASM_OP_FCVT_FXU,		\
+	ASM_OP_FCVT_FXU_TRUNC, ASM_OP_FCVT_XF,				\
+	ASM_OP_FETCHADD4_ACQ, ASM_OP_FETCHADD4_REL,			\
+	ASM_OP_FETCHADD8_ACQ, ASM_OP_FETCHADD8_REL,			\
+	ASM_OP_FMA_, ASM_OP_FMA_D, ASM_OP_FMA_S,			\
+	ASM_OP_FMERGE_NS, ASM_OP_FMERGE_S, ASM_OP_FMERGE_SE,		\
+	ASM_OP_FMIX_L, ASM_OP_FMIX_LR, ASM_OP_FMIX_R,			\
+	ASM_OP_FMS_, ASM_OP_FMS_D, ASM_OP_FMS_S,			\
+	ASM_OP_FNMA_, ASM_OP_FNMA_D, ASM_OP_FNMA_S,			\
+	ASM_OP_FPCMP_EQ, ASM_OP_FPCMP_LE, ASM_OP_FPCMP_LT,		\
+	ASM_OP_FPCMP_NEQ, ASM_OP_FPCMP_NLE, ASM_OP_FPCMP_NLT,		\
+	ASM_OP_FPCMP_ORD, ASM_OP_FPCMP_UNORD,				\
+	ASM_OP_FPCVT_FX, ASM_OP_FPCVT_FX_TRUNC, ASM_OP_FPCVT_FXU,	\
+	ASM_OP_FPCVT_FXU_TRUNC,						\
+	ASM_OP_FPMERGE_NS, ASM_OP_FPMERGE_S, ASM_OP_FPMERGE_SE,		\
+	ASM_OP_FSWAP_, ASM_OP_FSWAP_NL, ASM_OP_FSWAP_NR,		\
+	ASM_OP_FSXT_L, ASM_OP_FSXT_R,					\
+	ASM_OP_GETF_D, ASM_OP_GETF_EXP, ASM_OP_GETF_S, ASM_OP_GETF_SIG,	\
+	ASM_OP_HINT_B, ASM_OP_HINT_F, ASM_OP_HINT_I, ASM_OP_HINT_M,	\
+	ASM_OP_HINT_X,							\
+	ASM_OP_INVALA_, ASM_OP_INVALA_E,				\
+	ASM_OP_ITC_D, ASM_OP_ITC_I,					\
+	ASM_OP_ITR_D, ASM_OP_ITR_I,					\
+	ASM_OP_LD1_, ASM_OP_LD1_A, ASM_OP_LD1_ACQ, ASM_OP_LD1_BIAS,	\
+	ASM_OP_LD1_C_CLR, ASM_OP_LD1_C_CLR_ACQ, ASM_OP_LD1_C_NC,	\
+	ASM_OP_LD1_S, ASM_OP_LD1_SA,					\
+	ASM_OP_LD16_, ASM_OP_LD16_ACQ,					\
+	ASM_OP_LD2_, ASM_OP_LD2_A, ASM_OP_LD2_ACQ, ASM_OP_LD2_BIAS,	\
+	ASM_OP_LD2_C_CLR, ASM_OP_LD2_C_CLR_ACQ, ASM_OP_LD2_C_NC,	\
+	ASM_OP_LD2_S, ASM_OP_LD2_SA,					\
+	ASM_OP_LD4_, ASM_OP_LD4_A, ASM_OP_LD4_ACQ, ASM_OP_LD4_BIAS,	\
+	ASM_OP_LD4_C_CLR, ASM_OP_LD4_C_CLR_ACQ, ASM_OP_LD4_C_NC,	\
+	ASM_OP_LD4_S, ASM_OP_LD4_SA,					\
+	ASM_OP_LD8_, ASM_OP_LD8_A, ASM_OP_LD8_ACQ, ASM_OP_LD8_BIAS,	\
+	ASM_OP_LD8_C_CLR, ASM_OP_LD8_C_CLR_ACQ, ASM_OP_LD8_C_NC,	\
+	ASM_OP_LD8_FILL, ASM_OP_LD8_S, ASM_OP_LD8_SA,			\
+	ASM_OP_LDF_FILL,						\
+	ASM_OP_LDF8_, ASM_OP_LDF8_A, ASM_OP_LDF8_C_CLR,			\
+	ASM_OP_LDF8_C_NC, ASM_OP_LDF8_S, ASM_OP_LDF8_SA,		\
+	ASM_OP_LDFD_, ASM_OP_LDFD_A, ASM_OP_LDFD_C_CLR,			\
+	ASM_OP_LDFD_C_NC, ASM_OP_LDFD_S, ASM_OP_LDFD_SA,		\
+	ASM_OP_LDFE_, ASM_OP_LDFE_A, ASM_OP_LDFE_C_CLR,			\
+	ASM_OP_LDFE_C_NC, ASM_OP_LDFE_S, ASM_OP_LDFE_SA,		\
+	ASM_OP_LDFP8_, ASM_OP_LDFP8_A, ASM_OP_LDFP8_C_CLR,		\
+	ASM_OP_LDFP8_C_NC, ASM_OP_LDFP8_S, ASM_OP_LDFP8_SA,		\
+	ASM_OP_LDFPD_, ASM_OP_LDFPD_A, ASM_OP_LDFPD_C_CLR,		\
+	ASM_OP_LDFPD_C_NC, ASM_OP_LDFPD_S, ASM_OP_LDFPD_SA,		\
+	ASM_OP_LDFPS_, ASM_OP_LDFPS_A, ASM_OP_LDFPS_C_CLR,		\
+	ASM_OP_LDFPS_C_NC, ASM_OP_LDFPS_S, ASM_OP_LDFPS_SA,		\
+	ASM_OP_LDFS_, ASM_OP_LDFS_A, ASM_OP_LDFS_C_CLR,			\
+	ASM_OP_LDFS_C_NC, ASM_OP_LDFS_S, ASM_OP_LDFS_SA,		\
+	ASM_OP_LFETCH_, ASM_OP_LFETCH_EXCL, ASM_OP_LFETCH_FAULT,	\
+	ASM_OP_LFETCH_FAULT_EXCL,					\
+	ASM_OP_MF_, ASM_OP_MF_A,					\
+	ASM_OP_MIX1_L, ASM_OP_MIX1_R,					\
+	ASM_OP_MIX2_L, ASM_OP_MIX2_R,					\
+	ASM_OP_MIX4_L, ASM_OP_MIX4_R,					\
+	ASM_OP_MOV_, ASM_OP_MOV_CPUID, ASM_OP_MOV_DBR, ASM_OP_MOV_I,	\
+	ASM_OP_MOV_IBR, ASM_OP_MOV_IP, ASM_OP_MOV_M, ASM_OP_MOV_MSR,	\
+	ASM_OP_MOV_PKR, ASM_OP_MOV_PMC, ASM_OP_MOV_PMD, ASM_OP_MOV_PR,	\
+	ASM_OP_MOV_PSR, ASM_OP_MOV_PSR_L, ASM_OP_MOV_PSR_UM,		\
+	ASM_OP_MOV_RET, ASM_OP_MOV_RR,					\
+	ASM_OP_NOP_B, ASM_OP_NOP_F, ASM_OP_NOP_I, ASM_OP_NOP_M,		\
+	ASM_OP_NOP_X,							\
+	ASM_OP_PACK2_SSS, ASM_OP_PACK2_USS,				\
+	ASM_OP_PACK4_SSS,						\
+	ASM_OP_PADD1_, ASM_OP_PADD1_SSS, ASM_OP_PADD1_UUS,		\
+	ASM_OP_PADD1_UUU,						\
+	ASM_OP_PADD2_, ASM_OP_PADD2_SSS, ASM_OP_PADD2_UUS,		\
+	ASM_OP_PADD2_UUU,						\
+	ASM_OP_PAVG1_, ASM_OP_PAVG1_RAZ,				\
+	ASM_OP_PAVG2_, ASM_OP_PAVG2_RAZ,				\
+	ASM_OP_PCMP1_EQ, ASM_OP_PCMP1_GT,				\
+	ASM_OP_PCMP2_EQ, ASM_OP_PCMP2_GT,				\
+	ASM_OP_PCMP4_EQ, ASM_OP_PCMP4_GT,				\
+	ASM_OP_PMAX1_U,							\
+	ASM_OP_PMIN1_U,							\
+	ASM_OP_PMPY2_L, ASM_OP_PMPY2_R,					\
+	ASM_OP_PMPYSHR2_, ASM_OP_PMPYSHR2_U,				\
+	ASM_OP_PROBE_R, ASM_OP_PROBE_R_FAULT, ASM_OP_PROBE_RW_FAULT,	\
+	ASM_OP_PROBE_W, ASM_OP_PROBE_W_FAULT,				\
+	ASM_OP_PSHR2_, ASM_OP_PSHR2_U,					\
+	ASM_OP_PSHR4_, ASM_OP_PSHR4_U,					\
+	ASM_OP_PSUB1_, ASM_OP_PSUB1_SSS, ASM_OP_PSUB1_UUS,		\
+	ASM_OP_PSUB1_UUU,						\
+	ASM_OP_PSUB2_, ASM_OP_PSUB2_SSS, ASM_OP_PSUB2_UUS,		\
+	ASM_OP_PSUB2_UUU,						\
+	ASM_OP_PTC_E, ASM_OP_PTC_G, ASM_OP_PTC_GA, ASM_OP_PTC_L,	\
+	ASM_OP_PTR_D, ASM_OP_PTR_I,					\
+	ASM_OP_SETF_EXP, ASM_OP_SETF_D, ASM_OP_SETF_S, ASM_OP_SETF_SIG,	\
+	ASM_OP_SHR_, ASM_OP_SHR_U,					\
+	ASM_OP_SRLZ_D, ASM_OP_SRLZ_I,					\
+	ASM_OP_ST1_, ASM_OP_ST1_REL,					\
+	ASM_OP_ST16_, ASM_OP_ST16_REL,					\
+	ASM_OP_ST2_, ASM_OP_ST2_REL,					\
+	ASM_OP_ST4_, ASM_OP_ST4_REL,					\
+	ASM_OP_ST8_, ASM_OP_ST8_REL, ASM_OP_ST8_SPILL,			\
+	ASM_OP_STF_SPILL,						\
+	ASM_OP_SYNC_I,							\
+	ASM_OP_TBIT_NZ_AND, ASM_OP_TBIT_NZ_OR, ASM_OP_TBIT_NZ_OR_ANDCM,	\
+	ASM_OP_TBIT_Z, ASM_OP_TBIT_Z_AND, ASM_OP_TBIT_Z_OR,		\
+	ASM_OP_TBIT_Z_OR_ANDCM, ASM_OP_TBIT_Z_UNC,			\
+	ASM_OP_TF_NZ_AND, ASM_OP_TF_NZ_OR, ASM_OP_TF_NZ_OR_ANDCM,	\
+	ASM_OP_TF_Z, ASM_OP_TF_Z_AND, ASM_OP_TF_Z_OR,			\
+	ASM_OP_TF_Z_OR_ANDCM, ASM_OP_TF_Z_UNC,				\
+	ASM_OP_TNAT_NZ_AND, ASM_OP_TNAT_NZ_OR, ASM_OP_TNAT_NZ_OR_ANDCM,	\
+	ASM_OP_TNAT_Z, ASM_OP_TNAT_Z_AND, ASM_OP_TNAT_Z_OR,		\
+	ASM_OP_TNAT_Z_OR_ANDCM, ASM_OP_TNAT_Z_UNC,			\
+	ASM_OP_UNPACK1_H, ASM_OP_UNPACK1_L,				\
+	ASM_OP_UNPACK2_H, ASM_OP_UNPACK2_L,				\
+	ASM_OP_UNPACK4_H, ASM_OP_UNPACK4_L,				\
+	ASM_OP_VMSW_0, ASM_OP_VMSW_1,					\
+	ASM_OP_XMA_H, ASM_OP_XMA_HU, ASM_OP_XMA_L,			\
+	ASM_OP_NUMBER_OF_OPCODES
+
+#endif /* _DISASM_INT_H_ */


Property changes on: trunk/sys/ia64/disasm/disasm_int.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia32/ia32_misc.c
===================================================================
--- trunk/sys/ia64/ia32/ia32_misc.c	                        (rev 0)
+++ trunk/sys/ia64/ia32/ia32_misc.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,62 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2009 Konstantin Belousov
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia32/ia32_misc.c 220238 2011-04-01 11:16:29Z kib $");
+
+#include "opt_compat.h"
+
+#include <sys/param.h>
+#include <sys/mount.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <compat/freebsd32/freebsd32_util.h>
+#include <compat/freebsd32/freebsd32.h>
+#include <compat/freebsd32/freebsd32_proto.h>
+
+/*
+ * sysarch(2) for 32-bit processes: no ia32 sub-operations are
+ * implemented on ia64, so always fail.
+ */
+int
+freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap)
+{
+
+	return (EOPNOTSUPP);
+}
+
+#ifdef COMPAT_43
+/*
+ * Old getpagesize(2): report the ia32 page size to 32-bit processes
+ * rather than the native ia64 page size.
+ */
+int
+ofreebsd32_getpagesize(struct thread *td,
+    struct ofreebsd32_getpagesize_args *uap)
+{
+
+	td->td_retval[0] = IA32_PAGE_SIZE;
+	return (0);
+}
+#endif


Property changes on: trunk/sys/ia64/ia32/ia32_misc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia32/ia32_reg.c
===================================================================
--- trunk/sys/ia64/ia32/ia32_reg.c	                        (rev 0)
+++ trunk/sys/ia64/ia32/ia32_reg.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,81 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2005 Peter Wemm
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia32/ia32_reg.c 233125 2012-03-18 19:12:11Z tijl $");
+
+#include "opt_compat.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/procfs.h>
+
+/*
+ * ptrace(2)/procfs access to the ia32 register sets.  None of these
+ * are implemented on ia64: the fill routines zero the caller's buffer
+ * before failing (so no stale data is ever handed out) and every
+ * routine returns EOPNOTSUPP.
+ */
+int
+fill_regs32(struct thread *td, struct reg32 *regs)
+{
+
+	bzero(regs, sizeof(*regs));
+	return (EOPNOTSUPP);
+}
+
+int
+set_regs32(struct thread *td, struct reg32 *regs)
+{
+
+	return (EOPNOTSUPP);
+}
+
+int
+fill_fpregs32(struct thread *td, struct fpreg32 *regs)
+{
+
+	bzero(regs, sizeof(*regs));
+	return (EOPNOTSUPP);
+}
+
+int
+set_fpregs32(struct thread *td, struct fpreg32 *regs)
+{
+
+	return (EOPNOTSUPP);
+}
+
+int
+fill_dbregs32(struct thread *td, struct dbreg32 *regs)
+{
+
+	bzero(regs, sizeof(*regs));
+	return (EOPNOTSUPP);
+}
+
+int
+set_dbregs32(struct thread *td, struct dbreg32 *regs)
+{
+
+	return (EOPNOTSUPP);
+}


Property changes on: trunk/sys/ia64/ia32/ia32_reg.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia32/ia32_signal.c
===================================================================
--- trunk/sys/ia64/ia32/ia32_signal.c	                        (rev 0)
+++ trunk/sys/ia64/ia32/ia32_signal.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,299 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia32/ia32_signal.c 255426 2013-09-09 18:11:59Z jhb $");
+
+#include "opt_compat.h"
+
+#define __ELF_WORD_SIZE 32
+
+#include <sys/param.h>
+#include <sys/exec.h>
+#include <sys/fcntl.h>
+#include <sys/imgact.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/mman.h>
+#include <sys/namei.h>
+#include <sys/pioctl.h>
+#include <sys/proc.h>
+#include <sys/procfs.h>
+#include <sys/resourcevar.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/stat.h>
+#include <sys/sx.h>
+#include <sys/syscall.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+#include <sys/vnode.h>
+#include <sys/imgact_elf.h>
+#include <sys/sysproto.h>
+
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+
+#include <compat/freebsd32/freebsd32_signal.h>
+#include <compat/freebsd32/freebsd32_util.h>
+#include <compat/freebsd32/freebsd32_proto.h>
+#include <compat/ia32/ia32_signal.h>
+#include <x86/include/psl.h>
+#include <x86/include/segments.h>
+#include <x86/include/specialreg.h>
+
+/*
+ * ia32 signal trampoline code, copied out to the user address space.
+ * It invokes the handler and then enters sigreturn(2) via int $0x80;
+ * each byte group is annotated with the equivalent i386 assembly.
+ */
+char ia32_sigcode[] = {
+	0xff, 0x54, 0x24, 0x10,		/* call *SIGF_HANDLER(%esp) */
+	0x8d, 0x44, 0x24, 0x14,		/* lea SIGF_UC(%esp),%eax */
+	0x50,				/* pushl %eax */
+	0xf7, 0x40, 0x54, 0x00, 0x00, 0x02, 0x02, /* testl $PSL_VM,UC_EFLAGS(%eax) */
+	0x75, 0x03,			/* jne 9f */
+	0x8e, 0x68, 0x14,		/* movl UC_GS(%eax),%gs */
+	0xb8, 0x57, 0x01, 0x00, 0x00,	/* 9: movl $SYS_sigreturn,%eax */
+	0x50,				/* pushl %eax */
+	0xcd, 0x80,			/* int $0x80 */
+	0xeb, 0xfe,			/* 0: jmp 0b */
+	0
+};
+int sz_ia32_sigcode = sizeof(ia32_sigcode);
+
+#ifdef COMPAT_43
+/* Old (4.3BSD-era) 32-bit sigreturn: not supported on ia64. */
+int
+ofreebsd32_sigreturn(struct thread *td, struct ofreebsd32_sigreturn_args *uap)
+{
+
+	return (EOPNOTSUPP);
+}
+#endif
+
+/*
+ * Signal sending has not been implemented on ia64.  This causes
+ * the sigtramp code to not understand the arguments and the application
+ * will generally crash if it tries to handle a signal.  Calling
+ * sendsig() means that at least untrapped signals will work.
+ */
+void
+ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
+{
+	/* Delegate to the native (64-bit) sendsig; see comment above. */
+	sendsig(catcher, ksi, mask);
+}
+
+#ifdef COMPAT_FREEBSD4
+/* FreeBSD 4 compatible 32-bit sigreturn: forwarded to the native one. */
+int
+freebsd4_freebsd32_sigreturn(struct thread *td, struct freebsd4_freebsd32_sigreturn_args *uap)
+{
+	return (sys_sigreturn(td, (struct sigreturn_args *)uap));
+}
+#endif
+
+/* 32-bit sigreturn: forwarded to the native sigreturn implementation. */
+int
+freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
+{
+	return (sys_sigreturn(td, (struct sigreturn_args *)uap));
+}
+
+
+/*
+ * Set up the register state of a freshly exec'ed ia32 process: native
+ * exec_setregs() first, then the ia32 segment registers, a user-space
+ * GDT/LDT pair mapped just above the user stack, and finally the ia32
+ * control registers for this CPU.
+ */
+void
+ia32_setregs(struct thread *td, struct image_params *imgp, u_long stack)
+{
+	struct trapframe *tf = td->td_frame;
+	vm_offset_t gdt, ldt;
+	u_int64_t codesel, datasel, ldtsel;
+	u_int64_t codeseg, dataseg, gdtseg, ldtseg;
+	struct segment_descriptor desc;
+	struct vmspace *vmspace = td->td_proc->p_vmspace;
+	struct sysentvec *sv;
+
+	sv = td->td_proc->p_sysent;
+	exec_setregs(td, imgp, stack);
+
+	/* Non-syscall frames are cleared by exec_setregs() */
+	if (tf->tf_flags & FRAME_SYSCALL) {
+		bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
+		bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
+	} else
+		tf->tf_special.ndirty = 0;
+
+	/* PSR.is selects the ia32 instruction set. */
+	tf->tf_special.psr |= IA64_PSR_IS;
+	tf->tf_special.sp = stack;
+
+	/* Point the RSE backstore to something harmless. */
+	tf->tf_special.bspstore = (sv->sv_psstrings - sz_ia32_sigcode -
+	    SPARE_USRSPACE + 15) & ~15;
+
+	codesel = LSEL(LUCODE_SEL, SEL_UPL);
+	datasel = LSEL(LUDATA_SEL, SEL_UPL);
+	ldtsel = GSEL(GLDT_SEL, SEL_UPL);
+
+	/* Setup ia32 segment registers. */
+	tf->tf_scratch.gr16 = (datasel << 48) | (datasel << 32) |
+	    (datasel << 16) | datasel;
+	tf->tf_scratch.gr17 = (ldtsel << 32) | (datasel << 16) | codesel;
+
+	/*
+	 * Build the GDT and LDT.
+	 * NOTE(review): the return values of vm_map_find() and the
+	 * copyout() calls below are not checked — TODO confirm failure
+	 * here is benign during exec.
+	 */
+	gdt = sv->sv_usrstack;
+	vm_map_find(&vmspace->vm_map, NULL, 0, &gdt, IA32_PAGE_SIZE << 1, 0,
+	    VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
+	ldt = gdt + IA32_PAGE_SIZE;
+
+	/* GDT entry GLDT_SEL: the LDT system descriptor. */
+	desc.sd_lolimit = 8*NLDT-1;
+	desc.sd_lobase = ldt & 0xffffff;
+	desc.sd_type = SDT_SYSLDT;
+	desc.sd_dpl = SEL_UPL;
+	desc.sd_p = 1;
+	desc.sd_hilimit = 0;
+	desc.sd_def32 = 0;
+	desc.sd_gran = 0;
+	desc.sd_hibase = ldt >> 24;
+	copyout(&desc, (caddr_t) gdt + 8*GLDT_SEL, sizeof(desc));
+
+	/* LDT entries LUCODE_SEL/LUDATA_SEL: flat user code and data. */
+	desc.sd_lolimit = ((sv->sv_usrstack >> 12) - 1) & 0xffff;
+	desc.sd_lobase = 0;
+	desc.sd_type = SDT_MEMERA;
+	desc.sd_dpl = SEL_UPL;
+	desc.sd_p = 1;
+	desc.sd_hilimit = ((sv->sv_usrstack >> 12) - 1) >> 16;
+	desc.sd_def32 = 1;
+	desc.sd_gran = 1;
+	desc.sd_hibase = 0;
+	copyout(&desc, (caddr_t) ldt + 8*LUCODE_SEL, sizeof(desc));
+	desc.sd_type = SDT_MEMRWA;
+	copyout(&desc, (caddr_t) ldt + 8*LUDATA_SEL, sizeof(desc));
+
+	/* Unscrambled descriptor images for the segment registers. */
+	codeseg = 0		/* base */
+		+ (((sv->sv_usrstack >> 12) - 1) << 32) /* limit */
+		+ ((long)SDT_MEMERA << 52)
+		+ ((long)SEL_UPL << 57)
+		+ (1L << 59) /* present */
+		+ (1L << 62) /* 32 bits */
+		+ (1L << 63); /* page granularity */
+	dataseg = 0		/* base */
+		+ (((sv->sv_usrstack >> 12) - 1) << 32) /* limit */
+		+ ((long)SDT_MEMRWA << 52)
+		+ ((long)SEL_UPL << 57)
+		+ (1L << 59) /* present */
+		+ (1L << 62) /* 32 bits */
+		+ (1L << 63); /* page granularity */
+
+	tf->tf_scratch.csd = codeseg;
+	tf->tf_scratch.ssd = dataseg;
+	tf->tf_scratch.gr24 = dataseg; /* ESD */
+	tf->tf_scratch.gr27 = dataseg; /* DSD */
+	tf->tf_scratch.gr28 = dataseg; /* FSD */
+	tf->tf_scratch.gr29 = dataseg; /* GSD */
+
+	gdtseg = gdt		/* base */
+		+ ((8L*NGDT - 1) << 32) /* limit */
+		+ ((long)SDT_SYSNULL << 52)
+		+ ((long)SEL_UPL << 57)
+		+ (1L << 59) /* present */
+		+ (0L << 62) /* 16 bits */
+		+ (0L << 63); /* byte granularity */
+	ldtseg = ldt		/* base */
+		+ ((8L*NLDT - 1) << 32) /* limit */
+		+ ((long)SDT_SYSLDT << 52)
+		+ ((long)SEL_UPL << 57)
+		+ (1L << 59) /* present */
+		+ (0L << 62) /* 16 bits */
+		+ (0L << 63); /* byte granularity */
+
+	tf->tf_scratch.gr30 = ldtseg; /* LDTD */
+	tf->tf_scratch.gr31 = gdtseg; /* GDTD */
+
+	/* Set ia32 control registers on this processor. */
+	ia64_set_cflg(CR0_PE | CR0_PG | ((long)(CR4_XMM | CR4_FXSR) << 32));
+	ia64_set_eflag(PSL_USER);
+
+	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
+	tf->tf_scratch.gr11 = td->td_proc->p_sysent->sv_psstrings;
+
+	/*
+	 * XXX - Linux emulator
+	 * Make sure edx is 0x0 on entry. Linux binaries depend
+	 * on it.
+	 */
+	td->td_retval[1] = 0;
+}
+
+/* Restore this thread's saved ia32 control registers from its pcb. */
+void
+ia32_restorectx(struct pcb *pcb)
+{
+
+	ia64_set_cflg(pcb->pcb_ia32_cflg);
+	ia64_set_eflag(pcb->pcb_ia32_eflag);
+	ia64_set_fcr(pcb->pcb_ia32_fcr);
+	ia64_set_fdr(pcb->pcb_ia32_fdr);
+	ia64_set_fir(pcb->pcb_ia32_fir);
+	ia64_set_fsr(pcb->pcb_ia32_fsr);
+}
+
+/* Save the current ia32 control registers into the thread's pcb. */
+void
+ia32_savectx(struct pcb *pcb)
+{
+
+	pcb->pcb_ia32_cflg = ia64_get_cflg();
+	pcb->pcb_ia32_eflag = ia64_get_eflag();
+	pcb->pcb_ia32_fcr = ia64_get_fcr();
+	pcb->pcb_ia32_fdr = ia64_get_fdr();
+	pcb->pcb_ia32_fir = ia64_get_fir();
+	pcb->pcb_ia32_fsr = ia64_get_fsr();
+}
+
+/*
+ * getcontext/setcontext/swapcontext for 32-bit processes are not
+ * implemented on ia64; all of them fail via nosys().
+ */
+int
+freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap)
+{
+
+	return (nosys(td, NULL));
+}
+
+int
+freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap)
+{
+
+	return (nosys(td, NULL));
+}
+
+int
+freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap)
+{
+
+	return (nosys(td, NULL));
+}


Property changes on: trunk/sys/ia64/ia32/ia32_signal.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia32/ia32_trap.c
===================================================================
--- trunk/sys/ia64/ia32/ia32_trap.c	                        (rev 0)
+++ trunk/sys/ia64/ia32/ia32_trap.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,283 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia32/ia32_trap.c 240244 2012-09-08 18:27:11Z attilio $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/ktr.h>
+#include <sys/sysproto.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pioctl.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/signalvar.h>
+#include <sys/syscall.h>
+#include <sys/sysent.h>
+#include <machine/cpu.h>
+#include <machine/fpu.h>
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <x86/include/psl.h>
+
+#include <security/audit/audit.h>
+
+#include <compat/ia32/ia32_util.h>
+
+/*
+ * Store a syscall result into the ia32 register image: success goes
+ * to eax/edx with the carry flag cleared; an error is translated via
+ * the ABI's error table (if any) and returned in eax with carry set.
+ */
+void
+ia32_set_syscall_retval(struct thread *td, int error)
+{
+	struct proc *p;
+	struct trapframe *tf;
+
+	tf = td->td_frame;
+
+	switch (error) {
+	case 0:
+		tf->tf_scratch.gr8 = td->td_retval[0];	/* eax */
+		tf->tf_scratch.gr10 = td->td_retval[1];	/* edx */
+		ia64_set_eflag(ia64_get_eflag() & ~PSL_C);
+		break;
+
+	case ERESTART:
+		/*
+		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
+		 * int 0x80 is 2 bytes. XXX Assume int 0x80.
+		 */
+		tf->tf_special.iip -= 2;
+		break;
+
+	case EJUSTRETURN:
+		break;
+
+	default:
+		/* Map the native errno through the ABI's error table. */
+		p = td->td_proc;
+		if (p->p_sysent->sv_errsize) {
+			if (error >= p->p_sysent->sv_errsize)
+				error = -1;	/* XXX */
+			else
+				error = p->p_sysent->sv_errtbl[error];
+		}
+		tf->tf_scratch.gr8 = error;
+		ia64_set_eflag(ia64_get_eflag() | PSL_C);
+		break;
+	}
+}
+
+/*
+ * Fetch an ia32 syscall number and arguments.  The number comes from
+ * eax; the arguments live on the 32-bit user stack just above the
+ * return address and are widened from 32 to 64 bits into sa->args32.
+ */
+int
+ia32_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
+{
+	struct trapframe *tf;
+	struct proc *p;
+	uint32_t args[8];
+	caddr_t params;
+	int error, i;
+
+	tf = td->td_frame;
+	p = td->td_proc;
+
+	/* Truncate sp to 32 bits and skip the return address. */
+	params = (caddr_t)(tf->tf_special.sp & ((1L<<32)-1)) +
+	    sizeof(uint32_t);
+	sa->code = tf->tf_scratch.gr8;		/* eax */
+
+	if (sa->code == SYS_syscall) {
+		/* Code is first argument, followed by actual args. */
+		sa->code = fuword32(params);
+		params += sizeof(int);
+	} else if (sa->code == SYS___syscall) {
+		/*
+		 * Like syscall, but code is a quad, so as to maintain
+		 * quad alignment for the rest of the arguments.  We
+		 * use a 32-bit fetch in case params is not aligned.
+		 */
+		sa->code = fuword32(params);
+		params += sizeof(quad_t);
+	}
+
+	/* Out-of-range codes resolve to entry 0 (nosys). */
+	if (p->p_sysent->sv_mask)
+		sa->code &= p->p_sysent->sv_mask;
+	if (sa->code >= p->p_sysent->sv_size)
+		sa->callp = &p->p_sysent->sv_table[0];
+	else
+		sa->callp = &p->p_sysent->sv_table[sa->code];
+	sa->narg = sa->callp->sy_narg;
+
+	/* NOTE(review): assumes sy_narg <= 8 (size of args[]) — confirm. */
+	if (params != NULL && sa->narg != 0)
+		error = copyin(params, (caddr_t)args, sa->narg * sizeof(int));
+	else
+		error = 0;
+	sa->args = &sa->args32[0];
+
+	if (error == 0) {
+		/* Zero-extend each 32-bit argument to 64 bits. */
+		for (i = 0; i < sa->narg; i++)
+			sa->args32[i] = args[i];
+		td->td_retval[0] = 0;
+		td->td_retval[1] = tf->tf_scratch.gr10;	/* edx */
+	}
+
+	return (error);
+}
+
+#include "../../kern/subr_syscall.c"
+
+/*
+ * Handle an int $0x80 syscall from an ia32 process: run the syscall
+ * through the common syscallenter/syscallret path, and deliver a
+ * SIGTRAP afterwards if the process was single-stepping (PSL_T).
+ */
+static void
+ia32_syscall(struct trapframe *tf)
+{
+	struct thread *td;
+	struct syscall_args sa;
+	register_t eflags;
+	int error;
+	ksiginfo_t ksi;
+
+	td = curthread;
+	/* Sample eflags before the syscall may change them. */
+	eflags = ia64_get_eflag();
+
+	error = syscallenter(td, &sa);
+
+	/*
+	 * Traced syscall.
+	 */
+	if ((eflags & PSL_T) && !(eflags & PSL_VM)) {
+		ia64_set_eflag(ia64_get_eflag() & ~PSL_T);
+		ksiginfo_init_trap(&ksi);
+		ksi.ksi_signo = SIGTRAP;
+		ksi.ksi_code = TRAP_TRACE;
+		ksi.ksi_addr = (void *)tf->tf_special.iip;
+		trapsignal(td, &ksi);
+	}
+
+	syscallret(td, error, &sa);
+}
+
+/*
+ * ia32_trap() is called from exception.S to handle the IA-32 specific
+ * interruption vectors.
+ */
+/*
+ * Handle the IA-32 interruption vectors: translate an ia32 exception
+ * into the corresponding signal (with an si_code/ucode where one
+ * applies), dispatch int $0x80 to the syscall path, and panic on
+ * anything unexpected.  Only ever entered from user mode.
+ */
+void
+ia32_trap(int vector, struct trapframe *tf)
+{
+	struct proc *p;
+	struct thread *td;
+	uint64_t ucode;
+	int sig;
+	ksiginfo_t ksi;
+
+	KASSERT(TRAPF_USERMODE(tf), ("%s: In kernel mode???", __func__));
+
+	ia64_set_fpsr(IA64_FPSR_DEFAULT);
+	PCPU_INC(cnt.v_trap);
+
+	td = curthread;
+	td->td_frame = tf;
+	td->td_pticks = 0;
+	p = td->td_proc;
+	if (td->td_ucred != p->p_ucred)
+		cred_update_thread(td);
+	sig = 0;
+	ucode = 0;
+	switch (vector) {
+	case IA64_VEC_IA32_EXCEPTION:
+		/* The ia32 exception number is in isr bits 31:16. */
+		switch ((tf->tf_special.isr >> 16) & 0xffff) {
+		case IA32_EXCEPTION_DIVIDE:
+			ucode = FPE_INTDIV;
+			sig = SIGFPE;
+			break;
+		case IA32_EXCEPTION_DEBUG:
+		case IA32_EXCEPTION_BREAK:
+			sig = SIGTRAP;
+			break;
+		case IA32_EXCEPTION_OVERFLOW:
+			ucode = FPE_INTOVF;
+			sig = SIGFPE;
+			break;
+		case IA32_EXCEPTION_BOUND:
+			ucode = FPE_FLTSUB;
+			sig = SIGFPE;
+			break;
+		case IA32_EXCEPTION_DNA:
+			ucode = 0;
+			sig = SIGFPE;
+			break;
+		case IA32_EXCEPTION_NOT_PRESENT:
+		case IA32_EXCEPTION_STACK_FAULT:
+		case IA32_EXCEPTION_GPFAULT:
+			ucode = (tf->tf_special.isr & 0xffff) + BUS_SEGM_FAULT;
+			sig = SIGBUS;
+			break;
+		case IA32_EXCEPTION_FPERROR:
+			ucode = 0;	/* XXX */
+			sig = SIGFPE;
+			break;
+		case IA32_EXCEPTION_ALIGNMENT_CHECK:
+			ucode = tf->tf_special.ifa;	/* VA */
+			sig = SIGBUS;
+			break;
+		case IA32_EXCEPTION_STREAMING_SIMD:
+			ucode = 0; /* XXX */
+			sig = SIGFPE;
+			break;
+		default:
+			trap_panic(vector, tf);
+			break;
+		}
+		break;
+
+	case IA64_VEC_IA32_INTERCEPT:
+		/* XXX Maybe need to emulate ia32 instruction. */
+		/* No break: trap_panic() is not expected to return. */
+		trap_panic(vector, tf);
+
+	case IA64_VEC_IA32_INTERRUPT:
+		/* INT n instruction - probably a syscall. */
+		if (((tf->tf_special.isr >> 16) & 0xffff) == 0x80) {
+			ia32_syscall(tf);
+			goto out;
+		}
+		ucode = (tf->tf_special.isr >> 16) & 0xffff;
+		sig = SIGILL;
+		break;
+
+	default:
+		/* Should never happen of course. */
+		trap_panic(vector, tf);
+		break;
+	}
+
+	KASSERT(sig != 0, ("%s: signal not set", __func__));
+
+	ksiginfo_init_trap(&ksi);
+	ksi.ksi_signo = sig;
+	ksi.ksi_code = (int)ucode; /* XXX */
+	/* ksi.ksi_addr */
+	trapsignal(td, &ksi);
+
+out:
+	userret(td, tf);
+	do_ast(tf);
+}


Property changes on: trunk/sys/ia64/ia32/ia32_trap.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/autoconf.c
===================================================================
--- trunk/sys/ia64/ia64/autoconf.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/autoconf.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,94 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/autoconf.c 219756 2011-03-18 22:33:19Z marcel $
+ */
+
+#include "opt_isa.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/reboot.h>
+#include <sys/kernel.h>
+#include <sys/mount.h>
+#include <sys/sysctl.h>
+#include <sys/bus.h>
+#include <sys/cons.h>
+
+#include <machine/intr.h>
+#include <machine/md_var.h>
+
+static void	configure_first(void *);
+static void	configure(void *);
+static void	configure_final(void *);
+
+SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL);
+/* SI_ORDER_SECOND is hookable */
+SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL);
+/* SI_ORDER_MIDDLE is hookable */
+SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL);
+
+#ifdef DEV_ISA
+#include <isa/isavar.h>
+device_t isa_bus_device = 0;
+#endif
+
+/*
+ * Determine i/o configuration for a machine.
+ */
+/*
+ * First configuration hook (SI_SUB_CONFIGURE / SI_ORDER_FIRST): create
+ * the top-level "nexus" device under root_bus so that later hooks can
+ * probe and attach the rest of the device tree beneath it.
+ */
+static void
+configure_first(void *dummy)
+{
+
+	device_add_child(root_bus, "nexus", 0);
+}
+
+/*
+ * Main configuration hook: probe and attach the whole device tree
+ * rooted at root_bus, then probe ISA children last since ISA devices
+ * are not self-identifying.
+ */
+static void
+configure(void *dummy)
+{
+
+	root_bus_configure();
+
+	/*
+	 * Probe ISA devices after everything.
+	 */
+#ifdef DEV_ISA
+	/* isa_bus_device is set when the ISA bus driver attaches. */
+	if (isa_bus_device)
+		isa_probe_children(isa_bus_device);
+#endif
+}
+
+/*
+ * Final configuration hook: finish console initialization, enable
+ * interrupts, and clear `cold' to mark autoconfiguration as complete.
+ */
+static void
+configure_final(void *dummy)
+{
+
+	cninit_finish();
+
+	ia64_enable_intr();
+
+	cold = 0;
+}


Property changes on: trunk/sys/ia64/ia64/autoconf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/bus_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/bus_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/bus_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,378 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2009 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/bus_machdep.c 203883 2010-02-14 16:56:24Z marcel $");
+
+#include <sys/types.h>
+#include <machine/bus.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+extern u_long ia64_port_base;
+
+#define __PIO_ADDR(port)        \
+        (void *)(ia64_port_base | (((port) & 0xfffc) << 10) | ((port) & 0xFFF))
+
+/*
+ * Map a bus address range into a bus-space handle.  For I/O-port space
+ * the port number itself is the handle (ports are reached through the
+ * ia64_port_base window via __PIO_ADDR); memory space is mapped with
+ * pmap_mapdev().  Never fails.
+ */
+int
+bus_space_map(bus_space_tag_t bst, bus_addr_t addr, bus_size_t size,
+    int flags __unused, bus_space_handle_t *bshp)
+{
+
+        *bshp = (__predict_false(bst == IA64_BUS_SPACE_IO))
+            ? addr : (uintptr_t)pmap_mapdev(addr, size);
+        return (0);
+}
+
+
+/*
+ * Undo bus_space_map().  NOTE(review): I/O-port handles (plain port
+ * numbers) are also passed to pmap_unmapdev() here — presumably
+ * harmless on ia64; confirm against pmap_unmapdev()'s handling.
+ */
+void
+bus_space_unmap(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+    bus_size_t size)
+{
+
+	pmap_unmapdev(bsh, size);
+}
+
+/*
+ * Single-unit I/O-port reads (1/2/4 bytes).  __PIO_ADDR translates the
+ * port number into the memory-mapped I/O window at ia64_port_base.
+ * The ia64_mf() calls fence the access against surrounding memory
+ * operations; ia64_mf_a() (the ia64 mf.a instruction) waits for
+ * platform acceptance of the access before continuing.
+ */
+uint8_t
+bus_space_read_io_1(u_long port)
+{
+	uint8_t v;
+
+	ia64_mf();
+	v = ia64_ld1(__PIO_ADDR(port));
+	ia64_mf_a();
+	ia64_mf();
+	return (v);
+}
+
+uint16_t
+bus_space_read_io_2(u_long port)
+{
+	uint16_t v;
+
+	ia64_mf();
+	v = ia64_ld2(__PIO_ADDR(port));
+	ia64_mf_a();
+	ia64_mf();
+	return (v);
+}
+
+uint32_t
+bus_space_read_io_4(u_long port)
+{
+	uint32_t v;
+
+	ia64_mf();
+	v = ia64_ld4(__PIO_ADDR(port));
+	ia64_mf_a();
+	ia64_mf();
+	return (v);
+}
+
+/* 8-byte port reads are not implemented. */
+#if 0
+uint64_t
+bus_space_read_io_8(u_long port)
+{
+}
+#endif
+
+/*
+ * Single-unit I/O-port writes (1/2/4 bytes).  Same fencing discipline
+ * as the read routines: ia64_mf() orders the store against surrounding
+ * memory operations and ia64_mf_a() waits for platform acceptance.
+ */
+void
+bus_space_write_io_1(u_long port, uint8_t val)
+{
+
+	ia64_mf();
+	ia64_st1(__PIO_ADDR(port), val);
+	ia64_mf_a();
+	ia64_mf();
+}
+
+void
+bus_space_write_io_2(u_long port, uint16_t val)
+{
+
+	ia64_mf();
+	ia64_st2(__PIO_ADDR(port), val);
+	ia64_mf_a();
+	ia64_mf();
+}
+
+void
+bus_space_write_io_4(u_long port, uint32_t val)
+{
+
+	ia64_mf();
+	ia64_st4(__PIO_ADDR(port), val);
+	ia64_mf_a();
+	ia64_mf();
+}
+
+/* 8-byte port writes are not implemented. */
+#if 0
+void
+bus_space_write_io_8(u_long port, uint64_t val)
+{
+}
+#endif
+
+/*
+ * Multi-unit reads: read `count' values from the SAME port into *ptr
+ * (FIFO-style access; the port is not advanced between reads).
+ */
+void
+bus_space_read_multi_io_1(u_long port, uint8_t *ptr, size_t count)
+{
+
+	while (count-- > 0)
+		*ptr++ = bus_space_read_io_1(port);
+}
+
+void
+bus_space_read_multi_io_2(u_long port, uint16_t *ptr, size_t count)
+{
+
+	while (count-- > 0)
+		*ptr++ = bus_space_read_io_2(port);
+}
+
+void
+bus_space_read_multi_io_4(u_long port, uint32_t *ptr, size_t count)
+{
+
+	while (count-- > 0)
+		*ptr++ = bus_space_read_io_4(port);
+}
+
+/* 8-byte multi reads are not implemented. */
+#if 0
+void
+bus_space_read_multi_io_8(u_long port, uint64_t *ptr, size_t count)
+{
+}
+#endif
+
+/*
+ * Multi-unit writes: write `count' values from *ptr to the SAME port
+ * (FIFO-style access; the port is not advanced between writes).
+ */
+void
+bus_space_write_multi_io_1(u_long port, const uint8_t *ptr, size_t count)
+{
+
+	while (count-- > 0)
+		bus_space_write_io_1(port, *ptr++);
+}
+
+void
+bus_space_write_multi_io_2(u_long port, const uint16_t *ptr, size_t count)
+{
+
+	while (count-- > 0)
+		bus_space_write_io_2(port, *ptr++);
+}
+
+void
+bus_space_write_multi_io_4(u_long port, const uint32_t *ptr, size_t count)
+{
+
+	while (count-- > 0)
+		bus_space_write_io_4(port, *ptr++);
+}
+
+/* 8-byte multi writes are not implemented. */
+#if 0
+void
+bus_space_write_multi_io_8(u_long port, const uint64_t *ptr, size_t count)
+{
+}
+#endif
+
+/*
+ * Region reads: read `count' consecutive values starting at `port',
+ * advancing the port by the access width after each read.
+ */
+void
+bus_space_read_region_io_1(u_long port, uint8_t *ptr, size_t count)
+{
+
+	while (count-- > 0) {
+		*ptr++ = bus_space_read_io_1(port);
+		port += 1;
+	}
+}
+
+void
+bus_space_read_region_io_2(u_long port, uint16_t *ptr, size_t count) 
+{
+
+	while (count-- > 0) {
+		*ptr++ = bus_space_read_io_2(port);
+		port += 2;
+	}
+}
+
+void
+bus_space_read_region_io_4(u_long port, uint32_t *ptr, size_t count) 
+{
+
+	while (count-- > 0) {
+		*ptr++ = bus_space_read_io_4(port);
+		port += 4;
+	}
+}
+
+/* 8-byte region reads are not implemented. */
+#if 0
+void bus_space_read_region_io_8(u_long, uint64_t *, size_t);
+#endif
+
+/*
+ * Region writes: write `count' consecutive values starting at `port',
+ * advancing the port by the access width after each write.
+ */
+void
+bus_space_write_region_io_1(u_long port, const uint8_t *ptr, size_t count)
+{
+
+	while (count-- > 0) {
+		bus_space_write_io_1(port, *ptr++);
+		port += 1;
+	}
+}
+
+void
+bus_space_write_region_io_2(u_long port, const uint16_t *ptr, size_t count)
+{
+
+	while (count-- > 0) {
+		bus_space_write_io_2(port, *ptr++);
+		port += 2;
+	}
+}
+
+void
+bus_space_write_region_io_4(u_long port, const uint32_t *ptr, size_t count)
+{
+
+	while (count-- > 0) {
+		bus_space_write_io_4(port, *ptr++);
+		port += 4;
+	}
+}
+
+/* 8-byte region writes are not implemented. */
+#if 0
+void
+bus_space_write_region_io_8(u_long port, const uint64_t *ptr, size_t count)
+{
+}
+#endif
+
+/*
+ * Region fills: write the same value `count' times to consecutive
+ * ports, advancing by the access width each time.
+ */
+void
+bus_space_set_region_io_1(u_long port, uint8_t val, size_t count)
+{
+
+	while (count-- > 0) {
+		bus_space_write_io_1(port, val);
+		port += 1;
+	}
+}
+
+void
+bus_space_set_region_io_2(u_long port, uint16_t val, size_t count)
+{
+
+	while (count-- > 0) {
+		bus_space_write_io_2(port, val);
+		port += 2;
+	}
+}
+
+void
+bus_space_set_region_io_4(u_long port, uint32_t val, size_t count)
+{
+
+	while (count-- > 0) {
+		bus_space_write_io_4(port, val);
+		port += 4;
+	}
+}
+
+/* 8-byte region fills are not implemented. */
+#if 0
+void
+bus_space_set_region_io_8(u_long port, uint64_t val, size_t count)
+{
+}
+#endif
+
+/*
+ * Region copies: copy `count' values from port range `src' to port
+ * range `dst'.  When src < dst the copy runs backwards (from the end
+ * of the range) so overlapping ranges are copied correctly, in the
+ * manner of memmove().
+ */
+void 
+bus_space_copy_region_io_1(u_long src, u_long dst, size_t count) 
+{
+	long delta;
+	uint8_t val;
+
+	if (src < dst) {
+		src += count - 1;
+		dst += count - 1;
+		delta = -1;
+	} else
+		delta = 1;
+
+	while (count-- > 0) {
+		val = bus_space_read_io_1(src);
+		bus_space_write_io_1(dst, val);
+		src += delta;
+		dst += delta;
+	}
+}
+
+void 
+bus_space_copy_region_io_2(u_long src, u_long dst, size_t count) 
+{
+	long delta;
+	uint16_t val;
+
+	if (src < dst) {
+		src += 2 * (count - 1);
+		dst += 2 * (count - 1);
+		delta = -2;
+	} else
+		delta = 2;
+
+	while (count-- > 0) {
+		val = bus_space_read_io_2(src);
+		bus_space_write_io_2(dst, val);
+		src += delta;
+		dst += delta;
+	}
+}
+
+void 
+bus_space_copy_region_io_4(u_long src, u_long dst, size_t count) 
+{
+	long delta;
+	uint32_t val;
+
+	if (src < dst) {
+		src += 4 * (count - 1);
+		dst += 4 * (count - 1);
+		delta = -4;
+	} else
+		delta = 4;
+
+	while (count-- > 0) {
+		val = bus_space_read_io_4(src);
+		bus_space_write_io_4(dst, val);
+		src += delta;
+		dst += delta;
+	}
+}
+
+/* 8-byte region copies are not implemented. */
+#if 0
+void
+bus_space_copy_region_io_8(u_long src, u_long dst, size_t count)
+{
+}
+#endif


Property changes on: trunk/sys/ia64/ia64/bus_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/busdma_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/busdma_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/busdma_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,976 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/busdma_machdep.c 282506 2015-05-05 19:47:17Z hselasky $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/memdesc.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/md_var.h>
+
+#define	MAX_BPAGES	1024
+
+struct bus_dma_tag {
+	bus_dma_tag_t	parent;
+	bus_size_t	alignment;
+	bus_addr_t	boundary;
+	bus_addr_t	lowaddr;
+	bus_addr_t	highaddr;
+	bus_dma_filter_t *filter;
+	void		*filterarg;
+	bus_size_t	maxsize;
+	u_int		nsegments;
+	bus_size_t	maxsegsz;
+	int		flags;
+	int		ref_count;
+	int		map_count;
+	bus_dma_lock_t	*lockfunc;
+	void		*lockfuncarg;
+	bus_dma_segment_t *segments;
+};
+
+struct bounce_page {
+	vm_offset_t	vaddr;		/* kva of bounce buffer */
+	bus_addr_t	busaddr;	/* Physical address */
+	vm_offset_t	datavaddr;	/* kva of client data */
+	bus_addr_t	dataaddr;	/* client physical address */
+	bus_size_t	datacount;	/* client data count */
+	STAILQ_ENTRY(bounce_page) links;
+};
+
+u_int busdma_swi_pending;
+
+static struct mtx bounce_lock;
+static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
+static int free_bpages;
+static int reserved_bpages;
+static int active_bpages;
+static int total_bpages;
+static int total_bounced;
+static int total_deferred;
+
+static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
+SYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0,
+    "Free bounce pages");
+SYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages,
+    0, "Reserved bounce pages");
+SYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0,
+    "Active bounce pages");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
+    "Total bounce pages");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0,
+    "Total bounce requests");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred,
+    0, "Total bounce requests that were deferred");
+
+struct bus_dmamap {
+	struct bp_list	bpages;
+	int		pagesneeded;
+	int		pagesreserved;
+	bus_dma_tag_t	dmat;
+	struct memdesc	mem;
+	bus_dmamap_callback_t *callback;
+	void		*callback_arg;
+	STAILQ_ENTRY(bus_dmamap) links;
+};
+
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+static struct bus_dmamap nobounce_dmamap;
+
+static void init_bounce_pages(void *dummy);
+static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+    int commit);
+static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
+static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr,
+    bus_size_t len);
+
+/*
+ * Return true if a match is made.
+ *
+ * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
+ *
+ * If paddr is within the bounds of the dma tag then call the filter callback
+ * to check for a match, if there is no filter callback then assume a match.
+ */
+static __inline int
+run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len)
+{
+	bus_addr_t bndy;
+	int retval;
+
+	retval = 0;
+	bndy = dmat->boundary;
+	do {
+		/*
+		 * Bounce when the address falls inside this tag's
+		 * exclusion window, is misaligned for the tag, or
+		 * [paddr, paddr+len) crosses a boundary interval
+		 * (boundary assumed to be a power of 2) — unless the
+		 * tag's filter callback, if any, vetoes it by
+		 * returning 0.  Walk up the parent chain until a
+		 * match is found or the chain ends.
+		 */
+		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
+		    (paddr & (dmat->alignment - 1)) != 0 ||
+		    (paddr & bndy) != ((paddr + len) & bndy)) &&
+		    (dmat->filter == NULL ||
+		    (*dmat->filter)(dmat->filterarg, paddr) != 0))
+			retval = 1;
+		dmat = dmat->parent;
+	} while (retval == 0 && dmat != NULL);
+	return (retval);
+}
+
+/*
+ * Convenience function for manipulating driver locks from busdma (during
+ * busdma_swi, for example).  Drivers that don't provide their own locks
+ * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
+ * non-mutex locking scheme don't have to use this at all.
+ */
+void
+busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
+{
+	struct mtx *dmtx;
+
+	dmtx = (struct mtx *)arg;
+	switch (op) {
+	case BUS_DMA_LOCK:
+		mtx_lock(dmtx);
+		break;
+	case BUS_DMA_UNLOCK:
+		mtx_unlock(dmtx);
+		break;
+	default:
+		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
+	}
+}
+
+/*
+ * dflt_lock should never get called.  It gets put into the dma tag when
+ * lockfunc == NULL, which is only valid if the maps that are associated
+ * with the tag are meant to never be deferred.
+ * XXX Should have a way to identify which driver is responsible here.
+ */
+static void
+dflt_lock(void *arg, bus_dma_lock_op_t op)
+{
+	panic("driver error: busdma dflt_lock called");
+}
+
+#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+int
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+    void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+	bus_dma_tag_t newtag;
+	int error = 0;
+
+	/* Basic sanity checking */
+	if (boundary != 0 && boundary < maxsegsz)
+		maxsegsz = boundary;
+
+	/* Return a NULL tag on failure */
+	*dmat = NULL;
+
+	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
+	if (newtag == NULL)
+		return (ENOMEM);
+
+	newtag->parent = parent;
+	newtag->alignment = alignment;
+	newtag->boundary = boundary;
+	/* Round the exclusion window edges out to the end of their page. */
+	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
+	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
+	newtag->filter = filter;
+	newtag->filterarg = filterarg;
+	newtag->maxsize = maxsize;
+	newtag->nsegments = nsegments;
+	newtag->maxsegsz = maxsegsz;
+	newtag->flags = flags;
+	newtag->ref_count = 1; /* Count ourself */
+	newtag->map_count = 0;
+	/* No lockfunc means maps on this tag may never be deferred. */
+	if (lockfunc != NULL) {
+		newtag->lockfunc = lockfunc;
+		newtag->lockfuncarg = lockfuncarg;
+	} else {
+		newtag->lockfunc = dflt_lock;
+		newtag->lockfuncarg = NULL;
+	}
+	newtag->segments = NULL;
+
+	/* Take into account any restrictions imposed by our parent tag */
+	if (parent != NULL) {
+		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
+		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
+		if (newtag->boundary == 0)
+			newtag->boundary = parent->boundary;
+		else if (parent->boundary != 0)
+			newtag->boundary = MIN(parent->boundary,
+			    newtag->boundary);
+		if (newtag->filter == NULL) {
+			/*
+			 * Short circuit looking at our parent directly
+			 * since we have encapsulated all of its information
+			 */
+			newtag->filter = parent->filter;
+			newtag->filterarg = parent->filterarg;
+			newtag->parent = parent->parent;
+		}
+		if (newtag->parent != NULL)
+			atomic_add_int(&parent->ref_count, 1);
+	}
+
+	/*
+	 * A lowaddr below the physical-address limit means some memory
+	 * is unreachable and transfers may need to bounce; with
+	 * BUS_DMA_ALLOCNOW the bounce pool is grown up-front.
+	 */
+	if (newtag->lowaddr < paddr_max && (flags & BUS_DMA_ALLOCNOW) != 0) {
+		/* Must bounce */
+
+		if (ptoa(total_bpages) < maxsize) {
+			int pages;
+
+			pages = atop(maxsize) - total_bpages;
+
+			/* Add pages to our bounce pool */
+			if (alloc_bounce_pages(newtag, pages) < pages)
+				error = ENOMEM;
+		}
+		/* Performed initial allocation */
+		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
+	}
+
+	if (error != 0) {
+		free(newtag, M_DEVBUF);
+	} else {
+		*dmat = newtag;
+	}
+	return (error);
+}
+
+/*
+ * Release a reference on a dma tag, walking up the parent chain and
+ * freeing each tag whose reference count drops to zero.  Fails with
+ * EBUSY while maps created from the tag still exist.
+ */
+int
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+	if (dmat != NULL) {
+
+		if (dmat->map_count != 0)
+			return (EBUSY);
+
+		while (dmat != NULL) {
+			bus_dma_tag_t parent;
+
+			parent = dmat->parent;
+			atomic_subtract_int(&dmat->ref_count, 1);
+			if (dmat->ref_count == 0) {
+				if (dmat->segments != NULL)
+					free(dmat->segments, M_DEVBUF);
+				free(dmat, M_DEVBUF);
+				/*
+				 * Last reference count, so
+				 * release our reference
+				 * count on our parent.
+				 */
+				dmat = parent;
+			} else
+				dmat = NULL;
+		}
+	}
+	return (0);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+	int error;
+
+	error = 0;
+
+	/* Lazily allocate the tag's shared segment array on first map. */
+	if (dmat->segments == NULL) {
+		dmat->segments = (bus_dma_segment_t *)malloc(
+		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
+		    M_NOWAIT);
+		if (dmat->segments == NULL)
+			return (ENOMEM);
+	}
+
+	/*
+	 * Bouncing might be required if the driver asks for an active
+	 * exclusion region, a data alignment that is stricter than 1, and/or
+	 * an active address boundary.
+	 */
+	if (dmat->lowaddr < paddr_max) {
+		/* Must bounce */
+		int maxpages;
+
+		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
+		    M_NOWAIT | M_ZERO);
+		if (*mapp == NULL)
+			return (ENOMEM);
+
+		/* Initialize the new map */
+		STAILQ_INIT(&((*mapp)->bpages));
+
+		/*
+		 * Attempt to add pages to our pool on a per-instance
+		 * basis up to a sane limit.
+		 */
+		maxpages = MIN(MAX_BPAGES, atop(paddr_max - dmat->lowaddr));
+		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
+		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
+			int pages;
+
+			pages = MAX(atop(dmat->maxsize), 1);
+			pages = MIN(maxpages - total_bpages, pages);
+			if (alloc_bounce_pages(dmat, pages) < pages)
+				error = ENOMEM;
+
+			/*
+			 * A short first allocation is fatal; later
+			 * top-ups are best-effort only.
+			 */
+			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
+				if (error == 0)
+					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
+			} else {
+				error = 0;
+			}
+		}
+	} else {
+		/* No bouncing possible: a NULL map is the no-bounce map. */
+		*mapp = NULL;
+	}
+	if (error == 0)
+		dmat->map_count++;
+	return (error);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+/*
+ * Destroy a map created by bus_dmamap_create().  Fails with EBUSY if
+ * bounce pages are still attached (map still loaded); a NULL map (the
+ * no-bounce case) only decrements the tag's map count.
+ */
+int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+	if (map != NULL && map != &nobounce_dmamap) {
+		if (STAILQ_FIRST(&map->bpages) != NULL)
+			return (EBUSY);
+		free(map, M_DEVBUF);
+	}
+	dmat->map_count--;
+	return (0);
+}
+
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ * NOTE(review): *mapp is always set to NULL here — dmamem never needs
+ * bouncing, so no real map is created.
+ */
+int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+    bus_dmamap_t *mapp)
+{
+	int mflags;
+
+	if (flags & BUS_DMA_NOWAIT)
+		mflags = M_NOWAIT;
+	else
+		mflags = M_WAITOK;
+
+	/* If we succeed, no mapping/bouncing will be required */
+	*mapp = NULL;
+
+	if (dmat->segments == NULL) {
+		dmat->segments = (bus_dma_segment_t *)malloc(
+		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
+		    mflags);
+		if (dmat->segments == NULL)
+			return (ENOMEM);
+	}
+	if (flags & BUS_DMA_ZERO)
+		mflags |= M_ZERO;
+
+	/*
+	 * XXX:
+	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
+	 * alignment guarantees of malloc need to be nailed down, and the
+	 * code below should be rewritten to take that into account.
+	 *
+	 * In the meantime, we'll warn the user if malloc gets it wrong.
+	 */
+	if ((dmat->maxsize <= PAGE_SIZE) &&
+	   (dmat->alignment < dmat->maxsize) &&
+	    dmat->lowaddr >= paddr_max) {
+		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
+	} else {
+		/*
+		 * XXX Use Contigmalloc until it is merged into this facility
+		 *     and handles multi-seg allocations.  Nobody is doing
+		 *     multi-seg allocations yet though.
+		 * XXX Certain AGP hardware does.
+		 */
+		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
+		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
+		    dmat->boundary);
+	}
+	if (*vaddr == NULL)
+		return (ENOMEM);
+	else if (vtophys(*vaddr) & (dmat->alignment - 1))
+		printf("bus_dmamem_alloc failed to align memory properly.\n");
+	return (0);
+}
+
+/*
+ * Free a piece of memory and its associated dmamap, that was allocated
+ * via bus_dmamem_alloc.  Make the same choice for free/contigfree as
+ * bus_dmamem_alloc made for malloc/contigmalloc.
+ */
+void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+	/*
+	 * dmamem does not need to be bounced, so the map should be
+	 * NULL
+	 */
+	if (map != NULL)
+		panic("bus_dmamem_free: Invalid map freed\n");
+	/* Mirror the allocation-path condition in bus_dmamem_alloc(). */
+	if ((dmat->maxsize <= PAGE_SIZE) &&
+	   (dmat->alignment < dmat->maxsize) &&
+	    dmat->lowaddr >= paddr_max)
+		free(vaddr, M_DEVBUF);
+	else {
+		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
+	}
+}
+
+/*
+ * Count bounce pages needed to load a physically-addressed buffer.
+ * Only runs once per load (pagesneeded == 0) and only for tags that
+ * can bounce at all; the no-bounce map is exempt.
+ */
+static void
+_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+    bus_size_t buflen, int flags)
+{
+	bus_addr_t curaddr;
+	bus_size_t sgsize;
+
+	if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
+	    dmat->alignment > 1) && map != &nobounce_dmamap &&
+	    map->pagesneeded == 0) {
+		/*
+		 * Count the number of bounce pages
+		 * needed in order to complete this transfer
+		 */
+		curaddr = buf;
+		while (buflen != 0) {
+			sgsize = MIN(buflen, dmat->maxsegsz);
+			if (run_filter(dmat, curaddr, 0) != 0) {
+				/* A bounce page covers at most PAGE_SIZE. */
+				sgsize = MIN(sgsize, PAGE_SIZE);
+				map->pagesneeded++;
+			}
+			curaddr += sgsize;
+			buflen -= sgsize;
+		}
+	}
+}
+
+/*
+ * Count bounce pages needed to load a virtually-addressed buffer,
+ * translating each page through the kernel pmap or the given user
+ * pmap.  Same guards as _bus_dmamap_count_phys().
+ */
+static void
+_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,  
+    void *buf, bus_size_t buflen, int flags)
+{
+	vm_offset_t vaddr;
+	vm_offset_t vendaddr;
+	bus_addr_t paddr;
+
+	if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
+	    dmat->alignment > 1) && map != &nobounce_dmamap &&
+	    map->pagesneeded == 0) {
+		/*
+		 * Count the number of bounce pages
+		 * needed in order to complete this transfer
+		 */
+		vaddr = trunc_page((vm_offset_t)buf);
+		vendaddr = (vm_offset_t)buf + buflen;
+
+		while (vaddr < vendaddr) {
+			if (pmap == kernel_pmap)
+				paddr = pmap_kextract(vaddr);
+			else
+				paddr = pmap_extract(pmap, vaddr);
+			if (run_filter(dmat, paddr, 0) != 0)
+				map->pagesneeded++;
+			vaddr += PAGE_SIZE;
+		}
+	}
+}
+
+/*
+ * Reserve map->pagesneeded bounce pages for this map.  With
+ * BUS_DMA_NOWAIT a shortfall fails immediately (ENOMEM); otherwise the
+ * map is queued on the waiting list and EINPROGRESS is returned — the
+ * load will be completed later from busdma_swi via the map's callback.
+ */
+static int
+_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
+{
+
+	/* Reserve Necessary Bounce Pages */
+	mtx_lock(&bounce_lock);
+	if (flags & BUS_DMA_NOWAIT) {
+		if (reserve_bounce_pages(dmat, map, 0) != 0) {
+			mtx_unlock(&bounce_lock);
+			return (ENOMEM);
+		}
+	} else {
+		if (reserve_bounce_pages(dmat, map, 1) != 0) {
+			/* Queue us for resources */
+			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
+			    map, links);
+			mtx_unlock(&bounce_lock);
+			return (EINPROGRESS);
+		}
+	}
+	mtx_unlock(&bounce_lock);
+
+	return (0);
+}
+
+/*
+ * Add a single contiguous physical range to the segment list.
+ */
+/*
+ * Append [curaddr, curaddr+sgsize) to segs, clipping at the tag's
+ * boundary and coalescing with the previous segment when contiguous.
+ * Returns the size actually added, or 0 when the segment array is full
+ * (callers treat that as EFBIG).  *segp is -1 for the first chunk and
+ * tracks the current segment index thereafter.
+ */
+static int
+_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
+    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
+{
+	bus_addr_t baddr, bmask;
+	int seg;
+
+	/*
+	 * Make sure we don't cross any boundaries.
+	 */
+	bmask = ~(dmat->boundary - 1);
+	if (dmat->boundary > 0) {
+		baddr = (curaddr + dmat->boundary) & bmask;
+		if (sgsize > (baddr - curaddr))
+			sgsize = (baddr - curaddr);
+	}
+
+	/*
+	 * Insert chunk into a segment, coalescing with
+	 * previous segment if possible.
+	 */
+	seg = *segp;
+	if (seg == -1) {
+		seg = 0;
+		segs[seg].ds_addr = curaddr;
+		segs[seg].ds_len = sgsize;
+	} else {
+		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
+		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+		    (dmat->boundary == 0 ||
+		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+			segs[seg].ds_len += sgsize;
+		else {
+			if (++seg >= dmat->nsegments)
+				return (0);
+			segs[seg].ds_addr = curaddr;
+			segs[seg].ds_len = sgsize;
+		}
+	}
+	*segp = seg;
+	return (sgsize);
+}
+
+/*
+ * Utility function to load a physical buffer.  segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * Chunks that run_filter() flags are redirected through bounce pages.
+ */
+int
+_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
+    int *segp)
+{
+	bus_addr_t curaddr;
+	bus_size_t sgsize;
+	int error;
+
+	if (map == NULL)
+		map = &nobounce_dmamap;
+
+	if (segs == NULL)
+		segs = dmat->segments;
+
+	if (map != &nobounce_dmamap) {
+		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
+		if (map->pagesneeded != 0) {
+			error = _bus_dmamap_reserve_pages(dmat, map, flags);
+			if (error)
+				return (error);
+		}
+	}
+
+	while (buflen > 0) {
+		curaddr = buf;
+		sgsize = MIN(buflen, dmat->maxsegsz);
+		if (map->pagesneeded != 0 &&
+		    run_filter(dmat, curaddr, sgsize)) {
+			sgsize = MIN(sgsize, PAGE_SIZE);
+			/* No kva for a phys load: datavaddr stays 0. */
+			curaddr = add_bounce_page(dmat, map, 0, curaddr,
+			    sgsize);
+		}
+		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+		    segp);
+		if (sgsize == 0)
+			break;
+		buf += sgsize;
+		buflen -= sgsize;
+	}
+
+	/*
+	 * Did we fit?
+	 */
+	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Load an array of vm pages: no special handling needed here, defer to
+ * the generic page-by-page helper.
+ */
+int
+_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
+    bus_dma_segment_t *segs, int *segp)
+{
+
+	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
+	    segs, segp));
+}
+
+/*
+ * Utility function to load a linear buffer.  segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * Virtual addresses are translated page by page through `pmap' (or
+ * pmap_kextract for the kernel pmap) and bounced where needed.
+ */
+int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+    bus_size_t buflen, pmap_t pmap, int flags,
+    bus_dma_segment_t *segs, int *segp)
+{
+	bus_size_t sgsize;
+	bus_addr_t curaddr;
+	vm_offset_t vaddr;
+	int error;
+
+	if (map == NULL)
+		map = &nobounce_dmamap;
+
+	if (segs == NULL)
+		segs = dmat->segments;
+
+	if (map != &nobounce_dmamap) {
+		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
+		if (map->pagesneeded != 0) {
+			error = _bus_dmamap_reserve_pages(dmat, map, flags);
+			if (error)
+				return (error);
+		}
+	}
+
+	vaddr = (vm_offset_t)buf;
+
+	while (buflen > 0) {
+		/*
+		 * Get the physical address for this segment.
+		 */
+		if (pmap == kernel_pmap)
+			curaddr = pmap_kextract(vaddr);
+		else
+			curaddr = pmap_extract(pmap, vaddr);
+
+		/*
+		 * Compute the segment size, and adjust counts.
+		 */
+		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
+		if (sgsize > dmat->maxsegsz)
+			sgsize = dmat->maxsegsz;
+		if (buflen < sgsize)
+			sgsize = buflen;
+
+		if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize))
+			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
+			    sgsize);
+
+		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+		    segp);
+		if (sgsize == 0)
+			break;
+
+		vaddr += sgsize;
+		buflen -= sgsize;
+	}
+
+	/*
+	 * Did we fit?
+	 */
+	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+
+void
+__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+	if (map != NULL) {
+		map->dmat = dmat;
+		map->mem = *mem;
+		map->callback = callback;
+		map->callback_arg = callback_arg;
+	}
+}
+
+/*
+ * Hand back the segment array describing the completed load; when the
+ * caller supplied none, fall back to the array embedded in the tag.
+ */
+bus_dma_segment_t *
+_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+	return ((segs != NULL) ? segs : dmat->segments);
+}
+
+/*
+ * Release the mapping held by map: hand every bounce page that was
+ * borrowed for it back to the global free pool.
+ */
+void
+_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+	struct bounce_page *bpage;
+
+	for (;;) {
+		bpage = STAILQ_FIRST(&map->bpages);
+		if (bpage == NULL)
+			break;
+		STAILQ_REMOVE_HEAD(&map->bpages, links);
+		free_bounce_page(dmat, bpage);
+	}
+}
+
+void
+_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+	struct bounce_page *bpage;
+
+	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+		/*
+		 * Handle data bouncing.  We might also
+		 * want to add support for invalidating
+		 * the caches on broken hardware
+		 */
+
+		/* Copy the caller's data into the bounce pages. */
+		if (op & BUS_DMASYNC_PREWRITE) {
+			while (bpage != NULL) {
+				if (bpage->datavaddr != 0)
+					bcopy((void *)bpage->datavaddr,
+					    (void *)bpage->vaddr,
+					    bpage->datacount);
+				else
+					/* No KVA: copy via the phys addr. */
+					physcopyout(bpage->dataaddr,
+					    (void *)bpage->vaddr,
+					    bpage->datacount);
+				bpage = STAILQ_NEXT(bpage, links);
+			}
+			/* Statistic counts sync ops, not pages. */
+			total_bounced++;
+		}
+
+		/*
+		 * Copy bounced data back to the caller's buffer.
+		 * NOTE(review): bpage is not re-fetched here, so a single
+		 * call combining PREWRITE with POSTREAD would find the
+		 * list already walked to NULL; presumably those ops are
+		 * never combined in one call -- confirm with bus_dma(9).
+		 */
+		if (op & BUS_DMASYNC_POSTREAD) {
+			while (bpage != NULL) {
+				if (bpage->datavaddr != 0)
+					bcopy((void *)bpage->vaddr,
+					    (void *)bpage->datavaddr,
+					    bpage->datacount);
+				else
+					/* No KVA: copy via the phys addr. */
+					physcopyin((void *)bpage->vaddr,
+					    bpage->dataaddr,
+					    bpage->datacount);
+				bpage = STAILQ_NEXT(bpage, links);
+			}
+			total_bounced++;
+		}
+	}
+}
+
+/*
+ * One-time initialization of the global bounce page pool and of the
+ * lists used to defer map loads when the pool runs dry.
+ */
+static void
+init_bounce_pages(void *dummy __unused)
+{
+
+	free_bpages = 0;
+	reserved_bpages = 0;
+	active_bpages = 0;
+	total_bpages = 0;
+	STAILQ_INIT(&bounce_page_list);
+	STAILQ_INIT(&bounce_map_waitinglist);
+	STAILQ_INIT(&bounce_map_callbacklist);
+	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
+}
+SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
+
+/*
+ * Grow the global bounce page pool by up to 'numpages' pages that
+ * satisfy the tag's address limit and boundary.  Returns how many
+ * pages were actually added; may fall short on allocation failure.
+ */
+static int
+alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
+{
+	struct bounce_page *bpage;
+	int count;
+
+	for (count = 0; numpages != 0; count++, numpages--) {
+		bpage = malloc(sizeof(*bpage), M_DEVBUF, M_NOWAIT | M_ZERO);
+		if (bpage == NULL)
+			break;
+		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
+		    M_NOWAIT, 0ul, dmat->lowaddr, PAGE_SIZE, dmat->boundary);
+		if (bpage->vaddr == 0) {
+			free(bpage, M_DEVBUF);
+			break;
+		}
+		bpage->busaddr = pmap_kextract(bpage->vaddr);
+		mtx_lock(&bounce_lock);
+		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
+		total_bpages++;
+		free_bpages++;
+		mtx_unlock(&bounce_lock);
+	}
+	return (count);
+}
+
+/*
+ * Move free pages into the map's reservation.  Returns the number of
+ * pages the map still lacks; 0 means it is fully reserved.  With
+ * commit == 0 the operation is all-or-nothing: if the pool cannot
+ * cover the whole shortfall, no pages are taken at all.
+ */
+static int
+reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
+{
+	int pages;
+
+	mtx_assert(&bounce_lock, MA_OWNED);
+	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
+	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
+		return (map->pagesneeded - (map->pagesreserved + pages));
+	free_bpages -= pages;
+	reserved_bpages += pages;
+	map->pagesreserved += pages;
+	/* Recompute the (possibly zero) remaining shortfall. */
+	pages = map->pagesneeded - map->pagesreserved;
+
+	return (pages);
+}
+
+/*
+ * Consume one previously reserved bounce page for the buffer fragment
+ * at 'vaddr' (physical 'addr', 'size' bytes) and queue it on the map.
+ * Returns the bus address of the bounce page, which the caller uses
+ * in the segment list instead of the original address.
+ */
+static bus_addr_t
+add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
+    bus_addr_t addr, bus_size_t size)
+{
+	struct bounce_page *bpage;
+
+	KASSERT(map != NULL && map != &nobounce_dmamap,
+	    ("add_bounce_page: bad map %p", map));
+
+	/* Both counters were set up by the count/reserve pass. */
+	if (map->pagesneeded == 0)
+		panic("add_bounce_page: map doesn't need any pages");
+	map->pagesneeded--;
+
+	if (map->pagesreserved == 0)
+		panic("add_bounce_page: map doesn't need any pages");
+	map->pagesreserved--;
+
+	mtx_lock(&bounce_lock);
+	bpage = STAILQ_FIRST(&bounce_page_list);
+	if (bpage == NULL)
+		panic("add_bounce_page: free page list is empty");
+
+	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
+	reserved_bpages--;
+	active_bpages++;
+	mtx_unlock(&bounce_lock);
+
+	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
+		/* Page offset needs to be preserved. */
+		bpage->vaddr |= addr & PAGE_MASK;
+		bpage->busaddr |= addr & PAGE_MASK;
+	}
+	bpage->datavaddr = vaddr;
+	bpage->dataaddr = addr;
+	bpage->datacount = size;
+	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
+	return (bpage->busaddr);
+}
+
+/*
+ * Return a bounce page to the free pool.  If a map is waiting for
+ * pages and can now be fully reserved, move it to the callback list
+ * and schedule the busdma software interrupt to retry its load.
+ */
+static void
+free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
+{
+	struct bus_dmamap *map;
+
+	bpage->datavaddr = 0;
+	bpage->datacount = 0;
+	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
+		/*
+		 * Reset the bounce page to start at offset 0.  Other uses
+		 * of this bounce page may need to store a full page of
+		 * data and/or assume it starts on a page boundary.
+		 */
+		bpage->vaddr &= ~PAGE_MASK;
+		bpage->busaddr &= ~PAGE_MASK;
+	}
+
+	mtx_lock(&bounce_lock);
+	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
+	free_bpages++;
+	active_bpages--;
+	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
+		/* commit == 1: take whatever pages are available. */
+		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
+			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
+			STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map,
+			    links);
+			busdma_swi_pending = 1;
+			total_deferred++;
+			swi_sched(vm_ih, 0);
+		}
+	}
+	mtx_unlock(&bounce_lock);
+}
+
+/*
+ * Software interrupt handler: retry map loads that were deferred for
+ * want of bounce pages.  The bounce lock is dropped around each retry
+ * because the load path may acquire it again and invokes the driver
+ * callback.
+ */
+void
+busdma_swi(void)
+{
+	bus_dma_tag_t dmat;
+	struct bus_dmamap *map;
+
+	mtx_lock(&bounce_lock);
+	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
+		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
+		mtx_unlock(&bounce_lock);
+		dmat = map->dmat;
+		/* Honor the tag's locking protocol around the retry. */
+		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
+		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
+		    map->callback_arg, BUS_DMA_WAITOK);
+		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
+		mtx_lock(&bounce_lock);
+	}
+	mtx_unlock(&bounce_lock);
+}


Property changes on: trunk/sys/ia64/ia64/busdma_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/clock.c
===================================================================
--- trunk/sys/ia64/ia64/clock.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/clock.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,200 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2005, 2009-2011 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/clock.c 270296 2014-08-21 19:51:07Z emaste $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/efi.h>
+#include <sys/interrupt.h>
+#include <sys/priority.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/timeet.h>
+#include <sys/timetc.h>
+#include <sys/pcpu.h>
+
+#include <machine/cpu.h>
+#include <machine/intr.h>
+#include <machine/intrcnt.h>
+#include <machine/md_var.h>
+#include <machine/smp.h>
+
+#define	CLOCK_ET_OFF		0
+#define	CLOCK_ET_PERIODIC	1
+#define	CLOCK_ET_ONESHOT	2
+
+static struct eventtimer ia64_clock_et;
+static u_int ia64_clock_xiv;
+
+#ifndef SMP
+static timecounter_get_t ia64_get_timecount;
+
+/*
+ * On UP kernels the ITC serves directly as the timecounter.  This is
+ * compiled out under SMP -- presumably because per-CPU ITCs are not
+ * kept synchronized; confirm against the MP timecounter code.
+ */
+static struct timecounter ia64_timecounter = {
+	ia64_get_timecount,	/* get_timecount */
+	0,			/* no poll_pps */
+	~0u,			/* counter_mask */
+	0,			/* frequency */
+	"ITC"			/* name */
+};
+
+/* Return the ITC value, truncated to the 32-bit counter mask. */
+static u_int
+ia64_get_timecount(struct timecounter* tc)
+{
+	return ia64_get_itc();
+}
+#endif
+
+/*
+ * XIV handler for the ITC match interrupt.  Updates statistics,
+ * re-arms or masks the timer according to the per-CPU clock mode,
+ * and delivers the event to the registered event timer callback.
+ */
+static u_int
+ia64_ih_clock(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+	struct eventtimer *et;
+	struct trapframe *stf;
+	uint64_t itc, load;
+	uint32_t mode;
+
+	PCPU_INC(md.stats.pcs_nclks);
+	intrcnt[INTRCNT_CLOCK]++;
+
+	itc = ia64_get_itc();
+	PCPU_SET(md.clock, itc);
+
+	mode = PCPU_GET(md.clock_mode);
+	if (mode == CLOCK_ET_PERIODIC) {
+		/* Re-arm the match register one period from now. */
+		load = PCPU_GET(md.clock_load);
+		ia64_set_itm(itc + load);
+	} else
+		/* One-shot fired (or off): mask the vector, as in stop. */
+		ia64_set_itv((1 << 16) | xiv);
+
+	ia64_set_eoi(0);
+	ia64_srlz_d();
+
+	et = &ia64_clock_et;
+	if (et->et_active) {
+		/* Expose the interrupted frame to the callback. */
+		stf = td->td_intr_frame;
+		td->td_intr_frame = tf;
+		et->et_event_cb(et, et->et_arg);
+		td->td_intr_frame = stf;
+	}
+	return (1);
+}
+
+/*
+ * Event timer start method.  'first' and 'period' are sbintime_t
+ * (32.32 fixed-point seconds); multiplying by et_frequency and
+ * taking the high 32 bits yields ITC ticks.  A zero period selects
+ * one-shot mode.
+ */
+static int
+ia64_clock_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
+{
+	u_long itc, load;
+	register_t is;
+
+	if (period != 0) {
+		PCPU_SET(md.clock_mode, CLOCK_ET_PERIODIC);
+		load = (et->et_frequency * period) >> 32;
+	} else {
+		PCPU_SET(md.clock_mode, CLOCK_ET_ONESHOT);
+		load = 0;
+	}
+
+	PCPU_SET(md.clock_load, load);
+
+	/* The first expiry may differ from the steady-state period. */
+	if (first != 0)
+		load = (et->et_frequency * first) >> 32;
+
+	/* Program match register and vector with interrupts disabled. */
+	is = intr_disable();
+	itc = ia64_get_itc();
+	ia64_set_itm(itc + load);
+	ia64_set_itv(ia64_clock_xiv);
+	ia64_srlz_d();
+	intr_restore(is);
+	return (0);
+}
+
+/*
+ * Event timer stop method: mask the ITC interrupt vector and mark
+ * the per-CPU clock state as off.
+ */
+static int
+ia64_clock_stop(struct eventtimer *et)
+{
+
+	/* Bit 16 in the ITV masks delivery; same idiom as the handler. */
+	ia64_set_itv((1 << 16) | ia64_clock_xiv);
+	ia64_srlz_d();
+	PCPU_SET(md.clock_mode, CLOCK_ET_OFF);
+	PCPU_SET(md.clock_load, 0);
+	return (0);
+}
+
+/*
+ * Clock initialization entry point, invoked on the BSP and on every
+ * AP so that the common setup lives in one place.
+ */
+void
+cpu_initclocks()
+{
+
+	/* Quiesce the ITC interrupt before (re)configuring anything. */
+	ia64_clock_stop(NULL);
+	if (PCPU_GET(cpuid) != 0)
+		cpu_initclocks_ap();
+	else
+		cpu_initclocks_bsp();
+}
+
+/*
+ * Allocate an external interrupt vector for the ITC, register the
+ * per-CPU event timer and, on UP kernels, the ITC timecounter.
+ * Runs once at SI_SUB_CONFIGURE time via SYSINIT below.
+ */
+static void
+clock_configure(void *dummy)
+{
+	struct eventtimer *et;
+	u_long itc_freq;
+
+	ia64_clock_xiv = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI,
+	    ia64_ih_clock);
+	if (ia64_clock_xiv == 0)
+		panic("No XIV for clock interrupts");
+
+	/* ia64_itc_freq() presumably reports MHz (hence the scaling). */
+	itc_freq = (u_long)ia64_itc_freq() * 1000000ul;
+
+	et = &ia64_clock_et;
+	et->et_name = "ITC";
+	et->et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
+	et->et_quality = 1000;
+	et->et_frequency = itc_freq;
+	et->et_min_period = SBT_1S / (10 * hz);
+	/* Largest programmable interval without wrapping 32.32 math. */
+	et->et_max_period = (0xfffffffeul << 32) / itc_freq;
+	et->et_start = ia64_clock_start;
+	et->et_stop = ia64_clock_stop;
+	et->et_priv = NULL;
+	et_register(et);
+
+#ifndef SMP
+	ia64_timecounter.tc_frequency = itc_freq;
+	tc_init(&ia64_timecounter);
+#endif
+}
+SYSINIT(clkcfg, SI_SUB_CONFIGURE, SI_ORDER_SECOND, clock_configure, NULL);


Property changes on: trunk/sys/ia64/ia64/clock.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/context.S
===================================================================
--- trunk/sys/ia64/ia64/context.S	                        (rev 0)
+++ trunk/sys/ia64/ia64/context.S	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,805 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/context.S 204184 2010-02-22 01:23:41Z marcel $
+ */
+
+#include <machine/asm.h>
+#include <assym.s>
+
+	.text
+
+/*
+ * void restorectx(struct pcb *)
+ */
+ENTRY(restorectx, 1)
+	// r32 = pcb to resume.  Reloads the special state (sp, rp, pr,
+	// pfs, bspstore, rnat, unat) and callee-saved registers, then
+	// returns with r8 = 1 (the "resumed" value for swapctx callers).
+{	.mmi
+	invala
+	mov		ar.rsc=0
+	add		r31=8,r32
+	;;
+}
+{	.mmi
+	ld8		r12=[r32]		// sp
+	ld8		r16=[r31],16		// unat (before)
+	add		r30=16,r32
+	;;
+}
+{	.mmi
+	ld8		r17=[r30],16		// rp
+	ld8		r18=[r31],16		// pr
+	add		r14=SIZEOF_SPECIAL,r32
+	;;
+}
+{	.mmi
+	ld8		r19=[r30],16		// pfs
+	ld8		r20=[r31],16		// bspstore
+	mov		rp=r17
+	;;
+}
+{	.mmi
+	loadrs
+	ld8		r21=[r30],16		// rnat
+	mov		pr=r18,0x1fffe
+	;;
+}
+{	.mmi
+	ld8		r17=[r14],8		// unat (after)
+	mov		ar.bspstore=r20
+	mov		ar.pfs=r19
+	;;
+}
+{	.mmi
+	mov		ar.unat=r17
+	mov		ar.rnat=r21
+	add		r15=8,r14
+	;;
+}
+{	.mmi
+	ld8.fill	r4=[r14],16		// r4
+	ld8.fill	r5=[r15],16		// r5
+	nop		0
+	;;
+}
+{	.mmi
+	ld8.fill	r6=[r14],16		// r6
+	ld8.fill	r7=[r15],16		// r7
+	nop		0
+	;;
+}
+{	.mmi
+	mov		ar.unat=r16
+	mov		ar.rsc=3
+	nop		0
+}
+{	.mmi
+	ld8		r17=[r14],16		// b1
+	ld8		r18=[r15],16		// b2
+	nop		0
+	;;
+}
+{	.mmi
+	ld8		r19=[r14],16		// b3
+	ld8		r20=[r15],16		// b4
+	mov		b1=r17
+	;;
+}
+{	.mmi
+	ld8		r16=[r14],24		// b5
+	ld8		r17=[r15],32		// lc
+	mov		b2=r18
+	;;
+}
+{	.mmi
+	ldf.fill	f2=[r14],32
+	ldf.fill	f3=[r15],32
+	mov		b3=r19
+	;;
+}
+{	.mmi
+	ldf.fill	f4=[r14],32
+	ldf.fill	f5=[r15],32
+	mov		b4=r20
+	;;
+}
+{	.mmi
+	ldf.fill	f16=[r14],32
+	ldf.fill	f17=[r15],32
+	mov		b5=r16
+	;;
+}
+{	.mmi
+	ldf.fill	f18=[r14],32
+	ldf.fill	f19=[r15],32
+	mov		ar.lc=r17
+	;;
+}
+	ldf.fill	f20=[r14],32
+	ldf.fill	f21=[r15],32
+	;;
+	ldf.fill	f22=[r14],32
+	ldf.fill	f23=[r15],32
+	;;
+	ldf.fill	f24=[r14],32
+	ldf.fill	f25=[r15],32
+	;;
+	ldf.fill	f26=[r14],32
+	ldf.fill	f27=[r15],32
+	;;
+{	.mmi
+	ldf.fill	f28=[r14],32
+	ldf.fill	f29=[r15],32
+	add		r8=1,r0			// return value 1
+	;;
+}
+{	.mmb
+	ldf.fill	f30=[r14]
+	ldf.fill	f31=[r15]
+	br.ret.sptk	rp
+	;;
+}
+END(restorectx)
+
+/*
+ * void swapctx(struct pcb *old, struct pcb *new)
+ */
+
+ENTRY(swapctx, 2)
+	// r32 = pcb to save into, r33 = pcb to resume (or 0).  When r33
+	// is 0 (p15 set below) we return 0 after saving; otherwise we
+	// branch into restorectx(r33), which later "returns" here with 1.
+{	.mmi
+	mov		ar.rsc=0
+	mov		r16=ar.unat
+	add		r31=8,r32
+	;;
+}
+{	.mmi
+	flushrs
+	st8		[r32]=sp,16		// sp
+	mov		r17=rp
+	;;
+}
+{	.mmi
+	st8		[r31]=r16,16		// unat (before)
+	st8		[r32]=r17,16		// rp
+	mov		r16=pr
+	;;
+}
+{	.mmi
+	st8		[r31]=r16,16		// pr
+	mov		r17=ar.bsp
+	mov		r16=ar.pfs
+	;;
+}
+{	.mmi
+	st8		[r32]=r16,16		// pfs
+	st8		[r31]=r17,16		// bspstore
+	cmp.eq		p15,p0=0,r33		// p15: save-only call
+	;;
+}
+{	.mmi
+	mov		r16=ar.rnat
+(p15)	mov		ar.rsc=3
+	add		r30=SIZEOF_SPECIAL-(6*8),r32
+	;;
+}
+{	.mmi
+	st8		[r32]=r16,SIZEOF_SPECIAL-(4*8)		// rnat
+	st8		[r31]=r0,SIZEOF_SPECIAL-(6*8)		// __spare
+	mov		r16=b1
+	;;
+}
+	/* callee_saved */
+{	.mmi
+	.mem.offset	8,0
+	st8.spill	[r31]=r4,16		// r4
+	.mem.offset	16,0
+	st8.spill	[r32]=r5,16		// r5
+	mov		r17=b2
+	;;
+}
+{	.mmi
+	.mem.offset	24,0
+	st8.spill	[r31]=r6,16		// r6
+	.mem.offset	32,0
+	st8.spill	[r32]=r7,16		// r7
+	mov		r18=b3
+	;;
+}
+{	.mmi
+	st8		[r31]=r16,16		// b1
+	mov		r16=ar.unat
+	mov		r19=b4
+	;;
+}
+{	.mmi
+	st8		[r30]=r16		// unat (after)
+	st8		[r32]=r17,16		// b2
+	mov		r16=b5
+	;;
+}
+{	.mmi
+	st8		[r31]=r18,16		// b3
+	st8		[r32]=r19,16		// b4
+	mov		r17=ar.lc
+	;;
+}
+	st8		[r31]=r16,16		// b5
+	st8		[r32]=r17,16		// lc
+	;;
+	st8		[r31]=r0,24		// __spare
+	stf.spill	[r32]=f2,32
+	;;
+	stf.spill	[r31]=f3,32
+	stf.spill	[r32]=f4,32
+	;;
+	stf.spill	[r31]=f5,32
+	stf.spill	[r32]=f16,32
+	;;
+	stf.spill	[r31]=f17,32
+	stf.spill	[r32]=f18,32
+	;;
+	stf.spill	[r31]=f19,32
+	stf.spill	[r32]=f20,32
+	;;
+	stf.spill	[r31]=f21,32
+	stf.spill	[r32]=f22,32
+	;;
+	stf.spill	[r31]=f23,32
+	stf.spill	[r32]=f24,32
+	;;
+	stf.spill	[r31]=f25,32
+	stf.spill	[r32]=f26,32
+	;;
+	stf.spill	[r31]=f27,32
+	stf.spill	[r32]=f28,32
+	;;
+{	.mmi
+	stf.spill	[r31]=f29,32
+	stf.spill	[r32]=f30
+(p15)	add		r8=0,r0			// return value 0 (saved)
+	;;
+}
+{	.mmb
+	stf.spill	[r31]=f31
+	mf
+(p15)	br.ret.sptk	rp
+	;;
+}
+{	.mib
+	mov		r32=r33
+	nop		0
+	br.sptk		restorectx
+	;;
+}
+END(swapctx)
+
+/*
+ * save_callee_saved(struct _callee_saved *)
+ */
+ENTRY(save_callee_saved, 1)
+	// r32 = struct _callee_saved.  Spills r4-r7 (with NaT bits via
+	// ar.unat, stored at offset 0) and b1-b5, ar.lc, __spare.
+{	.mii
+	nop		0
+	add		r14=8,r32
+	add		r15=16,r32
+	;;
+}
+{	.mmi
+	.mem.offset	8,0
+	st8.spill	[r14]=r4,16		// r4
+	.mem.offset	16,0
+	st8.spill	[r15]=r5,16		// r5
+	mov		r16=b1
+	;;
+}
+{	.mmi
+	.mem.offset	24,0
+	st8.spill	[r14]=r6,16		// r6
+	.mem.offset	32,0
+	st8.spill	[r15]=r7,16		// r7
+	mov		r17=b2
+	;;
+}
+{	.mmi
+	st8		[r14]=r16,16		// b1
+	mov		r18=ar.unat
+	mov		r19=b3
+	;;
+}
+{	.mmi
+	st8		[r32]=r18		// nat (after)
+	st8		[r15]=r17,16		// b2
+	mov		r16=b4
+	;;
+}
+{	.mmi
+	st8		[r14]=r19,16		// b3
+	st8		[r15]=r16,16		// b4
+	mov		r17=b5
+	;;
+}
+{	.mii
+	st8		[r14]=r17,16		// b5
+	mov		r16=ar.lc
+	nop		0
+	;;
+}
+{	.mmb
+	st8		[r15]=r16		// ar.lc
+	st8		[r14]=r0		// __spare
+	br.ret.sptk	rp
+	;;
+}
+END(save_callee_saved)
+
+/*
+ * restore_callee_saved(struct _callee_saved *)
+ */
+ENTRY(restore_callee_saved, 1)
+	// r32 = struct _callee_saved.  Inverse of save_callee_saved():
+	// the saved ar.unat word is restored first so the ld8.fill of
+	// r4-r7 reinstates their NaT bits.
+{	.mmi
+	ld8		r30=[r32],16		// nat (after)
+	;;
+	mov		ar.unat=r30
+	add		r31=-8,r32
+	;;
+}
+{	.mmi
+	ld8.fill	r4=[r31],16		// r4
+	ld8.fill	r5=[r32],16		// r5
+	nop		0
+	;;
+}
+{	.mmi
+	ld8.fill	r6=[r31],16		// r6
+	ld8.fill	r7=[r32],16		// r7
+	nop		0
+	;;
+}
+{	.mmi
+	ld8		r30=[r31],16		// b1
+	ld8		r29=[r32],16		// b2
+	nop		0
+	;;
+}
+{	.mmi
+	ld8		r28=[r31],16		// b3
+	ld8		r27=[r32],16		// b4
+	mov		b1=r30
+	;;
+}
+{	.mii
+	ld8		r26=[r31]		// b5
+	mov		b2=r29
+	mov		b3=r28
+	;;
+}
+{	.mii
+	ld8		r25=[r32]		// lc
+	mov		b4=r27
+	mov		b5=r26
+	;;
+}
+{	.mib
+	nop		0
+	mov		ar.lc=r25
+	br.ret.sptk	rp
+	;;
+}
+END(restore_callee_saved)
+
+/*
+ * save_callee_saved_fp(struct _callee_saved_fp *)
+ */
+ENTRY(save_callee_saved_fp, 1)
+	// r32 = struct _callee_saved_fp.  Spills the preserved FP
+	// registers f2-f5 and f16-f31, 16 bytes apart via two pointers.
+	add		r31=16,r32
+	stf.spill	[r32]=f2,32
+	;;
+	stf.spill	[r31]=f3,32
+	stf.spill	[r32]=f4,32
+	;;
+	stf.spill	[r31]=f5,32
+	stf.spill	[r32]=f16,32
+	;;
+	stf.spill	[r31]=f17,32
+	stf.spill	[r32]=f18,32
+	;;
+	stf.spill	[r31]=f19,32
+	stf.spill	[r32]=f20,32
+	;;
+	stf.spill	[r31]=f21,32
+	stf.spill	[r32]=f22,32
+	;;
+	stf.spill	[r31]=f23,32
+	stf.spill	[r32]=f24,32
+	;;
+	stf.spill	[r31]=f25,32
+	stf.spill	[r32]=f26,32
+	;;
+	stf.spill	[r31]=f27,32
+	stf.spill	[r32]=f28,32
+	;;
+	stf.spill	[r31]=f29,32
+	stf.spill	[r32]=f30
+	;;
+	stf.spill	[r31]=f31
+	br.ret.sptk	rp
+	;;
+END(save_callee_saved_fp)
+
+/*
+ * restore_callee_saved_fp(struct _callee_saved_fp *)
+ */
+ENTRY(restore_callee_saved_fp, 1)
+	// r32 = struct _callee_saved_fp.  Reloads f2-f5 and f16-f31
+	// exactly as save_callee_saved_fp() laid them out.
+	add		r31=16,r32
+	ldf.fill	f2=[r32],32
+	;;
+	ldf.fill	f3=[r31],32
+	ldf.fill	f4=[r32],32
+	;;
+	ldf.fill	f5=[r31],32
+	ldf.fill	f16=[r32],32
+	;;
+	ldf.fill	f17=[r31],32
+	ldf.fill	f18=[r32],32
+	;;
+	ldf.fill	f19=[r31],32
+	ldf.fill	f20=[r32],32
+	;;
+	ldf.fill	f21=[r31],32
+	ldf.fill	f22=[r32],32
+	;;
+	ldf.fill	f23=[r31],32
+	ldf.fill	f24=[r32],32
+	;;
+	ldf.fill	f25=[r31],32
+	ldf.fill	f26=[r32],32
+	;;
+	ldf.fill	f27=[r31],32
+	ldf.fill	f28=[r32],32
+	;;
+	ldf.fill	f29=[r31],32
+	ldf.fill	f30=[r32]
+	;;
+	ldf.fill	f31=[r31]
+	br.ret.sptk	rp
+	;;
+END(restore_callee_saved_fp)
+
+/*
+ * save_high_fp(struct _high_fp *)
+ */
+ENTRY(save_high_fp, 1)
+	// r32 = struct _high_fp.  Clears psr.dfh so the high FP
+	// partition (f32-f127) is accessible, spills it, then sets
+	// psr.dfh again so later use re-faults for lazy management.
+	rsm		psr.dfh
+	;;
+	srlz.d
+	add		r31=16,r32
+	stf.spill	[r32]=f32,32
+	;;
+	stf.spill	[r31]=f33,32
+	stf.spill	[r32]=f34,32
+	;;
+	stf.spill	[r31]=f35,32
+	stf.spill	[r32]=f36,32
+	;;
+	stf.spill	[r31]=f37,32
+	stf.spill	[r32]=f38,32
+	;;
+	stf.spill	[r31]=f39,32
+	stf.spill	[r32]=f40,32
+	;;
+	stf.spill	[r31]=f41,32
+	stf.spill	[r32]=f42,32
+	;;
+	stf.spill	[r31]=f43,32
+	stf.spill	[r32]=f44,32
+	;;
+	stf.spill	[r31]=f45,32
+	stf.spill	[r32]=f46,32
+	;;
+	stf.spill	[r31]=f47,32
+	stf.spill	[r32]=f48,32
+	;;
+	stf.spill	[r31]=f49,32
+	stf.spill	[r32]=f50,32
+	;;
+	stf.spill	[r31]=f51,32
+	stf.spill	[r32]=f52,32
+	;;
+	stf.spill	[r31]=f53,32
+	stf.spill	[r32]=f54,32
+	;;
+	stf.spill	[r31]=f55,32
+	stf.spill	[r32]=f56,32
+	;;
+	stf.spill	[r31]=f57,32
+	stf.spill	[r32]=f58,32
+	;;
+	stf.spill	[r31]=f59,32
+	stf.spill	[r32]=f60,32
+	;;
+	stf.spill	[r31]=f61,32
+	stf.spill	[r32]=f62,32
+	;;
+	stf.spill	[r31]=f63,32
+	stf.spill	[r32]=f64,32
+	;;
+	stf.spill	[r31]=f65,32
+	stf.spill	[r32]=f66,32
+	;;
+	stf.spill	[r31]=f67,32
+	stf.spill	[r32]=f68,32
+	;;
+	stf.spill	[r31]=f69,32
+	stf.spill	[r32]=f70,32
+	;;
+	stf.spill	[r31]=f71,32
+	stf.spill	[r32]=f72,32
+	;;
+	stf.spill	[r31]=f73,32
+	stf.spill	[r32]=f74,32
+	;;
+	stf.spill	[r31]=f75,32
+	stf.spill	[r32]=f76,32
+	;;
+	stf.spill	[r31]=f77,32
+	stf.spill	[r32]=f78,32
+	;;
+	stf.spill	[r31]=f79,32
+	stf.spill	[r32]=f80,32
+	;;
+	stf.spill	[r31]=f81,32
+	stf.spill	[r32]=f82,32
+	;;
+	stf.spill	[r31]=f83,32
+	stf.spill	[r32]=f84,32
+	;;
+	stf.spill	[r31]=f85,32
+	stf.spill	[r32]=f86,32
+	;;
+	stf.spill	[r31]=f87,32
+	stf.spill	[r32]=f88,32
+	;;
+	stf.spill	[r31]=f89,32
+	stf.spill	[r32]=f90,32
+	;;
+	stf.spill	[r31]=f91,32
+	stf.spill	[r32]=f92,32
+	;;
+	stf.spill	[r31]=f93,32
+	stf.spill	[r32]=f94,32
+	;;
+	stf.spill	[r31]=f95,32
+	stf.spill	[r32]=f96,32
+	;;
+	stf.spill	[r31]=f97,32
+	stf.spill	[r32]=f98,32
+	;;
+	stf.spill	[r31]=f99,32
+	stf.spill	[r32]=f100,32
+	;;
+	stf.spill	[r31]=f101,32
+	stf.spill	[r32]=f102,32
+	;;
+	stf.spill	[r31]=f103,32
+	stf.spill	[r32]=f104,32
+	;;
+	stf.spill	[r31]=f105,32
+	stf.spill	[r32]=f106,32
+	;;
+	stf.spill	[r31]=f107,32
+	stf.spill	[r32]=f108,32
+	;;
+	stf.spill	[r31]=f109,32
+	stf.spill	[r32]=f110,32
+	;;
+	stf.spill	[r31]=f111,32
+	stf.spill	[r32]=f112,32
+	;;
+	stf.spill	[r31]=f113,32
+	stf.spill	[r32]=f114,32
+	;;
+	stf.spill	[r31]=f115,32
+	stf.spill	[r32]=f116,32
+	;;
+	stf.spill	[r31]=f117,32
+	stf.spill	[r32]=f118,32
+	;;
+	stf.spill	[r31]=f119,32
+	stf.spill	[r32]=f120,32
+	;;
+	stf.spill	[r31]=f121,32
+	stf.spill	[r32]=f122,32
+	;;
+	stf.spill	[r31]=f123,32
+	stf.spill	[r32]=f124,32
+	;;
+	stf.spill	[r31]=f125,32
+	stf.spill	[r32]=f126
+	;;
+	stf.spill	[r31]=f127
+	ssm		psr.dfh
+	;;
+	srlz.d
+	br.ret.sptk	rp
+	;;
+END(save_high_fp)
+
+/*
+ * restore_high_fp(struct _high_fp *)
+ */
+ENTRY(restore_high_fp, 1)
+	// r32 = struct _high_fp.  Mirror of save_high_fp(): clears
+	// psr.dfh, refills f32-f127, then sets psr.dfh again.
+	rsm		psr.dfh
+	;;
+	srlz.d
+	add		r31=16,r32
+	ldf.fill	f32=[r32],32
+	;;
+	ldf.fill	f33=[r31],32
+	ldf.fill	f34=[r32],32
+	;;
+	ldf.fill	f35=[r31],32
+	ldf.fill	f36=[r32],32
+	;;
+	ldf.fill	f37=[r31],32
+	ldf.fill	f38=[r32],32
+	;;
+	ldf.fill	f39=[r31],32
+	ldf.fill	f40=[r32],32
+	;;
+	ldf.fill	f41=[r31],32
+	ldf.fill	f42=[r32],32
+	;;
+	ldf.fill	f43=[r31],32
+	ldf.fill	f44=[r32],32
+	;;
+	ldf.fill	f45=[r31],32
+	ldf.fill	f46=[r32],32
+	;;
+	ldf.fill	f47=[r31],32
+	ldf.fill	f48=[r32],32
+	;;
+	ldf.fill	f49=[r31],32
+	ldf.fill	f50=[r32],32
+	;;
+	ldf.fill	f51=[r31],32
+	ldf.fill	f52=[r32],32
+	;;
+	ldf.fill	f53=[r31],32
+	ldf.fill	f54=[r32],32
+	;;
+	ldf.fill	f55=[r31],32
+	ldf.fill	f56=[r32],32
+	;;
+	ldf.fill	f57=[r31],32
+	ldf.fill	f58=[r32],32
+	;;
+	ldf.fill	f59=[r31],32
+	ldf.fill	f60=[r32],32
+	;;
+	ldf.fill	f61=[r31],32
+	ldf.fill	f62=[r32],32
+	;;
+	ldf.fill	f63=[r31],32
+	ldf.fill	f64=[r32],32
+	;;
+	ldf.fill	f65=[r31],32
+	ldf.fill	f66=[r32],32
+	;;
+	ldf.fill	f67=[r31],32
+	ldf.fill	f68=[r32],32
+	;;
+	ldf.fill	f69=[r31],32
+	ldf.fill	f70=[r32],32
+	;;
+	ldf.fill	f71=[r31],32
+	ldf.fill	f72=[r32],32
+	;;
+	ldf.fill	f73=[r31],32
+	ldf.fill	f74=[r32],32
+	;;
+	ldf.fill	f75=[r31],32
+	ldf.fill	f76=[r32],32
+	;;
+	ldf.fill	f77=[r31],32
+	ldf.fill	f78=[r32],32
+	;;
+	ldf.fill	f79=[r31],32
+	ldf.fill	f80=[r32],32
+	;;
+	ldf.fill	f81=[r31],32
+	ldf.fill	f82=[r32],32
+	;;
+	ldf.fill	f83=[r31],32
+	ldf.fill	f84=[r32],32
+	;;
+	ldf.fill	f85=[r31],32
+	ldf.fill	f86=[r32],32
+	;;
+	ldf.fill	f87=[r31],32
+	ldf.fill	f88=[r32],32
+	;;
+	ldf.fill	f89=[r31],32
+	ldf.fill	f90=[r32],32
+	;;
+	ldf.fill	f91=[r31],32
+	ldf.fill	f92=[r32],32
+	;;
+	ldf.fill	f93=[r31],32
+	ldf.fill	f94=[r32],32
+	;;
+	ldf.fill	f95=[r31],32
+	ldf.fill	f96=[r32],32
+	;;
+	ldf.fill	f97=[r31],32
+	ldf.fill	f98=[r32],32
+	;;
+	ldf.fill	f99=[r31],32
+	ldf.fill	f100=[r32],32
+	;;
+	ldf.fill	f101=[r31],32
+	ldf.fill	f102=[r32],32
+	;;
+	ldf.fill	f103=[r31],32
+	ldf.fill	f104=[r32],32
+	;;
+	ldf.fill	f105=[r31],32
+	ldf.fill	f106=[r32],32
+	;;
+	ldf.fill	f107=[r31],32
+	ldf.fill	f108=[r32],32
+	;;
+	ldf.fill	f109=[r31],32
+	ldf.fill	f110=[r32],32
+	;;
+	ldf.fill	f111=[r31],32
+	ldf.fill	f112=[r32],32
+	;;
+	ldf.fill	f113=[r31],32
+	ldf.fill	f114=[r32],32
+	;;
+	ldf.fill	f115=[r31],32
+	ldf.fill	f116=[r32],32
+	;;
+	ldf.fill	f117=[r31],32
+	ldf.fill	f118=[r32],32
+	;;
+	ldf.fill	f119=[r31],32
+	ldf.fill	f120=[r32],32
+	;;
+	ldf.fill	f121=[r31],32
+	ldf.fill	f122=[r32],32
+	;;
+	ldf.fill	f123=[r31],32
+	ldf.fill	f124=[r32],32
+	;;
+	ldf.fill	f125=[r31],32
+	ldf.fill	f126=[r32]
+	;;
+	ldf.fill	f127=[r31]
+	ssm		psr.dfh
+	;;
+	srlz.d
+	br.ret.sptk	rp
+	;;
+END(restore_high_fp)


Property changes on: trunk/sys/ia64/ia64/context.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/db_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/db_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/db_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,611 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003-2005 Marcel Moolenaar
+ * Copyright (c) 2000-2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_xtrace.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/db_machdep.c 268200 2014-07-02 23:47:43Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/cons.h>
+#include <sys/kdb.h>
+#include <sys/ktr.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+#include <sys/smp.h>
+#include <sys/stack.h>
+
+#include <vm/vm.h>
+
+#include <machine/db_machdep.h>
+#include <machine/frame.h>
+#include <machine/kdb.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/setjmp.h>
+#include <machine/unwind.h>
+#include <machine/vmparam.h>
+
+#include <ddb/ddb.h>
+#include <ddb/db_access.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+
+#include <ia64/disasm/disasm.h>
+
+#define	TMPL_BITS	5
+#define	TMPL_MASK	((1 << TMPL_BITS) - 1)
+#define	SLOT_BITS	41
+#define	SLOT_COUNT	3
+#define	SLOT_MASK	((1ULL << SLOT_BITS) - 1ULL)
+#define	SLOT_SHIFT(i)	(TMPL_BITS+((i)<<3)+(i))
+
+typedef db_expr_t __db_f(db_expr_t, db_expr_t, db_expr_t, db_expr_t, db_expr_t,
+    db_expr_t, db_expr_t, db_expr_t);
+
+register uint64_t __db_gp __asm__("gp");
+
+static db_varfcn_t db_frame;
+static db_varfcn_t db_getip;
+static db_varfcn_t db_getrse;
+
+/*
+ * DDB register-name table.  Entries backed by a trapframe field use
+ * db_frame with the field's byte offset smuggled through 'valuep';
+ * stacked registers r32..r127 use db_getrse with the register index
+ * (relative to r32) in 'valuep'; 'ip' is synthesized by db_getip.
+ */
+#define	DB_OFFSET(x)	(db_expr_t *)offsetof(struct trapframe, x)
+struct db_variable db_regs[] = {
+	{"ip",		NULL,				db_getip},
+	{"cr.ifs",	DB_OFFSET(tf_special.cfm),	db_frame},
+	{"cr.ifa",	DB_OFFSET(tf_special.ifa),	db_frame},
+	{"ar.bspstore",	DB_OFFSET(tf_special.bspstore),	db_frame},
+	{"ndirty",	DB_OFFSET(tf_special.ndirty),	db_frame},
+	{"rp",		DB_OFFSET(tf_special.rp),	db_frame},
+	{"ar.pfs",	DB_OFFSET(tf_special.pfs),	db_frame},
+	{"psr",		DB_OFFSET(tf_special.psr),	db_frame},
+	{"cr.isr",	DB_OFFSET(tf_special.isr),	db_frame},
+	{"pr",		DB_OFFSET(tf_special.pr),	db_frame},
+	{"ar.rsc",	DB_OFFSET(tf_special.rsc),	db_frame},
+	{"ar.rnat",	DB_OFFSET(tf_special.rnat),	db_frame},
+	{"ar.unat",	DB_OFFSET(tf_special.unat),	db_frame},
+	{"ar.fpsr",	DB_OFFSET(tf_special.fpsr),	db_frame},
+	{"gp",		DB_OFFSET(tf_special.gp),	db_frame},
+	{"sp",		DB_OFFSET(tf_special.sp),	db_frame},
+	{"tp",		DB_OFFSET(tf_special.tp),	db_frame},
+	{"b6",		DB_OFFSET(tf_scratch.br6),	db_frame},
+	{"b7",		DB_OFFSET(tf_scratch.br7),	db_frame},
+	{"r2",		DB_OFFSET(tf_scratch.gr2),	db_frame},
+	{"r3",		DB_OFFSET(tf_scratch.gr3),	db_frame},
+	{"r8",		DB_OFFSET(tf_scratch.gr8),	db_frame},
+	{"r9",		DB_OFFSET(tf_scratch.gr9),	db_frame},
+	{"r10",		DB_OFFSET(tf_scratch.gr10),	db_frame},
+	{"r11",		DB_OFFSET(tf_scratch.gr11),	db_frame},
+	{"r14",		DB_OFFSET(tf_scratch.gr14),	db_frame},
+	{"r15",		DB_OFFSET(tf_scratch.gr15),	db_frame},
+	{"r16",		DB_OFFSET(tf_scratch.gr16),	db_frame},
+	{"r17",		DB_OFFSET(tf_scratch.gr17),	db_frame},
+	{"r18",		DB_OFFSET(tf_scratch.gr18),	db_frame},
+	{"r19",		DB_OFFSET(tf_scratch.gr19),	db_frame},
+	{"r20",		DB_OFFSET(tf_scratch.gr20),	db_frame},
+	{"r21",		DB_OFFSET(tf_scratch.gr21),	db_frame},
+	{"r22",		DB_OFFSET(tf_scratch.gr22),	db_frame},
+	{"r23",		DB_OFFSET(tf_scratch.gr23),	db_frame},
+	{"r24",		DB_OFFSET(tf_scratch.gr24),	db_frame},
+	{"r25",		DB_OFFSET(tf_scratch.gr25),	db_frame},
+	{"r26",		DB_OFFSET(tf_scratch.gr26),	db_frame},
+	{"r27",		DB_OFFSET(tf_scratch.gr27),	db_frame},
+	{"r28",		DB_OFFSET(tf_scratch.gr28),	db_frame},
+	{"r29",		DB_OFFSET(tf_scratch.gr29),	db_frame},
+	{"r30",		DB_OFFSET(tf_scratch.gr30),	db_frame},
+	{"r31",		DB_OFFSET(tf_scratch.gr31),	db_frame},
+	/* Stacked registers: index relative to r32 passed via 'valuep'. */
+	{"r32",		(db_expr_t*)0,			db_getrse},
+	{"r33",		(db_expr_t*)1,			db_getrse},
+	{"r34",		(db_expr_t*)2,			db_getrse},
+	{"r35",		(db_expr_t*)3,			db_getrse},
+	{"r36",		(db_expr_t*)4,			db_getrse},
+	{"r37",		(db_expr_t*)5,			db_getrse},
+	{"r38",		(db_expr_t*)6,			db_getrse},
+	{"r39",		(db_expr_t*)7,			db_getrse},
+	{"r40",		(db_expr_t*)8,			db_getrse},
+	{"r41",		(db_expr_t*)9,			db_getrse},
+	{"r42",		(db_expr_t*)10,			db_getrse},
+	{"r43",		(db_expr_t*)11,			db_getrse},
+	{"r44",		(db_expr_t*)12,			db_getrse},
+	{"r45",		(db_expr_t*)13,			db_getrse},
+	{"r46",		(db_expr_t*)14,			db_getrse},
+	{"r47",		(db_expr_t*)15,			db_getrse},
+	{"r48",		(db_expr_t*)16,			db_getrse},
+	{"r49",		(db_expr_t*)17,			db_getrse},
+	{"r50",		(db_expr_t*)18,			db_getrse},
+	{"r51",		(db_expr_t*)19,			db_getrse},
+	{"r52",		(db_expr_t*)20,			db_getrse},
+	{"r53",		(db_expr_t*)21,			db_getrse},
+	{"r54",		(db_expr_t*)22,			db_getrse},
+	{"r55",		(db_expr_t*)23,			db_getrse},
+	{"r56",		(db_expr_t*)24,			db_getrse},
+	{"r57",		(db_expr_t*)25,			db_getrse},
+	{"r58",		(db_expr_t*)26,			db_getrse},
+	{"r59",		(db_expr_t*)27,			db_getrse},
+	{"r60",		(db_expr_t*)28,			db_getrse},
+	{"r61",		(db_expr_t*)29,			db_getrse},
+	{"r62",		(db_expr_t*)30,			db_getrse},
+	{"r63",		(db_expr_t*)31,			db_getrse},
+	{"r64",		(db_expr_t*)32,			db_getrse},
+	{"r65",		(db_expr_t*)33,			db_getrse},
+	{"r66",		(db_expr_t*)34,			db_getrse},
+	{"r67",		(db_expr_t*)35,			db_getrse},
+	{"r68",		(db_expr_t*)36,			db_getrse},
+	{"r69",		(db_expr_t*)37,			db_getrse},
+	{"r70",		(db_expr_t*)38,			db_getrse},
+	{"r71",		(db_expr_t*)39,			db_getrse},
+	{"r72",		(db_expr_t*)40,			db_getrse},
+	{"r73",		(db_expr_t*)41,			db_getrse},
+	{"r74",		(db_expr_t*)42,			db_getrse},
+	{"r75",		(db_expr_t*)43,			db_getrse},
+	{"r76",		(db_expr_t*)44,			db_getrse},
+	{"r77",		(db_expr_t*)45,			db_getrse},
+	{"r78",		(db_expr_t*)46,			db_getrse},
+	{"r79",		(db_expr_t*)47,			db_getrse},
+	{"r80",		(db_expr_t*)48,			db_getrse},
+	{"r81",		(db_expr_t*)49,			db_getrse},
+	{"r82",		(db_expr_t*)50,			db_getrse},
+	{"r83",		(db_expr_t*)51,			db_getrse},
+	{"r84",		(db_expr_t*)52,			db_getrse},
+	{"r85",		(db_expr_t*)53,			db_getrse},
+	{"r86",		(db_expr_t*)54,			db_getrse},
+	{"r87",		(db_expr_t*)55,			db_getrse},
+	{"r88",		(db_expr_t*)56,			db_getrse},
+	{"r89",		(db_expr_t*)57,			db_getrse},
+	{"r90",		(db_expr_t*)58,			db_getrse},
+	{"r91",		(db_expr_t*)59,			db_getrse},
+	{"r92",		(db_expr_t*)60,			db_getrse},
+	{"r93",		(db_expr_t*)61,			db_getrse},
+	{"r94",		(db_expr_t*)62,			db_getrse},
+	{"r95",		(db_expr_t*)63,			db_getrse},
+	{"r96",		(db_expr_t*)64,			db_getrse},
+	{"r97",		(db_expr_t*)65,			db_getrse},
+	{"r98",		(db_expr_t*)66,			db_getrse},
+	{"r99",		(db_expr_t*)67,			db_getrse},
+	{"r100",	(db_expr_t*)68,			db_getrse},
+	{"r101",	(db_expr_t*)69,			db_getrse},
+	{"r102",	(db_expr_t*)70,			db_getrse},
+	{"r103",	(db_expr_t*)71,			db_getrse},
+	{"r104",	(db_expr_t*)72,			db_getrse},
+	{"r105",	(db_expr_t*)73,			db_getrse},
+	{"r106",	(db_expr_t*)74,			db_getrse},
+	{"r107",	(db_expr_t*)75,			db_getrse},
+	{"r108",	(db_expr_t*)76,			db_getrse},
+	{"r109",	(db_expr_t*)77,			db_getrse},
+	{"r110",	(db_expr_t*)78,			db_getrse},
+	{"r111",	(db_expr_t*)79,			db_getrse},
+	{"r112",	(db_expr_t*)80,			db_getrse},
+	{"r113",	(db_expr_t*)81,			db_getrse},
+	{"r114",	(db_expr_t*)82,			db_getrse},
+	{"r115",	(db_expr_t*)83,			db_getrse},
+	{"r116",	(db_expr_t*)84,			db_getrse},
+	{"r117",	(db_expr_t*)85,			db_getrse},
+	{"r118",	(db_expr_t*)86,			db_getrse},
+	{"r119",	(db_expr_t*)87,			db_getrse},
+	{"r120",	(db_expr_t*)88,			db_getrse},
+	{"r121",	(db_expr_t*)89,			db_getrse},
+	{"r122",	(db_expr_t*)90,			db_getrse},
+	{"r123",	(db_expr_t*)91,			db_getrse},
+	{"r124",	(db_expr_t*)92,			db_getrse},
+	{"r125",	(db_expr_t*)93,			db_getrse},
+	{"r126",	(db_expr_t*)94,			db_getrse},
+	{"r127",	(db_expr_t*)95,			db_getrse},
+};
+/* One-past-the-end marker used by ddb's register iteration. */
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+/*
+ * Unwind and print the call chain recorded in 'pcb', printing at most
+ * 'count' frames (a negative count effectively means "no limit").  For
+ * each frame up to 8 stacked input arguments are read directly from
+ * the RSE backing store.  When the unwinder reports ERESTART a kernel
+ * trapframe was reached; the trace restarts from that frame unless it
+ * belongs to a syscall or user-mode interruption.
+ * Returns 0 on a normally-terminated trace, a positive errno otherwise.
+ */
+static int
+db_backtrace(struct thread *td, struct pcb *pcb, int count)
+{
+	struct unw_regstate rs;
+	struct trapframe *tf;
+	const char *name;
+	db_expr_t offset;
+	uint64_t bsp, cfm, ip, pfs, reg, sp;
+	c_db_sym_t sym;
+	int args, error, i;
+
+	error = unw_create_from_pcb(&rs, pcb);
+	while (!error && count-- && !db_pager_quit) {
+		error = unw_get_cfm(&rs, &cfm);
+		if (!error)
+			error = unw_get_bsp(&rs, &bsp);
+		if (!error)
+			error = unw_get_ip(&rs, &ip);
+		if (!error)
+			error = unw_get_sp(&rs, &sp);
+		if (error)
+			break;
+
+		/* Size of the local area of this frame; show at most 8. */
+		args = IA64_CFM_SOL(cfm);
+		if (args > 8)
+			args = 8;
+
+		error = unw_step(&rs);
+		if (!error) {
+			/*
+			 * Clamp to the number of registers the caller
+			 * actually passed (caller's sof - sol).
+			 */
+			if (!unw_get_cfm(&rs, &pfs)) {
+				i = IA64_CFM_SOF(pfs) - IA64_CFM_SOL(pfs);
+				if (args > i)
+					args = i;
+			}
+		}
+
+		sym = db_search_symbol(ip, DB_STGY_ANY, &offset);
+		db_symbol_values(sym, &name, NULL);
+		db_printf("%s(", name);
+		/* Only dereference the backing store for kernel addresses. */
+		if (bsp >= VM_MAXUSER_ADDRESS) {
+			for (i = 0; i < args; i++) {
+				/* Skip the NaT collection at each 0x1f8 boundary. */
+				if ((bsp & 0x1ff) == 0x1f8)
+					bsp += 8;
+				db_read_bytes(bsp, sizeof(reg), (void*)&reg);
+				if (i > 0)
+					db_printf(", ");
+				db_printf("0x%lx", reg);
+				bsp += 8;
+			}
+		} else
+			db_printf("...");
+		db_printf(") at ");
+
+		db_printsym(ip, DB_STGY_PROC);
+		db_printf("\n");
+
+		if (error != ERESTART)
+			continue;
+		if (sp < VM_MAXUSER_ADDRESS)
+			break;
+
+		/* The trapframe sits 16 bytes above the frame's sp. */
+		tf = (struct trapframe *)(sp + 16);
+		if ((tf->tf_flags & FRAME_SYSCALL) != 0 ||
+		    tf->tf_special.iip < VM_MAXUSER_ADDRESS)
+			break;
+
+		/* XXX ask if we should unwind across the trapframe. */
+		db_printf("--- trapframe at %p\n", tf);
+		unw_delete(&rs);
+		error = unw_create_from_frame(&rs, tf);
+	}
+
+	unw_delete(&rs);
+	/*
+	 * EJUSTRETURN and ERESTART signal the end of a trace and
+	 * are not really errors.
+	 */
+	return ((error > 0) ? error : 0);
+}
+
+/*
+ * Remove a planted breakpoint: restore the saved 41-bit instruction
+ * (*storage) into the bundle slot addressed by 'addr'.  The low 4
+ * address bits select the slot (0..2); out-of-range slots are ignored.
+ * The 8-byte word accessed is offset by slot*4 bytes so that the
+ * slot's 41 bits always fit inside a 64-bit window (see SLOT_SHIFT).
+ */
+void
+db_bkpt_clear(db_addr_t addr, BKPT_INST_TYPE *storage)
+{
+	BKPT_INST_TYPE tmp;
+	db_addr_t loc;
+	int slot;
+
+	slot = addr & 0xfUL;
+	if (slot >= SLOT_COUNT)
+		return;
+	loc = (addr & ~0xfUL) + (slot << 2);
+
+	db_read_bytes(loc, sizeof(BKPT_INST_TYPE), (char *)&tmp);
+	/* Clear the slot's bits, then put the saved instruction back. */
+	tmp &= ~(SLOT_MASK << SLOT_SHIFT(slot));
+	tmp |= *storage << SLOT_SHIFT(slot);
+	db_write_bytes(loc, sizeof(BKPT_INST_TYPE), (char *)&tmp);
+}
+
+/*
+ * Advance the interrupted context past the breakpoint instruction by
+ * bumping psr.ri to the next slot; when the slot wraps past slot 2,
+ * reset psr.ri and step iip to the next 16-byte bundle.
+ */
+void
+db_bkpt_skip(void)
+{
+
+	if (kdb_frame == NULL)
+		return;
+
+	kdb_frame->tf_special.psr += IA64_PSR_RI_1;
+	if ((kdb_frame->tf_special.psr & IA64_PSR_RI) > IA64_PSR_RI_2) {
+		kdb_frame->tf_special.psr &= ~IA64_PSR_RI;
+		kdb_frame->tf_special.iip += 16;
+	}
+}
+
+/*
+ * Plant a breakpoint: save the 41-bit instruction in the slot selected
+ * by the low 4 bits of 'addr' into *storage and replace it with a
+ * break instruction.  As in db_bkpt_clear, the 8-byte word is offset
+ * by slot*4 bytes so the slot fits within a 64-bit window.
+ */
+void
+db_bkpt_write(db_addr_t addr, BKPT_INST_TYPE *storage)
+{
+	BKPT_INST_TYPE tmp;
+	db_addr_t loc;
+	int slot;
+
+	slot = addr & 0xfUL;
+	if (slot >= SLOT_COUNT)
+		return;
+	loc = (addr & ~0xfUL) + (slot << 2);
+
+	db_read_bytes(loc, sizeof(BKPT_INST_TYPE), (char *)&tmp);
+	*storage = (tmp >> SLOT_SHIFT(slot)) & SLOT_MASK;
+
+	tmp &= ~(SLOT_MASK << SLOT_SHIFT(slot));
+	/*
+	 * 0x84000 << 6 appears to be the break-instruction encoding --
+	 * TODO confirm against the Itanium ISA.  NOTE(review): the
+	 * expression is of type int; shifting it by SLOT_SHIFT(2) (= 23)
+	 * overflows a 32-bit int, which is undefined behavior -- it
+	 * likely should be an unsigned 64-bit constant.
+	 */
+	tmp |= (0x84000 << 6) << SLOT_SHIFT(slot);
+	db_write_bytes(loc, sizeof(BKPT_INST_TYPE), (char *)&tmp);
+}
+
+/*
+ * Disassemble and print the instruction at 'loc' (low 4 bits encode
+ * the slot within the 16-byte bundle).  Returns the address of the
+ * next instruction: same bundle with slot+1, or the next bundle when
+ * the current one is exhausted (slot forced to 16, i.e. loc + 16).
+ */
+db_addr_t
+db_disasm(db_addr_t loc, boolean_t altfmt)
+{
+	char buf[32];
+	struct asm_bundle bundle;
+	const struct asm_inst *i;
+	const char *tmpl;
+	int n, slot;
+
+	slot = loc & 0xf;
+	loc &= ~0xful;
+	db_read_bytes(loc, 16, buf);
+	if (asm_decode((uintptr_t)buf, &bundle)) {
+		i = bundle.b_inst + slot;
+		tmpl = bundle.b_templ + slot;
+		/* Step over a stop (';') marker preceding this slot. */
+		if (*tmpl == ';' || (slot == 2 && bundle.b_templ[1] == ';'))
+			tmpl++;
+		/* 'L' slots (second half of movl) have nothing to print. */
+		if (*tmpl == 'L' || i->i_op == ASM_OP_NONE) {
+			db_printf("\n");
+			goto out;
+		}
+
+		/* Unit + slot. */
+		db_printf("[%c%d] ", *tmpl, slot);
+
+		/* Predicate. */
+		if (i->i_oper[0].o_value != 0) {
+			asm_operand(i->i_oper+0, buf, loc);
+			db_printf("(%s) ", buf);
+		} else
+			db_printf("   ");
+
+		/* Mnemonic & completers. */
+		asm_mnemonic(i->i_op, buf);
+		db_printf(buf);
+		n = 0;
+		while (n < i->i_ncmpltrs) {
+			asm_completer(i->i_cmpltr + n, buf);
+			db_printf(buf);
+			n++;
+		}
+		db_printf(" ");
+
+		/* Operands; '=' separates destinations from sources. */
+		n = 1;
+		while (n < 7 && i->i_oper[n].o_type != ASM_OPER_NONE) {
+			if (n > 1) {
+				if (n == i->i_srcidx)
+					db_printf("=");
+				else
+					db_printf(",");
+			}
+			asm_operand(i->i_oper + n, buf, loc);
+			db_printf(buf);
+			n++;
+		}
+		if (tmpl[1] == ';')
+			db_printf(" ;;");
+	} else {
+		/* Undecodable bundle: force advance to the next bundle. */
+		tmpl = NULL;
+		slot = 2;
+	}
+	db_printf("\n");
+
+out:
+	slot++;
+	/*
+	 * Skip the 'L' slot of an MLX bundle.  When decode failed, slot
+	 * is already 3 here, so the NULL tmpl is never dereferenced.
+	 */
+	if (slot == 1 && tmpl[1] == 'L')
+		slot++;
+	if (slot > 2)
+		slot = 16;
+	return (loc + slot);
+}
+
+/*
+ * Call an arbitrary kernel function from ddb.  A function descriptor
+ * (entry point + gp) is fabricated on the stack; the gp is taken from
+ * the kernel's own gp register, so calls into modules with a different
+ * gp will misbehave (noted below).  Always returns 1 (success).
+ */
+int
+db_fncall_ia64(db_expr_t addr, db_expr_t *rv, int nargs, db_expr_t args[])
+{
+	struct ia64_fdesc fdesc;
+	__db_f *f;
+
+	f = (__db_f *)&fdesc;
+	fdesc.func = addr;
+	fdesc.gp = __db_gp;	/* XXX doesn't work for modules. */
+	*rv = (*f)(args[0], args[1], args[2], args[3], args[4], args[5],
+	    args[6], args[7]);
+	return (1);
+}
+
+/*
+ * Get/set a ddb variable stored in the current trapframe.  The field's
+ * byte offset within struct trapframe is encoded in vp->valuep (see
+ * DB_OFFSET).  Returns 0 when no frame is available, 1 on success.
+ */
+static int
+db_frame(struct db_variable *vp, db_expr_t *valuep, int op)
+{
+	uint64_t *reg;
+
+	if (kdb_frame == NULL)
+		return (0);
+	reg = (uint64_t*)((uintptr_t)kdb_frame + (uintptr_t)vp->valuep);
+	if (op == DB_VAR_GET)
+		*valuep = *reg;
+	else
+		*reg = *valuep;
+	return (1);
+}
+
+/*
+ * Get/set the 'ip' pseudo-register.  The value combines the bundle
+ * address (iip) with the slot number taken from psr.ri (bits 41-42);
+ * on set, a slot > 2 is rejected.  Returns 0 on failure, 1 on success.
+ */
+static int
+db_getip(struct db_variable *vp, db_expr_t *valuep, int op)
+{
+	u_long iip, slot;
+
+	if (kdb_frame == NULL)
+		return (0);
+
+	if (op == DB_VAR_GET) {
+		iip = kdb_frame->tf_special.iip;
+		slot = (kdb_frame->tf_special.psr >> 41) & 3;
+		*valuep = iip + slot;
+	} else {
+		iip = *valuep & ~0xf;
+		slot = *valuep & 0xf;
+		if (slot > 2)
+			return (0);
+		kdb_frame->tf_special.iip = iip;
+		kdb_frame->tf_special.psr &= ~IA64_PSR_RI;
+		kdb_frame->tf_special.psr |= slot << 41;
+	}
+	return (1);
+}
+
+/*
+ * Get/set a stacked register (r32..r127) of the interrupted context.
+ * The register index relative to r32 is encoded in 'valuep'.  The
+ * register lives in the RSE backing store below bspstore + ndirty;
+ * 'nats' counts the NaT collection slots (one per 64 words) that sit
+ * between the register and the top of the dirty area.  Indexes at or
+ * beyond the frame size (cfm.sof) are rejected.
+ */
+static int
+db_getrse(struct db_variable *vp, db_expr_t *valuep, int op)
+{
+	u_int64_t *reg;
+	uint64_t bsp;
+	int nats, regno, sof;
+
+	if (kdb_frame == NULL)
+		return (0);
+
+	regno = (int)(intptr_t)valuep;
+	bsp = kdb_frame->tf_special.bspstore + kdb_frame->tf_special.ndirty;
+	sof = (int)(kdb_frame->tf_special.cfm & 0x7f);
+
+	if (regno >= sof)
+		return (0);
+
+	nats = (sof - regno + 63 - ((int)(bsp >> 3) & 0x3f)) / 63;
+	reg = (void*)(bsp - ((sof - regno + nats) << 3));
+	if (op == DB_VAR_GET)
+		*valuep = *reg;
+	else
+		*reg = *valuep;
+	return (1);
+}
+
+/* Hardware watchpoints are not supported on ia64; always fails. */
+int
+db_md_clr_watchpoint(db_expr_t addr, db_expr_t size)
+{
+
+	return (-1);
+}
+
+/* Hardware watchpoints are not supported on ia64; nothing to list. */
+void
+db_md_list_watchpoints()
+{
+
+	return;
+}
+
+/* Hardware watchpoints are not supported on ia64; always fails. */
+int
+db_md_set_watchpoint(db_expr_t addr, db_expr_t size)
+{
+
+	return (-1);
+}
+
+/*
+ * Read 'size' bytes from kernel address space into 'data' on behalf of
+ * the debugger.  Faults are caught through kdb_jmpbuf/setjmp so a bad
+ * address makes this return non-zero instead of panicking; 0 means
+ * the whole range was copied.
+ */
+int
+db_read_bytes(vm_offset_t addr, size_t size, char *data)
+{
+	jmp_buf jb;
+	void *prev_jb;
+	char *src;
+	int ret;
+
+	prev_jb = kdb_jmpbuf(jb);
+	ret = setjmp(jb);
+	if (ret == 0) {
+		src = (char *)addr;
+		while (size-- > 0)
+			*data++ = *src++;
+	}
+	/* Restore the previous fault handler unconditionally. */
+	(void)kdb_jmpbuf(prev_jb);
+	return (ret);
+}
+
+/*
+ * Write 'size' bytes from 'data' into kernel address space on behalf
+ * of the debugger.  Faults are caught through kdb_jmpbuf/setjmp; on a
+ * successful copy the instruction cache is synced, since the write may
+ * have patched instructions (e.g. breakpoints).  Returns 0 on success.
+ */
+int
+db_write_bytes(vm_offset_t addr, size_t size, char *data)
+{
+	jmp_buf jb;
+	void *prev_jb;
+	size_t cnt;
+	char *dst;
+	int ret;
+
+	prev_jb = kdb_jmpbuf(jb);
+	ret = setjmp(jb);
+	if (ret == 0) {
+		dst = (char *)addr;
+		cnt = size;
+		while (cnt-- > 0)
+			*dst++ = *data++;
+		kdb_cpu_sync_icache((void *)addr, size);
+	}
+	(void)kdb_jmpbuf(prev_jb);
+	return (ret);
+}
+
+/*
+ * Print the machine-dependent fields of a per-CPU structure for the
+ * ddb "show pcpu" command.
+ */
+void
+db_show_mdpcpu(struct pcpu *pc)
+{
+	struct pcpu_md *md = &pc->pc_md;
+
+	db_printf("MD: vhpt       = %#lx\n", md->vhpt);
+	db_printf("MD: lid        = %#lx\n", md->lid);
+	db_printf("MD: clock      = %#lx\n", md->clock);
+	db_printf("MD: clock_mode = %u\n", md->clock_mode);
+	db_printf("MD: clock_load = %#lx\n", md->clock_load);
+	db_printf("MD: stats      = %p\n", &md->stats);
+	db_printf("MD: pmap       = %p\n", md->current_pmap);
+#ifdef XTRACE
+	db_printf("MD: xtrace_buffer = %p\n", md->xtrace_buffer);
+	db_printf("MD: xtrace_tail   = %#lx\n", md->xtrace_tail);
+#endif
+}
+
+/*
+ * Backtrace the current thread: capture our own context with savectx()
+ * and walk it with no frame limit (-1).
+ */
+void
+db_trace_self(void)
+{
+	struct pcb pcb;
+
+	savectx(&pcb);
+	db_backtrace(curthread, &pcb, -1);
+}
+
+/*
+ * Backtrace an arbitrary thread using the context saved by the kdb
+ * machinery, printing at most 'count' frames.
+ */
+int
+db_trace_thread(struct thread *td, int count)
+{
+	struct pcb *ctx;
+
+	ctx = kdb_thr_ctx(td);
+	return (db_backtrace(td, ctx, count));
+}


Property changes on: trunk/sys/ia64/ia64/db_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/dump_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/dump_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/dump_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,441 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/dump_machdep.c 270296 2014-08-21 19:51:07Z emaste $");
+
+#include "opt_watchdog.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/cons.h>
+#include <sys/efi.h>
+#include <sys/kernel.h>
+#include <sys/kerneldump.h>
+#ifdef SW_WATCHDOG
+#include <sys/watchdog.h>
+#endif
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/bootinfo.h>
+#include <machine/elf.h>
+#include <machine/md_var.h>
+
+CTASSERT(sizeof(struct kerneldumpheader) == 512);
+
+/*
+ * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
+ * is to protect us from metadata and to protect metadata from us.
+ */
+#define	SIZEOF_METADATA		(64*1024)
+
+#define	MD_ALIGN(x)	(((off_t)(x) + EFI_PAGE_MASK) & ~EFI_PAGE_MASK)
+#define	DEV_ALIGN(x)	(((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))
+
+static int minidump = 0;
+TUNABLE_INT("debug.minidump", &minidump);
+SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RW, &minidump, 0,
+    "Enable mini crash dumps");
+
+static struct kerneldumpheader kdh;
+static off_t dumplo, fileofs;
+
+/* Handle buffered writes. */
+static char buffer[DEV_BSIZE];
+static size_t fragsz;
+
+/*
+ * Buffered write to the dump device: accumulate 'sz' bytes from 'ptr'
+ * into the file-static 'buffer' and flush full DEV_BSIZE blocks at
+ * 'dumplo', advancing it.  A trailing partial block stays buffered
+ * until buf_flush() is called.  Returns 0 or a dump_write() error.
+ */
+static int
+buf_write(struct dumperinfo *di, char *ptr, size_t sz)
+{
+	size_t len;
+	int error;
+
+	while (sz) {
+		len = DEV_BSIZE - fragsz;
+		if (len > sz)
+			len = sz;
+		bcopy(ptr, buffer + fragsz, len);
+		fragsz += len;
+		ptr += len;
+		sz -= len;
+		if (fragsz == DEV_BSIZE) {
+			error = dump_write(di, buffer, 0, dumplo,
+			    DEV_BSIZE);
+			if (error)
+				return (error);
+			dumplo += DEV_BSIZE;
+			fragsz = 0;
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Flush any partially-filled block left behind by buf_write(), padding
+ * implicitly with whatever is in 'buffer'.  No-op when the buffer is
+ * empty.  Returns the dump_write() error, if any.
+ */
+static int
+buf_flush(struct dumperinfo *di)
+{
+	int error;
+
+	if (fragsz == 0)
+		return (0);
+
+	error = dump_write(di, buffer, 0, dumplo, DEV_BSIZE);
+	dumplo += DEV_BSIZE;
+	fragsz = 0;
+	return (error);
+}
+
+/*
+ * Physical dump support
+ */
+
+typedef int phys_callback_t(struct efi_md*, int, void*);
+
+/*
+ * Physical-dump callback: write the pages of one EFI memory descriptor
+ * to the dump device through the region-7 direct mapping, in chunks of
+ * at most DFLTPHYS bytes.  Shows a spinner every 16MB, pats the
+ * watchdog per chunk, and aborts with ECANCELED on console CTRL-C.
+ */
+static int
+phys_cb_dumpdata(struct efi_md *mdp, int seqnr, void *arg)
+{
+	struct dumperinfo *di = (struct dumperinfo*)arg;
+	vm_offset_t pa;
+	uint64_t pgs;
+	size_t counter, sz;
+	int c, error, twiddle;
+
+	error = 0;	/* catch case in which mdp->md_pages is 0 */
+	counter = 0;	/* Update twiddle every 16MB */
+	twiddle = 0;
+	pgs = mdp->md_pages;
+	pa = IA64_PHYS_TO_RR7(mdp->md_phys);
+
+	printf("  chunk %d: %ld pages ", seqnr, (long)pgs);
+
+	while (pgs) {
+		sz = (pgs > (DFLTPHYS >> EFI_PAGE_SHIFT))
+		    ? DFLTPHYS : pgs << EFI_PAGE_SHIFT;
+		counter += sz;
+		if (counter >> 24) {
+			printf("%c\b", "|/-\\"[twiddle++ & 3]);
+			counter &= (1<<24) - 1;
+		}
+#ifdef SW_WATCHDOG
+		wdog_kern_pat(WD_LASTVAL);
+#endif
+		error = dump_write(di, (void*)pa, 0, dumplo, sz);
+		if (error)
+			break;
+		dumplo += sz;
+		pgs -= sz >> EFI_PAGE_SHIFT;
+		pa += sz;
+
+		/* Check for user abort. */
+		c = cncheckc();
+		if (c == 0x03)
+			return (ECANCELED);
+		if (c != -1)
+			printf("(CTRL-C to abort)  ");
+	}
+	printf("... %s\n", (error) ? "fail" : "ok");
+	return (error);
+}
+
+/*
+ * Physical-dump callback: emit one ELF program header describing an
+ * EFI memory descriptor, and advance 'fileofs' past its contents so
+ * successive headers get consecutive file offsets.
+ */
+static int
+phys_cb_dumphdr(struct efi_md *mdp, int seqnr, void *arg)
+{
+	struct dumperinfo *di = (struct dumperinfo*)arg;
+	Elf64_Phdr phdr;
+	int error;
+
+	bzero(&phdr, sizeof(phdr));
+	phdr.p_type = PT_LOAD;
+	phdr.p_flags = PF_R;			/* XXX */
+	phdr.p_offset = fileofs;
+	phdr.p_vaddr = (uintptr_t)mdp->md_virt;	/* XXX probably bogus. */
+	phdr.p_paddr = mdp->md_phys;
+	phdr.p_filesz = mdp->md_pages << EFI_PAGE_SHIFT;
+	phdr.p_memsz = mdp->md_pages << EFI_PAGE_SHIFT;
+	phdr.p_align = EFI_PAGE_SIZE;
+
+	error = buf_write(di, (char*)&phdr, sizeof(phdr));
+	fileofs += phdr.p_filesz;
+	return (error);
+}
+
+/*
+ * Physical-dump callback: accumulate the descriptor's size in bytes
+ * into the uint64_t total passed through 'arg'.
+ */
+static int
+phys_cb_size(struct efi_md *mdp, int seqnr, void *arg)
+{
+	uint64_t *sz = (uint64_t*)arg;
+
+	*sz += (uint64_t)mdp->md_pages << EFI_PAGE_SHIFT;
+	return (0);
+}
+
+/*
+ * Apply 'cb' to every EFI memory descriptor of a dumpable type (free,
+ * data, code and boot-services memory), numbering them sequentially.
+ * Returns the number of descriptors visited, or the negated callback
+ * error on failure.
+ */
+static int
+phys_foreach(phys_callback_t cb, void *arg)
+{
+	struct efi_md *mdp;
+	int error, seqnr;
+
+	seqnr = 0;
+	mdp = efi_md_first();
+	while (mdp != NULL) {
+		if (mdp->md_type == EFI_MD_TYPE_FREE ||
+		    mdp->md_type == EFI_MD_TYPE_DATA ||
+		    mdp->md_type == EFI_MD_TYPE_CODE ||
+		    mdp->md_type == EFI_MD_TYPE_BS_DATA ||
+		    mdp->md_type == EFI_MD_TYPE_BS_CODE) {
+			error = (*cb)(mdp, seqnr++, arg);
+			if (error)
+				return (-error);
+		}
+		mdp = efi_md_next(mdp);
+	}
+	return (seqnr);
+}
+
+/*
+ * Virtual dump (aka minidump) support
+ */
+
+typedef int virt_callback_t(vm_offset_t, vm_size_t, int, void*);
+
+/*
+ * Minidump callback: accumulate the region size into the uint64_t
+ * total passed through 'arg'.
+ */
+static int
+virt_cb_size(vm_offset_t va, vm_size_t sz, int seqnr, void *arg)
+{
+	uint64_t *dumpsize = (uint64_t *)arg;
+
+	*dumpsize += sz;
+	return (0);
+}
+
+/*
+ * Minidump callback: emit one ELF program header for a kernel virtual
+ * region.  p_paddr is set to ~0 since no single physical address
+ * applies; 'fileofs' advances past the segment's contents.
+ */
+static int
+virt_cb_dumphdr(vm_offset_t va, vm_size_t sz, int seqnr, void *arg)
+{
+	struct dumperinfo *di = (struct dumperinfo *)arg;
+	Elf64_Phdr phdr;
+	int error;
+ 
+	bzero(&phdr, sizeof(phdr));
+	phdr.p_type = PT_LOAD;
+	phdr.p_flags = PF_R;			/* XXX */
+	phdr.p_offset = fileofs;
+	phdr.p_vaddr = va;
+	phdr.p_paddr = ~0UL;
+	phdr.p_filesz = sz;
+	phdr.p_memsz = sz;
+	phdr.p_align = PAGE_SIZE;
+
+	error = buf_write(di, (char*)&phdr, sizeof(phdr));
+	fileofs += phdr.p_filesz;
+	return (error);
+}
+
+/*
+ * Minidump callback: write a kernel virtual region to the dump device
+ * in chunks of at most DFLTPHYS bytes, with the same progress spinner,
+ * watchdog patting and CTRL-C abort handling as phys_cb_dumpdata().
+ */
+static int
+virt_cb_dumpdata(vm_offset_t va, vm_size_t sz, int seqnr, void *arg) 
+{
+	struct dumperinfo *di = (struct dumperinfo *)arg;
+	size_t counter, iosz;
+	int c, error, twiddle;
+ 
+	error = 0;	/* catch case in which pgs is 0 */
+	counter = 0;	/* Update twiddle every 16MB */
+	twiddle = 0;
+
+	printf("  chunk %d: %ld pages ", seqnr, atop(sz));
+
+	while (sz) {
+		iosz = (sz > DFLTPHYS) ? DFLTPHYS : sz;
+		counter += iosz;
+		if (counter >> 24) {
+			printf("%c\b", "|/-\\"[twiddle++ & 3]);
+			counter &= (1<<24) - 1;
+		}
+#ifdef SW_WATCHDOG
+		wdog_kern_pat(WD_LASTVAL);
+#endif
+		error = dump_write(di, (void*)va, 0, dumplo, iosz);
+		if (error)
+			break;
+		dumplo += iosz;
+		sz -= iosz;
+		va += iosz;
+
+		/* Check for user abort. */
+		c = cncheckc();
+		if (c == 0x03)
+			return (ECANCELED);
+		if (c != -1)
+			printf("(CTRL-C to abort)  ");
+	}
+	printf("... %s\n", (error) ? "fail" : "ok");
+	return (error);
+}
+
+/*
+ * Apply 'cb' to each kernel virtual region included in a minidump.
+ * Currently there is exactly one region: the PBVM from IA64_PBVM_BASE
+ * to the page-rounded end of the kernel.  Returns the region count or
+ * the negated callback error on failure.
+ */
+static int
+virt_foreach(virt_callback_t cb, void *arg)
+{
+	vm_offset_t va;
+	vm_size_t sz;
+	int error, seqnr;
+
+	seqnr = 0;
+	while (1) {
+		switch (seqnr) {
+		case 0:
+			va = IA64_PBVM_BASE;
+			sz = round_page(bootinfo->bi_kernend) - va;
+			break;
+		default:
+			/* No more regions: terminate the loop below. */
+			va = 0;
+			sz = 0;
+			break;
+		}
+		if (va == 0 && sz == 0)
+			break;
+		error = (*cb)(va, sz, seqnr, arg);
+		if (error)
+			return (-error);
+		seqnr++;
+	}
+	return (seqnr);
+}
+
+/*
+ * main entry point.
+ */
+
+/*
+ * Main crash-dump entry point.  Writes an ELF64 core image to the dump
+ * device: kerneldump leader, ELF header, one program header per memory
+ * chunk, the chunk contents, and a kerneldump trailer.  The dump is
+ * placed at the end of the device, after SIZEOF_METADATA reserved
+ * bytes at the start.  The 'minidump' tunable selects the virtual
+ * (kernel PBVM only) walkers instead of the physical (EFI memory map)
+ * walkers.
+ */
+void
+dumpsys(struct dumperinfo *di)
+{
+	Elf64_Ehdr ehdr;
+	uint64_t dumpsize;
+	off_t hdrgap;
+	size_t hdrsz;
+	int error, status;
+
+	bzero(&ehdr, sizeof(ehdr));
+	ehdr.e_ident[EI_MAG0] = ELFMAG0;
+	ehdr.e_ident[EI_MAG1] = ELFMAG1;
+	ehdr.e_ident[EI_MAG2] = ELFMAG2;
+	ehdr.e_ident[EI_MAG3] = ELFMAG3;
+	ehdr.e_ident[EI_CLASS] = ELFCLASS64;
+#if BYTE_ORDER == LITTLE_ENDIAN
+	ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
+#else
+	ehdr.e_ident[EI_DATA] = ELFDATA2MSB;
+#endif
+	ehdr.e_ident[EI_VERSION] = EV_CURRENT;
+	ehdr.e_ident[EI_OSABI] = ELFOSABI_STANDALONE;	/* XXX big picture? */
+	ehdr.e_type = ET_CORE;
+	ehdr.e_machine = EM_IA_64;
+	/* e_entry records the bootinfo location (virtual for minidumps). */
+	ehdr.e_entry = (minidump) ? (uintptr_t)bootinfo :
+	    ia64_tpa((uintptr_t)bootinfo);
+	ehdr.e_phoff = sizeof(ehdr);
+	ehdr.e_flags = (minidump) ? 0 : EF_IA_64_ABSOLUTE; /* XXX misuse? */
+	ehdr.e_ehsize = sizeof(ehdr);
+	ehdr.e_phentsize = sizeof(Elf64_Phdr);
+	ehdr.e_shentsize = sizeof(Elf64_Shdr);
+
+	/* Calculate dump size. */
+	dumpsize = 0L;
+	status = (minidump) ? virt_foreach(virt_cb_size, &dumpsize) :
+	    phys_foreach(phys_cb_size, &dumpsize);
+	if (status < 0) {
+		error = -status;
+		goto fail;
+	}
+	/* One PT_LOAD per chunk visited by the size pass. */
+	ehdr.e_phnum = status;
+	hdrsz = ehdr.e_phoff + ehdr.e_phnum * ehdr.e_phentsize;
+	fileofs = (minidump) ? round_page(hdrsz) : MD_ALIGN(hdrsz);
+	dumpsize += fileofs;
+	hdrgap = fileofs - DEV_ALIGN(hdrsz);
+
+	/* Determine dump offset on device. */
+	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
+		error = ENOSPC;
+		goto fail;
+	}
+	dumplo = di->mediaoffset + di->mediasize - dumpsize;
+	dumplo -= sizeof(kdh) * 2;
+
+	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_IA64_VERSION, dumpsize, di->blocksize);
+
+	/* NOTE(review): %llu with a signed (long long) arg -- mismatched. */
+	printf("Dumping %llu MB (%d chunks)\n", (long long)dumpsize >> 20,
+	    ehdr.e_phnum);
+
+	/* Dump leader */
+	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
+	if (error)
+		goto fail;
+	dumplo += sizeof(kdh);
+
+	/* Dump ELF header */
+	error = buf_write(di, (char*)&ehdr, sizeof(ehdr));
+	if (error)
+		goto fail;
+
+	/* Dump program headers */
+	status = (minidump) ? virt_foreach(virt_cb_dumphdr, di) :
+	    phys_foreach(phys_cb_dumphdr, di);
+	if (status < 0) {
+		error = -status;
+		goto fail;
+	}
+	buf_flush(di);
+
+	/*
+	 * All headers are written using blocked I/O, so we know the
+	 * current offset is (still) block aligned. Skip the alignment
+	 * in the file to have the segment contents aligned at page
+	 * boundary. For physical dumps, it's the EFI page size (= 4K).
+	 * For minidumps it's the kernel's page size (= 8K).
+	 */
+	dumplo += hdrgap;
+
+	/* Dump memory chunks (updates dumplo) */
+	status = (minidump) ? virt_foreach(virt_cb_dumpdata, di) :
+	    phys_foreach(phys_cb_dumpdata, di);
+	if (status < 0) {
+		error = -status;
+		goto fail;
+	}
+
+	/* Dump trailer */
+	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
+	if (error)
+		goto fail;
+
+	/* Signal completion, signoff and exit stage left. */
+	dump_write(di, NULL, 0, 0, 0);
+	printf("\nDump complete\n");
+	return;
+
+ fail:
+	if (error == ECANCELED)
+		printf("\nDump aborted\n");
+	else
+		printf("\n** DUMP FAILED (ERROR %d) **\n", error);
+}


Property changes on: trunk/sys/ia64/ia64/dump_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/efi.c
===================================================================
--- trunk/sys/ia64/ia64/efi.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/efi.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,271 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/efi.c 270296 2014-08-21 19:51:07Z emaste $");
+
+#include <sys/param.h>
+#include <sys/efi.h>
+#include <sys/systm.h>
+#include <machine/bootinfo.h>
+#include <machine/md_var.h>
+#include <machine/sal.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+/*
+ * Cached pointers into the EFI firmware tables; set up once by
+ * efi_boot_minimal() and used by every other routine in this file.
+ */
+static struct efi_systbl *efi_systbl;
+static struct efi_cfgtbl *efi_cfgtbl;
+static struct efi_rt *efi_runtime;
+
+/* Translation table from EFI status codes (low bits) to errno values. */
+static int efi_status2err[25] = {
+	0,		/* EFI_SUCCESS */
+	ENOEXEC,	/* EFI_LOAD_ERROR */
+	EINVAL,		/* EFI_INVALID_PARAMETER */
+	ENOSYS,		/* EFI_UNSUPPORTED */
+	EMSGSIZE, 	/* EFI_BAD_BUFFER_SIZE */
+	EOVERFLOW,	/* EFI_BUFFER_TOO_SMALL */
+	EBUSY,		/* EFI_NOT_READY */
+	EIO,		/* EFI_DEVICE_ERROR */
+	EROFS,		/* EFI_WRITE_PROTECTED */
+	EAGAIN,		/* EFI_OUT_OF_RESOURCES */
+	EIO,		/* EFI_VOLUME_CORRUPTED */
+	ENOSPC,		/* EFI_VOLUME_FULL */
+	ENXIO,		/* EFI_NO_MEDIA */
+	ESTALE,		/* EFI_MEDIA_CHANGED */
+	ENOENT,		/* EFI_NOT_FOUND */
+	EACCES,		/* EFI_ACCESS_DENIED */
+	ETIMEDOUT,	/* EFI_NO_RESPONSE */
+	EADDRNOTAVAIL,	/* EFI_NO_MAPPING */
+	ETIMEDOUT,	/* EFI_TIMEOUT */
+	EDOOFUS,	/* EFI_NOT_STARTED */
+	EALREADY,	/* EFI_ALREADY_STARTED */
+	ECANCELED,	/* EFI_ABORTED */
+	EPROTO,		/* EFI_ICMP_ERROR */
+	EPROTO,		/* EFI_TFTP_ERROR */
+	EPROTO		/* EFI_PROTOCOL_ERROR */
+};
+
+/*
+ * Map an EFI status to an errno value. The high bit of the status
+ * flags an error; the remaining bits select an entry in the
+ * efi_status2err table. Codes beyond the table map to EDOOFUS so the
+ * bound can never drift from the table size.
+ */
+static int
+efi_status_to_errno(efi_status status)
+{
+	u_long code;
+	int error;
+
+	code = status & 0x3ffffffffffffffful;
+	error = (code < sizeof(efi_status2err) / sizeof(efi_status2err[0])) ?
+	    efi_status2err[code] : EDOOFUS;
+	return (error);
+}
+
+/*
+ * Post-boot EFI finalization hook; nothing to do on ia64, kept for
+ * machine-dependent interface completeness.
+ */
+void
+efi_boot_finish(void)
+{
+}
+
+/*
+ * Collect the entry points for PAL and SAL. Be extra careful about NULL
+ * pointer values. We're running pre-console, so it's better to return
+ * error values than to cause panics, machine checks and other traps and
+ * faults. Keep this minimal...
+ */
+int
+efi_boot_minimal(uint64_t systbl)
+{
+	ia64_efi_f setvirt;
+	struct efi_md *md;
+	efi_status status;
+
+	/* Validate the system table pointer handed to us by the loader. */
+	if (systbl == 0)
+		return (EINVAL);
+	efi_systbl = (struct efi_systbl *)IA64_PHYS_TO_RR7(systbl);
+	if (efi_systbl->st_hdr.th_sig != EFI_SYSTBL_SIG) {
+		efi_systbl = NULL;
+		return (EFAULT);
+	}
+	/* Resolve the configuration table used by efi_get_table(). */
+	efi_cfgtbl = (efi_systbl->st_cfgtbl == 0) ? NULL :
+	    (struct efi_cfgtbl *)IA64_PHYS_TO_RR7(efi_systbl->st_cfgtbl);
+	if (efi_cfgtbl == NULL)
+		return (ENOENT);
+	/* Resolve the runtime services table used by the efi_* wrappers. */
+	efi_runtime = (efi_systbl->st_rt == 0) ? NULL :
+	    (struct efi_rt *)IA64_PHYS_TO_RR7(efi_systbl->st_rt);
+	if (efi_runtime == NULL)
+		return (ENOENT);
+
+	/*
+	 * Relocate runtime memory segments for firmware.
+	 */
+	md = efi_md_first();
+	while (md != NULL) {
+		if (md->md_attr & EFI_MD_ATTR_RT) {
+			/*
+			 * Cacheable (WB) segments map into region 7,
+			 * uncacheable ones into region 6.
+			 */
+			md->md_virt = (md->md_attr & EFI_MD_ATTR_WB) ?
+			    (void *)IA64_PHYS_TO_RR7(md->md_phys) :
+			    (void *)IA64_PHYS_TO_RR6(md->md_phys);
+		}
+		md = efi_md_next(md);
+	}
+	/* Switch the firmware to virtual mode using the updated map. */
+	setvirt = (void *)IA64_PHYS_TO_RR7((u_long)efi_runtime->rt_setvirtual);
+	status = ia64_efi_physical(setvirt, bootinfo->bi_memmap_size,
+	    bootinfo->bi_memdesc_size, bootinfo->bi_memdesc_version,
+	    ia64_tpa(bootinfo->bi_memmap));
+	return ((status < 0) ? EFAULT : 0);
+}
+
+/*
+ * Look up an EFI configuration table by vendor UUID. Returns a
+ * region 7 (cached) virtual address, or NULL when not found or when
+ * no configuration table is available.
+ */
+void *
+efi_get_table(struct uuid *uuid)
+{
+	struct efi_cfgtbl *ct;
+	u_long i;
+
+	if (efi_cfgtbl == NULL)
+		return (NULL);
+	for (i = 0, ct = efi_cfgtbl; i < efi_systbl->st_entries; i++, ct++) {
+		if (bcmp(&ct->ct_uuid, uuid, sizeof(*uuid)) == 0)
+			return ((void *)IA64_PHYS_TO_RR7(ct->ct_data));
+	}
+	return (NULL);
+}
+
+/*
+ * Read the current time from the EFI runtime services.
+ * NOTE(review): efi_runtime is dereferenced without a NULL check —
+ * assumes efi_boot_minimal() succeeded; confirm callers guarantee it.
+ */
+void
+efi_get_time(struct efi_tm *tm)
+{
+
+	efi_runtime->rt_gettime(tm, NULL);
+}
+
+/*
+ * Return the first EFI memory descriptor, or NULL when the loader
+ * provided no memory map.
+ */
+struct efi_md *
+efi_md_first(void)
+{
+
+	return ((bootinfo->bi_memmap == 0) ? NULL :
+	    (struct efi_md *)bootinfo->bi_memmap);
+}
+
+/*
+ * Return the last EFI memory descriptor, or NULL when the loader
+ * provided no memory map.
+ */
+struct efi_md *
+efi_md_last(void)
+{
+	uint64_t addr;
+
+	if (bootinfo->bi_memmap == 0)
+		return (NULL);
+	/* The last descriptor starts one descriptor before the map's end. */
+	addr = bootinfo->bi_memmap + bootinfo->bi_memmap_size -
+	    bootinfo->bi_memdesc_size;
+	return ((struct efi_md *)addr);
+}
+
+/*
+ * Return the memory descriptor following md, or NULL when md was the
+ * last one.
+ */
+struct efi_md *
+efi_md_next(struct efi_md *md)
+{
+	struct efi_md *next;
+
+	next = (struct efi_md *)((uintptr_t)md + bootinfo->bi_memdesc_size);
+	return ((next > efi_md_last()) ? NULL : next);
+}
+
+/*
+ * Return the memory descriptor preceding md, or NULL when md was the
+ * first one.
+ */
+struct efi_md *
+efi_md_prev(struct efi_md *md)
+{
+	struct efi_md *prev;
+
+	prev = (struct efi_md *)((uintptr_t)md - bootinfo->bi_memdesc_size);
+	return ((prev < efi_md_first()) ? NULL : prev);
+}
+
+/*
+ * Find the memory descriptor covering physical address pa, or NULL
+ * when pa falls outside every descriptor.
+ */
+struct efi_md *
+efi_md_find(vm_paddr_t pa)
+{
+	/* Cache the last hit: lookups tend to cluster, so start there. */
+	static struct efi_md *last = NULL;
+	struct efi_md *md, *p0, *p1;
+
+	md = (last != NULL) ? last : efi_md_first();
+	p1 = p0 = NULL;
+	/*
+	 * Walk toward pa one descriptor at a time. p0/p1 remember the
+	 * last two positions; md == p1 means we bounced back and forth
+	 * between two adjacent descriptors, i.e. pa lies in a hole.
+	 */
+	while (md != NULL && md != p1) {
+		if (pa >= md->md_phys &&
+		    pa < md->md_phys + md->md_pages * EFI_PAGE_SIZE) {
+			last = md;
+			return (md);
+		}
+
+		p1 = p0;
+		p0 = md;
+		md = (pa < md->md_phys) ? efi_md_prev(md) : efi_md_next(md);
+	}
+
+	/* Not covered by any descriptor. */
+	return (NULL);
+}
+
+/*
+ * Ask the firmware for a warm reset; rt_reset() should not return.
+ * If it does — or no runtime table is available — there is nothing
+ * left to do but panic.
+ */
+void
+efi_reset_system(void)
+{
+
+	if (efi_runtime != NULL)
+		efi_runtime->rt_reset(EFI_RESET_WARM, 0, 0, NULL);
+	panic("%s: unable to reset the machine", __func__);
+}
+
+/*
+ * Hand a new wall-clock time to the firmware; returns an errno value
+ * derived from the EFI status.
+ */
+int
+efi_set_time(struct efi_tm *tm)
+{
+	efi_status status;
+
+	status = efi_runtime->rt_settime(tm);
+	return (efi_status_to_errno(status));
+}
+
+/*
+ * Fetch an EFI variable from the firmware; returns an errno value
+ * derived from the EFI status.
+ */
+int
+efi_var_get(efi_char *name, struct uuid *vendor, uint32_t *attrib,
+    size_t *datasize, void *data)
+{
+
+	return (efi_status_to_errno(efi_runtime->rt_getvar(name, vendor,
+	    attrib, datasize, data)));
+}
+
+/*
+ * Enumerate the next EFI variable name; returns an errno value
+ * derived from the EFI status.
+ */
+int
+efi_var_nextname(size_t *namesize, efi_char *name, struct uuid *vendor)
+{
+
+	return (efi_status_to_errno(efi_runtime->rt_scanvar(namesize, name,
+	    vendor)));
+}
+ 
+/*
+ * Store an EFI variable in the firmware; returns an errno value
+ * derived from the EFI status.
+ */
+int
+efi_var_set(efi_char *name, struct uuid *vendor, uint32_t attrib,
+    size_t datasize, void *data)
+{
+
+	return (efi_status_to_errno(efi_runtime->rt_setvar(name, vendor,
+	    attrib, datasize, data)));
+}


Property changes on: trunk/sys/ia64/ia64/efi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/elf_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/elf_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/elf_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,329 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/elf_machdep.c 288287 2015-09-27 01:33:43Z kib $
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/namei.h>
+#include <sys/fcntl.h>
+#include <sys/vnode.h>
+#include <sys/linker.h>
+#include <sys/sysent.h>
+#include <sys/imgact_elf.h>
+#include <sys/syscall.h>
+#include <sys/signalvar.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+
+#include <machine/elf.h>
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/unwind.h>
+
+Elf_Addr link_elf_get_gp(linker_file_t);
+
+extern Elf_Addr fptr_storage[];
+
+/*
+ * Native FreeBSD/ia64 LP64 ELF ABI description: machine-dependent
+ * hooks for signal delivery, syscall argument fetch/return and image
+ * activation. Referenced by the brand entries registered below.
+ */
+struct sysentvec elf64_freebsd_sysvec = {
+	.sv_size	= SYS_MAXSYSCALL,
+	.sv_table	= sysent,
+	.sv_mask	= 0,
+	.sv_sigsize	= 0,
+	.sv_sigtbl	= NULL,
+	.sv_errsize	= 0,
+	.sv_errtbl	= NULL,
+	.sv_transtrap	= NULL,
+	.sv_fixup	= __elfN(freebsd_fixup),
+	.sv_sendsig	= sendsig,
+	.sv_sigcode	= NULL,
+	.sv_szsigcode	= NULL,
+	.sv_prepsyscall	= NULL,
+	.sv_name	= "FreeBSD ELF64",
+	.sv_coredump	= __elfN(coredump),
+	.sv_imgact_try	= NULL,
+	.sv_minsigstksz	= MINSIGSTKSZ,
+	.sv_pagesize	= PAGE_SIZE,
+	.sv_minuser	= VM_MIN_ADDRESS,
+	.sv_maxuser	= VM_MAXUSER_ADDRESS,
+	.sv_usrstack	= USRSTACK,
+	.sv_psstrings	= PS_STRINGS,
+	.sv_stackprot	= VM_PROT_READ|VM_PROT_WRITE,
+	.sv_copyout_strings = exec_copyout_strings,
+	.sv_setregs	= exec_setregs,
+	.sv_fixlimit	= NULL,
+	.sv_maxssiz	= NULL,
+	.sv_flags	= SV_ABI_FREEBSD | SV_LP64,
+	.sv_set_syscall_retval = cpu_set_syscall_retval,
+	.sv_fetch_syscall_args = cpu_fetch_syscall_args,
+	.sv_syscallnames = syscallnames,
+	.sv_schedtail	= NULL,
+};
+
+/*
+ * Brand entry for native binaries using the standard RTLD path
+ * (/libexec/ld-elf.so.1); registered first at SI_SUB_EXEC.
+ */
+static Elf64_Brandinfo freebsd_brand_info = {
+	.brand		= ELFOSABI_FREEBSD,
+	.machine	= EM_IA_64,
+	.compat_3_brand	= "FreeBSD",
+	.emul_path	= NULL,
+	.interp_path	= "/libexec/ld-elf.so.1",
+	.sysvec		= &elf64_freebsd_sysvec,
+	.interp_newpath	= NULL,
+	.brand_note	= &elf64_freebsd_brandnote,
+	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE
+};
+SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
+    (sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_info);
+
+/*
+ * Compatibility brand entry for old binaries using the historical
+ * /usr/libexec RTLD path; registered after the primary entry.
+ */
+static Elf64_Brandinfo freebsd_brand_oinfo = {
+	.brand		= ELFOSABI_FREEBSD,
+	.machine	= EM_IA_64,
+	.compat_3_brand	= "FreeBSD",
+	.emul_path	= NULL,
+	.interp_path	= "/usr/libexec/ld-elf.so.1",
+	.sysvec		= &elf64_freebsd_sysvec,
+	.interp_newpath	= NULL,
+	.brand_note	= &elf64_freebsd_brandnote,
+	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE
+};
+SYSINIT(oelf64, SI_SUB_EXEC, SI_ORDER_ANY,
+    (sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_oinfo);
+
+
+/*
+ * Per-thread coredump hook. On the sizing pass (dst == NULL), push
+ * the thread's dirty registers onto the backing store so the dump
+ * reads a consistent RSE state; the write pass needs no extra work.
+ */
+void
+elf64_dump_thread(struct thread *td, void *dst, size_t *off __unused)
+{
+
+	if (dst != NULL)
+		return;
+	ia64_flush_dirty(td, &td->td_frame->tf_special);
+}
+
+
+/*
+ * Resolve symbol symidx in lf (or its dependencies) and return, via
+ * addr1, the address of an OPD-style function descriptor — an
+ * (entry point, gp) pair in fptr_storage — for it. Descriptors are
+ * reused when one already exists for the entry address.
+ * Returns 0 on success or EINVAL on lookup failure/table overflow.
+ */
+static int
+lookup_fdesc(linker_file_t lf, Elf_Size symidx, elf_lookup_fn lookup,
+    Elf_Addr *addr1)
+{
+	linker_file_t top;
+	Elf_Addr addr;
+	const char *symname;
+	int i, error;
+	static int eot = 0;
+
+	error = lookup(lf, symidx, 0, &addr);
+	if (error != 0) {
+		/*
+		 * The symbol isn't in this file; search its dependencies
+		 * by name. Start from a known-zero address so a failed
+		 * lookup() can't leave garbage in addr — without this,
+		 * addr was read uninitialized when ndeps == 0.
+		 */
+		addr = 0;
+		top = lf;
+		symname = elf_get_symname(top, symidx);
+		for (i = 0; i < top->ndeps; i++) {
+			lf = top->deps[i];
+			addr = (Elf_Addr)linker_file_lookup_symbol(lf,
+			    symname, 0);
+			if (addr != 0)
+				break;
+		}
+		if (addr == 0)
+			return (EINVAL);
+	}
+
+	/* Once the descriptor table overflowed, fail all later requests. */
+	if (eot)
+		return (EINVAL);
+
+	/*
+	 * Lookup and/or construct OPD
+	 */
+	for (i = 0; i < 8192; i += 2) {
+		if (fptr_storage[i] == addr) {
+			/* Reuse the existing descriptor for this address. */
+			*addr1 = (Elf_Addr)(fptr_storage + i);
+			return (0);
+		}
+
+		if (fptr_storage[i] == 0) {
+			/* Free slot: build a new (entry, gp) descriptor. */
+			fptr_storage[i] = addr;
+			fptr_storage[i+1] = link_elf_get_gp(lf);
+			*addr1 = (Elf_Addr)(fptr_storage + i);
+			return (0);
+		}
+	}
+
+	printf("%s: fptr table full\n", __func__);
+	eot = 1;
+
+	return (EINVAL);
+}
+
+/*
+ * Process one ELF relocation, with (Rela) or without (Rel) an explicit
+ * addend. The 'local' pre-pass handles only R_IA_64_REL64LSB relative
+ * relocations; everything else is resolved in the non-local pass.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
+    int type, int local, elf_lookup_fn lookup)
+{
+	Elf_Addr *where;
+	Elf_Addr addend, addr;
+	Elf_Size rtype, symidx;
+	const Elf_Rel *rel;
+	const Elf_Rela *rela;
+	int error;
+
+	switch (type) {
+	case ELF_RELOC_REL:
+		rel = (const Elf_Rel *)data;
+		where = (Elf_Addr *)(relocbase + rel->r_offset);
+		rtype = ELF_R_TYPE(rel->r_info);
+		symidx = ELF_R_SYM(rel->r_info);
+		/* Rel entries carry the addend in the relocated word. */
+		switch (rtype) {
+		case R_IA_64_DIR64LSB:
+		case R_IA_64_FPTR64LSB:
+		case R_IA_64_REL64LSB:
+			addend = *where;
+			break;
+		default:
+			addend = 0;
+			break;
+		}
+		break;
+	case ELF_RELOC_RELA:
+		rela = (const Elf_Rela *)data;
+		where = (Elf_Addr *)(relocbase + rela->r_offset);
+		rtype = ELF_R_TYPE(rela->r_info);
+		symidx = ELF_R_SYM(rela->r_info);
+		addend = rela->r_addend;
+		break;
+	default:
+		panic("%s: invalid ELF relocation (0x%x)\n", __func__, type);
+	}
+
+	if (local) {
+		if (rtype == R_IA_64_REL64LSB)
+			*where = elf_relocaddr(lf, relocbase + addend);
+		return (0);
+	}
+
+	switch (rtype) {
+	case R_IA_64_NONE:
+		break;
+	case R_IA_64_DIR64LSB:	/* word64 LSB	S + A */
+		error = lookup(lf, symidx, 1, &addr);
+		if (error != 0)
+			return (-1);
+		*where = addr + addend;
+		break;
+	case R_IA_64_FPTR64LSB:	/* word64 LSB	@fptr(S + A) */
+		if (addend != 0) {
+			printf("%s: addend ignored for OPD relocation\n",
+			    __func__);
+		}
+		error = lookup_fdesc(lf, symidx, lookup, &addr);
+		if (error != 0)
+			return (-1);
+		*where = addr;
+		break;
+	case R_IA_64_REL64LSB:	/* word64 LSB	BD + A */
+		break;
+	case R_IA_64_IPLTLSB:
+		/* Copy the (entry, gp) pair out of the descriptor. */
+		error = lookup_fdesc(lf, symidx, lookup, &addr);
+		if (error != 0)
+			return (-1);
+		where[0] = *((Elf_Addr*)addr) + addend;
+		where[1] = *((Elf_Addr*)addr + 1);
+		break;
+	default:
+		printf("%s: unknown relocation (0x%x)\n", __func__,
+		    (int)rtype);
+		return (-1);
+	}
+
+	return (0);
+}
+
+/*
+ * Non-local relocation pass: resolve symbols through the linker.
+ */
+int
+elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
+    elf_lookup_fn lookup)
+{
+	int error;
+
+	error = elf_reloc_internal(lf, relocbase, data, type, 0, lookup);
+	return (error);
+}
+
+/*
+ * Local relocation pre-pass: handle only relative relocations.
+ */
+int
+elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
+    int type, elf_lookup_fn lookup)
+{
+	int error;
+
+	error = elf_reloc_internal(lf, relocbase, data, type, 1, lookup);
+	return (error);
+}
+
+/*
+ * Machine-dependent post-load hook for kernel modules: register the
+ * module's unwind table and make its text coherent in the I-cache.
+ * Returns 0 on success or EFTYPE for a bad ELF header.
+ */
+int
+elf_cpu_load_file(linker_file_t lf)
+{
+	Elf_Ehdr *hdr;
+	Elf_Phdr *ph, *phlim;
+	Elf_Addr reloc, vaddr;
+
+	hdr = (Elf_Ehdr *)(lf->address);
+	if (!IS_ELF(*hdr)) {
+		printf("Missing or corrupted ELF header at %p\n", hdr);
+		return (EFTYPE);
+	}
+
+	/*
+	 * Iterate over the segments and register the unwind table if
+	 * we come across it.
+	 */
+	ph = (Elf_Phdr *)(lf->address + hdr->e_phoff);
+	phlim = ph + hdr->e_phnum;
+	reloc = ~0ULL;
+	while (ph < phlim) {
+		/* The first PT_LOAD segment fixes the load bias. */
+		if (ph->p_type == PT_LOAD && reloc == ~0ULL)
+			reloc = (Elf_Addr)lf->address - ph->p_vaddr;
+
+		if (ph->p_type == PT_IA_64_UNWIND) {
+			vaddr = ph->p_vaddr + reloc;
+			unw_table_add((vm_offset_t)lf->address, vaddr,
+			    vaddr + ph->p_memsz);
+		}
+		++ph;
+	}
+
+	/*
+	 * Make the I-cache coherent, but don't worry about the kernel
+	 * itself because the loader needs to do that.
+	 */
+	if (lf->id != 1)
+		ia64_sync_icache((uintptr_t)lf->address, lf->size);
+
+	return (0);
+}
+
+/*
+ * Machine-dependent unload hook: undo elf_cpu_load_file() by dropping
+ * the unwind table registered for this module.
+ */
+int
+elf_cpu_unload_file(linker_file_t lf)
+{
+
+	unw_table_remove((vm_offset_t)lf->address);
+	return (0);
+}


Property changes on: trunk/sys/ia64/ia64/elf_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/emulate.c
===================================================================
--- trunk/sys/ia64/ia64/emulate.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/emulate.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,90 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2006 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/emulate.c 160040 2006-06-29 19:59:18Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+
+#include <machine/frame.h>
+#include <machine/md_var.h>
+
+#include <ia64/disasm/disasm.h>
+
+/*
+ * Emulate an instruction that faulted, currently only the long-branch
+ * (brl) forms. Returns 0 when the instruction was emulated (trapframe
+ * updated to continue), or SIGILL when it wasn't.
+ */
+int
+ia64_emulate(struct trapframe *tf, struct thread *td)
+{
+	struct asm_bundle bundle;
+	struct asm_inst *i;
+	int slot;
+
+	if (!asm_decode(tf->tf_special.iip, &bundle))
+		return (SIGILL);
+
+	/* Derive the faulting slot number from psr.ri. */
+	slot = ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_0) ? 0 :
+	    ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_1) ? 1 : 2;
+	/* In an MLX bundle the L+X instruction lives in slot 2. */
+	if (slot == 1 && bundle.b_templ[slot] == 'L')
+		slot++;
+
+	i = bundle.b_inst + slot;
+	switch (i->i_op) {
+	case ASM_OP_BRL:
+		/*
+		 * We get the fault even if the predicate is false, so we
+		 * need to check the predicate first and simply advance to
+		 * the next bundle in that case.
+		 */
+		if (!(tf->tf_special.pr & (1UL << i->i_oper[0].o_value))) {
+			tf->tf_special.psr &= ~IA64_PSR_RI;
+			tf->tf_special.iip += 16;
+			return (0);
+		}
+		/*
+		 * The brl.cond is the simplest form. We only have to set
+		 * the IP to the address in the instruction and return.
+		 */
+		if (i->i_cmpltr[0].c_type == ASM_CT_COND) {
+			tf->tf_special.psr &= ~IA64_PSR_RI;
+			tf->tf_special.iip += i->i_oper[1].o_value;
+			return (0);
+		}
+		/* Sanity check... */
+		if (i->i_cmpltr[0].c_type != ASM_CT_CALL)
+			break;
+		/*
+		 * The brl.call is more difficult as we need to set-up the
+		 * call properly.
+		 */
+		break;
+	default:
+		break;
+	}
+
+	return (SIGILL);
+}


Property changes on: trunk/sys/ia64/ia64/emulate.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/exception.S
===================================================================
--- trunk/sys/ia64/ia64/exception.S	                        (rev 0)
+++ trunk/sys/ia64/ia64/exception.S	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,1759 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003,2004 Marcel Moolenaar
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/exception.S 268200 2014-07-02 23:47:43Z marcel $");
+
+#include "opt_xtrace.h"
+
+#include <machine/pte.h>
+#include <assym.s>
+
+/*
+ * Nested TLB restart tokens. These are used by the
+ * nested TLB handler for jumping back to the code
+ * where the nested TLB was caused.
+ */
+#define	NTLBRT_SAVE	0x12c12c
+#define	NTLBRT_RESTORE  0x12c12d
+
+/*
+ * ar.k7 = kernel memory stack
+ * ar.k6 = kernel register stack
+ * ar.k5 = EPC gateway page
+ * ar.k4 = PCPU data
+ */
+
+	.section .ivt.data, "aw"
+
+	.align	8
+	.global ia64_kptdir
+	.size	ia64_kptdir, 8
+// Root of the kernel page table directory (initially 0).
+// NOTE(review): set by MD startup code elsewhere — confirm.
+ia64_kptdir:	data8	0
+
+#ifdef XTRACE
+
+	.align	8
+	.global	ia64_xtrace_mask
+	.size	ia64_xtrace_mask, 8
+// Mask ANDed into the trace buffer pointer (ar.k3) by ia64_xtrace_write
+// after each record is written.
+ia64_xtrace_mask:	data8	0
+
+	.align	4
+	.global	ia64_xtrace_enabled
+	.size	ia64_xtrace_enabled, 4
+// Non-zero when exception tracing is enabled; tested by ia64_xtrace_write.
+ia64_xtrace_enabled:	data4	0
+
+// Emit one trace record for an IVT entry: saves b7/pr, branches to
+// ia64_xtrace_write with the return address derived from ip in r25,
+// then restores b7/pr. (No comments inside the macro: it is held
+// together by line continuations.)
+#define	XTRACE_HOOK(offset)			\
+{	.mii ;					\
+	nop		0 ;			\
+	mov		r31 = b7 ;		\
+	mov		r28 = pr ;		\
+} ;						\
+{	.mib ;					\
+	nop		0 ;			\
+	mov		r25 = ip ;		\
+	br.sptk		ia64_xtrace_write ;;	\
+} ;						\
+{	.mii ;					\
+	nop		0 ;			\
+	mov		b7 = r31 ;		\
+	mov		pr = r28, 0x1ffff ;;	\
+}
+
+	.section .ivt.text, "ax"
+
+// We can only use r25, r26 & r27
+ENTRY_NOPROFILE(ia64_xtrace_write, 0)
+{	.mlx
+	add	r25 = 16, r25
+	movl	r26 = ia64_xtrace_enabled
+	;;
+}
+{	.mmi
+	mov	r27 = ar.k3
+	ld4	r26 = [r26]
+	mov	b7 = r25
+	;;
+}
+{	.mib
+	add	r25 = -32, r25
+	cmp.eq	p15,p0 = r0, r26
+(p15)	br.dptk.few	b7
+	;;
+}
+{	.mib
+	nop	0
+	cmp.eq	p15,p0 = r0, r27
+(p15)	br.dptk.few	b7
+	;;
+}
+{	.mmi
+	st8	[r27] = r25, 8		// 0x00 IVT
+	mov	r26 = ar.itc
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r26, 8		// 0x08 ITC
+	mov	r25 = cr.iip
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r25, 8		// 0x10 IIP
+	mov	r26 = cr.ifa
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r26, 8		// 0x18 IFA
+	mov	r25 = cr.isr
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r25, 8		// 0x20 ISR
+	mov	r26 = cr.ipsr
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r26, 8		// 0x28 IPSR
+	mov	r25 = cr.itir
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r25, 8		// 0x30 ITIR
+	mov	r26 = cr.iipa
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r26, 8		// 0x38 IIPA
+	mov	r25 = cr.ifs
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r25, 8		// 0x40 IFS
+	mov	r26 = cr.iim
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r26, 8		// 0x48 IIM
+	mov	r25 = cr.iha
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r25, 8		// 0x50 IHA
+	mov	r26 = ar.unat
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r26, 8		// 0x58 UNAT
+	mov	r25 = ar.rsc
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r25, 8		// 0x60 RSC
+	mov	r26 = ar.bsp
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r26, 8		// 0x68 BSP
+	mov	r25 = r13
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r27] = r25, 8		// 0x70 PCPU/TLS
+	mov	r26 = r12
+	nop	0
+	;;
+}
+{	.mlx
+	st8	[r27] = r26, 8		// 0x78 SP
+	movl	r25 = ia64_xtrace_mask
+	;;
+}
+{	.mmi
+	ld8	r26 = [r25]
+	;;
+	and	r25 = r27, r26
+	nop	0
+	;;
+}
+{	.mib
+	mov	ar.k3 = r25
+	nop	0
+	br.sptk	b7
+	;;
+}
+END(ia64_xtrace_write)
+
+#else /* XTRACE */
+
+#define	XTRACE_HOOK(offset)
+
+	.section .ivt.text, "ax"
+
+#endif /* XTRACE */
+
+/*
+ * exception_save: save interrupted state
+ *
+ * Arguments:
+ *	r16	address of bundle that contains the branch. The
+ *		return address will be the next bundle.
+ *	r17	the value to save as ifa in the trapframe. This
+ *		normally is cr.ifa, but some interruptions set
+ *		cr.iim and not cr.ifa.
+ *
+ * Returns:
+ *	p15	interrupted from user stack
+ *	p14	interrupted from kernel stack
+ *	p13	interrupted from user backing store
+ *	p12	interrupted from kernel backing store
+ *	p11	interrupts were enabled
+ *	p10	interrupts were disabled
+ */
+ENTRY_NOPROFILE(exception_save, 0)
+{	.mii
+	mov		r20=ar.unat
+	extr.u		r31=sp,61,3
+	mov		r18=pr
+	;;
+}
+// Region bits of sp decide whether we came from user or kernel stack.
+{	.mmi
+	cmp.le		p14,p15=IA64_VM_MINKERN_REGION,r31
+	;;
+(p15)	mov		r23=ar.k7		// kernel memory stack
+(p14)	mov		r23=sp
+	;;
+}
+{	.mii
+	mov		r21=ar.rsc
+	add		r30=-SIZEOF_TRAPFRAME,r23
+	;;
+	dep		r30=0,r30,0,10
+	;;
+}
+{	.mmi
+	mov		ar.rsc=0
+	mov		r22=cr.iip
+	addl		r29=NTLBRT_SAVE,r0	// 22-bit restart token.
+	;;
+}
+
+	/*
+	 * We have a 1KB aligned trapframe, pointed to by r30. We can't
+	 * reliably write to the trapframe using virtual addressing, due
+	 * to the fact that TC entries we depend on can be removed by:
+	 * 1.  ptc.g instructions issued by other threads/cores/CPUs, or
+	 * 2.  TC modifications in another thread on the same core.
+	 * When our TC entry gets removed, we get nested TLB faults and
+	 * since no state is saved, we can only deal with those when
+	 * explicitly coded and expected.
+	 * As such, we switch to physical addressing and account for the
+	 * fact that the tpa instruction can cause a nested TLB fault.
+	 * Since the data nested TLB fault does not preserve any state,
+	 * we have to be careful what we clobber. Consequently, we have
+	 * to be careful what we use here. Below a list of registers that
+	 * are considered alive:
+	 *	r16,r17=arguments
+	 *	r18=pr, r19=length, r20=unat, r21=rsc, r22=iip, r23=TOS
+	 *	r29=restart token
+	 *	r30=trapframe pointers
+	 *	p14,p15=memory stack switch
+	 */
+exception_save_restart:
+	tpa		r24=r30			// Nested TLB fault possible
+	sub		r19=r23,r30
+	nop		0
+	;;
+
+	rsm		psr.dt
+	add		r29=16,r19		// Clobber restart token
+	mov		r30=r24
+	;;
+	srlz.d
+	add		r31=8,r24
+	;;
+
+	// r18=pr, r19=length, r20=unat, r21=rsc, r22=iip, r23=TOS
+	// r29=delta
+{	.mmi
+	st8		[r30]=r19,16		// length
+	st8		[r31]=r0,16		// flags
+	;;
+}
+{	.mmi
+	st8.spill	[r30]=sp,16		// sp
+	st8		[r31]=r20,16		// unat
+	sub		sp=r23,r29
+	;;
+}
+{	.mmi
+	mov		r19=ar.rnat
+	mov		r20=ar.bspstore
+	mov		r23=rp
+	;;
+}
+	// r18=pr, r19=rnat, r20=bspstore, r21=rsc, r22=iip, r23=rp
+	// r24=pfs
+{	.mmi
+	st8		[r30]=r23,16		// rp
+	st8		[r31]=r18,16		// pr
+	mov		r24=ar.pfs
+	;;
+}
+{	.mmb
+	st8		[r30]=r24,16		// pfs
+	st8		[r31]=r20,16		// bspstore
+	cover
+	;;
+}
+{	.mmi
+	mov		r18=ar.fpsr
+	mov		r23=cr.ipsr
+	extr.u		r24=r20,61,3
+	;;
+}
+	// r18=fpsr, r19=rnat, r20=bspstore, r21=rsc, r22=iip, r23=ipsr
+{	.mmi
+	st8		[r30]=r19,16		// rnat
+	st8		[r31]=r0,16		// __spare
+	cmp.le		p12,p13=IA64_VM_MINKERN_REGION,r24
+	;;
+}
+{	.mmi
+	st8.spill	[r30]=r13,16		// tp
+	st8		[r31]=r21,16		// rsc
+	tbit.nz		p11,p10=r23,14		// p11=interrupts enabled
+	;;
+}
+// When interrupted on the user backing store (p13), switch to the
+// kernel register stack (ar.k6).
+{	.mmi
+(p13)	mov		r21=ar.k6		// kernel register stack
+	;;
+	st8		[r30]=r18,16		// fpsr
+(p13)	dep		r20=r20,r21,0,9		// align dirty registers
+	;;
+}
+	// r19=rnat, r20=bspstore, r22=iip, r23=ipsr
+{	.mmi
+	st8		[r31]=r23,16		// psr
+(p13)	mov		ar.bspstore=r20
+	nop		0
+	;;
+}
+{	.mmi
+(p13)	mov		ar.rnat=r19
+	mov		r18=ar.bsp
+	nop		0
+	;;
+}
+{	.mmi
+	mov		r19=cr.ifs
+	st8.spill	[r30]=gp,16		// gp
+	sub		r18=r18,r20
+	;;
+}
+	// r18=ndirty, r19=ifs, r22=iip
+{	.mmi
+	st8		[r31]=r18,16		// ndirty
+	st8		[r30]=r19,16		// cfm
+	nop		0
+	;;
+}
+{	.mmi
+	mov		r18=cr.isr
+	st8		[r31]=r22,16		// iip
+	add		r29=16,r30
+	;;
+}
+{	.mmi
+	st8		[r30]=r17,24		// ifa
+	st8		[r31]=r18,24		// isr
+	nop		0
+	;;
+}
+{	.mmi
+	.mem.offset	0,0
+	st8.spill	[r30]=r2,16		// r2
+	.mem.offset	8,0
+	st8.spill	[r31]=r3,16		// r3
+	add		r2=9*8,r29
+	;;
+}
+{	.mmi
+	.mem.offset	0,0
+	st8.spill	[r30]=r8,16		// r8
+	.mem.offset	8,0
+	st8.spill	[r31]=r9,16		// r9
+	add		r3=8,r2
+	;;
+}
+{	.mmi
+	.mem.offset	0,0
+	st8.spill	[r30]=r10,16		// r10
+	.mem.offset	8,0
+	st8.spill	[r31]=r11,16		// r11
+	add		r8=16,r16
+	;;
+}
+{	.mmi
+	.mem.offset	0,0
+	st8.spill	[r30]=r14		// r14
+	.mem.offset	8,0
+	st8.spill	[r31]=r15		// r15
+	mov		r9=r29
+}
+// Switch to register bank 1 to reach r16-r31 of the interrupted context.
+{	.mmb
+	mov		r10=ar.csd
+	mov		r11=ar.ssd
+	bsw.1
+	;;
+}
+{	.mmi
+	.mem.offset	0,0
+	st8.spill	[r2]=r16,16		// r16
+	.mem.offset	8,0
+	st8.spill	[r3]=r17,16		// r17
+	mov		r14=b6
+	;;
+}
+{	.mmi
+	.mem.offset	0,0
+	st8.spill	[r2]=r18,16		// r18
+	.mem.offset	8,0
+	st8.spill	[r3]=r19,16		// r19
+	mov		r15=b7
+	;;
+}
+{	.mmi
+	.mem.offset	0,0
+	st8.spill	[r2]=r20,16		// r20
+	.mem.offset	8,0
+	st8.spill	[r3]=r21,16		// r21
+	mov		b7=r8
+	;;
+}
+{	.mmi
+	.mem.offset	0,0
+	st8.spill	[r2]=r22,16		// r22
+	.mem.offset	8,0
+	st8.spill	[r3]=r23,16		// r23
+	;;
+}
+
+	.mem.offset	0,0
+	st8.spill	[r2]=r24,16		// r24
+	.mem.offset	8,0
+	st8.spill	[r3]=r25,16		// r25
+	;;
+	.mem.offset	0,0
+	st8.spill	[r2]=r26,16		// r26
+	.mem.offset	8,0
+	st8.spill	[r3]=r27,16		// r27
+	;;
+	.mem.offset	0,0
+	st8.spill	[r2]=r28,16		// r28
+	.mem.offset	8,0
+	st8.spill	[r3]=r29,16		// r29
+	;;
+	.mem.offset	0,0
+	st8.spill	[r2]=r30,16		// r30
+	.mem.offset	8,0
+	st8.spill	[r3]=r31,16		// r31
+	;;
+
+{	.mmi
+	st8		[r2]=r14,16		// b6
+	mov		r17=ar.unat
+	nop		0
+	;;
+}
+{	.mmi
+	st8		[r3]=r15,16		// b7
+	mov		r16=ar.ccv
+	nop		0
+	;;
+}
+{	.mmi
+	st8		[r2]=r16,16		// ccv
+	st8		[r3]=r10,16		// csd
+	nop		0
+	;;
+}
+{	.mmi
+	st8		[r2]=r11,24		// ssd
+	st8		[r9]=r17
+	nop		0
+	;;
+}
+
+	stf.spill	[r3]=f6,32		// f6
+	stf.spill	[r2]=f7,32		// f7
+	;;
+	stf.spill	[r3]=f8,32		// f8
+	stf.spill	[r2]=f9,32		// f9
+	;;
+	stf.spill	[r3]=f10,32		// f10
+	stf.spill	[r2]=f11,32		// f11
+	;;
+	stf.spill	[r3]=f12,32		// f12
+	stf.spill	[r2]=f13,32		// f13
+	;;
+	stf.spill	[r3]=f14		// f14
+	stf.spill	[r2]=f15		// f15
+	;;
+// Done saving: restore RSE, load PCPU (ar.k4) into r13, re-enable
+// translation and return to the caller via b7.
+{	.mmi
+	mov		ar.rsc=3
+	mov		r13=ar.k4
+	nop		0
+	;;
+}
+{	.mlx
+	ssm		psr.dt|psr.ic|psr.dfh
+	movl		gp=__gp
+	;;
+}
+{	.mib
+	srlz.d
+	nop		0
+	br.sptk		b7
+	;;
+}
+END(exception_save)
+
+/*
+ * exception_restore:	restore interrupted state
+ *
+ * Arguments:
+ *	sp+16	trapframe pointer
+ *
+ * Counterpart of exception_save.  Runs with interrupts disabled and,
+ * once the trapframe's physical address is obtained with tpa, with
+ * data translation and the interruption collection turned off so the
+ * frame can be read without taking a TLB fault on it.  FP, branch,
+ * application and general registers are restored from the trapframe,
+ * the dirty stacked registers are reloaded with loadrs, and the
+ * interrupted context is resumed with rfi.
+ */
+ENTRY_NOPROFILE(exception_restore, 0)
+{	.mmi
+	rsm		psr.i
+	add		sp=16,sp
+	nop		0
+	;;
+}
+
+	// The next instruction can fault. Let it be...
+	tpa		r9=sp
+	;;
+	rsm		psr.dt|psr.ic
+	add		r8=SIZEOF_SPECIAL+16,r9
+	;;
+	srlz.d
+	add		r2=SIZEOF_TRAPFRAME-16,r9
+	add		r3=SIZEOF_TRAPFRAME-32,r9
+	;;
+
+{	.mmi
+	ldf.fill	f15=[r2],-32		// f15
+	ldf.fill	f14=[r3],-32		// f14
+	nop		0
+	;;
+}
+{	.mmi
+	ldf.fill	f13=[r2],-32		// f13
+	ldf.fill	f12=[r3],-32		// f12
+	nop		0
+	;;
+}
+{	.mmi
+	ldf.fill	f11=[r2],-32		// f11
+	ldf.fill	f10=[r3],-32		// f10
+	nop		0
+	;;
+}
+{	.mmi
+	ldf.fill	f9=[r2],-32		// f9
+	ldf.fill	f8=[r3],-32		// f8
+	nop		0
+	;;
+}
+{	.mmi
+	ldf.fill	f7=[r2],-24		// f7
+	ldf.fill	f6=[r3],-16		// f6
+	nop		0
+	;;
+}
+{	.mmi
+	ld8		r8=[r8]			// unat (after)
+	;;
+	mov		ar.unat=r8
+	nop		0
+	;;
+}
+
+	ld8		r10=[r2],-16		// ssd
+	ld8		r11=[r3],-16		// csd
+	;;
+	mov		ar.ssd=r10
+	mov		ar.csd=r11
+
+	ld8		r14=[r2],-16		// ccv
+	ld8		r15=[r3],-16		// b7
+	;;
+
+{	.mmi
+	mov		ar.ccv=r14
+	ld8		r8=[r2],-16		// b6
+	mov		b7=r15
+	;;
+}
+{	.mmi
+	ld8.fill	r31=[r3],-16		// r31
+	ld8.fill	r30=[r2],-16		// r30
+	mov		b6=r8
+	;;
+}
+
+	ld8.fill	r29=[r3],-16		// r29
+	ld8.fill	r28=[r2],-16		// r28
+	;;
+	ld8.fill	r27=[r3],-16		// r27
+	ld8.fill	r26=[r2],-16		// r26
+	;;
+	ld8.fill	r25=[r3],-16		// r25
+	ld8.fill	r24=[r2],-16		// r24
+	;;
+	ld8.fill	r23=[r3],-16		// r23
+	ld8.fill	r22=[r2],-16		// r22
+	;;
+	ld8.fill	r21=[r3],-16		// r21
+	ld8.fill	r20=[r2],-16		// r20
+	;;
+	ld8.fill	r19=[r3],-16		// r19
+	ld8.fill	r18=[r2],-16		// r18
+	;;
+
+{	.mmb
+	ld8.fill	r17=[r3],-16		// r17
+	ld8.fill	r16=[r2],-16		// r16
+	bsw.0					// back to register bank 0
+	;;
+}
+{	.mii
+	ld8		r16=[r9]		// tf_length
+	add		r31=16,r9
+	add		r30=24,r9
+}
+{	.mmi
+	ld8.fill	r15=[r3],-16		// r15
+	ld8.fill	r14=[r2],-16		// r14
+	nop		0
+	;;
+}
+{	.mmi
+	ld8.fill	r11=[r3],-16		// r11
+	ld8.fill	r10=[r2],-16		// r10
+	add		r16=r16,sp		// ar.k7
+	;;
+}
+{	.mmi
+	ld8.fill	r9=[r3],-16		// r9
+	ld8.fill	r8=[r2],-16		// r8
+	nop		0
+	;;
+}
+{	.mmi
+	ld8.fill	r3=[r3]			// r3
+	ld8.fill	r2=[r2]			// r2
+	nop		0
+	;;
+}
+
+	ld8.fill	sp=[r31],16		// sp
+	ld8		r17=[r30],16		// unat
+	;;
+	ld8		r29=[r31],16		// rp
+	ld8		r18=[r30],16		// pr
+	;;
+	ld8		r28=[r31],16		// pfs
+	ld8		r20=[r30],24		// bspstore
+	mov		rp=r29
+	;;
+	ld8		r21=[r31],24		// rnat
+	mov		ar.pfs=r28
+	;;
+	ld8.fill	r26=[r30],16		// tp
+	ld8		r22=[r31],16		// rsc
+	;;
+
+{	.mmi
+	ld8		r23=[r30],16		// fpsr
+	ld8		r24=[r31],16		// psr
+	extr.u		r28=r20,61,3		// region of saved bspstore
+	;;
+}
+{	.mmi
+	ld8.fill	r1=[r30],16		// gp
+	ld8		r27=[r31],16		// ndirty
+	cmp.le		p14,p15=IA64_VM_MINKERN_REGION,r28
+	;;
+}
+{	.mmi
+	ld8		r25=[r30]		// cfm
+	ld8		r19=[r31]		// ip
+	nop		0
+	;;
+}
+{	.mii
+	// Switch register stack
+	alloc		r30=ar.pfs,0,0,0,0	// discard current frame
+	shl		r31=r27,16		// value for ar.rsc
+(p15)	mov		r13=r26			// returning to user: reload tp
+	;;
+}
+	// The loadrs can fault if the backing store is not currently
+	// mapped. We assured forward progress by getting everything we
+	// need from the trapframe so that we don't care if the CPU
+	// purges that translation when it needs to insert a new one for
+	// the backing store.
+{	.mmi
+	mov		ar.rsc=r31		// setup for loadrs
+	mov		ar.k7=r16
+	addl		r29=NTLBRT_RESTORE,r0	// 22-bit restart token 
+	;;
+}
+
+	// Translation can go back on now; everything else we need is
+	// already in registers.
+	ssm		psr.dt
+	;;
+	srlz.d
+	mov		r16 = r25
+
+	// The nested TLB fault handler restarts us here (it recognizes
+	// the NTLBRT_RESTORE token left in r29) if the loadrs below
+	// faults because the backing store is unmapped.
+exception_restore_restart:
+{	.mmi
+	mov		r30=ar.bspstore
+	;;
+	loadrs					// load user regs
+	mov		r29=0			// Clobber restart token
+	;;
+}
+{	.mmi
+	mov		r31=ar.bspstore
+	;;
+	mov		ar.bspstore=r20
+	dep		r31=0,r31,0,13		// 8KB aligned
+	;;
+}
+{	.mmi
+	mov		cr.ifs=r16
+	mov		ar.k6=r31
+	mov		pr=r18,0x1ffff
+	;;
+}
+{	.mmi
+	mov		cr.iip=r19
+	mov		ar.unat=r17
+	nop		0
+	;;
+}
+{	.mmi
+	mov		cr.ipsr=r24
+	mov		ar.rnat=r21
+	nop		0
+	;;
+}
+{	.mmb
+	mov		ar.rsc=r22
+	mov		ar.fpsr=r23
+	rfi
+	;;
+}
+END(exception_restore)
+
+/*
+ * Call exception_save_regs to preserve the interrupted state in a
+ * trapframe. Note that we don't use a call instruction because we
+ * must be careful not to lose track of the RSE state. We then call
+ * trap() with the value of _n_ as an argument to handle the
+ * exception. We arrange for trap() to return to exception_restore
+ * which will restore the interrupted state before executing an rfi to
+ * resume it.
+ */
+#define CALL(_func_, _n_, _ifa_)		\
+{	.mib ;					\
+	mov		r17=_ifa_ ;		\
+	mov		r16=ip ;		\
+	br.sptk		exception_save ;;	\
+} ;						\
+{	.mmi ;					\
+	alloc		r15=ar.pfs,0,0,2,0 ;;	\
+(p11)	ssm		psr.i ;			\
+	mov		out0=_n_ ;;		\
+} ;						\
+{	.mib ;					\
+(p11)	srlz.d ;				\
+	add		out1=16,sp ;		\
+	br.call.sptk	rp=_func_ ;;		\
+} ;						\
+{	.mib ;					\
+	nop		0 ;			\
+	nop		0 ;			\
+	br.sptk		exception_restore ;;	\
+}
+
+/*
+ * IVT_ENTRY(name, offset): place vector 'name' at the given byte
+ * offset within ia64_vector_table and emit the unwind directives for
+ * it.  IVT_END(name) closes the corresponding .proc.
+ */
+#define	IVT_ENTRY(name, offset)			\
+	.org	ia64_vector_table + offset;	\
+	.global	ivt_##name;			\
+	.proc	ivt_##name;			\
+	.prologue;				\
+	.unwabi	@svr4, 'I';			\
+	.save	rp, r0;				\
+	.body;					\
+ivt_##name:					\
+	XTRACE_HOOK(offset)
+
+#define	IVT_END(name)				\
+	.endp	ivt_##name
+
+/*
+ * Route the IA-32 vectors to ia32_trap() only when the kernel is
+ * built with COMPAT_FREEBSD32; otherwise they go to plain trap().
+ */
+#ifdef COMPAT_FREEBSD32
+#define	IA32_TRAP	ia32_trap
+#else
+#define	IA32_TRAP	trap
+#endif
+
+/*
+ * The IA64 Interrupt Vector Table (IVT) contains 20 slots with 64
+ * bundles per vector and 48 slots with 16 bundles per vector.
+ */
+
+	.section .ivt, "ax"
+
+	.align	32768
+	.global ia64_vector_table
+	.size	ia64_vector_table, 32768
+ia64_vector_table:
+
+/* Vector 0x0000: VHPT translation fault -- no inline handling. */
+IVT_ENTRY(VHPT_Translation, 0x0000)
+	CALL(trap, 0, cr.ifa)
+IVT_END(VHPT_Translation)
+
+IVT_ENTRY(Instruction_TLB, 0x0400)
+	// Vector 0x0400: instruction TLB fault.  Probe the VHPT slot for
+	// the faulting address; on a tag match insert the pte with itc.i
+	// and rfi.  Otherwise walk the bucket's collision chain with data
+	// translation off: a matching chain entry is marked PTE_ACCESSED,
+	// copied into the VHPT slot (the old slot is tagged invalid first
+	// with the ti bit) and inserted.  An empty chain means the page
+	// is not present, so fall back to trap(20).
+	mov	r16=cr.ifa
+	mov	r17=pr
+	;;
+	thash	r18=r16
+	ttag	r19=r16
+	;;
+	add	r21=16,r18		// tag
+	add	r20=24,r18		// collision chain
+	;; 
+	ld8	r21=[r21]		// check VHPT tag
+	ld8	r20=[r20]		// bucket head
+	;;
+	cmp.ne	p15,p0=r21,r19
+(p15)	br.dpnt.few 1f
+	;;
+	ld8	r21=[r18]		// read pte
+	;;
+	itc.i	r21			// insert pte
+	mov	pr=r17,0x1ffff
+	;;
+	rfi				// done
+	;;
+1:	rsm	psr.dt			// turn off data translations
+	dep	r20=0,r20,61,3		// convert vhpt ptr to physical
+	;;
+	srlz.d				// serialize
+	ld8	r20=[r20]		// first entry
+	;;
+2:	cmp.eq	p15,p0=r0,r20		// done?
+(p15)	br.cond.spnt.few 9f		// bail if done
+	;;
+	add	r21=16,r20		// tag location
+	;;
+	ld8	r21=[r21]		// read tag
+	;;
+	cmp.ne	p15,p0=r21,r19		// compare tags
+(p15)	br.cond.sptk.few 3f		// if not, read next in chain
+	;;
+	ld8	r21=[r20]		// read pte
+	mov	r22=PTE_ACCESSED
+	;;
+	or	r21=r21,r22
+	;;
+	st8	[r20]=r21,8
+	;; 
+	ld8	r22=[r20]		// read rest of pte
+	;;
+	dep	r18=0,r18,61,3		// convert vhpt ptr to physical
+	;;
+	add	r20=16,r18		// address of tag
+	;;
+	ld8.acq	r23=[r20]		// read old tag
+	;;
+	dep	r23=-1,r23,63,1		// set ti bit
+	;;
+	st8.rel	[r20]=r23		// store old tag + ti
+	;;
+	mf				// make sure everyone sees
+	;;
+	st8	[r18]=r21,8		// store pte
+	;;
+	st8	[r18]=r22,8
+	;;
+	st8.rel	[r18]=r19		// store new tag
+	;;
+	itc.i	r21			// and place in TLB
+	ssm	psr.dt
+	;; 
+	srlz.d
+	mov	pr=r17,0x1ffff		// restore predicates
+	rfi
+	;;
+3:	add	r20=24,r20		// next in chain
+	;;
+	ld8	r20=[r20]		// read chain
+	br.sptk	2b			// loop
+	;;
+9:	ssm	psr.dt
+	mov	pr=r17,0x1ffff		// restore predicates
+	;;
+	srlz.d
+	;; 
+	CALL(trap, 20, cr.ifa)		// Page Not Present trap
+IVT_END(Instruction_TLB)
+
+IVT_ENTRY(Data_TLB, 0x0800)
+	// Vector 0x0800: data TLB fault.  Same VHPT probe and collision
+	// chain walk as Instruction_TLB above, but the pte is inserted
+	// with itc.d.  An empty chain falls back to trap(20).
+	mov	r16=cr.ifa
+	mov	r17=pr
+	;;
+	thash	r18=r16
+	ttag	r19=r16
+	;;
+	add	r21=16,r18		// tag
+	add	r20=24,r18		// collision chain
+	;; 
+	ld8	r21=[r21]		// check VHPT tag
+	ld8	r20=[r20]		// bucket head
+	;;
+	cmp.ne	p15,p0=r21,r19
+(p15)	br.dpnt.few 1f
+	;;
+	ld8	r21=[r18]		// read pte
+	;;
+	itc.d	r21			// insert pte
+	mov	pr=r17,0x1ffff
+	;;
+	rfi				// done
+	;;
+1:	rsm	psr.dt			// turn off data translations
+	dep	r20=0,r20,61,3		// convert vhpt ptr to physical
+	;; 
+	srlz.d				// serialize
+	ld8	r20=[r20]		// first entry
+	;;
+2:	cmp.eq	p15,p0=r0,r20		// done?
+(p15)	br.cond.spnt.few 9f		// bail if done
+	;;
+	add	r21=16,r20		// tag location
+	;;
+	ld8	r21=[r21]		// read tag
+	;;
+	cmp.ne	p15,p0=r21,r19		// compare tags
+(p15)	br.cond.sptk.few 3f		// if not, read next in chain
+	;;
+	ld8	r21=[r20]		// read pte
+	mov	r22=PTE_ACCESSED
+	;;
+	or	r21=r21,r22
+	;;
+	st8	[r20]=r21,8
+	;; 
+	ld8	r22=[r20]		// read rest of pte
+	;;
+	dep	r18=0,r18,61,3		// convert vhpt ptr to physical
+	;;
+	add	r20=16,r18		// address of tag
+	;;
+	ld8.acq	r23=[r20]		// read old tag
+	;;
+	dep	r23=-1,r23,63,1		// set ti bit
+	;;
+	st8.rel	[r20]=r23		// store old tag + ti
+	;;
+	mf				// make sure everyone sees
+	;;
+	st8	[r18]=r21,8		// store pte
+	;;
+	st8	[r18]=r22,8
+	;;
+	st8.rel	[r18]=r19		// store new tag
+	;;
+	itc.d	r21			// and place in TLB
+	ssm	psr.dt
+	;; 
+	srlz.d
+	mov	pr=r17,0x1ffff		// restore predicates
+	rfi
+	;;
+3:	add	r20=24,r20		// next in chain
+	;;
+	ld8	r20=[r20]		// read chain
+	br.sptk	2b			// loop
+	;;
+9:	ssm	psr.dt
+	mov	pr=r17,0x1ffff		// restore predicates
+	;;
+	srlz.d
+	;; 
+	CALL(trap, 20, cr.ifa)		// Page Not Present trap
+IVT_END(Data_TLB)
+
+IVT_ENTRY(Alternate_Instruction_TLB, 0x0c00)
+	// Vector 0x0c00: alternate instruction TLB fault.  The PBVM
+	// region (RR4) is translated through the PBVM page table at
+	// IA64_PBVM_PGTBL; regions 6 and 7 get an identity pte built
+	// inline (region 7 cacheable/WB, region 6 uncacheable/UC);
+	// regions 0 through 5 take the slow path via trap(3).
+	mov	r16=cr.ifa		// where did it happen
+	mov	r18=pr			// save predicates
+	;;
+	extr.u	r17=r16,61,3		// get region number
+	mov	r19=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX
+	;;
+	cmp.eq	p13,p0=IA64_PBVM_RR,r17		// RR4?
+(p13)	br.cond.sptk.few	4f
+	;;
+	cmp.ge	p13,p0=5,r17		// RR0-RR5?
+	cmp.eq	p14,p15=7,r17		// RR7?
+(p13)	br.cond.spnt.few	9f
+	;;
+(p14)	add	r19=PTE_MA_WB,r19
+(p15)	add	r19=PTE_MA_UC,r19
+	dep	r17=0,r16,50,14		// clear bits above PPN
+	;;
+1:	dep	r16=r19,r17,0,12	// put pte bits in 0..11
+	;;
+	itc.i	r16
+	mov	pr=r18,0x1ffff		// restore predicates
+	;;
+	rfi
+	;;
+4:
+	add	r19=PTE_MA_WB,r19
+	movl	r17=IA64_PBVM_BASE
+	;;
+	sub	r17=r16,r17
+	movl	r16=IA64_PBVM_PGTBL
+	;;
+	extr.u	r17=r17,IA64_PBVM_PAGE_SHIFT,61-IA64_PBVM_PAGE_SHIFT
+	;;
+	shladd	r16=r17,3,r16
+	;;
+	ld8	r17=[r16]
+	br.sptk	1b
+	;;
+9:	mov	pr=r18,0x1ffff		// restore predicates
+	CALL(trap, 3, cr.ifa)
+IVT_END(Alternate_Instruction_TLB)
+
+IVT_ENTRY(Alternate_Data_TLB, 0x1000)
+	// Vector 0x1000: alternate data TLB fault.  Identical to
+	// Alternate_Instruction_TLB above except the pte is inserted
+	// with itc.d and the slow path is trap(4).
+	mov	r16=cr.ifa		// where did it happen
+	mov	r18=pr			// save predicates
+	;;
+	extr.u	r17=r16,61,3		// get region number
+	mov	r19=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX
+	;;
+	cmp.eq	p13,p0=IA64_PBVM_RR,r17		// RR4?
+(p13)	br.cond.sptk.few	4f
+	;;
+	cmp.ge	p13,p0=5,r17		// RR0-RR5?
+	cmp.eq	p14,p15=7,r17		// RR7?
+(p13)	br.cond.spnt.few	9f
+	;;
+(p14)	add	r19=PTE_MA_WB,r19
+(p15)	add	r19=PTE_MA_UC,r19
+	dep	r17=0,r16,50,14		// clear bits above PPN
+	;;
+1:	dep	r16=r19,r17,0,12	// put pte bits in 0..11
+	;;
+	itc.d	r16
+	mov	pr=r18,0x1ffff		// restore predicates
+	;;
+	rfi
+	;;
+4:
+	add	r19=PTE_MA_WB,r19
+	movl	r17=IA64_PBVM_BASE
+	;;
+	sub	r17=r16,r17
+	movl	r16=IA64_PBVM_PGTBL
+	;;
+	extr.u	r17=r17,IA64_PBVM_PAGE_SHIFT,61-IA64_PBVM_PAGE_SHIFT
+	;;
+	shladd	r16=r17,3,r16
+	;;
+	ld8	r17=[r16]
+	br.sptk	1b
+	;;
+9:	mov	pr=r18,0x1ffff		// restore predicates
+	CALL(trap, 4, cr.ifa)
+IVT_END(Alternate_Data_TLB)
+
+IVT_ENTRY(Data_Nested_TLB, 0x1400)
+	// See exception_save_restart and exception_restore_restart for the
+	// contexts that may cause a data nested TLB. We can only use the
+	// banked general registers and predicates, but don't use:
+	//	p14 & p15	-	Set in exception save
+	//	r16 & r17	-	Arguments to exception save
+	//	r30		-	Faulting address (modulo page size)
+	// We assume r30 has the virtual addresses that relate to the data
+	// nested TLB fault. The address does not have to be exact, as long
+	// as it's in the same page. We use physical addressing to avoid
+	// double nested faults. Since all virtual addresses we encounter
+	// here are direct mapped region 7 addresses, we have no problem
+	// constructing physical addresses.
+
+{	.mmi
+	mov		cr.ifa=r30
+	mov		r26=rr[r30]
+	extr.u		r27=r30,61,3
+	;;
+}
+{	.mii
+	nop		0
+	dep		r26=0,r26,0,2
+	cmp.eq		p12,p13=7,r27		// region 7: identity pte
+	;;
+}
+{	.mii
+	mov		cr.itir=r26
+(p12)	dep		r28=0,r30,0,12
+(p13)	extr.u		r28=r30,3*PAGE_SHIFT-8, PAGE_SHIFT-3	// dir L0 index
+	;;
+}
+{	.mlx
+(p12)	add		r28=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX+PTE_MA_WB,r28
+(p13)	movl		r27=ia64_kptdir
+	;;
+}
+{	.mib
+(p13)	ld8		r27=[r27]
+(p13)	extr.u		r26=r30,2*PAGE_SHIFT-5, PAGE_SHIFT-3	// dir L1 index
+(p12)	br.cond.spnt.few 1f
+	;;
+}
+	// Not region 7: walk the kernel page table (ia64_kptdir) with
+	// translation off to find the pte, and mark it dirty+accessed.
+{	.mmi
+	rsm		psr.dt
+	;;
+	srlz.d
+	dep		r27=0,r27,61,3
+	;;
+}
+{	.mmi
+	shladd		r27=r28,3,r27
+	;;
+	ld8		r27=[r27]				// dir L1 page
+	extr.u		r28=r30,PAGE_SHIFT,PAGE_SHIFT-5		// pte index
+	;;
+}
+{	.mii
+	shladd		r27=r26,3,r27
+	shl		r28=r28,5
+	;;
+	dep		r27=0,r27,61,3
+	;;
+}
+	ld8		r27=[r27]				// pte page
+	;;
+	add		r27=r28,r27
+	;;
+	dep		r27=0,r27,61,3
+	;;
+	ld8		r28=[r27]				// pte
+	;;
+	or		r28=PTE_DIRTY+PTE_ACCESSED,r28
+	;;
+	st8		[r27]=r28
+	;;
+	ssm		psr.dt
+	;;
+1:
+{	.mmi
+	itc.d		r28
+	;;
+	addl		r26=NTLBRT_SAVE,r0
+	addl		r27=NTLBRT_RESTORE,r0
+	;;
+}
+	// If the interrupted code left a restart token in r29, resume it
+	// at the matching restart point now that the pte is inserted.
+{	.mmi
+	srlz.d
+	cmp.eq		p12,p0=r29,r26
+	cmp.eq		p13,p0=r29,r27
+	;;
+}
+{	.mbb
+	nop		0
+(p12)	br.cond.sptk.few	exception_save_restart
+(p13)	br.cond.sptk.few	exception_restore_restart
+	;;
+}
+
+	// No restart token: the fault came from an unexpected context.
+	// Switch to the kernel stack and backing store, then report the
+	// fault through trap(5).
+{	.mlx
+	mov		r26=ar.bsp
+	movl		r29=kstack
+	;;
+}
+{	.mlx
+	mov		r28=sp
+	movl		r27=kstack_top
+	;;
+}
+{	.mmi
+	add		sp=-16,r27
+	;;
+	mov		r27=ar.bspstore
+	nop		0
+	;;
+}
+	mov		ar.rsc=0
+	dep		r29=r27,r29,0,9
+	;;
+	mov		ar.bspstore=r29
+	;;
+	CALL(trap, 5, r30)
+IVT_END(Data_Nested_TLB)
+
+/* Vectors 0x1800/0x1c00: protection-key misses are not handled inline. */
+IVT_ENTRY(Instruction_Key_Miss, 0x1800)
+	CALL(trap, 6, cr.ifa)
+IVT_END(Instruction_Key_Miss)
+
+IVT_ENTRY(Data_Key_Miss, 0x1c00)
+	CALL(trap, 7, cr.ifa)
+IVT_END(Data_Key_Miss)
+
+IVT_ENTRY(Dirty_Bit, 0x2000)
+	// Vector 0x2000: dirty-bit fault.  Walk the VHPT collision chain
+	// for the faulting address; on a match set PTE_DIRTY|PTE_ACCESSED
+	// in the chain entry, promote it into the VHPT slot and the TLB
+	// (itc.d), and rfi.  An empty chain means an unexpected write
+	// fault: hand it to trap(8).
+	mov	r16=cr.ifa
+	mov	r17=pr
+	;;
+	thash	r18=r16
+	;;
+	ttag	r19=r16
+	add	r20=24,r18		// collision chain
+	;; 
+	ld8	r20=[r20]		// bucket head
+	;;
+	rsm	psr.dt			// turn off data translations
+	dep	r20=0,r20,61,3		// convert vhpt ptr to physical
+	;;
+	srlz.d				// serialize
+	ld8	r20=[r20]		// first entry
+	;;
+1:	cmp.eq	p15,p0=r0,r20		// done?
+(p15)	br.cond.spnt.few 9f		// bail if done
+	;;
+	add	r21=16,r20		// tag location
+	;;
+	ld8	r21=[r21]		// read tag
+	;;
+	cmp.ne	p15,p0=r21,r19		// compare tags
+(p15)	br.cond.sptk.few 2f		// if not, read next in chain
+	;;
+	ld8	r21=[r20]		// read pte
+	mov	r22=PTE_DIRTY+PTE_ACCESSED
+	;;
+	or	r21=r22,r21		// set dirty & access bit
+	;;
+	st8	[r20]=r21,8		// store back
+	;; 
+	ld8	r22=[r20]		// read rest of pte
+	;;
+	dep	r18=0,r18,61,3		// convert vhpt ptr to physical
+	;;
+	add	r20=16,r18		// address of tag
+	;;
+	ld8.acq	r23=[r20]		// read old tag
+	;;
+	dep	r23=-1,r23,63,1		// set ti bit
+	;;
+	st8.rel	[r20]=r23		// store old tag + ti
+	;;
+	mf				// make sure everyone sees
+	;;
+	st8	[r18]=r21,8		// store pte
+	;;
+	st8	[r18]=r22,8
+	;;
+	st8.rel	[r18]=r19		// store new tag
+	;;
+	itc.d	r21			// and place in TLB
+	ssm	psr.dt
+	;; 
+	srlz.d
+	mov	pr=r17,0x1ffff		// restore predicates
+	rfi
+	;;
+2:	add	r20=24,r20		// next in chain
+	;;
+	ld8	r20=[r20]		// read chain
+	br.sptk	1b			// loop
+	;;
+9:	ssm	psr.dt
+	mov	pr=r17,0x1ffff		// restore predicates
+	;;
+	srlz.d
+	;;
+	CALL(trap, 8, cr.ifa)			// die horribly
+IVT_END(Dirty_Bit)
+
+IVT_ENTRY(Instruction_Access_Bit, 0x2400)
+	// Vector 0x2400: instruction access-bit fault.  Same chain walk
+	// as Dirty_Bit, but only PTE_ACCESSED is set and the pte is
+	// inserted with itc.i.  An empty chain goes to trap(9).
+	mov	r16=cr.ifa
+	mov	r17=pr
+	;;
+	thash	r18=r16
+	;;
+	ttag	r19=r16
+	add	r20=24,r18		// collision chain
+	;; 
+	ld8	r20=[r20]		// bucket head
+	;;
+	rsm	psr.dt			// turn off data translations
+	dep	r20=0,r20,61,3		// convert vhpt ptr to physical
+	;;
+	srlz.d				// serialize
+	ld8	r20=[r20]		// first entry
+	;;
+1:	cmp.eq	p15,p0=r0,r20		// done?
+(p15)	br.cond.spnt.few 9f		// bail if done
+	;;
+	add	r21=16,r20		// tag location
+	;;
+	ld8	r21=[r21]		// read tag
+	;;
+	cmp.ne	p15,p0=r21,r19		// compare tags
+(p15)	br.cond.sptk.few 2f		// if not, read next in chain
+	;;
+	ld8	r21=[r20]		// read pte
+	mov	r22=PTE_ACCESSED
+	;;
+	or	r21=r22,r21		// set accessed bit
+	;;
+	st8	[r20]=r21,8		// store back
+	;;
+	ld8	r22=[r20]		// read rest of pte
+	;;
+	dep	r18=0,r18,61,3		// convert vhpt ptr to physical
+	;;
+	add	r20=16,r18		// address of tag
+	;;
+	ld8.acq	r23=[r20]		// read old tag
+	;;
+	dep	r23=-1,r23,63,1		// set ti bit
+	;;
+	st8.rel	[r20]=r23		// store old tag + ti
+	;;
+	mf				// make sure everyone sees
+	;;
+	st8	[r18]=r21,8		// store pte
+	;;
+	st8	[r18]=r22,8
+	;;
+	st8.rel	[r18]=r19		// store new tag
+	;;
+	itc.i	r21			// and place in TLB
+	ssm	psr.dt
+	;; 
+	srlz.d
+	mov	pr=r17,0x1ffff		// restore predicates
+	rfi				// walker will retry the access
+	;;
+2:	add	r20=24,r20		// next in chain
+	;;
+	ld8	r20=[r20]		// read chain
+	br.sptk	1b			// loop
+	;;
+9:	ssm	psr.dt
+	mov	pr=r17,0x1ffff		// restore predicates
+	;;
+	srlz.d
+	;;
+	CALL(trap, 9, cr.ifa)
+IVT_END(Instruction_Access_Bit)
+
+IVT_ENTRY(Data_Access_Bit, 0x2800)
+	// Vector 0x2800: data access-bit fault.  Identical to
+	// Instruction_Access_Bit except the pte is inserted with itc.d
+	// and the fallback is trap(10).
+	mov	r16=cr.ifa
+	mov	r17=pr
+	;;
+	thash	r18=r16
+	;;
+	ttag	r19=r16
+	add	r20=24,r18		// collision chain
+	;;
+	ld8	r20=[r20]		// bucket head
+	;;
+	rsm	psr.dt			// turn off data translations
+	dep	r20=0,r20,61,3		// convert vhpt ptr to physical
+	;;
+	srlz.d				// serialize
+	ld8	r20=[r20]		// first entry
+	;;
+1:	cmp.eq	p15,p0=r0,r20		// done?
+(p15)	br.cond.spnt.few 9f		// bail if done
+	;;
+	add	r21=16,r20		// tag location
+	;;
+	ld8	r21=[r21]		// read tag
+	;;
+	cmp.ne	p15,p0=r21,r19		// compare tags
+(p15)	br.cond.sptk.few 2f		// if not, read next in chain
+	;;
+	ld8	r21=[r20]		// read pte
+	mov	r22=PTE_ACCESSED
+	;;
+	or	r21=r22,r21		// set accessed bit
+	;;
+	st8	[r20]=r21,8		// store back
+	;; 
+	ld8	r22=[r20]		// read rest of pte
+	;;
+	dep	r18=0,r18,61,3		// convert vhpt ptr to physical
+	;;
+	add	r20=16,r18		// address of tag
+	;;
+	ld8.acq	r23=[r20]		// read old tag
+	;;
+	dep	r23=-1,r23,63,1		// set ti bit
+	;;
+	st8.rel	[r20]=r23		// store old tag + ti
+	;;
+	mf				// make sure everyone sees
+	;;
+	st8	[r18]=r21,8		// store pte
+	;;
+	st8	[r18]=r22,8
+	;;
+	st8.rel	[r18]=r19		// store new tag
+	;;
+	itc.d	r21			// and place in TLB
+	ssm	psr.dt
+	;; 
+	srlz.d
+	mov	pr=r17,0x1ffff		// restore predicates
+	rfi				// walker will retry the access
+	;;
+2:	add	r20=24,r20		// next in chain
+	;;
+	ld8	r20=[r20]		// read chain
+	br.sptk	1b			// loop
+	;;
+9:	ssm	psr.dt
+	mov	pr=r17,0x1ffff		// restore predicates
+	;;
+	srlz.d
+	;;
+	CALL(trap, 10, cr.ifa)
+IVT_END(Data_Access_Bit)
+
+IVT_ENTRY(Break_Instruction, 0x2c00)
+	// Vector 0x2c00: break instruction.  Open-coded variant of
+	// CALL(trap, 11, cr.iim); the only difference is the added
+	// flushrs, which writes all dirty stacked registers to the
+	// backing store before trap() runs.
+{	.mib
+	mov		r17=cr.iim
+	mov		r16=ip
+	br.sptk		exception_save
+	;;
+}
+{	.mmi
+	alloc		r15=ar.pfs,0,0,2,0
+	;;
+(p11)	ssm		psr.i
+	mov		out0=11
+	;;
+}
+{	.mmi
+	flushrs
+	;;
+(p11)	srlz.d
+	add		out1=16,sp
+}
+{	.mib
+	nop		0
+	nop		0
+	br.call.sptk	rp=trap
+	;;
+}
+{	.mib
+	nop		0
+	nop		0
+	br.sptk		exception_restore
+	;;
+}
+IVT_END(Break_Instruction)
+
+IVT_ENTRY(External_Interrupt, 0x3000)
+	// Vector 0x3000: external interrupt.  Save state and hand the
+	// trapframe to ia64_handle_intr().  Unlike CALL(), psr.i is not
+	// re-enabled here, so the handler runs with interrupts disabled.
+{	.mib
+	mov		r17=0
+	mov		r16=ip
+	br.sptk		exception_save
+	;;
+}
+{	.mmi
+	alloc		r15=ar.pfs,0,0,1,0
+	nop		0
+	nop		0
+	;;
+}
+{	.mib
+	add		out0=16,sp
+	nop		0
+	br.call.sptk	rp=ia64_handle_intr
+	;;
+}
+{	.mib
+	nop		0
+	nop		0
+	br.sptk		exception_restore
+	;;
+}
+IVT_END(External_Interrupt)
+
+/*
+ * The remaining vectors need no inline handling: each saves state and
+ * calls trap() -- or ia32_trap() for the IA-32 vectors when built with
+ * COMPAT_FREEBSD32 -- with the vector number and cr.ifa (cr.iim for
+ * Speculation and IA_32_Intercept).
+ */
+IVT_ENTRY(Reserved_3400, 0x3400)
+	CALL(trap, 13, cr.ifa)
+IVT_END(Reserved_3400)
+
+IVT_ENTRY(Reserved_3800, 0x3800)
+	CALL(trap, 14, cr.ifa)
+IVT_END(Reserved_3800)
+
+IVT_ENTRY(Reserved_3c00, 0x3c00)
+	CALL(trap, 15, cr.ifa)
+IVT_END(Reserved_3c00)
+
+IVT_ENTRY(Reserved_4000, 0x4000)
+	CALL(trap, 16, cr.ifa)
+IVT_END(Reserved_4000)
+
+IVT_ENTRY(Reserved_4400, 0x4400)
+	CALL(trap, 17, cr.ifa)
+IVT_END(Reserved_4400)
+
+IVT_ENTRY(Reserved_4800, 0x4800)
+	CALL(trap, 18, cr.ifa)
+IVT_END(Reserved_4800)
+
+IVT_ENTRY(Reserved_4c00, 0x4c00)
+	CALL(trap, 19, cr.ifa)
+IVT_END(Reserved_4c00)
+
+IVT_ENTRY(Page_Not_Present, 0x5000)
+	CALL(trap, 20, cr.ifa)
+IVT_END(Page_Not_Present)
+
+IVT_ENTRY(Key_Permission, 0x5100)
+	CALL(trap, 21, cr.ifa)
+IVT_END(Key_Permission)
+
+IVT_ENTRY(Instruction_Access_Rights, 0x5200)
+	CALL(trap, 22, cr.ifa)
+IVT_END(Instruction_Access_Rights)
+
+IVT_ENTRY(Data_Access_Rights, 0x5300)
+	CALL(trap, 23, cr.ifa)
+IVT_END(Data_Access_Rights)
+
+IVT_ENTRY(General_Exception, 0x5400)
+	CALL(trap, 24, cr.ifa)
+IVT_END(General_Exception)
+
+IVT_ENTRY(Disabled_FP_Register, 0x5500)
+	CALL(trap, 25, cr.ifa)
+IVT_END(Disabled_FP_Register)
+
+IVT_ENTRY(NaT_Consumption, 0x5600)
+	CALL(trap, 26, cr.ifa)
+IVT_END(NaT_Consumption)
+
+IVT_ENTRY(Speculation, 0x5700)
+	CALL(trap, 27, cr.iim)
+IVT_END(Speculation)
+
+IVT_ENTRY(Reserved_5800, 0x5800)
+	CALL(trap, 28, cr.ifa)
+IVT_END(Reserved_5800)
+
+IVT_ENTRY(Debug, 0x5900)
+	CALL(trap, 29, cr.ifa)
+IVT_END(Debug)
+
+IVT_ENTRY(Unaligned_Reference, 0x5a00)
+	CALL(trap, 30, cr.ifa)
+IVT_END(Unaligned_Reference)
+
+IVT_ENTRY(Unsupported_Data_Reference, 0x5b00)
+	CALL(trap, 31, cr.ifa)
+IVT_END(Unsupported_Data_Reference)
+
+IVT_ENTRY(Floating_Point_Fault, 0x5c00)
+	CALL(trap, 32, cr.ifa)
+IVT_END(Floating_Point_Fault)
+
+IVT_ENTRY(Floating_Point_Trap, 0x5d00)
+	CALL(trap, 33, cr.ifa)
+IVT_END(Floating_Point_Trap)
+
+IVT_ENTRY(Lower_Privilege_Transfer_Trap, 0x5e00)
+	CALL(trap, 34, cr.ifa)
+IVT_END(Lower_Privilege_Transfer_Trap)
+
+IVT_ENTRY(Taken_Branch_Trap, 0x5f00)
+	CALL(trap, 35, cr.ifa)
+IVT_END(Taken_Branch_Trap)
+
+IVT_ENTRY(Single_Step_Trap, 0x6000)
+	CALL(trap, 36, cr.ifa)
+IVT_END(Single_Step_Trap)
+
+IVT_ENTRY(Reserved_6100, 0x6100)
+	CALL(trap, 37, cr.ifa)
+IVT_END(Reserved_6100)
+
+IVT_ENTRY(Reserved_6200, 0x6200)
+	CALL(trap, 38, cr.ifa)
+IVT_END(Reserved_6200)
+
+IVT_ENTRY(Reserved_6300, 0x6300)
+	CALL(trap, 39, cr.ifa)
+IVT_END(Reserved_6300)
+
+IVT_ENTRY(Reserved_6400, 0x6400)
+	CALL(trap, 40, cr.ifa)
+IVT_END(Reserved_6400)
+
+IVT_ENTRY(Reserved_6500, 0x6500)
+	CALL(trap, 41, cr.ifa)
+IVT_END(Reserved_6500)
+
+IVT_ENTRY(Reserved_6600, 0x6600)
+	CALL(trap, 42, cr.ifa)
+IVT_END(Reserved_6600)
+
+IVT_ENTRY(Reserved_6700, 0x6700)
+	CALL(trap, 43, cr.ifa)
+IVT_END(Reserved_6700)
+
+IVT_ENTRY(Reserved_6800, 0x6800)
+	CALL(trap, 44, cr.ifa)
+IVT_END(Reserved_6800)
+
+IVT_ENTRY(IA_32_Exception, 0x6900)
+	CALL(IA32_TRAP, 45, cr.ifa)
+IVT_END(IA_32_Exception)
+
+IVT_ENTRY(IA_32_Intercept, 0x6a00)
+	CALL(IA32_TRAP, 46, cr.iim)
+IVT_END(IA_32_Intercept)
+
+IVT_ENTRY(IA_32_Interrupt, 0x6b00)
+	CALL(IA32_TRAP, 47, cr.ifa)
+IVT_END(IA_32_Interrupt)
+
+IVT_ENTRY(Reserved_6c00, 0x6c00)
+	CALL(trap, 48, cr.ifa)
+IVT_END(Reserved_6c00)
+
+IVT_ENTRY(Reserved_6d00, 0x6d00)
+	CALL(trap, 49, cr.ifa)
+IVT_END(Reserved_6d00)
+
+IVT_ENTRY(Reserved_6e00, 0x6e00)
+	CALL(trap, 50, cr.ifa)
+IVT_END(Reserved_6e00)
+
+IVT_ENTRY(Reserved_6f00, 0x6f00)
+	CALL(trap, 51, cr.ifa)
+IVT_END(Reserved_6f00)
+
+IVT_ENTRY(Reserved_7000, 0x7000)
+	CALL(trap, 52, cr.ifa)
+IVT_END(Reserved_7000)
+
+IVT_ENTRY(Reserved_7100, 0x7100)
+	CALL(trap, 53, cr.ifa)
+IVT_END(Reserved_7100)
+
+IVT_ENTRY(Reserved_7200, 0x7200)
+	CALL(trap, 54, cr.ifa)
+IVT_END(Reserved_7200)
+
+IVT_ENTRY(Reserved_7300, 0x7300)
+	CALL(trap, 55, cr.ifa)
+IVT_END(Reserved_7300)
+
+IVT_ENTRY(Reserved_7400, 0x7400)
+	CALL(trap, 56, cr.ifa)
+IVT_END(Reserved_7400)
+
+IVT_ENTRY(Reserved_7500, 0x7500)
+	CALL(trap, 57, cr.ifa)
+IVT_END(Reserved_7500)
+
+IVT_ENTRY(Reserved_7600, 0x7600)
+	CALL(trap, 58, cr.ifa)
+IVT_END(Reserved_7600)
+
+IVT_ENTRY(Reserved_7700, 0x7700)
+	CALL(trap, 59, cr.ifa)
+IVT_END(Reserved_7700)
+
+IVT_ENTRY(Reserved_7800, 0x7800)
+	CALL(trap, 60, cr.ifa)
+IVT_END(Reserved_7800)
+
+IVT_ENTRY(Reserved_7900, 0x7900)
+	CALL(trap, 61, cr.ifa)
+IVT_END(Reserved_7900)
+
+IVT_ENTRY(Reserved_7a00, 0x7a00)
+	CALL(trap, 62, cr.ifa)
+IVT_END(Reserved_7a00)
+
+IVT_ENTRY(Reserved_7b00, 0x7b00)
+	CALL(trap, 63, cr.ifa)
+IVT_END(Reserved_7b00)
+
+IVT_ENTRY(Reserved_7c00, 0x7c00)
+	CALL(trap, 64, cr.ifa)
+IVT_END(Reserved_7c00)
+
+IVT_ENTRY(Reserved_7d00, 0x7d00)
+	CALL(trap, 65, cr.ifa)
+IVT_END(Reserved_7d00)
+
+IVT_ENTRY(Reserved_7e00, 0x7e00)
+	CALL(trap, 66, cr.ifa)
+IVT_END(Reserved_7e00)
+
+IVT_ENTRY(Reserved_7f00, 0x7f00)
+	CALL(trap, 67, cr.ifa)
+IVT_END(Reserved_7f00)


Property changes on: trunk/sys/ia64/ia64/exception.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/gdb_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/gdb_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/gdb_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,188 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/gdb_machdep.c 219808 2011-03-21 01:09:50Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/signal.h>
+
+#include <machine/gdb_machdep.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/reg.h>
+
+#include <gdb/gdb.h>
+#include <gdb/gdb_int.h>
+
+/*
+ * Return a pointer to the saved value of GDB register 'regnum' in the
+ * current kdb thread context (kdb_thrctx), storing its size in *regsz.
+ * Registers that must be computed from several PCB fields (ip, bsp/
+ * bspstore) are materialized in the static 'synth' buffer, which makes
+ * this function non-reentrant.  Returns NULL for registers not saved
+ * in the PCB.
+ *
+ * NOTE(review): pcb_special.__spare == ~0UL appears to act as a
+ * sentinel distinguishing a context saved from a trapframe from one
+ * saved by a synchronous context switch (it selects cfm over pfs, the
+ * iip-based pc, and the ndirty-based bsp) -- confirm against the code
+ * that populates the PCB.
+ */
+void *
+gdb_cpu_getreg(int regnum, size_t *regsz)
+{
+	static uint64_t synth;	/* scratch for synthesized registers */
+	uint64_t cfm;
+
+	*regsz = gdb_cpu_regsz(regnum);
+	switch (regnum) {
+	/* Registers 0-127: general registers. */
+	case 1:  return (&kdb_thrctx->pcb_special.gp);
+	case 4:  return (&kdb_thrctx->pcb_preserved.gr4);
+	case 5:  return (&kdb_thrctx->pcb_preserved.gr5);
+	case 6:  return (&kdb_thrctx->pcb_preserved.gr6);
+	case 7:  return (&kdb_thrctx->pcb_preserved.gr7);
+	case 12: return (&kdb_thrctx->pcb_special.sp);
+	case 13: return (&kdb_thrctx->pcb_special.tp);
+	/* Registers 128-255: floating-point registers. */
+	case 130: return (&kdb_thrctx->pcb_preserved_fp.fr2);
+	case 131: return (&kdb_thrctx->pcb_preserved_fp.fr3);
+	case 132: return (&kdb_thrctx->pcb_preserved_fp.fr4);
+	case 133: return (&kdb_thrctx->pcb_preserved_fp.fr5);
+	case 144: return (&kdb_thrctx->pcb_preserved_fp.fr16);
+	case 145: return (&kdb_thrctx->pcb_preserved_fp.fr17);
+	case 146: return (&kdb_thrctx->pcb_preserved_fp.fr18);
+	case 147: return (&kdb_thrctx->pcb_preserved_fp.fr19);
+	case 148: return (&kdb_thrctx->pcb_preserved_fp.fr20);
+	case 149: return (&kdb_thrctx->pcb_preserved_fp.fr21);
+	case 150: return (&kdb_thrctx->pcb_preserved_fp.fr22);
+	case 151: return (&kdb_thrctx->pcb_preserved_fp.fr23);
+	case 152: return (&kdb_thrctx->pcb_preserved_fp.fr24);
+	case 153: return (&kdb_thrctx->pcb_preserved_fp.fr25);
+	case 154: return (&kdb_thrctx->pcb_preserved_fp.fr26);
+	case 155: return (&kdb_thrctx->pcb_preserved_fp.fr27);
+	case 156: return (&kdb_thrctx->pcb_preserved_fp.fr28);
+	case 157: return (&kdb_thrctx->pcb_preserved_fp.fr29);
+	case 158: return (&kdb_thrctx->pcb_preserved_fp.fr30);
+	case 159: return (&kdb_thrctx->pcb_preserved_fp.fr31);
+	/* Registers 320-327: branch registers. */
+	case 320:
+		/* b0: only available when rp was saved by a trap. */
+		if (kdb_thrctx->pcb_special.__spare == ~0UL)
+			return (&kdb_thrctx->pcb_special.rp);
+		break;
+	case 321: return (&kdb_thrctx->pcb_preserved.br1);
+	case 322: return (&kdb_thrctx->pcb_preserved.br2);
+	case 323: return (&kdb_thrctx->pcb_preserved.br3);
+	case 324: return (&kdb_thrctx->pcb_preserved.br4);
+	case 325: return (&kdb_thrctx->pcb_preserved.br5);
+	/* Registers 328-333: misc. other registers. */
+	case 330: return (&kdb_thrctx->pcb_special.pr);
+	case 331:
+		/* ip: iip plus the bundle slot from psr bits 41:42. */
+		if (kdb_thrctx->pcb_special.__spare == ~0UL) {
+			synth = kdb_thrctx->pcb_special.iip;
+			synth += (kdb_thrctx->pcb_special.psr >> 41) & 3;
+			return (&synth);
+		}
+		return (&kdb_thrctx->pcb_special.rp);
+	case 333:
+		return (&kdb_thrctx->pcb_special.cfm);
+		return (&kdb_thrctx->pcb_special.pfs);
+	/* Registers 334-461: application registers. */
+	case 350: return (&kdb_thrctx->pcb_special.rsc);
+	case 351: /* bsp */
+	case 352: /* bspstore. */
+		/* Rewind bspstore by the saved frame's local registers. */
+		synth = kdb_thrctx->pcb_special.bspstore;
+		if (kdb_thrctx->pcb_special.__spare == ~0UL) {
+			synth += kdb_thrctx->pcb_special.ndirty;
+		} else {
+			cfm = kdb_thrctx->pcb_special.pfs;
+			synth = ia64_bsp_adjust(synth,
+			    IA64_CFM_SOF(cfm) - IA64_CFM_SOL(cfm));
+		}
+		return (&synth);
+	case 353: return (&kdb_thrctx->pcb_special.rnat);
+	case 370: return (&kdb_thrctx->pcb_special.unat);
+	case 374: return (&kdb_thrctx->pcb_special.fpsr);
+	case 398:
+		if (kdb_thrctx->pcb_special.__spare == ~0UL)
+			return (&kdb_thrctx->pcb_special.pfs);
+		break;
+	case 399: return (&kdb_thrctx->pcb_preserved.lc);
+	}
+	return (NULL);
+}
+
+/*
+ * Write a register value from GDB into the kdb context.  Currently a
+ * no-op: even GDB_REG_PC is matched but ignored, so register writes
+ * from the debugger are silently discarded.
+ */
+void
+gdb_cpu_setreg(int regnum, void *val)
+{
+
+	switch (regnum) {
+	case GDB_REG_PC: break;
+	}
+}
+
+/*
+ * Map an exception vector to the POSIX signal reported to GDB.  Break
+ * and single-step become SIGTRAP; any other vector is returned offset
+ * by 100 so GDB reports the raw number instead of a signal name.
+ */
+int
+gdb_cpu_signal(int vector, int dummy __unused)
+{
+
+	if (vector == IA64_VEC_BREAK || vector == IA64_VEC_SINGLE_STEP_TRAP)
+		return (SIGTRAP);
+	/* Add 100 so GDB won't translate the vector into signal names. */
+	return (vector + 100);
+}
+
+/*
+ * Handle the machine-specific "qPart:dirty:read::<slot>" query, which
+ * asks for the value of a dirty stacked register by slot number.
+ * Returns 0 if the packet is not ours, -1 on error.  The actual
+ * lookup is compiled out (#if 0 below): after parsing the slot number
+ * the function currently always answers EINVAL.
+ */
+int
+gdb_cpu_query(void)
+{
+#if 0
+	uint64_t bspstore, *kstack;
+#endif
+	uintmax_t slot;
+
+	if (!gdb_rx_equal("Part:dirty:read::"))
+		return (0);
+
+	if (gdb_rx_varhex(&slot) < 0) {
+		gdb_tx_err(EINVAL);
+		return (-1);
+	}
+
+	/* Dead code below: always report EINVAL for now. */
+	gdb_tx_err(EINVAL);
+	return (-1);
+
+#if 0
+	/* slot is unsigned. No need to test for negative values. */
+	if (slot >= (kdb_frame->tf_special.ndirty >> 3)) {
+		return (-1);
+	}
+
+	/*
+	 * If the trapframe describes a kernel entry, bspstore holds
+	 * the address of the user backing store. Calculate the right
+	 * kernel stack address. See also ptrace_machdep().
+	 */
+	bspstore = kdb_frame->tf_special.bspstore;
+	kstack = (bspstore >= VM_MAXUSER_ADDRESS) ? (uint64_t*)bspstore :
+	    (uint64_t*)(kdb_thread->td_kstack + (bspstore & 0x1ffUL));
+	gdb_tx_begin('\0');
+	gdb_tx_mem((void*)(kstack + slot), 8);
+	gdb_tx_end();
+	return (1);
+#endif
+}


Property changes on: trunk/sys/ia64/ia64/gdb_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/genassym.c
===================================================================
--- trunk/sys/ia64/ia64/genassym.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/genassym.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,124 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1982, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/genassym.c 246715 2013-02-12 17:38:35Z marcel $
+ */
+
+#include "opt_compat.h"
+#include "opt_kstack_pages.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/assym.h>
+#include <sys/proc.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/errno.h>
+#include <sys/proc.h>
+#include <sys/mount.h>
+#include <sys/socket.h>
+#include <sys/resource.h>
+#include <sys/resourcevar.h>
+#include <sys/ucontext.h>
+#include <machine/frame.h>
+#include <machine/elf.h>
+#include <machine/pal.h>
+#include <machine/pcb.h>
+#include <sys/vmmeter.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <net/if.h>
+#include <netinet/in.h>
+
+/*
+ * Assembler symbol definitions consumed by locore.S, exception.S and
+ * friends.  Each ASSYM() emits a constant into assym.s at build time.
+ */
+#ifdef COMPAT_FREEBSD32
+ASSYM(COMPAT_FREEBSD32,	COMPAT_FREEBSD32);
+#endif
+
+/* ELF dynamic-table tags used by the self-relocation code. */
+ASSYM(DT_NULL,		DT_NULL);
+ASSYM(DT_RELA,		DT_RELA);
+ASSYM(DT_RELAENT,	DT_RELAENT);
+ASSYM(DT_RELASZ,	DT_RELASZ);
+ASSYM(DT_SYMTAB,	DT_SYMTAB);
+ASSYM(DT_SYMENT,	DT_SYMENT);
+
+/* errno values returned from assembly (e.g. copyin/copyout fault paths). */
+ASSYM(EFAULT,		EFAULT);
+ASSYM(ENAMETOOLONG,	ENAMETOOLONG);
+ASSYM(ERESTART,		ERESTART);
+
+ASSYM(FRAME_SYSCALL,	FRAME_SYSCALL);
+
+/* Pre-Boot Virtual Memory (PBVM) constants. */
+ASSYM(IA64_PBVM_BASE,	IA64_PBVM_BASE);
+ASSYM(IA64_PBVM_PAGE_SHIFT, IA64_PBVM_PAGE_SHIFT);
+ASSYM(IA64_PBVM_PGTBL,	IA64_PBVM_PGTBL);
+ASSYM(IA64_PBVM_RR,	IA64_PBVM_RR);
+
+ASSYM(IA64_VM_MINKERN_REGION, IA64_VM_MINKERN_REGION);
+
+ASSYM(KSTACK_PAGES,	KSTACK_PAGES);
+
+/* mcontext_t field offsets for context switch/restore code. */
+ASSYM(MC_PRESERVED,	offsetof(mcontext_t, mc_preserved));
+ASSYM(MC_PRESERVED_FP,	offsetof(mcontext_t, mc_preserved_fp));
+ASSYM(MC_SPECIAL,	offsetof(mcontext_t, mc_special));
+ASSYM(MC_SPECIAL_BSPSTORE, offsetof(mcontext_t, mc_special.bspstore));
+ASSYM(MC_SPECIAL_RNAT,	offsetof(mcontext_t, mc_special.rnat));
+
+ASSYM(PAGE_SHIFT,	PAGE_SHIFT);
+ASSYM(PAGE_SIZE,	PAGE_SIZE);
+
+/* Per-CPU (pcpu) field offsets. */
+ASSYM(PC_CURRENT_PMAP,	offsetof(struct pcpu, pc_md.current_pmap));
+ASSYM(PC_IDLETHREAD,	offsetof(struct pcpu, pc_idlethread));
+
+/* Process control block (pcb) field offsets. */
+ASSYM(PCB_CURRENT_PMAP,	offsetof(struct pcb, pcb_current_pmap));
+ASSYM(PCB_ONFAULT,	offsetof(struct pcb, pcb_onfault));
+ASSYM(PCB_SPECIAL_RP,	offsetof(struct pcb, pcb_special.rp));
+
+/* ia64 ELF relocation types handled by the in-kernel relocator. */
+ASSYM(R_IA_64_DIR64LSB,	R_IA_64_DIR64LSB);
+ASSYM(R_IA_64_FPTR64LSB, R_IA_64_FPTR64LSB);
+ASSYM(R_IA_64_NONE,	R_IA_64_NONE);
+ASSYM(R_IA_64_REL64LSB,	R_IA_64_REL64LSB);
+
+ASSYM(SIZEOF_PCB,	sizeof(struct pcb));
+ASSYM(SIZEOF_SPECIAL,	sizeof(struct _special));
+ASSYM(SIZEOF_TRAPFRAME,	sizeof(struct trapframe));
+
+/* struct thread field offsets. */
+ASSYM(TD_FLAGS,		offsetof(struct thread, td_flags));
+ASSYM(TD_KSTACK,	offsetof(struct thread, td_kstack));
+ASSYM(TD_PCB,		offsetof(struct thread, td_pcb));
+
+ASSYM(TDF_ASTPENDING,	TDF_ASTPENDING);
+ASSYM(TDF_NEEDRESCHED,	TDF_NEEDRESCHED);
+
+ASSYM(UC_MCONTEXT,	offsetof(ucontext_t, uc_mcontext));
+
+ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);


Property changes on: trunk/sys/ia64/ia64/genassym.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/highfp.c
===================================================================
--- trunk/sys/ia64/ia64/highfp.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/highfp.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,180 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2009 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/highfp.c 271211 2014-09-06 22:17:54Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/smp.h>
+
+static struct mtx ia64_highfp_mtx;
+
+/*
+ * Initialize the spin mutex protecting high-FP ownership state
+ * (pcb_fpcpu / pc_fpcurthread).  Run once at SI_SUB_LOCK time.
+ */
+static void
+ia64_highfp_init(void *_)
+{
+	mtx_init(&ia64_highfp_mtx, "High FP lock", NULL, MTX_SPIN);
+}
+SYSINIT(ia64_highfp_init, SI_SUB_LOCK, SI_ORDER_ANY, ia64_highfp_init, NULL);
+
+#ifdef SMP
+/*
+ * Ask a remote CPU to save its high-FP state by sending it the highfp
+ * IPI, then sleep on that CPU's pc_fpcurthread until the IPI handler
+ * (ia64_highfp_save_ipi) wakes us.  Called with ia64_highfp_mtx held;
+ * msleep_spin drops and re-acquires it.
+ */
+static int
+ia64_highfp_ipi(struct pcpu *cpu)
+{
+	int error;
+
+	ipi_send(cpu, ia64_ipi_highfp);
+	error = msleep_spin(&cpu->pc_fpcurthread, &ia64_highfp_mtx,
+	    "High FP", 0);
+	return (error);
+}
+#endif
+
+/*
+ * Discard the thread's high-FP register state without saving it.
+ * Clears the ownership links in both directions (pcb -> cpu and
+ * cpu -> thread) and sets PSR.dfh so the next high-FP use traps.
+ * Returns 1 if state was owned by a CPU (and thus dropped), else 0.
+ */
+int
+ia64_highfp_drop(struct thread *td)
+{
+	struct pcb *pcb;
+	struct pcpu *cpu;
+
+	pcb = td->td_pcb;
+
+	mtx_lock_spin(&ia64_highfp_mtx);
+	cpu = pcb->pcb_fpcpu;
+	if (cpu != NULL) {
+		KASSERT(cpu->pc_fpcurthread == td,
+		    ("cpu->pc_fpcurthread != td"));
+		/* Disable high FP so a future use faults back into us. */
+		td->td_frame->tf_special.psr |= IA64_PSR_DFH;
+		pcb->pcb_fpcpu = NULL;
+		cpu->pc_fpcurthread = NULL;
+	}
+	mtx_unlock_spin(&ia64_highfp_mtx);
+
+	return ((cpu != NULL) ? 1 : 0);
+}
+
+/*
+ * Give thread 'td' ownership of the high-FP registers on the current
+ * CPU.  Any state for td held on a remote CPU is pulled back via IPI;
+ * any other thread's state on this CPU is saved and evicted first.
+ * Clears PSR.dfh in the trapframe so td can use the high FP registers.
+ * Returns 1 if another thread's state had to be evicted, else 0.
+ */
+int
+ia64_highfp_enable(struct thread *td, struct trapframe *tf)
+{
+	struct pcb *pcb;
+	struct pcpu *cpu;
+	struct thread *td1;
+
+	pcb = td->td_pcb;
+
+	mtx_lock_spin(&ia64_highfp_mtx);
+	cpu = pcb->pcb_fpcpu;
+#ifdef SMP
+	/* td's state lives on another CPU: make that CPU save it. */
+	if (cpu != NULL && cpu != pcpup) {
+		KASSERT(cpu->pc_fpcurthread == td,
+		    ("cpu->pc_fpcurthread != td"));
+		/* NOTE(review): ia64_highfp_ipi() error return is ignored. */
+		ia64_highfp_ipi(cpu);
+	}
+#endif
+	/* Evict whatever other thread currently owns this CPU's high FP. */
+	td1 = PCPU_GET(fpcurthread);
+	if (td1 != NULL && td1 != td) {
+		KASSERT(td1->td_pcb->pcb_fpcpu == pcpup,
+		    ("td1->td_pcb->pcb_fpcpu != pcpup"));
+		save_high_fp(&td1->td_pcb->pcb_high_fp);
+		td1->td_frame->tf_special.psr |= IA64_PSR_DFH;
+		td1->td_pcb->pcb_fpcpu = NULL;
+		PCPU_SET(fpcurthread, NULL);
+		td1 = NULL;
+	}
+	/* CPU is free (or was already): load td's saved state. */
+	if (td1 == NULL) {
+		KASSERT(pcb->pcb_fpcpu == NULL, ("pcb->pcb_fpcpu != NULL"));
+		KASSERT(PCPU_GET(fpcurthread) == NULL,
+		    ("PCPU_GET(fpcurthread) != NULL"));
+		restore_high_fp(&pcb->pcb_high_fp);
+		PCPU_SET(fpcurthread, td);
+		pcb->pcb_fpcpu = pcpup;
+		tf->tf_special.psr &= ~IA64_PSR_MFH;
+	}
+	tf->tf_special.psr &= ~IA64_PSR_DFH;
+	mtx_unlock_spin(&ia64_highfp_mtx);
+
+	return ((td1 != NULL) ? 1 : 0);
+}
+
+/*
+ * Save td's high-FP register state into its pcb.  If the state lives
+ * on a remote CPU, ask that CPU to save it via IPI; if it lives here,
+ * save it directly and sever the ownership links.  Returns 1 if state
+ * was owned by a CPU, else 0.
+ */
+int
+ia64_highfp_save(struct thread *td)
+{
+	struct pcb *pcb;
+	struct pcpu *cpu;
+
+	pcb = td->td_pcb;
+
+	mtx_lock_spin(&ia64_highfp_mtx);
+	cpu = pcb->pcb_fpcpu;
+#ifdef SMP
+	if (cpu != NULL && cpu != pcpup) {
+		KASSERT(cpu->pc_fpcurthread == td,
+		    ("cpu->pc_fpcurthread != td"));
+		/* Remote CPU: let ia64_highfp_save_ipi() do the save. */
+		ia64_highfp_ipi(cpu);
+	} else
+#endif
+	if (cpu != NULL) {
+		KASSERT(cpu->pc_fpcurthread == td,
+		    ("cpu->pc_fpcurthread != td"));
+		save_high_fp(&pcb->pcb_high_fp);
+		td->td_frame->tf_special.psr |= IA64_PSR_DFH;
+		pcb->pcb_fpcpu = NULL;
+		cpu->pc_fpcurthread = NULL;
+	}
+	mtx_unlock_spin(&ia64_highfp_mtx);
+
+	return ((cpu != NULL) ? 1 : 0);
+}
+
+#ifdef SMP
+/*
+ * IPI handler: save this CPU's high-FP state (if any) into the owning
+ * thread's pcb and release ownership, then wake any thread sleeping in
+ * ia64_highfp_ipi() on our pc_fpcurthread channel.  Returns 1 if state
+ * was saved, else 0.
+ */
+int
+ia64_highfp_save_ipi(void)
+{
+	struct thread *td;
+
+	mtx_lock_spin(&ia64_highfp_mtx);
+	td = PCPU_GET(fpcurthread);
+	if (td != NULL) {
+		KASSERT(td->td_pcb->pcb_fpcpu == pcpup,
+		    ("td->td_pcb->pcb_fpcpu != pcpup"));
+		save_high_fp(&td->td_pcb->pcb_high_fp);
+		td->td_frame->tf_special.psr |= IA64_PSR_DFH;
+		td->td_pcb->pcb_fpcpu = NULL;
+		PCPU_SET(fpcurthread, NULL);
+	}
+	/* Unblock the requester waiting in ia64_highfp_ipi(). */
+	wakeup(PCPU_PTR(fpcurthread));
+	mtx_unlock_spin(&ia64_highfp_mtx);
+
+	return ((td != NULL) ? 1 : 0);
+}
+#endif


Property changes on: trunk/sys/ia64/ia64/highfp.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/in_cksum.c
===================================================================
--- trunk/sys/ia64/ia64/in_cksum.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/in_cksum.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,242 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/ia64/in_cksum.c 139790 2005-01-06 22:18:23Z imp $ */
+/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ * Copyright (c) 1996
+ *	Matt Thomas <matt at 3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)in_cksum.c	8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ *    (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+/* Fold the end-around carry of a 16-bit one's complement sum. */
+#define ADDCARRY(x)  (x > 65535 ? x -= 65535 : x)
+/* Fold a 64-bit partial sum down to 32 bits via its four 16-bit words. */
+#define REDUCE32							  \
+    {									  \
+	q_util.q = sum;							  \
+	sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3];	  \
+    }
+/* Fold a 64-bit partial sum down to a 16-bit one's complement sum. */
+#define REDUCE16							  \
+    {									  \
+	q_util.q = sum;							  \
+	l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+	sum = l_util.s[0] + l_util.s[1];				  \
+	ADDCARRY(sum);							  \
+    }
+
+/*
+ * Byte-masks for partial words, indexed by (alignment offset << 2) +
+ * remaining byte count; used to include only the valid bytes of the
+ * first/last 32-bit word.  Assumes little-endian byte order.
+ */
+static const u_int32_t in_masks[] = {
+	/*0 bytes*/ /*1 byte*/	/*2 bytes*/ /*3 bytes*/
+	0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF,	/* offset 0 */
+	0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00,	/* offset 1 */
+	0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000,	/* offset 2 */
+	0x00000000, 0xFF000000, 0xFF000000, 0xFF000000,	/* offset 3 */
+};
+
+/* Overlay to split a 32-bit value into two 16-bit halves. */
+union l_util {
+	u_int16_t s[2];
+	u_int32_t l;
+};
+/* Overlay to split a 64-bit sum into 16- and 32-bit pieces. */
+union q_util {
+	u_int16_t s[4];
+	u_int32_t l[2];
+	u_int64_t q;
+};
+
+/*
+ * Compute the 32-bit-folded one's complement sum of 'len' bytes at
+ * 'buf'.  Fast path for an aligned 20-byte IP header; otherwise aligns
+ * to a 32-bit boundary using in_masks, then sums 32/16/4 bytes at a
+ * time with a one-word software prefetch of the next cache line.
+ */
+static u_int64_t
+in_cksumdata(const void *buf, int len)
+{
+	const u_int32_t *lw = (const u_int32_t *) buf;
+	u_int64_t sum = 0;
+	u_int64_t prefilled;
+	int offset;
+	union q_util q_util;
+
+	/* Common case: word-aligned 20-byte IP header. */
+	if ((3 & (long) lw) == 0 && len == 20) {
+	     sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+	     REDUCE32;
+	     return sum;
+	}
+
+	/* Handle a misaligned start: mask off bytes before the buffer. */
+	if ((offset = 3 & (long) lw) != 0) {
+		const u_int32_t *masks = in_masks + (offset << 2);
+		lw = (u_int32_t *) (((long) lw) - offset);
+		sum = *lw++ & masks[len >= 3 ? 3 : len];
+		len -= 4 - offset;
+		if (len <= 0) {
+			REDUCE32;
+			return sum;
+		}
+	}
+#if 0
+	/*
+	 * Force to cache line boundary.
+	 */
+	offset = 32 - (0x1f & (long) lw);
+	if (offset < 32 && len > offset) {
+		len -= offset;
+		if (4 & offset) {
+			sum += (u_int64_t) lw[0];
+			lw += 1;
+		}
+		if (8 & offset) {
+			sum += (u_int64_t) lw[0] + lw[1];
+			lw += 2;
+		}
+		if (16 & offset) {
+			sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+			lw += 4;
+		}
+	}
+#endif
+	/*
+	 * access prefilling to start load of next cache line.
+	 * then add current cache line
+	 * save result of prefilling for loop iteration.
+	 */
+	prefilled = lw[0];
+	while ((len -= 32) >= 4) {
+		u_int64_t prefilling = lw[8];
+		sum += prefilled + lw[1] + lw[2] + lw[3]
+			+ lw[4] + lw[5] + lw[6] + lw[7];
+		lw += 8;
+		prefilled = prefilling;
+	}
+	if (len >= 0) {
+		/* One final full 32-byte chunk (prefilled holds lw[0]). */
+		sum += prefilled + lw[1] + lw[2] + lw[3]
+			+ lw[4] + lw[5] + lw[6] + lw[7];
+		lw += 8;
+	} else {
+		len += 32;
+	}
+	/* Sum remaining 16-byte and 4-byte chunks. */
+	while ((len -= 16) >= 0) {
+		sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+		lw += 4;
+	}
+	len += 16;
+	while ((len -= 4) >= 0) {
+		sum += (u_int64_t) *lw++;
+	}
+	len += 4;
+	/* Trailing 1-3 bytes: mask the final partial word. */
+	if (len > 0)
+		sum += (u_int64_t) (in_masks[len] & *lw);
+	REDUCE32;
+	return sum;
+}
+
+/*
+ * Add two 16-bit values using one's complement (end-around carry)
+ * arithmetic, as used by the Internet checksum.
+ */
+u_short
+in_addword(u_short a, u_short b)
+{
+	u_int64_t total;
+
+	total = (u_int64_t)a + b;
+	ADDCARRY(total);
+	return (total);
+}
+
+/*
+ * Compute the 16-bit one's complement sum of the three 32-bit words of
+ * a pseudo-header (addresses plus length/protocol), for TCP/UDP use.
+ */
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+	union q_util q_util;
+	union l_util l_util;
+	u_int64_t sum = (u_int64_t)a + b + c;
+
+	REDUCE16;
+	return (sum);
+}
+
+/*
+ * Compute the Internet checksum over an mbuf chain, covering 'len'
+ * bytes after skipping the first 'skip' bytes.  Odd-aligned segments
+ * are handled by byte-swapping their partial sums (the << 8 below).
+ * Returns the one's complement of the folded 16-bit sum.
+ */
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+	u_int64_t sum = 0;
+	int mlen = 0;
+	int clen = 0;
+	caddr_t addr;
+	union q_util q_util;
+	union l_util l_util;
+
+	/* Walk past 'skip' bytes; jump into the sum loop mid-mbuf. */
+        len -= skip;
+        for (; skip && m; m = m->m_next) {
+                if (m->m_len > skip) {
+                        mlen = m->m_len - skip;
+			addr = mtod(m, caddr_t) + skip;
+                        goto skip_start;
+                } else {
+                        skip -= m->m_len;
+                }
+        }
+
+	for (; m && len; m = m->m_next) {
+		if (m->m_len == 0)
+			continue;
+		mlen = m->m_len;
+		addr = mtod(m, caddr_t);
+skip_start:
+		if (len < mlen)
+			mlen = len;
+		/*
+		 * If this segment starts at odd parity relative to the
+		 * bytes summed so far, shift its sum one byte to keep
+		 * the one's complement sum byte-order consistent.
+		 */
+		if ((clen ^ (long) addr) & 1)
+		    sum += in_cksumdata(addr, mlen) << 8;
+		else
+		    sum += in_cksumdata(addr, mlen);
+
+		clen += mlen;
+		len -= mlen;
+	}
+	REDUCE16;
+	return (~sum & 0xffff);
+}
+
+/*
+ * Checksum a standard (no options) IP header in one call.
+ */
+u_int
+in_cksum_hdr(const struct ip *ip)
+{
+	union q_util q_util;
+	union l_util l_util;
+	u_int64_t sum;
+
+	sum = in_cksumdata(ip, sizeof(struct ip));
+	REDUCE16;
+	return (~sum & 0xffff);
+}


Property changes on: trunk/sys/ia64/ia64/in_cksum.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/interrupt.c
===================================================================
--- trunk/sys/ia64/ia64/interrupt.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/interrupt.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,412 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2010-2011 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_ddb.h"
+#include "opt_xtrace.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/interrupt.c 268200 2014-07-02 23:47:43Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/vmmeter.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/malloc.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/syslog.h>
+
+#include <machine/cpu.h>
+#include <machine/fpu.h>
+#include <machine/frame.h>
+#include <machine/intr.h>
+#include <machine/intrcnt.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/reg.h>
+#include <machine/smp.h>
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+struct ia64_intr {
+	struct intr_event *event;	/* interrupt event */
+	volatile long *cntp;		/* interrupt counter */
+	struct sapic *sapic;
+	u_int	irq;
+};
+
+ia64_ihtype *ia64_handler[IA64_NXIVS];
+
+static enum ia64_xiv_use ia64_xiv[IA64_NXIVS];
+static struct ia64_intr *ia64_intrs[IA64_NXIVS];
+
+static ia64_ihtype ia64_ih_invalid;
+static ia64_ihtype ia64_ih_irq;
+
+/*
+ * Reset the external interrupt vector (XIV) tables: every vector gets
+ * the invalid handler and is marked free, then XIV 15 is reserved for
+ * the architecture (it is the spurious-interrupt vector).
+ */
+void
+ia64_xiv_init(void)
+{
+	u_int vec;
+
+	for (vec = 0; vec < IA64_NXIVS; vec++) {
+		ia64_handler[vec] = ia64_ih_invalid;
+		ia64_xiv[vec] = IA64_XIV_FREE;
+		ia64_intrs[vec] = NULL;
+	}
+	(void)ia64_xiv_reserve(15, IA64_XIV_ARCH, NULL);
+}
+
+/*
+ * Release a previously reserved XIV.  The caller must pass the same
+ * use ('what') it reserved the vector with; freeing an architectural
+ * vector or passing IA64_XIV_FREE is rejected.  Returns 0 on success,
+ * EINVAL on a bad argument, ENXIO on a use mismatch.
+ */
+int
+ia64_xiv_free(u_int xiv, enum ia64_xiv_use what)
+{
+
+	if (xiv >= IA64_NXIVS || what == IA64_XIV_FREE ||
+	    what == IA64_XIV_ARCH)
+		return (EINVAL);
+	if (ia64_xiv[xiv] != what)
+		return (ENXIO);
+	ia64_handler[xiv] = ia64_ih_invalid;
+	ia64_xiv[xiv] = IA64_XIV_FREE;
+	return (0);
+}
+
+/*
+ * Reserve XIV 'xiv' for use 'what' and install handler 'ih' (or the
+ * invalid handler when ih is NULL).  Returns 0 on success, EINVAL on
+ * bad arguments, EBUSY when the vector is already taken.
+ */
+int
+ia64_xiv_reserve(u_int xiv, enum ia64_xiv_use what, ia64_ihtype ih)
+{
+
+	if (xiv >= IA64_NXIVS)
+		return (EINVAL);
+	if (what == IA64_XIV_FREE)
+		return (EINVAL);
+	if (ia64_xiv[xiv] != IA64_XIV_FREE)
+		return (EBUSY);
+	ia64_xiv[xiv] = what;
+	ia64_handler[xiv] = (ih == NULL) ? ia64_ih_invalid: ih;
+	if (bootverbose)
+		printf("XIV %u: use=%u, IH=%p\n", xiv, what, ih);
+	return (0);
+}
+
+/*
+ * Allocate a free XIV whose hardware priority matches the ithread
+ * priority 'prio' as closely as possible.  XIVs are grouped 16 per
+ * hardware priority level, higher vectors meaning higher priority.
+ * Searches upward from the target group first, then downward.
+ * Returns the reserved XIV, or 0 when none is available.
+ */
+u_int
+ia64_xiv_alloc(u_int prio, enum ia64_xiv_use what, ia64_ihtype ih)
+{
+	u_int hwprio;
+	u_int xiv0, xiv;
+
+	hwprio = prio >> 2;
+	if (hwprio > IA64_MAX_HWPRIO)
+		hwprio = IA64_MAX_HWPRIO;
+
+	/* First vector of the 16-vector group for this hw priority. */
+	xiv0 = IA64_NXIVS - (hwprio + 1) * 16;
+
+	KASSERT(xiv0 >= IA64_MIN_XIV, ("%s: min XIV", __func__));
+	KASSERT(xiv0 < IA64_NXIVS, ("%s: max XIV", __func__));
+
+	xiv = xiv0;
+	while (xiv < IA64_NXIVS && ia64_xiv_reserve(xiv, what, ih))
+		xiv++;
+
+	if (xiv < IA64_NXIVS)
+		return (xiv);
+
+	/* Nothing upward; retry downward toward lower priorities. */
+	xiv = xiv0;
+	while (xiv >= IA64_MIN_XIV && ia64_xiv_reserve(xiv, what, ih))
+		xiv--;
+
+	return ((xiv >= IA64_MIN_XIV) ? xiv : 0);
+}
+
+/*
+ * Post-ithread hook: signal end-of-interrupt to the I/O SAPIC for the
+ * XIV encoded in 'arg'.
+ */
+static void
+ia64_intr_eoi(void *arg)
+{
+	struct ia64_intr *intr;
+	u_int xiv;
+
+	xiv = (uintptr_t)arg;
+	intr = ia64_intrs[xiv];
+	KASSERT(intr != NULL, ("%s", __func__));
+	sapic_eoi(intr->sapic, xiv);
+}
+
+/*
+ * Pre-ithread hook: mask the IRQ at the I/O SAPIC and EOI the XIV so
+ * further interrupts are held off while the ithread runs.
+ */
+static void
+ia64_intr_mask(void *arg)
+{
+	struct ia64_intr *intr;
+	u_int xiv;
+
+	xiv = (uintptr_t)arg;
+	intr = ia64_intrs[xiv];
+	KASSERT(intr != NULL, ("%s", __func__));
+	sapic_mask(intr->sapic, intr->irq);
+	sapic_eoi(intr->sapic, xiv);
+}
+
+/*
+ * Post-ithread hook: re-enable the IRQ at the I/O SAPIC once the
+ * ithread has finished.
+ */
+static void
+ia64_intr_unmask(void *arg)
+{
+	struct ia64_intr *intr;
+	u_int xiv;
+
+	xiv = (uintptr_t)arg;
+	intr = ia64_intrs[xiv];
+	KASSERT(intr != NULL, ("%s", __func__));
+	sapic_unmask(intr->sapic, intr->irq);
+}
+
+/*
+ * Attach a filter/handler to IRQ 'irq'.  On first use of an IRQ this
+ * allocates an XIV matching the handler priority, creates the interrupt
+ * event and enables the IRQ at its I/O SAPIC; later calls just add the
+ * handler to the existing event.  Returns 0 or an errno value.
+ */
+int
+ia64_setup_intr(const char *name, int irq, driver_filter_t filter,
+    driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
+{
+	struct ia64_intr *i;
+	struct sapic *sa;
+	char *intrname;
+	u_int prio, xiv;
+	int error;
+
+	prio = intr_priority(flags);
+	if (prio > PRI_MAX_ITHD)
+		return (EINVAL);
+
+	/* XXX lock */
+
+	/* Get the I/O SAPIC and XIV that corresponds to the IRQ. */
+	sa = sapic_lookup(irq, &xiv);
+	if (sa == NULL) {
+		/* XXX unlock */
+		return (EINVAL);
+	}
+
+	/*
+	 * No XIV yet: drop the (notional) lock to malloc with M_WAITOK,
+	 * then re-lookup in case another thread assigned one meanwhile.
+	 */
+	if (xiv == 0) {
+		/* XXX unlock */
+		i = malloc(sizeof(struct ia64_intr), M_DEVBUF,
+		    M_ZERO | M_WAITOK);
+		/* XXX lock */
+		sa = sapic_lookup(irq, &xiv);
+		KASSERT(sa != NULL, ("sapic_lookup"));
+		if (xiv != 0)
+			free(i, M_DEVBUF);
+	}
+
+	/*
+	 * If the IRQ has no XIV assigned to it yet, assign one based
+	 * on the priority.
+	 */
+	if (xiv == 0) {
+		xiv = ia64_xiv_alloc(prio, IA64_XIV_IRQ, ia64_ih_irq);
+		if (xiv == 0) {
+			/* XXX unlock */
+			free(i, M_DEVBUF);
+			return (ENOSPC);
+		}
+
+		error = intr_event_create(&i->event, (void *)(uintptr_t)xiv,
+		    0, irq, ia64_intr_mask, ia64_intr_unmask, ia64_intr_eoi,
+		    NULL, "irq%u:", irq);
+		if (error) {
+			ia64_xiv_free(xiv, IA64_XIV_IRQ);
+			/* XXX unlock */
+			free(i, M_DEVBUF);
+			return (error);
+		}
+
+		i->sapic = sa;
+		i->irq = irq;
+		i->cntp = intrcnt + xiv;
+		ia64_intrs[xiv] = i;
+
+		/* XXX unlock */
+
+		sapic_enable(sa, irq, xiv);
+
+		/* Record the name in the space-padded intrnames table. */
+		if (name != NULL && *name != '\0') {
+			/* XXX needs abstraction. Too error prone. */
+			intrname = intrnames + xiv * INTRNAME_LEN;
+			memset(intrname, ' ', INTRNAME_LEN - 1);
+			bcopy(name, intrname, strlen(name));
+		}
+	} else {
+		/* IRQ already wired up; reuse its descriptor. */
+		i = ia64_intrs[xiv];
+		/* XXX unlock */
+	}
+
+	KASSERT(i != NULL, ("XIV mapping bug"));
+
+	error = intr_event_add_handler(i->event, name, filter, handler, arg,
+	    prio, flags, cookiep);
+	return (error);
+}
+
+/*
+ * Detach an interrupt handler previously registered by
+ * ia64_setup_intr(); 'cookie' is the value returned via cookiep.
+ */
+int
+ia64_teardown_intr(void *cookie)
+{
+	int error;
+
+	error = intr_event_remove_handler(cookie);
+	return (error);
+}
+
+/*
+ * Distribute all IRQ-type XIVs round-robin over the awake CPUs,
+ * walking from the highest-priority XIV down and from the highest
+ * CPU id down, so interrupt load is spread across the system.
+ */
+void
+ia64_bind_intr(void)
+{
+	struct ia64_intr *i;
+	struct pcpu *pc;
+	u_int xiv;
+	int cpu;
+
+	cpu = MAXCPU;
+	for (xiv = IA64_NXIVS - 1; xiv >= IA64_MIN_XIV; xiv--) {
+		if (ia64_xiv[xiv] != IA64_XIV_IRQ)
+			continue;
+		i = ia64_intrs[xiv];
+		/* Find the next lower awake CPU, wrapping at 0. */
+		do {
+			cpu = (cpu == 0) ? MAXCPU - 1 : cpu - 1;
+			pc = cpuid_to_pcpu[cpu];
+		} while (pc == NULL || !pc->pc_md.awake);
+		sapic_bind_intr(i->irq, pc);
+	}
+}
+
+/*
+ * Interrupt handlers.
+ */
+
+/*
+ * Low-level external interrupt entry.  Drains the CPU's interrupt
+ * vector register (IVR): each read returns the highest pending XIV,
+ * which is dispatched through ia64_handler[]; XIV 15 means "no
+ * interrupt pending" (spurious) and ends the loop.  On return to
+ * user mode, pending ASTs are processed with interrupts enabled.
+ */
+void
+ia64_handle_intr(struct trapframe *tf)
+{
+	struct thread *td;
+	u_int xiv;
+
+	td = curthread;
+	ia64_set_fpsr(IA64_FPSR_DEFAULT);
+
+#ifdef XTRACE
+	ia64_xtrace_save();
+#endif
+
+	PCPU_INC(cnt.v_intr);
+
+	xiv = ia64_get_ivr();
+	ia64_srlz_d();
+	if (xiv == 15) {
+		PCPU_INC(md.stats.pcs_nstrays);
+		goto out;
+	}
+
+	critical_enter();
+	do {
+		CTR3(KTR_INTR, "INTR: XIV=%u, #%u: frame=%p", xiv,
+		    PCPU_GET(cnt.v_intr), tf);
+		/* A zero return means the handler did not EOI itself. */
+		if (!(ia64_handler[xiv])(td, xiv, tf)) {
+			ia64_set_eoi(0);
+			ia64_srlz_d();
+		}
+		xiv = ia64_get_ivr();
+		ia64_srlz_d();
+	} while (xiv != 15);
+	critical_exit();
+
+ out:
+	if (TRAPF_USERMODE(tf)) {
+		while (td->td_flags & (TDF_ASTPENDING|TDF_NEEDRESCHED)) {
+			ia64_enable_intr();
+			ast(tf);
+			ia64_disable_intr();
+		}
+	}
+}
+
+/*
+ * Default handler for unreserved XIVs: receiving one indicates a bug,
+ * so panic.  The return statement is unreachable but satisfies the
+ * ia64_ihtype signature.
+ */
+static u_int
+ia64_ih_invalid(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+	panic("invalid XIV: %u", xiv);
+	return (0);
+}
+
+/*
+ * Handler for IRQ-type XIVs: bump the per-vector counter and hand the
+ * interrupt to the generic intr_event framework.  If no handler claims
+ * it, mask the source and log it as stray.  Returns 0 so the caller
+ * (ia64_handle_intr) performs the EOI.
+ */
+static u_int
+ia64_ih_irq(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+	struct ia64_intr *i;
+	struct intr_event *ie;			/* our interrupt event */
+
+	PCPU_INC(md.stats.pcs_nhwints);
+
+	/* Find the interrupt thread for this XIV. */
+	i = ia64_intrs[xiv];
+	KASSERT(i != NULL, ("%s: unassigned XIV", __func__));
+
+	(*i->cntp)++;
+
+	ie = i->event;
+	KASSERT(ie != NULL, ("%s: interrupt without event", __func__));
+
+	if (intr_event_handle(ie, tf) != 0) {
+		/* Nobody claimed it: silence the source. */
+		ia64_intr_mask((void *)(uintptr_t)xiv);
+		log(LOG_ERR, "stray irq%u\n", i->irq);
+	}
+
+	return (0);
+}
+
+#ifdef DDB
+
+/*
+ * DDB helper: print the assignment of one XIV.  Unassigned vectors
+ * are reported only when 'always' is nonzero.
+ */
+static void
+db_print_xiv(u_int xiv, int always)
+{
+	struct ia64_intr *i;
+
+	i = ia64_intrs[xiv];
+	if (i == NULL) {
+		if (always)
+			db_printf("XIV %u: unassigned\n", xiv);
+		return;
+	}
+	db_printf("XIV %u (%p): ", xiv, i);
+	sapic_print(i->sapic, i->irq);
+}
+
+/*
+ * DDB "show xiv" command.  With an address argument, the hex digits
+ * are reinterpreted as decimal (e.g. typed 0x42 -> XIV 42) and that
+ * single vector is shown; without one, all assigned XIVs are listed.
+ */
+DB_SHOW_COMMAND(xiv, db_show_xiv)
+{
+	u_int xiv;
+
+	if (have_addr) {
+		/* Decode the hex-entered address as a decimal number. */
+		xiv = ((addr >> 4) % 16) * 10 + (addr % 16);
+		if (xiv >= IA64_NXIVS)
+			db_printf("error: XIV %u not in range [0..%u]\n",
+			    xiv, IA64_NXIVS - 1);
+		else
+			db_print_xiv(xiv, 1);
+	} else {
+		for (xiv = 0; xiv < IA64_NXIVS; xiv++)
+			db_print_xiv(xiv, 0);
+	}
+}
+
+#endif


Property changes on: trunk/sys/ia64/ia64/interrupt.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/iodev_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/iodev_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/iodev_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,209 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2010 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/iodev_machdep.c 270296 2014-08-21 19:51:07Z emaste $");
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/efi.h>
+#include <sys/fcntl.h>
+#include <sys/ioccom.h>
+#include <sys/malloc.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+#include <machine/iodev.h>
+
+static int iodev_efivar_getvar(struct iodev_efivar_req *req);
+static int iodev_efivar_nextname(struct iodev_efivar_req *req);
+static int iodev_efivar_setvar(struct iodev_efivar_req *req);
+
+/* ARGSUSED */
+/* Open hook for /dev/io: no per-open state, always succeeds. */
+int
+iodev_open(struct thread *td __unused)
+{
+
+	return (0);
+}
+
+/* ARGSUSED */
+/* Close hook for /dev/io: nothing to release, always succeeds. */
+int
+iodev_close(struct thread *td __unused)
+{
+
+	return (0);
+}
+
+/*
+ * ioctl dispatch for /dev/io.  Only IODEV_EFIVAR is supported; its
+ * 'access' field selects get/nextname/set on EFI variables.  Firmware-
+ * level failures are reported in req->result with a 0 return; an errno
+ * return indicates a request-level failure (bad args, copyin/out).
+ */
+int
+iodev_ioctl(u_long cmd, caddr_t data)
+{
+	struct iodev_efivar_req *efivar_req;
+	int error;
+
+	switch (cmd) {
+	case IODEV_EFIVAR:
+		efivar_req = (struct iodev_efivar_req *)data;
+		efivar_req->result = 0;		/* So it's well-defined */
+		switch (efivar_req->access) {
+		case IODEV_EFIVAR_GETVAR:
+			error = iodev_efivar_getvar(efivar_req);
+			break;
+		case IODEV_EFIVAR_NEXTNAME:
+			error = iodev_efivar_nextname(efivar_req);
+			break;
+		case IODEV_EFIVAR_SETVAR:
+			error = iodev_efivar_setvar(efivar_req);
+			break;
+		default:
+			error = EINVAL;
+			break;
+		}
+		break;
+	default:
+		error = ENOIOCTL;
+	}
+
+	return (error);
+}
+
+/*
+ * Read an EFI variable.  The name must be an even number of bytes
+ * (UCS-2) of at least 4; the data buffer must be non-empty.  EOVERFLOW
+ * and ENOENT from the firmware are not errors of the request itself
+ * and are reported through req->result instead of the return value.
+ */
+static int
+iodev_efivar_getvar(struct iodev_efivar_req *req)
+{
+	void *data;
+	efi_char *name;
+	int error;
+
+	if ((req->namesize & 1) != 0 || req->namesize < 4)
+		return (EINVAL);
+	if (req->datasize == 0)
+		return (EINVAL);
+
+	/*
+	 * Pre-zero the allocated memory and don't copy the last 2 bytes
+	 * of the name. That should be the closing nul character (ucs-2)
+	 * and if not, then we ensured a nul-terminating string. This is
+	 * to protect the firmware and thus ourselves.
+	 */
+	name = malloc(req->namesize, M_TEMP, M_WAITOK | M_ZERO);
+	error = copyin(req->name, name, req->namesize - 2);
+	if (error) {
+		free(name, M_TEMP);
+		return (error);
+	}
+
+	data = malloc(req->datasize, M_TEMP, M_WAITOK);
+	error = efi_var_get(name, &req->vendor, &req->attrib, &req->datasize,
+	    data);
+	if (error == EOVERFLOW || error == ENOENT) {
+		req->result = error;
+		error = 0;
+	}
+	/* Copy the data out only when the firmware call fully succeeded. */
+	if (!error && !req->result)
+		error = copyout(data, req->data, req->datasize);
+
+	free(data, M_TEMP);
+	free(name, M_TEMP);
+	return (error);
+}
+
+/*
+ * Enumerate EFI variable names: given the previous name (or an empty
+ * string to start), return the next name and its vendor GUID.  The
+ * full name including its UCS-2 terminator is copied in here, unlike
+ * getvar/setvar which force-terminate.  EOVERFLOW/ENOENT go to
+ * req->result, not the return value.
+ */
+static int 
+iodev_efivar_nextname(struct iodev_efivar_req *req) 
+{
+	efi_char *name;
+	int error;
+
+	/* Enforce a reasonable minimum size of the name buffer. */
+	if (req->namesize < 4)
+		return (EINVAL);
+
+	name = malloc(req->namesize, M_TEMP, M_WAITOK);
+	error = copyin(req->name, name, req->namesize);
+	if (error) {
+		free(name, M_TEMP);
+		return (error);
+	}
+
+	error = efi_var_nextname(&req->namesize, name, &req->vendor);
+	if (error == EOVERFLOW || error == ENOENT) {
+		req->result = error;
+		error = 0;
+	}
+	if (!error && !req->result)
+		error = copyout(name, req->name, req->namesize);
+
+	free(name, M_TEMP);
+	return (error);
+}
+
+/*
+ * Write (or, with datasize 0, delete) an EFI variable.  Name rules
+ * match iodev_efivar_getvar().  EAGAIN/ENOENT from the firmware are
+ * reported via req->result.  free(NULL) is a no-op for the delete
+ * case where no data buffer was allocated.
+ */
+static int 
+iodev_efivar_setvar(struct iodev_efivar_req *req) 
+{
+	void *data;
+	efi_char *name;
+	int error;
+
+	if ((req->namesize & 1) != 0 || req->namesize < 4)
+		return (EINVAL);
+
+	/*
+	 * Pre-zero the allocated memory and don't copy the last 2 bytes
+	 * of the name. That should be the closing nul character (ucs-2)
+	 * and if not, then we ensured a nul-terminating string. This is
+	 * to protect the firmware and thus ourselves.
+	 */
+	name = malloc(req->namesize, M_TEMP, M_WAITOK | M_ZERO);
+	error = copyin(req->name, name, req->namesize - 2);
+	if (error) {
+		free(name, M_TEMP);
+		return (error);
+	}
+
+	if (req->datasize) {
+		data = malloc(req->datasize, M_TEMP, M_WAITOK);
+		error = copyin(req->data, data, req->datasize);
+		if (error) {
+			free(data, M_TEMP);
+			free(name, M_TEMP);
+			return (error);
+		}
+	} else
+		data = NULL;
+
+	error = efi_var_set(name, &req->vendor, req->attrib, req->datasize,
+	    data);
+	if (error == EAGAIN || error == ENOENT) {
+		req->result = error;
+		error = 0;
+	}
+
+	free(data, M_TEMP);
+	free(name, M_TEMP);
+	return (error);
+}


Property changes on: trunk/sys/ia64/ia64/iodev_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/locore.S
===================================================================
--- trunk/sys/ia64/ia64/locore.S	                        (rev 0)
+++ trunk/sys/ia64/ia64/locore.S	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,362 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001-2011 Marcel Moolenaar
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/locore.S 224216 2011-07-19 12:41:57Z attilio $
+ */
+
+#include <machine/asm.h>
+#include <machine/ia64_cpu.h>
+#include <machine/intrcnt.h>
+#include <machine/pte.h>
+#include <assym.s>
+
+/*
+ * The Altix 350 needs more than the architected 16KB (8KB for stack and
+ * 8KB for RSE backing store) when calling EFI to setup virtual mode.
+ */
+#define	FW_STACK_SIZE	3*PAGE_SIZE
+
+	// Boot-time stack + RSE backing store for the BSP (see the
+	// FW_STACK_SIZE note above).  Lives in .ivt.data so it is mapped
+	// together with the interrupt vector table.
+	.section .ivt.data, "aw"
+	.align	PAGE_SIZE
+	.global	kstack
+kstack:	.space	FW_STACK_SIZE
+	.global	kstack_top
+kstack_top:
+
+	.text
+
+/*
+ * Not really a leaf but we can't return.
+ * The EFI loader passes the physical address of the bootinfo block in
+ * register r8.
+ */
+ENTRY_NOPROFILE(__start, 1)
+	.prologue
+	.save	rp,r0
+	.body
+	// Stop the RSE (ar.rsc=0) while we install the IVT and move the
+	// backing store onto kstack.
+{	.mlx
+	mov	ar.rsc=0
+	movl	r16=ia64_vector_table	// set up IVT early
+	;;
+}
+{	.mlx
+	mov	cr.iva=r16
+	movl	r16=kstack
+	;;
+}
+{	.mmi
+	srlz.i
+	;;
+	ssm	IA64_PSR_DFH		// defer high FP until first use
+	mov	r17=FW_STACK_SIZE-16
+	;;
+}
+{	.mlx
+	add	sp=r16,r17		// proc0's stack
+	movl	gp=__gp			// find kernel globals
+	;;
+}
+{	.mlx
+	mov	ar.bspstore=r16		// switch backing store
+	movl	r16=bootinfo
+	;;
+}
+{	.mmi
+	st8	[r16]=r8		// save the PA of the bootinfo block
+	loadrs				// invalidate regs
+	mov	r17=IA64_DCR_DEFAULT
+	;;
+}
+{	.mmi
+	mov	cr.dcr=r17
+	mov	ar.rsc=3		// turn rse back on
+	nop	0
+	;;
+}
+{	.mmi
+	srlz.d
+	alloc	r16=ar.pfs,0,0,1,0
+	mov	out0=r0			// we are linked at the right address 
+	;;				// we just need to process fptrs
+}
+{	.mib
+	nop	0
+	nop	0
+	br.call.sptk.many rp=_reloc
+	;;
+}
+{	.mib
+	nop	0
+	nop	0
+	br.call.sptk.many rp=ia64_init
+	;;
+}
+	// We have the new bspstore in r8 and the new sp in r9.
+	// Switch onto the new stack and call mi_startup().
+{	.mmi
+	mov	ar.rsc = 0
+	;;
+	mov	ar.bspstore = r8
+	mov	sp = r9
+	;;
+}
+{	.mmi
+	loadrs
+	;;
+	mov	ar.rsc = 3
+	nop	0
+	;;
+}
+{	.mib
+	nop	0
+	nop	0
+	br.call.sptk.many rp=mi_startup
+	;;
+}
+	/* NOTREACHED */
+1:	br.cond.sptk.few 1b
+END(__start)
+
+/*
+ * fork_trampoline()
+ *
+ * Arrange for a function to be invoked neatly, after a cpu_switch().
+ *
+ * Invokes fork_exit() passing in three arguments: a callout function, an
+ * argument to the callout, and a trapframe pointer.  For child processes
+ * returning from fork(2), the argument is a pointer to the child process.
+ *
+ * The callout function and its argument is in the trapframe in scratch
+ * registers r2 and r3.
+ */
+ENTRY(fork_trampoline, 0)
+	.prologue
+	.save	rp,r0
+	.body
+	// Compute addresses of the callout function and its argument in
+	// the trapframe scratch area (see the r2/r3 note above).
+{	.mmi
+	alloc		r14=ar.pfs,0,0,3,0
+	add		r15=32+SIZEOF_SPECIAL+8,sp
+	add		r16=32+SIZEOF_SPECIAL+16,sp
+	;;
+}
+{	.mmi
+	ld8		out0=[r15]	// callout function
+	ld8		out1=[r16]	// callout argument
+	nop		0
+}
+{	.mib
+	add		out2=16,sp	// trapframe pointer
+	nop		0
+	br.call.sptk	rp=fork_exit
+	;;
+}
+	// If we get back here, it means we're a user space process that's
+	// the immediate result of fork(2).
+	.global		enter_userland
+	.type		enter_userland, @function
+enter_userland:
+{	.mib
+	nop		0
+	nop		0
+	br.sptk		epc_syscall_return
+	;;
+}
+END(fork_trampoline)
+
+/*
+ * Create a default interrupt name table. The first entry (vector 0) is
+ * hardwired to the clock interrupt.
+ */
+	.data
+	.align 8
+	// intrnames: INTRCNT_COUNT fixed-width entries of INTRNAME_LEN
+	// bytes each, space-padded and nul-terminated.  Entry 0 is
+	// "clock"; the rest are "#NNN" with NNN the decimal vector number.
+EXPORT(intrnames)
+	.ascii "clock"
+	.fill INTRNAME_LEN - 5 - 1, 1, ' '
+	.byte 0
+intr_n = 1
+.rept INTRCNT_COUNT - 1
+	.ascii "#"
+	.byte intr_n / 100 + '0'	// hundreds digit
+	.byte (intr_n % 100) / 10 + '0'	// tens digit
+	.byte intr_n % 10 + '0'		// units digit
+	.fill INTRNAME_LEN - 1 - 3 - 1, 1, ' '
+	.byte 0
+	intr_n = intr_n + 1
+.endr
+EXPORT(sintrnames)
+	data8 INTRCNT_COUNT * INTRNAME_LEN	// total size in bytes
+
+	.align 8
+	// intrcnt: one zero-initialized 64-bit counter per vector.
+EXPORT(intrcnt)
+	.fill INTRCNT_COUNT, 8, 0
+EXPORT(sintrcnt)
+	data8 INTRCNT_COUNT * 8			// total size in bytes
+
+	.text
+	// in0:	image base
+// Self-relocation: walk the _DYNAMIC section to find the RELA table,
+// its size, the symbol table and entry sizes, then apply REL64LSB,
+// DIR64LSB and FPTR64LSB relocations.  FPTR64LSB reuses or allocates
+// official function descriptors (address + gp pairs) in fptr_storage.
+// in0: image base / displacement (0 when linked at the load address).
+// Returns r8 = 0 on success, 1 if fptr_storage is exhausted.
+STATIC_ENTRY(_reloc, 1)
+	alloc	loc0=ar.pfs,1,2,0,0
+	mov	loc1=rp
+	;; 
+	movl	r15=@gprel(_DYNAMIC)	// find _DYNAMIC etc.
+	movl	r2=@gprel(fptr_storage)
+	movl	r3=@gprel(fptr_storage_end)
+	;;
+	add	r15=r15,gp		// relocate _DYNAMIC etc.
+	add	r2=r2,gp
+	add	r3=r3,gp
+	;;
+	// Pass 1: scan the dynamic tags.
+1:	ld8	r16=[r15],8		// read r15->d_tag
+	;;
+	ld8	r17=[r15],8		// and r15->d_val
+	;;
+	cmp.eq	p6,p0=DT_NULL,r16	// done?
+(p6)	br.cond.dpnt.few 2f
+	;; 
+	cmp.eq	p6,p0=DT_RELA,r16
+	;; 
+(p6)	add	r18=r17,in0		// found rela section
+	;; 
+	cmp.eq	p6,p0=DT_RELASZ,r16
+	;; 
+(p6)	mov	r19=r17			// found rela size
+	;; 
+	cmp.eq	p6,p0=DT_SYMTAB,r16
+	;; 
+(p6)	add	r20=r17,in0		// found symbol table
+	;; 
+(p6)	setf.sig f8=r20
+	;; 
+	cmp.eq	p6,p0=DT_SYMENT,r16
+	;; 
+(p6)	setf.sig f9=r17			// found symbol entry size
+	;; 
+	cmp.eq	p6,p0=DT_RELAENT,r16
+	;; 
+(p6)	mov	r22=r17			// found rela entry size
+	;;
+	br.sptk.few 1b
+	
+	// Pass 2: apply one rela entry per iteration.
+2:	
+	ld8	r15=[r18],8		// read r_offset
+	;; 
+	ld8	r16=[r18],8		// read r_info
+	add	r15=r15,in0		// relocate r_offset
+	;;
+	ld8	r17=[r18],8		// read r_addend
+	sub	r19=r19,r22		// update relasz
+
+	extr.u	r23=r16,0,32		// ELF64_R_TYPE(r16)
+	;;
+	cmp.eq	p6,p0=R_IA_64_NONE,r23
+(p6)	br.cond.dpnt.few 3f
+	;;
+	cmp.eq	p6,p0=R_IA_64_REL64LSB,r23
+(p6)	br.cond.dptk.few 4f
+	;;
+
+	// Symbolic relocation: look the symbol's value up first.
+	extr.u	r16=r16,32,32		// ELF64_R_SYM(r16)
+	;; 
+	setf.sig f10=r16		// so we can multiply
+	;;
+	xma.lu	f10=f10,f9,f8		// f10=symtab + r_sym*syment
+	;;
+	getf.sig r16=f10
+	;;
+	add	r16=8,r16		// address of st_value
+	;;
+	ld8	r16=[r16]		// read symbol value
+	;;
+	add	r16=r16,in0		// relocate symbol value
+	;;
+
+	cmp.eq	p6,p0=R_IA_64_DIR64LSB,r23
+(p6)	br.cond.dptk.few 5f
+	;;
+	cmp.eq	p6,p0=R_IA_64_FPTR64LSB,r23
+(p6)	br.cond.dptk.few 6f
+	;;
+
+3:
+	cmp.ltu	p6,p0=0,r19		// more?
+(p6)	br.cond.dptk.few 2b		// loop
+	mov	r8=0			// success return value
+	br.cond.sptk.few 9f		// done
+
+4:					// REL64LSB: image base + addend
+	add	r16=in0,r17		// BD + A
+	;;
+	st8	[r15]=r16		// word64 (LSB)
+	br.cond.sptk.few 3b
+
+5:					// DIR64LSB: symbol + addend
+	add	r16=r16,r17		// S + A
+	;;
+	st8	[r15]=r16		// word64 (LSB)
+	br.cond.sptk.few 3b
+
+6:					// FPTR64LSB: function descriptor
+	movl	r17=@gprel(fptr_storage)
+	;;
+	add	r17=r17,gp		// start of fptrs
+	;;
+7:	cmp.geu	p6,p0=r17,r2		// end of fptrs?
+(p6)	br.cond.dpnt.few 8f		// can't find existing fptr
+	ld8	r20=[r17]		// read function from fptr
+	;;
+	cmp.eq	p6,p0=r16,r20		// same function?
+	;;
+(p6)	st8	[r15]=r17		// reuse fptr
+(p6)	br.cond.sptk.few 3b		// done
+	add	r17=16,r17		// next fptr
+	br.cond.sptk.few 7b
+
+8:					// allocate new fptr
+	mov	r8=1			// failure return value
+	cmp.geu	p6,p0=r2,r3		// space left?
+(p6)	br.cond.dpnt.few 9f		// bail out
+
+	st8	[r15]=r2		// install fptr
+	st8	[r2]=r16,8		// write fptr address
+	;;
+	st8	[r2]=gp,8		// write fptr gp
+	br.cond.sptk.few 3b
+
+9:
+	mov	ar.pfs=loc0
+	mov	rp=loc1
+	;;
+	br.ret.sptk.few rp
+
+END(_reloc)
+
+	.data
+	.align	16
+	// Pool of 16-byte function descriptors (address + gp) handed out
+	// by _reloc for FPTR64LSB relocations: room for 4096 of them.
+	.global fptr_storage
+fptr_storage:
+	.space	4096*16			// XXX
+fptr_storage_end:


Property changes on: trunk/sys/ia64/ia64/locore.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/machdep.c
===================================================================
--- trunk/sys/ia64/ia64/machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,1543 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003,2004 Marcel Moolenaar
+ * Copyright (c) 2000,2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/machdep.c 278412 2015-02-08 22:17:20Z peter $");
+
+#include "opt_compat.h"
+#include "opt_ddb.h"
+#include "opt_kstack_pages.h"
+#include "opt_sched.h"
+#include "opt_xtrace.h"
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/cons.h>
+#include <sys/cpu.h>
+#include <sys/efi.h>
+#include <sys/eventhandler.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/linker.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <sys/pcpu.h>
+#include <sys/ptrace.h>
+#include <sys/random.h>
+#include <sys/reboot.h>
+#include <sys/rwlock.h>
+#include <sys/sched.h>
+#include <sys/signalvar.h>
+#include <sys/syscall.h>
+#include <sys/syscallsubr.h>
+#include <sys/sysctl.h>
+#include <sys/sysproto.h>
+#include <sys/ucontext.h>
+#include <sys/uio.h>
+#include <sys/uuid.h>
+#include <sys/vmmeter.h>
+#include <sys/vnode.h>
+
+#include <ddb/ddb.h>
+
+#include <net/netisr.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
+
+#include <machine/bootinfo.h>
+#include <machine/cpu.h>
+#include <machine/elf.h>
+#include <machine/fpu.h>
+#include <machine/intr.h>
+#include <machine/kdb.h>
+#include <machine/mca.h>
+#include <machine/md_var.h>
+#include <machine/pal.h>
+#include <machine/pcb.h>
+#include <machine/reg.h>
+#include <machine/sal.h>
+#include <machine/sigframe.h>
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+#include <machine/unwind.h>
+#include <machine/vmparam.h>
+
+/*
+ * For atomicity reasons, we demand that pc_curthread is the first
+ * field in the struct pcpu. It allows us to read the pointer with
+ * a single atomic instruction:
+ *	ld8 %curthread = [r13]
+ * Otherwise we would first have to calculate the load address and
+ * store the result in a temporary register and that for the load:
+ *	add %temp = %offsetof(struct pcpu), r13
+ *	ld8 %curthread = [%temp]
+ * A context switch in between the add and the ld8 could have the
+ * thread migrate to a different core. In that case, %curthread
+ * would be the thread running on the original core and not actually
+ * the current thread.
+ */
+CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
+
+static SYSCTL_NODE(_hw, OID_AUTO, freq, CTLFLAG_RD, 0, "");
+static SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RD, 0, "");
+
+static u_int bus_freq;
+SYSCTL_UINT(_hw_freq, OID_AUTO, bus, CTLFLAG_RD, &bus_freq, 0,
+    "Bus clock frequency");
+
+static u_int cpu_freq;
+SYSCTL_UINT(_hw_freq, OID_AUTO, cpu, CTLFLAG_RD, &cpu_freq, 0,
+    "CPU clock frequency");
+
+static u_int itc_freq;
+SYSCTL_UINT(_hw_freq, OID_AUTO, itc, CTLFLAG_RD, &itc_freq, 0,
+    "ITC frequency");
+
+int cold = 1;
+int unmapped_buf_allowed = 0;
+
+struct bootinfo *bootinfo;
+
+struct pcpu pcpu0;
+
+extern u_int64_t kernel_text[], _end[];
+
+extern u_int64_t ia64_gateway_page[];
+extern u_int64_t break_sigtramp[];
+extern u_int64_t epc_sigtramp[];
+
+struct fpswa_iface *fpswa_iface;
+
+vm_size_t ia64_pal_size;
+vm_paddr_t ia64_pal_base;
+vm_offset_t ia64_port_base;
+
+u_int64_t ia64_lapic_addr = PAL_PIB_DEFAULT_ADDR;
+
+struct ia64_pib *ia64_pib;
+
+static int ia64_sync_icache_needed;
+
+char machine[] = MACHINE;
+SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
+
+static char cpu_model[64];
+SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
+    "The CPU model name");
+
+static char cpu_family[64];
+SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
+    "The CPU family name");
+
+#ifdef DDB
+extern vm_offset_t ksym_start, ksym_end;
+#endif
+
+struct msgbuf *msgbufp = NULL;
+
+/* Other subsystems (e.g., ACPI) can hook this later. */
+void (*cpu_idle_hook)(sbintime_t) = NULL;
+
+struct kva_md_info kmi;
+
+/*
+ * Decode CPUID registers 0-4: extract the vendor string, the family/
+ * model/revision numbers and the feature mask, fill in the cpu_family
+ * and cpu_model sysctl strings, and print a boot-time summary.
+ * Family 0x20 parts (Montecito/Montvale) also flag that explicit
+ * i-cache synchronization is needed (ia64_sync_icache_needed).
+ */
+static void
+identifycpu(void)
+{
+	char vendor[17];
+	char *family_name, *model_name;
+	u_int64_t features, tmp;
+	int number, revision, model, family, archrev;
+
+	/*
+	 * Assumes little-endian.
+	 */
+	*(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
+	*(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
+	vendor[16] = '\0';
+
+	/* CPUID register 3: version information, one byte per field. */
+	tmp = ia64_get_cpuid(3);
+	number = (tmp >> 0) & 0xff;
+	revision = (tmp >> 8) & 0xff;
+	model = (tmp >> 16) & 0xff;
+	family = (tmp >> 24) & 0xff;
+	archrev = (tmp >> 32) & 0xff;
+
+	family_name = model_name = "unknown";
+	switch (family) {
+	case 0x07:
+		family_name = "Itanium";
+		model_name = "Merced";
+		break;
+	case 0x1f:
+		family_name = "Itanium 2";
+		switch (model) {
+		case 0x00:
+			model_name = "McKinley";
+			break;
+		case 0x01:
+			/*
+			 * Deerfield is a low-voltage variant based on the
+			 * Madison core. We need circumstantial evidence
+			 * (i.e. the clock frequency) to identify those.
+			 * Allow for roughly 1% error margin.
+			 */
+			if (cpu_freq > 990 && cpu_freq < 1010)
+				model_name = "Deerfield";
+			else
+				model_name = "Madison";
+			break;
+		case 0x02:
+			model_name = "Madison II";
+			break;
+		}
+		break;
+	case 0x20:
+		ia64_sync_icache_needed = 1;
+
+		family_name = "Itanium 2";
+		switch (model) {
+		case 0x00:
+			model_name = "Montecito";
+			break;
+		case 0x01:
+			model_name = "Montvale";
+			break;
+		}
+		break;
+	}
+	snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
+	snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);
+
+	/* CPUID register 4: general features. */
+	features = ia64_get_cpuid(4);
+
+	printf("CPU: %s (", model_name);
+	if (cpu_freq)
+		printf("%u MHz ", cpu_freq);
+	printf("%s)\n", family_name);
+	printf("  Origin = \"%s\"  Revision = %d\n", vendor, revision);
+	printf("  Features = 0x%b\n", (u_int32_t) features,
+	    "\020"
+	    "\001LB"	/* long branch (brl) instruction. */
+	    "\002SD"	/* Spontaneous deferral. */
+	    "\003AO"	/* 16-byte atomic operations (ld, st, cmpxchg). */ );
+}
+
+/*
+ * Late (SI_SUB_CPU) startup: identify the CPU, report memory totals,
+ * initialize the buffer cache and pager, discover SAPICs from the
+ * ACPI MADT, map the processor interrupt block, initialize machine
+ * check handling, and build a per-CPU sysctl tree exposing interrupt
+ * statistics counters.
+ */
+static void
+cpu_startup(void *dummy)
+{
+	char nodename[16];
+	struct pcpu *pc;
+	struct pcpu_stats *pcs;
+
+	/*
+	 * Good {morning,afternoon,evening,night}.
+	 */
+	identifycpu();
+
+#ifdef PERFMON
+	perfmon_init();
+#endif
+	printf("real memory  = %ld (%ld MB)\n", ptoa(realmem),
+	    ptoa(realmem) / 1048576);
+
+	vm_ksubmap_init(&kmi);
+
+	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
+	    ptoa(cnt.v_free_count) / 1048576);
+ 
+	if (fpswa_iface == NULL)
+		printf("Warning: no FPSWA package supplied\n");
+	else
+		printf("FPSWA Revision = 0x%lx, Entry = %p\n",
+		    (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);
+
+	/*
+	 * Set up buffers, so they can be used to read disk labels.
+	 */
+	bufinit();
+	vm_pager_bufferinit();
+
+	/*
+	 * Traverse the MADT to discover IOSAPIC and Local SAPIC
+	 * information.
+	 */
+	ia64_probe_sapics();
+	ia64_pib = pmap_mapdev(ia64_lapic_addr, sizeof(*ia64_pib));
+
+	ia64_mca_init();
+
+	/*
+	 * Create sysctl tree for per-CPU information.
+	 */
+	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
+		snprintf(nodename, sizeof(nodename), "%u", pc->pc_cpuid);
+		sysctl_ctx_init(&pc->pc_md.sysctl_ctx);
+		pc->pc_md.sysctl_tree = SYSCTL_ADD_NODE(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_STATIC_CHILDREN(_machdep_cpu), OID_AUTO, nodename,
+		    CTLFLAG_RD, NULL, "");
+		if (pc->pc_md.sysctl_tree == NULL)
+			continue;
+
+		pcs = &pc->pc_md.stats;
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "nasts", CTLFLAG_RD, &pcs->pcs_nasts,
+		    "Number of IPI_AST interrupts");
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "nclks", CTLFLAG_RD, &pcs->pcs_nclks,
+		    "Number of clock interrupts");
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "nextints", CTLFLAG_RD, &pcs->pcs_nextints,
+		    "Number of ExtINT interrupts");
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "nhardclocks", CTLFLAG_RD, &pcs->pcs_nhardclocks,
+		    "Number of IPI_HARDCLOCK interrupts");
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "nhighfps", CTLFLAG_RD, &pcs->pcs_nhighfps,
+		    "Number of IPI_HIGH_FP interrupts");
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "nhwints", CTLFLAG_RD, &pcs->pcs_nhwints,
+		    "Number of hardware (device) interrupts");
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "npreempts", CTLFLAG_RD, &pcs->pcs_npreempts,
+		    "Number of IPI_PREEMPT interrupts");
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "nrdvs", CTLFLAG_RD, &pcs->pcs_nrdvs,
+		    "Number of IPI_RENDEZVOUS interrupts");
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "nstops", CTLFLAG_RD, &pcs->pcs_nstops,
+		    "Number of IPI_STOP interrupts");
+
+		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+		    "nstrays", CTLFLAG_RD, &pcs->pcs_nstrays,
+		    "Number of stray interrupts");
+	}
+}
+SYSINIT(cpu_startup, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
+
+/*
+ * Flush the data cache over [ptr, ptr + len): issue a flush-cache (fc)
+ * for every 32-byte line overlapping the range, then serialize so the
+ * flushes take effect.
+ */
+void
+cpu_flush_dcache(void *ptr, size_t len)
+{
+	vm_offset_t addr, end;
+
+	/* Round the start down to a 32-byte line boundary. */
+	end = (uintptr_t)ptr + len;
+	for (addr = (uintptr_t)ptr & ~31; addr < end; addr += 32)
+		ia64_fc(addr);
+
+	ia64_srlz_d();
+}
+
+/*
+ * Report the clock frequency, in Hz, of the given CPU.  The rate is
+ * fixed (derived from the firmware ratios at boot), so no estimation
+ * is actually performed.  Returns EINVAL for an unknown CPU id or a
+ * NULL result pointer.
+ */
+int
+cpu_est_clockrate(int cpu_id, uint64_t *rate)
+{
+
+	if (pcpu_find(cpu_id) == NULL)
+		return (EINVAL);
+	if (rate == NULL)
+		return (EINVAL);
+	*rate = (u_long)cpu_freq * 1000000ul;
+	return (0);
+}
+
+/*
+ * Halt the machine by handing control back to the firmware via the
+ * EFI ResetSystem() service.  Does not return.
+ *
+ * Use a proper (void) prototype-style definition instead of the
+ * deprecated K&R empty parameter list.
+ */
+void
+cpu_halt(void)
+{
+
+	efi_reset_system();
+}
+
+/*
+ * Idle the CPU.  When not "busy" the caller tolerates extra wakeup
+ * latency, so switch to the idle clock around the wait.  If work is
+ * already runnable just re-enable interrupts; otherwise defer to the
+ * registered idle hook (e.g. ACPI) or to PAL_HALT_LIGHT.
+ */
+void
+cpu_idle(int busy)
+{
+	register_t ie;
+	sbintime_t sbt = -1;
+
+	if (!busy) {
+		critical_enter();
+		sbt = cpu_idleclock();
+	}
+
+	/* Interrupts must be enabled on entry. */
+	ie = intr_disable();
+	KASSERT(ie != 0, ("%s called with interrupts disabled\n", __func__));
+
+	if (sched_runnable())
+		ia64_enable_intr();
+	else if (cpu_idle_hook != NULL) {
+		(*cpu_idle_hook)(sbt);
+		/* The hook must enable interrupts! */
+	} else {
+		ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
+		ia64_enable_intr();
+	}
+
+	if (!busy) {
+		cpu_activeclock();
+		critical_exit();
+	}
+}
+
+/*
+ * MD hook for waking an idle CPU.  Always reports 0 ("nothing done"):
+ * no machine-dependent fast path is provided here, so the caller must
+ * use its generic mechanism instead.
+ */
+int
+cpu_idle_wakeup(int cpu)
+{
+
+	return (0);
+}
+
+/*
+ * Reset the machine via the EFI ResetSystem() service.  Does not
+ * return.
+ *
+ * Use a proper (void) prototype-style definition instead of the
+ * deprecated K&R empty parameter list.
+ */
+void
+cpu_reset(void)
+{
+
+	efi_reset_system();
+}
+
+/*
+ * Switch this CPU from thread 'old' to thread 'new'.  The old context
+ * (including ia32 state under COMPAT_FREEBSD32) is saved, the pmap is
+ * switched, and 'old' is handed back to the scheduler by storing 'mtx'
+ * into its thread lock.  savectx() returns zero on the initial save;
+ * the switch completes via restorectx(), which resumes the new thread
+ * and does not return here.  When 'old' is itself resumed later,
+ * savectx() appears to return non-zero and we simply fall out.
+ */
+void
+cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
+{
+	struct pcb *oldpcb, *newpcb;
+
+	oldpcb = old->td_pcb;
+#ifdef COMPAT_FREEBSD32
+	ia32_savectx(oldpcb);
+#endif
+	/* Owner of the high FP state leaves with FP access deferred. */
+	if (pcpup->pc_fpcurthread == old)
+		old->td_frame->tf_special.psr |= IA64_PSR_DFH;
+	if (!savectx(oldpcb)) {
+		newpcb = new->td_pcb;
+		oldpcb->pcb_current_pmap =
+		    pmap_switch(newpcb->pcb_current_pmap);
+
+		ia64_mf();
+
+		/* Release the old thread to the scheduler. */
+		atomic_store_rel_ptr(&old->td_lock, mtx);
+
+#if defined(SCHED_ULE) && defined(SMP)
+		/* Wait until the new thread's lock has been granted. */
+		while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock)
+			cpu_spinwait();
+#endif
+
+		pcpup->pc_curthread = new;
+
+#ifdef COMPAT_FREEBSD32
+		ia32_restorectx(newpcb);
+#endif
+
+		if (pcpup->pc_fpcurthread == new)
+			new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
+		restorectx(newpcb);
+		/* We should not get here. */
+		panic("cpu_switch: restorectx() returned");
+		/* NOTREACHED */
+	}
+}
+
+/*
+ * Resume thread 'new' without saving any state for the outgoing
+ * thread.  This mirrors the second half of cpu_switch();
+ * restorectx() does not return.
+ */
+void
+cpu_throw(struct thread *old __unused, struct thread *new)
+{
+	struct pcb *newpcb;
+
+	newpcb = new->td_pcb;
+	(void)pmap_switch(newpcb->pcb_current_pmap);
+
+#if defined(SCHED_ULE) && defined(SMP)
+	/* Wait until the new thread's lock has been granted. */
+	while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock)
+		cpu_spinwait();
+#endif
+
+	pcpup->pc_curthread = new;
+
+#ifdef COMPAT_FREEBSD32
+	ia32_restorectx(newpcb);
+#endif
+
+	restorectx(newpcb);
+	/* We should not get here. */
+	panic("cpu_throw: restorectx() returned");
+	/* NOTREACHED */
+}
+
+/*
+ * MD initialization of a freshly created struct pcpu.
+ */
+void
+cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
+{
+
+	/*
+	 * Set pc_acpi_id to "uninitialized".
+	 * See sys/dev/acpica/acpi_cpu.c
+	 */
+	pcpu->pc_acpi_id = 0xffffffff;
+}
+
+/*
+ * Record the firmware-assigned identifiers for a CPU: its ACPI
+ * processor id and the local interrupt id (LID) derived from the
+ * SAPIC id.
+ */
+void
+cpu_pcpu_setup(struct pcpu *pc, u_int acpi_id, u_int sapic_id)
+{
+
+	pc->pc_acpi_id = acpi_id;
+	pc->pc_md.lid = IA64_LID_SET_SAPIC_ID(sapic_id);
+}
+ 
+/*
+ * Enter a spinlock section.  The outermost entry disables interrupts
+ * and records their previous state in the thread; nested entries only
+ * bump the per-thread count.  A critical section is always entered.
+ */
+void
+spinlock_enter(void)
+{
+	struct thread *td;
+	int intr;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0) {
+		/* Disable interrupts before publishing the count. */
+		intr = intr_disable();
+		td->td_md.md_spinlock_count = 1;
+		td->td_md.md_saved_intr = intr;
+	} else
+		td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+/*
+ * Leave a spinlock section.  When the nesting count drops to zero the
+ * interrupt state saved by the matching spinlock_enter() is restored.
+ */
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+	int intr;
+
+	td = curthread;
+	critical_exit();
+	/* Read the saved state before dropping the count. */
+	intr = td->td_md.md_saved_intr;
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		intr_restore(intr);
+}
+
+/*
+ * MD hook run when the debugger is entered on a trap.  Flushes the
+ * register stack so stacked registers can be unwound from memory,
+ * and, for the fixed break instruction used as a breakpoint, advances
+ * psr.ri so execution resumes after the break on continue.
+ */
+void
+kdb_cpu_trap(int vector, int code __unused)
+{
+
+#ifdef XTRACE
+	ia64_xtrace_stop();
+#endif
+	__asm __volatile("flushrs;;");
+
+	/* Restart after the break instruction. */
+	if (vector == IA64_VEC_BREAK &&
+	    kdb_frame->tf_special.ifa == IA64_FIXED_BREAK)
+		kdb_frame->tf_special.psr += IA64_PSR_RI_1;
+}
+
+/*
+ * Pin a translation for the VHPT into data translation register 3.
+ * The itr.d insertion must happen with psr.ic and psr.i cleared; the
+ * previous psr is restored afterwards.  Any stale translation over
+ * the region is purged (ptr.d) first.
+ */
+void
+map_vhpt(uintptr_t vhpt)
+{
+	pt_entry_t pte;
+	uint64_t psr;
+
+	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
+	    PTE_PL_KERN | PTE_AR_RW;
+	pte |= vhpt & PTE_PPN_MASK;
+
+	__asm __volatile("ptr.d %0,%1" :: "r"(vhpt),
+	    "r"(pmap_vhpt_log2size << 2));
+
+	__asm __volatile("mov   %0=psr" : "=r"(psr));
+	__asm __volatile("rsm   psr.ic|psr.i");
+	ia64_srlz_i();
+	ia64_set_ifa(vhpt);
+	ia64_set_itir(pmap_vhpt_log2size << 2);
+	ia64_srlz_d();
+	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
+	__asm __volatile("mov   psr.l=%0" :: "r" (psr));
+	ia64_srlz_i();
+}
+
+/*
+ * Pin translations for the firmware PAL code region found in the EFI
+ * memory map: data translation register 4 and instruction translation
+ * register 1.  The page size is the smallest power of two covering
+ * the region.  A no-op if no PAL region was discovered.
+ */
+void
+map_pal_code(void)
+{
+	pt_entry_t pte;
+	vm_offset_t va;
+	vm_size_t sz;
+	uint64_t psr;
+	u_int shft;
+
+	if (ia64_pal_size == 0)
+		return;
+
+	va = IA64_PHYS_TO_RR7(ia64_pal_base);
+
+	/* log2 of the region size, for the itir page-size field. */
+	sz = ia64_pal_size;
+	shft = 0;
+	while (sz > 1) {
+		shft++;
+		sz >>= 1;
+	}
+
+	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
+	    PTE_PL_KERN | PTE_AR_RWX;
+	pte |= ia64_pal_base & PTE_PPN_MASK;
+
+	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" :: "r"(va), "r"(shft<<2));
+
+	/* Translation inserts require psr.ic and psr.i off. */
+	__asm __volatile("mov	%0=psr" : "=r"(psr));
+	__asm __volatile("rsm	psr.ic|psr.i");
+	ia64_srlz_i();
+	ia64_set_ifa(va);
+	ia64_set_itir(shft << 2);
+	ia64_srlz_d();
+	__asm __volatile("itr.d	dtr[%0]=%1" :: "r"(4), "r"(pte));
+	ia64_srlz_d();
+	__asm __volatile("itr.i	itr[%0]=%1" :: "r"(1), "r"(pte));
+	__asm __volatile("mov	psr.l=%0" :: "r" (psr));
+	ia64_srlz_i();
+}
+
+/*
+ * Pin translations for the syscall gateway page at VM_MAXUSER_ADDRESS:
+ * data translation register 5 and instruction translation register 2,
+ * execute-only with promote-to-kernel access.  The gateway address is
+ * exposed to userland via ar.k5.
+ */
+void
+map_gateway_page(void)
+{
+	pt_entry_t pte;
+	uint64_t psr;
+
+	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
+	    PTE_PL_KERN | PTE_AR_X_RX;
+	pte |= ia64_tpa((uint64_t)ia64_gateway_page) & PTE_PPN_MASK;
+
+	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
+	    "r"(VM_MAXUSER_ADDRESS), "r"(PAGE_SHIFT << 2));
+
+	/* Translation inserts require psr.ic and psr.i off. */
+	__asm __volatile("mov	%0=psr" : "=r"(psr));
+	__asm __volatile("rsm	psr.ic|psr.i");
+	ia64_srlz_i();
+	ia64_set_ifa(VM_MAXUSER_ADDRESS);
+	ia64_set_itir(PAGE_SHIFT << 2);
+	ia64_srlz_d();
+	__asm __volatile("itr.d	dtr[%0]=%1" :: "r"(5), "r"(pte));
+	ia64_srlz_d();
+	__asm __volatile("itr.i	itr[%0]=%1" :: "r"(2), "r"(pte));
+	__asm __volatile("mov	psr.l=%0" :: "r" (psr));
+	ia64_srlz_i();
+
+	/* Expose the mapping to userland in ar.k5 */
+	ia64_set_k5(VM_MAXUSER_ADDRESS);
+}
+
+/*
+ * Combine a base frequency (Hz) with a firmware ratio -- numerator in
+ * the upper 32 bits, denominator in the lower 32 bits -- and return
+ * the result rounded to the nearest MHz.
+ */
+static u_int
+freq_ratio(u_long base, u_long ratio)
+{
+	u_long denom, hz, numer;
+
+	numer = ratio >> 32;
+	denom = ratio & 0xfffffffful;
+	hz = (base * numer) / denom;
+	return ((hz + 500000) / 1000000);
+}
+
+/*
+ * Query the platform base clock from SAL (SAL_FREQ_BASE) and the
+ * CPU/bus/ITC ratios from PAL (PAL_FREQ_RATIOS), then derive the
+ * hw.freq.{cpu,bus,itc} values in MHz.  The frequencies are left at
+ * zero if either firmware call fails.
+ */
+static void
+calculate_frequencies(void)
+{
+	struct ia64_sal_result sal;
+	struct ia64_pal_result pal;
+	register_t ie;
+
+	/* Firmware calls are made with interrupts disabled. */
+	ie = intr_disable();
+	sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
+	pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
+	intr_restore(ie);
+
+	if (sal.sal_status == 0 && pal.pal_status == 0) {
+		if (bootverbose) {
+			printf("Platform clock frequency %ld Hz\n",
+			       sal.sal_result[0]);
+			printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
+			       "ITC ratio %ld/%ld\n",
+			       pal.pal_result[0] >> 32,
+			       pal.pal_result[0] & ((1L << 32) - 1),
+			       pal.pal_result[1] >> 32,
+			       pal.pal_result[1] & ((1L << 32) - 1),
+			       pal.pal_result[2] >> 32,
+			       pal.pal_result[2] & ((1L << 32) - 1));
+		}
+		cpu_freq = freq_ratio(sal.sal_result[0], pal.pal_result[0]);
+		bus_freq = freq_ratio(sal.sal_result[0], pal.pal_result[1]);
+		itc_freq = freq_ratio(sal.sal_result[0], pal.pal_result[2]);
+	}
+}
+
+/*
+ * Machine-dependent kernel initialization, called from locore on the
+ * bootstrap processor with the firmware (EFI) environment in place.
+ * Sets up the region registers, the physical memory lists, firmware
+ * call mappings, PCPU 0, the console, proc0's stack and PCB, and the
+ * VM system.  Returns the register-stack and memory-stack pointers
+ * that locore switches onto before calling mi_startup().
+ */
+struct ia64_init_return
+ia64_init(void)
+{
+	struct ia64_init_return ret;
+	struct efi_md *md;
+	pt_entry_t *pbvm_pgtbl_ent, *pbvm_pgtbl_lim;
+	char *p;
+	vm_size_t mdlen;
+	int metadata_missing;
+
+	/*
+	 * NO OUTPUT ALLOWED UNTIL FURTHER NOTICE.
+	 */
+
+	ia64_set_fpsr(IA64_FPSR_DEFAULT);
+
+	/*
+	 * Region 6 is direct mapped UC and region 7 is direct mapped
+	 * WC. The details of this is controlled by the Alt {I,D}TLB
+	 * handlers. Here we just make sure that they have the largest
+	 * possible page size to minimise TLB usage.
+	 */
+	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (LOG2_ID_PAGE_SIZE << 2));
+	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (LOG2_ID_PAGE_SIZE << 2));
+	ia64_srlz_d();
+
+	/* Initialize/setup physical memory datastructures */
+	ia64_physmem_init();
+
+	/*
+	 * Process the memory map. This gives us the PAL locations,
+	 * the I/O port base address, the available memory regions
+	 * for initializing the physical memory map.
+	 */
+	for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
+		mdlen = md->md_pages * EFI_PAGE_SIZE;
+		switch (md->md_type) {
+		case EFI_MD_TYPE_IOPORT:
+			ia64_port_base = pmap_mapdev_priv(md->md_phys,
+			    mdlen, VM_MEMATTR_UNCACHEABLE);
+			break;
+		case EFI_MD_TYPE_PALCODE:
+			ia64_pal_base = md->md_phys;
+			ia64_pal_size = mdlen;
+			/*FALLTHROUGH*/
+		case EFI_MD_TYPE_BAD:
+		case EFI_MD_TYPE_FIRMWARE:
+		case EFI_MD_TYPE_RECLAIM:
+		case EFI_MD_TYPE_RT_CODE:
+		case EFI_MD_TYPE_RT_DATA:
+			/* Don't use these memory regions. */
+			ia64_physmem_track(md->md_phys, mdlen);
+			break;
+		case EFI_MD_TYPE_BS_CODE:
+		case EFI_MD_TYPE_BS_DATA:
+		case EFI_MD_TYPE_CODE:
+		case EFI_MD_TYPE_DATA:
+		case EFI_MD_TYPE_FREE:
+			/* These are ok to use. */
+			ia64_physmem_add(md->md_phys, mdlen);
+			break;
+		}
+	}
+
+	/*
+	 * Remove the PBVM and its page table from phys_avail. The loader
+	 * passes the physical address of the page table to us. The virtual
+	 * address of the page table is fixed.
+	 * Also remove every page currently mapped by that page table.
+	 */
+	ia64_physmem_delete(bootinfo->bi_pbvm_pgtbl, bootinfo->bi_pbvm_pgtblsz);
+	pbvm_pgtbl_ent = (void *)IA64_PBVM_PGTBL;
+	pbvm_pgtbl_lim = (void *)(IA64_PBVM_PGTBL + bootinfo->bi_pbvm_pgtblsz);
+	while (pbvm_pgtbl_ent < pbvm_pgtbl_lim) {
+		if ((*pbvm_pgtbl_ent & PTE_PRESENT) == 0)
+			break;
+		ia64_physmem_delete(*pbvm_pgtbl_ent & PTE_PPN_MASK,
+		    IA64_PBVM_PAGE_SIZE);
+		pbvm_pgtbl_ent++;
+	}
+
+	/* Finalize physical memory datastructures */
+	ia64_physmem_fini();
+
+	metadata_missing = 0;
+	if (bootinfo->bi_modulep)
+		preload_metadata = (caddr_t)bootinfo->bi_modulep;
+	else
+		metadata_missing = 1;
+
+	if (envmode == 0 && bootinfo->bi_envp)
+		kern_envp = (caddr_t)bootinfo->bi_envp;
+	else
+		kern_envp = static_env;
+
+	/*
+	 * Look at arguments passed to us and compute boothowto.
+	 */
+	boothowto = bootinfo->bi_boothowto;
+
+	if (boothowto & RB_VERBOSE)
+		bootverbose = 1;
+
+	/*
+	 * Wire things up so we can call the firmware.
+	 */
+	map_pal_code();
+	efi_boot_minimal(bootinfo->bi_systab);
+	ia64_xiv_init();
+	ia64_sal_init();
+	calculate_frequencies();
+
+	set_cputicker(ia64_get_itc, (u_long)itc_freq * 1000000, 0);
+
+	/*
+	 * Setup the PCPU data for the bootstrap processor. It is needed
+	 * by printf(). Also, since printf() has critical sections, we
+	 * need to initialize at least pc_curthread.
+	 */
+	pcpup = &pcpu0;
+	ia64_set_k4((u_int64_t)pcpup);
+	pcpu_init(pcpup, 0, sizeof(pcpu0));
+	dpcpu_init(ia64_physmem_alloc(DPCPU_SIZE, PAGE_SIZE), 0);
+	cpu_pcpu_setup(pcpup, ~0U, ia64_get_lid());
+	pcpup->pc_curthread = &thread0;
+
+	/*
+	 * Initialize the console before we print anything out.
+	 */
+	cninit();
+
+	/* OUTPUT NOW ALLOWED */
+
+	if (metadata_missing)
+		printf("WARNING: loader(8) metadata is missing!\n");
+
+	/* Get FPSWA interface */
+	fpswa_iface = (bootinfo->bi_fpswa == 0) ? NULL :
+	    (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo->bi_fpswa);
+
+	/* Init basic tunables, including hz */
+	init_param1();
+
+	p = getenv("kernelname");
+	if (p != NULL) {
+		strlcpy(kernelname, p, sizeof(kernelname));
+		freeenv(p);
+	}
+
+	init_param2(physmem);
+
+	/*
+	 * Initialize error message buffer (at end of core).
+	 */
+	msgbufp = ia64_physmem_alloc(msgbufsize, PAGE_SIZE);
+	msgbufinit(msgbufp, msgbufsize);
+
+	proc_linkup0(&proc0, &thread0);
+	/*
+	 * Init mapping for kernel stack for proc 0
+	 */
+	p = ia64_physmem_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
+	thread0.td_kstack = (uintptr_t)p;
+	thread0.td_kstack_pages = KSTACK_PAGES;
+
+	mutex_init();
+
+	/*
+	 * Initialize the rest of proc 0's PCB.
+	 *
+	 * Set the kernel sp, reserving space for an (empty) trapframe,
+	 * and make proc0's trapframe pointer point to it for sanity.
+	 * Initialise proc0's backing store to start after u area.
+	 */
+	cpu_thread_alloc(&thread0);
+	thread0.td_frame->tf_flags = FRAME_SYSCALL;
+	thread0.td_pcb->pcb_special.sp =
+	    (u_int64_t)thread0.td_frame - 16;
+	thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack;
+
+	/*
+	 * Initialize the virtual memory system.
+	 */
+	pmap_bootstrap();
+
+#ifdef XTRACE
+	ia64_xtrace_init_bsp();
+#endif
+
+	/*
+	 * Initialize debuggers, and break into them if appropriate.
+	 */
+#ifdef DDB
+	ksym_start = bootinfo->bi_symtab;
+	ksym_end = bootinfo->bi_esymtab;
+#endif
+
+	kdb_init();
+
+#ifdef KDB
+	if (boothowto & RB_KDB)
+		kdb_enter(KDB_WHY_BOOTFLAGS,
+		    "Boot flags requested debugger\n");
+#endif
+
+	/* Accept all interrupt priorities. */
+	ia64_set_tpr(0);
+	ia64_srlz_d();
+
+	/* Stack pointers for locore to switch onto (r8/r9). */
+	ret.bspstore = thread0.td_pcb->pcb_special.bspstore;
+	ret.sp = thread0.td_pcb->pcb_special.sp;
+	return (ret);
+}
+
+/*
+ * Return the bi_hcdp value from the boot information block — presumably
+ * the address of the HCDP (headless console/debug port) table passed in
+ * by the loader; confirm against the bootinfo definition.
+ */
+uint64_t
+ia64_get_hcdp(void)
+{
+
+	return (bootinfo->bi_hcdp);
+}
+
+/*
+ * Zero 'len' bytes starting at 'buf'. Handles arbitrary alignment:
+ * byte stores until the pointer is u_long-aligned, then an unrolled
+ * loop of 8 u_longs per iteration, then single u_longs, then a byte
+ * tail for the remainder.
+ */
+void
+bzero(void *buf, size_t len)
+{
+	caddr_t p = buf;
+
+	/* Byte stores until p is u_long-aligned (or len runs out). */
+	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
+		*p++ = 0;
+		len--;
+	}
+	/* Unrolled main loop: 8 u_longs (64 bytes on LP64) per pass. */
+	while (len >= sizeof(u_long) * 8) {
+		*(u_long*) p = 0;
+		*((u_long*) p + 1) = 0;
+		*((u_long*) p + 2) = 0;
+		*((u_long*) p + 3) = 0;
+		len -= sizeof(u_long) * 8;
+		*((u_long*) p + 4) = 0;
+		*((u_long*) p + 5) = 0;
+		*((u_long*) p + 6) = 0;
+		*((u_long*) p + 7) = 0;
+		p += sizeof(u_long) * 8;
+	}
+	/* One u_long at a time for what's left. */
+	while (len >= sizeof(u_long)) {
+		*(u_long*) p = 0;
+		len -= sizeof(u_long);
+		p += sizeof(u_long);
+	}
+	/* Byte tail. */
+	while (len) {
+		*p++ = 0;
+		len--;
+	}
+}
+
+/*
+ * Return the cached Interval Time Counter frequency. DELAY() below uses
+ * this value as ITC ticks per unit of its argument.
+ */
+u_int
+ia64_itc_freq(void)
+{
+
+	return (itc_freq);
+}
+
+/*
+ * Busy-wait for (at least) 'n' time units (conventionally microseconds
+ * for DELAY(9) — itc_freq would then be ticks/us; confirm where
+ * itc_freq is set) using the Interval Time Counter. We pin to the
+ * current CPU so we keep reading the same ITC throughout.
+ */
+void
+DELAY(int n)
+{
+	u_int64_t start, end, now;
+
+	sched_pin();
+
+	start = ia64_get_itc();
+	end = start + itc_freq * n;
+	/* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
+	do {
+		now = ia64_get_itc();
+	} while (now < end || (now > start && end < start));
+	/*
+	 * Second clause above keeps us spinning across an ITC wrap:
+	 * if 'end' overflowed past zero, wait until 'now' wraps too.
+	 */
+
+	sched_unpin();
+}
+
+/*
+ * Send an interrupt (signal) to a process: build a sigframe (saved
+ * ucontext plus optional siginfo) on the user stack or the alternate
+ * signal stack, copy it out to userland, and rewrite the trapframe so
+ * the thread resumes in the signal trampoline on the gateway page.
+ */
+void
+sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
+{
+	struct proc *p;
+	struct thread *td;
+	struct trapframe *tf;
+	struct sigacts *psp;
+	struct sigframe sf, *sfp;
+	u_int64_t sbs, sp;
+	int oonstack;
+	int sig;
+	u_long code;
+
+	td = curthread;
+	p = td->td_proc;
+	PROC_LOCK_ASSERT(p, MA_OWNED);
+	sig = ksi->ksi_signo;
+	code = ksi->ksi_code;
+	psp = p->p_sigacts;
+	mtx_assert(&psp->ps_mtx, MA_OWNED);
+	tf = td->td_frame;
+	sp = tf->tf_special.sp;
+	oonstack = sigonstack(sp);
+	sbs = 0;
+
+	/* save user context */
+	bzero(&sf, sizeof(struct sigframe));
+	sf.sf_uc.uc_sigmask = *mask;
+	sf.sf_uc.uc_stack = td->td_sigstk;
+	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
+	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
+
+	/*
+	 * Allocate and validate space for the signal handler
+	 * context. Note that if the stack is in P0 space, the
+	 * call to grow() is a nop, and the useracc() check
+	 * will fail if the process has not already allocated
+	 * the space with a `brk'.
+	 */
+	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
+	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
+		/* Use the alternate stack; sbs doubles as the new
+		 * register backing store base (16-byte aligned). */
+		sbs = (u_int64_t)td->td_sigstk.ss_sp;
+		sbs = (sbs + 15) & ~15;
+		sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
+#if defined(COMPAT_43)
+		td->td_sigstk.ss_flags |= SS_ONSTACK;
+#endif
+	} else
+		sfp = (struct sigframe *)sp;
+	/* Reserve room for the frame, 16-byte aligned. */
+	sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);
+
+	/* Fill in the siginfo structure for POSIX handlers. */
+	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
+		sf.sf_si = ksi->ksi_info;
+		sf.sf_si.si_signo = sig;
+		/*
+		 * XXX this shouldn't be here after code in trap.c
+		 * is fixed
+		 */
+		sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
+		/* SA_SIGINFO handlers get a pointer instead of a code. */
+		code = (u_int64_t)&sfp->sf_si;
+	}
+
+	/* Drop the locks around get_mcontext()/copyout(). */
+	mtx_unlock(&psp->ps_mtx);
+	PROC_UNLOCK(p);
+
+	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
+
+	/* Copy the frame out to userland. */
+	if (copyout(&sf, sfp, sizeof(sf)) != 0) {
+		/*
+		 * Process has trashed its stack; give it an illegal
+		 * instruction to halt it in its tracks.
+		 */
+		PROC_LOCK(p);
+		sigexit(td, SIGILL);
+		return;
+	}
+
+	/*
+	 * Resume at the matching trampoline on the gateway page (k5
+	 * holds its mapped base): break_sigtramp for interrupted
+	 * (non-syscall) frames, epc_sigtramp for syscall frames.
+	 */
+	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
+		tf->tf_special.psr &= ~IA64_PSR_RI;
+		tf->tf_special.iip = ia64_get_k5() +
+		    ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
+	} else
+		tf->tf_special.iip = ia64_get_k5() +
+		    ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);
+
+	/*
+	 * Setup the trapframe to return to the signal trampoline. We pass
+	 * information to the trampoline in the following registers:
+	 *
+	 *	gp	new backing store or NULL
+	 *	r8	signal number
+	 *	r9	signal code or siginfo pointer
+	 *	r10	signal handler (function descriptor)
+	 */
+	tf->tf_special.sp = (u_int64_t)sfp - 16;
+	tf->tf_special.gp = sbs;
+	tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
+	tf->tf_special.ndirty = 0;
+	tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
+	tf->tf_scratch.gr8 = sig;
+	tf->tf_scratch.gr9 = code;
+	tf->tf_scratch.gr10 = (u_int64_t)catcher;
+
+	/* Re-acquire the locks our caller expects to still hold. */
+	PROC_LOCK(p);
+	mtx_lock(&psp->ps_mtx);
+}
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken.  Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * state to gain improper privileges.
+ *
+ * MPSAFE
+ */
+int
+sys_sigreturn(struct thread *td,
+	struct sigreturn_args /* {
+		ucontext_t *sigcntxp;
+	} */ *uap)
+{
+	ucontext_t uc;
+	struct trapframe *tf;
+	struct pcb *pcb;
+
+	tf = td->td_frame;
+	pcb = td->td_pcb;
+
+	/*
+	 * Fetch the entire context structure at once for speed.
+	 * We don't use a normal argument to simplify RSE handling.
+	 */
+	if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
+		return (EFAULT);
+
+	/* Install the saved context; set_mcontext() sanitizes the PSR. */
+	set_mcontext(td, &uc.uc_mcontext);
+
+#if defined(COMPAT_43)
+	/* Keep the legacy SS_ONSTACK flag in sync with the new sp. */
+	if (sigonstack(tf->tf_special.sp))
+		td->td_sigstk.ss_flags |= SS_ONSTACK;
+	else
+		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
+#endif
+	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
+
+	/* Registers were set directly; skip the normal syscall return. */
+	return (EJUSTRETURN);
+}
+
+#ifdef COMPAT_FREEBSD4
+/*
+ * FreeBSD 4.x compatibility entry point; the argument layout matches
+ * sigreturn_args, so just forward to sys_sigreturn().
+ */
+int
+freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
+{
+
+	return sys_sigreturn(td, (struct sigreturn_args *)uap);
+}
+#endif
+
+/*
+ * Construct a PCB from a trapframe. This is called from kdb_trap() where
+ * we want to start a backtrace from the function that caused us to enter
+ * the debugger. We have the context in the trapframe, but base the trace
+ * on the PCB. The PCB doesn't have to be perfect, as long as it contains
+ * enough for a backtrace.
+ */
+void
+makectx(struct trapframe *tf, struct pcb *pcb)
+{
+
+	pcb->pcb_special = tf->tf_special;
+	pcb->pcb_special.__spare = ~0UL;	/* XXX see unwind.c */
+	/* Preserved (callee-saved) registers are captured live from
+	 * the current CPU, not from the trapframe. */
+	save_callee_saved(&pcb->pcb_preserved);
+	save_callee_saved_fp(&pcb->pcb_preserved_fp);
+}
+
+/*
+ * Flush the thread's dirty stacked registers from the kernel register
+ * stack out to the user backing store described by *r. On success,
+ * r->bspstore is advanced past the flushed bytes and r->ndirty is
+ * cleared. For curthread this uses RSE operations plus copyout();
+ * for any other thread the write goes through proc_rwmem().
+ */
+int
+ia64_flush_dirty(struct thread *td, struct _special *r)
+{
+	struct iovec iov;
+	struct uio uio;
+	uint64_t bspst, kstk, rnat;
+	int error, locked;
+
+	if (r->ndirty == 0)
+		return (0);
+
+	/* Kernel-stack address mirroring the user bspstore offset
+	 * within a 512-byte RSE cycle. */
+	kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
+	if (td == curthread) {
+		/* Stop the RSE while we inspect/flush it. */
+		__asm __volatile("mov	ar.rsc=0;;");
+		__asm __volatile("mov	%0=ar.bspstore" : "=r"(bspst));
+		/* Make sure we have all the user registers written out. */
+		if (bspst - kstk < r->ndirty) {
+			__asm __volatile("flushrs;;");
+			__asm __volatile("mov	%0=ar.bspstore" : "=r"(bspst));
+		}
+		__asm __volatile("mov	%0=ar.rnat;;" : "=r"(rnat));
+		__asm __volatile("mov	ar.rsc=3");
+		error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
+		kstk += r->ndirty;
+		/* Pick up the NaT collection word from the stack if the
+		 * RSE already wrote past it; otherwise use live ar.rnat. */
+		r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
+		    ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
+	} else {
+		/* Hold the process while we write its address space. */
+		locked = PROC_LOCKED(td->td_proc);
+		if (!locked)
+			PHOLD(td->td_proc);
+		iov.iov_base = (void*)(uintptr_t)kstk;
+		iov.iov_len = r->ndirty;
+		uio.uio_iov = &iov;
+		uio.uio_iovcnt = 1;
+		uio.uio_offset = r->bspstore;
+		uio.uio_resid = r->ndirty;
+		uio.uio_segflg = UIO_SYSSPACE;
+		uio.uio_rw = UIO_WRITE;
+		uio.uio_td = td;
+		error = proc_rwmem(td->td_proc, &uio);
+		/*
+		 * XXX proc_rwmem() doesn't currently return ENOSPC,
+		 * so I think it can bogusly return 0. Neither do
+		 * we allow short writes.
+		 */
+		if (uio.uio_resid != 0 && error == 0)
+			error = ENOSPC;
+		if (!locked)
+			PRELE(td->td_proc);
+	}
+
+	r->bspstore += r->ndirty;
+	r->ndirty = 0;
+	return (error);
+}
+
+/*
+ * Capture the thread's machine context into *mc. Dirty stacked
+ * registers are flushed to the user backing store first. With
+ * GET_MC_CLEAR_RET, the syscall return registers (r8-r11) are zeroed
+ * in the copied scratch set. Returns the ia64_flush_dirty() error.
+ */
+int
+get_mcontext(struct thread *td, mcontext_t *mc, int flags)
+{
+	struct trapframe *tf;
+	int error;
+
+	tf = td->td_frame;
+	bzero(mc, sizeof(*mc));
+	mc->mc_special = tf->tf_special;
+	error = ia64_flush_dirty(td, &mc->mc_special);
+	if (tf->tf_flags & FRAME_SYSCALL) {
+		mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
+		mc->mc_scratch = tf->tf_scratch;
+		if (flags & GET_MC_CLEAR_RET) {
+			mc->mc_scratch.gr8 = 0;
+			mc->mc_scratch.gr9 = 0;
+			mc->mc_scratch.gr10 = 0;
+			mc->mc_scratch.gr11 = 0;
+		}
+	} else {
+		/* Async (trap/interrupt) frames also carry scratch FP
+		 * and the high FP partition. */
+		mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
+		mc->mc_scratch = tf->tf_scratch;
+		mc->mc_scratch_fp = tf->tf_scratch_fp;
+		/*
+		 * XXX If the thread never used the high FP registers, we
+		 * probably shouldn't waste time saving them.
+		 */
+		ia64_highfp_save(td);
+		mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
+		mc->mc_high_fp = td->td_pcb->pcb_high_fp;
+	}
+	/* Preserved registers are captured live from the CPU. */
+	save_callee_saved(&mc->mc_preserved);
+	save_callee_saved_fp(&mc->mc_preserved_fp);
+	return (error);
+}
+
+/*
+ * Install machine context *mc into the thread's trapframe. The PSR is
+ * sanitized so userland can only change the user-mask bits and the
+ * restart-instruction field; ndirty is forced to zero since the new
+ * context brings no dirty stacked registers.
+ */
+int
+set_mcontext(struct thread *td, mcontext_t *mc)
+{
+	struct _special s;
+	struct trapframe *tf;
+	uint64_t psrmask;
+
+	tf = td->td_frame;
+
+	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
+	    ("Whoa there! We have more than 8KB of dirty registers!"));
+
+	s = mc->mc_special;
+	/*
+	 * Only copy the user mask and the restart instruction bit from
+	 * the new context.
+	 */
+	psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
+	    IA64_PSR_MFH | IA64_PSR_RI;
+	s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
+	/* We don't have any dirty registers of the new context. */
+	s.ndirty = 0;
+	if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
+		/*
+		 * We can get an async context passed to us while we
+		 * entered the kernel through a syscall: sigreturn(2)
+		 * takes contexts that could previously be the result of
+		 * a trap or interrupt.
+		 * Hence, we cannot assert that the trapframe is not
+		 * a syscall frame, but we can assert that it's at
+		 * least an expected syscall.
+		 */
+		if (tf->tf_flags & FRAME_SYSCALL) {
+			KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn, ("foo"));
+			tf->tf_flags &= ~FRAME_SYSCALL;
+		}
+		tf->tf_scratch = mc->mc_scratch;
+		tf->tf_scratch_fp = mc->mc_scratch_fp;
+		if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
+			td->td_pcb->pcb_high_fp = mc->mc_high_fp;
+	} else {
+		KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
+		if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
+			/* Synchronous context from getcontext(2)-style
+			 * capture: keep the frame's cfm/iip. */
+			s.cfm = tf->tf_special.cfm;
+			s.iip = tf->tf_special.iip;
+			tf->tf_scratch.gr15 = 0;	/* Clear syscall nr. */
+		} else
+			tf->tf_scratch = mc->mc_scratch;
+	}
+	tf->tf_special = s;
+	/* Preserved registers are restored directly into the CPU. */
+	restore_callee_saved(&mc->mc_preserved);
+	restore_callee_saved_fp(&mc->mc_preserved_fp);
+
+	return (0);
+}
+
+/*
+ * Clear registers on exec. Builds the initial user frame: the stack
+ * pointer, entry IP and default PSR/FPSR/RSC, and arranges for the
+ * first three stacked arguments (stack, ps_strings, 0) to be loaded
+ * into the new image's register frame via the backing store.
+ */
+void
+exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
+{
+	struct trapframe *tf;
+	uint64_t *ksttop, *kst;
+
+	tf = td->td_frame;
+	/* Top of the dirty area on the kernel register stack. */
+	ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
+	    (tf->tf_special.bspstore & 0x1ffUL));
+
+	/*
+	 * We can ignore up to 8KB of dirty registers by masking off the
+	 * lower 13 bits in exception_restore() or epc_syscall(). This
+	 * should be enough for a couple of years, but if there are more
+	 * than 8KB of dirty registers, we lose track of the bottom of
+	 * the kernel stack. The solution is to copy the active part of
+	 * the kernel stack down 1 page (or 2, but not more than that)
+	 * so that we always have less than 8KB of dirty registers.
+	 */
+	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
+	    ("Whoa there! We have more than 8KB of dirty registers!"));
+
+	bzero(&tf->tf_special, sizeof(tf->tf_special));
+	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {	/* break syscalls. */
+		bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
+		bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
+		tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
+		tf->tf_special.bspstore = IA64_BACKINGSTORE;
+		/*
+		 * Copy the arguments onto the kernel register stack so that
+		 * they get loaded by the loadrs instruction. Skip over the
+		 * NaT collection points.
+		 */
+		kst = ksttop - 1;
+		/* Addresses ending in 0x1f8 hold NaT collection words. */
+		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
+			*kst-- = 0;
+		*kst-- = 0;
+		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
+			*kst-- = 0;
+		*kst-- = imgp->ps_strings;
+		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
+			*kst-- = 0;
+		*kst = stack;
+		tf->tf_special.ndirty = (ksttop - kst) << 3;
+	} else {				/* epc syscalls (default). */
+		tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
+		tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
+		/*
+		 * Write values for out0, out1 and out2 to the user's backing
+		 * store and arrange for them to be restored into the user's
+		 * initial register frame.
+		 * Assumes that (bspstore & 0x1f8) < 0x1e0.
+		 */
+		suword((caddr_t)tf->tf_special.bspstore - 24, stack);
+		suword((caddr_t)tf->tf_special.bspstore - 16, imgp->ps_strings);
+		suword((caddr_t)tf->tf_special.bspstore -  8, 0);
+	}
+
+	/* Common initial state: entry point, 16-byte aligned sp with a
+	 * scratch area, default RSC/FPSR, and user-level PSR. */
+	tf->tf_special.iip = imgp->entry_addr;
+	tf->tf_special.sp = (stack & ~15) - 16;
+	tf->tf_special.rsc = 0xf;
+	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
+	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
+	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
+	    IA64_PSR_CPL_USER;
+}
+
+/*
+ * Set the resume PC for ptrace. On ia64 an instruction address encodes
+ * a 16-byte bundle address plus a slot number (0-2) in the low bits:
+ * the slot goes into PSR.ri, the bundle address into IIP. Slot values
+ * other than 0-2 are rejected with EINVAL.
+ */
+int
+ptrace_set_pc(struct thread *td, unsigned long addr)
+{
+	uint64_t slot;
+
+	switch (addr & 0xFUL) {
+	case 0:
+		slot = IA64_PSR_RI_0;
+		break;
+	case 1:
+		/* XXX we need to deal with MLX bundles here */
+		slot = IA64_PSR_RI_1;
+		break;
+	case 2:
+		slot = IA64_PSR_RI_2;
+		break;
+	default:
+		return (EINVAL);
+	}
+
+	td->td_frame->tf_special.iip = addr & ~0x0FULL;
+	td->td_frame->tf_special.psr =
+	    (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
+	return (0);
+}
+
+/*
+ * Arm single stepping for ptrace: PSR.ss for ordinary frames, or the
+ * lower-privilege transfer trap (PSR.lp) for EPC syscall frames, since
+ * single-step can't be set on the EPC return path (see below).
+ */
+int
+ptrace_single_step(struct thread *td)
+{
+	struct trapframe *tf;
+
+	/*
+	 * There's no way to set single stepping when we're leaving the
+	 * kernel through the EPC syscall path. The way we solve this is
+	 * by enabling the lower-privilege trap so that we re-enter the
+	 * kernel as soon as the privilege level changes. See trap.c for
+	 * how we proceed from there.
+	 */
+	tf = td->td_frame;
+	if (tf->tf_flags & FRAME_SYSCALL)
+		tf->tf_special.psr |= IA64_PSR_LP;
+	else
+		tf->tf_special.psr |= IA64_PSR_SS;
+	return (0);
+}
+
+/*
+ * Disarm single stepping for ptrace by clearing every PSR bit the
+ * kernel may have used to implement it (ss, lp and tb).
+ */
+int
+ptrace_clear_single_step(struct thread *td)
+{
+	struct trapframe *tf;
+
+	/*
+	 * Clear any and all status bits we may use to implement single
+	 * stepping.
+	 */
+	tf = td->td_frame;
+	tf->tf_special.psr &= ~IA64_PSR_SS;
+	tf->tf_special.psr &= ~IA64_PSR_LP;
+	tf->tf_special.psr &= ~IA64_PSR_TB;
+	return (0);
+}
+
+/*
+ * Fill *regs with the thread's general register state (for ptrace and
+ * core dumps). Special and scratch registers come from the trapframe;
+ * preserved registers are captured live via save_callee_saved() —
+ * NOTE(review): presumably valid because the target thread is stopped
+ * with its preserved registers spilled; confirm against callers.
+ */
+int
+fill_regs(struct thread *td, struct reg *regs)
+{
+	struct trapframe *tf;
+
+	tf = td->td_frame;
+	regs->r_special = tf->tf_special;
+	regs->r_scratch = tf->tf_scratch;
+	save_callee_saved(&regs->r_preserved);
+	return (0);
+}
+
+/*
+ * Install general register state from *regs into the thread. Dirty
+ * stacked registers are flushed to the user backing store first; the
+ * new special state then starts with an advanced bspstore and no
+ * dirty registers. Returns the flush error, if any.
+ */
+int
+set_regs(struct thread *td, struct reg *regs)
+{
+	struct trapframe *tf;
+	int error;
+
+	tf = td->td_frame;
+	error = ia64_flush_dirty(td, &tf->tf_special);
+	if (!error) {
+		tf->tf_special = regs->r_special;
+		tf->tf_special.bspstore += tf->tf_special.ndirty;
+		tf->tf_special.ndirty = 0;
+		tf->tf_scratch = regs->r_scratch;
+		restore_callee_saved(&regs->r_preserved);
+	}
+	return (error);
+}
+
+/*
+ * Debug registers are not supported via ptrace on ia64.
+ */
+int
+fill_dbregs(struct thread *td, struct dbreg *dbregs)
+{
+
+	return (ENOSYS);
+}
+
+/*
+ * Debug registers are not supported via ptrace on ia64.
+ */
+int
+set_dbregs(struct thread *td, struct dbreg *dbregs)
+{
+
+	return (ENOSYS);
+}
+
+/*
+ * Fill *fpregs with the thread's floating-point state: scratch FP from
+ * the trapframe, preserved FP captured live, and the high FP partition
+ * from the PCB after forcing it to be saved.
+ */
+int
+fill_fpregs(struct thread *td, struct fpreg *fpregs)
+{
+	struct trapframe *frame = td->td_frame;
+	struct pcb *pcb = td->td_pcb;
+
+	/* Save the high FP registers. */
+	ia64_highfp_save(td);
+
+	fpregs->fpr_scratch = frame->tf_scratch_fp;
+	save_callee_saved_fp(&fpregs->fpr_preserved);
+	fpregs->fpr_high = pcb->pcb_high_fp;
+	return (0);
+}
+
+/*
+ * Install floating-point state from *fpregs into the thread. Any live
+ * high FP contents are dropped first so the PCB copy we write here is
+ * authoritative when the thread next touches the high partition.
+ */
+int
+set_fpregs(struct thread *td, struct fpreg *fpregs)
+{
+	struct trapframe *frame = td->td_frame;
+	struct pcb *pcb = td->td_pcb;
+
+	/* Throw away the high FP registers (should be redundant). */
+	ia64_highfp_drop(td);
+
+	frame->tf_scratch_fp = fpregs->fpr_scratch;
+	restore_callee_saved_fp(&fpregs->fpr_preserved);
+	pcb->pcb_high_fp = fpregs->fpr_high;
+	return (0);
+}
+
+/*
+ * Make the instruction cache coherent with recent data writes in
+ * [va, va + sz): flush-cache (fc.i) every 32 bytes across the range
+ * (XXX hardcoded stride), then issue sync.i and an instruction
+ * serialize. No-op when the CPU doesn't require explicit syncing.
+ */
+void
+ia64_sync_icache(vm_offset_t va, vm_offset_t sz)
+{
+	vm_offset_t lim;
+
+	if (!ia64_sync_icache_needed)
+		return;
+
+	lim = va + sz;
+	while (va < lim) {
+		ia64_fc_i(va);
+		va += 32;	/* XXX */
+	}
+
+	ia64_sync_i();
+	ia64_srlz_i();
+}


Property changes on: trunk/sys/ia64/ia64/machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/mca.c
===================================================================
--- trunk/sys/ia64/ia64/mca.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/mca.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,332 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002-2010 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/mca.c 253559 2013-07-23 02:38:23Z marcel $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/uuid.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <machine/intr.h>
+#include <machine/mca.h>
+#include <machine/pal.h>
+#include <machine/sal.h>
+#include <machine/smp.h>
+
+static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");
+
+struct mca_info {
+	STAILQ_ENTRY(mca_info) mi_link;
+	u_long	mi_seqnr;
+	u_int	mi_cpuid;
+	size_t	mi_recsz;
+	char	mi_record[0];
+};
+
+STAILQ_HEAD(mca_info_list, mca_info);
+
+static int64_t		mca_info_size[SAL_INFO_TYPES];
+static vm_offset_t	mca_info_block;
+static struct mtx	mca_info_block_lock;
+
+static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RW, NULL, "MCA container");
+
+static int mca_count;		/* Number of records stored. */
+static int mca_first;		/* First (lowest) record ID. */
+static int mca_last;		/* Last (highest) record ID. */
+
+SYSCTL_INT(_hw_mca, OID_AUTO, count, CTLFLAG_RD, &mca_count, 0,
+    "Record count");
+SYSCTL_INT(_hw_mca, OID_AUTO, first, CTLFLAG_RD, &mca_first, 0,
+    "First record id");
+SYSCTL_INT(_hw_mca, OID_AUTO, last, CTLFLAG_RD, &mca_last, 0,
+    "Last record id");
+
+static struct mtx mca_sysctl_lock;
+
+static u_int mca_xiv_cmc;
+
+/*
+ * Sysctl handler for hw.mca.inject: writing an integer asks PAL to
+ * inject a machine check of that type (PAL_MC_ERROR_INJECT); the PAL
+ * status and results are printed to the console. Reads return 0.
+ */
+static int
+mca_sysctl_inject(SYSCTL_HANDLER_ARGS)
+{
+	struct ia64_pal_result res;
+	u_int val;
+	int error;
+
+	val = 0;
+	error = sysctl_wire_old_buffer(req, sizeof(u_int));
+	if (!error)
+		error = sysctl_handle_int(oidp, &val, 0, req);
+
+	/* Nothing more to do unless this is a write request. */
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+
+	/*
+	 * Example values for injecting PAL determined machine checks:
+	 *	corrected	9
+	 *	recoverable	73
+	 *	fatal		137
+	 */
+	res = ia64_call_pal_stacked(PAL_MC_ERROR_INJECT, val, 0, 0);
+	printf("%s: %#lx, %#lx, %#lx, %#lx\n", __func__, res.pal_status,
+	    res.pal_result[0], res.pal_result[1], res.pal_result[2]);
+	return (0);
+}
+SYSCTL_PROC(_hw_mca, OID_AUTO, inject, CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
+    mca_sysctl_inject, "I", "set to trigger a MCA");
+
+/*
+ * Generic opaque sysctl handler used for the per-record MCA nodes:
+ * arg1 points at the record buffer, arg2 is its size. Reads copy the
+ * record out; writes copy new data in.
+ */
+static int
+mca_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+	int error = 0;
+
+	if (!arg1)
+		return (EINVAL);
+	error = SYSCTL_OUT(req, arg1, arg2);
+
+	if (error || !req->newptr)
+		return (error);
+
+	error = SYSCTL_IN(req, arg1, arg2);
+	return (error);
+}
+
+/*
+ * Drain all pending SAL error records of the given type into *reclst.
+ * Each record is read from the shared info block (under the spin lock),
+ * copied into a malloc'ed mca_info, cleared from SAL, and queued.
+ * Stops when SAL reports no more records or allocation fails.
+ */
+static void
+ia64_mca_collect_state(int type, struct mca_info_list *reclst)
+{
+	struct ia64_sal_result result;
+	struct mca_record_header *hdr;
+	struct mca_info *rec;
+	uint64_t seqnr;
+	size_t recsz;
+
+	/*
+	 * Don't try to get the state if we couldn't get the size of
+	 * the state information previously.
+	 */
+	if (mca_info_size[type] == -1)
+		return;
+
+	/* No shared info block was allocated; nothing we can do. */
+	if (mca_info_block == 0)
+		return;
+
+	while (1) {
+		mtx_lock_spin(&mca_info_block_lock);
+		result = ia64_sal_entry(SAL_GET_STATE_INFO, type, 0,
+		    mca_info_block, 0, 0, 0, 0);
+		if (result.sal_status < 0) {
+			/* No (more) records of this type. */
+			mtx_unlock_spin(&mca_info_block_lock);
+			break;
+		}
+
+		hdr = (struct mca_record_header *)mca_info_block;
+		recsz = hdr->rh_length;
+		seqnr = hdr->rh_seqnr;
+
+		/* Drop the spin lock around the (sleepable) malloc. */
+		mtx_unlock_spin(&mca_info_block_lock);
+
+		rec = malloc(sizeof(struct mca_info) + recsz, M_MCA,
+		    M_NOWAIT | M_ZERO);
+		if (rec == NULL)
+			/* XXX: Not sure what to do. */
+			break;
+
+		rec->mi_seqnr = seqnr;
+		rec->mi_cpuid = PCPU_GET(cpuid);
+
+		mtx_lock_spin(&mca_info_block_lock);
+
+		/*
+		 * If the info block doesn't have our record anymore because
+		 * we temporarily unlocked it, get it again from SAL. I assume
+		 * that it's possible that we could get a different record.
+		 * I expect this to happen in a SMP configuration where the
+		 * record has been cleared by a different processor. So, if
+		 * we get a different record we simply abort with this record
+		 * and start over.
+		 */
+		if (seqnr != hdr->rh_seqnr) {
+			/* NOTE(review): the status of this re-read is not
+			 * checked before hdr is consulted again — confirm
+			 * SAL leaves the block intact on failure. */
+			result = ia64_sal_entry(SAL_GET_STATE_INFO, type, 0,
+			    mca_info_block, 0, 0, 0, 0);
+			if (seqnr != hdr->rh_seqnr) {
+				mtx_unlock_spin(&mca_info_block_lock);
+				free(rec, M_MCA);
+				continue;
+			}
+		}
+
+		rec->mi_recsz = recsz;
+		bcopy((char*)mca_info_block, rec->mi_record, recsz);
+
+		/*
+		 * Clear the state so that we get any other records when
+		 * they exist.
+		 */
+		result = ia64_sal_entry(SAL_CLEAR_STATE_INFO, type, 0, 0, 0,
+		    0, 0, 0);
+
+		mtx_unlock_spin(&mca_info_block_lock);
+
+		STAILQ_INSERT_TAIL(reclst, rec, mi_link);
+	}
+}
+
+/*
+ * Harvest all SAL error records of the given type and publish each as
+ * a read-only opaque sysctl under hw.mca.<seqnr>.<cpuid>, updating the
+ * hw.mca.count/first/last bookkeeping under the sysctl sleep lock.
+ */
+void
+ia64_mca_save_state(int type)
+{
+	char name[64];
+	struct mca_info_list reclst = STAILQ_HEAD_INITIALIZER(reclst);
+	struct mca_info *rec;
+	struct sysctl_oid *oid;
+
+	ia64_mca_collect_state(type, &reclst);
+
+	STAILQ_FOREACH(rec, &reclst, mi_link) {
+		/* Node named after the record's sequence number. */
+		sprintf(name, "%lu", rec->mi_seqnr);
+		oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca),
+		    OID_AUTO, name, CTLFLAG_RW, NULL, name);
+		if (oid == NULL)
+			continue;
+
+		mtx_lock(&mca_sysctl_lock);
+		if (mca_count > 0) {
+			if (rec->mi_seqnr < mca_first)
+				mca_first = rec->mi_seqnr;
+			else if (rec->mi_seqnr > mca_last)
+				mca_last = rec->mi_seqnr;
+		} else
+			mca_first = mca_last = rec->mi_seqnr;
+		mca_count++;
+		mtx_unlock(&mca_sysctl_lock);
+
+		/* Leaf named after the CPU that produced the record. */
+		sprintf(name, "%u", rec->mi_cpuid);
+		SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), rec->mi_cpuid,
+		    name, CTLTYPE_OPAQUE | CTLFLAG_RD, rec->mi_record,
+		    rec->mi_recsz, mca_sysctl_handler, "S,MCA", "MCA record");
+	}
+}
+
+/*
+ * XIV handler for corrected machine check (CMC) interrupts. Currently
+ * it only logs the event; record collection is left to the sysctl/MCA
+ * save paths.
+ */
+static u_int
+ia64_mca_intr(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+	if (xiv == mca_xiv_cmc) {
+		printf("MCA: corrected machine check (CMC) interrupt\n");
+		return (0);
+	}
+
+	return (0);
+}
+
+/*
+ * Per-AP MCA setup: program the CMC vector register with the XIV the
+ * BSP allocated in ia64_mca_init(), if any.
+ */
+void
+ia64_mca_init_ap(void)
+{
+
+	if (mca_xiv_cmc != 0)
+		ia64_set_cmcv(mca_xiv_cmc);
+}
+
+/*
+ * BSP-side MCA initialization: size and allocate the shared SAL
+ * state-info block, set up the protecting locks, harvest any error
+ * records left over from before boot, and allocate/program a XIV for
+ * corrected machine check (CMC) interrupts.
+ */
+void
+ia64_mca_init(void)
+{
+	struct ia64_sal_result result;
+	uint64_t max_size;
+	char *p;
+	int i;
+
+	/*
+	 * Get the sizes of the state information we can get from SAL and
+	 * allocate a common block (forgive me my Fortran :-) for use by
+	 * support functions. We create a region 7 address to make it
+	 * easy on the OS_MCA or OS_INIT handlers to get the state info
+	 * under unreliable conditions.
+	 */
+	max_size = 0;
+	for (i = 0; i < SAL_INFO_TYPES; i++) {
+		result = ia64_sal_entry(SAL_GET_STATE_INFO_SIZE, i, 0, 0, 0,
+		    0, 0, 0);
+		if (result.sal_status == 0) {
+			mca_info_size[i] = result.sal_result[0];
+			if (mca_info_size[i] > max_size)
+				max_size = mca_info_size[i];
+		} else
+			mca_info_size[i] = -1;	/* Size query failed. */
+	}
+	max_size = round_page(max_size);
+
+	/* Physically contiguous, page-aligned, below 256MB. */
+	p = (max_size) ? contigmalloc(max_size, M_TEMP, M_NOWAIT, 0ul, ~0ul,
+	    PAGE_SIZE, 256*1024*1024) : NULL;
+	if (p != NULL) {
+		mca_info_block = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)p));
+
+		if (bootverbose)
+			printf("MCA: allocated %ld bytes for state info.\n",
+			    max_size);
+	}
+
+	/*
+	 * Initialize the spin lock used to protect the info block. When APs
+	 * get launched, there's a short moment of contention, but in all other
+	 * cases it's not a hot spot. I think it's possible to have the MCA
+	 * handler be called on multiple processors at the same time, but that
+	 * should be rare. On top of that, performance is not an issue when
+	 * dealing with machine checks...
+	 */
+	mtx_init(&mca_info_block_lock, "MCA info lock", NULL, MTX_SPIN);
+
+	/*
+	 * Serialize sysctl operations with a sleep lock. Note that this
+	 * implies that we update the sysctl tree in a context that allows
+	 * sleeping.
+	 */
+	mtx_init(&mca_sysctl_lock, "MCA sysctl lock", NULL, MTX_DEF);
+
+	/*
+	 * Get and save any processor and platfom error records. Note that in
+	 * a SMP configuration the processor records are for the BSP only. We
+	 * let the APs get and save their own records when we wake them up.
+	 */
+	for (i = 0; i < SAL_INFO_TYPES; i++)
+		ia64_mca_save_state(i);
+
+	/*
+	 * Allocate a XIV for CMC interrupts, so that we can collect and save
+	 * the corrected processor checks.
+	 */
+	mca_xiv_cmc = ia64_xiv_alloc(PI_SOFT, IA64_XIV_PLAT, ia64_mca_intr);
+	if (mca_xiv_cmc != 0)
+		ia64_set_cmcv(mca_xiv_cmc);
+	else
+		printf("MCA: CMC vector could not be allocated\n");
+}


Property changes on: trunk/sys/ia64/ia64/mca.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/mem.c
===================================================================
--- trunk/sys/ia64/ia64/mem.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/mem.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,180 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and code derived from software contributed to
+ * Berkeley by William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from: Utah $Hdr: mem.c 1.13 89/10/08$
+ *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/mem.c 270296 2014-08-21 19:51:07Z emaste $");
+
+/*
+ * Memory special file
+ */
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/efi.h>
+#include <sys/fcntl.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/memrange.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+
+#include <machine/memdev.h>
+
+struct mem_range_softc mem_range_softc;
+
+/*
+ * Translate a physical address into a kernel virtual pointer using the
+ * EFI memory map. Write-back cacheable memory maps through region 7,
+ * everything else through region 6 (uncacheable). *limit is set to the
+ * number of bytes remaining in the containing descriptor. Returns
+ * EPERM for protections beyond read/write, EFAULT when no descriptor
+ * covers the address, EIO for memory marked bad.
+ */
+static int
+mem_phys2virt(vm_offset_t offset, int prot, void **ptr, u_long *limit)
+{
+	struct efi_md *md;
+
+	if (prot & ~(VM_PROT_READ | VM_PROT_WRITE))
+		return (EPERM);
+
+	md = efi_md_find(offset);
+	if (md == NULL)
+		return (EFAULT);
+
+	if (md->md_type == EFI_MD_TYPE_BAD)
+		return (EIO);
+
+	*ptr = (void *)((md->md_attr & EFI_MD_ATTR_WB)
+	    ? IA64_PHYS_TO_RR7(offset) : IA64_PHYS_TO_RR6(offset));
+	*limit = (md->md_pages * EFI_PAGE_SIZE) - (offset - md->md_phys);
+	return (0);
+}
+
+/* ARGSUSED */
+/*
+ * read/write handler for /dev/mem and /dev/kmem. Physical addresses
+ * (always for /dev/mem; for /dev/kmem when the offset lies in a
+ * region-6/7 direct map) are translated via the EFI memory map;
+ * other /dev/kmem offsets are validated as resident, accessible
+ * kernel virtual memory before the uiomove().
+ */
+int
+memrw(struct cdev *dev, struct uio *uio, int flags)
+{
+	struct iovec *iov;
+	off_t ofs;
+	vm_offset_t addr;
+	void *ptr;
+	u_long limit;
+	int count, error, phys, rw;
+
+	error = 0;
+	rw = (uio->uio_rw == UIO_READ) ? VM_PROT_READ : VM_PROT_WRITE;
+
+	while (uio->uio_resid > 0 && !error) {
+		iov = uio->uio_iov;
+		if (iov->iov_len == 0) {
+			/* Skip exhausted iovec entries. */
+			uio->uio_iov++;
+			uio->uio_iovcnt--;
+			if (uio->uio_iovcnt < 0)
+				panic("memrw");
+			continue;
+		}
+
+		ofs = uio->uio_offset;
+
+		phys = (dev2unit(dev) == CDEV_MINOR_MEM) ? 1 : 0;
+		if (phys == 0 && ofs >= IA64_RR_BASE(6)) {
+			/* Direct-mapped kmem address: strip the region
+			 * bits and treat it as physical. */
+			ofs = IA64_RR_MASK(ofs);
+			phys++;
+		}
+
+		if (phys) {
+			error = mem_phys2virt(ofs, rw, &ptr, &limit);
+			if (error)
+				return (error);
+
+			count = min(uio->uio_resid, limit);
+			error = uiomove(ptr, count, uio);
+		} else {
+			ptr = (void *)ofs;
+			count = iov->iov_len;
+
+			/*
+			 * Make sure that all of the pages are currently
+			 * resident so that we don't create any zero-fill
+			 * pages.
+			 */
+			limit = round_page(ofs + count);
+			addr = trunc_page(ofs);
+			if (addr < VM_MAXUSER_ADDRESS)
+				return (EINVAL);
+			for (; addr < limit; addr += PAGE_SIZE) {
+				if (pmap_kextract(addr) == 0)
+					return (EFAULT);
+			}
+			if (!kernacc(ptr, count, rw))
+				return (EFAULT);
+			error = uiomove(ptr, count, uio);
+		}
+		/* else panic! */
+	}
+	return (error);
+}
+
+/*
+ * allow user processes to MMAP some memory sections
+ * instead of going through read/write
+ */
+int
+memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
+    int prot, vm_memattr_t *memattr)
+{
+	void *ptr;
+	u_long limit;
+	int error;
+
+	/*
+	 * /dev/mem is the only one that makes sense through this
+	 * interface.  For /dev/kmem any physaddr we return here
+	 * could be transient and hence incorrect or invalid at
+	 * a later time.
+	 */
+	if (dev2unit(dev) != CDEV_MINOR_MEM)
+		return (ENXIO);
+
+	/* Validate the offset against the EFI map; the returned
+	 * region tells us which memory attribute to hand back. */
+	error = mem_phys2virt(offset, prot, &ptr, &limit);
+	if (error)
+		return (error);
+
+	*paddr = offset;
+	*memattr = ((uintptr_t)ptr >= IA64_RR_BASE(7)) ?
+	    VM_MEMATTR_WRITE_BACK : VM_MEMATTR_UNCACHEABLE;
+	return (0);
+}


Property changes on: trunk/sys/ia64/ia64/mem.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/mp_locore.S
===================================================================
--- trunk/sys/ia64/ia64/mp_locore.S	                        (rev 0)
+++ trunk/sys/ia64/ia64/mp_locore.S	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,276 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/mp_locore.S 221271 2011-04-30 20:49:00Z marcel $
+ */
+
+#include <machine/asm.h>
+#include <machine/ia64_cpu.h>
+#include <machine/pte.h>
+#include <assym.s>
+
+/*
+ * AP wake-up entry point. The handoff state is similar as for the BSP,
+ * as described on page 3-9 of the IPF SAL Specification. The difference
+ * lies in the contents of register b0. For APs this register holds the
+ * return address into the SAL rendezvous routine.
+ *
+ * Note that we're responsible for clearing the IRR bit by reading cr.ivr
+ * and issuing the EOI to the local SAPIC.
+ */
+	.align	32
+ENTRY_NOPROFILE(os_boot_rendez,0)
+	// Phase 1: acknowledge the wake-up IPI, then turn off interrupt
+	// collection and interrupts while we rewrite the translations.
+	// The st8 trace stores let cpu_mp_start() report how far a
+	// wedged AP got (ia64_ap_state.as_trace).
+{	.mmi
+	st8	[gp] = gp		// trace = 0x00
+	mov	r8 = cr.ivr		// clear IRR bit
+	add	r2 = 8, gp
+	;;
+}
+{	.mmi
+	srlz.d
+	mov	cr.eoi = r0		// ACK the wake-up
+	add	r3 = 16, gp
+	;;
+}
+{	.mmi
+	srlz.d
+	rsm	IA64_PSR_IC | IA64_PSR_I
+	mov	r16 = (IA64_PBVM_RR << 8) | (IA64_PBVM_PAGE_SHIFT << 2)
+	;;
+}
+{	.mmi
+	srlz.d
+	st8	[gp] = r2		// trace = 0x08
+	dep.z	r17 = IA64_PBVM_RR, 61, 3
+	;;
+}
+	// Phase 2: program the PBVM region register and insert the
+	// translation for the PBVM page table (slot 0 of the DTR).
+{	.mlx
+	mov     rr[r17] = r16
+	movl	r18 = IA64_PBVM_PGTBL
+	;;
+}
+{	.mmi
+	srlz.i
+	;;
+	st8	[gp] = r3		// trace = 0x10
+	nop	0
+	;;
+}
+{	.mmi
+	ld8	r16 = [r2], 16		// as_pgtbl_pte
+	ld8	r17 = [r3], 16		// as_pgtbl_itir
+	nop	0
+	;;
+}
+{	.mmi
+	mov	cr.itir = r17
+	mov	cr.ifa = r18
+	nop	0
+	;;
+}
+{	.mmi
+	srlz.d
+	ptr.d	r18, r17
+	nop	0
+	;;
+}
+{	.mmi
+	srlz.d
+	st8	[gp] = r2		// trace = 0x18
+	mov	r8 = r0
+	;;
+}
+{	.mmi
+	itr.d	dtr[r8] = r16
+	;;
+	srlz.d
+	mov	r9 = r0
+	;;
+}
+	// Phase 3: insert the kernel text translation, in both the
+	// data (DTR slot 1) and instruction (ITR slot 0) TRs.
+{	.mmi
+	ld8	r16 = [r2], 16		// as_text_va
+	st8	[gp] = r3		// trace = 0x20
+	add	r8 = 1, r8
+	;;
+}
+{	.mmi
+	ld8	r17 = [r3], 16		// as_text_pte
+	ld8	r18 = [r2], 16		// as_text_itir
+	nop	0
+	;;
+}
+{	.mmi
+	mov	cr.ifa = r16
+	mov	cr.itir = r18
+	nop	0
+	;;
+}
+{	.mmi
+	srlz.d
+	ptr.d	r16, r18
+	nop	0
+	;;
+}
+{	.mmi
+	srlz.d
+	st8	[gp] = r3		// trace = 0x30
+	nop	0
+	;;
+}
+{	.mmi
+	itr.d	dtr[r8] = r17
+	;;
+	srlz.d
+	nop	0
+}
+{	.mmi
+	st8	[gp] = r2		// trace = 0x38
+	ptr.i	r16, r18
+	add	r8 = 1, r8
+	;;
+}
+{	.mmi
+	srlz.i
+	;;
+	itr.i	itr[r9] = r17
+	nop	0
+	;;
+}
+	// Phase 4: insert the kernel data translation (DTR slot 2) and
+	// load the default DCR.
+{	.mmi
+	srlz.i
+	;;
+	ld8	r16 = [r3], 16          // as_data_va
+	add	r9 = 1, r9
+	;;
+}
+{	.mmi
+	st8	[gp] = r3		// trace = 0x40
+	ld8	r17 = [r2], 16		// as_data_pte
+	nop	0
+	;;
+}
+{	.mmi
+	mov	cr.ifa = r16
+	ld8	r18 = [r3], 16		// as_data_itir
+	nop	0
+	;;
+}
+{	.mmi
+	mov	cr.itir = r18
+	;;
+	srlz.d
+	nop	0
+	;;
+}
+{	.mmi
+	ptr.d	r16, r18
+	;;
+	srlz.d
+	mov	r19 = IA64_DCR_DEFAULT
+	;;
+}
+{	.mmi
+	itr.d	dtr[r8] = r17
+	;;
+	srlz.d
+	add	r8 = 1, r8
+	;;
+}
+{	.mmi
+	st8	[gp] = r2		// trace = 0x48
+	;;
+	ld8	r16 = [r2], 16		// as_kstack
+	nop	0
+}
+{	.mmi
+	ld8	r17 = [r3], 16		// as_kstack_top
+	mov	cr.dcr = r19
+	nop	0
+	;;
+}
+	// Phase 5: build a kernel-mode PSR with translation enabled and
+	// rfi to label 1 below to switch into virtual addressing.
+{	.mlx
+	srlz.i
+	movl	r18 = IA64_PSR_BN | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_IC | \
+			IA64_PSR_RT | IA64_PSR_DFH
+	;;
+}
+{	.mlx
+	mov	cr.ipsr = r18
+	movl	r19 = ia64_vector_table		// set up IVT early
+	;;
+}
+{	.mlx
+	mov	cr.iva = r19
+	movl	r18 = 1f
+	;;
+}
+{	.mmi
+	mov	cr.iip = r18
+	mov	cr.ifs = r0
+	nop	0
+	;;
+}
+{	.mmb
+	srlz.d
+	st8	[gp] = r2		// trace = 0x58
+	rfi
+	;;
+}
+
+	.align	32
+1:
+	// Now running virtual: point the RSE and the memory stack at the
+	// AP's kernel stack and call ia64_ap_startup() (never returns).
+{	.mlx
+	mov	ar.bspstore = r16
+	movl	gp = __gp
+	;;
+}
+{	.mmi
+	loadrs
+	add	sp = -16, r17
+	nop	0
+	;;
+}
+{	.mmi
+	mov	ar.rsc = 3
+	;;
+	alloc	r18 = ar.pfs, 0, 0, 0, 0
+	;;
+}
+{	.mib
+	nop	0
+	nop	0
+	br.call.sptk.few rp = ia64_ap_startup
+	;;
+}
+	/* NOT REACHED */
+9:
+{	.mib
+	nop	0
+	nop	0
+	br.sptk	9b
+	;;
+}
+END(os_boot_rendez)


Property changes on: trunk/sys/ia64/ia64/mp_locore.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/mp_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/mp_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/mp_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,544 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001-2005 Marcel Moolenaar
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/mp_machdep.c 271211 2014-09-06 22:17:54Z marcel $");
+
+#include "opt_kstack_pages.h"
+#include "opt_xtrace.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/ktr.h>
+#include <sys/proc.h>
+#include <sys/bus.h>
+#include <sys/kthread.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/pcpu.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/uuid.h>
+
+#include <machine/atomic.h>
+#include <machine/bootinfo.h>
+#include <machine/cpu.h>
+#include <machine/fpu.h>
+#include <machine/intr.h>
+#include <machine/mca.h>
+#include <machine/md_var.h>
+#include <machine/pal.h>
+#include <machine/pcb.h>
+#include <machine/sal.h>
+#include <machine/smp.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+
+extern uint64_t bdata[];
+
+extern int smp_disabled;
+
+MALLOC_DEFINE(M_SMP, "SMP", "SMP related allocations");
+
+void ia64_ap_startup(void);
+
+#define	SAPIC_ID_GET_ID(x)	((u_int)((x) >> 8) & 0xff)
+#define	SAPIC_ID_GET_EID(x)	((u_int)(x) & 0xff)
+#define	SAPIC_ID_SET(id, eid)	((u_int)(((id) & 0xff) << 8) | ((eid) & 0xff))
+
+/* State used to wake and bootstrap APs. */
+struct ia64_ap_state ia64_ap_state;
+
+int ia64_ipi_ast;
+int ia64_ipi_hardclock;
+int ia64_ipi_highfp;
+int ia64_ipi_nmi;
+int ia64_ipi_preempt;
+int ia64_ipi_rndzvs;
+int ia64_ipi_stop;
+
+/*
+ * Return the smallest page shift, no smaller than 12 (4KB), such that
+ * a page of that size covers the given byte size.
+ */
+static u_int
+sz2shft(uint64_t sz)
+{
+	uint64_t pgsz;
+	u_int shft;
+
+	/* Walk upwards from 4K until the page size covers sz. */
+	for (shft = 12, pgsz = 1 << 12; pgsz < sz; shft++)
+		pgsz <<= 1;
+	return (shft);
+}
+
+/*
+ * IPI handler: AST notification.  The AST itself is processed on the
+ * way out of the interrupt; here we only account for it.
+ */
+static u_int
+ia64_ih_ast(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+	PCPU_INC(md.stats.pcs_nasts);
+	CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
+	return (0);
+}
+
+/*
+ * IPI handler: deliver a hardclock tick to this CPU.  The IPI's trap
+ * frame is temporarily installed as the thread's interrupt frame so
+ * hardclockintr() sees the interrupted context; the previous frame is
+ * saved and restored around the call.
+ */
+static u_int
+ia64_ih_hardclock(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+	struct trapframe *stf;
+
+	PCPU_INC(md.stats.pcs_nhardclocks);
+	CTR1(KTR_SMP, "IPI_HARDCLOCK, cpuid=%d", PCPU_GET(cpuid));
+	stf = td->td_intr_frame;
+	td->td_intr_frame = tf;
+	hardclockintr();
+	td->td_intr_frame = stf;
+	return (0);
+}
+
+/*
+ * IPI handler: save this CPU's high FP register state on behalf of
+ * another CPU that wants to take ownership of it.
+ */
+static u_int
+ia64_ih_highfp(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+	PCPU_INC(md.stats.pcs_nhighfps);
+	ia64_highfp_save_ipi();
+	return (0);
+}
+
+/*
+ * IPI handler: ask the scheduler to preempt the thread currently
+ * running on this CPU.
+ */
+static u_int
+ia64_ih_preempt(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+	PCPU_INC(md.stats.pcs_npreempts);
+	CTR1(KTR_SMP, "IPI_PREEMPT, cpuid=%d", PCPU_GET(cpuid));
+	sched_preempt(curthread);
+	return (0);
+}
+
+/*
+ * IPI handler: take part in an smp_rendezvous() initiated by another
+ * CPU.
+ */
+static u_int
+ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+	PCPU_INC(md.stats.pcs_nrdvs);
+	CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
+	smp_rendezvous_action();
+	return (0);
+}
+
+/*
+ * IPI handler: stop this CPU.  The register context is saved into the
+ * per-CPU pcb and the CPU advertises itself in stopped_cpus, then
+ * spins until another CPU sets our bit in started_cpus.
+ */
+static u_int
+ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+	u_int cpuid;
+
+	PCPU_INC(md.stats.pcs_nstops);
+	cpuid = PCPU_GET(cpuid);
+
+	/* Save our context so it can be inspected (e.g. by a dump). */
+	savectx(PCPU_PTR(md.pcb));
+
+	CPU_SET_ATOMIC(cpuid, &stopped_cpus);
+	while (!CPU_ISSET(cpuid, &started_cpus))
+		cpu_spinwait();
+	CPU_CLR_ATOMIC(cpuid, &started_cpus);
+	CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
+	return (0);
+}
+
+/*
+ * Report the CPU topology to the scheduler.  No cache/package affinity
+ * information is available here, so report a flat topology.
+ */
+struct cpu_group *
+cpu_topo(void)
+{
+
+	/* style(9): parenthesize return values, as everywhere else here. */
+	return (smp_topo_none());
+}
+
+/*
+ * Kernel-process body (one instance per woken CPU, created by
+ * cpu_mp_unleash()): save the CPU-specific MCA state.  Exits via
+ * kproc_exit() when done.
+ */
+static void
+ia64_store_mca_state(void* arg)
+{
+	struct pcpu *pc = arg;
+	struct thread *td = curthread;
+
+	/*
+	 * ia64_mca_save_state() is CPU-sensitive, so bind ourself to our
+	 * target CPU.
+	 */
+	thread_lock(td);
+	sched_bind(td, pc->pc_cpuid);
+	thread_unlock(td);
+
+	ia64_mca_init_ap();
+
+	/*
+	 * Get and save the CPU specific MCA records. Should we get the
+	 * MCA state for each processor, or just the CMC state?
+	 */
+	ia64_mca_save_state(SAL_INFO_MCA);
+	ia64_mca_save_state(SAL_INFO_CMC);
+
+	kproc_exit(0);
+}
+
+/*
+ * C entry point for an AP, called from os_boot_rendez (mp_locore.S)
+ * once address translation is on and the AP is running on its own
+ * kernel stack.  Finishes per-CPU MMU/PAL setup, checks in with the
+ * BSP (as_awake/as_delay handshake), waits to be unleashed, and
+ * finally enters the scheduler.  Never returns.
+ */
+void
+ia64_ap_startup(void)
+{
+	uint64_t vhpt;
+
+	ia64_ap_state.as_trace = 0x100;
+
+	/* Program the region registers for KVA, and regions 6/7. */
+	ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);
+	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (LOG2_ID_PAGE_SIZE << 2));
+	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (LOG2_ID_PAGE_SIZE << 2));
+	ia64_srlz_d();
+
+	/* Make our pcpu reachable via the kernel register. */
+	pcpup = ia64_ap_state.as_pcpu;
+	ia64_set_k4((intptr_t)pcpup);
+
+	ia64_ap_state.as_trace = 0x108;
+
+	/* Map this CPU's VHPT (allocated by cpu_mp_start()). */
+	vhpt = pcpup->pc_md.vhpt;
+	map_vhpt(vhpt);
+	ia64_set_pta(vhpt + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
+	ia64_srlz_i();
+
+	ia64_ap_state.as_trace = 0x110;
+
+	/* Tell the BSP (spinning on as_delay) that we made it this far. */
+	ia64_ap_state.as_awake = 1;
+	ia64_ap_state.as_delay = 0;
+
+	map_pal_code();
+	map_gateway_page();
+
+	ia64_set_fpsr(IA64_FPSR_DEFAULT);
+
+#ifdef XTRACE
+	ia64_xtrace_init_ap(ia64_ap_state.as_xtrace_buffer);
+#endif
+
+	/* Wait until it's time for us to be unleashed */
+	while (ia64_ap_state.as_spin)
+		cpu_spinwait();
+
+	/* Initialize curthread. */
+	KASSERT(pcpup->pc_idlethread != NULL, ("no idle thread"));
+	pcpup->pc_curthread = pcpup->pc_idlethread;
+
+	pmap_invalidate_all();
+
+	/* Second check-in, counted by cpu_mp_unleash(). */
+	atomic_add_int(&ia64_ap_state.as_awake, 1);
+	while (!smp_started)
+		cpu_spinwait();
+
+	CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));
+
+	cpu_initclocks();
+
+	/* Accept all interrupts from here on. */
+	ia64_set_tpr(0);
+	ia64_srlz_d();
+
+	sched_throw(NULL);
+	/* NOTREACHED */
+}
+
+/*
+ * Determine mp_ncpus and mp_maxid.
+ */
+void
+cpu_mp_setmaxid(void)
+{
+
+	/*
+	 * Count the number of processors in the system by walking the ACPI
+	 * tables. Note that we record the actual number of processors, even
+	 * if this is larger than MAXCPU. We only activate MAXCPU processors.
+	 */
+	mp_ncpus = ia64_count_cpus();
+
+	/*
+	 * Set the largest cpuid we're going to use. This is necessary for
+	 * VM initialization.
+	 */
+	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
+}
+
+/*
+ * Decide whether SMP will be enabled (non-zero return means yes).
+ */
+int
+cpu_mp_probe(void)
+{
+
+	/*
+	 * If there's only 1 processor, or we don't have a wake-up vector,
+	 * we're not going to enable SMP. Note that no wake-up vector can
+	 * also mean that the wake-up mechanism is not supported. In this
+	 * case we can have multiple processors, but we simply can't wake
+	 * them up...
+	 */
+	return (mp_ncpus > 1 && ia64_ipi_wakeup != 0);
+}
+
+/*
+ * Register a processor (called while enumerating the ACPI MADT).  The
+ * CPU whose local ID matches ours is the BSP and becomes cpu0; every
+ * other CPU gets the next free cpuid plus freshly allocated pcpu and
+ * dynamic per-CPU storage.
+ */
+void
+cpu_mp_add(u_int acpi_id, u_int id, u_int eid)
+{
+	struct pcpu *pc;
+	void *dpcpu;
+	u_int cpuid, sapic_id;
+
+	if (smp_disabled)
+		return;
+
+	sapic_id = SAPIC_ID_SET(id, eid);
+	cpuid = (IA64_LID_GET_SAPIC_ID(ia64_get_lid()) == sapic_id)
+	    ? 0 : smp_cpus++;
+
+	/* Fixed: the message said "cpu%d" but printed the ACPI id. */
+	KASSERT(!CPU_ISSET(cpuid, &all_cpus),
+	    ("%s: cpu%d already in CPU map", __func__, cpuid));
+
+	if (cpuid != 0) {
+		pc = (struct pcpu *)malloc(sizeof(*pc), M_SMP, M_WAITOK);
+		pcpu_init(pc, cpuid, sizeof(*pc));
+		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+		    M_WAITOK | M_ZERO);
+		dpcpu_init(dpcpu, cpuid);
+	} else
+		pc = pcpup;
+
+	cpu_pcpu_setup(pc, acpi_id, sapic_id);
+
+	CPU_SET(pc->pc_cpuid, &all_cpus);
+}
+
+/*
+ * Print the ACPI id and SAPIC id/eid of every registered processor.
+ * The BSP (cpu0) is marked as such.
+ */
+void
+cpu_mp_announce(void)
+{
+	struct pcpu *pc;
+	uint32_t sapic_id;
+	int i;
+
+	for (i = 0; i <= mp_maxid; i++) {
+		pc = pcpu_find(i);
+		if (pc == NULL)
+			continue;
+		sapic_id = IA64_LID_GET_SAPIC_ID(pc->pc_md.lid);
+		printf("cpu%d: ACPI Id=%x, SAPIC Id=%x, SAPIC Eid=%x",
+		    i, pc->pc_acpi_id, SAPIC_ID_GET_ID(sapic_id),
+		    SAPIC_ID_GET_EID(sapic_id));
+		if (i == 0)
+			printf(" (BSP)\n");
+		else
+			printf("\n");
+	}
+}
+
+/*
+ * Wake up the application processors.  Registers os_boot_rendez
+ * (mp_locore.S) as the SAL OS boot rendezvous entry point, fills in
+ * ia64_ap_state with the PTEs the APs must install for the PBVM page
+ * table, kernel text and kernel data, and then sends each AP its
+ * wake-up IPI, waiting up to 2 seconds per AP for it to check in.
+ */
+void
+cpu_mp_start(void)
+{
+	struct ia64_sal_result result;
+	struct ia64_fdesc *fd;
+	struct pcpu *pc;
+	uintptr_t state;
+	u_char *stp;
+
+	state = ia64_tpa((uintptr_t)&ia64_ap_state);
+	fd = (struct ia64_fdesc *) os_boot_rendez;
+	/*
+	 * NOTE(review): the SAL status in 'result' is not checked; if
+	 * SAL_SET_VECTORS fails, no AP will ever rendezvous.  Confirm
+	 * whether a warning should be emitted here.
+	 */
+	result = ia64_sal_entry(SAL_SET_VECTORS, SAL_OS_BOOT_RENDEZ,
+	    ia64_tpa(fd->func), state, 0, 0, 0, 0);
+
+	/* Describe the mappings mp_locore.S must install on each AP. */
+	ia64_ap_state.as_pgtbl_pte = PTE_PRESENT | PTE_MA_WB |
+	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW |
+	    (bootinfo->bi_pbvm_pgtbl & PTE_PPN_MASK);
+	ia64_ap_state.as_pgtbl_itir = sz2shft(bootinfo->bi_pbvm_pgtblsz) << 2;
+	ia64_ap_state.as_text_va = IA64_PBVM_BASE;
+	ia64_ap_state.as_text_pte = PTE_PRESENT | PTE_MA_WB |
+	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RX |
+	    (ia64_tpa(IA64_PBVM_BASE) & PTE_PPN_MASK);
+	ia64_ap_state.as_text_itir = bootinfo->bi_text_mapped << 2;
+	ia64_ap_state.as_data_va = (uintptr_t)bdata;
+	ia64_ap_state.as_data_pte = PTE_PRESENT | PTE_MA_WB |
+	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW |
+	    (ia64_tpa((uintptr_t)bdata) & PTE_PPN_MASK);
+	ia64_ap_state.as_data_itir = bootinfo->bi_data_mapped << 2;
+
+	/* Keep 'em spinning until we unleash them... */
+	ia64_ap_state.as_spin = 1;
+
+	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
+		pc->pc_md.current_pmap = kernel_pmap;
+		/* The BSP is obviously running already. */
+		if (pc->pc_cpuid == 0) {
+			pc->pc_md.awake = 1;
+			continue;
+		}
+
+		ia64_ap_state.as_pcpu = pc;
+		pc->pc_md.vhpt = pmap_alloc_vhpt();
+		if (pc->pc_md.vhpt == 0) {
+			printf("SMP: WARNING: unable to allocate VHPT"
+			    " for cpu%d", pc->pc_cpuid);
+			continue;
+		}
+
+		stp = malloc(KSTACK_PAGES * PAGE_SIZE, M_SMP, M_WAITOK);
+		ia64_ap_state.as_kstack = stp;
+		ia64_ap_state.as_kstack_top = stp + KSTACK_PAGES * PAGE_SIZE;
+
+#ifdef XTRACE
+		ia64_ap_state.as_xtrace_buffer = ia64_xtrace_alloc();
+#endif
+
+		ia64_ap_state.as_trace = 0;
+		ia64_ap_state.as_delay = 2000;
+		ia64_ap_state.as_awake = 0;
+
+		if (bootverbose)
+			printf("SMP: waking up cpu%d\n", pc->pc_cpuid);
+
+		/* Here she goes... */
+		ipi_send(pc, ia64_ipi_wakeup);
+		/* The AP zeroes as_delay when it checks in (~2s timeout). */
+		do {
+			DELAY(1000);
+		} while (--ia64_ap_state.as_delay > 0);
+
+		pc->pc_md.awake = ia64_ap_state.as_awake;
+
+		if (!ia64_ap_state.as_awake) {
+			printf("SMP: WARNING: cpu%d did not wake up (code "
+			    "%#lx)\n", pc->pc_cpuid,
+			    ia64_ap_state.as_trace - state);
+		}
+	}
+}
+
+/*
+ * SYSINIT hook (SI_SUB_KICK_SCHEDULER): allocate the IPI vectors, let
+ * the APs that woke up in cpu_mp_start() proceed into the scheduler,
+ * and bind interrupts across all CPUs.
+ */
+static void
+cpu_mp_unleash(void *dummy)
+{
+	struct pcpu *pc;
+	int cpus;
+
+	if (mp_ncpus <= 1)
+		return;
+
+	/* Allocate XIVs for IPIs */
+	ia64_ipi_ast = ia64_xiv_alloc(PI_DULL, IA64_XIV_IPI, ia64_ih_ast);
+	ia64_ipi_hardclock = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI,
+	    ia64_ih_hardclock);
+	ia64_ipi_highfp = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_highfp);
+	ia64_ipi_preempt = ia64_xiv_alloc(PI_SOFT, IA64_XIV_IPI,
+	    ia64_ih_preempt);
+	ia64_ipi_rndzvs = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_rndzvs);
+	ia64_ipi_stop = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI, ia64_ih_stop);
+
+	/* Reserve the NMI vector for IPI_STOP_HARD if possible */
+	ia64_ipi_nmi = (ia64_xiv_reserve(2, IA64_XIV_IPI, ia64_ih_stop) != 0)
+	    ? ia64_ipi_stop : 0x400;	/* DM=NMI, Vector=n/a */
+
+	/* Start an MCA-state-saving kproc for every CPU that woke up. */
+	cpus = 0;
+	smp_cpus = 0;
+	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
+		cpus++;
+		if (pc->pc_md.awake) {
+			kproc_create(ia64_store_mca_state, pc, NULL, 0, 0,
+			    "mca %u", pc->pc_cpuid);
+			smp_cpus++;
+		}
+	}
+
+	/* Unleash the APs spinning in ia64_ap_startup(). */
+	ia64_ap_state.as_awake = 1;
+	ia64_ap_state.as_spin = 0;
+
+	/* Each AP increments as_awake once more as it comes through. */
+	while (ia64_ap_state.as_awake != smp_cpus)
+		cpu_spinwait();
+
+	if (smp_cpus != cpus || cpus != mp_ncpus) {
+		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
+		    mp_ncpus, cpus, smp_cpus);
+	}
+
+	/* XXX Atomic set operation? */
+	smp_started = 1;
+
+	/*
+	 * Now that all CPUs are up and running, bind interrupts to each of
+	 * them.
+	 */
+	ia64_bind_intr();
+}
+SYSINIT(start_aps, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, cpu_mp_unleash, NULL);
+
+/*
+ * Send an IPI to every CPU in the given set.
+ */
+void
+ipi_selected(cpuset_t cpus, int ipi)
+{
+	struct pcpu *pc;
+
+	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
+		if (!CPU_ISSET(pc->pc_cpuid, &cpus))
+			continue;
+		ipi_send(pc, ipi);
+	}
+}
+
+/*
+ * send an IPI to a specific CPU.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+
+	/*
+	 * NOTE(review): assumes cpuid_to_pcpu[cpu] is non-NULL -- confirm
+	 * callers only pass ids of present CPUs.
+	 */
+	ipi_send(cpuid_to_pcpu[cpu], ipi);
+}
+
+/*
+ * Send an IPI to every CPU except the one we are running on.
+ */
+void
+ipi_all_but_self(int ipi)
+{
+	struct pcpu *pc;
+
+	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
+		if (pc == pcpup)
+			continue;
+		ipi_send(pc, ipi);
+	}
+}
+
+/*
+ * Send an IPI to the specified processor, by storing the external
+ * interrupt vector into the target CPU's slot of the processor
+ * interrupt block.
+ */
+void
+ipi_send(struct pcpu *cpu, int xiv)
+{
+	u_int sapic_id;
+
+	KASSERT(xiv != 0, ("ipi_send"));
+
+	sapic_id = IA64_LID_GET_SAPIC_ID(cpu->pc_md.lid);
+
+	ia64_mf();	/* order prior memory operations before the IPI store */
+	ia64_st8(&(ia64_pib->ib_ipi[sapic_id][0]), xiv);
+	ia64_mf_a();	/* mf.a: wait for the store to be accepted */
+	CTR3(KTR_SMP, "ipi_send(%p, %d): cpuid=%d", cpu, xiv, PCPU_GET(cpuid));
+}


Property changes on: trunk/sys/ia64/ia64/mp_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/nexus.c
===================================================================
--- trunk/sys/ia64/ia64/nexus.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/nexus.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,531 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.  M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose.  It is provided "as is" without express or implied
+ * warranty.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/nexus.c 270296 2014-08-21 19:51:07Z emaste $
+ */
+
+/*
+ * This code implements a `root nexus' for Intel Architecture
+ * machines.  The function of the root nexus is to serve as an
+ * attachment point for both processors and buses, and to manage
+ * resources which are common to all of them.  In particular,
+ * this code implements the core resource managers for interrupt
+ * requests, DMA requests (which rightfully should be a part of the
+ * ISA code but it's easier to do it here for now), I/O port addresses,
+ * and I/O memory address space.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/clock.h>
+#include <sys/efi.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <sys/interrupt.h>
+#include <sys/pcpu.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/intr.h>
+#include <machine/pmap.h>
+#include <machine/resource.h>
+#include <machine/vmparam.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+#include "clock_if.h"
+
+static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device");
+struct nexus_device {
+	struct resource_list	nx_resources;
+};
+
+#define DEVTONX(dev)	((struct nexus_device *)device_get_ivars(dev))
+
+static struct rman irq_rman, port_rman, mem_rman;
+
+static	int nexus_probe(device_t);
+static	int nexus_attach(device_t);
+static	int nexus_print_child(device_t, device_t);
+static device_t nexus_add_child(device_t bus, u_int order, const char *name,
+				int unit);
+static	struct resource *nexus_alloc_resource(device_t, device_t, int, int *,
+					      u_long, u_long, u_long, u_int);
+static	int nexus_adjust_resource(device_t, device_t, int, struct resource *,
+				  u_long, u_long);
+static	int nexus_activate_resource(device_t, device_t, int, int,
+				    struct resource *);
+static	int nexus_deactivate_resource(device_t, device_t, int, int,
+				      struct resource *);
+static	int nexus_release_resource(device_t, device_t, int, int,
+				   struct resource *);
+static	int nexus_setup_intr(device_t, device_t, struct resource *, int flags,
+			     driver_filter_t filter, void (*)(void *), void *, 
+			     void **);
+static	int nexus_teardown_intr(device_t, device_t, struct resource *,
+				void *);
+static struct resource_list *nexus_get_reslist(device_t dev, device_t child);
+static	int nexus_set_resource(device_t, device_t, int, int, u_long, u_long);
+static	int nexus_get_resource(device_t, device_t, int, int, u_long *,
+			       u_long *);
+static void nexus_delete_resource(device_t, device_t, int, int);
+static int nexus_bind_intr(device_t, device_t, struct resource *, int);
+static	int nexus_config_intr(device_t, int, enum intr_trigger,
+			      enum intr_polarity);
+
+static int nexus_gettime(device_t, struct timespec *);
+static int nexus_settime(device_t, struct timespec *);
+
+static device_method_t nexus_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		nexus_probe),
+	DEVMETHOD(device_attach,	nexus_attach),
+	DEVMETHOD(device_detach,	bus_generic_detach),
+	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
+	DEVMETHOD(device_suspend,	bus_generic_suspend),
+	DEVMETHOD(device_resume,	bus_generic_resume),
+
+	/* Bus interface */
+	DEVMETHOD(bus_print_child,	nexus_print_child),
+	DEVMETHOD(bus_add_child,	nexus_add_child),
+	DEVMETHOD(bus_alloc_resource,	nexus_alloc_resource),
+	DEVMETHOD(bus_adjust_resource,	nexus_adjust_resource),
+	DEVMETHOD(bus_release_resource,	nexus_release_resource),
+	DEVMETHOD(bus_activate_resource, nexus_activate_resource),
+	DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource),
+	DEVMETHOD(bus_setup_intr,	nexus_setup_intr),
+	DEVMETHOD(bus_teardown_intr,	nexus_teardown_intr),
+	DEVMETHOD(bus_get_resource_list, nexus_get_reslist),
+	DEVMETHOD(bus_set_resource,	nexus_set_resource),
+	DEVMETHOD(bus_get_resource,	nexus_get_resource),
+	DEVMETHOD(bus_delete_resource,	nexus_delete_resource),
+	DEVMETHOD(bus_bind_intr,	nexus_bind_intr),
+	DEVMETHOD(bus_config_intr,	nexus_config_intr),
+
+	/* Clock interface */
+	DEVMETHOD(clock_gettime,	nexus_gettime),
+	DEVMETHOD(clock_settime,	nexus_settime),
+
+	{ 0, 0 }
+};
+
+static driver_t nexus_driver = {
+	"nexus",
+	nexus_methods,
+	1,			/* no softc */
+};
+static devclass_t nexus_devclass;
+
+DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0);
+
+/*
+ * Initialize the root nexus: set up the rmans that manage interrupt
+ * vectors (0 .. IA64_NXIVS-1), x86-style I/O port space (64K) and the
+ * full I/O memory address space, then probe any children added by
+ * identify routines.
+ */
+static int
+nexus_probe(device_t dev)
+{
+
+	device_quiet(dev);	/* suppress attach message for neatness */
+
+	irq_rman.rm_type = RMAN_ARRAY;
+	irq_rman.rm_descr = "Interrupt request lines";
+	irq_rman.rm_start = 0;
+	irq_rman.rm_end = IA64_NXIVS - 1;
+	if (rman_init(&irq_rman)
+	    || rman_manage_region(&irq_rman,
+				  irq_rman.rm_start, irq_rman.rm_end))
+		panic("nexus_probe irq_rman");
+
+	port_rman.rm_start = 0;
+	port_rman.rm_end = 0xffff;
+	port_rman.rm_type = RMAN_ARRAY;
+	port_rman.rm_descr = "I/O ports";
+	if (rman_init(&port_rman)
+	    || rman_manage_region(&port_rman, 0, 0xffff))
+		panic("nexus_probe port_rman");
+
+	mem_rman.rm_start = 0;
+	mem_rman.rm_end = ~0ul;
+	mem_rman.rm_type = RMAN_ARRAY;
+	mem_rman.rm_descr = "I/O memory addresses";
+	if (rman_init(&mem_rman)
+	    || rman_manage_region(&mem_rman, 0, ~0))
+		panic("nexus_probe mem_rman");
+
+	return bus_generic_probe(dev);
+}
+
+/*
+ * Attach the nexus: add an acpi child when ACPI tables were found,
+ * register as the system real-time clock driver (resolution argument
+ * 1000 -- presumably microseconds; see clock_register(9)) and attach
+ * all children.
+ */
+static int
+nexus_attach(device_t dev)
+{
+
+	if (acpi_identify() == 0)
+		BUS_ADD_CHILD(dev, 10, "acpi", 0);
+	clock_register(dev, 1000);
+	bus_generic_attach(dev);
+	return 0;
+}
+
+/*
+ * Print a one-line description of a child device, including its port,
+ * memory and IRQ resources.  Returns the number of characters printed.
+ */
+static int
+nexus_print_child(device_t bus, device_t child)
+{
+	struct nexus_device *ndev = DEVTONX(child);
+	struct resource_list *rl = &ndev->nx_resources;
+	int retval = 0;
+
+	retval += bus_print_child_header(bus, child);
+	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
+	retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx");
+	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
+	if (device_get_flags(child))
+		retval += printf(" flags %#x", device_get_flags(child));
+	retval += printf(" on motherboard\n");	/* XXX "motherboard", ick */
+
+	return (retval);
+}
+
+/*
+ * Add a child device to the nexus, allocating the per-child ivars that
+ * hold its resource list.  Returns the new device or NULL on failure.
+ */
+static device_t
+nexus_add_child(device_t bus, u_int order, const char *name, int unit)
+{
+	device_t		child;
+	struct nexus_device	*ndev;
+
+	ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO);
+	if (ndev == NULL)
+		return (NULL);
+	resource_list_init(&ndev->nx_resources);
+
+	child = device_add_child_ordered(bus, order, name, unit);
+	if (child == NULL) {
+		/* Fixed: don't leak the ivars (or set them on NULL). */
+		resource_list_free(&ndev->nx_resources);
+		free(ndev, M_NEXUSDEV);
+		return (NULL);
+	}
+
+	/* should we free this in nexus_child_detached? */
+	device_set_ivars(child, ndev);
+
+	return (child);
+}
+
+/*
+ * Map a SYS_RES_* resource type to the rman that manages it, or NULL
+ * when the nexus does not handle that type.
+ */
+static struct rman *
+nexus_rman(int type)
+{
+
+	if (type == SYS_RES_IRQ)
+		return (&irq_rman);
+	if (type == SYS_RES_IOPORT)
+		return (&port_rman);
+	if (type == SYS_RES_MEMORY)
+		return (&mem_rman);
+	return (NULL);
+}
+
+/*
+ * Allocate a resource on behalf of child.  NB: child is usually going to be a
+ * child of one of our descendants, not a direct child of nexus0.
+ * (Exceptions include npx.)
+ * Returns the reserved (and, if RF_ACTIVE was requested, activated)
+ * resource, or NULL on failure.
+ */
+static struct resource *
+nexus_alloc_resource(device_t bus, device_t child, int type, int *rid,
+		     u_long start, u_long end, u_long count, u_int flags)
+{
+	struct nexus_device *ndev = DEVTONX(child);
+	struct	resource *rv;
+	struct resource_list_entry *rle;
+	struct	rman *rm;
+	int needactivate = flags & RF_ACTIVE;
+
+	/*
+	 * If this is an allocation of the "default" range for a given RID, and
+	 * we know what the resources for this device are (ie. they aren't maintained
+	 * by a child bus), then work out the start/end values.
+	 */
+	if ((start == 0UL) && (end == ~0UL) && (count == 1)) {
+		if (ndev == NULL)
+			return(NULL);
+		rle = resource_list_find(&ndev->nx_resources, type, *rid);
+		if (rle == NULL)
+			return(NULL);
+		start = rle->start;
+		end = rle->end;
+		count = rle->count;
+	}
+
+	/* Reserve first; activate below only if the caller asked for it. */
+	flags &= ~RF_ACTIVE;
+	rm = nexus_rman(type);
+	if (rm == NULL)
+		return (NULL);
+
+	rv = rman_reserve_resource(rm, start, end, count, flags, child);
+	if (rv == 0)
+		return 0;
+	rman_set_rid(rv, *rid);
+
+	if (needactivate) {
+		if (bus_activate_resource(child, type, *rid, rv)) {
+			rman_release_resource(rv);
+			return 0;
+		}
+	}
+	
+	return rv;
+}
+
+/*
+ * Adjust the range of an allocated resource, after verifying it is
+ * really managed by the rman for its type.
+ */
+static int
+nexus_adjust_resource(device_t bus, device_t child, int type,
+    struct resource *r, u_long start, u_long end)
+{
+	struct rman *rm;
+
+	rm = nexus_rman(type);
+	if (rm == NULL)
+		return (ENXIO);
+	if (!rman_is_region_manager(r, rm))
+		return (EINVAL);
+	return (rman_adjust_resource(r, start, end));
+}
+
+/*
+ * Activate a resource: set up the bus tag/handle (mapping I/O memory
+ * into KVA via pmap_mapdev()) and mark the resource active.
+ */
+static int
+nexus_activate_resource(device_t bus, device_t child, int type, int rid,
+    struct resource *r)
+{
+	vm_paddr_t paddr;
+	void *vaddr;
+
+	paddr = rman_get_start(r);
+
+	switch (type) {
+	case SYS_RES_IOPORT:
+		rman_set_bustag(r, IA64_BUS_SPACE_IO);
+		rman_set_bushandle(r, paddr);
+		break;
+	case SYS_RES_MEMORY:
+		vaddr = pmap_mapdev(paddr, rman_get_size(r));
+		rman_set_bustag(r, IA64_BUS_SPACE_MEM);
+		rman_set_bushandle(r, (bus_space_handle_t) vaddr);
+		rman_set_virtual(r, vaddr);
+		break;
+	default:
+		/* SYS_RES_IRQ: nothing to map -- made explicit. */
+		break;
+	}
+	return (rman_activate_resource(r));
+}
+
+/*
+ * Deactivate a resource; no per-type teardown is required.
+ */
+static int
+nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
+			  struct resource *r)
+{
+
+	return (rman_deactivate_resource(r));
+}
+
+/*
+ * Release a resource, deactivating it first when it is still active.
+ */
+static int
+nexus_release_resource(device_t bus, device_t child, int type, int rid,
+		       struct resource *r)
+{
+	int error;
+
+	if ((rman_get_flags(r) & RF_ACTIVE) != 0) {
+		error = bus_deactivate_resource(child, type, rid, r);
+		if (error != 0)
+			return (error);
+	}
+	return (rman_release_resource(r));
+}
+
+/*
+ * Currently this uses the really grody interface from kern/kern_intr.c
+ * (which really doesn't belong in kern/anything.c).  Eventually, all of
+ * the code in kern_intr.c and machdep_intr.c should get moved here, since
+ * this is going to be the official interface.
+ */
+/*
+ * Set up an interrupt handler for the child on the vector backing the
+ * given IRQ resource.  The resource is activated (idempotently) before
+ * the handler is installed.
+ */
+static int
+nexus_setup_intr(device_t bus, device_t child, struct resource *irq,
+		 int flags, driver_filter_t filter, void (*ihand)(void *),
+		 void *arg, void **cookiep)
+{
+	int		error;
+
+	/* somebody tried to setup an irq that failed to allocate! */
+	if (irq == NULL)
+		panic("nexus_setup_intr: NULL irq resource!");
+
+	*cookiep = NULL;
+	if ((rman_get_flags(irq) & RF_SHAREABLE) == 0)
+		flags |= INTR_EXCL;
+
+	/* Fixed: dropped unused device_get_driver(child) result. */
+
+	/*
+	 * We depend here on rman_activate_resource() being idempotent.
+	 */
+	error = rman_activate_resource(irq);
+	if (error)
+		return (error);
+
+	error = ia64_setup_intr(device_get_nameunit(child),
+	    rman_get_start(irq), filter, ihand, arg, flags, cookiep);
+
+	return (error);
+}
+
+/*
+ * Tear down an interrupt handler previously installed by
+ * nexus_setup_intr(); the cookie identifies the handler.
+ */
+static int
+nexus_teardown_intr(device_t dev, device_t child, struct resource *ires,
+    void *cookie)
+{
+
+	return (ia64_teardown_intr(cookie));
+}
+
+/*
+ * Return the per-child resource list kept in the child's ivars.
+ */
+static struct resource_list *
+nexus_get_reslist(device_t dev, device_t child)
+{
+	struct nexus_device *ndev = DEVTONX(child);
+
+	return (&ndev->nx_resources);
+}
+
+/*
+ * Register a resource range for the child.  Port ranges that extend
+ * beyond the 64K I/O port space are recorded as memory ranges instead,
+ * to work around broken firmware (see below).
+ */
+static int
+nexus_set_resource(device_t dev, device_t child, int type, int rid,
+    u_long start, u_long count)
+{
+	struct nexus_device	*ndev = DEVTONX(child);
+	struct resource_list	*rl = &ndev->nx_resources;
+
+	if (type == SYS_RES_IOPORT && start > (0x10000 - count)) {
+		/*
+		 * Work around a firmware bug in the HP rx2660, where in ACPI
+		 * an I/O port is really a memory mapped I/O address. The bug
+		 * is in the GAS that describes the address and in particular
+		 * the SpaceId field. The field should not say the address is
+		 * an I/O port when it is in fact an I/O memory address.
+		 */
+		if (bootverbose)
+			printf("%s: invalid port range (%#lx-%#lx); "
+			    "assuming I/O memory range.\n", __func__, start,
+			    start + count - 1);
+		type = SYS_RES_MEMORY;
+	}
+
+	/* XXX this should return a success/failure indicator */
+	resource_list_add(rl, type, rid, start, start + count - 1, count);
+	return(0);
+}
+
+/*
+ * Look up a resource range previously registered with
+ * nexus_set_resource() and return its start address and count.
+ */
+static int
+nexus_get_resource(device_t dev, device_t child, int type, int rid, u_long *startp, u_long *countp)
+{
+	struct nexus_device	*ndev = DEVTONX(child);
+	struct resource_list	*rl = &ndev->nx_resources;
+	struct resource_list_entry *rle;
+
+	rle = resource_list_find(rl, type, rid);
+	/* Fixed: leftover debug printf fired on every call; gate it. */
+	if (bootverbose)
+		device_printf(child,
+		    "type %d  rid %d  startp %p  countp %p - got %p\n",
+		    type, rid, startp, countp, rle);
+	if (rle == NULL)
+		return (ENOENT);
+	if (startp != NULL)
+		*startp = rle->start;
+	if (countp != NULL)
+		*countp = rle->count;
+	return (0);
+}
+
+/*
+ * Drop a resource range previously registered for the child.
+ */
+static void
+nexus_delete_resource(device_t dev, device_t child, int type, int rid)
+{
+	struct nexus_device *ndev = DEVTONX(child);
+
+	resource_list_delete(&ndev->nx_resources, type, rid);
+}
+
+/*
+ * Program trigger mode and polarity for an interrupt via the SAPIC.
+ */
+static int
+nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
+    enum intr_polarity pol)
+{
+
+	return (sapic_config_intr(irq, trig, pol));
+}
+
+/*
+ * Bind an interrupt to a specific CPU.  Fails with EINVAL when no
+ * pcpu exists for the requested cpu id.
+ */
+static int
+nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
+{
+	struct pcpu *pc;
+
+	pc = cpuid_to_pcpu[cpu];
+	if (pc == NULL)
+		return (EINVAL);
+	return (sapic_bind_intr(rman_get_start(irq), pc));
+}
+
+/*
+ * Read the EFI real-time clock and convert it to a timespec.
+ */
+static int
+nexus_gettime(device_t dev, struct timespec *ts)
+{
+	struct clocktime ct;
+	struct efi_tm tm;
+
+	efi_get_time(&tm);
+
+	/*
+	 * This code was written in 2005, so logically EFI cannot return
+	 * a year smaller than that. Assume the EFI clock is out of whack
+	 * in that case and fail with EINVAL, so the caller doesn't use
+	 * the bogus time.  (The original comment claimed the EFI clock
+	 * is reset here, but no reset is performed.)
+	 */
+	if (tm.tm_year < 2005)
+		return (EINVAL);
+
+	ct.nsec = tm.tm_nsec;
+	ct.sec = tm.tm_sec;
+	ct.min = tm.tm_min;
+	ct.hour = tm.tm_hour;
+	ct.day = tm.tm_mday;
+	ct.mon = tm.tm_mon;
+	ct.year = tm.tm_year;
+	ct.dow = -1;	/* day of week unknown/not provided */
+	return (clock_ct_to_ts(&ct, ts));
+}
+
+/*
+ * Set the EFI real-time clock from a timespec.  The current EFI time
+ * is read first -- presumably so fields not derived from ts (such as
+ * timezone/daylight) are preserved; confirm against efi_set_time().
+ */
+static int
+nexus_settime(device_t dev, struct timespec *ts)
+{
+	struct clocktime ct;
+	struct efi_tm tm;
+
+	efi_get_time(&tm);
+
+	clock_ts_to_ct(ts, &ct);
+	tm.tm_nsec = ts->tv_nsec;
+	tm.tm_sec = ct.sec;
+	tm.tm_min = ct.min;
+	tm.tm_hour = ct.hour;
+	tm.tm_year = ct.year;
+	tm.tm_mon = ct.mon;
+	tm.tm_mday = ct.day;
+	return (efi_set_time(&tm));
+}


Property changes on: trunk/sys/ia64/ia64/nexus.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/pal.S
===================================================================
--- trunk/sys/ia64/ia64/pal.S	                        (rev 0)
+++ trunk/sys/ia64/ia64/pal.S	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,117 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000-2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	$FreeBSD: stable/10/sys/ia64/ia64/pal.S 222769 2011-06-06 19:06:15Z marcel $
+ */
+
+#include <machine/asm.h>
+
+	.data
+	.global ia64_pal_entry
+ia64_pal_entry:	.quad 0
+	.text
+
+/*
+ * struct ia64_pal_result ia64_call_pal_static(u_int64_t proc,
+ *	u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
+ */
+// Invoke a static PAL procedure.  Per the PAL static calling convention
+// the procedure index and arguments are passed in r28-r31 and the return
+// address in b0; rp and ar.pfs are preserved in locals across the call.
+ENTRY(ia64_call_pal_static, 4)
+
+	.regstk	4,4,0,0
+palret	=	loc0
+entry	=	loc1
+rpsave	=	loc2
+pfssave =	loc3
+
+	alloc	pfssave=ar.pfs,4,4,0,0
+	;; 
+	mov	rpsave=rp
+	movl	entry=@gprel(ia64_pal_entry)
+
+1:	mov	palret=ip		// for return address
+	;;
+	add	entry=entry,gp
+	add	palret=2f-1b,palret	// calculate return address
+	mov	r28=in0			// procedure number
+	mov	r29=in1			// copy arguments
+	mov	r30=in2
+	mov	r31=in3
+	;;
+	ld8	entry=[entry]		// read entry point
+	mov	b0=palret		// PAL returns via b0
+	;;
+	mov	b6=entry
+	;;
+	br.cond.sptk b6			// call into firmware
+	;;
+2:
+	mov	rp=rpsave
+	mov	ar.pfs=pfssave
+	;;
+	br.ret.sptk rp
+	;;
+END(ia64_call_pal_static)
+
+/*
+ * struct ia64_pal_result ia64_call_pal_stacked(u_int64_t proc,
+ *	u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
+ */
+// Invoke a stacked PAL procedure.  Arguments go in both the stacked
+// output registers (out0-out3) and r28 (procedure number) as the
+// convention requires; interrupts are disabled around the call and
+// psr.l is restored afterwards (followed by srlz.d to serialize).
+ENTRY(ia64_call_pal_stacked, 4)
+	
+	.regstk	4,4,4,0
+entry	=	loc0
+rpsave	=	loc1
+pfssave =	loc2
+psrsave	=	loc3
+
+	alloc	pfssave=ar.pfs,4,4,4,0
+	;; 
+	mov	rpsave=rp
+	movl	entry=@gprel(ia64_pal_entry)
+	;;
+	add	entry=entry,gp
+	mov	psrsave=psr
+	mov	r28=in0			// procedure number
+	mov	out0=in0
+	;;
+	ld8	entry=[entry]		// read entry point
+	mov	out1=in1		// copy arguments
+	mov	out2=in2
+	mov	out3=in3
+	;;
+	mov	b6=entry
+	;;
+	rsm	psr.i			// disable interrupts
+	;;
+	br.call.sptk.many rp=b6		// call into firmware
+	mov	psr.l=psrsave		// restore caller's psr (low bits)
+	mov	rp=rpsave
+	mov	ar.pfs=pfssave
+	;;
+	srlz.d
+	br.ret.sptk rp
+
+END(ia64_call_pal_stacked)


Property changes on: trunk/sys/ia64/ia64/pal.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/physical.S
===================================================================
--- trunk/sys/ia64/ia64/physical.S	                        (rev 0)
+++ trunk/sys/ia64/ia64/physical.S	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,259 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2011 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/physical.S 219841 2011-03-21 18:20:53Z marcel $
+ */
+
+#include <machine/asm.h>
+#include <machine/ia64_cpu.h>
+
+		.text
+
+/*
+ * u_long ia64_efi_physical(ia64_efi_f, u_long, u_long, u_long, u_long)
+ *
+ *	loc0 = ar.pfs
+ *	loc1 = rp
+ *	loc2 = psr
+ *	loc3 = sp
+ *	loc4 = bsp
+ *	loc5 = gp
+ */
+// Call an EFI runtime service in physical mode.  Saves the virtual-mode
+// psr, sp, ar.bsp and gp in locals, translates the stacks to physical
+// addresses (tpa), drops into physical mode via rfi, calls the function
+// whose descriptor is pointed to by in0 with arguments in1-in4, then
+// rfi's back to the saved virtual-mode context and returns the result.
+ENTRY(ia64_efi_physical, 5)
+		.prologue
+		.regstk 5,6,4,0
+		.save	ar.pfs,loc0
+		alloc	loc0=ar.pfs,5,6,4,0
+		;;
+		.save   rp,loc1
+		mov	loc1=rp
+		;;
+		.body
+		mov	loc2=psr		// save psr
+		movl	r16=IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | \
+			    IA64_PSR_RT | IA64_PSR_DFL | IA64_PSR_DFH
+		;;
+		andcm	r14=loc2,r16		// clear translation/interrupt bits
+		movl	r15=IA64_PSR_BN
+		;;
+		rsm	psr.i
+		mov	r17=ar.rsc
+		or	r16=r14,r15		// new psr
+		;;
+		mov	ar.rsc=0		// stop RSE before switching bspstore
+		or	loc2=loc2,r15
+		;;
+		flushrs
+		mov	loc3=sp			// save sp
+		;;
+		mov	loc4=ar.bsp		// save ar.bsp
+		mov	r18=ar.rnat
+		;;
+		tpa	r19=loc4		// new bspstore
+		mov	loc5=gp
+		;;
+		tpa	r20=loc3		// new sp
+		ld8	r21=[in0],8		// function descriptor: entry point
+		;;
+1:
+		mov	r14=ip
+		;;
+		ld8	r22=[in0]		// function descriptor: gp
+		add	r15=2f-1b,r14
+		;;
+		tpa	r14=r15			// physical address of label 2
+		;;
+		rsm	psr.ic
+		;;
+		srlz.i
+		;;
+		mov	cr.iip=r14
+		mov	cr.ifs=r0
+		mov	cr.ipsr=r16
+		;;
+		rfi				// enter physical mode at 2:
+2:
+		mov	ar.bspstore=r19
+		mov	sp=r20
+		;;
+		mov	ar.rnat=r18
+		mov	ar.rsc=r17
+		;;
+		mov	b6=r21
+		mov	gp=r22
+		mov	out0=in1
+		mov	out1=in2
+		mov	out2=in3
+		mov	out3=in4
+		;;
+		br.call.sptk.many	rp=b6	// call the EFI function
+		mov	gp=loc5
+		;;
+		rsm	psr.i | psr.ic
+		mov	r16=ar.rsc
+		;;
+		srlz.i
+		mov	ar.rsc=0		// stop RSE again for switch back
+		;;
+		flushrs
+		;;
+		mov	r17=ar.rnat
+		movl	r18=3f
+		;;
+		mov	cr.iip=r18
+		mov	cr.ifs=r0
+		mov	cr.ipsr=loc2		// saved virtual-mode psr
+		;;
+		rfi				// back to virtual mode at 3:
+3:
+		mov	ar.bspstore=loc4
+		mov	sp=loc3
+		;;
+		mov	ar.rnat=r17
+		mov	ar.rsc=r16
+		;;
+		mov	rp=loc1
+		mov	ar.pfs=loc0
+		;;
+		br.ret.sptk.many	rp
+END(ia64_efi_physical)
+
+
+/*
+ * ia64_pal_ret ia64_pal_physical(ia64_fw_f, u_long, u_long, u_long, u_long)
+ *
+ *	loc0 = ar.pfs
+ *	loc1 = rp
+ *	loc2 = psr
+ *	loc3 = sp
+ *	loc4 = bsp
+ *	loc5 = gp
+ */
+// Call a PAL procedure in physical mode.  Same mode-switch dance as
+// ia64_efi_physical, but switches onto the (physical) kstack and loads
+// the entry point from ia64_pal_entry.  Arguments are set up for both
+// stacked (out0-out3) and static (r28-r31) PAL calling conventions.
+ENTRY(ia64_pal_physical, 5)
+		.prologue
+		.regstk 5,6,4,0
+		.save	ar.pfs,loc0
+		alloc	loc0=ar.pfs,5,6,4,0
+		;;
+		.save   rp,loc1
+		mov	loc1=rp
+		;;
+		.body
+		mov	loc2=psr		// save psr
+		movl	r16=IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | \
+			    IA64_PSR_RT | IA64_PSR_DFL | IA64_PSR_DFH
+		;;
+		andcm	r14=loc2,r16		// clear translation/interrupt bits
+		movl	r15=IA64_PSR_BN
+		;;
+		rsm	psr.i
+		mov	r17=ar.rsc
+		or	r16=r14,r15		// new psr
+		;;
+		mov	ar.rsc=0		// stop RSE before switching bspstore
+		or	loc2=loc2,r15
+		;;
+		flushrs
+		mov	loc3=sp			// save sp
+		;;
+		mov	loc4=ar.bsp		// save ar.bsp
+		mov	r18=ar.rnat
+		;;
+		mov	loc5=gp
+		movl	r14=kstack
+		;;
+		tpa	r19=r14			// new bspstore
+		movl	r15=kstack_top
+		;;
+		tpa	r20=r15			// new sp
+		movl	r21=ia64_pal_entry
+		;;
+1:
+		mov	r14=ip
+		ld8	r22=[r21]		// PAL entry point (virtual)
+		;;
+		tpa	r21=r22			// PAL entry point (physical)
+		add	r15=2f-1b,r14
+		;;
+		tpa	r14=r15			// physical address of label 2
+		;;
+		rsm	psr.ic
+		;;
+		srlz.i
+		;;
+		mov	cr.iip=r14
+		mov	cr.ifs=r0
+		mov	cr.ipsr=r16
+		;;
+		rfi				// enter physical mode at 2:
+2:
+		mov	ar.bspstore=r19
+		add	sp=-16,r20		// 16-byte scratch area below top
+		;;
+		mov	ar.rnat=r18
+		mov	ar.rsc=r17
+		;;
+		mov	b6=r21
+		mov	out0=in0
+		mov	out1=in1
+		mov	out2=in2
+		mov	out3=in3
+		// PAL static calls
+		mov	r28=in0
+		mov	r29=in1
+		mov	r30=in2
+		mov	r31=in3
+		br.call.sptk.many	rp=b6	// call into PAL
+		mov	gp=loc5
+		;;
+		rsm	psr.i | psr.ic
+		mov	r16=ar.rsc
+		;;
+		srlz.i
+		mov	ar.rsc=0		// stop RSE again for switch back
+		;;
+		flushrs
+		;;
+		mov	r17=ar.rnat
+		movl	r18=3f
+		;;
+		mov	cr.iip=r18
+		mov	cr.ifs=r0
+		mov	cr.ipsr=loc2		// saved virtual-mode psr
+		;;
+		rfi				// back to virtual mode at 3:
+3:
+		mov	ar.bspstore=loc4
+		mov	sp=loc3
+		;;
+		mov	ar.rnat=r17
+		mov	ar.rsc=r16
+		;;
+		mov	rp=loc1
+		mov	ar.pfs=loc0
+		;;
+		br.ret.sptk.many	rp
+END(ia64_pal_physical)


Property changes on: trunk/sys/ia64/ia64/physical.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/physmem.c
===================================================================
--- trunk/sys/ia64/ia64/physmem.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/physmem.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,265 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2012 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/physmem.c 310508 2016-12-24 13:28:39Z avg $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/md_var.h>
+#include <machine/vmparam.h>
+
+static u_int phys_avail_segs;
+
+vm_paddr_t phys_avail[2 * VM_PHYSSEG_MAX + 2];
+
+vm_paddr_t paddr_max;
+long Maxmem;
+long realmem;
+
+/*
+ * Return the phys_avail[] index of the first segment that is not
+ * entirely below [base, lim): either it overlaps the range or lies
+ * at/beyond it.  Returns the index of the terminating 0 entry when
+ * every segment lies below the range.
+ */
+static u_int
+ia64_physmem_find(vm_paddr_t base, vm_paddr_t lim)
+{
+	u_int idx;
+
+	for (idx = 0; phys_avail[idx + 1] != 0; idx += 2) {
+		if (phys_avail[idx] >= lim ||
+		    phys_avail[idx + 1] > base)
+			break;
+	}
+	return (idx);
+}
+
+/*
+ * Insert segment [base, lim) at slot 'idx', shifting all higher
+ * segments up by one pair.  Returns ENOMEM when the phys_avail[]
+ * table already holds VM_PHYSSEG_MAX segments.
+ */
+static int
+ia64_physmem_insert(u_int idx, vm_paddr_t base, vm_paddr_t lim)
+{
+	u_int ridx;
+
+	if (phys_avail_segs == VM_PHYSSEG_MAX)
+		return (ENOMEM);
+
+	/* Shift segments at and above idx up two slots, from the top down. */
+	ridx = phys_avail_segs * 2;
+	while (idx < ridx) {
+		phys_avail[ridx + 1] = phys_avail[ridx - 1];
+		phys_avail[ridx] = phys_avail[ridx - 2];
+		ridx -= 2;
+	}
+	phys_avail[idx] = base;
+	phys_avail[idx + 1] = lim;
+	phys_avail_segs++;
+	return (0);
+}
+
+/*
+ * Remove the segment at slot 'idx' by shifting all subsequent
+ * segments (including the terminator) down one pair.  Returns
+ * ENOENT when the table is empty.
+ */
+static int
+ia64_physmem_remove(u_int idx)
+{
+
+	if (phys_avail_segs == 0)
+		return (ENOENT);
+	do {
+		phys_avail[idx] = phys_avail[idx + 2];
+		phys_avail[idx + 1] = phys_avail[idx + 3];
+		idx += 2;
+	} while (phys_avail[idx + 1] != 0);
+	phys_avail_segs--;
+	return (0);
+}
+
+/*
+ * Add the range [base, base+len) to the available-memory table,
+ * coalescing with an adjacent segment when the range abuts one;
+ * otherwise insert a new segment.  Also accounts the bytes into
+ * 'realmem'.  Returns 0 or ENOMEM (from the insert).
+ */
+int
+ia64_physmem_add(vm_paddr_t base, vm_size_t len)
+{
+	vm_paddr_t lim;
+	u_int idx;
+
+	realmem += len;
+
+	lim = base + len;
+	idx = ia64_physmem_find(base, lim);
+	/* Range ends exactly where the found segment starts: extend down. */
+	if (phys_avail[idx] == lim) {
+		phys_avail[idx] = base;
+		return (0);
+	}
+	/* Range starts exactly where the previous segment ends: extend up. */
+	if (idx > 0 && phys_avail[idx - 1] == base) {
+		phys_avail[idx - 1] = lim;
+		return (0);
+	}
+	return (ia64_physmem_insert(idx, base, lim));
+}
+
+/*
+ * Remove the range [base, base+len) from the available-memory table.
+ * Depending on how the range intersects the containing segment this
+ * splits it in two, trims one end, or deletes it entirely.  Returns
+ * ENOENT when no segment covers the range, 0 otherwise (or ENOMEM
+ * when a split needs a table slot and none is free).
+ */
+int
+ia64_physmem_delete(vm_paddr_t base, vm_size_t len)
+{
+	vm_paddr_t lim;
+	u_int idx;
+
+	lim = base + len;
+	idx = ia64_physmem_find(base, lim);
+	if (phys_avail[idx] >= lim || phys_avail[idx + 1] == 0)
+		return (ENOENT);
+	if (phys_avail[idx] < base && phys_avail[idx + 1] > lim) {
+		/* Hole strictly inside the segment: split into two. */
+		len = phys_avail[idx + 1] - lim;
+		phys_avail[idx + 1] = base;
+		base = lim;
+		lim = base + len;
+		return (ia64_physmem_insert(idx + 2, base, lim));
+	} else {
+		/* Trim whichever end(s) of the segment the range covers. */
+		if (phys_avail[idx] == base)
+			phys_avail[idx] = lim;
+		if (phys_avail[idx + 1] == lim)
+			phys_avail[idx + 1] = base;
+		/* Segment fully consumed: drop it. */
+		if (phys_avail[idx] >= phys_avail[idx + 1])
+			return (ia64_physmem_remove(idx));
+	}
+	return (0);
+}
+
+/*
+ * Finalize the available-memory table: page-align every segment
+ * (dropping sub-page slivers), and derive the physmem, realmem,
+ * paddr_max and Maxmem statistics from the result.
+ */
+int
+ia64_physmem_fini(void)
+{
+	vm_paddr_t base, lim, size;
+	u_int idx;
+
+	idx = 0;
+	while (phys_avail[idx + 1] != 0) {
+		base = round_page(phys_avail[idx]);
+		lim = trunc_page(phys_avail[idx + 1]);
+		if (base < lim) {
+			phys_avail[idx] = base;
+			phys_avail[idx + 1] = lim;
+			size = lim - base;
+			physmem += atop(size);
+			paddr_max = lim;	/* segments are sorted; last lim wins */
+			idx += 2;
+		} else
+			ia64_physmem_remove(idx);
+	}
+
+	/*
+	 * Round realmem to a multiple of 128MB. Hopefully that compensates
+	 * for any loss of DRAM that isn't accounted for in the memory map.
+	 * I'm thinking legacy BIOS or VGA here. In any case, it's ok if
+	 * we got it wrong, because we don't actually use realmem. It's
+	 * just for show...
+	 */
+	size = 1U << 27;
+	realmem = (realmem + size - 1) & ~(size - 1);
+	realmem = atop(realmem);
+
+	/*
+	 * Maxmem isn't the "maximum memory", it's one larger than the
+	 * highest page of the physical address space.
+	 */
+	Maxmem = atop(paddr_max);
+	return (0);
+}
+
+/*
+ * Early physical-memory bookkeeping initialization; currently a no-op.
+ */
+int
+ia64_physmem_init(void)
+{
+
+	/* Nothing to do just yet. */
+	return (0);
+}
+
+/*
+ * Account memory that exists but is not made available for allocation
+ * (it is counted in 'realmem' only, not entered in phys_avail[]).
+ */
+int
+ia64_physmem_track(vm_paddr_t base, vm_size_t len)
+{
+
+	realmem += len;
+	return (0);
+}
+
+/*
+ * Steal 'len' bytes (rounded up to a page) of physical memory with the
+ * given alignment from phys_avail[], and return it as a zeroed pointer
+ * in the region 7 (cacheable direct-mapped) address space.  Returns
+ * NULL when no segment can satisfy the request.
+ *
+ * NOTE(review): 'align' is used as a power-of-two mask (align - 1);
+ * callers appear to pass page-sized powers of two -- confirm.
+ */
+void *
+ia64_physmem_alloc(vm_size_t len, vm_size_t align)
+{
+	vm_paddr_t base, lim, pa;
+	void *ptr;
+	u_int idx;
+
+	if (phys_avail_segs == 0)
+		return (NULL);
+
+	len = round_page(len);
+
+	/*
+	 * Try and allocate with least effort.
+	 */
+	idx = phys_avail_segs * 2;
+	while (idx > 0) {
+		idx -= 2;
+		base = phys_avail[idx];
+		lim = phys_avail[idx + 1];
+
+		if (lim - base < len)
+			continue;
+
+		/* First try from the end. */
+		pa = lim - len;
+		if ((pa & (align - 1)) == 0) {
+			if (pa == base)
+				ia64_physmem_remove(idx);
+			else
+				phys_avail[idx + 1] = pa;
+			goto gotit;
+		}
+
+		/* Try from the start next. */
+		pa = base;
+		if ((pa & (align - 1)) == 0) {
+			if (pa + len == lim)
+				ia64_physmem_remove(idx);
+			else
+				phys_avail[idx] += len;
+			goto gotit;
+		}
+	}
+
+	/*
+	 * Find a good segment and split it up.
+	 */
+	idx = phys_avail_segs * 2;
+	while (idx > 0) {
+		idx -= 2;
+		base = phys_avail[idx];
+		lim = phys_avail[idx + 1];
+
+		/* Round the start up to the requested alignment. */
+		pa = (base + align - 1) & ~(align - 1);
+		if (pa + len <= lim) {
+			ia64_physmem_delete(pa, len);
+			goto gotit;
+		}
+	}
+
+	/* Out of luck. */
+	return (NULL);
+
+ gotit:
+	ptr = (void *)IA64_PHYS_TO_RR7(pa);
+	bzero(ptr, len);
+	return (ptr);
+}


Property changes on: trunk/sys/ia64/ia64/physmem.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/pmap.c
===================================================================
--- trunk/sys/ia64/ia64/pmap.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/pmap.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,2896 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ * Copyright (c) 1998,2000 Doug Rabson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
+ *	from:	i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
+ *		with some ideas from NetBSD's alpha pmap
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/pmap.c 270920 2014-09-01 07:58:15Z kib $");
+
+#include "opt_pmap.h"
+
+#include <sys/param.h>
+#include <sys/efi.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/mman.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pageout.h>
+#include <vm/uma.h>
+
+#include <machine/bootinfo.h>
+#include <machine/md_var.h>
+#include <machine/pal.h>
+
+/*
+ *	Manages physical address maps.
+ *
+ *	Since the information managed by this module is
+ *	also stored by the logical address mapping module,
+ *	this module may throw away valid virtual-to-physical
+ *	mappings at almost any time.  However, invalidations
+ *	of virtual-to-physical mappings must be done as
+ *	requested.
+ *
+ *	In order to cope with hardware architectures which
+ *	make virtual-to-physical map invalidates expensive,
+ *	this module may delay invalidate or reduced protection
+ *	operations until such time as they are actually
+ *	necessary.  This module is given full information as
+ *	to which processors are currently using which maps,
+ *	and to when physical maps must be made correct.
+ */
+
+/*
+ * Following the Linux model, region IDs are allocated in groups of
+ * eight so that a single region ID can be used for as many RRs as we
+ * want by encoding the RR number into the low bits of the ID.
+ *
+ * We reserve region ID 0 for the kernel and allocate the remaining
+ * IDs for user pmaps.
+ *
+ * Region 0-3:	User virtually mapped
+ * Region 4:	PBVM and special mappings
+ * Region 5:	Kernel virtual memory
+ * Region 6:	Direct-mapped uncacheable
+ * Region 7:	Direct-mapped cacheable
+ */
+
+/* XXX move to a header. */
+extern uint64_t ia64_gateway_page[];
+
+#if !defined(DIAGNOSTIC)
+#define PMAP_INLINE __inline
+#else
+#define PMAP_INLINE
+#endif
+
+#ifdef PV_STATS
+#define PV_STAT(x)	do { x ; } while (0)
+#else
+#define PV_STAT(x)	do { } while (0)
+#endif
+
+#define	pmap_accessed(lpte)		((lpte)->pte & PTE_ACCESSED)
+#define	pmap_dirty(lpte)		((lpte)->pte & PTE_DIRTY)
+#define	pmap_exec(lpte)			((lpte)->pte & PTE_AR_RX)
+#define	pmap_managed(lpte)		((lpte)->pte & PTE_MANAGED)
+#define	pmap_ppn(lpte)			((lpte)->pte & PTE_PPN_MASK)
+#define	pmap_present(lpte)		((lpte)->pte & PTE_PRESENT)
+#define	pmap_prot(lpte)			(((lpte)->pte & PTE_PROT_MASK) >> 56)
+#define	pmap_wired(lpte)		((lpte)->pte & PTE_WIRED)
+
+#define	pmap_clear_accessed(lpte)	(lpte)->pte &= ~PTE_ACCESSED
+#define	pmap_clear_dirty(lpte)		(lpte)->pte &= ~PTE_DIRTY
+#define	pmap_clear_present(lpte)	(lpte)->pte &= ~PTE_PRESENT
+#define	pmap_clear_wired(lpte)		(lpte)->pte &= ~PTE_WIRED
+
+#define	pmap_set_wired(lpte)		(lpte)->pte |= PTE_WIRED
+
+/*
+ * Individual PV entries are stored in per-pmap chunks.  This saves
+ * space by eliminating the need to record the pmap within every PV
+ * entry.
+ */
+#if PAGE_SIZE == 8192
+#define	_NPCM	6
+#define	_NPCPV	337
+#define	_NPCS	2
+#elif PAGE_SIZE == 16384
+#define	_NPCM	11
+#define	_NPCPV	677
+#define	_NPCS	1
+#endif
+struct pv_chunk {
+	pmap_t			pc_pmap;
+	TAILQ_ENTRY(pv_chunk)	pc_list;
+	u_long			pc_map[_NPCM];	/* bitmap; 1 = free */
+	TAILQ_ENTRY(pv_chunk)	pc_lru;
+	u_long			pc_spare[_NPCS];
+	struct pv_entry		pc_pventry[_NPCPV];
+};
+
+/*
+ * The VHPT bucket head structure.
+ */
+struct ia64_bucket {
+	uint64_t	chain;
+	struct mtx	mutex;
+	u_int		length;
+};
+
+/*
+ * Statically allocated kernel pmap
+ */
+struct pmap kernel_pmap_store;
+
+vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
+
+/*
+ * Kernel virtual memory management.
+ */
+static int nkpt;
+extern struct ia64_lpte ***ia64_kptdir;
+
+#define KPTE_DIR0_INDEX(va) \
+	(((va) >> (3*PAGE_SHIFT-8)) & ((1<<(PAGE_SHIFT-3))-1))
+#define KPTE_DIR1_INDEX(va) \
+	(((va) >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1))
+#define KPTE_PTE_INDEX(va) \
+	(((va) >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1))
+#define NKPTEPG		(PAGE_SIZE / sizeof(struct ia64_lpte))
+
+vm_offset_t kernel_vm_end;
+
+/* Defaults for ptc.e. */
+static uint64_t pmap_ptc_e_base = 0;
+static uint32_t pmap_ptc_e_count1 = 1;
+static uint32_t pmap_ptc_e_count2 = 1;
+static uint32_t pmap_ptc_e_stride1 = 0;
+static uint32_t pmap_ptc_e_stride2 = 0;
+
+struct mtx pmap_ptc_mutex;
+
+/*
+ * Data for the RID allocator
+ */
+static int pmap_ridcount;
+static int pmap_rididx;
+static int pmap_ridmapsz;
+static int pmap_ridmax;
+static uint64_t *pmap_ridmap;
+struct mtx pmap_ridmutex;
+
+static struct rwlock_padalign pvh_global_lock;
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
+static int pv_entry_count;
+
+/*
+ * Data for allocating PTEs for user processes.
+ */
+static uma_zone_t ptezone;
+
+/*
+ * Virtual Hash Page Table (VHPT) data.
+ */
+/* SYSCTL_DECL(_machdep); */
+static SYSCTL_NODE(_machdep, OID_AUTO, vhpt, CTLFLAG_RD, 0, "");
+
+struct ia64_bucket *pmap_vhpt_bucket;
+
+int pmap_vhpt_nbuckets;
+SYSCTL_INT(_machdep_vhpt, OID_AUTO, nbuckets, CTLFLAG_RD,
+    &pmap_vhpt_nbuckets, 0, "");
+
+int pmap_vhpt_log2size = 0;
+TUNABLE_INT("machdep.vhpt.log2size", &pmap_vhpt_log2size);
+SYSCTL_INT(_machdep_vhpt, OID_AUTO, log2size, CTLFLAG_RD,
+    &pmap_vhpt_log2size, 0, "");
+
+static int pmap_vhpt_inserts;
+SYSCTL_INT(_machdep_vhpt, OID_AUTO, inserts, CTLFLAG_RD,
+    &pmap_vhpt_inserts, 0, "");
+
+static int pmap_vhpt_population(SYSCTL_HANDLER_ARGS);
+SYSCTL_PROC(_machdep_vhpt, OID_AUTO, population, CTLTYPE_INT | CTLFLAG_RD,
+    NULL, 0, pmap_vhpt_population, "I", "");
+
+static struct ia64_lpte *pmap_find_vhpt(vm_offset_t va);
+
+static void free_pv_chunk(struct pv_chunk *pc);
+static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
+static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
+static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap);
+
+static void	pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
+		    vm_page_t m, vm_prot_t prot);
+static void	pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va);
+static int	pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte,
+		    vm_offset_t va, pv_entry_t pv, int freepte);
+static int	pmap_remove_vhpt(vm_offset_t va);
+static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
+		    vm_page_t m);
+
+/*
+ * Initialize every VHPT entry at 'vhpt' to an invalid PTE (tag bit 63
+ * set) and link each entry to its collision-chain bucket head.
+ */
+static void
+pmap_initialize_vhpt(vm_offset_t vhpt)
+{
+	struct ia64_lpte *pte;
+	u_int i;
+
+	pte = (struct ia64_lpte *)vhpt;
+	for (i = 0; i < pmap_vhpt_nbuckets; i++) {
+		pte[i].pte = 0;
+		pte[i].itir = 0;
+		pte[i].tag = 1UL << 63; /* Invalid tag */
+		pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i);
+	}
+}
+
+#ifdef SMP
+/*
+ * Allocate and initialize a per-CPU VHPT for an AP: a naturally-aligned,
+ * physically contiguous, wired block of 2^pmap_vhpt_log2size bytes.
+ * Returns the region 7 (direct-mapped) address, or 0 on failure.
+ */
+vm_offset_t
+pmap_alloc_vhpt(void)
+{
+	vm_offset_t vhpt;
+	vm_page_t m;
+	vm_size_t size;
+
+	size = 1UL << pmap_vhpt_log2size;
+	m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
+	    VM_ALLOC_WIRED, atop(size), 0UL, ~0UL, size, 0UL,
+	    VM_MEMATTR_DEFAULT);
+	if (m != NULL) {
+		vhpt = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
+		pmap_initialize_vhpt(vhpt);
+		return (vhpt);
+	}
+	return (0);
+}
+#endif
+
+/*
+ *	Bootstrap the system enough to run with virtual memory.
+ */
+void
+pmap_bootstrap()
+{
+	struct ia64_pal_result res;
+	vm_offset_t base;
+	size_t size;
+	int i, ridbits;
+
+	/*
+	 * Query the PAL Code to find the loop parameters for the
+	 * ptc.e instruction.
+	 */
+	res = ia64_call_pal_static(PAL_PTCE_INFO, 0, 0, 0);
+	if (res.pal_status != 0)
+		panic("Can't configure ptc.e parameters");
+	pmap_ptc_e_base = res.pal_result[0];
+	pmap_ptc_e_count1 = res.pal_result[1] >> 32;
+	pmap_ptc_e_count2 = res.pal_result[1];
+	pmap_ptc_e_stride1 = res.pal_result[2] >> 32;
+	pmap_ptc_e_stride2 = res.pal_result[2];
+	if (bootverbose)
+		printf("ptc.e base=0x%lx, count1=%u, count2=%u, "
+		       "stride1=0x%x, stride2=0x%x\n",
+		       pmap_ptc_e_base,
+		       pmap_ptc_e_count1,
+		       pmap_ptc_e_count2,
+		       pmap_ptc_e_stride1,
+		       pmap_ptc_e_stride2);
+
+	mtx_init(&pmap_ptc_mutex, "PTC.G mutex", NULL, MTX_SPIN);
+
+	/*
+	 * Setup RIDs. RIDs 0..7 are reserved for the kernel.
+	 *
+	 * We currently need at least 19 bits in the RID because PID_MAX
+	 * can only be encoded in 17 bits and we need RIDs for 4 regions
+	 * per process. With PID_MAX equalling 99999 this means that we
+	 * need to be able to encode 399996 (=4*PID_MAX).
+	 * The Itanium processor only has 18 bits and the architected
+	 * minimum is exactly that. So, we cannot use a PID based scheme
+	 * in those cases. Enter pmap_ridmap...
+	 * We should avoid the map when running on a processor that has
+	 * implemented enough bits. This means that we should pass the
+	 * process/thread ID to pmap. This we currently don't do, so we
+	 * use the map anyway. However, we don't want to allocate a map
+	 * that is large enough to cover the range dictated by the number
+	 * of bits in the RID, because that may result in a RID map of
+	 * 2MB in size for a 24-bit RID. A 64KB map is enough.
+	 * The bottomline: we create a 32KB map when the processor only
+	 * implements 18 bits (or when we can't figure it out). Otherwise
+	 * we create a 64KB map.
+	 */
+	res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
+	if (res.pal_status != 0) {
+		if (bootverbose)
+			printf("Can't read VM Summary - assuming 18 Region ID bits\n");
+		ridbits = 18; /* guaranteed minimum */
+	} else {
+		ridbits = (res.pal_result[1] >> 8) & 0xff;
+		if (bootverbose)
+			printf("Processor supports %d Region ID bits\n",
+			    ridbits);
+	}
+	if (ridbits > 19)
+		ridbits = 19;
+
+	pmap_ridmax = (1 << ridbits);
+	pmap_ridmapsz = pmap_ridmax / 64;
+	pmap_ridmap = ia64_physmem_alloc(pmap_ridmax / 8, PAGE_SIZE);
+	/* Mark the 8 kernel RIDs (0..7) as allocated. */
+	pmap_ridmap[0] |= 0xff;
+	pmap_rididx = 0;
+	pmap_ridcount = 8;
+	mtx_init(&pmap_ridmutex, "RID allocator lock", NULL, MTX_DEF);
+
+	/*
+	 * Allocate some memory for initial kernel 'page tables'.
+	 */
+	ia64_kptdir = ia64_physmem_alloc(PAGE_SIZE, PAGE_SIZE);
+	nkpt = 0;
+	kernel_vm_end = VM_INIT_KERNEL_ADDRESS;
+
+	/*
+	 * Determine a valid (mappable) VHPT size.
+	 */
+	TUNABLE_INT_FETCH("machdep.vhpt.log2size", &pmap_vhpt_log2size);
+	if (pmap_vhpt_log2size == 0)
+		pmap_vhpt_log2size = 20;
+	else if (pmap_vhpt_log2size < 16)
+		pmap_vhpt_log2size = 16;
+	else if (pmap_vhpt_log2size > 28)
+		pmap_vhpt_log2size = 28;
+	if (pmap_vhpt_log2size & 1)
+		pmap_vhpt_log2size--;
+
+	size = 1UL << pmap_vhpt_log2size;
+	base = (uintptr_t)ia64_physmem_alloc(size, size);
+	if (base == 0)
+		panic("Unable to allocate VHPT");
+
+	PCPU_SET(md.vhpt, base);
+	if (bootverbose)
+		printf("VHPT: address=%#lx, size=%#lx\n", base, size);
+
+	pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte);
+	pmap_vhpt_bucket = ia64_physmem_alloc(pmap_vhpt_nbuckets *
+	    sizeof(struct ia64_bucket), PAGE_SIZE);
+	for (i = 0; i < pmap_vhpt_nbuckets; i++) {
+		/* Stolen memory is zeroed. */
+		mtx_init(&pmap_vhpt_bucket[i].mutex, "VHPT bucket lock", NULL,
+		    MTX_NOWITNESS | MTX_SPIN);
+	}
+
+	pmap_initialize_vhpt(base);
+	map_vhpt(base);
+	/*
+	 * NOTE(review): PTA encoding: base | vf (bit 8) | size (bits 2..7)
+	 * | ve (bit 0) -- confirm against the architecture manual.
+	 */
+	ia64_set_pta(base + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
+	ia64_srlz_i();
+
+	virtual_avail = VM_INIT_KERNEL_ADDRESS;
+	virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+	/*
+	 * Initialize the kernel pmap (which is statically allocated).
+	 */
+	PMAP_LOCK_INIT(kernel_pmap);
+	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
+		kernel_pmap->pm_rid[i] = 0;
+	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
+	PCPU_SET(md.current_pmap, kernel_pmap);
+
+ 	/*
+	 * Initialize the global pv list lock.
+	 */
+	rw_init(&pvh_global_lock, "pmap pv global");
+
+	/* Region 5 is mapped via the VHPT. */
+	ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);
+
+	/*
+	 * Clear out any random TLB entries left over from booting.
+	 */
+	pmap_invalidate_all();
+
+	map_gateway_page();
+}
+
+/*
+ * Sysctl handler for machdep.vhpt.population: report the total number
+ * of entries currently chained into all VHPT collision buckets.
+ */
+static int
+pmap_vhpt_population(SYSCTL_HANDLER_ARGS)
+{
+	int count, error, i;
+
+	count = 0;
+	for (i = 0; i < pmap_vhpt_nbuckets; i++)
+		count += pmap_vhpt_bucket[i].length;
+
+	error = SYSCTL_OUT(req, &count, sizeof(count));
+	return (error);
+}
+
+/*
+ * Return the direct-mapped VA for a page: region 6 (uncacheable) when
+ * the page's memory attribute is uncacheable, region 7 (cacheable)
+ * otherwise.
+ */
+vm_offset_t
+pmap_page_to_va(vm_page_t m)
+{
+	vm_paddr_t pa;
+	vm_offset_t va;
+
+	pa = VM_PAGE_TO_PHYS(m);
+	va = (m->md.memattr == VM_MEMATTR_UNCACHEABLE) ? IA64_PHYS_TO_RR6(pa) :
+	    IA64_PHYS_TO_RR7(pa);
+	return (va);
+}
+
+/*
+ *	Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	/* Empty pv list; default (cacheable) memory attribute. */
+	TAILQ_INIT(&m->md.pv_list);
+	m->md.memattr = VM_MEMATTR_DEFAULT;
+}
+
+/*
+ *	Initialize the pmap module.
+ *	Called by vm_init, to initialize any structures that the pmap
+ *	system needs to map virtual memory.
+ */
+void
+pmap_init(void)
+{
+
+	CTR1(KTR_PMAP, "%s()", __func__);
+
+	/* UMA zone for user-space PTEs; entries are never freed back. */
+	ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte), 
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
+}
+
+
+/***************************************************
+ * Manipulate TLBs for a pmap
+ ***************************************************/
+
+/*
+ * Invalidate the translation for 'va' system-wide: poison the matching
+ * entry (if any) in every CPU's VHPT, then issue a global broadcast
+ * purge (ptc.ga).  The spin mutex serializes global purges, which the
+ * architecture requires to be issued one at a time.
+ */
+static void
+pmap_invalidate_page(vm_offset_t va)
+{
+	struct ia64_lpte *pte;
+	struct pcpu *pc;
+	uint64_t tag;
+	u_int vhpt_ofs;
+
+	critical_enter();
+
+	/* Same hash offset applies to each per-CPU VHPT. */
+	vhpt_ofs = ia64_thash(va) - PCPU_GET(md.vhpt);
+	tag = ia64_ttag(va);
+	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
+		pte = (struct ia64_lpte *)(pc->pc_md.vhpt + vhpt_ofs);
+		/* Only invalidate if the tag still matches this VA. */
+		atomic_cmpset_64(&pte->tag, tag, 1UL << 63);
+	}
+
+	mtx_lock_spin(&pmap_ptc_mutex);
+
+	ia64_ptc_ga(va, PAGE_SHIFT << 2);
+	ia64_mf();
+	ia64_srlz_i();
+
+	mtx_unlock_spin(&pmap_ptc_mutex);
+
+	ia64_invala();
+
+	critical_exit();
+}
+
+/*
+ * Purge the entire local TLB.  The pmap_ptc_e_* globals describe the
+ * nested loop of "ptc.e" operations needed to cover the whole
+ * translation cache (NOTE(review): presumably obtained from firmware
+ * during bootstrap — confirm where they are initialized).
+ */
+void
+pmap_invalidate_all(void)
+{
+	uint64_t addr;
+	int i, j;
+
+	addr = pmap_ptc_e_base;
+	for (i = 0; i < pmap_ptc_e_count1; i++) {
+		for (j = 0; j < pmap_ptc_e_count2; j++) {
+			ia64_ptc_e(addr);
+			addr += pmap_ptc_e_stride2;
+		}
+		addr += pmap_ptc_e_stride1;
+	}
+	/* Make the purge visible to the instruction stream. */
+	ia64_srlz_i();
+}
+
+/*
+ * Allocate a region ID (RID) from the global allocation bitmap.
+ * Panics if every RID is in use.  pmap_rididx persists across calls
+ * so successive scans resume where the previous one stopped.
+ */
+static uint32_t
+pmap_allocate_rid(void)
+{
+	uint64_t bit, bits;
+	int rid;
+
+	mtx_lock(&pmap_ridmutex);
+	if (pmap_ridcount == pmap_ridmax)
+		panic("pmap_allocate_rid: All Region IDs used");
+
+	/* Find an index with a free bit. */
+	while ((bits = pmap_ridmap[pmap_rididx]) == ~0UL) {
+		pmap_rididx++;
+		if (pmap_rididx == pmap_ridmapsz)
+			pmap_rididx = 0;
+	}
+	/* 64 RIDs per bitmap word. */
+	rid = pmap_rididx * 64;
+
+	/* Find a free bit. */
+	bit = 1UL;
+	while (bits & bit) {
+		rid++;
+		bit <<= 1;
+	}
+
+	pmap_ridmap[pmap_rididx] |= bit;
+	pmap_ridcount++;
+	mtx_unlock(&pmap_ridmutex);
+
+	return rid;
+}
+
+/*
+ * Return a region ID to the global allocation bitmap.
+ */
+static void
+pmap_free_rid(uint32_t rid)
+{
+	uint64_t mask;
+	int word;
+
+	/* Locate the bit for this RID: 64 RIDs per bitmap word. */
+	word = rid / 64;
+	mask = 1UL << (rid % 64);
+
+	mtx_lock(&pmap_ridmutex);
+	pmap_ridmap[word] &= ~mask;
+	pmap_ridcount--;
+	mtx_unlock(&pmap_ridmutex);
+}
+
+/***************************************************
+ * Page table page management routines.....
+ ***************************************************/
+
+/*
+ * Shared initialization for pmap_pinit0() and pmap_pinit(): assign a
+ * fresh RID to every user region and reset the pv/stat bookkeeping.
+ */
+static void
+pmap_pinit_common(pmap_t pmap)
+{
+	u_int rgn;
+
+	for (rgn = 0; rgn < IA64_VM_MINKERN_REGION; rgn++)
+		pmap->pm_rid[rgn] = pmap_allocate_rid();
+	TAILQ_INIT(&pmap->pm_pvchunk);
+	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
+}
+
+/*
+ * Initialize pmap0 (the pmap of the initial process).  Unlike
+ * pmap_pinit(), the lock has not been set up yet, so do that first.
+ */
+void
+pmap_pinit0(pmap_t pmap)
+{
+
+	CTR2(KTR_PMAP, "%s(pm=%p)", __func__, pmap);
+
+	PMAP_LOCK_INIT(pmap);
+	pmap_pinit_common(pmap);
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ *
+ * Always returns 1 (success); RID exhaustion panics in
+ * pmap_allocate_rid() rather than failing here.
+ */
+int
+pmap_pinit(pmap_t pmap)
+{
+
+	CTR2(KTR_PMAP, "%s(pm=%p)", __func__, pmap);
+
+	pmap_pinit_common(pmap);
+	return (1);
+}
+
+/***************************************************
+ * Pmap allocation/deallocation routines.
+ ***************************************************/
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap_t pmap)
+{
+	u_int rgn;
+
+	CTR2(KTR_PMAP, "%s(pm=%p)", __func__, pmap);
+
+	/* Return every region ID this pmap was assigned. */
+	for (rgn = 0; rgn < IA64_VM_MINKERN_REGION; rgn++) {
+		if (pmap->pm_rid[rgn] != 0)
+			pmap_free_rid(pmap->pm_rid[rgn]);
+	}
+}
+
+/*
+ * grow the number of kernel page table entries, if needed
+ */
+void
+pmap_growkernel(vm_offset_t addr)
+{
+	struct ia64_lpte **dir1;
+	struct ia64_lpte *leaf;
+	vm_page_t nkpg;
+
+	CTR2(KTR_PMAP, "%s(va=%#lx)", __func__, addr);
+
+	while (kernel_vm_end <= addr) {
+		/* Hard cap on page-table pages: directories plus leaves. */
+		if (nkpt == PAGE_SIZE/8 + PAGE_SIZE*PAGE_SIZE/64)
+			panic("%s: out of kernel address space", __func__);
+
+		/* Create the first-level directory page if missing. */
+		dir1 = ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)];
+		if (dir1 == NULL) {
+			nkpg = vm_page_alloc(NULL, nkpt++,
+			    VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
+			if (!nkpg)
+				panic("%s: cannot add dir. page", __func__);
+
+			/* Access the new page through the direct map. */
+			dir1 = (struct ia64_lpte **)pmap_page_to_va(nkpg);
+			bzero(dir1, PAGE_SIZE);
+			ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)] = dir1;
+		}
+
+		/* Add a zeroed leaf PTE page for this range. */
+		nkpg = vm_page_alloc(NULL, nkpt++,
+		    VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
+		if (!nkpg)
+			panic("%s: cannot add PTE page", __func__);
+
+		leaf = (struct ia64_lpte *)pmap_page_to_va(nkpg);
+		bzero(leaf, PAGE_SIZE);
+		dir1[KPTE_DIR1_INDEX(kernel_vm_end)] = leaf;
+
+		kernel_vm_end += PAGE_SIZE * NKPTEPG;
+	}
+}
+
+/***************************************************
+ * page management routines.
+ ***************************************************/
+
+/*
+ * A pv chunk must occupy exactly one page so pv_to_chunk() can
+ * recover the chunk header by masking the pv entry's address.
+ */
+CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
+
+/* Map a pv entry back to the page-aligned chunk that contains it. */
+static __inline struct pv_chunk *
+pv_to_chunk(pv_entry_t pv)
+{
+
+	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
+}
+
+#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
+
+/*
+ * Free masks for the pc_map[] bitmap: all words are fully free
+ * except the last, which only has _NPCPV mod 64 usable bits.
+ */
+#define	PC_FREE_FULL	0xfffffffffffffffful
+#define	PC_FREE_PARTIAL	\
+	((1UL << (_NPCPV - sizeof(u_long) * 8 * (_NPCM - 1))) - 1)
+
+#if PAGE_SIZE == 8192
+static const u_long pc_freemask[_NPCM] = {
+	PC_FREE_FULL, PC_FREE_FULL, PC_FREE_FULL,
+	PC_FREE_FULL, PC_FREE_FULL, PC_FREE_PARTIAL
+};
+#elif PAGE_SIZE == 16384
+static const u_long pc_freemask[_NPCM] = {
+	PC_FREE_FULL, PC_FREE_FULL, PC_FREE_FULL,
+	PC_FREE_FULL, PC_FREE_FULL, PC_FREE_FULL,
+	PC_FREE_FULL, PC_FREE_FULL, PC_FREE_FULL,
+	PC_FREE_FULL, PC_FREE_PARTIAL
+};
+#endif
+
+static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
+    "Current number of pv entries");
+
+/* Detailed pv accounting, compiled in only with PV_STATS. */
+#ifdef PV_STATS
+static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
+    "Current number of pv entry chunks");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
+    "Current number of pv entry chunks allocated");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
+    "Current number of pv entry chunks frees");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
+    "Number of times tried to get a chunk page but failed.");
+
+static long pv_entry_frees, pv_entry_allocs;
+static int pv_entry_spare;
+
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
+    "Current number of pv entry frees");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
+    "Current number of pv entry allocs");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
+    "Current number of spare pv entries");
+#endif
+
+/*
+ * We are in a serious low memory condition.  Resort to
+ * drastic measures to free some pages so we can allocate
+ * another pv entry chunk.
+ *
+ * Walks the global LRU list of pv chunks, tearing down non-wired
+ * mappings until either a pv entry is freed in 'locked_pmap' or an
+ * entire chunk becomes empty, in which case that chunk's page is
+ * returned for reuse.  Returns NULL if nothing could be reclaimed.
+ */
+static vm_page_t
+pmap_pv_reclaim(pmap_t locked_pmap)
+{
+	struct pch newtail;
+	struct pv_chunk *pc;
+	struct ia64_lpte *pte;
+	pmap_t pmap;
+	pv_entry_t pv;
+	vm_offset_t va;
+	vm_page_t m, m_pc;
+	u_long inuse;
+	int bit, field, freed, idx;
+
+	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
+	pmap = NULL;
+	m_pc = NULL;
+	/* Chunks we could not (fully) reclaim go onto 'newtail'. */
+	TAILQ_INIT(&newtail);
+	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL) {
+		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+		if (pmap != pc->pc_pmap) {
+			/* Drop the previous pmap's lock before moving on. */
+			if (pmap != NULL) {
+				if (pmap != locked_pmap) {
+					pmap_switch(locked_pmap);
+					PMAP_UNLOCK(pmap);
+				}
+			}
+			pmap = pc->pc_pmap;
+			/* Avoid deadlock and lock recursion. */
+			if (pmap > locked_pmap)
+				PMAP_LOCK(pmap);
+			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
+				pmap = NULL;
+				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
+				continue;
+			}
+			pmap_switch(pmap);
+		}
+
+		/*
+		 * Destroy every non-wired, 8 KB page mapping in the chunk.
+		 */
+		freed = 0;
+		for (field = 0; field < _NPCM; field++) {
+			/* Iterate over the in-use bits of this word. */
+			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
+			    inuse != 0; inuse &= ~(1UL << bit)) {
+				bit = ffsl(inuse) - 1;
+				idx = field * sizeof(inuse) * NBBY + bit;
+				pv = &pc->pc_pventry[idx];
+				va = pv->pv_va;
+				pte = pmap_find_vhpt(va);
+				KASSERT(pte != NULL, ("pte"));
+				if (pmap_wired(pte))
+					continue;
+				pmap_remove_vhpt(va);
+				pmap_invalidate_page(va);
+				m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
+				/* Preserve referenced/dirty state. */
+				if (pmap_accessed(pte))
+					vm_page_aflag_set(m, PGA_REFERENCED);
+				if (pmap_dirty(pte))
+					vm_page_dirty(m);
+				pmap_free_pte(pte, va);
+				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+				if (TAILQ_EMPTY(&m->md.pv_list))
+					vm_page_aflag_clear(m, PGA_WRITEABLE);
+				pc->pc_map[field] |= 1UL << bit;
+				freed++;
+			}
+		}
+		if (freed == 0) {
+			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
+			continue;
+		}
+		/* Every freed mapping is for a 8 KB page. */
+		pmap->pm_stats.resident_count -= freed;
+		PV_STAT(pv_entry_frees += freed);
+		PV_STAT(pv_entry_spare += freed);
+		pv_entry_count -= freed;
+		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+		/* If the chunk is only partially free, keep it. */
+		for (field = 0; field < _NPCM; field++)
+			if (pc->pc_map[field] != pc_freemask[field]) {
+				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
+				    pc_list);
+				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
+
+				/*
+				 * One freed pv entry in locked_pmap is
+				 * sufficient.
+				 */
+				if (pmap == locked_pmap)
+					goto out;
+				break;
+			}
+		if (field == _NPCM) {
+			PV_STAT(pv_entry_spare -= _NPCPV);
+			PV_STAT(pc_chunk_count--);
+			PV_STAT(pc_chunk_frees++);
+			/* Entire chunk is free; return it. */
+			m_pc = PHYS_TO_VM_PAGE(IA64_RR_MASK((vm_offset_t)pc));
+			break;
+		}
+	}
+out:
+	/* Re-queue the skipped/partial chunks at the LRU tail. */
+	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
+	if (pmap != NULL) {
+		if (pmap != locked_pmap) {
+			pmap_switch(locked_pmap);
+			PMAP_UNLOCK(pmap);
+		}
+	}
+	return (m_pc);
+}
+
+/*
+ * free the pv_entry back to the free list
+ *
+ * Marks the entry's bit free in its chunk; if the chunk becomes
+ * completely free the whole chunk page is released.
+ */
+static void
+free_pv_entry(pmap_t pmap, pv_entry_t pv)
+{
+	struct pv_chunk *pc;
+	int bit, field, idx;
+
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	PV_STAT(pv_entry_frees++);
+	PV_STAT(pv_entry_spare++);
+	pv_entry_count--;
+	/* Locate this entry's bit within its containing chunk. */
+	pc = pv_to_chunk(pv);
+	idx = pv - &pc->pc_pventry[0];
+	field = idx / (sizeof(u_long) * NBBY);
+	bit = idx % (sizeof(u_long) * NBBY);
+	pc->pc_map[field] |= 1ul << bit;
+	for (idx = 0; idx < _NPCM; idx++)
+		if (pc->pc_map[idx] != pc_freemask[idx]) {
+			/*
+			 * 98% of the time, pc is already at the head of the
+			 * list.  If it isn't already, move it to the head.
+			 */
+			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
+			    pc)) {
+				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
+				    pc_list);
+			}
+			return;
+		}
+	/* All bits free: release the chunk page itself. */
+	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+	free_pv_chunk(pc);
+}
+
+/*
+ * Release a fully-free pv chunk: unlink it from the global LRU and
+ * free the backing page (which was wired at allocation time).
+ */
+static void
+free_pv_chunk(struct pv_chunk *pc)
+{
+	vm_page_t m;
+
+ 	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+	PV_STAT(pv_entry_spare -= _NPCPV);
+	PV_STAT(pc_chunk_count--);
+	PV_STAT(pc_chunk_frees++);
+	/* entire chunk is free, return it */
+	m = PHYS_TO_VM_PAGE(IA64_RR_MASK((vm_offset_t)pc));
+	vm_page_unwire(m, 0);
+	vm_page_free(m);
+}
+
+/*
+ * get a new pv_entry, allocating a block from the system
+ * when needed.
+ *
+ * With try == TRUE, returns NULL when no page can be allocated;
+ * with try == FALSE, falls back to pmap_pv_reclaim() and retries,
+ * so it does not return NULL.
+ */
+static pv_entry_t
+get_pv_entry(pmap_t pmap, boolean_t try)
+{
+	struct pv_chunk *pc;
+	pv_entry_t pv;
+	vm_page_t m;
+	int bit, field, idx;
+
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	PV_STAT(pv_entry_allocs++);
+	pv_entry_count++;
+retry:
+	/* First try to carve an entry out of an existing chunk. */
+	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
+	if (pc != NULL) {
+		for (field = 0; field < _NPCM; field++) {
+			if (pc->pc_map[field]) {
+				bit = ffsl(pc->pc_map[field]) - 1;
+				break;
+			}
+		}
+		if (field < _NPCM) {
+			idx = field * sizeof(pc->pc_map[field]) * NBBY + bit;
+			pv = &pc->pc_pventry[idx];
+			pc->pc_map[field] &= ~(1ul << bit);
+			/* If this was the last item, move it to tail */
+			for (field = 0; field < _NPCM; field++)
+				if (pc->pc_map[field] != 0) {
+					PV_STAT(pv_entry_spare--);
+					return (pv);	/* not full, return */
+				}
+			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
+			PV_STAT(pv_entry_spare--);
+			return (pv);
+		}
+	}
+	/* No free items, allocate another chunk */
+	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+	    VM_ALLOC_WIRED);
+	if (m == NULL) {
+		if (try) {
+			pv_entry_count--;
+			PV_STAT(pc_chunk_tryfail++);
+			return (NULL);
+		}
+		/* Reclaim; whether or not a page came back, retry. */
+		m = pmap_pv_reclaim(pmap);
+		if (m == NULL)
+			goto retry;
+	}
+	PV_STAT(pc_chunk_count++);
+	PV_STAT(pc_chunk_allocs++);
+	/* Initialize the new chunk through the direct map. */
+	pc = (struct pv_chunk *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
+	pc->pc_pmap = pmap;
+	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
+	for (field = 1; field < _NPCM; field++)
+		pc->pc_map[field] = pc_freemask[field];
+	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
+	pv = &pc->pc_pventry[0];
+	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+	PV_STAT(pv_entry_spare += _NPCPV - 1);
+	return (pv);
+}
+
+/*
+ * Conditionally create a pv entry.  Fails (returns FALSE) instead of
+ * sleeping when no pv entry can be allocated.
+ */
+static boolean_t
+pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
+{
+	pv_entry_t pv;
+
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
+
+	pv = get_pv_entry(pmap, TRUE);
+	if (pv == NULL)
+		return (FALSE);
+	pv->pv_va = va;
+	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+	return (TRUE);
+}
+
+/*
+ * Add an ia64_lpte to the VHPT.
+ *
+ * The PTE is linked at the head of the bucket's collision chain.
+ */
+static void
+pmap_enter_vhpt(struct ia64_lpte *pte, vm_offset_t va)
+{
+	struct ia64_bucket *bckt;
+	struct ia64_lpte *vhpte;
+	uint64_t pte_pa;
+
+	/* Can fault, so get it out of the way. */
+	pte_pa = ia64_tpa((vm_offset_t)pte);
+
+	vhpte = (struct ia64_lpte *)ia64_thash(va);
+	bckt = (struct ia64_bucket *)vhpte->chain;
+
+	mtx_lock_spin(&bckt->mutex);
+	/*
+	 * Link the new PTE into the chain before publishing it as the
+	 * bucket head; the ia64_mf() orders the two stores so a lockless
+	 * walker never sees the head pointing at an unlinked entry.
+	 */
+	pte->chain = bckt->chain;
+	ia64_mf();
+	bckt->chain = pte_pa;
+
+	pmap_vhpt_inserts++;
+	bckt->length++;
+	mtx_unlock_spin(&bckt->mutex);
+}
+
+/*
+ * Remove the ia64_lpte matching va from the VHPT. Return zero if it
+ * worked or an appropriate error code otherwise.
+ */
+static int
+pmap_remove_vhpt(vm_offset_t va)
+{
+	struct ia64_bucket *bckt;
+	struct ia64_lpte *pte;
+	struct ia64_lpte *lpte;
+	struct ia64_lpte *vhpte;
+	uint64_t chain, tag;
+
+	tag = ia64_ttag(va);
+	vhpte = (struct ia64_lpte *)ia64_thash(va);
+	bckt = (struct ia64_bucket *)vhpte->chain;
+
+	/* Walk the collision chain, remembering the predecessor. */
+	lpte = NULL;
+	mtx_lock_spin(&bckt->mutex);
+	chain = bckt->chain;
+	pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
+	while (chain != 0 && pte->tag != tag) {
+		lpte = pte;
+		chain = pte->chain;
+		pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
+	}
+	if (chain == 0) {
+		/* No PTE with a matching tag in this bucket. */
+		mtx_unlock_spin(&bckt->mutex);
+		return (ENOENT);
+	}
+
+	/* Snip this pv_entry out of the collision chain. */
+	if (lpte == NULL)
+		bckt->chain = pte->chain;
+	else
+		lpte->chain = pte->chain;
+	ia64_mf();
+
+	bckt->length--;
+	mtx_unlock_spin(&bckt->mutex);
+	return (0);
+}
+
+/*
+ * Find the ia64_lpte for the given va, if any.
+ *
+ * Walks the VHPT bucket's collision chain under the bucket lock and
+ * returns the matching PTE, or NULL if the tag is not present.
+ */
+static struct ia64_lpte *
+pmap_find_vhpt(vm_offset_t va)
+{
+	struct ia64_bucket *bckt;
+	struct ia64_lpte *pte;
+	uint64_t chain, tag;
+
+	tag = ia64_ttag(va);
+	pte = (struct ia64_lpte *)ia64_thash(va);
+	bckt = (struct ia64_bucket *)pte->chain;
+
+	mtx_lock_spin(&bckt->mutex);
+	chain = bckt->chain;
+	pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
+	while (chain != 0 && pte->tag != tag) {
+		chain = pte->chain;
+		pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
+	}
+	mtx_unlock_spin(&bckt->mutex);
+	/* chain == 0 means we ran off the end without a match. */
+	return ((chain != 0) ? pte : NULL);
+}
+
+/*
+ * Remove an entry from the list of managed mappings.  If 'pv' is
+ * NULL, the (pmap, va) pair is looked up on the page's pv list.
+ * Returns 0 on success or ENOENT if no matching entry exists.
+ */
+static int
+pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pv_entry_t pv)
+{
+
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
+	if (pv == NULL) {
+		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+			if (PV_PMAP(pv) == pmap && pv->pv_va == va)
+				break;
+		}
+	}
+	if (pv == NULL)
+		return (ENOENT);
+
+	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+	if (TAILQ_EMPTY(&m->md.pv_list))
+		vm_page_aflag_clear(m, PGA_WRITEABLE);
+	free_pv_entry(pmap, pv);
+	return (0);
+}
+
+/*
+ * Create a pv entry for page at pa for (pmap, va).
+ */
+static void
+pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
+{
+	pv_entry_t pv;
+
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
+	/* try == FALSE: get_pv_entry() reclaims/retries, never NULL. */
+	pv = get_pv_entry(pmap, FALSE);
+	pv->pv_va = va;
+	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+}
+
+/*
+ *	Routine:	pmap_extract
+ *	Function:
+ *		Extract the physical page address associated
+ *		with the given map/virtual_address pair.
+ *		Returns 0 if there is no valid mapping.
+ */
+vm_paddr_t
+pmap_extract(pmap_t pmap, vm_offset_t va)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap;
+	vm_paddr_t pa;
+
+	CTR3(KTR_PMAP, "%s(pm=%p, va=%#lx)", __func__, pmap, va);
+
+	pa = 0;
+	PMAP_LOCK(pmap);
+	/* Activate 'pmap' so the VHPT lookup sees its region IDs. */
+	oldpmap = pmap_switch(pmap);
+	pte = pmap_find_vhpt(va);
+	if (pte != NULL && pmap_present(pte))
+		pa = pmap_ppn(pte);
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+	return (pa);
+}
+
+/*
+ *	Routine:	pmap_extract_and_hold
+ *	Function:
+ *		Atomically extract and hold the physical page
+ *		with the given pmap and virtual address pair
+ *		if that mapping permits the given protection.
+ */
+vm_page_t
+pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap;
+	vm_page_t m;
+	vm_paddr_t pa;
+
+	CTR4(KTR_PMAP, "%s(pm=%p, va=%#lx, prot=%#x)", __func__, pmap, va,
+	    prot);
+
+	pa = 0;
+	m = NULL;
+	PMAP_LOCK(pmap);
+	oldpmap = pmap_switch(pmap);
+retry:
+	pte = pmap_find_vhpt(va);
+	if (pte != NULL && pmap_present(pte) &&
+	    (pmap_prot(pte) & prot) == prot) {
+		m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
+		/*
+		 * tryrelock may drop and re-take the pmap lock to get
+		 * the page lock; if so, the mapping must be re-checked.
+		 */
+		if (vm_page_pa_tryrelock(pmap, pmap_ppn(pte), &pa))
+			goto retry;
+		vm_page_hold(m);
+	}
+	PA_UNLOCK_COND(pa);
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+	return (m);
+}
+
+/***************************************************
+ * Low level mapping routines.....
+ ***************************************************/
+
+/*
+ * Return the kernel lpte that maps the given virtual address.  The
+ * address must fall inside the region-5 range covered by the kernel
+ * 'page tables' (two directory levels plus a leaf PTE array).
+ */
+static struct ia64_lpte *
+pmap_find_kpte(vm_offset_t va)
+{
+	struct ia64_lpte **dir1;
+
+	KASSERT((va >> 61) == 5,
+		("kernel mapping 0x%lx not in region 5", va));
+	KASSERT(va < kernel_vm_end,
+		("kernel mapping 0x%lx out of range", va));
+
+	/* Walk both directory levels down to the leaf entry. */
+	dir1 = ia64_kptdir[KPTE_DIR0_INDEX(va)];
+	return (&dir1[KPTE_DIR1_INDEX(va)][KPTE_PTE_INDEX(va)]);
+}
+
+/*
+ * Find a pte suitable for mapping a user-space address. If one exists 
+ * in the VHPT, that one will be returned, otherwise a new pte is
+ * allocated.  Returns NULL when the allocation fails; callers (e.g.
+ * pmap_enter()) are expected to wait and retry in that case.
+ */
+static struct ia64_lpte *
+pmap_find_pte(vm_offset_t va)
+{
+	struct ia64_lpte *pte;
+
+	/* Kernel addresses use the statically allocated page tables. */
+	if (va >= VM_MAXUSER_ADDRESS)
+		return pmap_find_kpte(va);
+
+	pte = pmap_find_vhpt(va);
+	if (pte == NULL) {
+		/*
+		 * uma_zalloc() with M_NOWAIT can fail.  Guard the tag
+		 * store so we return NULL instead of dereferencing a
+		 * NULL pointer (the previous code faulted here on
+		 * allocation failure).  Bit 63 marks the PTE invalid.
+		 */
+		pte = uma_zalloc(ptezone, M_NOWAIT | M_ZERO);
+		if (pte != NULL)
+			pte->tag = 1UL << 63;
+	}
+	return (pte);
+}
+
+/*
+ * Free a pte which is now unused.  User PTEs go back to the zone
+ * allocator; kernel PTEs are statically allocated, so they are only
+ * marked invalid.
+ */
+static void
+pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va)
+{
+
+	if (va >= VM_MAXUSER_ADDRESS)
+		pmap_clear_present(pte);
+	else
+		uma_zfree(ptezone, pte);
+}
+
+/*
+ * Set the protection, privilege-level and access-rights fields of a
+ * PTE from a VM protection value.
+ */
+static PMAP_INLINE void
+pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot)
+{
+	/*
+	 * Access-rights table indexed by (prot & VM_PROT_ALL) >> 1:
+	 * the read bit is shifted away, so every entry grants read
+	 * and only the write/execute bits select the row.
+	 */
+	static long prot2ar[4] = {
+		PTE_AR_R,		/* VM_PROT_NONE */
+		PTE_AR_RW,		/* VM_PROT_WRITE */
+		PTE_AR_RX|PTE_ED,	/* VM_PROT_EXECUTE */
+		PTE_AR_RWX|PTE_ED	/* VM_PROT_WRITE|VM_PROT_EXECUTE */
+	};
+
+	pte->pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK | PTE_ED);
+	/* Software copy of the VM protection bits, kept at bit 56. */
+	pte->pte |= (uint64_t)(prot & VM_PROT_ALL) << 56;
+	pte->pte |= (prot == VM_PROT_NONE || pm == kernel_pmap)
+	    ? PTE_PL_KERN : PTE_PL_USER;
+	pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1];
+}
+
+/*
+ * Replace the memory-attribute field of a PTE with the given value.
+ */
+static PMAP_INLINE void
+pmap_pte_attr(struct ia64_lpte *pte, vm_memattr_t ma)
+{
+
+	pte->pte = (pte->pte & ~PTE_MA_MASK) | (ma & PTE_MA_MASK);
+}
+
+/*
+ * Set a pte to contain a valid mapping and enter it in the VHPT. If
+ * the pte was orginally valid, then its assumed to already be in the
+ * VHPT.
+ * This functions does not set the protection bits.  It's expected
+ * that those have been set correctly prior to calling this function.
+ */
+static void
+pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa,
+    boolean_t wired, boolean_t managed)
+{
+
+	/* Keep the prot/attr fields; rewrite everything else. */
+	pte->pte &= PTE_PROT_MASK | PTE_MA_MASK | PTE_PL_MASK |
+	    PTE_AR_MASK | PTE_ED;
+	pte->pte |= PTE_PRESENT;
+	/* Unmanaged pages skip A/D emulation: preset dirty+accessed. */
+	pte->pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED);
+	pte->pte |= (wired) ? PTE_WIRED : 0;
+	pte->pte |= pa & PTE_PPN_MASK;
+
+	pte->itir = PAGE_SHIFT << 2;
+
+	/*
+	 * Publish the tag only after the word/itir stores are globally
+	 * visible; the hardware walker matches on the tag.
+	 */
+	ia64_mf();
+
+	pte->tag = ia64_ttag(va);
+}
+
+/*
+ * Remove the (possibly managed) mapping represented by pte from the
+ * given pmap.
+ *
+ * Returns the value of pmap_remove_entry() for managed mappings,
+ * otherwise the (asserted-zero) result of pmap_remove_vhpt().
+ */
+static int
+pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va,
+		pv_entry_t pv, int freepte)
+{
+	int error;
+	vm_page_t m;
+
+	/*
+	 * First remove from the VHPT.
+	 */
+	error = pmap_remove_vhpt(va);
+	KASSERT(error == 0, ("%s: pmap_remove_vhpt returned %d",
+	    __func__, error));
+
+	pmap_invalidate_page(va);
+
+	if (pmap_wired(pte))
+		pmap->pm_stats.wired_count -= 1;
+
+	pmap->pm_stats.resident_count -= 1;
+	if (pmap_managed(pte)) {
+		/* Transfer dirty/referenced state to the vm_page. */
+		m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
+		if (pmap_dirty(pte))
+			vm_page_dirty(m);
+		if (pmap_accessed(pte))
+			vm_page_aflag_set(m, PGA_REFERENCED);
+
+		error = pmap_remove_entry(pmap, m, va, pv);
+	}
+	if (freepte)
+		pmap_free_pte(pte, va);
+
+	return (error);
+}
+
+/*
+ * Extract the physical page address associated with a kernel
+ * virtual address.
+ *
+ * Handles, from high to low: the region 6/7 direct maps, the
+ * region-5 KVA covered by the kernel page tables, the PBVM page
+ * table window, and the PBVM itself.  Returns 0 for anything else.
+ */
+vm_paddr_t
+pmap_kextract(vm_offset_t va)
+{
+	struct ia64_lpte *pte;
+	uint64_t *pbvm_pgtbl;
+	vm_paddr_t pa;
+	u_int idx;
+
+	CTR2(KTR_PMAP, "%s(va=%#lx)", __func__, va);
+
+	KASSERT(va >= VM_MAXUSER_ADDRESS, ("Must be kernel VA"));
+
+	/* Regions 6 and 7 are direct mapped. */
+	if (va >= IA64_RR_BASE(6)) {
+		pa = IA64_RR_MASK(va);
+		goto out;
+	}
+
+	/* Region 5 is our KVA. Bail out if the VA is beyond our limits. */
+	if (va >= kernel_vm_end)
+		goto err_out;
+	if (va >= VM_INIT_KERNEL_ADDRESS) {
+		pte = pmap_find_kpte(va);
+		pa = pmap_present(pte) ? pmap_ppn(pte) | (va & PAGE_MASK) : 0;
+		goto out;
+	}
+
+	/* The PBVM page table. */
+	if (va >= IA64_PBVM_PGTBL + bootinfo->bi_pbvm_pgtblsz)
+		goto err_out;
+	if (va >= IA64_PBVM_PGTBL) {
+		pa = (va - IA64_PBVM_PGTBL) + bootinfo->bi_pbvm_pgtbl;
+		goto out;
+	}
+
+	/* The PBVM itself: translate via its page table. */
+	if (va >= IA64_PBVM_BASE) {
+		pbvm_pgtbl = (void *)IA64_PBVM_PGTBL;
+		idx = (va - IA64_PBVM_BASE) >> IA64_PBVM_PAGE_SHIFT;
+		if (idx >= (bootinfo->bi_pbvm_pgtblsz >> 3))
+			goto err_out;
+		if ((pbvm_pgtbl[idx] & PTE_PRESENT) == 0)
+			goto err_out;
+		pa = (pbvm_pgtbl[idx] & PTE_PPN_MASK) +
+		    (va & IA64_PBVM_PAGE_MASK);
+		goto out;
+	}
+
+ err_out:
+	printf("XXX: %s: va=%#lx is invalid\n", __func__, va);
+	pa = 0;
+	/* FALLTHROUGH */
+
+ out:
+	return (pa);
+}
+
+/*
+ * Add a list of wired pages to the kva this routine is only used for
+ * temporary kernel mappings that do not need to have page modification
+ * or references recorded.  Note that old mappings are simply written
+ * over.  The page is effectively wired, but it's customary to not have
+ * the PTE reflect that, nor update statistics.
+ */
+void
+pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+{
+	struct ia64_lpte *pte;
+	int i;
+
+	CTR4(KTR_PMAP, "%s(va=%#lx, m_p=%p, cnt=%d)", __func__, va, m, count);
+
+	for (i = 0; i < count; i++) {
+		pte = pmap_find_kpte(va);
+		/*
+		 * An existing mapping only needs its TLB entry purged;
+		 * a fresh one must first be hooked into the VHPT.
+		 */
+		if (pmap_present(pte))
+			pmap_invalidate_page(va);
+		else
+			pmap_enter_vhpt(pte, va);
+		pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
+		pmap_pte_attr(pte, m[i]->md.memattr);
+		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
+		va += PAGE_SIZE;
+	}
+}
+
+/*
+ * this routine jerks page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ */
+void
+pmap_qremove(vm_offset_t va, int count)
+{
+	struct ia64_lpte *pte;
+	int i;
+
+	CTR3(KTR_PMAP, "%s(va=%#lx, cnt=%d)", __func__, va, count);
+
+	for (i = 0; i < count; i++) {
+		pte = pmap_find_kpte(va);
+		if (pmap_present(pte)) {
+			/* Unhook from the VHPT, purge, then invalidate. */
+			pmap_remove_vhpt(va);
+			pmap_invalidate_page(va);
+			pmap_clear_present(pte);
+		}
+		va += PAGE_SIZE;
+	}
+}
+
+/*
+ * Add a wired page to the kva.  As for pmap_qenter(), it's customary
+ * to not have the PTE reflect that, nor update statistics.
+ */
+void 
+pmap_kenter(vm_offset_t va, vm_paddr_t pa)
+{
+	struct ia64_lpte *pte;
+
+	CTR3(KTR_PMAP, "%s(va=%#lx, pa=%#lx)", __func__, va, pa);
+
+	pte = pmap_find_kpte(va);
+	/* Purge an existing mapping, or hook a new one into the VHPT. */
+	if (pmap_present(pte))
+		pmap_invalidate_page(va);
+	else
+		pmap_enter_vhpt(pte, va);
+	pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
+	pmap_pte_attr(pte, VM_MEMATTR_DEFAULT);
+	pmap_set_pte(pte, va, pa, FALSE, FALSE);
+}
+
+/*
+ * Remove a page from the kva
+ */
+void
+pmap_kremove(vm_offset_t va)
+{
+	struct ia64_lpte *pte;
+
+	CTR2(KTR_PMAP, "%s(va=%#lx)", __func__, va);
+
+	pte = pmap_find_kpte(va);
+	if (pmap_present(pte)) {
+		/* Unhook from the VHPT, purge, then invalidate. */
+		pmap_remove_vhpt(va);
+		pmap_invalidate_page(va);
+		pmap_clear_present(pte);
+	}
+}
+
+/*
+ *	Used to map a range of physical addresses into kernel
+ *	virtual address space.
+ *
+ *	The value passed in '*virt' is a suggested virtual address for
+ *	the mapping. Architectures which can support a direct-mapped
+ *	physical to virtual region can return the appropriate address
+ *	within that region, leaving '*virt' unchanged. Other
+ *	architectures should map the pages starting at '*virt' and
+ *	update '*virt' with the first usable address after the mapped
+ *	region.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+{
+
+	CTR5(KTR_PMAP, "%s(va_p=%p, sva=%#lx, eva=%#lx, prot=%#x)", __func__,
+	    virt, start, end, prot);
+
+	/* ia64 direct-maps physical memory via region 7; '*virt' is
+	 * left untouched. */
+	return (IA64_PHYS_TO_RR7(start));
+}
+
+/*
+ *	Remove the given range of addresses from the specified map.
+ *
+ *	It is assumed that the start and end are properly
+ *	rounded to the page size.
+ *
+ *	Sparsely used ranges are inefficiently removed.  The VHPT is
+ *	probed for every page within the range.  XXX
+ */
+void
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+	pmap_t oldpmap;
+	vm_offset_t va;
+	struct ia64_lpte *pte;
+
+	CTR4(KTR_PMAP, "%s(pm=%p, sva=%#lx, eva=%#lx)", __func__, pmap, sva,
+	    eva);
+
+	/*
+	 * Perform an unsynchronized read.  This is, however, safe.
+	 */
+	if (pmap->pm_stats.resident_count == 0)
+		return;
+
+	rw_wlock(&pvh_global_lock);
+	PMAP_LOCK(pmap);
+	/* Activate 'pmap' for the VHPT probes below. */
+	oldpmap = pmap_switch(pmap);
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		pte = pmap_find_vhpt(va);
+		if (pte != NULL)
+			pmap_remove_pte(pmap, pte, va, 0, 1);
+	}
+	rw_wunlock(&pvh_global_lock);
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+}
+
+/*
+ *	Routine:	pmap_remove_all
+ *	Function:
+ *		Removes this physical page from
+ *		all physical maps in which it resides.
+ *		Reflects back modify bits to the pager.
+ *
+ *	Notes:
+ *		Original versions of this routine were very
+ *		inefficient because they iteratively called
+ *		pmap_remove (slow...)
+ */
+void
+pmap_remove_all(vm_page_t m)
+{
+	pmap_t oldpmap;
+	pv_entry_t pv;
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
+	rw_wlock(&pvh_global_lock);
+	/* Each iteration removes the list head until the list is empty. */
+	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+		struct ia64_lpte *pte;
+		pmap_t pmap = PV_PMAP(pv);
+		vm_offset_t va = pv->pv_va;
+
+		PMAP_LOCK(pmap);
+		oldpmap = pmap_switch(pmap);
+		pte = pmap_find_vhpt(va);
+		KASSERT(pte != NULL, ("pte"));
+		if (pmap_ppn(pte) != VM_PAGE_TO_PHYS(m))
+			panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m));
+		pmap_remove_pte(pmap, pte, va, pv, 1);
+		pmap_switch(oldpmap);
+		PMAP_UNLOCK(pmap);
+	}
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
+	rw_wunlock(&pvh_global_lock);
+}
+
+/*
+ *	Set the physical protection on the
+ *	specified range of this map as requested.
+ */
+void
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+	pmap_t oldpmap;
+	struct ia64_lpte *pte;
+
+	CTR5(KTR_PMAP, "%s(pm=%p, sva=%#lx, eva=%#lx, prot=%#x)", __func__,
+	    pmap, sva, eva, prot);
+
+	/* Revoking read access means removing the mappings entirely. */
+	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+		pmap_remove(pmap, sva, eva);
+		return;
+	}
+
+	/* Granting both write and execute leaves nothing to restrict. */
+	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
+	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
+		return;
+
+	if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
+		panic("pmap_protect: unaligned addresses");
+
+	PMAP_LOCK(pmap);
+	oldpmap = pmap_switch(pmap);
+	for ( ; sva < eva; sva += PAGE_SIZE) {
+		/* If page is invalid, skip this page */
+		pte = pmap_find_vhpt(sva);
+		if (pte == NULL)
+			continue;
+
+		/* If there's no change, skip it too */
+		if (pmap_prot(pte) == prot)
+			continue;
+
+		/* Revoking write: hand the dirty state to the VM first. */
+		if ((prot & VM_PROT_WRITE) == 0 &&
+		    pmap_managed(pte) && pmap_dirty(pte)) {
+			vm_paddr_t pa = pmap_ppn(pte);
+			vm_page_t m = PHYS_TO_VM_PAGE(pa);
+
+			vm_page_dirty(m);
+			pmap_clear_dirty(pte);
+		}
+
+		if (prot & VM_PROT_EXECUTE)
+			ia64_sync_icache(sva, PAGE_SIZE);
+
+		pmap_pte_prot(pmap, pte, prot);
+		pmap_invalidate_page(sva);
+	}
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+}
+
+/*
+ *	Insert the given physical page (p) at
+ *	the specified virtual address (v) in the
+ *	target physical map with the protection requested.
+ *
+ *	If specified, the page will be wired down, meaning
+ *	that the related pte can not be reclaimed.
+ *
+ *	NB:  This is the only routine which MAY NOT lazy-evaluate
+ *	or lose information.  That is, this routine must actually
+ *	insert this page into the given map NOW.
+ *
+ *	Returns KERN_SUCCESS, or KERN_RESOURCE_SHORTAGE when no PTE
+ *	can be allocated and PMAP_ENTER_NOSLEEP was requested.
+ */
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
+{
+	pmap_t oldpmap;
+	vm_offset_t pa;
+	vm_offset_t opa;
+	struct ia64_lpte origpte;
+	struct ia64_lpte *pte;
+	boolean_t icache_inval, managed, wired;
+
+	CTR5(KTR_PMAP, "pmap_enter(pm=%p, va=%#lx, m=%p, prot=%#x, "
+	    "flags=%u)", pmap, va, m, prot, flags);
+
+	wired = (flags & PMAP_ENTER_WIRED) != 0;
+	rw_wlock(&pvh_global_lock);
+	PMAP_LOCK(pmap);
+	oldpmap = pmap_switch(pmap);
+
+	va &= ~PAGE_MASK;
+ 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+		VM_OBJECT_ASSERT_LOCKED(m->object);
+
+	/*
+	 * Find (or create) a pte for the given mapping.
+	 * Drop the locks while waiting for memory so others can make
+	 * progress; re-take them and retry afterwards.
+	 */
+	while ((pte = pmap_find_pte(va)) == NULL) {
+		pmap_switch(oldpmap);
+		PMAP_UNLOCK(pmap);
+		rw_wunlock(&pvh_global_lock);
+		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+			return (KERN_RESOURCE_SHORTAGE);
+		VM_WAIT;
+		rw_wlock(&pvh_global_lock);
+		PMAP_LOCK(pmap);
+		oldpmap = pmap_switch(pmap);
+	}
+	/* Snapshot the old PTE; opa == ~0UL flags "no prior mapping". */
+	origpte = *pte;
+	if (!pmap_present(pte)) {
+		opa = ~0UL;
+		pmap_enter_vhpt(pte, va);
+	} else
+		opa = pmap_ppn(pte);
+	managed = FALSE;
+	pa = VM_PAGE_TO_PHYS(m);
+
+	icache_inval = (prot & VM_PROT_EXECUTE) ? TRUE : FALSE;
+
+	/*
+	 * Mapping has not changed, must be protection or wiring change.
+	 */
+	if (opa == pa) {
+		/*
+		 * Wiring change, just update stats. We don't worry about
+		 * wiring PT pages as they remain resident as long as there
+		 * are valid mappings in them. Hence, if a user page is wired,
+		 * the PT page will be also.
+		 */
+		if (wired && !pmap_wired(&origpte))
+			pmap->pm_stats.wired_count++;
+		else if (!wired && pmap_wired(&origpte))
+			pmap->pm_stats.wired_count--;
+
+		managed = (pmap_managed(&origpte)) ? TRUE : FALSE;
+
+		/*
+		 * We might be turning off write access to the page,
+		 * so we go ahead and sense modify status. Otherwise,
+		 * we can avoid I-cache invalidation if the page
+		 * already allowed execution.
+		 */
+		if (managed && pmap_dirty(&origpte))
+			vm_page_dirty(m);
+		else if (pmap_exec(&origpte))
+			icache_inval = FALSE;
+
+		pmap_invalidate_page(va);
+		goto validate;
+	}
+
+	/*
+	 * Mapping has changed, invalidate old range and fall
+	 * through to handle validating new mapping.
+	 */
+	if (opa != ~0UL) {
+		pmap_remove_pte(pmap, pte, va, 0, 0);
+		pmap_enter_vhpt(pte, va);
+	}
+
+	/*
+	 * Enter on the PV list if part of our managed memory.
+	 */
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
+		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+		    ("pmap_enter: managed mapping within the clean submap"));
+		pmap_insert_entry(pmap, va, m);
+		managed = TRUE;
+	}
+
+	/*
+	 * Increment counters
+	 */
+	pmap->pm_stats.resident_count++;
+	if (wired)
+		pmap->pm_stats.wired_count++;
+
+validate:
+
+	/*
+	 * Now validate mapping with desired protection/wiring. This
+	 * adds the pte to the VHPT if necessary.
+	 */
+	pmap_pte_prot(pmap, pte, prot);
+	pmap_pte_attr(pte, m->md.memattr);
+	pmap_set_pte(pte, va, pa, wired, managed);
+
+	/* Invalidate the I-cache when needed. */
+	if (icache_inval)
+		ia64_sync_icache(va, PAGE_SIZE);
+
+	if ((prot & VM_PROT_WRITE) != 0 && managed)
+		vm_page_aflag_set(m, PGA_WRITEABLE);
+	rw_wunlock(&pvh_global_lock);
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+	return (KERN_SUCCESS);
+}
+
+/*
+ * Maps a sequence of resident pages belonging to the same object.
+ * The sequence begins with the given page m_start.  This page is
+ * mapped at the given virtual address start.  Each subsequent page is
+ * mapped at a virtual address that is offset from start by the same
+ * amount as the page is offset from m_start within the object.  The
+ * last page in the sequence is the page with the largest offset from
+ * m_start that can be mapped at a virtual address less than the given
+ * virtual address end.  Not every virtual page between start and end
+ * is mapped; only those for which a resident page exists with the
+ * corresponding offset from m_start are mapped.
+ */
+void
+pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
+    vm_page_t m_start, vm_prot_t prot)
+{
+	pmap_t oldpmap;
+	vm_page_t m;
+	vm_pindex_t diff, psize;
+
+	CTR6(KTR_PMAP, "%s(pm=%p, sva=%#lx, eva=%#lx, m=%p, prot=%#x)",
+	    __func__, pmap, start, end, m_start, prot);
+
+	VM_OBJECT_ASSERT_LOCKED(m_start->object);
+
+	psize = atop(end - start);
+	rw_wlock(&pvh_global_lock);
+	PMAP_LOCK(pmap);
+	oldpmap = pmap_switch(pmap);
+	/* Walk the object's resident pages until we run past [start, end). */
+	for (m = m_start; m != NULL &&
+	    (diff = m->pindex - m_start->pindex) < psize;
+	    m = TAILQ_NEXT(m, listq))
+		pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
+	rw_wunlock(&pvh_global_lock);
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+}
+
+/*
+ * this code makes some *MAJOR* assumptions:
+ * 1. Current pmap & pmap exists.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * but is *MUCH* faster than pmap_enter...
+ */
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+	pmap_t oldpmap;
+
+	CTR5(KTR_PMAP, "%s(pm=%p, va=%#lx, m=%p, prot=%#x)", __func__, pmap,
+	    va, m, prot);
+
+	/* Take the PV and pmap locks and make the target pmap current,
+	 * then let the locked variant do the actual work. */
+	rw_wlock(&pvh_global_lock);
+	PMAP_LOCK(pmap);
+	oldpmap = pmap_switch(pmap);
+	pmap_enter_quick_locked(pmap, va, m, prot);
+	rw_wunlock(&pvh_global_lock);
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+}
+
+static void
+pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
+    vm_prot_t prot)
+{
+	struct ia64_lpte *pte;
+	boolean_t managed;
+
+	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	    (m->oflags & VPO_UNMANAGED) != 0,
+	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+	/* Best-effort: if no pte can be obtained, silently give up. */
+	if ((pte = pmap_find_pte(va)) == NULL)
+		return;
+
+	/* Never replace an existing mapping; only fill empty slots. */
+	if (!pmap_present(pte)) {
+		/* Enter on the PV list if the page is managed. */
+		if ((m->oflags & VPO_UNMANAGED) == 0) {
+			if (!pmap_try_insert_pv_entry(pmap, va, m)) {
+				/* No PV entry available: back out quietly. */
+				pmap_free_pte(pte, va);
+				return;
+			}
+			managed = TRUE;
+		} else
+			managed = FALSE;
+
+		/* Increment counters. */
+		pmap->pm_stats.resident_count++;
+
+		/* Initialise with R/O protection and enter into VHPT. */
+		pmap_enter_vhpt(pte, va);
+		pmap_pte_prot(pmap, pte,
+		    prot & (VM_PROT_READ | VM_PROT_EXECUTE));
+		pmap_pte_attr(pte, m->md.memattr);
+		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
+
+		/* Keep the I-cache coherent for executable mappings. */
+		if (prot & VM_PROT_EXECUTE)
+			ia64_sync_icache(va, PAGE_SIZE);
+	}
+}
+
+/*
+ * pmap_object_init_pt preloads the ptes for a given object
+ * into the specified pmap.  This eliminates the blast of soft
+ * faults on process startup and immediately after an mmap.
+ */
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+    vm_pindex_t pindex, vm_size_t size)
+{
+
+	CTR6(KTR_PMAP, "%s(pm=%p, va=%#lx, obj=%p, idx=%lu, sz=%#lx)",
+	    __func__, pmap, addr, object, pindex, size);
+
+	/* Preloading is not implemented on ia64; only sanity-check the
+	 * arguments as the MI layer requires. */
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
+	    ("pmap_object_init_pt: non-device object"));
+}
+
+/*
+ *	Clear the wired attribute from the mappings for the specified range of
+ *	addresses in the given pmap.  Every valid mapping within that range
+ *	must have the wired attribute set.  In contrast, invalid mappings
+ *	cannot have the wired attribute set, so they are ignored.
+ *
+ *	The wired attribute of the page table entry is not a hardware feature,
+ *	so there is no need to invalidate any TLB entries.
+ */
+void
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+	pmap_t oldpmap;
+	struct ia64_lpte *pte;
+
+	/*
+	 * Use %#lx for the 64-bit vm_offset_t arguments (the old %#x
+	 * would mis-format them) and match the trace style of the
+	 * other functions in this file.
+	 */
+	CTR4(KTR_PMAP, "%s(pm=%p, sva=%#lx, eva=%#lx)", __func__, pmap,
+	    sva, eva);
+
+	PMAP_LOCK(pmap);
+	oldpmap = pmap_switch(pmap);
+	for (; sva < eva; sva += PAGE_SIZE) {
+		pte = pmap_find_vhpt(sva);
+		/* Invalid mappings are ignored, per the contract above. */
+		if (pte == NULL)
+			continue;
+		/* Every valid mapping in the range must be wired. */
+		if (!pmap_wired(pte))
+			panic("pmap_unwire: pte %p isn't wired", pte);
+		pmap->pm_stats.wired_count--;
+		/* Wiring is software-only state: no TLB shootdown needed. */
+		pmap_clear_wired(pte);
+	}
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+}
+
+/*
+ *	Copy the range specified by src_addr/len
+ *	from the source map to the range dst_addr/len
+ *	in the destination map.
+ *
+ *	This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_va, vm_size_t len,
+    vm_offset_t src_va)
+{
+
+	/* Advisory only; ia64 does not pre-copy mappings. */
+	CTR6(KTR_PMAP, "%s(dpm=%p, spm=%p, dva=%#lx, sz=%#lx, sva=%#lx)",
+	    __func__, dst_pmap, src_pmap, dst_va, len, src_va);
+}
+
+/*
+ *	pmap_zero_page zeros the specified hardware page by
+ *	mapping it into virtual memory and using bzero to clear
+ *	its contents.
+ */
+void
+pmap_zero_page(vm_page_t m)
+{
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	/* Zero through the page's direct mapping; no temporary KVA. */
+	bzero((void *)pmap_page_to_va(m), PAGE_SIZE);
+}
+
+/*
+ *	pmap_zero_page_area zeros the specified hardware page by
+ *	mapping it into virtual memory and using bzero to clear
+ *	its contents.
+ *
+ *	off and size must reside within a single page.
+ */
+void
+pmap_zero_page_area(vm_page_t m, int off, int size)
+{
+	char *va;
+
+	CTR4(KTR_PMAP, "%s(m=%p, ofs=%d, len=%d)", __func__, m, off, size);
+
+	/* The caller guarantees [off, off + size) lies within one page. */
+	va = (char *)pmap_page_to_va(m);
+	bzero(va + off, size);
+}
+
+/*
+ *	pmap_zero_page_idle zeros the specified hardware page by
+ *	mapping it into virtual memory and using bzero to clear
+ *	its contents.  This is for the vm_idlezero process.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	/* Identical to pmap_zero_page(); used by the vm_idlezero thread. */
+	bzero((void *)pmap_page_to_va(m), PAGE_SIZE);
+}
+
+/*
+ *	pmap_copy_page copies the specified (machine independent)
+ *	page by mapping the page into virtual memory and using
+ *	bcopy to copy the page, one machine dependent page at a
+ *	time.
+ */
+void
+pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
+{
+
+	CTR3(KTR_PMAP, "%s(sm=%p, dm=%p)", __func__, msrc, mdst);
+
+	/* Both pages are reached through their direct mappings. */
+	bcopy((void *)pmap_page_to_va(msrc), (void *)pmap_page_to_va(mdst),
+	    PAGE_SIZE);
+}
+
+/*
+ * Copy 'xfersize' bytes from offset a_offset in page run ma[] to offset
+ * b_offset in page run mb[], one page-bounded chunk at a time.
+ */
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+	char *src, *dst;
+	vm_offset_t src_off, dst_off;
+	int sz;
+
+	CTR6(KTR_PMAP, "%s(m0=%p, va0=%#lx, m1=%p, va1=%#lx, sz=%#x)",
+	    __func__, ma, a_offset, mb, b_offset, xfersize);
+
+	/* Each chunk extends at most to the next page boundary on
+	 * either the source or the destination side. */
+	for (; xfersize > 0; a_offset += sz, b_offset += sz, xfersize -= sz) {
+		src_off = a_offset & PAGE_MASK;
+		dst_off = b_offset & PAGE_MASK;
+		sz = min(xfersize, PAGE_SIZE - src_off);
+		sz = min(sz, PAGE_SIZE - dst_off);
+		src = (char *)pmap_page_to_va(ma[a_offset >> PAGE_SHIFT]) +
+		    src_off;
+		dst = (char *)pmap_page_to_va(mb[b_offset >> PAGE_SHIFT]) +
+		    dst_off;
+		bcopy(src, dst, sz);
+	}
+}
+
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page.  This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
+boolean_t
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+{
+	pv_entry_t pv;
+	int loops;
+	boolean_t rv;
+
+	CTR3(KTR_PMAP, "%s(pm=%p, m=%p)", __func__, pmap, m);
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
+	rv = FALSE;
+	loops = 0;
+	rw_wlock(&pvh_global_lock);
+	/* Examine at most the first 16 PV entries (see block comment). */
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		if (PV_PMAP(pv) == pmap) {
+			rv = TRUE;
+			break;
+		}
+		if (++loops >= 16)
+			break;
+	}
+	rw_wunlock(&pvh_global_lock);
+	return (rv);
+}
+
+/*
+ *	pmap_page_wired_mappings:
+ *
+ *	Return the number of managed mappings to the given physical page
+ *	that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap, pmap;
+	pv_entry_t pv;
+	int count;
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	count = 0;
+	/* Unmanaged pages have no PV list, hence no managed mappings. */
+	if ((m->oflags & VPO_UNMANAGED) != 0)
+		return (count);
+	rw_wlock(&pvh_global_lock);
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		/* Make the mapping's pmap current before the VHPT lookup. */
+		oldpmap = pmap_switch(pmap);
+		pte = pmap_find_vhpt(pv->pv_va);
+		KASSERT(pte != NULL, ("pte"));
+		if (pmap_wired(pte))
+			count++;
+		pmap_switch(oldpmap);
+		PMAP_UNLOCK(pmap);
+	}
+	rw_wunlock(&pvh_global_lock);
+	return (count);
+}
+
+/*
+ * Remove all pages from specified address space
+ * this aids process exit speeds.  Also, this code
+ * is special cased for current process only, but
+ * can have the more generic (and slightly slower)
+ * mode enabled.  This is much faster than pmap_remove
+ * in the case of running down an entire address space.
+ */
+void
+pmap_remove_pages(pmap_t pmap)
+{
+	struct pv_chunk *pc, *npc;
+	struct ia64_lpte *pte;
+	pmap_t oldpmap;
+	pv_entry_t pv;
+	vm_offset_t va;
+	vm_page_t m;
+	u_long inuse, bitmask;
+	int allfree, bit, field, idx;
+
+	CTR2(KTR_PMAP, "%s(pm=%p)", __func__, pmap);
+
+	rw_wlock(&pvh_global_lock);
+	PMAP_LOCK(pmap);
+	oldpmap = pmap_switch(pmap);
+	/* Walk the pmap's own PV chunks instead of scanning the VHPT:
+	 * every managed mapping is reachable through a chunk. */
+	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
+		allfree = 1;
+		for (field = 0; field < _NPCM; field++) {
+			/* Bits clear in pc_map mark in-use PV entries. */
+			inuse = ~pc->pc_map[field] & pc_freemask[field];
+			while (inuse != 0) {
+				bit = ffsl(inuse) - 1;
+				bitmask = 1UL << bit;
+				idx = field * sizeof(inuse) * NBBY + bit;
+				pv = &pc->pc_pventry[idx];
+				inuse &= ~bitmask;
+				va = pv->pv_va;
+				pte = pmap_find_vhpt(va);
+				KASSERT(pte != NULL, ("pte"));
+				/* Wired mappings are kept; the chunk then
+				 * cannot be freed as a whole. */
+				if (pmap_wired(pte)) {
+					allfree = 0;
+					continue;
+				}
+				pmap_remove_vhpt(va);
+				pmap_invalidate_page(va);
+				m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
+				/* Preserve the modified state in the page. */
+				if (pmap_dirty(pte))
+					vm_page_dirty(m);
+				pmap_free_pte(pte, va);
+				/* Mark free */
+				PV_STAT(pv_entry_frees++);
+				PV_STAT(pv_entry_spare++);
+				pv_entry_count--;
+				pc->pc_map[field] |= bitmask;
+				pmap->pm_stats.resident_count--;
+				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+				if (TAILQ_EMPTY(&m->md.pv_list))
+					vm_page_aflag_clear(m, PGA_WRITEABLE);
+			}
+		}
+		/* Release the chunk once every entry in it was freed. */
+		if (allfree) {
+			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+			free_pv_chunk(pc);
+		}
+	}
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+	rw_wunlock(&pvh_global_lock);
+}
+
+/*
+ *	pmap_ts_referenced:
+ *
+ *	Return a count of reference bits for a page, clearing those bits.
+ *	It is not necessary for every reference bit to be cleared, but it
+ *	is necessary that 0 only be returned when there are truly no
+ *	reference bits set.
+ * 
+ *	XXX: The exact number of bits to check and clear is a matter that
+ *	should be tested and standardized at some point in the future for
+ *	optimal aging of shared pages.
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap, pmap;
+	pv_entry_t pv;
+	int count = 0;
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
+	rw_wlock(&pvh_global_lock);
+	/* Count and clear the accessed bit in every mapping of the page. */
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		oldpmap = pmap_switch(pmap);
+		pte = pmap_find_vhpt(pv->pv_va);
+		KASSERT(pte != NULL, ("pte"));
+		if (pmap_accessed(pte)) {
+			count++;
+			pmap_clear_accessed(pte);
+			/* Force a TLB reload so the bit can be set again. */
+			pmap_invalidate_page(pv->pv_va);
+		}
+		pmap_switch(oldpmap);
+		PMAP_UNLOCK(pmap);
+	}
+	rw_wunlock(&pvh_global_lock);
+	return (count);
+}
+
+/*
+ *	pmap_is_modified:
+ *
+ *	Return whether or not the specified physical page was modified
+ *	in any physical maps.
+ */
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap, pmap;
+	pv_entry_t pv;
+	boolean_t rv;
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
+	rv = FALSE;
+
+	/*
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
+	 * is clear, no PTEs can be dirty.
+	 */
+	VM_OBJECT_ASSERT_WLOCKED(m->object);
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+		return (rv);
+	rw_wlock(&pvh_global_lock);
+	/* Stop at the first mapping whose dirty bit is set. */
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		oldpmap = pmap_switch(pmap);
+		pte = pmap_find_vhpt(pv->pv_va);
+		pmap_switch(oldpmap);
+		KASSERT(pte != NULL, ("pte"));
+		rv = pmap_dirty(pte) ? TRUE : FALSE;
+		PMAP_UNLOCK(pmap);
+		if (rv)
+			break;
+	}
+	rw_wunlock(&pvh_global_lock);
+	return (rv);
+}
+
+/*
+ *	pmap_is_prefaultable:
+ *
+ *	Return whether or not the specified virtual address is elgible
+ *	for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+	struct ia64_lpte *pte;
+
+	CTR3(KTR_PMAP, "%s(pm=%p, va=%#lx)", __func__, pmap, addr);
+
+	/* An address is prefaultable iff it has no valid mapping yet. */
+	pte = pmap_find_vhpt(addr);
+	return ((pte == NULL || !pmap_present(pte)) ? TRUE : FALSE);
+}
+
+/*
+ *	pmap_is_referenced:
+ *
+ *	Return whether or not the specified physical page was referenced
+ *	in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap, pmap;
+	pv_entry_t pv;
+	boolean_t rv;
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("pmap_is_referenced: page %p is not managed", m));
+	rv = FALSE;
+	rw_wlock(&pvh_global_lock);
+	/* Stop at the first mapping whose accessed bit is set. */
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		oldpmap = pmap_switch(pmap);
+		pte = pmap_find_vhpt(pv->pv_va);
+		pmap_switch(oldpmap);
+		KASSERT(pte != NULL, ("pte"));
+		rv = pmap_accessed(pte) ? TRUE : FALSE;
+		PMAP_UNLOCK(pmap);
+		if (rv)
+			break;
+	}
+	rw_wunlock(&pvh_global_lock);
+	return (rv);
+}
+
+/*
+ *	Apply the given advice to the specified range of addresses within the
+ *	given pmap.  Depending on the advice, clear the referenced and/or
+ *	modified flags in each mapping and set the mapped page's dirty field.
+ */
+void
+pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap;
+	vm_page_t m;
+
+	CTR5(KTR_PMAP, "%s(pm=%p, sva=%#lx, eva=%#lx, adv=%d)", __func__,
+	    pmap, sva, eva, advice);
+
+	PMAP_LOCK(pmap);
+	oldpmap = pmap_switch(pmap);
+	for (; sva < eva; sva += PAGE_SIZE) {
+		/* If page is invalid, skip this page. */
+		pte = pmap_find_vhpt(sva);
+		if (pte == NULL)
+			continue;
+
+		/* If it isn't managed, skip it too. */
+		if (!pmap_managed(pte))
+			continue;
+
+		/* Clear its modified and referenced bits. */
+		if (pmap_dirty(pte)) {
+			if (advice == MADV_DONTNEED) {
+				/*
+				 * Future calls to pmap_is_modified() can be
+				 * avoided by making the page dirty now.
+				 */
+				m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
+				vm_page_dirty(m);
+			}
+			pmap_clear_dirty(pte);
+		} else if (!pmap_accessed(pte))
+			/* Neither bit set: nothing to clear, no flush. */
+			continue;
+		pmap_clear_accessed(pte);
+		pmap_invalidate_page(sva);
+	}
+	pmap_switch(oldpmap);
+	PMAP_UNLOCK(pmap);
+}
+
+/*
+ *	Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(vm_page_t m)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap, pmap;
+	pv_entry_t pv;
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_ASSERT_WLOCKED(m->object);
+	KASSERT(!vm_page_xbusied(m),
+	    ("pmap_clear_modify: page %p is exclusive busied", m));
+
+	/*
+	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
+	 * If the object containing the page is locked and the page is not
+	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->aflags & PGA_WRITEABLE) == 0)
+		return;
+	rw_wlock(&pvh_global_lock);
+	/* Clear the dirty bit in every mapping of the page. */
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		oldpmap = pmap_switch(pmap);
+		pte = pmap_find_vhpt(pv->pv_va);
+		KASSERT(pte != NULL, ("pte"));
+		if (pmap_dirty(pte)) {
+			pmap_clear_dirty(pte);
+			/* Flush so the next write re-sets the dirty bit. */
+			pmap_invalidate_page(pv->pv_va);
+		}
+		pmap_switch(oldpmap);
+		PMAP_UNLOCK(pmap);
+	}
+	rw_wunlock(&pvh_global_lock);
+}
+
+/*
+ * Clear the write and modified bits in each of the given page's mappings.
+ */
+void
+pmap_remove_write(vm_page_t m)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap, pmap;
+	pv_entry_t pv;
+	vm_prot_t prot;
+
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("pmap_remove_write: page %p is not managed", m));
+
+	/*
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
+	 */
+	VM_OBJECT_ASSERT_WLOCKED(m->object);
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+		return;
+	rw_wlock(&pvh_global_lock);
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		oldpmap = pmap_switch(pmap);
+		pte = pmap_find_vhpt(pv->pv_va);
+		KASSERT(pte != NULL, ("pte"));
+		prot = pmap_prot(pte);
+		if ((prot & VM_PROT_WRITE) != 0) {
+			/* Record a pending modification before the write
+			 * permission (and with it the dirty bit) is lost. */
+			if (pmap_dirty(pte)) {
+				vm_page_dirty(m);
+				pmap_clear_dirty(pte);
+			}
+			prot &= ~VM_PROT_WRITE;
+			pmap_pte_prot(pmap, pte, prot);
+			pmap_pte_attr(pte, m->md.memattr);
+			pmap_invalidate_page(pv->pv_va);
+		}
+		pmap_switch(oldpmap);
+		PMAP_UNLOCK(pmap);
+	}
+	/* No writeable mappings remain. */
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
+	rw_wunlock(&pvh_global_lock);
+}
+
+vm_offset_t
+pmap_mapdev_priv(vm_paddr_t pa, vm_size_t sz, vm_memattr_t attr)
+{
+	/* One-entry cache of the last pa/sz -> va translation.
+	 * NOTE(review): unsynchronized statics — presumably only called
+	 * in contexts where races are harmless; verify against callers. */
+	static vm_offset_t last_va = 0;
+	static vm_paddr_t last_pa = ~0UL;
+	static vm_size_t last_sz = 0;
+	struct efi_md *md;
+
+	/* NOTE(review): 'attr' is accepted but never used here; the
+	 * cacheability is derived from the EFI memory descriptor. */
+	if (pa == last_pa && sz == last_sz)
+		return (last_va);
+
+	md = efi_md_find(pa);
+	if (md == NULL) {
+		/* Unknown range: fall back to an uncacheable mapping. */
+		printf("%s: [%#lx..%#lx] not covered by memory descriptor\n",
+		    __func__, pa, pa + sz - 1);
+		return (IA64_PHYS_TO_RR6(pa));
+	}
+
+	if (md->md_type == EFI_MD_TYPE_FREE) {
+		/* Refuse to map regular DRAM as a device range. */
+		printf("%s: [%#lx..%#lx] is in DRAM\n", __func__, pa,
+		    pa + sz - 1);
+		return (0);
+	}
+
+	/* Write-back capable memory maps via region 7 (cacheable),
+	 * everything else via region 6 (uncacheable). */
+	last_va = (md->md_attr & EFI_MD_ATTR_WB) ? IA64_PHYS_TO_RR7(pa) :
+	    IA64_PHYS_TO_RR6(pa);
+	last_pa = pa;
+	last_sz = sz;
+	return (last_va);
+}
+
+/*
+ * Map a set of physical memory pages into the kernel virtual
+ * address space. Return a pointer to where it is mapped. This
+ * routine is intended to be used for mapping device memory,
+ * NOT real memory.
+ */
+void *
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t sz, vm_memattr_t attr)
+{
+	vm_offset_t va;
+
+	CTR4(KTR_PMAP, "%s(pa=%#lx, sz=%#lx, attr=%#x)", __func__, pa, sz,
+	    attr);
+
+	/* Thin wrapper: all the work is done by pmap_mapdev_priv(). */
+	va = pmap_mapdev_priv(pa, sz, attr);
+	return ((void *)(uintptr_t)va);
+}
+
+/*
+ * 'Unmap' a range mapped by pmap_mapdev_attr().
+ */
+void
+pmap_unmapdev(vm_offset_t va, vm_size_t size)
+{
+
+	/* Nothing to undo: pmap_mapdev_attr() uses the permanent
+	 * region 6/7 direct mappings, so this is a no-op. */
+	CTR3(KTR_PMAP, "%s(va=%#lx, sz=%#lx)", __func__, va, size);
+}
+
+/*
+ * Sets the memory attribute for the specified page.
+ */
+/*
+ * Rendezvous callback for pmap_page_set_memattr(): issue the PAL call
+ * whose number is encoded in 'arg' (PAL_PREFETCH_VISIBILITY or
+ * PAL_MC_DRAIN) with interrupts disabled on the current CPU.
+ */
+static void
+pmap_page_set_memattr_1(void *arg)
+{
+	register_t is;
+	uintptr_t pp = (uintptr_t)arg;
+
+	is = intr_disable();
+	/* The PAL status is intentionally ignored; the previously
+	 * declared-but-unused 'res' local has been removed. */
+	(void)ia64_call_pal_static(pp, 0, 0, 0);
+	intr_restore(is);
+}
+
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+	struct ia64_lpte *pte;
+	pmap_t oldpmap, pmap;
+	pv_entry_t pv;
+	void *va;
+
+	CTR3(KTR_PMAP, "%s(m=%p, attr=%#x)", __func__, m, ma);
+
+	rw_wlock(&pvh_global_lock);
+	m->md.memattr = ma;
+	/* Rewrite the attribute in every existing mapping of the page. */
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		oldpmap = pmap_switch(pmap);
+		pte = pmap_find_vhpt(pv->pv_va);
+		KASSERT(pte != NULL, ("pte"));
+		pmap_pte_attr(pte, ma);
+		pmap_invalidate_page(pv->pv_va);
+		pmap_switch(oldpmap);
+		PMAP_UNLOCK(pmap);
+	}
+	rw_wunlock(&pvh_global_lock);
+
+	/* Switching to uncacheable requires flushing any cached copies:
+	 * make prefetches visible, flush the D-cache, then drain. */
+	if (ma == VM_MEMATTR_UNCACHEABLE) {
+#ifdef SMP
+		smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
+		    (void *)PAL_PREFETCH_VISIBILITY);
+#else
+		pmap_page_set_memattr_1((void *)PAL_PREFETCH_VISIBILITY);
+#endif
+		va = (void *)pmap_page_to_va(m);
+		critical_enter();
+		cpu_flush_dcache(va, PAGE_SIZE);
+		critical_exit();
+#ifdef SMP
+		smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
+		    (void *)PAL_MC_DRAIN);
+#else
+		pmap_page_set_memattr_1((void *)PAL_MC_DRAIN);
+#endif
+	}
+}
+
+/*
+ * perform the pmap work for mincore
+ */
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+{
+	pmap_t oldpmap;
+	struct ia64_lpte *pte, tpte;
+	vm_paddr_t pa;
+	int val;
+
+	CTR4(KTR_PMAP, "%s(pm=%p, va=%#lx, pa_p=%p)", __func__, pmap, addr,
+	    locked_pa);
+
+	PMAP_LOCK(pmap);
+retry:
+	oldpmap = pmap_switch(pmap);
+	pte = pmap_find_vhpt(addr);
+	if (pte != NULL) {
+		/* Work on a snapshot so the VHPT entry can change freely. */
+		tpte = *pte;
+		pte = &tpte;
+	}
+	pmap_switch(oldpmap);
+	if (pte == NULL || !pmap_present(pte)) {
+		val = 0;
+		goto out;
+	}
+	val = MINCORE_INCORE;
+	if (pmap_dirty(pte))
+		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+	if (pmap_accessed(pte))
+		val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+	    pmap_managed(pte)) {
+		pa = pmap_ppn(pte);
+		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+			goto retry;
+	} else
+	/* Note: the 'out' label deliberately sits inside the else branch
+	 * so both the else path and the goto path release the page lock. */
+out:
+		PA_UNLOCK_COND(*locked_pa);
+	PMAP_UNLOCK(pmap);
+	return (val);
+}
+
+/*
+ *
+ */
+void
+pmap_activate(struct thread *td)
+{
+
+	CTR2(KTR_PMAP, "%s(td=%p)", __func__, td);
+
+	/* Make the thread's address space current on this CPU. */
+	pmap_switch(vmspace_pmap(td->td_proc->p_vmspace));
+}
+
+pmap_t
+pmap_switch(pmap_t pm)
+{
+	pmap_t prevpm;
+	int i;
+
+	/*
+	 * Make 'pm' the CPU's current pmap by loading its region IDs
+	 * into the user region registers; return the previously current
+	 * pmap.  Runs in a critical section so the thread cannot migrate
+	 * while the registers are being rewritten.
+	 */
+	critical_enter();
+	prevpm = PCPU_GET(md.current_pmap);
+	if (prevpm == pm)
+		goto out;
+	if (pm == NULL) {
+		/* No pmap: give each region a distinct placeholder RID. */
+		for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
+			ia64_set_rr(IA64_RR_BASE(i),
+			    (i << 8)|(PAGE_SHIFT << 2)|1);
+		}
+	} else {
+		for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
+			ia64_set_rr(IA64_RR_BASE(i),
+			    (pm->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1);
+		}
+	}
+	PCPU_SET(md.current_pmap, pm);
+	/* Serialize so the new RRs take effect before returning. */
+	ia64_srlz_d();
+
+out:
+	critical_exit();
+	return (prevpm);
+}
+
+/*
+ *
+ */
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+	pmap_t oldpm;
+	struct ia64_lpte *pte;
+	vm_offset_t lim;
+	vm_size_t len;
+
+	CTR4(KTR_PMAP, "%s(pm=%p, va=%#lx, sz=%#lx)", __func__, pm, va, sz);
+
+	/* Round the range out to 32-byte alignment. */
+	sz += va & 31;
+	va &= ~31;
+	sz = (sz + 31) & ~31;
+
+	PMAP_LOCK(pm);
+	oldpm = pmap_switch(pm);
+	/* Process page by page, skipping holes in the address range. */
+	while (sz > 0) {
+		lim = round_page(va);
+		len = MIN(lim - va, sz);
+		pte = pmap_find_vhpt(va);
+		if (pte != NULL && pmap_present(pte))
+			ia64_sync_icache(va, len);
+		va += len;
+		sz -= len;
+	}
+	pmap_switch(oldpm);
+	PMAP_UNLOCK(pm);
+}
+
+/*
+ *	Increase the starting virtual address of the given mapping if a
+ *	different alignment might result in more superpage mappings.
+ */
+void
+pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
+    vm_offset_t *addr, vm_size_t size)
+{
+
+	/* No superpage promotion on ia64: nothing to align. */
+	CTR5(KTR_PMAP, "%s(obj=%p, ofs=%#lx, va_p=%p, sz=%#lx)", __func__,
+	    object, offset, addr, size);
+}
+
+#include "opt_ddb.h"
+
+#ifdef DDB
+
+#include <ddb/ddb.h>
+
+/* Human-readable page-size names, indexed by log2 of the size. */
+static const char*	psnames[] = {
+	"1B",	"2B",	"4B",	"8B",
+	"16B",	"32B",	"64B",	"128B",
+	"256B",	"512B",	"1K",	"2K",
+	"4K",	"8K",	"16K",	"32K",
+	"64K",	"128K",	"256K",	"512K",
+	"1M",	"2M",	"4M",	"8M",
+	"16M",	"32M",	"64M",	"128M",
+	"256M",	"512M",	"1G",	"2G"
+};
+
+static void
+print_trs(int type)
+{
+	struct ia64_pal_result res;
+	int i, maxtr;
+	struct {
+		pt_entry_t	pte;
+		uint64_t	itir;
+		uint64_t	ifa;
+		struct ia64_rr	rr;
+	} buf;
+	static const char *manames[] = {
+		"WB",	"bad",	"bad",	"bad",
+		"UC",	"UCE",	"WC",	"NaT",
+	};
+
+	/*
+	 * Dump the translation registers via PAL ('type' selects the
+	 * register file; 0 is used by the itr command, 1 by dtr).
+	 * First ask PAL_VM_SUMMARY how many TRs there are.
+	 */
+	res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
+	if (res.pal_status != 0) {
+		db_printf("Can't get VM summary\n");
+		return;
+	}
+
+	if (type == 0)
+		maxtr = (res.pal_result[0] >> 40) & 0xff;
+	else
+		maxtr = (res.pal_result[0] >> 32) & 0xff;
+
+	db_printf("V RID    Virtual Page  Physical Page PgSz ED AR PL D A MA  P KEY\n");
+	for (i = 0; i <= maxtr; i++) {
+		bzero(&buf, sizeof(buf));
+		res = ia64_pal_physical(PAL_VM_TR_READ, i, type,
+		    ia64_tpa((uint64_t)&buf));
+		/* Mask off the fields PAL reports as invalid for this TR. */
+		if (!(res.pal_result[0] & 1))
+			buf.pte &= ~PTE_AR_MASK;
+		if (!(res.pal_result[0] & 2))
+			buf.pte &= ~PTE_PL_MASK;
+		if (!(res.pal_result[0] & 4))
+			pmap_clear_dirty(&buf);
+		if (!(res.pal_result[0] & 8))
+			buf.pte &= ~PTE_MA_MASK;
+		db_printf("%d %06x %013lx %013lx %4s %d  %d  %d  %d %d %-3s "
+		    "%d %06x\n", (int)buf.ifa & 1, buf.rr.rr_rid,
+		    buf.ifa >> 12, (buf.pte & PTE_PPN_MASK) >> 12,
+		    psnames[(buf.itir & ITIR_PS_MASK) >> 2],
+		    (buf.pte & PTE_ED) ? 1 : 0,
+		    (int)(buf.pte & PTE_AR_MASK) >> 9,
+		    (int)(buf.pte & PTE_PL_MASK) >> 7,
+		    (pmap_dirty(&buf)) ? 1 : 0,
+		    (pmap_accessed(&buf)) ? 1 : 0,
+		    manames[(buf.pte & PTE_MA_MASK) >> 2],
+		    (pmap_present(&buf)) ? 1 : 0,
+		    (int)((buf.itir & ITIR_KEY_MASK) >> 8));
+	}
+}
+
+DB_COMMAND(itr, db_itr)
+{
+	/* Dump the instruction translation registers (type 0). */
+	print_trs(0);
+}
+
+DB_COMMAND(dtr, db_dtr)
+{
+	/* Dump the data translation registers (type 1). */
+	print_trs(1);
+}
+
+DB_COMMAND(rr, db_rr)
+{
+	int i;
+	uint64_t t;
+	struct ia64_rr rr;
+
+	/*
+	 * Dump the eight region registers.  Use db_printf() so the
+	 * output goes through the debugger like every other DDB
+	 * command in this file (the old code used raw printf()).
+	 */
+	db_printf("RR RID    PgSz VE\n");
+	for (i = 0; i < 8; i++) {
+		__asm __volatile ("mov %0=rr[%1]"
+				  : "=r"(t)
+				  : "r"(IA64_RR_BASE(i)));
+		*(uint64_t *) &rr = t;
+		db_printf("%d  %06x %4s %d\n",
+		       i, rr.rr_rid, psnames[rr.rr_ps], rr.rr_ve);
+	}
+}
+
+DB_COMMAND(thash, db_thash)
+{
+	/* Print the VHPT hash address for the given virtual address. */
+	if (!have_addr)
+		return;
+
+	db_printf("%p\n", (void *) ia64_thash(addr));
+}
+
+DB_COMMAND(ttag, db_ttag)
+{
+	/* Print the VHPT tag for the given virtual address. */
+	if (!have_addr)
+		return;
+
+	db_printf("0x%lx\n", ia64_ttag(addr));
+}
+
+DB_COMMAND(kpte, db_kpte)
+{
+	struct ia64_lpte *pte;
+
+	/* Display the kernel PTE that maps the given kernel VA. */
+	if (!have_addr) {
+		db_printf("usage: kpte <kva>\n");
+		return;
+	}
+	if (addr < VM_INIT_KERNEL_ADDRESS) {
+		db_printf("kpte: error: invalid <kva>\n");
+		return;
+	}
+	pte = pmap_find_kpte(addr);
+	db_printf("kpte at %p:\n", pte);
+	db_printf("  pte  =%016lx\n", pte->pte);
+	db_printf("  itir =%016lx\n", pte->itir);
+	db_printf("  tag  =%016lx\n", pte->tag);
+	db_printf("  chain=%016lx\n", pte->chain);
+}
+
+#endif


Property changes on: trunk/sys/ia64/ia64/pmap.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/ptrace_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/ptrace_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/ptrace_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,65 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/ptrace_machdep.c 139790 2005-01-06 22:18:23Z imp $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <machine/frame.h>
+
+int
+cpu_ptrace(struct thread *td, int req, void *addr, int data)
+{
+	struct trapframe *tf;
+	uint64_t *kstack;
+	int error;
+
+	/*
+	 * Machine-dependent ptrace(2) requests: copy one 8-byte slot of
+	 * the dirty portion of the thread's register stack to ('data' is
+	 * the slot index, 'addr' the user buffer) or from user space.
+	 * Unknown requests return EINVAL.
+	 */
+	error = EINVAL;
+	tf = td->td_frame;
+
+	switch (req) {
+	case PT_GETKSTACK:
+		/* tf_special.ndirty is in bytes; >> 3 yields slot count. */
+		if (data >= 0 && data < (tf->tf_special.ndirty >> 3)) {
+			kstack = (uint64_t*)(td->td_kstack +
+			    (tf->tf_special.bspstore & 0x1ffUL));
+			error = copyout(kstack + data, addr, 8);
+		}
+		break;
+	case PT_SETKSTACK:
+		if (data >= 0 && data < (tf->tf_special.ndirty >> 3)) {
+			kstack = (uint64_t*)(td->td_kstack +
+			    (tf->tf_special.bspstore & 0x1ffUL));
+			error = copyin(addr, kstack + data, 8);
+		}
+		break;
+	}
+
+	return (error);
+}


Property changes on: trunk/sys/ia64/ia64/ptrace_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/sal.c
===================================================================
--- trunk/sys/ia64/ia64/sal.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/sal.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,131 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/sal.c 270296 2014-08-21 19:51:07Z emaste $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/efi.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <machine/intr.h>
+#include <machine/md_var.h>
+#include <machine/sal.h>
+#include <machine/smp.h>
+
+int ia64_ipi_wakeup;
+
+static struct ia64_fdesc sal_fdesc;
+static sal_entry_t	fake_sal;
+
+extern u_int64_t	ia64_pal_entry;
+sal_entry_t		*ia64_sal_entry = fake_sal;
+
+static struct uuid sal_table = EFI_TABLE_SAL;
+static struct sal_system_table *sal_systbl;
+
+/*
+ * Stand-in SAL procedure installed until ia64_sal_init() locates the
+ * real firmware entrypoint.  Always fails: status -3 (presumably a
+ * "not implemented" SAL status — confirm against the SAL spec) with
+ * zeroed result registers.  All eight argument slots are ignored.
+ */
+static struct ia64_sal_result
+fake_sal(u_int64_t a1, u_int64_t a2, u_int64_t a3, u_int64_t a4,
+	 u_int64_t a5, u_int64_t a6, u_int64_t a7, u_int64_t a8)
+{
+	struct ia64_sal_result res;
+	res.sal_status = -3;
+	res.sal_result[0] = 0;
+	res.sal_result[1] = 0;
+	res.sal_result[2] = 0;
+	return res;
+}
+
+/*
+ * Locate the SAL System Table via the EFI configuration table and walk
+ * its descriptor list.  Interesting descriptors:
+ *   type 0 - entrypoint descriptor: PAL and SAL procedure addresses
+ *            (remapped into region 7 for kernel virtual access);
+ *   type 5 - AP wake-up descriptor: the vector used to wake
+ *            application processors.
+ * Silently returns if no table is found; on a bad signature only a
+ * diagnostic is printed.
+ */
+void
+ia64_sal_init(void)
+{
+	/* Byte size of each SAL descriptor, indexed by descriptor type. */
+	static int sizes[6] = {
+		48, 32, 16, 32, 16, 16
+	};
+	u_int8_t *p;
+	int error, i;
+
+	sal_systbl = efi_get_table(&sal_table);
+	if (sal_systbl == NULL)
+		return;
+
+	if (bcmp(sal_systbl->sal_signature, SAL_SIGNATURE, 4)) {
+		printf("Bad signature for SAL System Table\n");
+		return;
+	}
+
+	/* Descriptors are packed immediately after the table header. */
+	p = (u_int8_t *) (sal_systbl + 1);
+	for (i = 0; i < sal_systbl->sal_entry_count; i++) {
+		switch (*p) {
+		case 0: {
+			struct sal_entrypoint_descriptor *dp;
+
+			dp = (struct sal_entrypoint_descriptor*)p;
+			ia64_pal_entry = IA64_PHYS_TO_RR7(dp->sale_pal_proc);
+			if (bootverbose)
+				printf("PAL Proc at 0x%lx\n", ia64_pal_entry);
+			sal_fdesc.func = IA64_PHYS_TO_RR7(dp->sale_sal_proc);
+			sal_fdesc.gp = IA64_PHYS_TO_RR7(dp->sale_sal_gp);
+			if (bootverbose)
+				printf("SAL Proc at 0x%lx, GP at 0x%lx\n",
+				    sal_fdesc.func, sal_fdesc.gp);
+			/* Replace fake_sal with the real function descriptor. */
+			ia64_sal_entry = (sal_entry_t *) &sal_fdesc;
+			break;
+		}
+		case 5: {
+			struct sal_ap_wakeup_descriptor *dp;
+
+			dp = (struct sal_ap_wakeup_descriptor*)p;
+			/* Mechanism 0 (external interrupt) is the only one handled. */
+			if (dp->sale_mechanism != 0) {
+				printf("SAL: unsupported AP wake-up mechanism "
+				    "(%d)\n", dp->sale_mechanism);
+				break;
+			}
+
+			/* Reserve the XIV so that we won't use it. */
+			error = ia64_xiv_reserve(dp->sale_vector,
+			    IA64_XIV_PLAT, NULL);
+			if (error) {
+				printf("SAL: invalid AP wake-up XIV (%#lx)\n",
+				    dp->sale_vector);
+				break;
+			}
+
+			ia64_ipi_wakeup = dp->sale_vector;
+			if (bootverbose)
+				printf("SAL: AP wake-up XIV: %#x\n",
+				    ia64_ipi_wakeup);
+			break;
+		}
+		}
+		/* Advance by the size of this descriptor type. */
+		p += sizes[*p];
+	}
+}


Property changes on: trunk/sys/ia64/ia64/sal.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/sapic.c
===================================================================
--- trunk/sys/ia64/ia64/sapic.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/sapic.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,382 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/sapic.c 227293 2011-11-07 06:44:47Z ed $
+ */
+
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/sysctl.h>
+
+#include <machine/intr.h>
+#include <machine/pal.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+/*
+ * Offsets from the SAPIC base in memory. Most registers are accessed
+ * by indexing using the SAPIC_IO_SELECT register.
+ */
+#define	SAPIC_IO_SELECT		0x00
+#define	SAPIC_IO_WINDOW		0x10
+#define	SAPIC_APIC_EOI		0x40
+
+/*
+ * Indexed registers.
+ */
+#define SAPIC_ID		0x00
+#define SAPIC_VERSION		0x01
+#define SAPIC_ARBITRATION_ID	0x02
+#define SAPIC_RTE_BASE		0x10
+
+/* Interrupt polarity. */
+#define	SAPIC_POLARITY_HIGH	0
+#define	SAPIC_POLARITY_LOW	1
+
+/* Interrupt trigger. */
+#define	SAPIC_TRIGGER_EDGE	0
+#define	SAPIC_TRIGGER_LEVEL	1
+
+/* Interrupt delivery mode. */
+#define	SAPIC_DELMODE_FIXED	0
+#define	SAPIC_DELMODE_LOWPRI	1
+#define	SAPIC_DELMODE_PMI	2
+#define	SAPIC_DELMODE_NMI	4
+#define	SAPIC_DELMODE_INIT	5
+#define	SAPIC_DELMODE_EXTINT	7
+
+/*
+ * Software state for one I/O SAPIC.  The spin mutex serializes the
+ * select/window register access sequence used by sapic_read/write.
+ */
+struct sapic {
+	struct mtx	sa_mtx;
+	uint64_t	sa_registers;	/* virtual address of sapic */
+	u_int		sa_id;		/* I/O SAPIC Id */
+	u_int		sa_base;	/* ACPI vector base */
+	u_int		sa_limit;	/* last ACPI vector handled here */
+};
+
+/*
+ * Redirection Table Entry, as a 64-bit bit-field overlay.  Accessed as
+ * two 32-bit halves through the indexed window registers (see
+ * sapic_read_rte/sapic_write_rte); field order here must match the
+ * hardware layout — do not reorder.
+ */
+struct sapic_rte {
+	uint64_t	rte_vector		:8;
+	uint64_t	rte_delivery_mode	:3;
+	uint64_t	rte_destination_mode	:1;
+	uint64_t	rte_delivery_status	:1;
+	uint64_t	rte_polarity		:1;
+	uint64_t	rte_rirr		:1;
+	uint64_t	rte_trigger_mode	:1;
+	uint64_t	rte_mask		:1;
+	uint64_t	rte_flushen		:1;
+	uint64_t	rte_reserved		:30;
+	uint64_t	rte_destination_eid	:8;
+	uint64_t	rte_destination_id	:8;
+};
+
+static MALLOC_DEFINE(M_SAPIC, "sapic", "I/O SAPIC devices");
+
+struct sapic *ia64_sapics[16];		/* XXX make this resizable */
+int ia64_sapic_count;
+
+static int sysctl_machdep_apic(SYSCTL_HANDLER_ARGS);
+
+SYSCTL_OID(_machdep, OID_AUTO, apic, CTLTYPE_STRING|CTLFLAG_RD,
+    NULL, 0, sysctl_machdep_apic, "A", "(x)APIC redirection table entries");
+
+/*
+ * Read an indexed SAPIC register: store the register index into the
+ * select register, fence, then load the value through the window.
+ * Caller must hold sa_mtx (or otherwise serialize) since the
+ * select/window pair is a shared two-step protocol.
+ */
+static __inline uint32_t
+sapic_read(struct sapic *sa, int which)
+{
+	uint32_t value;
+
+	ia64_st4((void *)(sa->sa_registers + SAPIC_IO_SELECT), which);
+	ia64_mf_a();	/* order the select store before the window load */
+	value = ia64_ld4((void *)(sa->sa_registers + SAPIC_IO_WINDOW));
+	return (value);
+}
+
+/*
+ * Write an indexed SAPIC register via the select/window pair.  Fences
+ * order the select store before the data store, and the data store
+ * before any subsequent access.  Same serialization requirement as
+ * sapic_read().
+ */
+static __inline void
+sapic_write(struct sapic *sa, int which, uint32_t value)
+{
+
+	ia64_st4((void *)(sa->sa_registers + SAPIC_IO_SELECT), which);
+	ia64_mf_a();
+	ia64_st4((void *)(sa->sa_registers + SAPIC_IO_WINDOW), value);
+	ia64_mf_a();
+}
+
+/*
+ * Fetch RTE 'which' as two 32-bit reads (low word, then high word) into
+ * the caller's struct sapic_rte overlay.
+ */
+static __inline void
+sapic_read_rte(struct sapic *sa, int which, struct sapic_rte *rte)
+{
+	uint32_t *p = (uint32_t *) rte;
+
+	p[0] = sapic_read(sa, SAPIC_RTE_BASE + 2 * which);
+	p[1] = sapic_read(sa, SAPIC_RTE_BASE + 2 * which + 1);
+}
+
+/*
+ * Store RTE 'which' as two 32-bit writes, low word first.  The low
+ * word carries the mask bit, so a masking write takes effect on the
+ * first store.
+ */
+static __inline void
+sapic_write_rte(struct sapic *sa, int which, struct sapic_rte *rte)
+{
+	uint32_t *p = (uint32_t *) rte;
+
+	sapic_write(sa, SAPIC_RTE_BASE + 2 * which, p[0]);
+	sapic_write(sa, SAPIC_RTE_BASE + 2 * which + 1, p[1]);
+}
+
+/*
+ * Find the SAPIC whose [sa_base, sa_limit] range covers 'irq'.
+ * If 'vecp' is non-NULL, also return the vector currently programmed
+ * into that IRQ's RTE.  Returns NULL when no SAPIC handles the IRQ.
+ */
+struct sapic *
+sapic_lookup(u_int irq, u_int *vecp)
+{
+	struct sapic_rte rte;
+	struct sapic *sa;
+	int i;
+
+	for (i = 0; i < ia64_sapic_count; i++) {
+		sa = ia64_sapics[i];
+		if (irq >= sa->sa_base && irq <= sa->sa_limit) {
+			if (vecp != NULL) {
+				mtx_lock_spin(&sa->sa_mtx);
+				sapic_read_rte(sa, irq - sa->sa_base, &rte);
+				mtx_unlock_spin(&sa->sa_mtx);
+				*vecp = rte.rte_vector;
+			}
+			return (sa);
+		}
+	}
+
+	return (NULL);
+}
+
+
+/*
+ * Retarget 'irq' at the CPU described by 'pc': program the RTE's
+ * destination id/eid from the CPU's local id (lid) and force fixed
+ * delivery.  Returns EINVAL if no SAPIC handles the IRQ, else 0.
+ */
+int
+sapic_bind_intr(u_int irq, struct pcpu *pc)
+{
+	struct sapic_rte rte;
+	struct sapic *sa;
+
+	sa = sapic_lookup(irq, NULL);
+	if (sa == NULL)
+		return (EINVAL);
+
+	mtx_lock_spin(&sa->sa_mtx);
+	sapic_read_rte(sa, irq - sa->sa_base, &rte);
+	/* lid layout: id in bits 31-24, eid in bits 23-16. */
+	rte.rte_destination_id = (pc->pc_md.lid >> 24) & 255;
+	rte.rte_destination_eid = (pc->pc_md.lid >> 16) & 255;
+	rte.rte_delivery_mode = SAPIC_DELMODE_FIXED;
+	sapic_write_rte(sa, irq - sa->sa_base, &rte);
+	mtx_unlock_spin(&sa->sa_mtx);
+	return (0);
+}
+
+/*
+ * Set trigger mode and polarity for 'irq'.  INTR_*_CONFORM selects the
+ * bus default: ISA-range IRQs (< 16) are edge/high-active, all others
+ * level/low-active.  Returns EINVAL if no SAPIC handles the IRQ.
+ */
+int
+sapic_config_intr(u_int irq, enum intr_trigger trig, enum intr_polarity pol)
+{
+	struct sapic_rte rte;
+	struct sapic *sa;
+
+	sa = sapic_lookup(irq, NULL);
+	if (sa == NULL)
+		return (EINVAL);
+
+	mtx_lock_spin(&sa->sa_mtx);
+	sapic_read_rte(sa, irq - sa->sa_base, &rte);
+	if (trig != INTR_TRIGGER_CONFORM)
+		rte.rte_trigger_mode = (trig == INTR_TRIGGER_EDGE) ?
+		    SAPIC_TRIGGER_EDGE : SAPIC_TRIGGER_LEVEL;
+	else
+		rte.rte_trigger_mode = (irq < 16) ? SAPIC_TRIGGER_EDGE :
+		    SAPIC_TRIGGER_LEVEL;
+	if (pol != INTR_POLARITY_CONFORM)
+		rte.rte_polarity = (pol == INTR_POLARITY_HIGH) ?
+		    SAPIC_POLARITY_HIGH : SAPIC_POLARITY_LOW;
+	else
+		rte.rte_polarity = (irq < 16) ? SAPIC_POLARITY_HIGH :
+		    SAPIC_POLARITY_LOW;
+	sapic_write_rte(sa, irq - sa->sa_base, &rte);
+	mtx_unlock_spin(&sa->sa_mtx);
+	return (0);
+}
+
+/*
+ * Allocate and initialize the software state for one I/O SAPIC: map
+ * its register page, read its version register to learn the number of
+ * RTEs, register it in ia64_sapics[], and mask every RTE with a sane
+ * default trigger/polarity.  Returns NULL on allocation failure.
+ * NOTE(review): ia64_sapics[] is a fixed 16-entry array with no bounds
+ * check here — overflow would corrupt memory; confirm callers bound
+ * the SAPIC count.
+ */
+struct sapic *
+sapic_create(u_int id, u_int base, uint64_t address)
+{
+	struct sapic_rte rte;
+	struct sapic *sa;
+	u_int i, max;
+
+	sa = malloc(sizeof(struct sapic), M_SAPIC, M_ZERO | M_NOWAIT);
+	if (sa == NULL)
+		return (NULL);
+
+	sa->sa_id = id;
+	sa->sa_base = base;
+	/* Map a 1MB window covering the SAPIC's register space. */
+	sa->sa_registers = (uintptr_t)pmap_mapdev(address, 1048576);
+
+	mtx_init(&sa->sa_mtx, "I/O SAPIC lock", NULL, MTX_SPIN);
+
+	/* Version register bits 23-16 hold the highest RTE index. */
+	max = (sapic_read(sa, SAPIC_VERSION) >> 16) & 0xff;
+	sa->sa_limit = base + max;
+
+	ia64_sapics[ia64_sapic_count++] = sa;
+
+	/*
+	 * Initialize all RTEs with a default trigger mode and polarity.
+	 * This may be changed later by calling sapic_config_intr(). We
+	 * mask all interrupts by default.
+	 */
+	bzero(&rte, sizeof(rte));
+	rte.rte_mask = 1;
+	for (i = base; i <= sa->sa_limit; i++) {
+		rte.rte_trigger_mode = (i < 16) ? SAPIC_TRIGGER_EDGE :
+		    SAPIC_TRIGGER_LEVEL;
+		rte.rte_polarity = (i < 16) ? SAPIC_POLARITY_HIGH :
+		    SAPIC_POLARITY_LOW;
+		sapic_write_rte(sa, i - base, &rte);
+	}
+
+	return (sa);
+}
+
+/*
+ * Route 'irq' to 'vector', target it at the current CPU (from the
+ * local id register), select fixed delivery, and unmask it.
+ * Always returns 0.
+ */
+int
+sapic_enable(struct sapic *sa, u_int irq, u_int vector)
+{
+	struct sapic_rte rte;
+	uint64_t lid = ia64_get_lid();
+
+	mtx_lock_spin(&sa->sa_mtx);
+	sapic_read_rte(sa, irq - sa->sa_base, &rte);
+	rte.rte_destination_id = (lid >> 24) & 255;
+	rte.rte_destination_eid = (lid >> 16) & 255;
+	rte.rte_delivery_mode = SAPIC_DELMODE_FIXED;
+	rte.rte_vector = vector;
+	rte.rte_mask = 0;
+	sapic_write_rte(sa, irq - sa->sa_base, &rte);
+	mtx_unlock_spin(&sa->sa_mtx);
+	return (0);
+}
+
+/*
+ * Signal end-of-interrupt for 'vector' by writing it to the SAPIC's
+ * EOI register.  No locking needed: this is a single store, not the
+ * select/window sequence.
+ */
+void
+sapic_eoi(struct sapic *sa, u_int vector)
+{
+
+	ia64_st4((void *)(sa->sa_registers + SAPIC_APIC_EOI), vector);
+	ia64_mf_a();
+}
+
+/*
+ * Mask (disable) 'irq' by setting the RTE mask bit.
+ * Expected to be called with interrupts disabled.
+ */
+void
+sapic_mask(struct sapic *sa, u_int irq)
+{
+	struct sapic_rte rte;
+
+	mtx_lock_spin(&sa->sa_mtx);
+	sapic_read_rte(sa, irq - sa->sa_base, &rte);
+	rte.rte_mask = 1;
+	sapic_write_rte(sa, irq - sa->sa_base, &rte);
+	mtx_unlock_spin(&sa->sa_mtx);
+}
+
+/*
+ * Unmask (enable) 'irq' by clearing the RTE mask bit.
+ * Expected to be called with interrupts disabled.
+ */
+void
+sapic_unmask(struct sapic *sa, u_int irq)
+{
+	struct sapic_rte rte;
+
+	mtx_lock_spin(&sa->sa_mtx);
+	sapic_read_rte(sa, irq - sa->sa_base, &rte);
+	rte.rte_mask = 0;
+	sapic_write_rte(sa, irq - sa->sa_base, &rte);
+	mtx_unlock_spin(&sa->sa_mtx);
+}
+
+/*
+ * machdep.apic sysctl handler: dump every programmed RTE (vector != 0)
+ * of every registered SAPIC as a human-readable table.  The RTE is
+ * copied out under the spin lock and formatted after the lock is
+ * dropped, since SYSCTL_OUT may sleep.
+ */
+static int
+sysctl_machdep_apic(SYSCTL_HANDLER_ARGS)
+{
+	char buf[80];
+	struct sapic_rte rte;
+	struct sapic *sa;
+	int apic, count, error, index, len;
+
+	len = sprintf(buf, "\n    APIC Idx: Id,EId : RTE\n");
+	error = SYSCTL_OUT(req, buf, len);
+	if (error)
+		return (error);
+
+	for (apic = 0; apic < ia64_sapic_count; apic++) {
+		sa = ia64_sapics[apic];
+		count = sa->sa_limit - sa->sa_base + 1;
+		for (index = 0; index < count; index++) {
+			mtx_lock_spin(&sa->sa_mtx);
+			sapic_read_rte(sa, index, &rte);
+			mtx_unlock_spin(&sa->sa_mtx);
+			/* Skip RTEs that were never given a vector. */
+			if (rte.rte_vector == 0)
+				continue;
+			len = sprintf(buf,
+    "    0x%02x %3d: (%02x,%02x): %3d %d %d %s %s %s %s %s\n",
+			    sa->sa_id, index,
+			    rte.rte_destination_id, rte.rte_destination_eid,
+			    rte.rte_vector, rte.rte_delivery_mode,
+			    rte.rte_destination_mode,
+			    rte.rte_delivery_status ? "DS" : "  ",
+			    rte.rte_polarity ? "low-active " : "high-active",
+			    rte.rte_rirr ? "RIRR" : "    ",
+			    rte.rte_trigger_mode ? "level" : "edge ",
+			    rte.rte_flushen ? "F" : " ");
+			error = SYSCTL_OUT(req, buf, len);
+			if (error)
+				return (error);
+		}
+	}
+
+	return (0);
+}
+
+#ifdef DDB
+
+#include <ddb/ddb.h>
+
+/*
+ * DDB helper: print the RTE for 'irq' on 'sa'.  Reads the RTE without
+ * taking sa_mtx — acceptable only because DDB runs with the system
+ * stopped.
+ */
+void
+sapic_print(struct sapic *sa, u_int irq)
+{
+	struct sapic_rte rte;
+
+	db_printf("sapic=%u, irq=%u: ", sa->sa_id, irq);
+	sapic_read_rte(sa, irq - sa->sa_base, &rte);
+	db_printf("%3d %x->%x:%x %d %s %s %s %s %s %s\n", rte.rte_vector,
+	    rte.rte_delivery_mode,
+	    rte.rte_destination_id, rte.rte_destination_eid,
+	    rte.rte_destination_mode,
+	    rte.rte_delivery_status ? "DS" : "  ",
+	    rte.rte_polarity ? "low-active " : "high-active",
+	    rte.rte_rirr ? "RIRR" : "    ",
+	    rte.rte_trigger_mode ? "level" : "edge ",
+	    rte.rte_flushen ? "F" : " ",
+	    rte.rte_mask ? "(masked)" : "");
+}
+
+#endif


Property changes on: trunk/sys/ia64/ia64/sapic.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/setjmp.S
===================================================================
--- trunk/sys/ia64/ia64/setjmp.S	                        (rev 0)
+++ trunk/sys/ia64/ia64/setjmp.S	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,350 @@
+/* $MidnightBSD$ */
+// $FreeBSD: stable/10/sys/ia64/ia64/setjmp.S 139790 2005-01-06 22:18:23Z imp $
+
+//-
+// Copyright (c) 1999, 2000
+// Intel Corporation.
+// All rights reserved.
+// 
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 
+// 2. Redistributions in binary form must reproduce the above copyright
+//   notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 
+// 3. All advertising materials mentioning features or use of this software
+//    must display the following acknowledgement:
+// 
+//    This product includes software developed by Intel Corporation and
+//    its contributors.
+// 
+// 4. Neither the name of Intel Corporation or its contributors may be
+//    used to endorse or promote products derived from this software
+//    without specific prior written permission.
+// 
+// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION AND CONTRIBUTORS ``AS IS''
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED.  IN NO EVENT SHALL INTEL CORPORATION OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+// 
+//
+
+//
+// Module Name:
+//
+//  setjmp.s
+//
+// Abstract:
+//
+//  Contains an implementation of setjmp and longjmp for the
+//  IA-64 architecture.
+
+    .file   "setjmp.s"
+
+#include    <machine/asm.h>
+#include    <machine/setjmp.h>
+
+// int setjmp(struct jmp_buffer *)
+//
+//  Setup a non-local goto.
+//
+// Description:
+//
+//  SetJump stores the current register set in the area pointed to
+//  by "save".  It returns zero.  Subsequent calls to "LongJump" will
+//  restore the registers and return non-zero to the same location.
+//
+// On entry, r32 contains the pointer to the jmp_buffer
+//
+
+// Register usage: r32 = jmp_buffer pointer (aligned up to 16 bytes);
+// r10/r11 walk the save area via post-increment offsets; r14 holds the
+// caller's user UNaT so it can be restored before returning.
+ENTRY(setjmp, 1)
+    //
+    //  Make sure buffer is aligned at a 16-byte boundary
+    //
+    add     r10 = -0x10,r0  ;;  // mask the lower 4 bits
+    and     r32 = r32, r10;; 
+    add     r32 = 0x10, r32;;   // move to next 16 byte boundary
+                                // NOTE(review): rounds up even when the
+                                // buffer is already aligned, so jmp_buf
+                                // must reserve 16 spare bytes
+
+    add     r10 = J_PREDS, r32  // skip Unats & pfs save area
+    add     r11 = J_BSP, r32
+    //
+    //  save immediate context
+    //
+    mov     r2 = ar.bsp         // save backing store pointer
+    mov     r3 = pr             // save predicates
+    flushrs
+    ;;
+    //
+    // save user Unat register
+    //
+    mov     r16 = ar.lc         // save loop count register
+    mov     r14 = ar.unat       // save user Unat register
+
+    st8     [r10] = r3, J_LC-J_PREDS
+    st8     [r11] = r2, J_R4-J_BSP
+    ;;
+    st8     [r10] = r16, J_R5-J_LC
+    st8     [r32] = r14, J_NATS // Note: Unat at the 
+                                // beginning of the save area
+    mov     r15 = ar.pfs
+    ;;
+    //
+    //  save preserved general registers & NaT's
+    //
+    st8.spill   [r11] = r4, J_R6-J_R4
+    ;;
+    st8.spill   [r10] = r5, J_R7-J_R5 
+    ;;
+    st8.spill   [r11] = r6, J_SP-J_R6
+    ;;
+    st8.spill   [r10] = r7, J_F3-J_R7 
+    ;;
+    st8.spill   [r11] = sp, J_F2-J_SP
+    ;;
+    //
+    // save spilled Unat and pfs registers
+    //
+    mov     r2 = ar.unat        // save Unat register after spill
+    ;;
+    st8     [r32] = r2, J_PFS-J_NATS    // save unat for spilled regs
+    ;;
+    st8     [r32] = r15         // save pfs
+    //
+    //  save floating registers 
+    //
+    stf.spill   [r11] = f2, J_F4-J_F2
+    stf.spill   [r10] = f3, J_F5-J_F3 
+    ;;
+    stf.spill   [r11] = f4, J_F16-J_F4
+    stf.spill   [r10] = f5, J_F17-J_F5 
+    ;;
+    stf.spill   [r11] = f16, J_F18-J_F16
+    stf.spill   [r10] = f17, J_F19-J_F17 
+    ;;
+    stf.spill   [r11] = f18, J_F20-J_F18
+    stf.spill   [r10] = f19, J_F21-J_F19 
+    ;;
+    stf.spill   [r11] = f20, J_F22-J_F20
+    stf.spill   [r10] = f21, J_F23-J_F21 
+    ;;
+    stf.spill   [r11] = f22, J_F24-J_F22
+    stf.spill   [r10] = f23, J_F25-J_F23 
+    ;;
+    stf.spill   [r11] = f24, J_F26-J_F24
+    stf.spill   [r10] = f25, J_F27-J_F25 
+    ;;
+    stf.spill   [r11] = f26, J_F28-J_F26
+    stf.spill   [r10] = f27, J_F29-J_F27 
+    ;;
+    stf.spill   [r11] = f28, J_F30-J_F28
+    stf.spill   [r10] = f29, J_F31-J_F29 
+    ;;
+    stf.spill   [r11] = f30, J_FPSR-J_F30
+    stf.spill   [r10] = f31, J_B0-J_F31     // size of f31 + fpsr
+    //
+    // save FPSR register & branch registers
+    //
+    mov     r2 = ar.fpsr    // save fpsr register
+    mov     r3 = b0 
+    ;;
+    st8     [r11] = r2, J_B1-J_FPSR
+    st8     [r10] = r3, J_B2-J_B0
+    mov     r2 = b1
+    mov     r3 = b2 
+    ;;
+    st8     [r11] = r2, J_B3-J_B1
+    st8     [r10] = r3, J_B4-J_B2
+    mov     r2 = b3
+    mov     r3 = b4 
+    ;;
+    st8     [r11] = r2, J_B5-J_B3
+    st8     [r10] = r3
+    mov     r2 = b5 
+    ;;
+    st8     [r11] = r2
+    ;;
+    //
+    // return
+    //
+    mov     r8 = r0         // return 0 from setjmp
+    mov     ar.unat = r14   // restore unat
+    br.ret.sptk b0
+
+END(setjmp)
+
+
+//
+// void longjmp(struct jmp_buffer *, int val)
+//
+//  Perform a non-local goto.
+//
+// Description:
+//
+//  LongJump initializes the register set to the values saved by a
+//  previous 'SetJump' and jumps to the return location saved by that
+//  'SetJump'.  This has the effect of unwinding the stack and returning
+//  for a second time to the 'SetJump'.
+//
+
+// Register usage: r32 = jmp_buffer (aligned up, matching setjmp);
+// r33 = return value, cached in r8 early because the final invala
+// would otherwise invalidate advanced loads.  The RSE is rewound so
+// BSPSTORE equals the BSP saved by setjmp, reconstructing RNAT along
+// the way.
+ENTRY(longjmp, 2)
+    //
+    //  Make sure buffer is aligned at a 16-byte boundary
+    //
+    add     r10 = -0x10,r0  ;;  // mask the lower 4 bits
+    and     r32 = r32, r10;; 
+    add     r32 = 0x10, r32;;   // move to next 16 byte boundary
+
+    //
+    // caching the return value as we do invala in the end
+    //
+    mov     r8 = r33            // return value
+
+    //
+    //  get immediate context
+    //
+    mov     r14 = ar.rsc        // get user RSC conf 
+    add     r10 = J_PFS, r32    // get address of pfs
+    add     r11 = J_NATS, r32
+    ;;
+    ld8     r15 = [r10], J_BSP-J_PFS    // get pfs
+    ld8     r2 = [r11], J_LC-J_NATS     // get unat for spilled regs
+    ;;
+    mov     ar.unat = r2
+    ;;
+    ld8     r16 = [r10], J_PREDS-J_BSP  // get backing store pointer
+    mov     ar.rsc = r0         // put RSE in enforced lazy 
+    mov     ar.pfs = r15
+    ;;
+    
+    //
+    // while returning from longjmp the BSPSTORE and BSP needs to be
+    // the same, and we discard all the registers allocated after we did
+    // setjmp. Also, we need to generate the RNAT register since we
+    // did not flush the RSE on setjmp.
+    //
+    mov     r17 = ar.bspstore   // get current BSPSTORE
+    ;;
+    cmp.ltu p6,p7 = r17, r16    // is it less than BSP of 
+(p6)    br.spnt.few .flush_rse
+    mov     r19 = ar.rnat       // get current RNAT
+    ;;
+    loadrs                      // invalidate dirty regs
+    br.sptk.many    .restore_rnat       // restore RNAT
+
+.flush_rse:
+    flushrs
+    ;;
+    mov     r19 = ar.rnat       // get current RNAT
+    mov     r17 = r16           // current BSPSTORE
+    ;;
+.restore_rnat:
+    //
+    // check if RNAT is saved between saved BSP and curr BSPSTORE
+    //
+    mov     r18 = 0x3f          // RNAT slot: bits 3-8 all ones
+    ;; 
+    dep     r18 = r18,r16,3,6   // get RNAT address
+    ;;
+    cmp.ltu p8,p9 = r18, r17    // RNAT saved on RSE
+    ;;
+(p8)    ld8     r19 = [r18]     // get RNAT from RSE
+    ;;
+    mov     ar.bspstore = r16   // set new BSPSTORE 
+    ;;
+    mov     ar.rnat = r19       // restore RNAT
+    mov     ar.rsc = r14        // restore RSC conf
+
+
+    ld8     r3 = [r11], J_R4-J_LC       // get lc register
+    ld8     r2 = [r10], J_R5-J_PREDS    // get predicates
+    ;;
+    mov     pr = r2, -1
+    mov     ar.lc = r3
+    //
+    //  restore preserved general registers & NaT's
+    //
+    ld8.fill    r4 = [r11], J_R6-J_R4
+    ;;
+    ld8.fill    r5 = [r10], J_R7-J_R5 
+    ld8.fill    r6 = [r11], J_SP-J_R6
+    ;;
+    ld8.fill    r7 = [r10], J_F2-J_R7
+    ld8.fill    sp = [r11], J_F3-J_SP
+    ;;
+    //
+    //  restore floating registers 
+    //
+    ldf.fill    f2 = [r10], J_F4-J_F2
+    ldf.fill    f3 = [r11], J_F5-J_F3 
+    ;;
+    ldf.fill    f4 = [r10], J_F16-J_F4
+    ldf.fill    f5 = [r11], J_F17-J_F5 
+    ;;
+    ldf.fill    f16 = [r10], J_F18-J_F16
+    ldf.fill    f17 = [r11], J_F19-J_F17
+    ;;
+    ldf.fill    f18 = [r10], J_F20-J_F18
+    ldf.fill    f19 = [r11], J_F21-J_F19
+    ;;
+    ldf.fill    f20 = [r10], J_F22-J_F20
+    ldf.fill    f21 = [r11], J_F23-J_F21
+    ;;
+    ldf.fill    f22 = [r10], J_F24-J_F22
+    ldf.fill    f23 = [r11], J_F25-J_F23 
+    ;;
+    ldf.fill    f24 = [r10], J_F26-J_F24
+    ldf.fill    f25 = [r11], J_F27-J_F25
+    ;;
+    ldf.fill    f26 = [r10], J_F28-J_F26
+    ldf.fill    f27 = [r11], J_F29-J_F27
+    ;;
+    ldf.fill    f28 = [r10], J_F30-J_F28
+    ldf.fill    f29 = [r11], J_F31-J_F29 
+    ;;
+    ldf.fill    f30 = [r10], J_FPSR-J_F30
+    ldf.fill    f31 = [r11], J_B0-J_F31 ;;
+
+    //
+    // restore branch registers and fpsr
+    //
+    ld8     r16 = [r10], J_B1-J_FPSR    // get fpsr
+    ld8     r17 = [r11], J_B2-J_B0      // get return pointer
+    ;;
+    mov     ar.fpsr = r16
+    mov     b0 = r17
+    ld8     r2 = [r10], J_B3-J_B1
+    ld8     r3 = [r11], J_B4-J_B2
+    ;;
+    mov     b1 = r2
+    mov     b2 = r3
+    ld8     r2 = [r10], J_B5-J_B3
+    ld8     r3 = [r11]
+    ;;
+    mov     b3 = r2
+    mov     b4 = r3 
+    ld8     r2 = [r10]
+    ld8     r21 = [r32]         // get user unat
+    ;;
+    mov     b5 = r2
+    mov     ar.unat = r21
+
+    //
+    // invalidate ALAT
+    //
+    invala ;;
+
+    br.ret.sptk b0
+
+END(longjmp)


Property changes on: trunk/sys/ia64/ia64/setjmp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/stack_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/stack_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/stack_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,58 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2005 Antoine Brodin
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/stack_machdep.c 174195 2007-12-02 20:40:35Z rwatson $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/stack.h>
+
+/*
+ * Capture the kernel stack trace of thread 'td'.  Not implemented on
+ * ia64: after sanity-checking that the thread is neither swapped out
+ * nor running (both panic), it simply records an empty trace.
+ */
+void
+stack_save_td(struct stack *st, struct thread *td)
+{
+
+	if (TD_IS_SWAPPED(td))
+		panic("stack_save_td: swapped");
+	if (TD_IS_RUNNING(td))
+		panic("stack_save_td: running");
+
+	stack_zero(st);
+}
+
+/*
+ * Capture the current thread's kernel stack trace.  Not implemented on
+ * ia64; always records an empty trace (see open questions below).
+ */
+void
+stack_save(struct stack *st)
+{
+
+	stack_zero(st);
+	/*
+	 * Nothing for now.
+	 * Is libuwx reentrant?
+	 * Can unw_create* sleep?
+	 */
+}


Property changes on: trunk/sys/ia64/ia64/stack_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/support.S
===================================================================
--- trunk/sys/ia64/ia64/support.S	                        (rev 0)
+++ trunk/sys/ia64/ia64/support.S	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,844 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/support.S 246715 2013-02-12 17:38:35Z marcel $
+ */
+/*-
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution at CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <machine/asm.h>
+#include <machine/ia64_cpu.h>
+#include <assym.s>
+
+	.text
+
+// Common fault-recovery landing pad for the user access primitives
+// below (casuword, subyte, fubyte, ...).  Each of them installs this
+// address in pcb_onfault before touching user memory; on entry here
+// r15 still points at the pcb_onfault field.  Clear it, return -1.
+ENTRY(fusufault, 0)
+{	.mib
+	st8.rel		[r15]=r0		// Clear onfault.
+	add		ret0=-1,r0		// signal failure
+	br.ret.sptk	rp
+	;;
+}
+END(fusufault)
+
+/*
+ * casuword(u_long *p, u_long old, u_long new)
+ *	Perform a compare-exchange in user space.
+ *	Returns the value previously found at *p, or -1 if p is not a
+ *	user address or the access faults (note: a stored value of
+ *	(u_long)-1 is indistinguishable from failure).
+ */
+ENTRY(casuword, 3)
+{	.mlx
+	ld8.acq		r15=[r13]		// r15 = curthread
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	add		r15=TD_PCB,r15
+	cmp.geu		p6,p0=in0,r14		// p in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	ld8		r15=[r15]		// r15 = PCB
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	mov		ar.ccv=in1		// comparand for cmpxchg
+	add		r15=PCB_ONFAULT,r15
+	nop		0
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	cmpxchg8.rel	ret0=[in0],in2,ar.ccv	// ret0 = previous *p
+	nop		0
+	;;
+}
+{	.mib
+	st8.rel		[r15]=r0		// Clear onfault
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// bad address: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(casuword)
+
+/*
+ * casuword32(uint32_t *p, uint32_t old, uint32_t new)
+ *	Perform a 32-bit compare-exchange in user space.
+ *	Returns the value previously found at *p, or -1 on a bad
+ *	address or fault (see casuword for the onfault protocol).
+ */
+ENTRY(casuword32, 3)
+{	.mlx
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	ld8.acq		r15=[r13]		// r15 = curthread
+	cmp.geu		p6,p0=in0,r14		// p in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	add		r15=TD_PCB,r15
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	ld8		r15=[r15]		// r15 = PCB
+	;;
+	mov		ar.ccv=in1		// comparand for cmpxchg
+	add		r15=PCB_ONFAULT,r15
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	cmpxchg4.rel	ret0=[in0],in2,ar.ccv	// ret0 = previous *p
+	nop		0
+	;;
+}
+{	.mib
+	st8.rel		[r15]=r0		// Clear onfault
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// bad address: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(casuword32)
+
+/*
+ * subyte(void *addr, int byte)
+ * suword16(void *addr, int word)
+ * suword32(void *addr, int word)
+ * suword64|suword(void *addr, long word)
+ *	Store in user space
+ *	Each returns 0 on success, or -1 if addr is outside user space
+ *	or the store faults (recovered via fusufault/pcb_onfault).
+ */
+
+ENTRY(subyte, 2)
+{	.mlx
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	ld8.acq		r15=[r13]		// r15 = curthread
+	cmp.geu		p6,p0=in0,r14		// addr in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	add		r15=TD_PCB,r15
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	ld8		r15=[r15]		// r15 = PCB
+	;;
+	nop		0
+	add		r15=PCB_ONFAULT,r15
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	st1.rel		[in0]=in1		// the user-space store
+	nop		0
+	;;
+}
+{	.mib
+	st8.rel		[r15]=r0		// Clear onfault
+	mov		ret0=r0			// success
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// failure: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(subyte)
+
+// suword16: store a 16-bit word at a user address; 0 on success, -1
+// on a bad address or fault (see family comment above subyte).
+ENTRY(suword16, 2)
+{	.mlx
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	ld8.acq		r15=[r13]		// r15 = curthread
+	cmp.geu		p6,p0=in0,r14		// addr in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	add		r15=TD_PCB,r15
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	ld8		r15=[r15]		// r15 = PCB
+	;;
+	nop		0
+	add		r15=PCB_ONFAULT,r15
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	st2.rel		[in0]=in1		// the user-space store
+	nop		0
+	;;
+}
+{	.mib
+	st8.rel		[r15]=r0		// Clear onfault
+	mov		ret0=r0			// success
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// failure: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(suword16)
+
+// suword32: store a 32-bit word at a user address; 0 on success, -1
+// on a bad address or fault (see family comment above subyte).
+ENTRY(suword32, 2)
+{	.mlx
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	ld8.acq		r15=[r13]		// r15 = curthread
+	cmp.geu		p6,p0=in0,r14		// addr in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	add		r15=TD_PCB,r15
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	ld8		r15=[r15]		// r15 = PCB
+	;;
+	nop		0
+	add		r15=PCB_ONFAULT,r15
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	st4.rel		[in0]=in1		// the user-space store
+	nop		0
+	;;
+}
+{	.mib
+	st8.rel		[r15]=r0		// Clear onfault
+	mov		ret0=r0			// success
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// failure: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(suword32)
+
+// suword64 (alias suword): store a 64-bit word at a user address;
+// 0 on success, -1 on a bad address or fault (see subyte).
+ENTRY(suword64, 2)
+XENTRY(suword)
+{	.mlx
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	ld8.acq		r15=[r13]		// r15 = curthread
+	cmp.geu		p6,p0=in0,r14		// addr in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	add		r15=TD_PCB,r15
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	ld8		r15=[r15]		// r15 = PCB
+	;;
+	nop		0
+	add		r15=PCB_ONFAULT,r15
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	st8.rel		[in0]=in1		// the user-space store
+	nop		0
+	;;
+}
+{	.mib
+	st8.rel		[r15]=r0		// Clear onfault
+	mov		ret0=r0			// success
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// failure: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(suword64)
+
+/*
+ * fubyte(void *addr)
+ * fuword16(void *addr)
+ * fuword32(void *addr)
+ * fuword64|fuword(void *addr)
+ *	Fetch from user space
+ *	Each returns the (zero-extended) value read, or -1 if addr is
+ *	not in user space or the load faults (via fusufault).
+ */
+
+ENTRY(fubyte, 1)
+{	.mlx
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	ld8.acq		r15=[r13]		// r15 = curthread
+	cmp.geu		p6,p0=in0,r14		// addr in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	add		r15=TD_PCB,r15
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	ld8		r15=[r15]		// r15 = PCB
+	;;
+	nop		0
+	add		r15=PCB_ONFAULT,r15
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	mf					// memory fence before the user load
+	nop		0
+	;;
+}
+{	.mmb
+	ld1		ret0=[in0]		// the user-space load
+	st8.rel		[r15]=r0		// Clear onfault
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// failure: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(fubyte)
+
+// fuword16: fetch a 16-bit word from a user address; returns the
+// value or -1 on a bad address or fault (see fubyte).
+ENTRY(fuword16, 2)
+{	.mlx
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	ld8.acq		r15=[r13]		// r15 = curthread
+	cmp.geu		p6,p0=in0,r14		// addr in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	add		r15=TD_PCB,r15
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	ld8		r15=[r15]		// r15 = PCB
+	;;
+	nop		0
+	add		r15=PCB_ONFAULT,r15
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	mf					// memory fence before the user load
+	nop		0
+	;;
+}
+{	.mmb
+	ld2		ret0=[in0]		// the user-space load
+	st8.rel		[r15]=r0		// Clear onfault
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// failure: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(fuword16)
+
+// fuword32: fetch a 32-bit word from a user address; returns the
+// value or -1 on a bad address or fault (see fubyte).
+ENTRY(fuword32, 2)
+{	.mlx
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	ld8.acq		r15=[r13]		// r15 = curthread
+	cmp.geu		p6,p0=in0,r14		// addr in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	add		r15=TD_PCB,r15
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	ld8		r15=[r15]		// r15 = PCB
+	;;
+	nop		0
+	add		r15=PCB_ONFAULT,r15
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	mf					// memory fence before the user load
+	nop		0
+	;;
+}
+{	.mmb
+	ld4		ret0=[in0]		// the user-space load
+	st8.rel		[r15]=r0		// Clear onfault
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// failure: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(fuword32)
+
+// fuword64 (alias fuword): fetch a 64-bit word from a user address;
+// returns the value or -1 on a bad address or fault (see fubyte).
+ENTRY(fuword64, 2)
+XENTRY(fuword)
+{	.mlx
+	movl		r14=VM_MAXUSER_ADDRESS
+	;;
+}
+{	.mib
+	ld8.acq		r15=[r13]		// r15 = curthread
+	cmp.geu		p6,p0=in0,r14		// addr in user space?
+(p6)	br.dpnt.few	1f			// no: return -1
+	;;
+}
+{	.mlx
+	add		r15=TD_PCB,r15
+	movl		r14=fusufault		// fault recovery address
+	;;
+}
+{	.mmi
+	ld8		r15=[r15]		// r15 = PCB
+	;;
+	nop		0
+	add		r15=PCB_ONFAULT,r15
+	;;
+}
+{	.mmi
+	st8		[r15]=r14		// Set onfault
+	;;
+	mf					// memory fence before the user load
+	nop		0
+	;;
+}
+{	.mmb
+	ld8		ret0=[in0]		// the user-space load
+	st8.rel		[r15]=r0		// Clear onfault
+	br.ret.sptk	rp
+	;;
+}
+1:
+{	.mib
+	add		ret0=-1,r0		// failure: return -1
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(fuword64)
+
+/*
+ * fuswintr(void *addr)
+ * suswintr(void *addr)
+ *	Interrupt-time user access: not implemented on ia64, both
+ *	always fail with -1 so callers fall back to a safe path.
+ */
+
+ENTRY(fuswintr, 1)
+{	.mib
+	add		ret0=-1,r0		// always fail
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(fuswintr)
+
+// See fuswintr above: interrupt-time stores are unsupported; fail.
+ENTRY(suswintr, 0)
+{	.mib
+	add		ret0=-1,r0		// always fail
+	nop		0
+	br.ret.sptk	rp
+	;;
+}
+END(suswintr)
+
+/**************************************************************************/
+
+/*
+ * Copy a null-terminated string within the kernel's address space.
+ * If lenp is not NULL, store the number of chars copied in *lenp
+ *
+ * int copystr(char *from, char *to, size_t len, size_t *lenp);
+ *	Returns 0 when the terminating NUL was copied, ENAMETOOLONG
+ *	when len was exhausted first.
+ */
+ENTRY(copystr, 4)
+	mov	r14=in2			// r14 = i = len
+	cmp.eq	p6,p0=r0,in2
+(p6)	br.cond.spnt.few 2f		// if (len == 0), bail out
+
+1:	ld1	r15=[in0],1		// read one byte
+	;;
+	st1	[in1]=r15,1		// write that byte
+	add	in2=-1,in2		// len--
+	;;
+	cmp.eq	p6,p0=r0,r15
+	cmp.ne	p7,p0=r0,in2
+	;; 
+(p6)	br.cond.spnt.few 2f		// if (*from == 0), bail out
+(p7)	br.cond.sptk.few 1b		// if (len != 0) copy more
+
+2:	cmp.eq	p6,p0=r0,in3
+(p6)	br.cond.dpnt.few 3f		// skip the store if (lenp == NULL)
+	sub	r14=r14,in2		// *lenp = (i - len)
+	;;
+	st8	[in3]=r14
+	
+3:	cmp.eq	p6,p0=r0,r15
+	// NOTE(review): if len was 0 on entry, r15 was never loaded and
+	// this test depends on stale register contents -- confirm callers
+	// never pass len == 0.
+(p6)	br.cond.spnt.few 4f		// *from == '\0'; leave quietly
+
+	mov	ret0=ENAMETOOLONG	// *from != '\0'; error.
+	br.ret.sptk.few rp
+
+4:	mov	ret0=0			// return 0.
+	br.ret.sptk.few rp
+END(copystr)
+
+// copyinstr(uaddr, kaddr, len, lenp): copy a NUL-terminated string in
+// from user space.  Returns copystr's result (0 or ENAMETOOLONG), or
+// EFAULT via copyerr if uaddr is outside user space or a fault occurs
+// while pcb_onfault points at copyerr.
+ENTRY(copyinstr, 4)
+	.prologue
+	.regstk	4, 3, 4, 0
+	.save	ar.pfs,loc0
+	alloc	loc0=ar.pfs,4,3,4,0
+	.save	rp,loc1
+	mov	loc1=rp
+	.body
+
+	movl	loc2=VM_MAXUSER_ADDRESS		// make sure that src addr
+	;; 
+	cmp.geu	p6,p0=in0,loc2			// is in user space.
+	;; 
+(p6)	br.cond.spnt.few copyerr		// if it's not, error out.
+	ld8.acq	r15=[r13]			// r15 = curthread
+	movl	r14=copyerr			// set up fault handler.
+	;;
+	add	r15=TD_PCB,r15			// find pcb
+	;;
+	ld8	r15=[r15]
+	;;
+	add	loc2=PCB_ONFAULT,r15
+	;;
+	st8	[loc2]=r14
+	;;
+	mov	out0=in0
+	mov	out1=in1
+	mov	out2=in2
+	mov	out3=in3
+	;;
+	br.call.sptk.few rp=copystr		// do the copy.
+	st8	[loc2]=r0			// kill the fault handler.
+	mov	ar.pfs=loc0			// restore ar.pfs
+	mov	rp=loc1				// restore ra.
+	br.ret.sptk.few rp			// ret0 left over from copystr
+END(copyinstr)
+
+/*
+ * Not the fastest bcopy in the world.
+ *	bcopy(src, dst, len): handles overlapping regions by copying
+ *	backwards when dst-src < len; otherwise copies byte-wise until
+ *	src and dst share 8-byte alignment, then 8 bytes at a time.
+ *	Always returns 0 so copyin/copyout can reuse its return value.
+ */
+ENTRY(bcopy, 3)
+	mov	ret0=r0				// return zero for copy{in,out}
+	;; 
+	cmp.le	p6,p0=in2,r0			// bail if len <= 0
+(p6)	br.ret.spnt.few rp
+
+	sub	r14=in1,in0 ;;			// check for overlap
+	cmp.ltu	p6,p0=r14,in2			// dst-src < len
+(p6)	br.cond.spnt.few 5f
+
+	extr.u	r14=in0,0,3			// src & 7
+	extr.u	r15=in1,0,3 ;;			// dst & 7
+	cmp.eq	p6,p0=r14,r15			// different alignment?
+(p6)	br.cond.spnt.few 2f			// branch if same alignment
+
+1:	ld1	r14=[in0],1 ;;			// copy bytewise
+	st1	[in1]=r14,1
+	add	in2=-1,in2 ;;			// len--
+	cmp.ne	p6,p0=r0,in2
+(p6)	br.cond.dptk.few 1b			// loop
+	br.ret.sptk.few rp			// done
+
+2:	cmp.eq	p6,p0=r14,r0			// aligned?
+(p6)	br.cond.sptk.few 4f
+
+3:	ld1	r14=[in0],1 ;;			// copy bytewise
+	st1	[in1]=r14,1
+	extr.u	r15=in0,0,3			// src & 7
+	add	in2=-1,in2 ;;			// len--
+	cmp.eq	p6,p0=r0,in2			// done?
+	cmp.eq	p7,p0=r0,r15 ;;			// aligned now?
+(p6)	br.ret.spnt.few rp			// return if done
+(p7)	br.cond.spnt.few 4f			// go to main copy
+	br.cond.sptk.few 3b			// more bytes to copy
+
+	// At this point, in2 is non-zero
+
+4:	mov	r14=8 ;;
+	cmp.ltu	p6,p0=in2,r14 ;;		// len < 8?
+(p6)	br.cond.spnt.few 1b			// byte copy the end
+	ld8	r15=[in0],8 ;;			// copy word
+	st8	[in1]=r15,8
+	add	in2=-8,in2 ;;			// len -= 8
+	cmp.ne	p6,p0=r0,in2			// done?
+(p6)	br.cond.spnt.few 4b			// again
+
+	br.ret.sptk.few rp			// return
+
+	// Don't bother optimising overlap case
+
+5:	add	in0=in0,in2
+	add	in1=in1,in2 ;;
+	add	in0=-1,in0
+	add	in1=-1,in1 ;;
+
+6:	ld1	r14=[in0],-1 ;;			// copy backwards, bytewise
+	st1	[in1]=r14,-1
+	add	in2=-1,in2 ;;
+	cmp.ne	p6,p0=r0,in2
+(p6)	br.cond.spnt.few 6b
+
+	br.ret.sptk.few rp
+END(bcopy)
+
+// memcpy(dst, src, len): swap the arguments into bcopy order and let
+// bcopy do the work.  NOTE(review): returns bcopy's 0 rather than dst
+// as the C standard memcpy would; in-kernel callers ignore the result.
+ENTRY(memcpy,3)
+	mov	r14=in0 ;;
+	mov	in0=in1 ;;
+	mov	in1=r14
+	br.cond.sptk.few bcopy
+END(memcpy)
+	
+// copyin(uaddr, kaddr, len): copy len bytes in from user space.
+// Returns 0 on success, or EFAULT via copyerr if the source address
+// is outside user space or the copy faults.
+ENTRY(copyin, 3)
+	.prologue
+	.regstk	3, 3, 3, 0
+	.save	ar.pfs,loc0
+	alloc	loc0=ar.pfs,3,3,3,0
+	.save	rp,loc1
+	mov	loc1=rp
+	.body
+
+	movl	loc2=VM_MAXUSER_ADDRESS		// make sure that src addr
+	;; 
+	cmp.geu	p6,p0=in0,loc2			// is in user space.
+	;; 
+(p6)	br.cond.spnt.few copyerr		// if it's not, error out.
+	ld8.acq	r15=[r13]			// r15 = curthread
+	movl	r14=copyerr			// set up fault handler.
+	;;
+	add	r15=TD_PCB,r15			// find pcb
+	;;
+	ld8	r15=[r15]
+	;;
+	add	loc2=PCB_ONFAULT,r15
+	;;
+	st8	[loc2]=r14
+	;;
+	mov	out0=in0
+	mov	out1=in1
+	mov	out2=in2
+	;;
+	br.call.sptk.few rp=bcopy		// do the copy.
+	st8	[loc2]=r0			// kill the fault handler.
+	mov	ar.pfs=loc0			// restore ar.pfs
+	mov	rp=loc1				// restore ra.
+	br.ret.sptk.few rp			// ret0 left over from bcopy
+END(copyin)
+
+// copyout(kaddr, uaddr, len): copy len bytes out to user space.
+// Returns 0 on success, or EFAULT via copyerr if the destination
+// address is outside user space or the copy faults.
+ENTRY(copyout, 3)
+	.prologue
+	.regstk	3, 3, 3, 0
+	.save	ar.pfs,loc0
+	alloc	loc0=ar.pfs,3,3,3,0
+	.save	rp,loc1
+	mov	loc1=rp
+	.body
+
+	movl	loc2=VM_MAXUSER_ADDRESS		// make sure that dest addr
+	;; 
+	cmp.geu	p6,p0=in1,loc2			// is in user space.
+	;; 
+(p6)	br.cond.spnt.few copyerr		// if it's not, error out.
+	ld8.acq	r15=[r13]			// r15 = curthread
+	movl	r14=copyerr			// set up fault handler.
+	;;
+	add	r15=TD_PCB,r15			// find pcb
+	;;
+	ld8	r15=[r15]
+	;;
+	add	loc2=PCB_ONFAULT,r15
+	;;
+	st8	[loc2]=r14
+	;;
+	mov	out0=in0
+	mov	out1=in1
+	mov	out2=in2
+	;;
+	br.call.sptk.few rp=bcopy		// do the copy.
+	st8	[loc2]=r0			// kill the fault handler.
+	mov	ar.pfs=loc0			// restore ar.pfs
+	mov	rp=loc1				// restore ra.
+	br.ret.sptk.few rp			// ret0 left over from bcopy
+END(copyout)
+
+// Fault handler for copyin/copyout/copyinstr: clear pcb_onfault and
+// return EFAULT to the interrupted copy's caller.
+ENTRY(copyerr, 0)
+	ld8.acq	r14=[r13] ;;			// r14 = curthread
+	add	r14=TD_PCB,r14 ;;		// curthread->td_pcb
+	ld8	r14=[r14] ;;
+	add	r14=PCB_ONFAULT,r14 ;;		// &curthread->td_pcb->pcb_onfault
+	st8	[r14]=r0			// reset fault handler
+	
+	mov	ret0=EFAULT			// return EFAULT
+	br.ret.sptk.few rp
+END(copyerr)
+
+#if defined(GPROF)
+/*
+ * Important registers:
+ *      r8      structure return address
+ *      rp      our return address
+ *      in0     caller's ar.pfs
+ *      in1     caller's gp
+ *      in2     caller's rp
+ *      in3     GOT entry
+ *      ar.pfs  our pfs
+ */
+// GPROF profiling hook: record the caller/callee arc via __mcount,
+// then return to the instrumented caller with its r8, gp, rp and
+// ar.pfs intact (register roles are listed in the comment above).
+ENTRY_NOPROFILE(_mcount, 4)
+	alloc		loc0 = ar.pfs, 4, 3, 2, 0
+	mov		loc1 = r8		// preserve struct-return address
+	mov		loc2 = rp
+	;;
+	mov		out0 = in2		// caller's rp
+	mov		out1 = rp		// our return address
+	br.call.sptk	rp = __mcount
+	;;
+1:
+	// Two-step return: branch to 2f via b7 with the caller's ar.pfs
+	// staged in r14, then from 2f restore ar.pfs and branch back
+	// through b6 without disturbing the caller's state.
+	mov		gp = in1
+	mov		r14 = ip
+	mov		b7 = loc2
+	;;
+	add		r14 = 2f - 1b, r14
+	mov		ar.pfs = loc0
+	mov		rp = in2
+	;;
+	mov		b7 = r14
+	mov		b6 = loc2
+	mov		r8 = loc1
+	mov		r14 = in0
+	br.ret.sptk	b7
+	;;
+2:
+	mov		ar.pfs = r14
+	br.sptk		b6
+	;;
+END(_mcount)
+#endif


Property changes on: trunk/sys/ia64/ia64/support.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/sys_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/sys_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/sys_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,61 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/sys_machdep.c 202097 2010-01-11 18:10:13Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysproto.h>
+#include <sys/sysent.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+#include <machine/sysarch.h>
+
+#ifndef _SYS_SYSPROTO_H_
+struct sysarch_args {
+	int op;
+	char *parms;
+};
+#endif
+
+/*
+ * sysarch - machine-dependent system call multiplexer.  No
+ * ia64-specific operations are implemented, so every request is
+ * rejected with EINVAL regardless of uap->op.
+ */
+int
+sysarch(struct thread *td, struct sysarch_args *uap)
+{
+
+	switch (uap->op) {
+	default:
+		break;
+	}
+	return (EINVAL);
+}


Property changes on: trunk/sys/ia64/ia64/sys_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/syscall.S
===================================================================
--- trunk/sys/ia64/ia64/syscall.S	                        (rev 0)
+++ trunk/sys/ia64/ia64/syscall.S	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,570 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002, 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/syscall.S 270573 2014-08-25 15:15:59Z marcel $
+ */
+
+#include <sys/syscall.h>
+#include <machine/asm.h>
+#include <assym.s>
+
+/*
+ * A process performs a syscall by performing an indirect call to the
+ * address stored in ar.k5. The contents of ar.pfs and rp should be
+ * saved prior to the syscall in r9 and r10 respectively. The kernel
+ * will restore these values on return. The value of gp is preserved
+ * across the call. This allows for small enough syscall stubs without
+ * getting too weird.
+ * The address in ar.k5 is the start of the EPC gateway page and also
+ * the syscall entry point. The syscall code in the gateway page is
+ * primarily responsible for increasing the privilege level, but will
+ * also make sure we have a reliable psr.
+ *
+ * A process defines:
+ *	r8		-	syscall number
+ *	r9		-	copy of ar.pfs
+ *	r10		-	copy of rp
+ *	in0-in7		-	syscall arguments
+ *
+ * A syscall returns:
+ *	r8+r9		-	syscall return value(s)
+ *	r10		-	syscall error flag
+ *	ar.pfs		-	restored from r9
+ *	rp		-	restored from r10
+ *	gp		-	preserved
+ *
+ * The EPC syscall code defines:
+ *	r11		-	copy of psr.l
+ *	r14		-	Kernel memory stack
+ *	r15		-	Kernel register stack
+ *
+ * Also in the gateway page are the signal trampolines. As such, stacks
+ * don't have to be made executable per se. Since debuggers have a need
+ * to know about trampolines, we probably need to define a table of
+ * vectors or something along those lines so that debuggers can get the
+ * information they need and we have the freedom to move code around.
+ */
+
+	.section	.text.gateway, "ax"
+	.align		PAGE_SIZE
+	.global		ia64_gateway_page
+// Entry point of the EPC gateway page (its address lives in ar.k5,
+// see the header comment above).  The epc instruction raises the
+// privilege level; we then branch to epc_syscall with the kernel
+// stacks staged in r14/r15 and the user psr saved in r11.
+ia64_gateway_page:
+{	.mmb
+	mov		r14=ar.k7		// Memory stack
+	mov		r15=ar.k6		// Register stack
+	epc					// enter privileged code
+	;;
+}
+{	.mlx
+	mov		r11=psr
+	movl		r31=epc_syscall
+	;;
+}
+{	.mib
+	rum		psr.be			// clear big-endian mode
+	mov		b7=r31
+	br		b7
+	;;
+}
+// Return path for native syscalls: epc_syscall branches back here
+// with the caller's rnat/rsc/pfs/fpsr in r22/r24/r20/r25, the user
+// return address in b0 (from r18) and the syscall stub's rp in b6.
+gw_ret:
+{	.mmi
+	mov		ar.rnat=r22
+	;;
+	mov		ar.rsc=r24
+	mov		ar.pfs=r20
+}
+{	.mib
+	mov		ar.fpsr=r25
+	mov		b0=r18
+	br.sptk		b6
+	;;
+}
+// Return path for ia32 processes: flush the register stack and
+// switch to the ia32 instruction set.
+gw_ret_ia32:
+{	.mmi
+	flushrs
+	nop		0
+	nop		0
+	;;
+}
+{	.mib
+	nop		0
+	nop		0
+	br.ia.sptk	b6
+	;;
+}
+
+
+// Signal trampoline for processes that make syscalls with the break
+// instruction.  Spills the dirty register stack state into the
+// ucontext at sp+16, calls the handler (whose function descriptor --
+// address then gp -- is at [r10]), then issues SYS_sigreturn; if
+// sigreturn ever returns, fall through into SYS_exit.
+ENTRY_NOPROFILE(break_sigtramp, 0)
+{	.mib
+	mov		ar.rsc=0		// stop the RSE
+	cmp.ne		p15,p0=0,gp		// p15 = new bspstore given?
+	cover
+	;;
+}
+{	.mmi
+	flushrs
+(p15)	invala
+	add		r16=16+UC_MCONTEXT+MC_SPECIAL,sp
+	;;
+}
+{	.mmi
+	mov		r17=ar.bsp
+	mov		r18=ar.rnat
+	add		r14=40,r16
+	;;
+}
+{	.mmi
+	st8		[r14]=r17,64		// bspstore
+(p15)	mov		ar.bspstore=gp
+	add		r15=48,r16
+	;;
+}
+{	.mmi
+	st8		[r15]=r18		// rnat
+	st8		[r14]=r0		// ndirty
+	nop		0
+	;;
+}
+{	.mmi
+	alloc		r14=ar.pfs, 0, 0, 3, 0
+	mov		ar.rsc=15		// restart the RSE
+	mov		out0=r8			// signal number
+	;;
+}
+{	.mmi
+	ld8		r16=[r10],8		// function address
+	;;
+	ld8		gp=[r10]		// function's gp value
+	mov		b7=r16
+	;;
+}
+{	.mib
+	mov		out1=r9
+	add		out2=16,sp		// &ucontext
+	br.call.sptk	rp=b7			// invoke the handler
+	;;
+}
+{	.mmi
+	mov		r15=SYS_sigreturn
+	add		out0=16,sp
+	break		0x100000		// syscall
+	;;
+}
+{	.mmi
+	mov		r15=SYS_exit		// sigreturn failed; exit
+	mov		out0=ret0
+	break		0x100000		// syscall
+	;;
+}
+END(break_sigtramp)
+
+// Signal trampoline for EPC-syscall processes: switch the backing
+// store if a new one is supplied in gp, call the handler (function
+// descriptor at [r10]), then sigreturn -- and exit if that fails.
+ENTRY_NOPROFILE(epc_sigtramp, 0)
+{	.mmi
+	ld8		r16=[r10],8		// function address
+	mov		ar.rsc=0		// stop the RSE
+	cmp.ne		p15,p0=0,gp		// p15 = new bspstore given?
+	;;
+}
+{	.mmi
+(p15)	invala
+(p15)	mov		ar.bspstore=gp
+	mov		b7=r16
+	;;
+}
+{	.mmi
+	alloc		r14=ar.pfs, 0, 0, 3, 0
+	mov		ar.rsc=15		// restart the RSE
+	nop		0
+	;;
+}
+{	.mii
+	ld8		gp=[r10]		// function's gp value
+	mov		out0=r8			// signal number
+	mov		out1=r9
+}
+{	.mib
+	add		out2=16,sp		// &ucontext
+	nop		0
+	br.call.sptk	rp=b7			// invoke the handler
+	;;
+}
+	add		out0=16,sp
+	CALLSYS_NOERROR(sigreturn)
+	mov		out0=ret0		// sigreturn failed; exit
+	CALLSYS_NOERROR(exit)
+END(epc_sigtramp)
+
+	.align		PAGE_SIZE
+
+	.text
+
+// Kernel-side continuation of the gateway page.  On entry (set up by
+// ia64_gateway_page): r8 = syscall number, r9/r10 = the syscall
+// caller's ar.pfs/rp, r11 = user psr, r14/r15 = kernel memory and
+// register stacks, in0-in7 = arguments.  Build a trapframe on the
+// kernel stack, switch backing store, call syscall(), run do_ast(),
+// then restore user state and return through gw_ret (native) or
+// gw_ret_ia32 (ia32 processes).
+ENTRY_NOPROFILE(epc_syscall, 8)
+	.prologue
+	.unwabi		@svr4, 'E'
+	.save		rp, r0
+	.body
+{	.mmi
+	mov		r16=ar.rsc
+	mov		ar.rsc=0		// stop the RSE
+	nop		0
+	;;
+}
+{	.mmi
+	mov		r18=ar.bspstore
+	;;
+	mov		r19=ar.rnat
+	dep		r15=r18,r15,0,9		// keep low 9 bits of user bspstore
+	;;
+}
+{	.mmi
+	mov		r21=ar.unat
+	add		r30=-SIZEOF_TRAPFRAME,r14	// carve out the trapframe
+	mov		r20=sp
+	;;
+}
+{	.mii
+	mov		r17=r13
+	dep		r30=0,r30,0,10		// align the trapframe
+	;;
+	add		sp=-16,r30
+	;;
+}
+{	.mmi
+	mov		ar.bspstore=r15		// switch to kernel backing store
+	;;
+	mov		ar.rnat=r19
+	add		r31=8,r30
+	;;
+}
+{	.mmi
+	mov		r13=ar.k4
+	mov		r22=ar.fpsr
+	sub		r29=r14,r30
+}
+{	.mmi
+	mov		r23=ar.bsp
+	mov		ar.rsc=3		// restart the RSE
+	add		r28=FRAME_SYSCALL,r0
+	;;
+}
+{	.mmi
+	st8		[r30]=r29,16		// tf_length
+	st8		[r31]=r28,16		// tf_flags
+	mov		r24=rp
+	;;
+}
+{	.mmi
+	st8		[r30]=r20,16		// sp
+	st8		[r31]=r21,16		// unat
+	mov		r25=pr
+	;;
+}
+{	.mmi
+	st8		[r30]=r10,16		// rp (syscall caller)
+	st8		[r31]=r25,16		// pr
+	mov		r26=ar.pfs
+	;;
+}
+{	.mmi
+	st8		[r30]=r9,16		// pfs (syscall caller)
+	st8		[r31]=r18,16		// bspstore
+	sub		r27=r23,r15		// ndirty (in bytes)
+	;;
+}
+{	.mmi
+	st8		[r30]=r19,16		// rnat
+	st8		[r31]=r0,16		// __spare
+	dep		r11=-1,r11,44,1		// Set psr.bn=1
+	;;
+}
+{	.mmi
+	st8		[r30]=r17,16		// tp
+	st8		[r31]=r16,16		// rsc
+	dep		r11=-1,r11,32,2		// Set psr.cpl=3
+	;;
+}
+{	.mmi
+	st8		[r30]=r22,16		// fpsr
+	st8		[r31]=r11,16		// psr
+	nop		0
+	;;
+}
+{	.mmi
+	st8		[r30]=r1,16		// gp
+	st8		[r31]=r27,16		// ndirty
+	nop		0
+	;;
+}
+{	.mmi
+	st8		[r30]=r26,16		// pfs (syscall stub)
+	st8		[r31]=r24,16		// rp (syscall stub)
+	nop		0
+	;;
+}
+{	.mmi
+	st8		[r30]=r0,80		// ifa
+	st8		[r31]=r0,80		// isr
+	nop		0
+	;;
+}
+{	.mmi
+	alloc		r14=ar.pfs,0,0,8,0	// make the args visible
+	st8		[r30]=r8,16		// syscall number (=r15)
+	nop		0
+	;;
+}
+{	.mmi
+	.mem.offset	0,0
+	st8.spill	[r31]=r32,16		// arg0 (=r16)
+	.mem.offset	8,0
+	st8.spill	[r30]=r33,16		// arg1 (=r17)
+	nop		0
+	;;
+}
+{	.mmi
+	.mem.offset	16,0
+	st8.spill	[r31]=r34,16		// arg2 (=r18)
+	.mem.offset	24,0
+	st8.spill	[r30]=r35,16		// arg3 (=r19)
+	nop		0
+	;;
+}
+{	.mmi
+	.mem.offset	32,0
+	st8.spill	[r31]=r36,16		// arg4 (=r20)
+	.mem.offset	40,0
+	st8.spill	[r30]=r37,16		// arg5 (=r21)
+	nop		0
+	;;
+}
+{	.mmi
+	.mem.offset	48,0
+	st8.spill	[r31]=r38		// arg6 (=r22)
+	.mem.offset	56,0
+	st8.spill	[r30]=r39		// arg7 (=r23)
+	nop		0
+	;;
+}
+{	.mlx
+	ssm		psr.dfh|psr.ac
+	movl		gp=__gp			// kernel global pointer
+	;;
+}
+1:
+{	.mib
+	srlz.d
+	add		out0=16,sp		// &trapframe
+	br.call.sptk	rp=syscall		// dispatch the syscall
+	;;
+}
+	.global		epc_syscall_return
+epc_syscall_return:
+{	.mib
+	add		out0=16,sp		// &trapframe
+	nop		0
+	br.call.sptk	rp=do_ast		// handle pending ASTs
+	;;
+}
+{	.mib
+	cmp4.eq		p15,p0=ERESTART,r8
+	add		r14=24,sp
+(p15)	br.spnt		1b			// restart syscall
+	;;
+}
+{	.mmi
+	ld8		r14=[r14]		// tf_flags
+	nop		0
+	nop		0
+	;;
+}
+{	.mib
+	nop		0
+	tbit.z		p15,p0=r14,0		// not FRAME_SYSCALL anymore?
+(p15)	br.spnt		exception_restore	// then do a full restore
+	;;
+}
+{	.mmi
+	alloc		r31=ar.pfs,0,0,0,0
+	add		r14=32,sp
+	add		r15=16,sp
+	;;
+}
+{	.mmi
+	ld8		r31=[r15],24		// tf_length
+	ld8		r16=[r14],16		// sp
+	add		sp=16,sp
+	;;
+}
+{	.mmi
+	ld8		r17=[r15],16		// unat (before)
+	ld8		r18=[r14],16		// rp (syscall caller)
+	add		r31=r31,sp
+	;;
+}
+{	.mmi
+	ld8		r19=[r15],16		// pr
+	ld8		r20=[r14],16		// pfs (syscall caller)
+	nop		0
+	;;
+}
+{	.mmi
+	ld8		r21=[r15],24		// bspstore
+	ld8		r22=[r14],24		// rnat
+	mov		pr=r19,0x1fffe
+	;;
+}
+{	.mmi
+	ld8		r23=[r15],16		// tp
+	ld8		r24=[r14],16		// rsc
+	nop		0
+	;;
+}
+{	.mmi
+	ld8		r25=[r15],16		// fpsr
+	ld8		r26=[r14],16		// psr
+	nop		0
+	;;
+}
+{	.mmi
+	ld8		gp=[r15],16		// gp
+	ld8		r27=[r14],16		// ndirty
+	tbit.z		p14,p15=r26,34		// p14=ia64, p15=ia32
+	;;
+}
+{	.mmi
+	ld8		r28=[r15],56		// pfs (syscall stub)
+	ld8		r29=[r14],56		// rp (syscall stub)
+	shl		r27=r27,16		// ndirty into rsc.loadrs
+	;;
+}
+{	.mmi
+	ld8		r8=[r15],16		// r8
+	mov		ar.rsc=r27
+	mov		b6=r29
+	;;
+}
+{	.mmb
+	ld8		r9=[r14],40		// r9
+	ld8		r10=[r15],40		// r10
+(p15)	br.spnt		epc_syscall_setup_ia32
+	;;
+}
+{	.mmi
+	loadrs					// reload the user dirty frames
+	mov		ar.k7=r31
+	mov		sp=r16
+	;;
+}
+{	.mmi
+	mov		r30=ar.bspstore
+	mov		r14=ar.k5		// gateway page address
+	mov		ar.pfs=r28
+	;;
+}
+{	.mmi
+	mov		ar.bspstore=r21
+	add		r14=gw_ret-ia64_gateway_page,r14
+	dep		r30=0,r30,0,13		// 8KB aligned.
+	;;
+}
+{	.mii
+	mov		ar.k6=r30
+	mov		r13=r23
+	nop		0
+}
+{	.mmi
+	mov		psr.l=r26
+	mov		ar.unat=r17
+	nop		0
+	;;
+}
+{	.mib
+	srlz.d
+	mov		b7=r14
+	br.ret.sptk	b7			// back out via gw_ret
+	;;
+}
+// Variant of the restore path for ia32 processes: additionally
+// reload the ia32 state from the trapframe and leave via gw_ret_ia32.
+epc_syscall_setup_ia32:
+{	.mmi
+	loadrs
+	mov		ar.k7=r31
+	mov		sp=r16
+	;;
+}
+{	.mmi
+	mov		r30=ar.bspstore
+	;;
+	mov		ar.unat=r17
+	dep		r30=0,r30,0,13		// 8KB aligned
+	;;
+}
+{	.mmi
+	mov		ar.k6=r30
+	mov		ar.bspstore=r21
+	mov		r11=r0
+	;;
+}
+{	.mmi
+	ld8		r16=[r14],64
+	ld8		r17=[r15],80
+	mov		r13=r0
+	;;
+}
+ 
+	ld8		r24=[r14],32
+	ld8		r27=[r15],16
+	;;
+	ld8		r28=[r14],16
+	ld8		r29=[r15],16
+	;;
+	ld8		r30=[r14],40
+	ld8		r31=[r15],40
+	;;
+
+{	.mmi
+	ld8		r2=[r14]
+	ld8		r3=[r15]
+	mov		r14=r0
+	;;
+}
+{	.mmi
+	mov		ar.csd=r2
+	mov		ar.ssd=r3
+	mov		r15=r0
+	;;
+}
+
+	mov		r2=ar.k5		// gateway page address
+	mov		psr.l=r26
+	;;
+	srlz.d
+	add		r2=gw_ret_ia32-ia64_gateway_page,r2
+	;;
+	mov		ar.rsc=0
+	mov		b7=r2
+	br.ret.sptk	b7			// back out via gw_ret_ia32
+	;;
+END(epc_syscall)


Property changes on: trunk/sys/ia64/ia64/syscall.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/trap.c
===================================================================
--- trunk/sys/ia64/ia64/trap.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/trap.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,963 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2005 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/trap.c 270296 2014-08-21 19:51:07Z emaste $");
+
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kdb.h>
+#include <sys/ktr.h>
+#include <sys/sysproto.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/efi.h>
+#include <sys/exec.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/vmmeter.h>
+#include <sys/sysent.h>
+#include <sys/signalvar.h>
+#include <sys/syscall.h>
+#include <sys/pioctl.h>
+#include <sys/ptrace.h>
+#include <sys/sysctl.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_param.h>
+#include <sys/ptrace.h>
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/reg.h>
+#include <machine/pal.h>
+#include <machine/fpu.h>
+#include <machine/pcb.h>
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+
+#include <security/audit/audit.h>
+
+#include <ia64/disasm/disasm.h>
+
+/*
+ * machdep.print_usertrap: when non-zero, handled user traps are also
+ * dumped to the console via printtrap() before the signal is posted.
+ */
+static int print_usertrap = 0;
+SYSCTL_INT(_machdep, OID_AUTO, print_usertrap,
+    CTLFLAG_RW, &print_usertrap, 0, "");
+
+static void break_syscall(struct trapframe *tf);
+
+/*
+ * EFI-Provided FPSWA interface (Floating Point SoftWare Assist)
+ */
+extern struct fpswa_iface *fpswa_iface;
+
+/* Human-readable names for interruption vectors, indexed by vector number. */
+static const char *ia64_vector_names[] = {
+	"VHPT Translation",			/* 0 */
+	"Instruction TLB",			/* 1 */
+	"Data TLB",				/* 2 */
+	"Alternate Instruction TLB",		/* 3 */
+	"Alternate Data TLB",			/* 4 */
+	"Data Nested TLB",			/* 5 */
+	"Instruction Key Miss",			/* 6 */
+	"Data Key Miss",			/* 7 */
+	"Dirty-Bit",				/* 8 */
+	"Instruction Access-Bit",		/* 9 */
+	"Data Access-Bit",			/* 10 */
+	"Break Instruction",			/* 11 */
+	"External Interrupt",			/* 12 */
+	"Reserved 13",				/* 13 */
+	"Reserved 14",				/* 14 */
+	"Reserved 15",				/* 15 */
+	"Reserved 16",				/* 16 */
+	"Reserved 17",				/* 17 */
+	"Reserved 18",				/* 18 */
+	"Reserved 19",				/* 19 */
+	"Page Not Present",			/* 20 */
+	"Key Permission",			/* 21 */
+	"Instruction Access Rights",		/* 22 */
+	"Data Access Rights",			/* 23 */
+	"General Exception",			/* 24 */
+	"Disabled FP-Register",			/* 25 */
+	"NaT Consumption",			/* 26 */
+	"Speculation",				/* 27 */
+	"Reserved 28",				/* 28 */
+	"Debug",				/* 29 */
+	"Unaligned Reference",			/* 30 */
+	"Unsupported Data Reference",		/* 31 */
+	"Floating-point Fault",			/* 32 */
+	"Floating-point Trap",			/* 33 */
+	"Lower-Privilege Transfer Trap",	/* 34 */
+	"Taken Branch Trap",			/* 35 */
+	"Single Step Trap",			/* 36 */
+	"Reserved 37",				/* 37 */
+	"Reserved 38",				/* 38 */
+	"Reserved 39",				/* 39 */
+	"Reserved 40",				/* 40 */
+	"Reserved 41",				/* 41 */
+	"Reserved 42",				/* 42 */
+	"Reserved 43",				/* 43 */
+	"Reserved 44",				/* 44 */
+	"IA-32 Exception",			/* 45 */
+	"IA-32 Intercept",			/* 46 */
+	"IA-32 Interrupt",			/* 47 */
+	"Reserved 48",				/* 48 */
+	"Reserved 49",				/* 49 */
+	"Reserved 50",				/* 50 */
+	"Reserved 51",				/* 51 */
+	"Reserved 52",				/* 52 */
+	"Reserved 53",				/* 53 */
+	"Reserved 54",				/* 54 */
+	"Reserved 55",				/* 55 */
+	"Reserved 56",				/* 56 */
+	"Reserved 57",				/* 57 */
+	"Reserved 58",				/* 58 */
+	"Reserved 59",				/* 59 */
+	"Reserved 60",				/* 60 */
+	"Reserved 61",				/* 61 */
+	"Reserved 62",				/* 62 */
+	"Reserved 63",				/* 63 */
+	"Reserved 64",				/* 64 */
+	"Reserved 65",				/* 65 */
+	"Reserved 66",				/* 66 */
+	"Reserved 67",				/* 67 */
+};
+
+/*
+ * Maps a single bit or a contiguous multi-bit field (given as a mask) to
+ * the short name printed for it by printbits().
+ */
+struct bitname {
+	uint64_t mask;
+	const char* name;
+};
+
+/*
+ * Print a comma-separated description of 'mask' using the 'count' entries
+ * in 'bn'.  A single set bit is printed by name; a multi-bit field is
+ * printed as name=value, where the value is the field right-shifted into
+ * place.
+ */
+static void
+printbits(uint64_t mask, struct bitname *bn, int count)
+{
+	uint64_t lsb;
+	int first, n;
+
+	first = 1;
+	for (n = 0; n < count; n++) {
+		/* Lowest set bit of the mask: the unit of the field. */
+		lsb = bn[n].mask & ~(bn[n].mask - 1);
+		if (bn[n].mask > lsb) {
+			/* Field wider than one bit: always printed. */
+			if (!first)
+				printf(",");
+			first = 0;
+			printf("%s=%ld", bn[n].name,
+			       (mask & bn[n].mask) / lsb);
+		} else if (mask & lsb) {
+			/* Single bit: printed only when set. */
+			if (!first)
+				printf(",");
+			first = 0;
+			printf("%s", bn[n].name);
+		}
+	}
+}
+
+/* Bit/field names of the Processor Status Register, for printbits(). */
+struct bitname psr_bits[] = {
+	{IA64_PSR_BE,	"be"},
+	{IA64_PSR_UP,	"up"},
+	{IA64_PSR_AC,	"ac"},
+	{IA64_PSR_MFL,	"mfl"},
+	{IA64_PSR_MFH,	"mfh"},
+	{IA64_PSR_IC,	"ic"},
+	{IA64_PSR_I,	"i"},
+	{IA64_PSR_PK,	"pk"},
+	{IA64_PSR_DT,	"dt"},
+	{IA64_PSR_DFL,	"dfl"},
+	{IA64_PSR_DFH,	"dfh"},
+	{IA64_PSR_SP,	"sp"},
+	{IA64_PSR_PP,	"pp"},
+	{IA64_PSR_DI,	"di"},
+	{IA64_PSR_SI,	"si"},
+	{IA64_PSR_DB,	"db"},
+	{IA64_PSR_LP,	"lp"},
+	{IA64_PSR_TB,	"tb"},
+	{IA64_PSR_RT,	"rt"},
+	{IA64_PSR_CPL,	"cpl"},
+	{IA64_PSR_IS,	"is"},
+	{IA64_PSR_MC,	"mc"},
+	{IA64_PSR_IT,	"it"},
+	{IA64_PSR_ID,	"id"},
+	{IA64_PSR_DA,	"da"},
+	{IA64_PSR_DD,	"dd"},
+	{IA64_PSR_SS,	"ss"},
+	{IA64_PSR_RI,	"ri"},
+	{IA64_PSR_ED,	"ed"},
+	{IA64_PSR_BN,	"bn"},
+	{IA64_PSR_IA,	"ia"},
+};
+
+/*
+ * Pretty-print a PSR value using the psr_bits table.
+ */
+static void
+printpsr(uint64_t psr)
+{
+	const int nbits = sizeof(psr_bits) / sizeof(psr_bits[0]);
+
+	printbits(psr, psr_bits, nbits);
+}
+
+/* Bit/field names of the Interruption Status Register, for printbits(). */
+struct bitname isr_bits[] = {
+	{IA64_ISR_CODE,	"code"},
+	{IA64_ISR_VECTOR, "vector"},
+	{IA64_ISR_X,	"x"},
+	{IA64_ISR_W,	"w"},
+	{IA64_ISR_R,	"r"},
+	{IA64_ISR_NA,	"na"},
+	{IA64_ISR_SP,	"sp"},
+	{IA64_ISR_RS,	"rs"},
+	{IA64_ISR_IR,	"ir"},
+	{IA64_ISR_NI,	"ni"},
+	{IA64_ISR_SO,	"so"},
+	{IA64_ISR_EI,	"ei"},
+	{IA64_ISR_ED,	"ed"},
+};
+
+/*
+ * Pretty-print an ISR value using the isr_bits table.
+ */
+static void
+printisr(uint64_t isr)
+{
+	const int nbits = sizeof(isr_bits) / sizeof(isr_bits[0]);
+
+	printbits(isr, isr_bits, nbits);
+}
+
+/*
+ * Dump a trap to the console: vector name, iip/ipsr/isr/ifa with decoded
+ * bits, and the current thread's identity.  'isfatal' and 'user' only
+ * affect the heading.
+ */
+static void
+printtrap(int vector, struct trapframe *tf, int isfatal, int user)
+{
+	printf("\n");
+	printf("%s %s trap (cpu %d):\n", isfatal? "fatal" : "handled",
+	       user ? "user" : "kernel", PCPU_GET(cpuid));
+	printf("\n");
+	printf("    trap vector = 0x%x (%s)\n",
+	       vector, ia64_vector_names[vector]);
+	printf("    cr.iip      = 0x%lx\n", tf->tf_special.iip);
+	printf("    cr.ipsr     = 0x%lx (", tf->tf_special.psr);
+	printpsr(tf->tf_special.psr);
+	printf(")\n");
+	printf("    cr.isr      = 0x%lx (", tf->tf_special.isr);
+	printisr(tf->tf_special.isr);
+	printf(")\n");
+	printf("    cr.ifa      = 0x%lx\n", tf->tf_special.ifa);
+	/* The IA-32 control registers are only meaningful when psr.is is set. */
+	if (tf->tf_special.psr & IA64_PSR_IS) {
+		printf("    ar.cflg     = 0x%lx\n", ia64_get_cflg());
+		printf("    ar.csd      = 0x%lx\n", ia64_get_csd());
+		printf("    ar.ssd      = 0x%lx\n", ia64_get_ssd());
+	}
+	printf("    curthread   = %p\n", curthread);
+	if (curthread != NULL)
+		printf("        pid = %d, comm = %s\n",
+		       curthread->td_proc->p_pid, curthread->td_name);
+	printf("\n");
+}
+
+/*
+ * We got a trap caused by a break instruction and the immediate was 0.
+ * This indicates that we may have a break.b with some non-zero immediate.
+ * The break.b doesn't cause the immediate to be put in cr.iim.  Hence,
+ * we need to disassemble the bundle and return the immediate found there.
+ * This may be a 0 value anyway.  Return 0 for any error condition.  This
+ * will result in a SIGILL, which is pretty much the best thing to do.
+ */
+static uint64_t
+trap_decode_break(struct trapframe *tf)
+{
+	struct asm_bundle bundle;
+	struct asm_inst *inst;
+	uint64_t ri;
+	int slot;
+
+	if (!asm_decode(tf->tf_special.iip, &bundle))
+		return (0);
+
+	/* psr.ri selects which slot of the bundle trapped. */
+	ri = tf->tf_special.psr & IA64_PSR_RI;
+	if (ri == IA64_PSR_RI_0)
+		slot = 0;
+	else if (ri == IA64_PSR_RI_1)
+		slot = 1;
+	else
+		slot = 2;
+	inst = &bundle.b_inst[slot];
+
+	/*
+	 * Sanity checking: It must be a break instruction and the operand
+	 * that has the break value must be an immediate.
+	 */
+	if (inst->i_op != ASM_OP_BREAK ||
+	    inst->i_oper[1].o_type != ASM_OPER_IMM)
+		return (0);
+
+	return (inst->i_oper[1].o_value);
+}
+
+/*
+ * Report a fatal trap, give the debugger (if configured) a chance to take
+ * over, and panic.  Does not return.
+ */
+void
+trap_panic(int vector, struct trapframe *tf)
+{
+
+	printtrap(vector, tf, 1, TRAPF_USERMODE(tf));
+#ifdef KDB
+	kdb_trap(vector, 0, tf);
+#endif
+	panic("trap");
+}
+
+/*
+ * Process pending ASTs before returning to user mode.
+ */
+int
+do_ast(struct trapframe *tf)
+{
+
+	ia64_disable_intr();
+	/* Re-check with interrupts disabled so a newly posted AST isn't lost. */
+	while (curthread->td_flags & (TDF_ASTPENDING|TDF_NEEDRESCHED)) {
+		ia64_enable_intr();
+		ast(tf);
+		ia64_disable_intr();
+	}
+	/*
+	 * Keep interrupts disabled. We return r10 as a favor to the EPC
+	 * syscall code so that it can quickly determine if the syscall
+	 * needs to be restarted or not.
+	 */
+	return (tf->tf_scratch.gr10);
+}
+
+/*
+ * Trap is called from exception.s to handle most types of processor traps.
+ */
+/*ARGSUSED*/
+void
+trap(int vector, struct trapframe *tf)
+{
+	struct proc *p;
+	struct thread *td;
+	uint64_t ucode;
+	int error, sig, user;
+	ksiginfo_t ksi;
+
+	user = TRAPF_USERMODE(tf) ? 1 : 0;
+	if (user)
+		ia64_set_fpsr(IA64_FPSR_DEFAULT);
+
+#ifdef XTRACE
+	ia64_xtrace_save();
+#endif
+
+	PCPU_INC(cnt.v_trap);
+
+	td = curthread;
+	p = td->td_proc;
+	ucode = 0;
+
+	if (user) {
+		td->td_pticks = 0;
+		td->td_frame = tf;
+		if (td->td_ucred != p->p_ucred)
+			cred_update_thread(td);
+	} else {
+		KASSERT(cold || td->td_ucred != NULL,
+		    ("kernel trap doesn't have ucred"));
+#ifdef KDB
+		if (kdb_active)
+			kdb_reenter();
+#endif
+	}
+
+	sig = 0;
+	switch (vector) {
+	case IA64_VEC_VHPT:
+		/*
+		 * This one is tricky. We should hardwire the VHPT, but
+		 * don't at this time. I think we're mostly lucky that
+		 * the VHPT is mapped.
+		 */
+		trap_panic(vector, tf);
+		break;
+
+	case IA64_VEC_ITLB:
+	case IA64_VEC_DTLB:
+	case IA64_VEC_EXT_INTR:
+		/* We never call trap() with these vectors. */
+		trap_panic(vector, tf);
+		break;
+
+	case IA64_VEC_ALT_ITLB:
+	case IA64_VEC_ALT_DTLB:
+		/*
+		 * These should never happen, because regions 0-4 use the
+		 * VHPT. If we get one of these it means we didn't program
+		 * the region registers correctly.
+		 */
+		trap_panic(vector, tf);
+		break;
+
+	case IA64_VEC_NESTED_DTLB:
+		/*
+		 * When the nested TLB handler encounters an unexpected
+		 * condition, it'll switch to the backup stack and transfer
+		 * here. All we need to do is panic.
+		 */
+		trap_panic(vector, tf);
+		break;
+
+	case IA64_VEC_IKEY_MISS:
+	case IA64_VEC_DKEY_MISS:
+	case IA64_VEC_KEY_PERMISSION:
+		/*
+		 * We don't use protection keys, so we should never get
+		 * these faults.
+		 */
+		trap_panic(vector, tf);
+		break;
+
+	case IA64_VEC_DIRTY_BIT:
+	case IA64_VEC_INST_ACCESS:
+	case IA64_VEC_DATA_ACCESS:
+		/*
+		 * We get here if we read or write to a page of which the
+		 * PTE does not have the access bit or dirty bit set and
+		 * we can not find the PTE in our datastructures. This
+		 * either means we have a stale PTE in the TLB, or we lost
+		 * the PTE in our datastructures.
+		 */
+		trap_panic(vector, tf);
+		break;
+
+	case IA64_VEC_BREAK:
+		if (user) {
+			/* The break immediate is a 21-bit value. */
+			ucode = (int)tf->tf_special.ifa & 0x1FFFFF;
+			if (ucode == 0) {
+				/*
+				 * A break.b doesn't cause the immediate to be
+				 * stored in cr.iim (and saved in the TF in
+				 * tf_special.ifa).  We need to decode the
+				 * instruction to find out what the immediate
+				 * was.  Note that if the break instruction
+				 * didn't happen to be a break.b, but any
+				 * other break with an immediate of 0, we
+				 * will do unnecessary work to get the value
+				 * we already had.  Not an issue, because a
+				 * break 0 is invalid.
+				 */
+				ucode = trap_decode_break(tf);
+			}
+			if (ucode < 0x80000) {
+				/* Software interrupts. */
+				switch (ucode) {
+				case 0:		/* Unknown error. */
+					sig = SIGILL;
+					break;
+				case 1:		/* Integer divide by zero. */
+					sig = SIGFPE;
+					ucode = FPE_INTDIV;
+					break;
+				case 2:		/* Integer overflow. */
+					sig = SIGFPE;
+					ucode = FPE_INTOVF;
+					break;
+				case 3:		/* Range check/bounds check. */
+					sig = SIGFPE;
+					ucode = FPE_FLTSUB;
+					break;
+				case 6: 	/* Decimal overflow. */
+				case 7: 	/* Decimal divide by zero. */
+				case 8: 	/* Packed decimal error. */
+				case 9: 	/* Invalid ASCII digit. */
+				case 10:	/* Invalid decimal digit. */
+					sig = SIGFPE;
+					ucode = FPE_FLTINV;
+					break;
+				case 4:		/* Null pointer dereference. */
+				case 5:		/* Misaligned data. */
+				case 11:	/* Paragraph stack overflow. */
+					sig = SIGSEGV;
+					break;
+				default:
+					sig = SIGILL;
+					break;
+				}
+			} else if (ucode < 0x100000) {
+				/* Debugger breakpoint. */
+				tf->tf_special.psr &= ~IA64_PSR_SS;
+				sig = SIGTRAP;
+			} else if (ucode == 0x100000) {
+				break_syscall(tf);
+				return;		/* do_ast() already called. */
+			} else if (ucode == 0x180000) {
+				mcontext_t mc;
+
+				/* Restore a machine context passed in r8. */
+				error = copyin((void*)tf->tf_scratch.gr8,
+				    &mc, sizeof(mc));
+				if (!error) {
+					set_mcontext(td, &mc);
+					return;	/* Don't call do_ast()!!! */
+				}
+				sig = SIGSEGV;
+				ucode = tf->tf_scratch.gr8;
+			} else
+				sig = SIGILL;
+		} else {
+#ifdef KDB
+			if (kdb_trap(vector, 0, tf))
+				return;
+			panic("trap");
+#else
+			trap_panic(vector, tf);
+#endif
+		}
+		break;
+
+	case IA64_VEC_PAGE_NOT_PRESENT:
+	case IA64_VEC_INST_ACCESS_RIGHTS:
+	case IA64_VEC_DATA_ACCESS_RIGHTS: {
+		vm_offset_t va;
+		struct vmspace *vm;
+		vm_map_t map;
+		vm_prot_t ftype;
+		int rv;
+
+		rv = 0;
+		va = trunc_page(tf->tf_special.ifa);
+
+		if (va >= VM_MAXUSER_ADDRESS) {
+			/*
+			 * Don't allow user-mode faults for kernel virtual
+			 * addresses, including the gateway page.
+			 */
+			if (user)
+				goto no_fault_in;
+			map = kernel_map;
+		} else {
+			vm = (p != NULL) ? p->p_vmspace : NULL;
+			if (vm == NULL)
+				goto no_fault_in;
+			map = &vm->vm_map;
+		}
+
+		/* Derive the fault type from the isr access bits. */
+		if (tf->tf_special.isr & IA64_ISR_X)
+			ftype = VM_PROT_EXECUTE;
+		else if (tf->tf_special.isr & IA64_ISR_W)
+			ftype = VM_PROT_WRITE;
+		else
+			ftype = VM_PROT_READ;
+
+		if (map != kernel_map) {
+			/*
+			 * Keep swapout from messing with us during this
+			 * critical time.
+			 */
+			PROC_LOCK(p);
+			++p->p_lock;
+			PROC_UNLOCK(p);
+
+			/* Fault in the user page: */
+			rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
+
+			PROC_LOCK(p);
+			--p->p_lock;
+			PROC_UNLOCK(p);
+		} else {
+			/*
+			 * Don't have to worry about process locking or
+			 * stacks in the kernel.
+			 */
+			rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
+		}
+
+		if (rv == KERN_SUCCESS)
+			goto out;
+
+	no_fault_in:
+		if (!user) {
+			/* Check for copyin/copyout fault. */
+			if (td != NULL && td->td_pcb->pcb_onfault != 0) {
+				tf->tf_special.iip =
+				    td->td_pcb->pcb_onfault;
+				tf->tf_special.psr &= ~IA64_PSR_RI;
+				td->td_pcb->pcb_onfault = 0;
+				goto out;
+			}
+			trap_panic(vector, tf);
+		}
+		ucode = va;
+		sig = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
+		break;
+	}
+
+	case IA64_VEC_GENERAL_EXCEPTION: {
+		int code;
+
+		if (!user)
+			trap_panic(vector, tf);
+
+		code = tf->tf_special.isr & (IA64_ISR_CODE & 0xf0ull);
+		switch (code) {
+		case 0x0:	/* Illegal Operation Fault. */
+			sig = ia64_emulate(tf, td);
+			break;
+		default:
+			sig = SIGILL;
+			break;
+		}
+		if (sig == 0)
+			goto out;
+		ucode = vector;
+		break;
+	}
+
+	case IA64_VEC_SPECULATION:
+		/*
+		 * The branching behaviour of the chk instruction is not
+		 * implemented by the processor. All we need to do is
+		 * compute the target address of the branch and make sure
+		 * that control is transferred to that address.
+		 * We should do this in the IVT table and not by entering
+		 * the kernel...
+		 */
+		tf->tf_special.iip += tf->tf_special.ifa << 4;
+		tf->tf_special.psr &= ~IA64_PSR_RI;
+		goto out;
+
+	case IA64_VEC_NAT_CONSUMPTION:
+	case IA64_VEC_UNSUPP_DATA_REFERENCE:
+		if (user) {
+			ucode = vector;
+			sig = SIGILL;
+		} else
+			trap_panic(vector, tf);
+		break;
+
+	case IA64_VEC_DISABLED_FP: {
+		/* Lazily give the thread access to the high FP registers. */
+		if (user)
+			ia64_highfp_enable(td, tf);
+		else
+			trap_panic(vector, tf);
+		goto out;
+	}
+
+	case IA64_VEC_DEBUG:
+	case IA64_VEC_SINGLE_STEP_TRAP:
+		tf->tf_special.psr &= ~IA64_PSR_SS;
+		if (!user) {
+#ifdef KDB
+			if (kdb_trap(vector, 0, tf))
+				return;
+			panic("trap");
+#else
+			trap_panic(vector, tf);
+#endif
+		}
+		sig = SIGTRAP;
+		break;
+
+	case IA64_VEC_UNALIGNED_REFERENCE:
+		/*
+		 * If user-land, do whatever fixups, printing, and
+		 * signalling is appropriate (based on system-wide
+		 * and per-process unaligned-access-handling flags).
+		 */
+		if (user) {
+			sig = unaligned_fixup(tf, td);
+			if (sig == 0)
+				goto out;
+			ucode = tf->tf_special.ifa;	/* VA */
+		} else {
+			/* Check for copyin/copyout fault. */
+			if (td != NULL && td->td_pcb->pcb_onfault != 0) {
+				tf->tf_special.iip =
+				    td->td_pcb->pcb_onfault;
+				tf->tf_special.psr &= ~IA64_PSR_RI;
+				td->td_pcb->pcb_onfault = 0;
+				goto out;
+			}
+			trap_panic(vector, tf);
+		}
+		break;
+
+	case IA64_VEC_FLOATING_POINT_FAULT:
+	case IA64_VEC_FLOATING_POINT_TRAP: {
+		struct fpswa_bundle bundle;
+		struct fpswa_fpctx fpctx;
+		struct fpswa_ret ret;
+		char *ip;
+		u_long fault;
+
+		/* Always fatal in kernel. Should never happen. */
+		if (!user)
+			trap_panic(vector, tf);
+
+		if (fpswa_iface == NULL) {
+			sig = SIGFPE;
+			ucode = 0;
+			break;
+		}
+
+		/* Traps report the bundle after the one that trapped. */
+		ip = (char *)tf->tf_special.iip;
+		if (vector == IA64_VEC_FLOATING_POINT_TRAP &&
+		    (tf->tf_special.psr & IA64_PSR_RI) == 0)
+			ip -= 16;
+		error = copyin(ip, &bundle, sizeof(bundle));
+		if (error) {
+			sig = SIGBUS;	/* EFAULT, basically */
+			ucode = 0;	/* exception summary */
+			break;
+		}
+
+		/* f6-f15 are saved in exception_save */
+		fpctx.mask_low = 0xffc0;		/* bits 6 - 15 */
+		fpctx.mask_high = 0;
+		fpctx.fp_low_preserved = NULL;
+		fpctx.fp_low_volatile = &tf->tf_scratch_fp.fr6;
+		fpctx.fp_high_preserved = NULL;
+		fpctx.fp_high_volatile = NULL;
+
+		fault = (vector == IA64_VEC_FLOATING_POINT_FAULT) ? 1 : 0;
+
+		/*
+		 * We have the high FP registers disabled while in the
+		 * kernel. Enable them for the FPSWA handler only.
+		 */
+		ia64_enable_highfp();
+
+		/* The docs are unclear.  Is Fpswa reentrant? */
+		ret = fpswa_iface->if_fpswa(fault, &bundle,
+		    &tf->tf_special.psr, &tf->tf_special.fpsr,
+		    &tf->tf_special.isr, &tf->tf_special.pr,
+		    &tf->tf_special.cfm, &fpctx);
+
+		ia64_disable_highfp();
+
+		/*
+		 * Update ipsr and iip to next instruction. We only
+		 * have to do that for faults.
+		 */
+		if (fault && (ret.status == 0 || (ret.status & 2))) {
+			int ei;
+
+			/*
+			 * NOTE(review): the IA64_ISR_EI* masks are applied
+			 * to psr below; this relies on isr.ei and psr.ri
+			 * occupying the same bit positions (41-42) --
+			 * confirm against the architecture manual.
+			 */
+			ei = (tf->tf_special.isr >> 41) & 0x03;
+			if (ei == 0) {		/* no template for this case */
+				tf->tf_special.psr &= ~IA64_ISR_EI;
+				tf->tf_special.psr |= IA64_ISR_EI_1;
+			} else if (ei == 1) {	/* MFI or MFB */
+				tf->tf_special.psr &= ~IA64_ISR_EI;
+				tf->tf_special.psr |= IA64_ISR_EI_2;
+			} else if (ei == 2) {	/* MMF */
+				tf->tf_special.psr &= ~IA64_ISR_EI;
+				tf->tf_special.iip += 0x10;
+			}
+		}
+
+		if (ret.status == 0) {
+			goto out;
+		} else if (ret.status == -1) {
+			printf("FATAL: FPSWA err1 %lx, err2 %lx, err3 %lx\n",
+			    ret.err1, ret.err2, ret.err3);
+			panic("fpswa fatal error on fp fault");
+		} else {
+			sig = SIGFPE;
+			ucode = 0;		/* XXX exception summary */
+			break;
+		}
+	}
+
+	case IA64_VEC_LOWER_PRIVILEGE_TRANSFER:
+		/*
+		 * The lower-privilege transfer trap is used by the EPC
+		 * syscall code to trigger re-entry into the kernel when the
+		 * process should be single stepped. The problem is that
+		 * there's no way to set single stepping directly without
+		 * using the rfi instruction. So instead we enable the
+		 * lower-privilege transfer trap and when we get here we
+		 * know that the process is about to enter userland (and
+		 * has already lowered its privilege).
+		 * However, there's another gotcha. When the process has
+		 * lowered its privilege it's still running in the gateway
+		 * page. If we enable single stepping, we'll be stepping
+		 * the code in the gateway page. In and by itself this is
+		 * not a problem, but it's an address debuggers won't know
+		 * anything about. Hence, it can only cause confusion.
+		 * We know that we need to branch to get out of the gateway
+		 * page, so what we do here is enable the taken branch
+		 * trap and just let the process continue. When we branch
+		 * out of the gateway page we'll get back into the kernel
+		 * and then we enable single stepping.
+		 * Since this a rather round-about way of enabling single
+		 * stepping, don't make things even more complicated by
+		 * calling userret() and do_ast(). We do that later...
+		 */
+		tf->tf_special.psr &= ~IA64_PSR_LP;
+		tf->tf_special.psr |= IA64_PSR_TB;
+		return;
+
+	case IA64_VEC_TAKEN_BRANCH_TRAP:
+		/*
+		 * Don't assume there aren't any branches other than the
+		 * branch that takes us out of the gateway page. Check the
+		 * iip and enable single stepping only when it's a user
+		 * address.
+		 */
+		if (tf->tf_special.iip >= VM_MAXUSER_ADDRESS)
+			return;
+		tf->tf_special.psr &= ~IA64_PSR_TB;
+		tf->tf_special.psr |= IA64_PSR_SS;
+		return;
+
+	case IA64_VEC_IA32_EXCEPTION:
+	case IA64_VEC_IA32_INTERCEPT:
+	case IA64_VEC_IA32_INTERRUPT:
+		sig = SIGEMT;
+		ucode = tf->tf_special.iip;
+		break;
+
+	default:
+		/* Reserved vectors get here. Should never happen of course. */
+		trap_panic(vector, tf);
+		break;
+	}
+
+	/* Every case that falls through must have chosen a signal. */
+	KASSERT(sig != 0, ("foo"));
+
+	if (print_usertrap)
+		printtrap(vector, tf, 1, user);
+
+	ksiginfo_init(&ksi);
+	ksi.ksi_signo = sig;
+	ksi.ksi_code = ucode;
+	trapsignal(td, &ksi);
+
+out:
+	if (user) {
+		userret(td, tf);
+		do_ast(tf);
+	}
+	return;
+}
+
+/*
+ * Handle break instruction based system calls (break 0x100000).
+ *
+ * The register-stack arguments are copied into the trapframe before
+ * dispatch; on ERESTART the saved iip/psr are restored so the break
+ * instruction re-executes when the process resumes.
+ *
+ * Declared static above (forward declaration); the definition now carries
+ * the qualifier too, for consistency.
+ */
+static void
+break_syscall(struct trapframe *tf)
+{
+	uint64_t *bsp, *tfp;
+	uint64_t iip, psr;
+	int error, nargs;
+
+	/* Save address of break instruction in case of ERESTART. */
+	iip = tf->tf_special.iip;
+	psr = tf->tf_special.psr;
+
+	/* Advance to the next instruction: bump psr.ri, wrap to next bundle. */
+	tf->tf_special.psr += IA64_PSR_RI_1;
+	if ((tf->tf_special.psr & IA64_PSR_RI) > IA64_PSR_RI_2) {
+		tf->tf_special.iip += 16;
+		tf->tf_special.psr &= ~IA64_PSR_RI;
+	}
+
+	/*
+	 * Copy the arguments on the register stack into the trapframe
+	 * to avoid having interleaved NaT collections.
+	 */
+	tfp = &tf->tf_scratch.gr16;
+	nargs = tf->tf_special.cfm & 0x7f;
+	bsp = (uint64_t*)(curthread->td_kstack + tf->tf_special.ndirty +
+	    (tf->tf_special.bspstore & 0x1ffUL));
+	/* Step back over the args; one extra slot if a NaT collection
+	 * intervenes (every 0x200 bytes of backing store). */
+	bsp -= (((uintptr_t)bsp & 0x1ff) < (nargs << 3)) ? (nargs + 1): nargs;
+	while (nargs--) {
+		*tfp++ = *bsp++;
+		/* Skip the NaT collection slot. */
+		if (((uintptr_t)bsp & 0x1ff) == 0x1f8)
+			bsp++;
+	}
+	error = syscall(tf);
+	if (error == ERESTART) {
+		tf->tf_special.iip = iip;
+		tf->tf_special.psr = psr;
+	}
+
+	do_ast(tf);
+}
+
+/*
+ * Fill in 'sa' from the trapframe: the syscall number is passed in r15
+ * and the arguments were copied into gr16.. by the syscall entry paths.
+ * Always returns 0.
+ */
+int
+cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
+{
+	struct proc *p;
+	struct trapframe *tf;
+
+	p = td->td_proc;
+	tf = td->td_frame;
+
+	sa->code = tf->tf_scratch.gr15;
+	sa->args = &tf->tf_scratch.gr16;
+
+	/*
+	 * syscall() and __syscall() are handled the same on
+	 * the ia64, as everything is 64-bit aligned, anyway.
+	 */
+	if (sa->code == SYS_syscall || sa->code == SYS___syscall) {
+		/*
+		 * Code is first argument, followed by actual args.
+		 */
+		sa->code = sa->args[0];
+		sa->args++;
+	}
+
+ 	if (p->p_sysent->sv_mask)
+ 		sa->code &= p->p_sysent->sv_mask;
+	/* Out-of-range codes map to table entry 0. */
+ 	if (sa->code >= p->p_sysent->sv_size)
+ 		sa->callp = &p->p_sysent->sv_table[0];
+ 	else
+		sa->callp = &p->p_sysent->sv_table[sa->code];
+	sa->narg = sa->callp->sy_narg;
+
+	td->td_retval[0] = 0;
+	td->td_retval[1] = 0;
+
+	return (0);
+}
+
+#include "../../kern/subr_syscall.c"
+
+/*
+ * Process a system call.
+ *
+ * See syscall.s for details as to how we get here. In order to support
+ * the ERESTART case, we return the error to our caller. They deal with
+ * the hairy details.
+ */
+int
+syscall(struct trapframe *tf)
+{
+	struct syscall_args sa;
+	struct thread *td;
+	int error;
+
+	td = curthread;
+	td->td_frame = tf;
+
+	ia64_set_fpsr(IA64_FPSR_DEFAULT);
+	/* r10 is returned by do_ast(); EJUSTRETURN means "no restart". */
+	tf->tf_scratch.gr10 = EJUSTRETURN;
+
+	/* sa is filled in by cpu_fetch_syscall_args() via syscallenter(). */
+	error = syscallenter(td, &sa);
+	syscallret(td, error, &sa);
+
+	return (error);
+}


Property changes on: trunk/sys/ia64/ia64/trap.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/uio_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/uio_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/uio_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,121 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2004 Alan L. Cox <alc at cs.rice.edu>
+ * Copyright (c) 1982, 1986, 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/uio_machdep.c 266312 2014-05-17 13:59:11Z ian $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <machine/vmparam.h>
+
+/*
+ * Implement uiomove(9) from physical memory using the direct map to
+ * avoid the creation and destruction of ephemeral mappings.
+ */
+int
+uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
+{
+	struct thread *td = curthread;
+	struct iovec *iov;
+	void *cp;
+	vm_offset_t page_offset;
+	size_t cnt;
+	int error = 0;
+	int save = 0;
+
+	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
+	    ("uiomove_fromphys: mode"));
+	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
+	    ("uiomove_fromphys proc"));
+	/* Set TDP_DEADLKTREAT for the copy; restored below only if it
+	 * wasn't already set. */
+	save = td->td_pflags & TDP_DEADLKTREAT;
+	td->td_pflags |= TDP_DEADLKTREAT;
+	while (n > 0 && uio->uio_resid) {
+		iov = uio->uio_iov;
+		cnt = iov->iov_len;
+		if (cnt == 0) {
+			/* Skip exhausted iovec entries. */
+			uio->uio_iov++;
+			uio->uio_iovcnt--;
+			continue;
+		}
+		if (cnt > n)
+			cnt = n;
+		/* Clamp the chunk to the end of the current page. */
+		page_offset = offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - page_offset);
+		/* Address the page through the region 7 direct mapping. */
+		cp = (char *)
+		    IA64_PHYS_TO_RR7(ma[offset >> PAGE_SHIFT]->phys_addr) +
+		    page_offset;
+		switch (uio->uio_segflg) {
+		case UIO_USERSPACE:
+			maybe_yield();
+			if (uio->uio_rw == UIO_READ)
+				error = copyout(cp, iov->iov_base, cnt);
+			else
+				error = copyin(iov->iov_base, cp, cnt);
+			if (error)
+				goto out;
+			break;
+		case UIO_SYSSPACE:
+			if (uio->uio_rw == UIO_READ)
+				bcopy(cp, iov->iov_base, cnt);
+			else
+				bcopy(iov->iov_base, cp, cnt);
+			break;
+		case UIO_NOCOPY:
+			break;
+		}
+		/* Account for the chunk in the uio and our cursors. */
+		iov->iov_base = (char *)iov->iov_base + cnt;
+		iov->iov_len -= cnt;
+		uio->uio_resid -= cnt;
+		uio->uio_offset += cnt;
+		offset += cnt;
+		n -= cnt;
+	}
+out:
+	if (save == 0)
+		td->td_pflags &= ~TDP_DEADLKTREAT;
+	return (error);
+}


Property changes on: trunk/sys/ia64/ia64/uio_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/uma_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/uma_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/uma_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,78 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003 The FreeBSD Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/uma_machdep.c 287945 2015-09-17 23:31:44Z rstone $");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/uma.h>
+#include <vm/uma_int.h>
+#include <machine/vmparam.h>
+
+/*
+ * UMA small-slab backend: hand out one wired physical page, addressed
+ * through the region 7 identity mapping so no pmap work is required.
+ */
+void *
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
+{
+	vm_page_t m;
+	void *va;
+	int pflags;
+
+	*flags = UMA_SLAB_PRIV;
+	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
+
+	/*
+	 * Keep retrying until a page is available, unless the caller
+	 * asked not to sleep (M_NOWAIT).
+	 */
+	while ((m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ)) == NULL) {
+		if (wait & M_NOWAIT)
+			return (NULL);
+		VM_WAIT;
+	}
+
+	va = (void *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
+	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
+		bzero(va, PAGE_SIZE);
+	return (va);
+}
+
+/*
+ * Free a page previously handed out by uma_small_alloc(). The page was
+ * allocated VM_ALLOC_WIRED, so unwire it by hand (wire_count and the
+ * global wire count) around freeing it.
+ */
+void
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
+{
+	vm_page_t m;
+
+	/* Recover the vm_page from the region 7 direct-mapped address. */
+	m = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem));
+	m->wire_count--;
+	vm_page_free(m);
+	atomic_subtract_int(&cnt.v_wire_count, 1);
+}


Property changes on: trunk/sys/ia64/ia64/uma_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/unaligned.c
===================================================================
--- trunk/sys/ia64/ia64/unaligned.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/unaligned.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,308 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003 Marcel Moolenaar
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/ia64/unaligned.c 219741 2011-03-18 15:36:28Z marcel $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <ia64/disasm/disasm.h>
+
+/* debug.unaligned_print: log every unaligned access we see (default off). */
+static int ia64_unaligned_print = 0;
+SYSCTL_INT(_debug, OID_AUTO, unaligned_print, CTLFLAG_RW,
+    &ia64_unaligned_print, 0, "warn about unaligned accesses");
+
+/* debug.unaligned_test: emulate even when PSR.ac is set (see unaligned_fixup). */
+static int ia64_unaligned_test = 0;
+SYSCTL_INT(_debug, OID_AUTO, unaligned_test, CTLFLAG_RW,
+    &ia64_unaligned_test, 0, "test emulation when PSR.ac is set");
+
+/*
+ * Return a pointer to the saved image of FP register 'fr' inside the
+ * machine context, or NULL when 'fr' is out of range or not writable
+ * (f0/f1 are architecturally constant). The context stores the FP
+ * register file in four separate arrays, so pick the right array and
+ * rebase the index before the pointer arithmetic.
+ */
+static void *
+fpreg_ptr(mcontext_t *mc, int fr)
+{
+	union _ia64_fpreg *p;
+
+	if (fr <= 1 || fr >= 128)
+		return (NULL);
+	if (fr >= 32) {
+		/* f32-f127: high FP partition. */
+		p = &mc->mc_high_fp.fr32;
+		fr -= 32;
+	} else if (fr >= 16) {
+		/* f16-f31: preserved registers. */
+		p = &mc->mc_preserved_fp.fr16;
+		fr -= 16;
+	} else if (fr >= 6) {
+		/* f6-f15: scratch registers. */
+		p = &mc->mc_scratch_fp.fr6;
+		fr -= 6;
+	} else {
+		/* f2-f5: preserved registers. */
+		p = &mc->mc_preserved_fp.fr2;
+		fr -= 2;
+	}
+	return ((void*)(p + fr));
+}
+
+/*
+ * Return a pointer to the saved image of general register 'gr' inside
+ * the machine context, or NULL when 'gr' is r0 (constant) or beyond the
+ * current register frame (32 + CFM.sof). Stacked registers (r32 and up)
+ * live on the RSE backing store, so they are located by walking back
+ * from bspstore; all others are scattered over the context's arrays.
+ */
+static void *
+greg_ptr(mcontext_t *mc, int gr)
+{
+	uint64_t *p;
+	int nslots;
+
+	if (gr <= 0 || gr >= 32 + (mc->mc_special.cfm & 0x7f))
+		return (NULL);
+	if (gr >= 32) {
+		/* Stacked register: step back over the dirty slots,
+		 * letting ia64_bsp_adjust() account for NaT collections. */
+	 	nslots = IA64_CFM_SOF(mc->mc_special.cfm) - gr + 32;
+		p = (void *)ia64_bsp_adjust(mc->mc_special.bspstore, -nslots);
+		gr = 0;
+	} else if (gr >= 14) {
+		p = &mc->mc_scratch.gr14;
+		gr -= 14;
+	} else if (gr == 13) {
+		/* r13 is the thread pointer. */
+		p = &mc->mc_special.tp;
+		gr = 0;
+	} else if (gr == 12) {
+		/* r12 is the stack pointer. */
+		p = &mc->mc_special.sp;
+		gr = 0;
+	} else if (gr >= 8) {
+		p = &mc->mc_scratch.gr8;
+		gr -= 8;
+	} else if (gr >= 4) {
+		p = &mc->mc_preserved.gr4;
+		gr -= 4;
+	} else if (gr >= 2) {
+		p = &mc->mc_scratch.gr2;
+		gr -= 2;
+	} else {
+		/* r1 is the global pointer. */
+		p = &mc->mc_special.gp;
+		gr = 0;
+	}
+	return ((void*)(p + gr));
+}
+
+/*
+ * Read a 64-bit register slot. Slots below VM_MAXUSER_ADDRESS live in
+ * the user backing store and must be fetched with fuword(); kernel
+ * resident slots can be dereferenced directly.
+ */
+static uint64_t
+rdreg(uint64_t *addr)
+{
+	uint64_t val;
+
+	if ((uintptr_t)addr >= VM_MAXUSER_ADDRESS)
+		val = *addr;
+	else
+		val = fuword(addr);
+	return (val);
+}
+
+/*
+ * Write a 64-bit register slot, using suword() for slots that live in
+ * the user backing store and a plain store for kernel resident slots.
+ */
+static void
+wrreg(uint64_t *addr, uint64_t val)
+{
+	if ((uintptr_t)addr >= VM_MAXUSER_ADDRESS)
+		*addr = val;
+	else
+		suword(addr, val);
+}
+
+/*
+ * Emulate the misaligned load or store decoded in 'i' against machine
+ * context 'mc', with 'va' the faulting (unaligned) address. Returns 0
+ * on success, EINVAL when a register operand cannot be resolved and
+ * ENOENT when the opcode is not one we emulate.
+ *
+ * Loads go through an aligned bounce buffer ('buf') and are written to
+ * the target register with wrreg(); stores read the source register
+ * with rdreg() and copy the bounce buffer out. FP accesses additionally
+ * round-trip through f6 so the in-register (spilled) format is used.
+ *
+ * NOTE(review): the copyin()/copyout() return values are ignored, so a
+ * fault on the user address is treated as success -- confirm intended.
+ */
+static int
+fixup(struct asm_inst *i, mcontext_t *mc, uint64_t va)
+{
+	union {
+		double d;
+		long double e;
+		uint64_t i;
+		float s;
+	} buf;
+	void *reg;
+	uint64_t postinc;
+
+	switch (i->i_op) {
+	case ASM_OP_LD2:
+		copyin((void*)va, (void*)&buf.i, 2);
+		reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		wrreg(reg, buf.i & 0xffffU);
+		break;
+	case ASM_OP_LD4:
+		copyin((void*)va, (void*)&buf.i, 4);
+		reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		wrreg(reg, buf.i & 0xffffffffU);
+		break;
+	case ASM_OP_LD8:
+		copyin((void*)va, (void*)&buf.i, 8);
+		reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		wrreg(reg, buf.i);
+		break;
+	case ASM_OP_LDFD:
+		copyin((void*)va, (void*)&buf.d, sizeof(buf.d));
+		reg = fpreg_ptr(mc, (int)i->i_oper[1].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		/* Convert memory format to register (spill) format via f6. */
+		__asm("ldfd f6=%1;; stf.spill %0=f6" : "=m"(*(double *)reg) :
+		    "m"(buf.d) : "f6");
+		break;
+	case ASM_OP_LDFE:
+		copyin((void*)va, (void*)&buf.e, sizeof(buf.e));
+		reg = fpreg_ptr(mc, (int)i->i_oper[1].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		__asm("ldfe f6=%1;; stf.spill %0=f6" :
+		    "=m"(*(long double *)reg) : "m"(buf.e) : "f6");
+		break;
+	case ASM_OP_LDFS:
+		copyin((void*)va, (void*)&buf.s, sizeof(buf.s));
+		reg = fpreg_ptr(mc, (int)i->i_oper[1].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		__asm("ldfs f6=%1;; stf.spill %0=f6" : "=m"(*(float *)reg) :
+		    "m"(buf.s) : "f6");
+		break;
+	case ASM_OP_ST2:
+		reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		buf.i = rdreg(reg);
+		copyout((void*)&buf.i, (void*)va, 2);
+		break;
+	case ASM_OP_ST4:
+		reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		buf.i = rdreg(reg);
+		copyout((void*)&buf.i, (void*)va, 4);
+		break;
+	case ASM_OP_ST8:
+		reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		buf.i = rdreg(reg);
+		copyout((void*)&buf.i, (void*)va, 8);
+		break;
+	case ASM_OP_STFD:
+		reg = fpreg_ptr(mc, (int)i->i_oper[2].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		/* Convert register (fill) format back to memory format. */
+		__asm("ldf.fill f6=%1;; stfd %0=f6" : "=m"(buf.d) :
+		    "m"(*(double *)reg) : "f6");
+		copyout((void*)&buf.d, (void*)va, sizeof(buf.d));
+		break;
+	case ASM_OP_STFE:
+		reg = fpreg_ptr(mc, (int)i->i_oper[2].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		__asm("ldf.fill f6=%1;; stfe %0=f6" : "=m"(buf.e) :
+		    "m"(*(long double *)reg) : "f6");
+		copyout((void*)&buf.e, (void*)va, sizeof(buf.e));
+		break;
+	case ASM_OP_STFS:
+		reg = fpreg_ptr(mc, (int)i->i_oper[2].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		__asm("ldf.fill f6=%1;; stfs %0=f6" : "=m"(buf.s) :
+		    "m"(*(float *)reg) : "f6");
+		copyout((void*)&buf.s, (void*)va, sizeof(buf.s));
+		break;
+	default:
+		return (ENOENT);
+	}
+
+	/* Handle post-increment. */
+	if (i->i_oper[3].o_type == ASM_OPER_GREG) {
+		/* Register-form post-increment: amount comes from a GR. */
+		reg = greg_ptr(mc, (int)i->i_oper[3].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		postinc = rdreg(reg);
+	} else
+		postinc = (i->i_oper[3].o_type == ASM_OPER_IMM)
+		    ? i->i_oper[3].o_value : 0;
+	if (postinc != 0) {
+		/* The address register is operand 1 for stores, 2 for loads
+		 * (whichever is the memory operand). */
+		if (i->i_oper[1].o_type == ASM_OPER_MEM)
+			reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
+		else
+			reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
+		if (reg == NULL)
+			return (EINVAL);
+		postinc += rdreg(reg);
+		wrreg(reg, postinc);
+	}
+	return (0);
+}
+
+/*
+ * Handle an unaligned reference fault taken by 'td'. Decode the bundle
+ * at the faulting IP, emulate the load/store in the offending slot
+ * against the thread's mcontext and advance past the instruction.
+ * Returns 0 when the access was emulated, or the signal (SIGBUS or
+ * SIGILL) to deliver otherwise.
+ */
+int
+unaligned_fixup(struct trapframe *tf, struct thread *td)
+{
+	mcontext_t context;
+	struct asm_bundle bundle;
+	int error, slot;
+
+	/* PSR.ri holds the index of the faulting slot in the bundle. */
+	slot = ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_0) ? 0 :
+	    ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_1) ? 1 : 2;
+
+	if (ia64_unaligned_print) {
+		uprintf("pid %d (%s): unaligned access: va=0x%lx, pc=0x%lx\n",
+		    td->td_proc->p_pid, td->td_proc->p_comm,
+		    tf->tf_special.ifa, tf->tf_special.iip + slot);
+	}
+
+	/*
+	 * If PSR.ac is set, the process wants to be signalled about mis-
+	 * aligned loads and stores. Send it a SIGBUS. In order for us to
+	 * test the emulation of misaligned loads and stores, we have a
+	 * sysctl that tells us that we must emulate the load or store,
+	 * instead of sending the signal. We need the sysctl because if
+	 * PSR.ac is not set, the CPU may (and likely will) deal with the
+	 * misaligned load or store itself. As such, we won't get the
+	 * exception.
+	 */
+	if ((tf->tf_special.psr & IA64_PSR_AC) && !ia64_unaligned_test)
+		return (SIGBUS);
+
+	if (!asm_decode(tf->tf_special.iip, &bundle))
+		return (SIGILL);
+
+	get_mcontext(td, &context, 0);
+
+	/* ifa is the faulting data address. */
+	error = fixup(bundle.b_inst + slot, &context, tf->tf_special.ifa);
+	if (error == ENOENT) {
+		printf("unhandled misaligned memory access:\n\t");
+		asm_print_inst(&bundle, slot, tf->tf_special.iip);
+		return (SIGILL);
+	} else if (error != 0)
+		return (SIGBUS);
+
+	set_mcontext(td, &context);
+
+	/* Advance to the next instruction. */
+	if (slot == 2) {
+		/* Last slot: move to the next bundle and reset PSR.ri. */
+		tf->tf_special.psr &= ~IA64_PSR_RI;
+		tf->tf_special.iip += 16;
+	} else
+		tf->tf_special.psr += IA64_PSR_RI_1;
+
+	return (0);
+}


Property changes on: trunk/sys/ia64/ia64/unaligned.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/unwind.c
===================================================================
--- trunk/sys/ia64/ia64/unwind.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/unwind.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,484 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003, 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/unwind.c 227293 2011-11-07 06:44:47Z ed $");
+
+#include <sys/param.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/unwind.h>
+
+#include <uwx.h>
+
+static MALLOC_DEFINE(M_UNWIND, "Unwind", "Unwind information");
+
+/* One entry of an unwind table (start/end are module-relative). */
+struct unw_entry {
+	uint64_t	ue_start;	/* procedure start */
+	uint64_t	ue_end;		/* procedure end */
+	uint64_t	ue_info;	/* offset to procedure descriptors */
+};
+
+/* A registered unwind table, covering IPs in [ut_base, ut_limit). */
+struct unw_table {
+	LIST_ENTRY(unw_table) ut_link;
+	uint64_t	ut_base;
+	uint64_t	ut_limit;
+	struct unw_entry *ut_start;
+	struct unw_entry *ut_end;
+};
+
+LIST_HEAD(unw_table_list, unw_table);
+
+/* All registered unwind tables (see unw_table_add/unw_table_remove). */
+static struct unw_table_list unw_tables;
+
+#ifdef KDB
+#define	KDBHEAPSZ	8192
+
+/*
+ * Chunk header for the pre-allocated heap used while in the debugger
+ * (malloc() is unsafe then). Sizes and links are in 16-byte units
+ * relative to 'kdbheap'; next/prev of -1 terminate the list.
+ */
+struct mhdr {
+	uint32_t	sig;
+#define	MSIG_FREE	0x65657246	/* "Free". */
+#define	MSIG_USED	0x64657355	/* "Used". */
+	uint32_t	size;
+	int32_t		next;
+	int32_t		prev;
+};
+
+static struct mhdr *kdbheap;
+#endif /* KDB */
+
+/*
+ * Allocator callback for libuwx. Outside the debugger this is plain
+ * malloc(); while kdb is active it carves 16-byte units out of the
+ * static kdbheap with a first-fit scan, splitting a free chunk when it
+ * is large enough to leave a usable remainder.
+ */
+static void *
+unw_alloc(size_t sz)
+{
+#ifdef KDB
+	struct mhdr *hdr, *hfree;
+
+	if (kdb_active) {
+		/* Round up to 16-byte units. */
+		sz = (sz + 15) >> 4;
+		hdr = kdbheap;
+		/* First-fit scan of the chunk list. */
+		while (hdr->sig != MSIG_FREE || hdr->size < sz) {
+			if (hdr->next == -1)
+				return (NULL);
+			hdr = kdbheap + hdr->next;
+		}
+		if (hdr->size > sz + 1) {
+			/* Split: create a free chunk after the allocation. */
+			hfree = hdr + sz + 1;
+			hfree->sig = MSIG_FREE;
+			hfree->size = hdr->size - sz - 1;
+			hfree->prev = hdr - kdbheap;
+			hfree->next = hdr->next;
+			hdr->size = sz;
+			hdr->next = hfree - kdbheap;
+			if (hfree->next >= 0) {
+				hfree = kdbheap + hfree->next;
+				hfree->prev = hdr->next;
+			}
+		}
+		hdr->sig = MSIG_USED;
+		/* The payload starts right after the header. */
+		return (void*)(hdr + 1);
+	}
+#endif
+	return (malloc(sz, M_UNWIND, M_NOWAIT));
+}
+
+/*
+ * Free callback for libuwx; counterpart of unw_alloc(). While kdb is
+ * active, mark the chunk free and coalesce it with a free neighbour
+ * (previous first, otherwise next) to limit fragmentation.
+ */
+static void
+unw_free(void *p)
+{
+#ifdef KDB
+	struct mhdr *hdr, *hfree;
+
+	if (kdb_active) {
+		hdr = (struct mhdr*)p - 1;
+		/* Ignore pointers that do not carry our in-use signature. */
+		if (hdr->sig != MSIG_USED)
+			return;
+		hdr->sig = MSIG_FREE;
+		if (hdr->prev >= 0 && kdbheap[hdr->prev].sig == MSIG_FREE) {
+			/* Merge into the preceding free chunk. */
+			hfree = kdbheap + hdr->prev;
+			hfree->size += hdr->size + 1;
+			hfree->next = hdr->next;
+			if (hdr->next >= 0) {
+				hfree = kdbheap + hdr->next;
+				hfree->prev = hdr->prev;
+			}
+		} else if (hdr->next >= 0 &&
+		    kdbheap[hdr->next].sig == MSIG_FREE) {
+			/* Absorb the following free chunk. */
+			hfree = kdbheap + hdr->next;
+			hdr->size += hfree->size + 1;
+			hdr->next = hfree->next;
+			if (hdr->next >= 0) {
+				hfree = kdbheap + hdr->next;
+				hfree->prev = hdr - kdbheap;
+			}
+		}
+		return;
+	}
+#endif
+	free(p, M_UNWIND);
+}
+
+/*
+ * Find the registered unwind table whose [base, limit) range covers
+ * the given IP; NULL when no table matches.
+ */
+static struct unw_table *
+unw_table_lookup(uint64_t ip)
+{
+	struct unw_table *tbl;
+
+	LIST_FOREACH(tbl, &unw_tables, ut_link) {
+		if (ip < tbl->ut_base || ip >= tbl->ut_limit)
+			continue;
+		return (tbl);
+	}
+	return (NULL);
+}
+
+/*
+ * Fetch the value of the libuwx register identified by 'from' out of a
+ * trapframe. Only the registers a trapframe actually saves (special
+ * and scratch state) are reachable; anything else logs a diagnostic
+ * and yields 0.
+ */
+static uint64_t
+unw_copyin_from_frame(struct trapframe *tf, uint64_t from)
+{
+	uint64_t val;
+	int reg;
+
+	if (from == UWX_REG_AR_PFS)
+		val = tf->tf_special.pfs;
+	else if (from == UWX_REG_PREDS)
+		val = tf->tf_special.pr;
+	else if (from == UWX_REG_AR_RNAT)
+		val = tf->tf_special.rnat;
+	else if (from == UWX_REG_AR_UNAT)
+		val = tf->tf_special.unat;
+	else if (from >= UWX_REG_GR(0) && from <= UWX_REG_GR(127)) {
+		reg = from - UWX_REG_GR(0);
+		if (reg == 1)
+			val = tf->tf_special.gp;
+		else if (reg == 12)
+			val = tf->tf_special.sp;
+		else if (reg == 13)
+			val = tf->tf_special.tp;
+		else if (reg >= 2 && reg <= 3)
+			val = (&tf->tf_scratch.gr2)[reg - 2];
+		else if (reg >= 8 && reg <= 11)
+			val = (&tf->tf_scratch.gr8)[reg - 8];
+		else if (reg >= 14 && reg <= 31)
+			val = (&tf->tf_scratch.gr14)[reg - 14];
+		else
+			goto oops;
+	} else if (from >= UWX_REG_BR(0) && from <= UWX_REG_BR(7)) {
+		reg = from - UWX_REG_BR(0);
+		if (reg == 0)
+			val = tf->tf_special.rp;
+		else if (reg >= 6 && reg <= 7)
+			val = (&tf->tf_scratch.br6)[reg - 6];
+		else
+			goto oops;
+	} else
+		goto oops;
+	return (val);
+
+ oops:
+	/* Unsupported register: complain and return a harmless value. */
+	printf("UNW: %s(%p, %lx)\n", __func__, tf, from);
+	return (0UL);
+}
+
+/*
+ * Fetch the value of the libuwx register identified by 'from' out of a
+ * PCB. Mirror of unw_copyin_from_frame(), but a PCB saves preserved
+ * (callee-saved) state rather than scratch state; unreachable
+ * registers log a diagnostic and yield 0.
+ */
+static uint64_t
+unw_copyin_from_pcb(struct pcb *pcb, uint64_t from)
+{
+	uint64_t val;
+	int reg;
+
+	if (from == UWX_REG_AR_PFS)
+		val = pcb->pcb_special.pfs;
+	else if (from == UWX_REG_PREDS)
+		val = pcb->pcb_special.pr;
+	else if (from == UWX_REG_AR_RNAT)
+		val = pcb->pcb_special.rnat;
+	else if (from == UWX_REG_AR_UNAT)
+		val = pcb->pcb_special.unat;
+	else if (from >= UWX_REG_GR(0) && from <= UWX_REG_GR(127)) {
+		reg = from - UWX_REG_GR(0);
+		if (reg == 1)
+			val = pcb->pcb_special.gp;
+		else if (reg == 12)
+			val = pcb->pcb_special.sp;
+		else if (reg == 13)
+			val = pcb->pcb_special.tp;
+		else if (reg >= 4 && reg <= 7)
+			val = (&pcb->pcb_preserved.gr4)[reg - 4];
+		else
+			goto oops;
+	} else if (from >= UWX_REG_BR(0) && from <= UWX_REG_BR(7)) {
+		reg = from - UWX_REG_BR(0);
+		if (reg == 0)
+			val = pcb->pcb_special.rp;
+		else if (reg >= 1 && reg <= 5)
+			val = (&pcb->pcb_preserved.br1)[reg - 1];
+		else
+			goto oops;
+	} else
+		goto oops;
+	return (val);
+
+ oops:
+	/* Unsupported register: complain and return a harmless value. */
+	printf("UNW: %s(%p, %lx)\n", __func__, pcb, from);
+	return (0UL);
+}
+
+/*
+ * libuwx copyin callback. Memory and register stack reads are direct
+ * 8-byte loads; register reads are routed to the trapframe or PCB
+ * behind the regstate token. Returns the number of bytes copied, or 0
+ * on failure. UWX_COPYIN_UINFO deliberately drops through to the
+ * diagnostic (presumably unwind info is read via the UTABLE lookup
+ * instead -- TODO confirm).
+ */
+static int
+unw_cb_copyin(int req, char *to, uint64_t from, int len, intptr_t tok)
+{
+	struct unw_regstate *rs = (void*)tok;
+	uint64_t val;
+
+	switch (req) {
+	case UWX_COPYIN_UINFO:
+		break;
+	case UWX_COPYIN_MSTACK:
+		*((uint64_t*)to) = *((uint64_t*)from);
+		return (8);
+	case UWX_COPYIN_RSTACK:
+		*((uint64_t*)to) = *((uint64_t*)from);
+		return (8);
+	case UWX_COPYIN_REG:
+		if (rs->frame != NULL)
+			val = unw_copyin_from_frame(rs->frame, from);
+		else if (rs->pcb != NULL)
+			val = unw_copyin_from_pcb(rs->pcb, from);
+		else
+			goto oops;
+		*((uint64_t*)to) = val;
+		return (len);
+	}
+
+ oops:
+	printf("UNW: %s(%d, %p, %lx, %d, %lx)\n", __func__, req, to, from,
+	    len, tok);
+	return (0);
+}
+
+/*
+ * libuwx lookup callback: resolve an IP to its unwind table and hand
+ * the table bounds back as a key/value vector stored in the regstate.
+ * UWX_LKUP_FREE is a no-op since the vector is not allocated.
+ */
+static int
+unw_cb_lookup(int req, uint64_t ip, intptr_t tok, uint64_t **vec)
+{
+	struct unw_regstate *rs = (void*)tok;
+	struct unw_table *ut;
+
+	switch (req) {
+	case UWX_LKUP_LOOKUP:
+		ut = unw_table_lookup(ip);
+		if (ut == NULL)
+			return (UWX_LKUP_NOTFOUND);
+		rs->keyval[0] = UWX_KEY_TBASE;
+		rs->keyval[1] = ut->ut_base;
+		rs->keyval[2] = UWX_KEY_USTART;
+		rs->keyval[3] = (intptr_t)ut->ut_start;
+		rs->keyval[4] = UWX_KEY_UEND;
+		rs->keyval[5] = (intptr_t)ut->ut_end;
+		rs->keyval[6] = 0;
+		rs->keyval[7] = 0;
+		*vec = rs->keyval;
+		return (UWX_LKUP_UTABLE);
+	case UWX_LKUP_FREE:
+		return (0);
+	}
+
+	return (UWX_LKUP_ERR);
+}
+
+/*
+ * Initialize an unwind regstate from a trapframe. Computes the initial
+ * IP (iip plus the slot index from PSR.ri, bits 41-42) and the frame's
+ * BSP (bspstore plus the dirty bytes, backed up over the current
+ * frame's CFM.sof slots). Returns 0, ENOMEM or EINVAL.
+ */
+int
+unw_create_from_frame(struct unw_regstate *rs, struct trapframe *tf)
+{
+	uint64_t bsp, ip;
+	int uwxerr;
+
+	rs->frame = tf;
+	rs->pcb = NULL;
+	rs->env = uwx_init();
+	if (rs->env == NULL)
+		return (ENOMEM);
+
+	uwxerr = uwx_register_callbacks(rs->env, (intptr_t)rs,
+	    unw_cb_copyin, unw_cb_lookup);
+	if (uwxerr)
+		return (EINVAL);		/* XXX */
+
+	bsp = tf->tf_special.bspstore + tf->tf_special.ndirty;
+	bsp = ia64_bsp_adjust(bsp, -IA64_CFM_SOF(tf->tf_special.cfm));
+	ip = tf->tf_special.iip + ((tf->tf_special.psr >> 41) & 3);
+
+	uwxerr = uwx_init_context(rs->env, ip, tf->tf_special.sp, bsp,
+	    tf->tf_special.cfm);
+
+	return ((uwxerr) ? EINVAL : 0);		/* XXX */
+}
+
+/*
+ * Initialize an unwind regstate from a PCB. A __spare value of ~0UL
+ * marks a PCB carrying trapframe-style state (iip/psr/cfm/ndirty
+ * valid); otherwise the context was saved by savectx() and the resume
+ * point is rp with pfs as the frame marker. Returns 0, ENOMEM or
+ * EINVAL.
+ */
+int
+unw_create_from_pcb(struct unw_regstate *rs, struct pcb *pcb)
+{
+	uint64_t bsp, cfm, ip;
+	int uwxerr;
+
+	rs->frame = NULL;
+	rs->pcb = pcb;
+	rs->env = uwx_init();
+	if (rs->env == NULL)
+		return (ENOMEM);
+
+	uwxerr = uwx_register_callbacks(rs->env, (intptr_t)rs,
+	    unw_cb_copyin, unw_cb_lookup);
+	if (uwxerr)
+		return (EINVAL);		/* XXX */
+
+	bsp = pcb->pcb_special.bspstore;
+	if (pcb->pcb_special.__spare == ~0UL) {
+		/* Trapframe-style state: IP from iip + PSR.ri slot. */
+		ip = pcb->pcb_special.iip + ((pcb->pcb_special.psr >> 41) & 3);
+		cfm = pcb->pcb_special.cfm;
+		bsp += pcb->pcb_special.ndirty;
+		bsp = ia64_bsp_adjust(bsp, -IA64_CFM_SOF(cfm));
+	} else {
+		/* savectx()-style state: resume at rp, frame from pfs. */
+		ip = pcb->pcb_special.rp;
+		cfm = pcb->pcb_special.pfs;
+		bsp = ia64_bsp_adjust(bsp, -IA64_CFM_SOL(cfm));
+	}
+	uwxerr = uwx_init_context(rs->env, ip, pcb->pcb_special.sp, bsp, cfm);
+
+	return ((uwxerr) ? EINVAL : 0);		/* XXX */
+}
+
+/* Tear down an unwind regstate, releasing its libuwx environment. */
+void
+unw_delete(struct unw_regstate *rs)
+{
+
+	if (rs->env == NULL)
+		return;
+	uwx_free(rs->env);
+}
+
+/*
+ * Advance the unwind state one frame. Returns 0 on success,
+ * EJUSTRETURN when the bottom of the call stack is reached, ERESTART
+ * when an ABI marker frame is crossed, and EINVAL on any other
+ * libuwx error.
+ */
+int
+unw_step(struct unw_regstate *rs)
+{
+	int uwxerr;
+
+	uwxerr = uwx_step(rs->env);
+	if (uwxerr == UWX_OK)
+		return (0);
+	if (uwxerr == UWX_BOTTOM)
+		return (EJUSTRETURN);
+	if (uwxerr == UWX_ABI_FRAME)
+		return (ERESTART);
+	return (EINVAL);		/* XXX */
+}
+
+/* Fetch the backing store pointer of the current unwind frame. */
+int
+unw_get_bsp(struct unw_regstate *s, uint64_t *r)
+{
+
+	if (uwx_get_reg(s->env, UWX_REG_BSP, r) != 0)
+		return (EINVAL);	/* XXX */
+	return (0);
+}
+
+/* Fetch the current frame marker of the current unwind frame. */
+int
+unw_get_cfm(struct unw_regstate *s, uint64_t *r)
+{
+
+	if (uwx_get_reg(s->env, UWX_REG_CFM, r) != 0)
+		return (EINVAL);	/* XXX */
+	return (0);
+}
+
+/* Fetch the instruction pointer of the current unwind frame. */
+int
+unw_get_ip(struct unw_regstate *s, uint64_t *r)
+{
+
+	if (uwx_get_reg(s->env, UWX_REG_IP, r) != 0)
+		return (EINVAL);	/* XXX */
+	return (0);
+}
+
+/* Fetch the stack pointer of the current unwind frame. */
+int
+unw_get_sp(struct unw_regstate *s, uint64_t *r)
+{
+
+	if (uwx_get_reg(s->env, UWX_REG_SP, r) != 0)
+		return (EINVAL);	/* XXX */
+	return (0);
+}
+
+/*
+ * Register an unwind table for the module loaded at 'base', whose
+ * entries span [start, end). The table's IP limit is derived from the
+ * last entry's procedure end. Allocation uses M_WAITOK, so it cannot
+ * fail; always returns 0.
+ */
+int
+unw_table_add(uint64_t base, uint64_t start, uint64_t end)
+{
+	struct unw_table *ut;
+
+	ut = malloc(sizeof(struct unw_table), M_UNWIND, M_WAITOK);
+	ut->ut_base = base;
+	ut->ut_start = (struct unw_entry*)start;
+	ut->ut_end = (struct unw_entry*)end;
+	/* ut_end[-1] is the last entry; its ue_end bounds the module. */
+	ut->ut_limit = base + ut->ut_end[-1].ue_end;
+	LIST_INSERT_HEAD(&unw_tables, ut, ut_link);
+
+	if (bootverbose)
+		printf("UNWIND: table added: base=%lx, start=%lx, end=%lx\n",
+		    base, start, end);
+
+	return (0);
+}
+
+/*
+ * Unregister and free the unwind table that covers 'base'. Nothing
+ * happens when no such table is registered.
+ */
+void
+unw_table_remove(uint64_t base)
+{
+	struct unw_table *ut;
+
+	ut = unw_table_lookup(base);
+	if (ut == NULL)
+		return;
+	LIST_REMOVE(ut, ut_link);
+	free(ut, M_UNWIND);
+	if (bootverbose)
+		printf("UNWIND: table removed: base=%lx\n", base);
+}
+
+/*
+ * One-time setup, run via SYSINIT once kernel malloc works: empty the
+ * table list, hook our allocator into libuwx and, with KDB, carve out
+ * the debugger-time heap as one big free chunk (sized in the 16-byte
+ * units used by unw_alloc/unw_free).
+ */
+static void
+unw_initialize(void *dummy __unused)
+{
+
+	LIST_INIT(&unw_tables);
+	uwx_register_alloc_cb(unw_alloc, unw_free);
+#ifdef KDB
+	kdbheap = malloc(KDBHEAPSZ, M_UNWIND, M_WAITOK);
+	kdbheap->sig = MSIG_FREE;
+	kdbheap->size = (KDBHEAPSZ - sizeof(struct mhdr)) >> 4;
+	kdbheap->next = -1;
+	kdbheap->prev = -1;
+#endif
+}
+SYSINIT(unwind, SI_SUB_KMEM, SI_ORDER_ANY, unw_initialize, 0);


Property changes on: trunk/sys/ia64/ia64/unwind.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/vm_machdep.c
===================================================================
--- trunk/sys/ia64/ia64/vm_machdep.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/vm_machdep.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,364 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * Copyright (c) 1989, 1990 William Jolitz
+ * Copyright (c) 1994 John Dyson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
+ *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
+ * $FreeBSD: stable/10/sys/ia64/ia64/vm_machdep.c 255289 2013-09-06 05:37:49Z glebius $
+ */
+/*-
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ * 
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ * 
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ * 
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution at CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/sysent.h>
+#include <sys/vnode.h>
+#include <sys/vmmeter.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/sysctl.h>
+#include <sys/unistd.h>
+
+#include <machine/cpu.h>
+#include <machine/fpu.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+
+/*
+ * MD hook for thread exit: discard the thread's high FP register state
+ * so it is no longer bound to any CPU.
+ */
+void
+cpu_thread_exit(struct thread *td)
+{
+
+	/* Throw away the high FP registers. */
+	ia64_highfp_drop(td);
+}
+
+/* MD hook for recycling a thread: nothing to do on ia64. */
+void
+cpu_thread_clean(struct thread *td)
+{
+}
+
+/*
+ * Carve the PCB and the trapframe out of the top of the thread's
+ * kernel stack and record the frame's self-describing length.
+ */
+void
+cpu_thread_alloc(struct thread *td)
+{
+	intptr_t top;
+
+	top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
+	top -= sizeof(struct pcb);
+	td->td_pcb = (struct pcb *)top;
+	top -= sizeof(struct trapframe);
+	td->td_frame = (struct trapframe *)top;
+	td->td_frame->tf_length = sizeof(struct trapframe);
+}
+
+/* MD hook for freeing a thread: nothing to do on ia64. */
+void
+cpu_thread_free(struct thread *td)
+{
+}
+
+/* MD hook for swapping a thread back in: nothing to do on ia64. */
+void
+cpu_thread_swapin(struct thread *td)
+{
+}
+
+/*
+ * MD hook for swapping a thread out: spill the high FP registers into
+ * the PCB first, since the CPU-resident copy will be lost.
+ */
+void
+cpu_thread_swapout(struct thread *td)
+{
+
+	ia64_highfp_save(td);
+}
+
+/*
+ * Deposit a syscall's result into the thread's trapframe. On success
+ * the two return values go in r8/r9; on failure the sysent-translated
+ * error code goes in r8. EJUSTRETURN leaves the frame untouched and
+ * ERESTART leaves only the raw code in r10 for do_ast() to act on.
+ */
+void
+cpu_set_syscall_retval(struct thread *td, int error)
+{
+	struct proc *p;
+	struct trapframe *tf;
+
+	if (error == EJUSTRETURN)
+		return;
+
+	tf = td->td_frame;
+
+	/*
+	 * Save the "raw" error code in r10. We use this to handle
+	 * syscall restarts (see do_ast()).
+	 */
+	tf->tf_scratch.gr10 = error;
+	if (error == 0) {
+		tf->tf_scratch.gr8 = td->td_retval[0];
+		tf->tf_scratch.gr9 = td->td_retval[1];
+	} else if (error != ERESTART) {
+		p = td->td_proc;
+		if (error < p->p_sysent->sv_errsize)
+			error = p->p_sysent->sv_errtbl[error];
+		/*
+		 * Translated error codes are returned in r8. User
+		 * space presumably distinguishes error from success
+		 * via the raw code in r10 -- TODO confirm against the
+		 * libc syscall stubs.
+		 */
+		tf->tf_scratch.gr8 = error;
+	}
+}
+
+/*
+ * Initialize thread 'td' so it starts out as a copy of 'td0' returning
+ * from a syscall: clone td0's trapframe, make it look like a fresh
+ * FRAME_SYSCALL return (child sees r8=0, r9=1, r10=0) and point the
+ * cloned PCB at an empty backing store and fork_trampoline.
+ */
+void
+cpu_set_upcall(struct thread *td, struct thread *td0)
+{
+	struct pcb *pcb;
+	struct trapframe *tf;
+
+	/* Make sure td0's high FP state is in its PCB before copying. */
+	ia64_highfp_save(td0);
+
+	tf = td->td_frame;
+	KASSERT(tf != NULL, ("foo"));
+	bcopy(td0->td_frame, tf, sizeof(*tf));
+	tf->tf_length = sizeof(struct trapframe);
+	tf->tf_flags = FRAME_SYSCALL;
+	tf->tf_special.ndirty = 0;
+	/* Align bspstore down to a 512-byte RSE boundary. */
+	tf->tf_special.bspstore &= ~0x1ffUL;
+	tf->tf_scratch.gr8 = 0;
+	tf->tf_scratch.gr9 = 1;
+	tf->tf_scratch.gr10 = 0;
+
+	pcb = td->td_pcb;
+	KASSERT(pcb != NULL, ("foo"));
+	bcopy(td0->td_pcb, pcb, sizeof(*pcb));
+	pcb->pcb_special.bspstore = td->td_kstack;
+	pcb->pcb_special.pfs = 0;
+	pcb->pcb_current_pmap = vmspace_pmap(td0->td_proc->p_vmspace);
+	/* Leave a 16-byte scratch area below the trapframe. */
+	pcb->pcb_special.sp = (uintptr_t)tf - 16;
+	pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
+	cpu_set_fork_handler(td, (void (*)(void*))fork_return, td);
+
+	/* Setup to release the spin count in fork_exit(). */
+	td->td_md.md_spinlock_count = 1;
+	td->td_md.md_saved_intr = 1;
+}
+
+/*
+ * Aim thread 'td' at the userland entry point 'entry' (an ia64
+ * function descriptor) with 'arg' as its sole argument, running on the
+ * user-supplied stack. The argument is passed through the register
+ * stack: a one-slot frame whose value is seeded either on the user
+ * backing store (syscall frames) or as a dirty slot on the kernel
+ * stack (interrupt frames).
+ */
+void
+cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
+	stack_t *stack)
+{
+	struct ia64_fdesc *fd;
+	struct trapframe *tf;
+	uint64_t ndirty, sp;
+
+	tf = td->td_frame;
+	ndirty = tf->tf_special.ndirty + (tf->tf_special.bspstore & 0x1ffUL);
+
+	KASSERT((ndirty & ~PAGE_MASK) == 0,
+	    ("Whoa there! We have more than 8KB of dirty registers!"));
+
+	/* The entry point is a function descriptor: fetch iip and gp. */
+	fd = (struct ia64_fdesc *)entry;
+	sp = (uint64_t)stack->ss_sp;
+
+	bzero(&tf->tf_special, sizeof(tf->tf_special));
+	tf->tf_special.iip = fuword(&fd->func);
+	tf->tf_special.gp = fuword(&fd->gp);
+	/* Top of the new stack, 16-byte aligned with a scratch area. */
+	tf->tf_special.sp = (sp + stack->ss_size - 16) & ~15;
+	tf->tf_special.rsc = 0xf;
+	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
+	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
+	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
+	    IA64_PSR_CPL_USER;
+
+	if (tf->tf_flags & FRAME_SYSCALL) {
+		/* One-slot frame; the argument lives on the user
+		 * backing store at the bottom of the stack. */
+		tf->tf_special.cfm = (3UL<<62) | (1UL<<7) | 1UL;
+		tf->tf_special.bspstore = sp + 8;
+		suword((caddr_t)sp, (uint64_t)arg);
+	} else {
+		/* Interrupt frame: seed the argument as a dirty slot on
+		 * the kernel stack, inserting a NaT collection slot if
+		 * the store would land on a 0x1f8 boundary. */
+		tf->tf_special.cfm = (1UL<<63) | (1UL<<7) | 1UL;
+		tf->tf_special.bspstore = sp;
+		tf->tf_special.ndirty = 8;
+		sp = td->td_kstack + ndirty - 8;
+		if ((sp & 0x1ff) == 0x1f8) {
+			*(uint64_t*)sp = 0;
+			tf->tf_special.ndirty += 8;
+			sp -= 8;
+		}
+		*(uint64_t*)sp = (uint64_t)arg;
+	}
+}
+
+/*
+ * Install the user TLS pointer: it is carried in the thread pointer
+ * (tp) slot of the thread's trapframe.  Always returns 0.
+ */
+int
+cpu_set_user_tls(struct thread *td, void *tls_base)
+{
+	td->td_frame->tf_special.tp = (unsigned long)tls_base;
+	return (0);
+}
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the pcb, and set up the stack so that the child is
+ * ready to run and return to user mode.
+ */
+void
+cpu_fork(struct thread *td1, struct proc *p2 __unused, struct thread *td2,
+    int flags)
+{
+	char *stackp;
+	uint64_t ndirty;
+
+	KASSERT(td1 == curthread || td1 == &thread0,
+	    ("cpu_fork: td1 not curthread and not thread0"));
+
+	/* Nothing to do unless a new process is actually being created. */
+	if ((flags & RFPROC) == 0)
+		return;
+
+	/*
+	 * Save the preserved registers and the high FP registers in the
+	 * PCB if we're the parent (ie td1 == curthread) so that we have
+	 * a valid PCB. This also causes a RSE flush. We don't have to
+	 * do that otherwise, because there wouldn't be anything important
+	 * to save.
+	 */
+	if (td1 == curthread) {
+		if (savectx(td1->td_pcb) != 0)
+			panic("unexpected return from savectx()");
+		ia64_highfp_save(td1);
+	}
+
+	/*
+	 * Create the child's kernel stack and backing store. We basically
+	 * create an image of the parent's stack and backing store and
+	 * adjust where necessary.
+	 */
+	stackp = (char *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE);
+
+	/* The PCB sits at the top of the kernel stack... */
+	stackp -= sizeof(struct pcb);
+	td2->td_pcb = (struct pcb *)stackp;
+	bcopy(td1->td_pcb, td2->td_pcb, sizeof(struct pcb));
+
+	/* ...with the trapframe immediately below it. */
+	stackp -= sizeof(struct trapframe);
+	td2->td_frame = (struct trapframe *)stackp;
+	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));
+	td2->td_frame->tf_length = sizeof(struct trapframe);
+	/* Copy the parent's dirty RSE registers onto the child's kstack. */
+	ndirty = td2->td_frame->tf_special.ndirty +
+	    (td2->td_frame->tf_special.bspstore & 0x1ffUL);
+	bcopy((void*)td1->td_kstack, (void*)td2->td_kstack, ndirty);
+
+	/* Set-up the return values as expected by the fork() libc stub. */
+	if (td2->td_frame->tf_special.psr & IA64_PSR_IS) {
+		/* ia32 process (PSR.is set): r8=0, r10=1. */
+		td2->td_frame->tf_scratch.gr8 = 0;
+		td2->td_frame->tf_scratch.gr10 = 1;
+	} else {
+		/* Native ia64: r8=0, r9=1, r10=0. */
+		td2->td_frame->tf_scratch.gr8 = 0;
+		td2->td_frame->tf_scratch.gr9 = 1;
+		td2->td_frame->tf_scratch.gr10 = 0;
+	}
+
+	td2->td_pcb->pcb_special.bspstore = td2->td_kstack + ndirty;
+	td2->td_pcb->pcb_special.pfs = 0;
+	td2->td_pcb->pcb_current_pmap = vmspace_pmap(td2->td_proc->p_vmspace);
+
+	/* Arrange for the child's first switch-in to run fork_trampoline(). */
+	td2->td_pcb->pcb_special.sp = (uintptr_t)stackp - 16;
+	td2->td_pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
+	cpu_set_fork_handler(td2, (void (*)(void*))fork_return, td2);
+
+	/* Setup to release the spin count in fork_exit(). */
+	td2->td_md.md_spinlock_count = 1;
+	td2->td_md.md_saved_intr = 1;
+}
+
+/*
+ * Intercept the return address from a freshly forked process that has NOT
+ * been scheduled yet.
+ *
+ * This is needed to make kernel threads stay in kernel mode.
+ *
+ * The function/argument pair is stashed in scratch registers r2/r3 of
+ * the thread's trapframe for the trampoline code to consume.
+ */
+void
+cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
+{
+
+	td->td_frame->tf_scratch.gr2 = (uint64_t)func;
+	td->td_frame->tf_scratch.gr3 = (uint64_t)arg;
+}
+
+/*
+ * cpu_exit is called as the last action during exit.
+ * We drop the fp state (if we have it) and switch to a live one.
+ *
+ * NOTE(review): on ia64 there is nothing machine-dependent left to
+ * release here, so the body is intentionally empty; the comment above
+ * predates that and describes other platforms' behavior.
+ */
+void
+cpu_exit(struct thread *td)
+{
+}
+
+/*
+ * Software interrupt handler for queued VM system processing.
+ */   
+void  
+swi_vm(void *dummy) 
+{
+
+	/* Run deferred busdma completion work, if any is pending. */
+	if (busdma_swi_pending != 0)
+		busdma_swi();
+}


Property changes on: trunk/sys/ia64/ia64/vm_machdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/ia64/xtrace.c
===================================================================
--- trunk/sys/ia64/ia64/xtrace.c	                        (rev 0)
+++ trunk/sys/ia64/ia64/xtrace.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,221 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_ddb.h"
+#include "opt_xtrace.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/xtrace.c 271211 2014-09-06 22:17:54Z marcel $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/malloc.h>
+#include <sys/pcpu.h>
+#include <machine/md_var.h>
+#include <machine/pte.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+
+#define	XTRACE_LOG2SZ	14	/* 16KB trace buffers */
+
+/*
+ * One exception trace record.  Field names follow the ia64 register
+ * names captured at interruption time (cr.iip, cr.ifa, etc.); itc is
+ * the interval time counter, which serves as a timestamp.
+ */
+struct ia64_xtrace_record {
+	uint64_t	ivt;	/* interruption vector offset */
+	uint64_t	itc;	/* timestamp */
+	uint64_t	iip;
+	uint64_t	ifa;
+	uint64_t	isr;
+	uint64_t	ipsr;
+	uint64_t	itir;
+	uint64_t	iipa;
+
+	uint64_t	ifs;
+	uint64_t	iim;
+	uint64_t	iha;
+	uint64_t	unat;
+	uint64_t	rsc;
+	uint64_t	bsp;
+	uint64_t	tp;
+	uint64_t	sp;
+};
+
+extern uint32_t ia64_xtrace_enabled;
+extern uint64_t ia64_xtrace_mask;
+
+static uint64_t ia64_xtrace_base;
+
+/*
+ * Map this CPU's trace buffer (physical address 'pa') at the fixed
+ * virtual address ia64_xtrace_base using a locked data translation
+ * register (dtr[6]), then publish the buffer through kernel register
+ * k3 and reset the per-CPU read cursor.
+ */
+static void
+ia64_xtrace_init_common(vm_paddr_t pa)
+{
+	uint64_t psr;
+	pt_entry_t pte;
+
+	/* Writable, write-back cacheable kernel mapping. */
+	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
+	    PTE_PL_KERN | PTE_AR_RW;
+	pte |= pa & PTE_PPN_MASK;
+
+	/* Purge any translation overlapping the target region. */
+	__asm __volatile("ptr.d %0,%1" :: "r"(ia64_xtrace_base),
+	    "r"(XTRACE_LOG2SZ << 2));
+
+	/* Disable interrupts and interruption collection around the insert. */
+	__asm __volatile("mov   %0=psr" : "=r"(psr));
+	__asm __volatile("rsm   psr.ic|psr.i");
+	ia64_srlz_i();
+
+	ia64_set_ifa(ia64_xtrace_base);
+	ia64_set_itir(XTRACE_LOG2SZ << 2);
+	ia64_srlz_d();
+	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(6), "r"(pte));
+
+	/* Restore the previously saved PSR. */
+	__asm __volatile("mov   psr.l=%0" :: "r" (psr));
+	ia64_srlz_i();
+
+	/* k3 is the write head (see ia64_xtrace_save); tail trails it. */
+	pcpup->pc_md.xtrace_tail = ia64_xtrace_base;
+	ia64_set_k3(ia64_xtrace_base);
+}
+
+/*
+ * Allocate a zeroed, physically contiguous trace buffer of
+ * 1 << XTRACE_LOG2SZ bytes, aligned to its own size.  May sleep
+ * (M_WAITOK).  Used for APs; the BSP allocates its buffer in
+ * ia64_xtrace_init_bsp() instead.
+ */
+void *
+ia64_xtrace_alloc(void)
+{
+	uintptr_t buf;
+	size_t sz;
+
+	sz = 1UL << XTRACE_LOG2SZ;
+	buf = kmem_alloc_contig(kernel_arena, sz, M_WAITOK | M_ZERO,
+	    0UL, ~0UL, sz, 0, VM_MEMATTR_DEFAULT);
+	return ((void *)buf);
+}
+
+/*
+ * AP-side initialization: install the trace buffer previously obtained
+ * from ia64_xtrace_alloc().  Passing NULL disables tracing on this CPU
+ * by clearing k3.
+ */
+void
+ia64_xtrace_init_ap(void *buf)
+{
+	vm_paddr_t pa;
+
+	if (buf == NULL) {
+		ia64_set_k3(0);
+		return;
+	}
+	pcpup->pc_md.xtrace_buffer = buf;
+	pa = ia64_tpa((uintptr_t)buf);
+	ia64_xtrace_init_common(pa);
+}
+
+/*
+ * BSP-side initialization: choose the fixed mapping address and the
+ * wrap mask, allocate the buffer from the physical memory allocator
+ * (ia64_physmem_alloc() — presumably because this runs before kmem is
+ * usable; confirm against boot order), and install the mapping.  On
+ * allocation failure, tracing is disabled by clearing k3.
+ */
+void
+ia64_xtrace_init_bsp(void)
+{
+	void *buf;
+	vm_paddr_t pa;
+	size_t sz;
+
+	sz = 1UL << XTRACE_LOG2SZ;
+	ia64_xtrace_base = VM_MIN_KERNEL_ADDRESS + (sz << 1);
+	/* Clearing the size bit wraps an off-the-end pointer to the base. */
+	ia64_xtrace_mask = ~sz;
+
+	buf = ia64_physmem_alloc(sz, sz);
+	if (buf == NULL) {
+		ia64_set_k3(0);
+		return;
+	}
+	pcpup->pc_md.xtrace_buffer = buf;
+	pa = IA64_RR_MASK((uintptr_t)buf);
+	ia64_xtrace_init_common(pa);
+}
+
+/*
+ * SYSINIT hook: honor the machdep.xtrace.enabled loader tunable.
+ *
+ * NOTE(review): ia64_xtrace_enabled is uint32_t while
+ * TUNABLE_INT_FETCH expects an int *; this relies on the two types
+ * having identical representation.
+ */
+static void
+ia64_xtrace_init(void *dummy __unused)
+{
+
+	TUNABLE_INT_FETCH("machdep.xtrace.enabled", &ia64_xtrace_enabled);
+}
+SYSINIT(xtrace, SI_SUB_CPU, SI_ORDER_ANY, ia64_xtrace_init, NULL);
+
+/*
+ * Drain this CPU's exception trace records into the KTR stream.
+ * The head (kernel register k3) is where the exception path writes
+ * next; pc_md.xtrace_tail is our read cursor.  Runs in a critical
+ * section so the thread stays on this CPU while using per-CPU state.
+ */
+void
+ia64_xtrace_save(void)
+{
+	struct ia64_xtrace_record *rec;
+	uint64_t head, tail;
+
+	critical_enter();
+	head = ia64_get_k3();
+	tail = PCPU_GET(md.xtrace_tail);
+	/* Either zero means tracing is disabled on this CPU. */
+	if (head == 0 || tail == 0) {
+		critical_exit();
+		return;
+	}
+	while (head != tail) {
+		rec = (void *)(uintptr_t)tail;
+		CTR6(KTR_TRAP, "XTRACE: itc=%lu, ticks=%d: "
+		    "IVT=%#lx, IIP=%#lx, IFA=%#lx, ISR=%#lx",
+		    rec->itc, ticks,
+		    rec->ivt, rec->iip, rec->ifa, rec->isr);
+		tail += sizeof(*rec);
+		/* Wrap at the end of the buffer. */
+		tail &= ia64_xtrace_mask;
+	}
+	PCPU_SET(md.xtrace_tail, tail);
+	critical_exit();
+}
+
+/*
+ * Disable exception tracing by clearing the global enable flag
+ * (declared extern here; presumably tested by the low-level exception
+ * handlers — confirm against exception.S).
+ */
+void
+ia64_xtrace_stop(void)
+{
+	ia64_xtrace_enabled = 0;
+}
+
+#if 0
+#ifdef DDB
+
+#include <ddb/ddb.h>
+
+/*
+ * NOTE(review): this debugger command is disabled (#if 0).  It
+ * references symbols (ia64_xtptr, ia64_xtptr1, ia64_xtbase,
+ * ia64_xtlim) that are not defined in this file; it would need to be
+ * rewritten against the current per-CPU buffer scheme before it can
+ * be enabled.
+ */
+DB_SHOW_COMMAND(xtrace, db_xtrace)
+{
+        struct ia64_xtrace_record *p, *r;
+
+        p = (ia64_xtptr == 0) ? ia64_xtptr1 : ia64_xtptr;
+        if (p == 0) {
+                db_printf("Exception trace buffer not allocated\n");
+                return;
+        }
+
+        r = (p->ivt == 0) ? ia64_xtbase : p;
+        if (r->ivt == 0) {
+                db_printf("No exception trace records written\n");
+                return;
+        }
+
+        db_printf("IVT\t\t ITC\t\t  IIP\t\t   IFA\n");
+        do {
+                db_printf("%016lx %016lx %016lx %016lx\n",
+                    r->ivt, r->itc, r->iip, r->ifa);
+                r++;
+                if (r == ia64_xtlim)
+                        r = ia64_xtbase;
+        } while (r != p);
+}
+
+#endif /* DDB */
+#endif


Property changes on: trunk/sys/ia64/ia64/xtrace.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/_align.h
===================================================================
--- trunk/sys/ia64/include/_align.h	                        (rev 0)
+++ trunk/sys/ia64/include/_align.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,54 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/_align.h 196994 2009-09-08 20:45:40Z phk $ */
+/* From: NetBSD: param.h,v 1.20 1997/09/19 13:52:53 leo Exp */
+
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: machparam.h 1.11 89/08/14$
+ *
+ *	@(#)param.h	8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _IA64_INCLUDE__ALIGN_H_
+#define	_IA64_INCLUDE__ALIGN_H_
+
+/*
+ * Round p (pointer or byte index) up to a correctly-aligned value for all
+ * data types (int, long, ...).   The result is u_long and must be cast to
+ * any desired pointer type.
+ */
+#define	_ALIGNBYTES		15	/* 16-byte alignment on ia64 */
+#define	_ALIGN(p)		(((u_long)(p) + _ALIGNBYTES) &~ _ALIGNBYTES)
+
+#endif /* !_IA64_INCLUDE__ALIGN_H_ */


Property changes on: trunk/sys/ia64/include/_align.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/_bus.h
===================================================================
--- trunk/sys/ia64/include/_bus.h	                        (rev 0)
+++ trunk/sys/ia64/include/_bus.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,47 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2005 M. Warner Losh.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/_bus.h 145253 2005-04-18 21:45:34Z imp $
+ */
+
+#ifndef IA64_INCLUDE__BUS_H
+#define IA64_INCLUDE__BUS_H
+
+/*
+ * Bus address and size types
+ */
+typedef u_long bus_addr_t;
+typedef u_long bus_size_t;
+
+/*
+ * Access methods for bus resources and address space.
+ * NOTE(review): the tag presumably distinguishes I/O-port from memory
+ * space — confirm against ia64/ia64/bus_machdep.c.
+ */
+typedef	int bus_space_tag_t;
+typedef	u_long bus_space_handle_t;
+
+#endif /* IA64_INCLUDE__BUS_H */


Property changes on: trunk/sys/ia64/include/_bus.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/_inttypes.h
===================================================================
--- trunk/sys/ia64/include/_inttypes.h	                        (rev 0)
+++ trunk/sys/ia64/include/_inttypes.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,214 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *	From: $NetBSD: int_fmtio.h,v 1.2 2001/04/26 16:25:21 kleink Exp $
+ * $FreeBSD: stable/10/sys/ia64/include/_inttypes.h 204646 2010-03-03 17:55:51Z joel $
+ */
+
+#ifndef _MACHINE_INTTYPES_H_
+#define _MACHINE_INTTYPES_H_
+
+/*
+ * Macros for format specifiers.
+ */
+
+/* fprintf(3) macros for signed integers. */
+
+#define	PRId8		"d"	/* int8_t */
+#define	PRId16		"d"	/* int16_t */
+#define	PRId32		"d"	/* int32_t */
+#define	PRId64		"ld"	/* int64_t */
+#define	PRIdLEAST8	"d"	/* int_least8_t */
+#define	PRIdLEAST16	"d"	/* int_least16_t */
+#define	PRIdLEAST32	"d"	/* int_least32_t */
+#define	PRIdLEAST64	"ld"	/* int_least64_t */
+#define	PRIdFAST8	"d"	/* int_fast8_t */
+#define	PRIdFAST16	"d"	/* int_fast16_t */
+#define	PRIdFAST32	"d"	/* int_fast32_t */
+#define	PRIdFAST64	"ld"	/* int_fast64_t */
+#define	PRIdMAX		"jd"	/* intmax_t */
+#define	PRIdPTR		"ld"	/* intptr_t */
+
+#define	PRIi8		"i"	/* int8_t */
+#define	PRIi16		"i"	/* int16_t */
+#define	PRIi32		"i"	/* int32_t */
+#define	PRIi64		"li"	/* int64_t */
+#define	PRIiLEAST8	"i"	/* int_least8_t  */
+#define	PRIiLEAST16	"i"	/* int_least16_t */
+#define	PRIiLEAST32	"i"	/* int_least32_t */
+#define	PRIiLEAST64	"li"	/* int_least64_t */
+#define	PRIiFAST8	"i"	/* int_fast8_t */
+#define	PRIiFAST16	"i"	/* int_fast16_t */
+#define	PRIiFAST32	"i"	/* int_fast32_t */
+#define	PRIiFAST64	"li"	/* int_fast64_t */
+#define	PRIiMAX		"ji"	/* intmax_t */
+#define	PRIiPTR		"li"	/* intptr_t */
+
+/* fprintf(3) macros for unsigned integers. */
+
+#define	PRIo8		"o"	/* uint8_t */
+#define	PRIo16		"o"	/* uint16_t */
+#define	PRIo32		"o"	/* uint32_t */
+#define	PRIo64		"lo"	/* uint64_t */
+#define	PRIoLEAST8	"o"	/* uint_least8_t */
+#define	PRIoLEAST16	"o"	/* uint_least16_t */
+#define	PRIoLEAST32	"o"	/* uint_least32_t */
+#define	PRIoLEAST64	"lo"	/* uint_least64_t */
+#define	PRIoFAST8	"o"	/* uint_fast8_t */
+#define	PRIoFAST16	"o"	/* uint_fast16_t */
+#define	PRIoFAST32	"o"	/* uint_fast32_t */
+#define	PRIoFAST64	"lo"	/* uint_fast64_t */
+#define	PRIoMAX		"jo"	/* uintmax_t */
+#define	PRIoPTR		"lo"	/* uintptr_t */
+
+#define	PRIu8		"u"	/* uint8_t */
+#define	PRIu16		"u"	/* uint16_t */
+#define	PRIu32		"u"	/* uint32_t */
+#define	PRIu64		"lu"	/* uint64_t */
+#define	PRIuLEAST8	"u"	/* uint_least8_t */
+#define	PRIuLEAST16	"u"	/* uint_least16_t */
+#define	PRIuLEAST32	"u"	/* uint_least32_t */
+#define	PRIuLEAST64	"lu"	/* uint_least64_t */
+#define	PRIuFAST8	"u"	/* uint_fast8_t */
+#define	PRIuFAST16	"u"	/* uint_fast16_t */
+#define	PRIuFAST32	"u"	/* uint_fast32_t */
+#define	PRIuFAST64	"lu"	/* uint_fast64_t */
+#define	PRIuMAX		"ju"	/* uintmax_t */
+#define	PRIuPTR		"lu"	/* uintptr_t */
+
+#define	PRIx8		"x"	/* uint8_t */
+#define	PRIx16		"x"	/* uint16_t */
+#define	PRIx32		"x"	/* uint32_t */
+#define	PRIx64		"lx"	/* uint64_t */
+#define	PRIxLEAST8	"x"	/* uint_least8_t */
+#define	PRIxLEAST16	"x"	/* uint_least16_t */
+#define	PRIxLEAST32	"x"	/* uint_least32_t */
+#define	PRIxLEAST64	"lx"	/* uint_least64_t */
+#define	PRIxFAST8	"x"	/* uint_fast8_t */
+#define	PRIxFAST16	"x"	/* uint_fast16_t */
+#define	PRIxFAST32	"x"	/* uint_fast32_t */
+#define	PRIxFAST64	"lx"	/* uint_fast64_t */
+#define	PRIxMAX		"jx"	/* uintmax_t */
+#define	PRIxPTR		"lx"	/* uintptr_t */
+
+#define	PRIX8		"X"	/* uint8_t */
+#define	PRIX16		"X"	/* uint16_t */
+#define	PRIX32		"X"	/* uint32_t */
+#define	PRIX64		"lX"	/* uint64_t */
+#define	PRIXLEAST8	"X"	/* uint_least8_t */
+#define	PRIXLEAST16	"X"	/* uint_least16_t */
+#define	PRIXLEAST32	"X"	/* uint_least32_t */
+#define	PRIXLEAST64	"lX"	/* uint_least64_t */
+#define	PRIXFAST8	"X"	/* uint_fast8_t */
+#define	PRIXFAST16	"X"	/* uint_fast16_t */
+#define	PRIXFAST32	"X"	/* uint_fast32_t */
+#define	PRIXFAST64	"lX"	/* uint_fast64_t */
+#define	PRIXMAX		"jX"	/* uintmax_t */
+#define	PRIXPTR		"lX"	/* uintptr_t */
+
+/* fscanf(3) macros for signed integers. */
+
+#define	SCNd8		"hhd"	/* int8_t */
+#define	SCNd16		"hd"	/* int16_t */
+#define	SCNd32		"d"	/* int32_t */
+#define	SCNd64		"ld"	/* int64_t */
+#define	SCNdLEAST8	"hhd"	/* int_least8_t */
+#define	SCNdLEAST16	"hd"	/* int_least16_t */
+#define	SCNdLEAST32	"d"	/* int_least32_t */
+#define	SCNdLEAST64	"ld"	/* int_least64_t */
+#define	SCNdFAST8	"d"	/* int_fast8_t */
+#define	SCNdFAST16	"d"	/* int_fast16_t */
+#define	SCNdFAST32	"d"	/* int_fast32_t */
+#define	SCNdFAST64	"ld"	/* int_fast64_t */
+#define	SCNdMAX		"jd"	/* intmax_t */
+#define	SCNdPTR		"ld"	/* intptr_t */
+
+#define	SCNi8		"hhi"	/* int8_t */
+#define	SCNi16		"hi"	/* int16_t */
+#define	SCNi32		"i"	/* int32_t */
+#define	SCNi64		"li"	/* int64_t */
+#define	SCNiLEAST8	"hhi"	/* int_least8_t */
+#define	SCNiLEAST16	"hi"	/* int_least16_t */
+#define	SCNiLEAST32	"i"	/* int_least32_t */
+#define	SCNiLEAST64	"li"	/* int_least64_t */
+#define	SCNiFAST8	"i"	/* int_fast8_t */
+#define	SCNiFAST16	"i"	/* int_fast16_t */
+#define	SCNiFAST32	"i"	/* int_fast32_t */
+#define	SCNiFAST64	"li"	/* int_fast64_t */
+#define	SCNiMAX		"ji"	/* intmax_t */
+#define	SCNiPTR		"li"	/* intptr_t */
+
+/* fscanf(3) macros for unsigned integers. */
+
+#define	SCNo8		"hho"	/* uint8_t */
+#define	SCNo16		"ho"	/* uint16_t */
+#define	SCNo32		"o"	/* uint32_t */
+#define	SCNo64		"lo"	/* uint64_t */
+#define	SCNoLEAST8	"hho"	/* uint_least8_t */
+#define	SCNoLEAST16	"ho"	/* uint_least16_t */
+#define	SCNoLEAST32	"o"	/* uint_least32_t */
+#define	SCNoLEAST64	"lo"	/* uint_least64_t */
+#define	SCNoFAST8	"o"	/* uint_fast8_t */
+#define	SCNoFAST16	"o"	/* uint_fast16_t */
+#define	SCNoFAST32	"o"	/* uint_fast32_t */
+#define	SCNoFAST64	"lo"	/* uint_fast64_t */
+#define	SCNoMAX		"jo"	/* uintmax_t */
+#define	SCNoPTR		"lo"	/* uintptr_t */
+
+#define	SCNu8		"hhu"	/* uint8_t */
+#define	SCNu16		"hu"	/* uint16_t */
+#define	SCNu32		"u"	/* uint32_t */
+#define	SCNu64		"lu"	/* uint64_t */
+#define	SCNuLEAST8	"hhu"	/* uint_least8_t */
+#define	SCNuLEAST16	"hu"	/* uint_least16_t */
+#define	SCNuLEAST32	"u"	/* uint_least32_t */
+#define	SCNuLEAST64	"lu"	/* uint_least64_t */
+#define	SCNuFAST8	"u"	/* uint_fast8_t */
+#define	SCNuFAST16	"u"	/* uint_fast16_t */
+#define	SCNuFAST32	"u"	/* uint_fast32_t */
+#define	SCNuFAST64	"lu"	/* uint_fast64_t */
+#define	SCNuMAX		"ju"	/* uintmax_t */
+#define	SCNuPTR		"lu"	/* uintptr_t */
+
+#define	SCNx8		"hhx"	/* uint8_t */
+#define	SCNx16		"hx"	/* uint16_t */
+#define	SCNx32		"x"	/* uint32_t */
+#define	SCNx64		"lx"	/* uint64_t */
+#define	SCNxLEAST8	"hhx"	/* uint_least8_t */
+#define	SCNxLEAST16	"hx"	/* uint_least16_t */
+#define	SCNxLEAST32	"x"	/* uint_least32_t */
+#define	SCNxLEAST64	"lx"	/* uint_least64_t */
+#define	SCNxFAST8	"x"	/* uint_fast8_t */
+#define	SCNxFAST16	"x"	/* uint_fast16_t */
+#define	SCNxFAST32	"x"	/* uint_fast32_t */
+#define	SCNxFAST64	"lx"	/* uint_fast64_t */
+#define	SCNxMAX		"jx"	/* uintmax_t */
+#define	SCNxPTR		"lx"	/* uintptr_t */
+
+#endif /* !_MACHINE_INTTYPES_H_ */


Property changes on: trunk/sys/ia64/include/_inttypes.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/_limits.h
===================================================================
--- trunk/sys/ia64/include/_limits.h	                        (rev 0)
+++ trunk/sys/ia64/include/_limits.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,91 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/_limits.h 217145 2011-01-08 11:13:34Z tijl $ */
+/* From: NetBSD: limits.h,v 1.3 1997/04/06 08:47:31 cgd Exp */
+
+/*-
+ * Copyright (c) 1988, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)limits.h	8.3 (Berkeley) 1/4/94
+ */
+
+#ifndef _MACHINE__LIMITS_H_
+#define	_MACHINE__LIMITS_H_
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives.  Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions.  The subtraction for
+ * INT_MIN, etc., is so the value is not unsigned; e.g., 0x80000000 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ */
+
+#define	__CHAR_BIT	8		/* number of bits in a char */
+
+#define	__SCHAR_MAX	0x7f		/* max value for a signed char */
+#define	__SCHAR_MIN	(-0x7f-1)	/* min value for a signed char */
+
+#define	__UCHAR_MAX	0xff		/* max value for an unsigned char */
+
+#define	__USHRT_MAX	0xffff		/* max value for an unsigned short */
+#define	__SHRT_MAX	0x7fff		/* max value for a short */
+#define	__SHRT_MIN	(-0x7fff-1)	/* min value for a short */
+
+#define	__UINT_MAX	0xffffffff	/* max value for an unsigned int */
+#define	__INT_MAX	0x7fffffff	/* max value for an int */
+#define	__INT_MIN	(-0x7fffffff-1)	/* min value for an int */
+
+#define	__ULONG_MAX	0xffffffffffffffff	/* max for an unsigned long */
+#define	__LONG_MAX	0x7fffffffffffffff	/* max for a long */
+#define	__LONG_MIN	(-0x7fffffffffffffff-1) /* min for a long */
+
+/* Long longs have the same size but not the same type as longs. */
+					/* max for an unsigned long long */
+#define	__ULLONG_MAX	0xffffffffffffffffULL
+#define	__LLONG_MAX	0x7fffffffffffffffLL	/* max for a long long */
+#define	__LLONG_MIN	(-0x7fffffffffffffffLL-1) /* min for a long long */
+
+#define	__SSIZE_MAX	__LONG_MAX	/* max value for a ssize_t */
+
+#define	__SIZE_T_MAX	__ULONG_MAX	/* max value for a size_t */
+
+#define	__OFF_MAX	__LONG_MAX	/* max value for an off_t */
+#define	__OFF_MIN	__LONG_MIN	/* min value for an off_t */
+
+/* Quads and longs are the same.  Ensure they stay in sync. */
+#define	__UQUAD_MAX	(__ULONG_MAX)	/* max value for a uquad_t */
+#define	__QUAD_MAX	(__LONG_MAX)	/* max value for a quad_t */
+#define	__QUAD_MIN	(__LONG_MIN)	/* min value for a quad_t */
+
+#define	__LONG_BIT	64
+#define	__WORD_BIT	32
+
+/* Minimum signal stack size. */
+#define	__MINSIGSTKSZ	(3072 * 4)
+
+#endif /* !_MACHINE__LIMITS_H_ */


Property changes on: trunk/sys/ia64/include/_limits.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/_regset.h
===================================================================
--- trunk/sys/ia64/include/_regset.h	                        (rev 0)
+++ trunk/sys/ia64/include/_regset.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,276 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002, 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/_regset.h 139790 2005-01-06 22:18:23Z imp $
+ */
+
+#ifndef _MACHINE_REGSET_H_
+#define	_MACHINE_REGSET_H_
+
+/*
+ * Create register sets, based on the runtime specification. This allows
+ * us to better reuse code and to copy sets around more efficiently.
+ * Contexts are defined in terms of these sets. These include trapframe,
+ * sigframe, pcb, mcontext, reg and fpreg. Other candidates are unwind
+ * and coredump related contexts.
+ *
+ * Notes:
+ * o  Constant registers (r0, f0 and f1) are not accounted for,
+ * o  The stacked registers (r32-r127) are not accounted for,
+ * o  Predicates are not split across sets.
+ */
+
+/*
+ * A single FP register, viewable either as the 16 raw bytes of its
+ * memory (spill) image or as a long double for FP access.
+ */
+union _ia64_fpreg {
+	unsigned char	fpr_bits[16];
+	long double	fpr_flt;
+};
+
+/*
+ * Special registers.
+ *
+ * Grouped by when they are saved: always, only for userland
+ * context/syscalls, and only for asynchronous (interrupt) entry.
+ * Register names follow the Itanium architecture conventions.
+ */
+struct _special {
+	unsigned long		sp;		/* memory stack pointer (r12) */
+	unsigned long		unat;		/* NaT before spilling */
+	unsigned long		rp;		/* return pointer (b0) */
+	unsigned long		pr;		/* predicate registers */
+	unsigned long		pfs;		/* ar.pfs: previous function state */
+	unsigned long		bspstore;	/* ar.bspstore: RSE backing store */
+	unsigned long		rnat;		/* ar.rnat: RSE NaT collection */
+	unsigned long		__spare;	/* pad; keeps layout 16-byte friendly */
+	/* Userland context and syscalls */
+	unsigned long		tp;		/* thread pointer (r13) */
+	unsigned long		rsc;		/* ar.rsc: register stack config */
+	unsigned long		fpsr;		/* ar.fpsr: FP status register */
+	unsigned long		psr;		/* processor status register */
+	/* ASYNC: Interrupt specific */
+	unsigned long		gp;		/* global pointer (r1) */
+	unsigned long		ndirty;		/* dirty part of register stack —
+						   TODO confirm units (bytes vs slots) */
+	unsigned long		cfm;		/* current frame marker */
+	unsigned long		iip;		/* interruption instruction pointer */
+	unsigned long		ifa;		/* interruption faulting address */
+	unsigned long		isr;		/* interruption status register */
+};
+
+/*
+ * The high FP partition: registers f32-f127, saved and restored as a
+ * unit by save_high_fp()/restore_high_fp().
+ */
+struct _high_fp {
+	union _ia64_fpreg	fr32;
+	union _ia64_fpreg	fr33;
+	union _ia64_fpreg	fr34;
+	union _ia64_fpreg	fr35;
+	union _ia64_fpreg	fr36;
+	union _ia64_fpreg	fr37;
+	union _ia64_fpreg	fr38;
+	union _ia64_fpreg	fr39;
+	union _ia64_fpreg	fr40;
+	union _ia64_fpreg	fr41;
+	union _ia64_fpreg	fr42;
+	union _ia64_fpreg	fr43;
+	union _ia64_fpreg	fr44;
+	union _ia64_fpreg	fr45;
+	union _ia64_fpreg	fr46;
+	union _ia64_fpreg	fr47;
+	union _ia64_fpreg	fr48;
+	union _ia64_fpreg	fr49;
+	union _ia64_fpreg	fr50;
+	union _ia64_fpreg	fr51;
+	union _ia64_fpreg	fr52;
+	union _ia64_fpreg	fr53;
+	union _ia64_fpreg	fr54;
+	union _ia64_fpreg	fr55;
+	union _ia64_fpreg	fr56;
+	union _ia64_fpreg	fr57;
+	union _ia64_fpreg	fr58;
+	union _ia64_fpreg	fr59;
+	union _ia64_fpreg	fr60;
+	union _ia64_fpreg	fr61;
+	union _ia64_fpreg	fr62;
+	union _ia64_fpreg	fr63;
+	union _ia64_fpreg	fr64;
+	union _ia64_fpreg	fr65;
+	union _ia64_fpreg	fr66;
+	union _ia64_fpreg	fr67;
+	union _ia64_fpreg	fr68;
+	union _ia64_fpreg	fr69;
+	union _ia64_fpreg	fr70;
+	union _ia64_fpreg	fr71;
+	union _ia64_fpreg	fr72;
+	union _ia64_fpreg	fr73;
+	union _ia64_fpreg	fr74;
+	union _ia64_fpreg	fr75;
+	union _ia64_fpreg	fr76;
+	union _ia64_fpreg	fr77;
+	union _ia64_fpreg	fr78;
+	union _ia64_fpreg	fr79;
+	union _ia64_fpreg	fr80;
+	union _ia64_fpreg	fr81;
+	union _ia64_fpreg	fr82;
+	union _ia64_fpreg	fr83;
+	union _ia64_fpreg	fr84;
+	union _ia64_fpreg	fr85;
+	union _ia64_fpreg	fr86;
+	union _ia64_fpreg	fr87;
+	union _ia64_fpreg	fr88;
+	union _ia64_fpreg	fr89;
+	union _ia64_fpreg	fr90;
+	union _ia64_fpreg	fr91;
+	union _ia64_fpreg	fr92;
+	union _ia64_fpreg	fr93;
+	union _ia64_fpreg	fr94;
+	union _ia64_fpreg	fr95;
+	union _ia64_fpreg	fr96;
+	union _ia64_fpreg	fr97;
+	union _ia64_fpreg	fr98;
+	union _ia64_fpreg	fr99;
+	union _ia64_fpreg	fr100;
+	union _ia64_fpreg	fr101;
+	union _ia64_fpreg	fr102;
+	union _ia64_fpreg	fr103;
+	union _ia64_fpreg	fr104;
+	union _ia64_fpreg	fr105;
+	union _ia64_fpreg	fr106;
+	union _ia64_fpreg	fr107;
+	union _ia64_fpreg	fr108;
+	union _ia64_fpreg	fr109;
+	union _ia64_fpreg	fr110;
+	union _ia64_fpreg	fr111;
+	union _ia64_fpreg	fr112;
+	union _ia64_fpreg	fr113;
+	union _ia64_fpreg	fr114;
+	union _ia64_fpreg	fr115;
+	union _ia64_fpreg	fr116;
+	union _ia64_fpreg	fr117;
+	union _ia64_fpreg	fr118;
+	union _ia64_fpreg	fr119;
+	union _ia64_fpreg	fr120;
+	union _ia64_fpreg	fr121;
+	union _ia64_fpreg	fr122;
+	union _ia64_fpreg	fr123;
+	union _ia64_fpreg	fr124;
+	union _ia64_fpreg	fr125;
+	union _ia64_fpreg	fr126;
+	union _ia64_fpreg	fr127;
+};
+
+/*
+ * Preserved registers.
+ *
+ * The callee-saved (preserved) integer state: general registers r4-r7,
+ * branch registers b1-b5 and ar.lc.
+ */
+struct _callee_saved {
+	unsigned long		unat;		/* NaT after spilling. */
+	unsigned long		gr4;
+	unsigned long		gr5;
+	unsigned long		gr6;
+	unsigned long		gr7;
+	unsigned long		br1;
+	unsigned long		br2;
+	unsigned long		br3;
+	unsigned long		br4;
+	unsigned long		br5;
+	unsigned long		lc;		/* ar.lc: loop count register */
+	unsigned long		__spare;	/* pad for alignment */
+};
+
+/* Callee-saved (preserved) FP registers: f2-f5 and f16-f31. */
+struct _callee_saved_fp {
+	union _ia64_fpreg	fr2;
+	union _ia64_fpreg	fr3;
+	union _ia64_fpreg	fr4;
+	union _ia64_fpreg	fr5;
+	union _ia64_fpreg	fr16;
+	union _ia64_fpreg	fr17;
+	union _ia64_fpreg	fr18;
+	union _ia64_fpreg	fr19;
+	union _ia64_fpreg	fr20;
+	union _ia64_fpreg	fr21;
+	union _ia64_fpreg	fr22;
+	union _ia64_fpreg	fr23;
+	union _ia64_fpreg	fr24;
+	union _ia64_fpreg	fr25;
+	union _ia64_fpreg	fr26;
+	union _ia64_fpreg	fr27;
+	union _ia64_fpreg	fr28;
+	union _ia64_fpreg	fr29;
+	union _ia64_fpreg	fr30;
+	union _ia64_fpreg	fr31;
+};
+
+/*
+ * Scratch registers.
+ *
+ * The caller-saved integer state: scratch general registers, branch
+ * registers b6-b7 and the scratch application registers.
+ */
+struct _caller_saved {
+	unsigned long		unat;		/* NaT after spilling. */
+	unsigned long		gr2;
+	unsigned long		gr3;
+	unsigned long		gr8;
+	unsigned long		gr9;
+	unsigned long		gr10;
+	unsigned long		gr11;
+	unsigned long		gr14;
+	unsigned long		gr15;
+	unsigned long		gr16;
+	unsigned long		gr17;
+	unsigned long		gr18;
+	unsigned long		gr19;
+	unsigned long		gr20;
+	unsigned long		gr21;
+	unsigned long		gr22;
+	unsigned long		gr23;
+	unsigned long		gr24;
+	unsigned long		gr25;
+	unsigned long		gr26;
+	unsigned long		gr27;
+	unsigned long		gr28;
+	unsigned long		gr29;
+	unsigned long		gr30;
+	unsigned long		gr31;
+	unsigned long		br6;
+	unsigned long		br7;
+	unsigned long		ccv;		/* ar.ccv: cmpxchg compare value */
+	unsigned long		csd;		/* ar.csd */
+	unsigned long		ssd;		/* ar.ssd */
+};
+
+/* Caller-saved (scratch) FP registers: f6-f15. */
+struct _caller_saved_fp {
+	union _ia64_fpreg	fr6;
+	union _ia64_fpreg	fr7;
+	union _ia64_fpreg	fr8;
+	union _ia64_fpreg	fr9;
+	union _ia64_fpreg	fr10;
+	union _ia64_fpreg	fr11;
+	union _ia64_fpreg	fr12;
+	union _ia64_fpreg	fr13;
+	union _ia64_fpreg	fr14;
+	union _ia64_fpreg	fr15;
+};
+
+#ifdef _KERNEL
+/*
+ * Kernel-internal save/restore helpers for the register sets above;
+ * implemented outside this header (presumably in assembly — confirm).
+ */
+void	restore_callee_saved(const struct _callee_saved *);
+void	restore_callee_saved_fp(const struct _callee_saved_fp *);
+void	restore_high_fp(const struct _high_fp *);
+void	save_callee_saved(struct _callee_saved *);
+void	save_callee_saved_fp(struct _callee_saved_fp *);
+void	save_high_fp(struct _high_fp *);
+#endif
+
+#endif	/* _MACHINE_REGSET_H_ */


Property changes on: trunk/sys/ia64/include/_regset.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/_stdint.h
===================================================================
--- trunk/sys/ia64/include/_stdint.h	                        (rev 0)
+++ trunk/sys/ia64/include/_stdint.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,159 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001, 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/_stdint.h 237517 2012-06-24 04:15:58Z andrew $
+ */
+
+#ifndef	_MACHINE__STDINT_H_
+#define	_MACHINE__STDINT_H_
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS)
+
+/*
+ * ISO/IEC 9899:1999 7.18.4: macros for integer constants.
+ * ia64 is LP64, so 64-bit constants take the plain L/UL suffixes.
+ */
+#define	INT8_C(c)		(c)
+#define	INT16_C(c)		(c)
+#define	INT32_C(c)		(c)
+#define	INT64_C(c)		(c ## L)
+
+#define	UINT8_C(c)		(c)
+#define	UINT16_C(c)		(c)
+#define	UINT32_C(c)		(c ## U)
+#define	UINT64_C(c)		(c ## UL)
+
+#define	INTMAX_C(c)		INT64_C(c)
+#define	UINTMAX_C(c)		UINT64_C(c)
+
+#endif /* !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) */
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS)
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.1 Limits of exact-width integer types
+ */
+/* Minimum values of exact-width signed integer types. */
+/* Minima are spelled (-MAX-1) so each stays a constant of the signed type. */
+#define	INT8_MIN	(-0x7f-1)
+#define	INT16_MIN	(-0x7fff-1)
+#define	INT32_MIN	(-0x7fffffff-1)
+#define	INT64_MIN	(-0x7fffffffffffffffL-1)
+
+/* Maximum values of exact-width signed integer types. */
+#define	INT8_MAX	0x7f
+#define	INT16_MAX	0x7fff
+#define	INT32_MAX	0x7fffffff
+#define	INT64_MAX	0x7fffffffffffffffL
+
+/* Maximum values of exact-width unsigned integer types. */
+#define	UINT8_MAX	0xff
+#define	UINT16_MAX	0xffff
+#define	UINT32_MAX	0xffffffffU
+#define	UINT64_MAX	0xffffffffffffffffUL
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.2  Limits of minimum-width integer types
+ */
+/* Minimum values of minimum-width signed integer types. */
+#define	INT_LEAST8_MIN	INT8_MIN
+#define	INT_LEAST16_MIN	INT16_MIN
+#define	INT_LEAST32_MIN	INT32_MIN
+#define	INT_LEAST64_MIN	INT64_MIN
+
+/* Maximum values of minimum-width signed integer types. */
+#define	INT_LEAST8_MAX	INT8_MAX
+#define	INT_LEAST16_MAX	INT16_MAX
+#define	INT_LEAST32_MAX	INT32_MAX
+#define	INT_LEAST64_MAX	INT64_MAX
+
+/* Maximum values of minimum-width unsigned integer types. */
+#define	UINT_LEAST8_MAX	 UINT8_MAX
+#define	UINT_LEAST16_MAX UINT16_MAX
+#define	UINT_LEAST32_MAX UINT32_MAX
+#define	UINT_LEAST64_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.3  Limits of fastest minimum-width integer types
+ */
+/* The "fast" types for widths <= 32 are 32-bit on ia64, matching the
+ * __int_fast*_t definitions in <machine/_types.h>. */
+/* Minimum values of fastest minimum-width signed integer types. */
+#define	INT_FAST8_MIN	INT32_MIN
+#define	INT_FAST16_MIN	INT32_MIN
+#define	INT_FAST32_MIN	INT32_MIN
+#define	INT_FAST64_MIN	INT64_MIN
+
+/* Maximum values of fastest minimum-width signed integer types. */
+#define	INT_FAST8_MAX	INT32_MAX
+#define	INT_FAST16_MAX	INT32_MAX
+#define	INT_FAST32_MAX	INT32_MAX
+#define	INT_FAST64_MAX	INT64_MAX
+
+/* Maximum values of fastest minimum-width unsigned integer types. */
+#define	UINT_FAST8_MAX	UINT32_MAX
+#define	UINT_FAST16_MAX	UINT32_MAX
+#define	UINT_FAST32_MAX	UINT32_MAX
+#define	UINT_FAST64_MAX	UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.4  Limits of integer types capable of holding object pointers
+ */
+/* Pointers are 64 bits wide (LP64). */
+#define	INTPTR_MIN	INT64_MIN
+#define	INTPTR_MAX	INT64_MAX
+#define	UINTPTR_MAX	UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.5  Limits of greatest-width integer types
+ */
+#define	INTMAX_MIN	INT64_MIN
+#define	INTMAX_MAX	INT64_MAX
+#define	UINTMAX_MAX	UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.3  Limits of other integer types
+ */
+/* Limits of ptrdiff_t. */
+#define	PTRDIFF_MIN	INT64_MIN
+#define	PTRDIFF_MAX	INT64_MAX
+
+/* Limits of sig_atomic_t. */
+#define	SIG_ATOMIC_MIN	INT32_MIN
+#define	SIG_ATOMIC_MAX	INT32_MAX
+
+/* Limit of size_t. */
+#define	SIZE_MAX	UINT64_MAX
+
+/* Limits of wint_t. */
+#define	WINT_MIN	INT32_MIN
+#define	WINT_MAX	INT32_MAX
+
+#endif /* !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) */
+
+#endif /* !_MACHINE__STDINT_H_ */


Property changes on: trunk/sys/ia64/include/_stdint.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/_types.h
===================================================================
--- trunk/sys/ia64/include/_types.h	                        (rev 0)
+++ trunk/sys/ia64/include/_types.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,123 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 1990, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	From: @(#)ansi.h	8.2 (Berkeley) 1/4/94
+ *	From: @(#)types.h	8.3 (Berkeley) 1/5/94
+ * $FreeBSD: stable/10/sys/ia64/include/_types.h 264496 2014-04-15 09:41:52Z tijl $
+ */
+
+#ifndef _MACHINE__TYPES_H_
+#define	_MACHINE__TYPES_H_
+
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+/*
+ * Basic types upon which most other types are built.
+ * ia64 is LP64: int is 32 bits; long and pointers are 64 bits.
+ */
+typedef	signed char		__int8_t;
+typedef	unsigned char		__uint8_t;
+typedef	short			__int16_t;
+typedef	unsigned short		__uint16_t;
+typedef	int			__int32_t;
+typedef	unsigned int		__uint32_t;
+typedef	long			__int64_t;
+typedef	unsigned long		__uint64_t;
+
+/*
+ * Standard type definitions.
+ */
+typedef	__int32_t	__clock_t;		/* clock()... */
+typedef	__int64_t	__critical_t;
+typedef	double		__double_t;
+typedef	float		__float_t;
+typedef	__int64_t	__intfptr_t;
+typedef	__int64_t	__intmax_t;
+typedef	__int64_t	__intptr_t;
+/* The "fast" types for widths <= 32 are plain 32-bit ints on ia64. */
+typedef	__int32_t	__int_fast8_t;
+typedef	__int32_t	__int_fast16_t;
+typedef	__int32_t	__int_fast32_t;
+typedef	__int64_t	__int_fast64_t;
+typedef	__int8_t	__int_least8_t;
+typedef	__int16_t	__int_least16_t;
+typedef	__int32_t	__int_least32_t;
+typedef	__int64_t	__int_least64_t;
+typedef	__int64_t	__ptrdiff_t;		/* ptr1 - ptr2 */
+typedef	__int64_t	__register_t;
+typedef	__int64_t	__segsz_t;		/* segment size (in pages) */
+typedef	__uint64_t	__size_t;		/* sizeof() */
+typedef	__int64_t	__ssize_t;		/* byte count or error */
+typedef	__int64_t	__time_t;		/* time()... */
+typedef	__uint64_t	__uintfptr_t;
+typedef	__uint64_t	__uintmax_t;
+typedef	__uint64_t	__uintptr_t;
+typedef	__uint32_t	__uint_fast8_t;
+typedef	__uint32_t	__uint_fast16_t;
+typedef	__uint32_t	__uint_fast32_t;
+typedef	__uint64_t	__uint_fast64_t;
+typedef	__uint8_t	__uint_least8_t;
+typedef	__uint16_t	__uint_least16_t;
+typedef	__uint32_t	__uint_least32_t;
+typedef	__uint64_t	__uint_least64_t;
+typedef	__uint64_t	__u_register_t;
+typedef	__uint64_t	__vm_offset_t;
+typedef	__int64_t	__vm_ooffset_t;
+typedef	__uint64_t	__vm_paddr_t;
+typedef	__uint64_t	__vm_pindex_t;
+typedef	__uint64_t	__vm_size_t;
+typedef	int		___wchar_t;	/* wchar_t is a 32-bit signed int */
+
+#define	__WCHAR_MIN	__INT_MIN	/* min value for a wchar_t */
+#define	__WCHAR_MAX	__INT_MAX	/* max value for a wchar_t */
+
+/*
+ * Unusual type definitions.
+ */
+#ifdef __GNUCLIKE_BUILTIN_VARARGS
+typedef __builtin_va_list	__va_list;	/* internally known to gcc */
+#if defined(__GNUC_VA_LIST_COMPATIBILITY) && !defined(__GNUC_VA_LIST) \
+    && !defined(__NO_GNUC_VA_LIST)
+#define	__GNUC_VA_LIST
+typedef	__va_list	__gnuc_va_list;		/* compat. with GNU headers */
+#endif
+#else
+#ifdef lint
+typedef char *			__va_list;	/* non-functional */
+#else
+#error Must add va_list support for this non-GCC compiler.
+#endif /* lint */
+#endif /* __GNUCLIKE_BUILTIN_VARARGS */
+
+#endif /* !_MACHINE__TYPES_H_ */


Property changes on: trunk/sys/ia64/include/_types.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/acpica_machdep.h
===================================================================
--- trunk/sys/ia64/include/acpica_machdep.h	                        (rev 0)
+++ trunk/sys/ia64/include/acpica_machdep.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,78 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002 Mitsuru IWASAKI
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/acpica_machdep.h 254300 2013-08-13 21:34:03Z jkim $
+ */
+
+/******************************************************************************
+ *
+ * Name: acpica_machdep.h - arch-specific defines, etc.
+ *       $Revision$
+ *
+ *****************************************************************************/
+
+#ifndef __ACPICA_MACHDEP_H__
+#define	__ACPICA_MACHDEP_H__
+
+#ifdef _KERNEL
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
+ * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
+ *
+ * All empty on ia64: the default compiler calling convention is used.
+ */
+#define	ACPI_SYSTEM_XFACE
+#define	ACPI_EXTERNAL_XFACE
+#define	ACPI_INTERNAL_XFACE
+#define	ACPI_INTERNAL_VAR_XFACE
+
+/* Asm macros */
+
+#define	ACPI_ASM_MACROS
+#define	BREAKPOINT3
+#define	ACPI_DISABLE_IRQS()	ia64_disable_intr()
+#define	ACPI_ENABLE_IRQS()	ia64_enable_intr()
+
+/* Intentionally a no-op here. */
+#define	ACPI_FLUSH_CPU_CACHE()	/* XXX ia64_fc()? */
+
+/* Section 5.2.10.1: global lock acquire/release functions */
+int	acpi_acquire_global_lock(volatile uint32_t *);
+int	acpi_release_global_lock(volatile uint32_t *);
+/* Store the acquire/release result into Acq for the ACPICA core. */
+#define	ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq)	do {			\
+	(Acq) = acpi_acquire_global_lock(&((GLptr)->GlobalLock));	\
+} while (0)
+#define	ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq)	do {			\
+	(Acq) = acpi_release_global_lock(&((GLptr)->GlobalLock));	\
+} while (0)
+
+void	acpi_cpu_c1(void);	/* CPU idle helper; defined elsewhere */
+
+#endif /* _KERNEL */
+
+#endif /* __ACPICA_MACHDEP_H__ */


Property changes on: trunk/sys/ia64/include/acpica_machdep.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/asm.h
===================================================================
--- trunk/sys/ia64/include/asm.h	                        (rev 0)
+++ trunk/sys/ia64/include/asm.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,193 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/asm.h 209618 2010-07-01 00:30:35Z marcel $ */
+/* From: NetBSD: asm.h,v 1.18 1997/11/03 04:22:06 ross Exp */
+
+/*-
+ * Copyright (c) 1991,1990,1989,1994,1995,1996 Carnegie Mellon University
+ * All Rights Reserved.
+ * 
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ * 
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ * 
+ * Carnegie Mellon requests users of this software to return to
+ * 
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ * 
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ *	Assembly coding style
+ *
+ *	This file contains macros and register defines to
+ *	aid in writing more readable assembly code.
+ *	Some rules to make assembly code understandable by
+ *	a debugger are also noted.
+ */
+
+/*
+ * Macro to make a local label name.
+ */
+#define	LLABEL(name,num)	L ## name ## num
+
+/*
+ * MCOUNT
+ *	Profiling hook emitted at function entry; expands to nothing
+ *	unless profiling (PROF, or kernel GPROF) is enabled.
+ */
+#if defined(PROF) || (defined(_KERNEL) && defined(GPROF))
+#define	MCOUNT					\
+	alloc	out0 = ar.pfs, 8, 0, 4, 0;	\
+	mov	out1 = r1;			\
+	mov	out2 = b0;;			\
+	mov	out3 = r0;			\
+	br.call.sptk b0 = _mcount;;
+#else
+#define	MCOUNT	/* nothing */
+#endif
+
+/*
+ * ENTRY
+ *	Declare a global leaf function.
+ *	A leaf function does not call other functions.
+ */
+#define	ENTRY(_name_, _n_args_)			\
+	.global	_name_;				\
+	.align	32;				\
+	.proc	_name_;				\
+_name_:;					\
+	.regstk	_n_args_, 0, 0, 0;		\
+	MCOUNT
+
+/* Same as ENTRY but without the profiling hook. */
+#define	ENTRY_NOPROFILE(_name_, _n_args_)	\
+	.global	_name_;				\
+	.align	32;				\
+	.proc	_name_;				\
+_name_:;					\
+	.regstk	_n_args_, 0, 0, 0
+
+/*
+ * STATIC_ENTRY
+ *	Declare a local leaf function.
+ */
+#define STATIC_ENTRY(_name_, _n_args_)		\
+	.align	32;				\
+	.proc	_name_;				\
+_name_:;					\
+	.regstk	_n_args_, 0, 0, 0;		\
+	MCOUNT
+/* Note: the ';' after .regstk matches ENTRY; without it a non-empty
+ * MCOUNT expansion (PROF/GPROF) is glued onto the .regstk directive
+ * and fails to assemble. */
+/*
+ * XENTRY
+ *	Global alias for a leaf function, or alternate entry point
+ */
+#define	XENTRY(_name_)				\
+	.globl	_name_;				\
+_name_:
+
+/*
+ * STATIC_XENTRY
+ *	Local alias for a leaf function, or alternate entry point
+ */
+#define	STATIC_XENTRY(_name_)			\
+_name_:
+
+
+/*
+ * END
+ *	Function delimiter
+ */
+#define	END(_name_)						\
+	.endp	_name_
+
+
+/*
+ * EXPORT
+ *	Export a symbol
+ */
+#define	EXPORT(_name_)						\
+	.global	_name_;						\
+_name_:
+
+
+/*
+ * IMPORT
+ *	Make an external name visible, typecheck the size.
+ *	Effectively a no-op here: the .extern is commented out.
+ */
+#define	IMPORT(_name_, _size_)					\
+	/* .extern	_name_,_size_ */
+
+
+/*
+ * ABS
+ *	Define an absolute symbol
+ */
+#define	ABS(_name_, _value_)					\
+	.globl	_name_;						\
+_name_	=	_value_
+
+
+/*
+ * BSS
+ *	Allocate un-initialized space for a global symbol
+ */
+#define	BSS(_name_,_numbytes_)					\
+	.comm	_name_,_numbytes_
+
+
+/*
+ * MSG
+ *	Allocate space for a message (a read-only ascii string)
+ */
+#define	ASCIZ	.asciz
+#define	MSG(msg,reg,label)			\
+	addl reg, at ltoff(label),gp;;		\
+	ld8 reg=[reg];;				\
+	.data;					\
+label:	ASCIZ msg;				\
+	.text;
+
+
+/*
+ * System call glue.
+ */
+#define	SYSCALLNUM(name)	SYS_ ## name
+
+/*
+ * CALLSYS_NOERROR
+ *	Invoke system call `name`: the syscall number goes in r8, the
+ *	return branch register is saved in r10, and control transfers
+ *	through the address read from ar.k5 (presumably the
+ *	kernel-provided syscall entry point — confirm).
+ */
+#define	CALLSYS_NOERROR(name)					\
+	.prologue ;						\
+	.unwabi		@svr4, 'S' ;				\
+	.save		rp, r0 ;				\
+	.body ;							\
+{	.mmi ;							\
+	alloc		r9 = ar.pfs, 0, 0, 8, 0 ;		\
+	mov		r31 = ar.k5 ;				\
+	mov		r10 = b0 ;; }				\
+{	.mib ;							\
+	mov		r8 = SYSCALLNUM(name) ;			\
+	mov		b7 = r31 ; 				\
+	br.call.sptk	b0 = b7 ;; }
+
+
+/*
+ * WEAK_ALIAS: create a weak alias (ELF only).
+ */
+#define WEAK_ALIAS(alias,sym)					\
+	.weak alias;						\
+	alias = sym
+
+/*
+ * ID tag macros
+ */
+#if !defined(lint) && !defined(STRIP_FBSDID)
+#define __FBSDID(s)	.ident s
+#else
+#define __FBSDID(s)	/* nothing */
+#endif /* not lint and not STRIP_FBSDID */


Property changes on: trunk/sys/ia64/include/asm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/atomic.h
===================================================================
--- trunk/sys/ia64/include/atomic.h	                        (rev 0)
+++ trunk/sys/ia64/include/atomic.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,418 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/atomic.h 262004 2014-02-16 23:08:21Z marcel $
+ */
+
+#ifndef _MACHINE_ATOMIC_H_
+#define	_MACHINE_ATOMIC_H_
+
+#define	mb()	__asm __volatile("mf")
+#define	wmb()	mb()
+#define	rmb()	mb()
+
+/*
+ * Various simple arithmetic on memory which is atomic in the presence
+ * of interrupts and SMP safe.
+ */
+
+/*
+ * Everything is built out of cmpxchg.
+ *
+ * IA64_CMPXCHG(sz, sem, p, cmpval, newval, ret): load cmpval into
+ * ar.ccv, then issue cmpxchg<sz>.<sem> on *p.  'ret' receives the
+ * previous value of *p; the store of newval took place iff
+ * ret == cmpval.  'sem' selects acq (acquire) or rel (release)
+ * ordering.
+ */
+#define	IA64_CMPXCHG(sz, sem, p, cmpval, newval, ret)			\
+	__asm __volatile (						\
+		"mov ar.ccv=%2;;\n\t"					\
+		"cmpxchg" #sz "." #sem " %0=%4,%3,ar.ccv\n\t"		\
+		: "=r" (ret), "=m" (*p)					\
+		: "r" ((uint64_t)cmpval), "r" (newval), "m" (*p)	\
+		: "memory")
+
+/*
+ * Some common forms of cmpxchg.  Each returns the previous value of
+ * *p; the exchange took effect iff that value equals cmpval.
+ */
+static __inline uint32_t
+ia64_cmpxchg_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
+{
+	uint32_t ret;
+	IA64_CMPXCHG(4, acq, p, cmpval, newval, ret);
+	return (ret);
+}
+
+/* 32-bit compare-and-exchange, release ordering. */
+static __inline uint32_t
+ia64_cmpxchg_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
+{
+	uint32_t ret;
+	IA64_CMPXCHG(4, rel, p, cmpval, newval, ret);
+	return (ret);
+}
+
+/* 64-bit compare-and-exchange, acquire ordering. */
+static __inline uint64_t
+ia64_cmpxchg_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
+{
+	uint64_t ret;
+	IA64_CMPXCHG(8, acq, p, cmpval, newval, ret);
+	return (ret);
+}
+
+/* 64-bit compare-and-exchange, release ordering. */
+static __inline uint64_t
+ia64_cmpxchg_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
+{
+	uint64_t ret;
+	IA64_CMPXCHG(8, rel, p, cmpval, newval, ret);
+	return (ret);
+}
+
+/*
+ * ATOMIC_STORE_LOAD(type, width, size): generate acquire-load and
+ * release-store accessors for one operand width, in three spellings:
+ * ia64_{ld_acq,st_rel}_<width>, atomic_{load_acq,store_rel}_<width>
+ * and atomic_{load_acq,store_rel}_<type>.  'size' is the ld/st
+ * operand-size suffix ("1", "2", "4" or "8" bytes).
+ */
+#define	ATOMIC_STORE_LOAD(type, width, size)				\
+	static __inline uint##width##_t					\
+	ia64_ld_acq_##width(volatile uint##width##_t* p)		\
+	{								\
+		uint##width##_t v;					\
+		__asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)	\
+		    : "m" (*p) : "memory");				\
+		return (v);						\
+	}								\
+									\
+	static __inline uint##width##_t					\
+	atomic_load_acq_##width(volatile uint##width##_t* p)		\
+	{								\
+		uint##width##_t v;					\
+		__asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)	\
+		    : "m" (*p) : "memory");				\
+		return (v);						\
+	}								\
+									\
+	static __inline uint##width##_t					\
+	atomic_load_acq_##type(volatile uint##width##_t* p)		\
+	{								\
+		uint##width##_t v;					\
+		__asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)	\
+		    : "m" (*p) : "memory");				\
+		return (v);						\
+	}								\
+								       	\
+	static __inline void						\
+	ia64_st_rel_##width(volatile uint##width##_t* p, uint##width##_t v) \
+	{								\
+		__asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)	\
+		    : "r" (v) : "memory");				\
+	}								\
+									\
+	static __inline void						\
+	atomic_store_rel_##width(volatile uint##width##_t* p,		\
+	    uint##width##_t v)						\
+	{								\
+		__asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)	\
+		    : "r" (v) : "memory");				\
+	}								\
+									\
+	static __inline void						\
+	atomic_store_rel_##type(volatile uint##width##_t* p,		\
+	    uint##width##_t v)						\
+	{								\
+		__asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)	\
+		    : "r" (v) : "memory");				\
+	}
+
+ATOMIC_STORE_LOAD(char,	 8,  "1")
+ATOMIC_STORE_LOAD(short, 16, "2")
+ATOMIC_STORE_LOAD(int,	 32, "4")
+ATOMIC_STORE_LOAD(long,	 64, "8")
+
+#undef ATOMIC_STORE_LOAD
+
+/* Pointer variants: pointers are 64 bits on ia64. */
+#define	atomic_load_acq_ptr(p)		\
+    ((void *)atomic_load_acq_64((volatile uint64_t *)p))
+
+#define	atomic_store_rel_ptr(p, v)	\
+    atomic_store_rel_64((volatile uint64_t *)p, (uint64_t)v)
+
+/*
+ * IA64_ATOMIC(sz, type, name, width, op): generate
+ * atomic_<name>_{acq,rel}_<width>() as a cmpxchg retry loop that
+ * applies 'old op v' to *p and returns the previous value.  The loop
+ * repeats until the cmpxchg observes the value it read, i.e. until
+ * the update is known to have been applied without interference.
+ */
+#define	IA64_ATOMIC(sz, type, name, width, op)				\
+	static __inline type						\
+	atomic_##name##_acq_##width(volatile type *p, type v)		\
+	{								\
+		type old, ret;						\
+		do {							\
+			old = *p;					\
+			IA64_CMPXCHG(sz, acq, p, old, old op v, ret);	\
+		} while (ret != old);					\
+		return (old);						\
+	}								\
+									\
+	static __inline type						\
+	atomic_##name##_rel_##width(volatile type *p, type v)		\
+	{								\
+		type old, ret;						\
+		do {							\
+			old = *p;					\
+			IA64_CMPXCHG(sz, rel, p, old, old op v, ret);	\
+		} while (ret != old);					\
+		return (old);						\
+	}
+
+/* set = bitwise OR, clear = AND-NOT, add/subtract = arithmetic. */
+IA64_ATOMIC(1, uint8_t,	 set, 8,  |)
+IA64_ATOMIC(2, uint16_t, set, 16, |)
+IA64_ATOMIC(4, uint32_t, set, 32, |)
+IA64_ATOMIC(8, uint64_t, set, 64, |)
+
+IA64_ATOMIC(1, uint8_t,  clear,	8,  &~)
+IA64_ATOMIC(2, uint16_t, clear,	16, &~)
+IA64_ATOMIC(4, uint32_t, clear,	32, &~)
+IA64_ATOMIC(8, uint64_t, clear,	64, &~)
+
+IA64_ATOMIC(1, uint8_t,  add, 8,  +)
+IA64_ATOMIC(2, uint16_t, add, 16, +)
+IA64_ATOMIC(4, uint32_t, add, 32, +)
+IA64_ATOMIC(8, uint64_t, add, 64, +)
+
+IA64_ATOMIC(1, uint8_t,  subtract, 8,  -)
+IA64_ATOMIC(2, uint16_t, subtract, 16, -)
+IA64_ATOMIC(4, uint32_t, subtract, 32, -)
+IA64_ATOMIC(8, uint64_t, subtract, 64, -)
+
+#undef IA64_ATOMIC
+
+#define	atomic_set_8			atomic_set_acq_8
+#define	atomic_clear_8			atomic_clear_acq_8
+#define	atomic_add_8			atomic_add_acq_8
+#define	atomic_subtract_8		atomic_subtract_acq_8
+
+#define	atomic_set_16			atomic_set_acq_16
+#define	atomic_clear_16			atomic_clear_acq_16
+#define	atomic_add_16			atomic_add_acq_16
+#define	atomic_subtract_16		atomic_subtract_acq_16
+
+#define	atomic_set_32			atomic_set_acq_32
+#define	atomic_clear_32			atomic_clear_acq_32
+#define	atomic_add_32			atomic_add_acq_32
+#define	atomic_subtract_32		atomic_subtract_acq_32
+
+#define	atomic_set_64			atomic_set_acq_64
+#define	atomic_clear_64			atomic_clear_acq_64
+#define	atomic_add_64			atomic_add_acq_64
+#define	atomic_subtract_64		atomic_subtract_acq_64
+
+#define	atomic_set_char			atomic_set_8
+#define	atomic_clear_char		atomic_clear_8
+#define	atomic_add_char			atomic_add_8
+#define	atomic_subtract_char		atomic_subtract_8
+#define	atomic_set_acq_char		atomic_set_acq_8
+#define	atomic_clear_acq_char		atomic_clear_acq_8
+#define	atomic_add_acq_char		atomic_add_acq_8
+#define	atomic_subtract_acq_char	atomic_subtract_acq_8
+#define	atomic_set_rel_char		atomic_set_rel_8
+#define	atomic_clear_rel_char		atomic_clear_rel_8
+#define	atomic_add_rel_char		atomic_add_rel_8
+#define	atomic_subtract_rel_char	atomic_subtract_rel_8
+
+#define	atomic_set_short		atomic_set_16
+#define	atomic_clear_short		atomic_clear_16
+#define	atomic_add_short		atomic_add_16
+#define	atomic_subtract_short		atomic_subtract_16
+#define	atomic_set_acq_short		atomic_set_acq_16
+#define	atomic_clear_acq_short		atomic_clear_acq_16
+#define	atomic_add_acq_short		atomic_add_acq_16
+#define	atomic_subtract_acq_short	atomic_subtract_acq_16
+#define	atomic_set_rel_short		atomic_set_rel_16
+#define	atomic_clear_rel_short		atomic_clear_rel_16
+#define	atomic_add_rel_short		atomic_add_rel_16
+#define	atomic_subtract_rel_short	atomic_subtract_rel_16
+
+#define	atomic_set_int			atomic_set_32
+#define	atomic_clear_int		atomic_clear_32
+#define	atomic_add_int			atomic_add_32
+#define	atomic_subtract_int		atomic_subtract_32
+#define	atomic_set_acq_int		atomic_set_acq_32
+#define	atomic_clear_acq_int		atomic_clear_acq_32
+#define	atomic_add_acq_int		atomic_add_acq_32
+#define	atomic_subtract_acq_int		atomic_subtract_acq_32
+#define	atomic_set_rel_int		atomic_set_rel_32
+#define	atomic_clear_rel_int		atomic_clear_rel_32
+#define	atomic_add_rel_int		atomic_add_rel_32
+#define	atomic_subtract_rel_int		atomic_subtract_rel_32
+
+#define	atomic_set_long			atomic_set_64
+#define	atomic_clear_long		atomic_clear_64
+#define	atomic_add_long			atomic_add_64
+#define	atomic_subtract_long		atomic_subtract_64
+#define	atomic_set_acq_long		atomic_set_acq_64
+#define	atomic_clear_acq_long		atomic_clear_acq_64
+#define	atomic_add_acq_long		atomic_add_acq_64
+#define	atomic_subtract_acq_long	atomic_subtract_acq_64
+#define	atomic_set_rel_long		atomic_set_rel_64
+#define	atomic_clear_rel_long		atomic_clear_rel_64
+#define	atomic_add_rel_long		atomic_add_rel_64
+#define	atomic_subtract_rel_long	atomic_subtract_rel_64
+
+/*
+ * XXX Needs casting: these alias the 64-bit ops directly, so callers
+ * must cast their pointer arguments to (volatile uint64_t *)
+ * themselves (compare atomic_cmpset_*_ptr below, which wrap the cast).
+ */
+#define	atomic_set_ptr			atomic_set_64
+#define	atomic_clear_ptr		atomic_clear_64
+#define	atomic_add_ptr			atomic_add_64
+#define	atomic_subtract_ptr		atomic_subtract_64
+#define	atomic_set_acq_ptr		atomic_set_acq_64
+#define	atomic_clear_acq_ptr		atomic_clear_acq_64
+#define	atomic_add_acq_ptr		atomic_add_acq_64
+#define	atomic_subtract_acq_ptr		atomic_subtract_acq_64
+#define	atomic_set_rel_ptr		atomic_set_rel_64
+#define	atomic_clear_rel_ptr		atomic_clear_rel_64
+#define	atomic_add_rel_ptr		atomic_add_rel_64
+#define	atomic_subtract_rel_ptr		atomic_subtract_rel_64
+
+#undef IA64_CMPXCHG
+
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+static __inline int
+atomic_cmpset_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
+{
+	return (ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval);
+}
+
+/* Release-ordered variant of atomic_cmpset_acq_32(). */
+static __inline int
+atomic_cmpset_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
+{
+	return (ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval);
+}
+
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+static __inline int
+atomic_cmpset_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
+{
+	return (ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval);
+}
+
+/* Release-ordered variant of atomic_cmpset_acq_64(). */
+static __inline int
+atomic_cmpset_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
+{
+	return (ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval);
+}
+
+/* Unsuffixed cmpset defaults to acquire semantics. */
+#define	atomic_cmpset_32		atomic_cmpset_acq_32
+#define	atomic_cmpset_64		atomic_cmpset_acq_64
+#define	atomic_cmpset_int		atomic_cmpset_32
+#define	atomic_cmpset_long		atomic_cmpset_64
+#define	atomic_cmpset_acq_int		atomic_cmpset_acq_32
+#define	atomic_cmpset_rel_int		atomic_cmpset_rel_32
+#define	atomic_cmpset_acq_long		atomic_cmpset_acq_64
+#define	atomic_cmpset_rel_long		atomic_cmpset_rel_64
+
+/* Pointer forms cast to the 64-bit implementation. */
+#define	atomic_cmpset_acq_ptr(p, o, n)	\
+    (atomic_cmpset_acq_64((volatile uint64_t *)p, (uint64_t)o, (uint64_t)n))
+
+#define	atomic_cmpset_ptr		atomic_cmpset_acq_ptr
+
+#define	atomic_cmpset_rel_ptr(p, o, n)	\
+    (atomic_cmpset_rel_64((volatile uint64_t *)p, (uint64_t)o, (uint64_t)n))
+
+/*
+ * Atomically read *p and replace its contents with zero; returns the
+ * value that was read.  Implemented as a cmpset retry loop.
+ */
+static __inline uint32_t
+atomic_readandclear_32(volatile uint32_t* p)
+{
+	uint32_t val;
+	do {
+		val = *p;
+	} while (!atomic_cmpset_32(p, val, 0));
+	return (val);
+}
+
+/* 64-bit variant of atomic_readandclear_32(). */
+static __inline uint64_t
+atomic_readandclear_64(volatile uint64_t* p)
+{
+	uint64_t val;
+	do {
+		val = *p;
+	} while (!atomic_cmpset_64(p, val, 0));
+	return (val);
+}
+
+#define	atomic_readandclear_int		atomic_readandclear_32
+#define	atomic_readandclear_long	atomic_readandclear_64
+#define	atomic_readandclear_ptr		atomic_readandclear_64
+
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.  Implemented as a cmpset retry loop.
+ *
+ * XXX: Should we use the fetchadd instruction here?
+ * NOTE(review): ia64 fetchadd appears to accept only a limited set of
+ * immediate increments, which may be why the generic loop is used --
+ * confirm against the ISA reference before changing this.
+ */
+static __inline uint32_t
+atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
+{
+	uint32_t value;
+
+	do {
+		value = *p;
+	} while (!atomic_cmpset_32(p, value, value + v));
+	return (value);
+}
+
+#define	atomic_fetchadd_int		atomic_fetchadd_32
+
+/* 64-bit (u_long) variant of atomic_fetchadd_32(). */
+static __inline u_long
+atomic_fetchadd_long(volatile u_long *p, u_long v)
+{
+	u_long value;
+
+	do {
+		value = *p;
+	} while (!atomic_cmpset_64(p, value, value + v));
+	return (value);
+}
+
+/*
+ * <type> atomic_swap_<type>(volatile <type> *p, <type> v);
+ *
+ * Unconditionally exchange *p with v using the xchg instruction and
+ * return the previous contents of *p.
+ */
+static __inline uint32_t
+atomic_swap_32(volatile uint32_t *p, uint32_t v)
+{
+	uint32_t r;
+
+	__asm __volatile ("xchg4 %0 = %3, %2;;" : "=r"(r), "=m"(*p) :
+	    "r"(v), "m"(*p) : "memory");
+	return (r);
+}
+
+/* 64-bit variant of atomic_swap_32(). */
+static __inline uint64_t
+atomic_swap_64(volatile uint64_t *p, uint64_t v)
+{
+	uint64_t r;
+
+	__asm __volatile ("xchg8 %0 = %3, %2;;" : "=r"(r), "=m"(*p) :
+	    "r"(v), "m"(*p) : "memory");
+	return (r);
+}
+
+#define	atomic_swap_int		atomic_swap_32
+#define	atomic_swap_long	atomic_swap_64
+#define	atomic_swap_ptr		atomic_swap_64
+
+#endif /* ! _MACHINE_ATOMIC_H_ */


Property changes on: trunk/sys/ia64/include/atomic.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/bootinfo.h
===================================================================
--- trunk/sys/ia64/include/bootinfo.h	                        (rev 0)
+++ trunk/sys/ia64/include/bootinfo.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,56 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/bootinfo.h 220313 2011-04-03 23:49:20Z marcel $ */
+/*-
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ * 
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ * 
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ * 
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Boot information block handed from the loader to the kernel
+ * (version 1 layout).  Addresses are physical unless noted otherwise.
+ */
+struct bootinfo {
+	uint64_t	bi_magic;		/* BOOTINFO_MAGIC */
+#define	BOOTINFO_MAGIC		0xdeadbeeffeedface
+	uint64_t	bi_version;		/* version 1 */
+	uint64_t	bi_spare[3];		/* was: name of booted kernel */
+	uint32_t	bi_itr_used;		/* Number of ITR and DTR ... */
+	uint32_t	bi_dtr_used;		/* ... entries used. */
+	uint32_t	bi_text_mapped;		/* Size of text mapped. */
+	uint32_t	bi_data_mapped;		/* Size of data mapped. */
+	uint64_t	bi_pbvm_pgtbl;		/* PA of PBVM page table. */
+	uint64_t	bi_hcdp;		/* DIG64 HCDP table */
+	uint64_t	bi_fpswa;		/* FPSWA interface */
+	uint64_t	bi_boothowto;		/* value for boothowto */
+	uint64_t	bi_systab;		/* pa of EFI system table */
+	uint64_t	bi_memmap;		/* pa of EFI memory map */
+	uint64_t	bi_memmap_size;		/* size of EFI memory map */
+	uint64_t	bi_memdesc_size;	/* sizeof EFI memory desc */
+	uint32_t	bi_memdesc_version;	/* EFI memory desc version */
+	uint32_t	bi_pbvm_pgtblsz;	/* PBVM page table size. */
+	uint64_t	bi_symtab;		/* start of kernel sym table */
+	uint64_t	bi_esymtab;		/* end of kernel sym table */
+	uint64_t	bi_kernend;		/* end of kernel space */
+	uint64_t	bi_envp;		/* environment */
+	uint64_t	bi_modulep;		/* preloaded modules */
+};
+
+/* Presumably points at the loader-provided block; set up early in
+ * machine-dependent startup -- confirm in machdep.c. */
+extern struct bootinfo *bootinfo;


Property changes on: trunk/sys/ia64/include/bootinfo.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/bus.h
===================================================================
--- trunk/sys/ia64/include/bus.h	                        (rev 0)
+++ trunk/sys/ia64/include/bus.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,824 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2009 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*	$NetBSD: bus.h,v 1.12 1997/10/01 08:25:15 fvdl Exp $	*/
+
+/*-
+ * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1996 Charles M. Hannum.  All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *      This product includes software developed by Christopher G. Demetriou
+ *	for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* $FreeBSD: stable/10/sys/ia64/include/bus.h 292348 2015-12-16 19:01:14Z ken $ */
+
+#ifndef _MACHINE_BUS_H_
+#define _MACHINE_BUS_H_
+
+#include <machine/_bus.h>
+#include <machine/cpufunc.h>
+
+/*
+ * I/O port reads with ia32 semantics.
+ */
+#define inb     bus_space_read_io_1
+#define inw     bus_space_read_io_2
+#define inl     bus_space_read_io_4
+
+#define outb    bus_space_write_io_1
+#define outw    bus_space_write_io_2
+#define outl    bus_space_write_io_4
+
+/*
+ * Values for the ia64 bus space tag, not to be used directly by MI code.
+ */
+#define	IA64_BUS_SPACE_IO	0	/* space is i/o space */
+#define IA64_BUS_SPACE_MEM	1	/* space is mem space */
+
+#define	BUS_SPACE_BARRIER_READ	0x01	/* force read barrier */
+#define	BUS_SPACE_BARRIER_WRITE	0x02	/* force write barrier */
+
+#define BUS_SPACE_MAXSIZE_24BIT	0xFFFFFF
+#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXSIZE	0xFFFFFFFFFFFFFFFF
+#define BUS_SPACE_MAXADDR_24BIT	0xFFFFFF
+#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXADDR	0xFFFFFFFFFFFFFFFF
+
+#define BUS_SPACE_UNRESTRICTED	(~0)
+
+#ifdef _KERNEL
+
+/*
+ * Map and unmap a region of device bus space into CPU virtual address space.
+ */
+int
+bus_space_map(bus_space_tag_t, bus_addr_t, bus_size_t, int,
+    bus_space_handle_t *);
+
+void
+bus_space_unmap(bus_space_tag_t, bus_space_handle_t, bus_size_t size);
+
+/*
+ * Get a new handle for a subregion of an already-mapped area of bus
+ * space.  Handles are plain addresses on this implementation, so the
+ * subregion handle is just bsh + ofs; the call cannot fail.
+ */
+static __inline int
+bus_space_subregion(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, bus_size_t size __unused, bus_space_handle_t *nbshp)
+{
+	*nbshp = bsh + ofs;
+	return (0);
+}
+
+
+/*
+ * Allocate a region of memory that is accessible to devices in bus space.
+ */
+int
+bus_space_alloc(bus_space_tag_t bst, bus_addr_t rstart, bus_addr_t rend,
+    bus_size_t size, bus_size_t align, bus_size_t boundary, int flags,
+    bus_addr_t *addrp, bus_space_handle_t *bshp);
+
+
+/*
+ * Free a region of bus space accessible memory.
+ */
+void
+bus_space_free(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t size);
+
+
+/*
+ * Bus read/write barrier method.  All arguments, including the
+ * read/write flags, are ignored: a full fence is issued via
+ * ia64_mf_a() and ia64_mf() in every case (presumably the mf.a and
+ * mf instructions -- see machine/cpufunc.h).
+ */
+static __inline void
+bus_space_barrier(bus_space_tag_t bst __unused, bus_space_handle_t bsh __unused,
+    bus_size_t ofs __unused, bus_size_t size __unused, int flags __unused)
+{
+	ia64_mf_a();
+	ia64_mf();
+}
+
+
+/*
+ * Read 1 unit of data from bus space described by the tag, handle and ofs
+ * tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is returned.  I/O-port space is routed through the out-of-line
+ * bus_space_read_io_* helpers; memory space uses direct ia64_ld* loads.
+ */
+uint8_t  bus_space_read_io_1(u_long);
+uint16_t bus_space_read_io_2(u_long);
+uint32_t bus_space_read_io_4(u_long);
+uint64_t bus_space_read_io_8(u_long);
+
+static __inline uint8_t
+bus_space_read_1(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t ofs)
+{
+	uint8_t val;
+
+	val = (__predict_false(bst == IA64_BUS_SPACE_IO))
+	    ? bus_space_read_io_1(bsh + ofs)
+	    : ia64_ld1((void *)(bsh + ofs));
+	return (val);
+}
+
+static __inline uint16_t
+bus_space_read_2(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t ofs)
+{
+	uint16_t val;
+
+	val = (__predict_false(bst == IA64_BUS_SPACE_IO))
+	    ? bus_space_read_io_2(bsh + ofs)
+	    : ia64_ld2((void *)(bsh + ofs));
+	return (val);
+}
+
+static __inline uint32_t
+bus_space_read_4(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t ofs)
+{
+	uint32_t val;
+
+	val = (__predict_false(bst == IA64_BUS_SPACE_IO))
+	    ? bus_space_read_io_4(bsh + ofs)
+	    : ia64_ld4((void *)(bsh + ofs));
+	return (val);
+}
+
+static __inline uint64_t
+bus_space_read_8(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t ofs)
+{
+	uint64_t val;
+
+	val = (__predict_false(bst == IA64_BUS_SPACE_IO))
+	    ? bus_space_read_io_8(bsh + ofs)
+	    : ia64_ld8((void *)(bsh + ofs));
+	return (val);
+}
+
+
+/*
+ * Write 1 unit of data to bus space described by the tag, handle and ofs
+ * tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is passed by value.  I/O-port space is routed through the
+ * out-of-line bus_space_write_io_* helpers; memory space uses direct
+ * ia64_st* stores.
+ */
+void bus_space_write_io_1(u_long, uint8_t);
+void bus_space_write_io_2(u_long, uint16_t);
+void bus_space_write_io_4(u_long, uint32_t);
+void bus_space_write_io_8(u_long, uint64_t);
+
+static __inline void
+bus_space_write_1(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t ofs,
+    uint8_t val)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_io_1(bsh + ofs, val);
+	else
+		ia64_st1((void *)(bsh + ofs), val);
+}
+
+static __inline void
+bus_space_write_2(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t ofs,
+    uint16_t val)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_io_2(bsh + ofs, val);
+	else
+		ia64_st2((void *)(bsh + ofs), val);
+}
+
+static __inline void
+bus_space_write_4(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t ofs,
+    uint32_t val)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_io_4(bsh + ofs, val);
+	else
+		ia64_st4((void *)(bsh + ofs), val);
+}
+
+static __inline void
+bus_space_write_8(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t ofs,
+    uint64_t val)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_io_8(bsh + ofs, val);
+	else
+		ia64_st8((void *)(bsh + ofs), val);
+}
+
+
+/*
+ * Read count units of data from bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is returned in the buffer passed by reference.  Unlike the
+ * region variants below, every read targets the SAME bus address
+ * (bsh + ofs) -- FIFO-style access.
+ */
+void bus_space_read_multi_io_1(u_long, uint8_t *, size_t);
+void bus_space_read_multi_io_2(u_long, uint16_t *, size_t);
+void bus_space_read_multi_io_4(u_long, uint32_t *, size_t);
+void bus_space_read_multi_io_8(u_long, uint64_t *, size_t);
+
+static __inline void
+bus_space_read_multi_1(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint8_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_read_multi_io_1(bsh + ofs, bufp, count);
+	else {
+		while (count-- > 0)
+			*bufp++ = ia64_ld1((void *)(bsh + ofs));
+	}
+}
+
+static __inline void
+bus_space_read_multi_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint16_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_read_multi_io_2(bsh + ofs, bufp, count);
+	else {
+		while (count-- > 0)
+			*bufp++ = ia64_ld2((void *)(bsh + ofs));
+	}
+}
+
+static __inline void
+bus_space_read_multi_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint32_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_read_multi_io_4(bsh + ofs, bufp, count);
+	else {
+		while (count-- > 0)
+			*bufp++ = ia64_ld4((void *)(bsh + ofs));
+	}
+}
+
+static __inline void
+bus_space_read_multi_8(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint64_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_read_multi_io_8(bsh + ofs, bufp, count);
+	else {
+		while (count-- > 0)
+			*bufp++ = ia64_ld8((void *)(bsh + ofs));
+	}
+}
+
+
+/*
+ * Write count units of data to bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is read from the buffer passed by reference.  Unlike the region
+ * variants below, every write targets the SAME bus address (bsh + ofs)
+ * -- FIFO-style access.
+ */
+void bus_space_write_multi_io_1(u_long, const uint8_t *, size_t);
+void bus_space_write_multi_io_2(u_long, const uint16_t *, size_t);
+void bus_space_write_multi_io_4(u_long, const uint32_t *, size_t);
+void bus_space_write_multi_io_8(u_long, const uint64_t *, size_t);
+
+static __inline void
+bus_space_write_multi_1(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, const uint8_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_multi_io_1(bsh + ofs, bufp, count);
+	else {
+		while (count-- > 0)
+			ia64_st1((void *)(bsh + ofs), *bufp++);
+	}
+}
+
+static __inline void
+bus_space_write_multi_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, const uint16_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_multi_io_2(bsh + ofs, bufp, count);
+	else {
+		while (count-- > 0)
+			ia64_st2((void *)(bsh + ofs), *bufp++);
+	}
+}
+
+static __inline void
+bus_space_write_multi_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, const uint32_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_multi_io_4(bsh + ofs, bufp, count);
+	else {
+		while (count-- > 0)
+			ia64_st4((void *)(bsh + ofs), *bufp++);
+	}
+}
+
+static __inline void
+bus_space_write_multi_8(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, const uint64_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_multi_io_8(bsh + ofs, bufp, count);
+	else {
+		while (count-- > 0)
+			ia64_st8((void *)(bsh + ofs), *bufp++);
+	}
+}
+
+
+/*
+ * Read count units of data from bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is written to the buffer passed by reference and read from successive
+ * bus space addresses. Access is unordered.
+ */
+void bus_space_read_region_io_1(u_long, uint8_t *, size_t);
+void bus_space_read_region_io_2(u_long, uint16_t *, size_t);
+void bus_space_read_region_io_4(u_long, uint32_t *, size_t);
+void bus_space_read_region_io_8(u_long, uint64_t *, size_t);
+
+static __inline void
+bus_space_read_region_1(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint8_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_read_region_io_1(bsh + ofs, bufp, count);
+	else {
+		uint8_t *bsp = (void *)(bsh + ofs);
+		while (count-- > 0)
+			*bufp++ = ia64_ld1(bsp++);
+	}
+}
+
+/*
+ * Read count 2-byte units from successive bus space addresses into bufp.
+ */
+static __inline void
+bus_space_read_region_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint16_t *bufp, size_t count)
+{
+	uint16_t *src;
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO)) {
+		bus_space_read_region_io_2(bsh + ofs, bufp, count);
+		return;
+	}
+	/* Memory-mapped: unordered loads from consecutive addresses. */
+	src = (void *)(bsh + ofs);
+	for (; count > 0; count--)
+		*bufp++ = ia64_ld2(src++);
+}
+
+/*
+ * Read count 4-byte units from successive bus space addresses into bufp.
+ */
+static __inline void
+bus_space_read_region_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint32_t *bufp, size_t count)
+{
+	uint32_t *src;
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO)) {
+		bus_space_read_region_io_4(bsh + ofs, bufp, count);
+		return;
+	}
+	/* Memory-mapped: unordered loads from consecutive addresses. */
+	src = (void *)(bsh + ofs);
+	for (; count > 0; count--)
+		*bufp++ = ia64_ld4(src++);
+}
+
+/*
+ * Read count 8-byte units from successive bus space addresses into bufp.
+ */
+static __inline void
+bus_space_read_region_8(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint64_t *bufp, size_t count)
+{
+	uint64_t *src;
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO)) {
+		bus_space_read_region_io_8(bsh + ofs, bufp, count);
+		return;
+	}
+	/* Memory-mapped: unordered loads from consecutive addresses. */
+	src = (void *)(bsh + ofs);
+	for (; count > 0; count--)
+		*bufp++ = ia64_ld8(src++);
+}
+
+
+/*
+ * Write count units of data from bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is read from the buffer passed by reference and written to successive
+ * bus space addresses. Access is unordered.
+ */
+void bus_space_write_region_io_1(u_long, const uint8_t *, size_t);
+void bus_space_write_region_io_2(u_long, const uint16_t *, size_t);
+void bus_space_write_region_io_4(u_long, const uint32_t *, size_t);
+void bus_space_write_region_io_8(u_long, const uint64_t *, size_t);
+
+/*
+ * Write count 1-byte units from bufp to successive bus space addresses.
+ */
+static __inline void
+bus_space_write_region_1(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, const uint8_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_region_io_1(bsh + ofs, bufp, count);
+	else {
+		/* Memory-mapped: unordered stores to consecutive addresses. */
+		uint8_t *bsp = (void *)(bsh + ofs);
+		while (count-- > 0)
+			ia64_st1(bsp++, *bufp++);
+	}
+}
+
+/*
+ * Write count 2-byte units from bufp to successive bus space addresses.
+ */
+static __inline void
+bus_space_write_region_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, const uint16_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_region_io_2(bsh + ofs, bufp, count);
+	else {
+		/* Memory-mapped: unordered stores to consecutive addresses. */
+		uint16_t *bsp = (void *)(bsh + ofs);
+		while (count-- > 0)
+			ia64_st2(bsp++, *bufp++);
+	}
+}
+
+/*
+ * Write count 4-byte units from bufp to successive bus space addresses.
+ */
+static __inline void
+bus_space_write_region_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, const uint32_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_region_io_4(bsh + ofs, bufp, count);
+	else {
+		/* Memory-mapped: unordered stores to consecutive addresses. */
+		uint32_t *bsp = (void *)(bsh + ofs);
+		while (count-- > 0)
+			ia64_st4(bsp++, *bufp++);
+	}
+}
+
+/*
+ * Write count 8-byte units from bufp to successive bus space addresses.
+ */
+static __inline void
+bus_space_write_region_8(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, const uint64_t *bufp, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_write_region_io_8(bsh + ofs, bufp, count);
+	else {
+		/* Memory-mapped: unordered stores to consecutive addresses. */
+		uint64_t *bsp = (void *)(bsh + ofs);
+		while (count-- > 0)
+			ia64_st8(bsp++, *bufp++);
+	}
+}
+
+
+/*
+ * Write count units of data from bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is passed by value. Writes are unordered.
+ */
+/*
+ * Write val count times to the single 1-byte bus space location.
+ */
+static __inline void
+bus_space_set_multi_1(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint8_t val, size_t count)
+{
+	size_t i;
+
+	for (i = 0; i < count; i++)
+		bus_space_write_1(bst, bsh, ofs, val);
+}
+
+/*
+ * Write val count times to the single 2-byte bus space location.
+ */
+static __inline void
+bus_space_set_multi_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint16_t val, size_t count)
+{
+	size_t i;
+
+	for (i = 0; i < count; i++)
+		bus_space_write_2(bst, bsh, ofs, val);
+}
+
+/*
+ * Write val count times to the single 4-byte bus space location.
+ */
+static __inline void
+bus_space_set_multi_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint32_t val, size_t count)
+{
+	size_t i;
+
+	for (i = 0; i < count; i++)
+		bus_space_write_4(bst, bsh, ofs, val);
+}
+
+/*
+ * Write val count times to the single 8-byte bus space location.
+ */
+static __inline void
+bus_space_set_multi_8(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint64_t val, size_t count)
+{
+	size_t i;
+
+	for (i = 0; i < count; i++)
+		bus_space_write_8(bst, bsh, ofs, val);
+}
+
+
+/*
+ * Write count units of data from bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is passed by value and written to successive bus space addresses.
+ * Writes are unordered.
+ */
+void bus_space_set_region_io_1(u_long, uint8_t, size_t);
+void bus_space_set_region_io_2(u_long, uint16_t, size_t);
+void bus_space_set_region_io_4(u_long, uint32_t, size_t);
+void bus_space_set_region_io_8(u_long, uint64_t, size_t);
+
+/*
+ * Fill count successive 1-byte bus space locations with val.
+ */
+static __inline void
+bus_space_set_region_1(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint8_t val, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_set_region_io_1(bsh + ofs, val, count);
+	else {
+		/* Memory-mapped: unordered stores to consecutive addresses. */
+		uint8_t *bsp = (void *)(bsh + ofs);
+		while (count-- > 0)
+			ia64_st1(bsp++, val);
+	}
+}
+
+/*
+ * Fill count successive 2-byte bus space locations with val.
+ */
+static __inline void
+bus_space_set_region_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint16_t val, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_set_region_io_2(bsh + ofs, val, count);
+	else {
+		/* Memory-mapped: unordered stores to consecutive addresses. */
+		uint16_t *bsp = (void *)(bsh + ofs);
+		while (count-- > 0)
+			ia64_st2(bsp++, val);
+	}
+}
+
+/*
+ * Fill count successive 4-byte bus space locations with val.
+ */
+static __inline void
+bus_space_set_region_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint32_t val, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		bus_space_set_region_io_4(bsh + ofs, val, count);
+	else {
+		/* Memory-mapped: unordered stores to consecutive addresses. */
+		uint32_t *bsp = (void *)(bsh + ofs);
+		while (count-- > 0)
+			ia64_st4(bsp++, val);
+	}
+}
+
+/*
+ * Fill count successive 8-byte bus space locations with val.
+ */
+static __inline void
+bus_space_set_region_8(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_size_t ofs, uint64_t val, size_t count)
+{
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO))
+		/*
+		 * Fix copy-paste bug: this branch called the 4-byte helper
+		 * bus_space_set_region_io_4(), which truncated val and
+		 * performed 4-byte I/O accesses for an 8-byte fill.
+		 */
+		bus_space_set_region_io_8(bsh + ofs, val, count);
+	else {
+		/* Memory-mapped: unordered stores to consecutive addresses. */
+		uint64_t *bsp = (void *)(bsh + ofs);
+		while (count-- > 0)
+			ia64_st8(bsp++, val);
+	}
+}
+
+
+/*
+ * Copy count units of data from bus space described by the tag and the first
+ * handle and ofs pair to bus space described by the tag and the second handle
+ * and ofs pair. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes.
+ * The data is read from successive bus space addresses and also written to
+ * successive bus space addresses. Both reads and writes are unordered.
+ */
+void bus_space_copy_region_io_1(u_long, u_long, size_t);
+void bus_space_copy_region_io_2(u_long, u_long, size_t);
+void bus_space_copy_region_io_4(u_long, u_long, size_t);
+void bus_space_copy_region_io_8(u_long, u_long, size_t);
+
+/*
+ * Copy count 1-byte units between two regions of the same bus space.
+ */
+static __inline void
+bus_space_copy_region_1(bus_space_tag_t bst, bus_space_handle_t sbsh,
+    bus_size_t sofs, bus_space_handle_t dbsh, bus_size_t dofs, size_t count)
+{
+	uint8_t *dst, *src;
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO)) {
+		bus_space_copy_region_io_1(sbsh + sofs, dbsh + dofs, count);
+		return;
+	}
+
+	src = (void *)(sbsh + sofs);
+	dst = (void *)(dbsh + dofs);
+	/*
+	 * Copy backwards when the destination starts above the source so
+	 * that overlapping regions are handled correctly (memmove-style).
+	 */
+	if (src < dst) {
+		src += count - 1;
+		dst += count - 1;
+		while (count-- > 0)
+			ia64_st1(dst--, ia64_ld1(src--));
+	} else {
+		while (count-- > 0)
+			ia64_st1(dst++, ia64_ld1(src++));
+	}
+}
+
+/*
+ * Copy count 2-byte units between two regions of the same bus space.
+ */
+static __inline void
+bus_space_copy_region_2(bus_space_tag_t bst, bus_space_handle_t sbsh,
+    bus_size_t sofs, bus_space_handle_t dbsh, bus_size_t dofs, size_t count)
+{
+	uint16_t *dst, *src;
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO)) {
+		bus_space_copy_region_io_2(sbsh + sofs, dbsh + dofs, count);
+		return;
+	}
+
+	src = (void *)(sbsh + sofs);
+	dst = (void *)(dbsh + dofs);
+	/*
+	 * Copy backwards when the destination starts above the source so
+	 * that overlapping regions are handled correctly (memmove-style).
+	 */
+	if (src < dst) {
+		src += count - 1;
+		dst += count - 1;
+		while (count-- > 0)
+			ia64_st2(dst--, ia64_ld2(src--));
+	} else {
+		while (count-- > 0)
+			ia64_st2(dst++, ia64_ld2(src++));
+	}
+}
+
+/*
+ * Copy count 4-byte units between two regions of the same bus space.
+ */
+static __inline void
+bus_space_copy_region_4(bus_space_tag_t bst, bus_space_handle_t sbsh,
+    bus_size_t sofs, bus_space_handle_t dbsh, bus_size_t dofs, size_t count)
+{
+	uint32_t *dst, *src;
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO)) {
+		bus_space_copy_region_io_4(sbsh + sofs, dbsh + dofs, count);
+		return;
+	}
+
+	src = (void *)(sbsh + sofs);
+	dst = (void *)(dbsh + dofs);
+	/*
+	 * Copy backwards when the destination starts above the source so
+	 * that overlapping regions are handled correctly (memmove-style).
+	 */
+	if (src < dst) {
+		src += count - 1;
+		dst += count - 1;
+		while (count-- > 0)
+			ia64_st4(dst--, ia64_ld4(src--));
+	} else {
+		while (count-- > 0)
+			ia64_st4(dst++, ia64_ld4(src++));
+	}
+}
+
+/*
+ * Copy count 8-byte units between two regions of the same bus space.
+ */
+static __inline void
+bus_space_copy_region_8(bus_space_tag_t bst, bus_space_handle_t sbsh,
+    bus_size_t sofs, bus_space_handle_t dbsh, bus_size_t dofs, size_t count)
+{
+	uint64_t *dst, *src;
+
+	if (__predict_false(bst == IA64_BUS_SPACE_IO)) {
+		bus_space_copy_region_io_8(sbsh + sofs, dbsh + dofs, count);
+		return;
+	}
+
+	src = (void *)(sbsh + sofs);
+	dst = (void *)(dbsh + dofs);
+	/*
+	 * Copy backwards when the destination starts above the source so
+	 * that overlapping regions are handled correctly (memmove-style).
+	 */
+	if (src < dst) {
+		src += count - 1;
+		dst += count - 1;
+		while (count-- > 0)
+			ia64_st8(dst--, ia64_ld8(src--));
+	} else {
+		while (count-- > 0)
+			ia64_st8(dst++, ia64_ld8(src++));
+	}
+}
+
+
+/*
+ * Stream accesses are the same as normal accesses on ia64; there are no
+ * supported bus systems with an endianness different from the host one.
+ */
+
+#define	bus_space_read_stream_1		bus_space_read_1
+#define	bus_space_read_stream_2		bus_space_read_2
+#define	bus_space_read_stream_4		bus_space_read_4
+#define	bus_space_read_stream_8		bus_space_read_8
+
+#define	bus_space_write_stream_1	bus_space_write_1
+#define	bus_space_write_stream_2	bus_space_write_2
+#define	bus_space_write_stream_4	bus_space_write_4
+#define	bus_space_write_stream_8	bus_space_write_8
+
+#define	bus_space_read_multi_stream_1	bus_space_read_multi_1
+#define	bus_space_read_multi_stream_2	bus_space_read_multi_2
+#define	bus_space_read_multi_stream_4	bus_space_read_multi_4
+#define	bus_space_read_multi_stream_8	bus_space_read_multi_8
+
+#define	bus_space_write_multi_stream_1	bus_space_write_multi_1
+#define	bus_space_write_multi_stream_2	bus_space_write_multi_2
+#define	bus_space_write_multi_stream_4	bus_space_write_multi_4
+#define	bus_space_write_multi_stream_8	bus_space_write_multi_8
+
+#define	bus_space_read_region_stream_1	bus_space_read_region_1
+#define	bus_space_read_region_stream_2	bus_space_read_region_2
+#define	bus_space_read_region_stream_4	bus_space_read_region_4
+#define	bus_space_read_region_stream_8	bus_space_read_region_8
+
+#define	bus_space_write_region_stream_1	bus_space_write_region_1
+#define	bus_space_write_region_stream_2	bus_space_write_region_2
+#define	bus_space_write_region_stream_4	bus_space_write_region_4
+#define	bus_space_write_region_stream_8	bus_space_write_region_8
+
+#define	bus_space_set_multi_stream_1	bus_space_set_multi_1
+#define	bus_space_set_multi_stream_2	bus_space_set_multi_2
+#define	bus_space_set_multi_stream_4	bus_space_set_multi_4
+#define	bus_space_set_multi_stream_8	bus_space_set_multi_8
+
+#define	bus_space_set_region_stream_1	bus_space_set_region_1
+#define	bus_space_set_region_stream_2	bus_space_set_region_2
+#define	bus_space_set_region_stream_4	bus_space_set_region_4
+#define	bus_space_set_region_stream_8	bus_space_set_region_8
+
+#define	bus_space_copy_region_stream_1	bus_space_copy_region_1
+#define	bus_space_copy_region_stream_2	bus_space_copy_region_2
+#define	bus_space_copy_region_stream_4	bus_space_copy_region_4
+#define	bus_space_copy_region_stream_8	bus_space_copy_region_8
+
+#endif /* _KERNEL */
+
+#include <machine/bus_dma.h>
+
+#endif /* _MACHINE_BUS_H_ */


Property changes on: trunk/sys/ia64/include/bus.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/bus_dma.h
===================================================================
--- trunk/sys/ia64/include/bus_dma.h	                        (rev 0)
+++ trunk/sys/ia64/include/bus_dma.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,34 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2005 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/* $FreeBSD: stable/10/sys/ia64/include/bus_dma.h 143598 2005-03-14 16:46:28Z scottl $ */
+
+#ifndef _IA64_BUS_DMA_H_
+#define _IA64_BUS_DMA_H_
+
+#include <sys/bus_dma.h>
+
+#endif /* _IA64_BUS_DMA_H_ */


Property changes on: trunk/sys/ia64/include/bus_dma.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/clock.h
===================================================================
--- trunk/sys/ia64/include/clock.h	                        (rev 0)
+++ trunk/sys/ia64/include/clock.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,13 @@
+/* $MidnightBSD$ */
+/*-
+ * Kernel interface to machine-dependent clock driver.
+ * Garrett Wollman, September 1994.
+ * This file is in the public domain.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/clock.h 205234 2010-03-17 00:37:15Z marcel $
+ */
+
+#ifndef _MACHINE_CLOCK_H_
+#define	_MACHINE_CLOCK_H_
+
+#endif /* !_MACHINE_CLOCK_H_ */


Property changes on: trunk/sys/ia64/include/clock.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/counter.h
===================================================================
--- trunk/sys/ia64/include/counter.h	                        (rev 0)
+++ trunk/sys/ia64/include/counter.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,94 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2012 Konstantin Belousov <kib at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/counter.h 252434 2013-07-01 02:48:27Z kib $
+ */
+
+#ifndef __MACHINE_COUNTER_H__
+#define __MACHINE_COUNTER_H__
+
+#include <sys/pcpu.h>
+#ifdef INVARIANTS
+#include <sys/proc.h>
+#endif
+
+#define	counter_enter()	critical_enter()
+#define	counter_exit()	critical_exit()
+
+#ifdef IN_SUBR_COUNTER_C
+/*
+ * Fetch the given CPU's slot of a per-CPU counter.  Slots are spaced
+ * sizeof(struct pcpu) bytes apart from the base pointer p.
+ */
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+	return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
+}
+
+/*
+ * Sum all per-CPU slots of the counter.  The slots are read one at a
+ * time, so the result is a best-effort snapshot, not an atomic one.
+ */
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+	uint64_t r;
+	int i;
+
+	r = 0;
+	for (i = 0; i < mp_ncpus; i++)
+		r += counter_u64_read_one((uint64_t *)p, i);
+
+	return (r);
+}
+
+/* XXXKIB might interrupt increment */
+/*
+ * smp_rendezvous() callback: zero the calling CPU's slot of counter arg.
+ */
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+	*((uint64_t *)((char *)arg + sizeof(struct pcpu) *
+	    PCPU_GET(cpuid))) = 0;
+}
+
+/*
+ * Zero every per-CPU slot of counter c by running
+ * counter_u64_zero_one_cpu() on each CPU via smp_rendezvous().
+ */
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+	smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
+	    smp_no_rendevous_barrier, c);
+}
+#endif
+
+/*
+ * Add inc to the current CPU's slot; the caller must already be inside
+ * a critical section (asserted below).
+ */
+#define	counter_u64_add_protected(c, inc)	do {	\
+	CRITICAL_ASSERT(curthread);			\
+	*(uint64_t *)zpcpu_get(c) += (inc);		\
+} while (0)
+
+/*
+ * Add inc to counter c.  A critical section keeps the read-modify-write
+ * of this CPU's slot from being preempted.
+ */
+static inline void
+counter_u64_add(counter_u64_t c, int64_t inc)
+{
+
+	counter_enter();
+	counter_u64_add_protected(c, inc);
+	counter_exit();
+}
+
+#endif	/* ! __MACHINE_COUNTER_H__ */


Property changes on: trunk/sys/ia64/include/counter.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/cpu.h
===================================================================
--- trunk/sys/ia64/include/cpu.h	                        (rev 0)
+++ trunk/sys/ia64/include/cpu.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,76 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/cpu.h 253750 2013-07-28 18:44:17Z avg $ */
+/* From: NetBSD: cpu.h,v 1.18 1997/09/23 23:17:49 mjacob Exp */
+
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1990, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: cpu.h 1.16 91/03/25$
+ *
+ *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
+ */
+
+#ifndef _MACHINE_CPU_H_
+#define _MACHINE_CPU_H_
+
+#include <machine/frame.h>
+
+#define	TRAPF_PC(tf)		((tf)->tf_special.iip)
+#define	TRAPF_CPL(tf)		((tf)->tf_special.psr & IA64_PSR_CPL)
+#define	TRAPF_USERMODE(tf)	(TRAPF_CPL(tf) != IA64_PSR_CPL_KERN)
+
+#ifdef _KERNEL
+
+#ifdef GPROF
+extern char btext[];
+extern char etext[];
+#endif
+
+/*
+ * Return contents of in-cpu fast counter as a sort of "bogo-time"
+ * for non-critical timing.
+ */
+#define	get_cyclecount		ia64_get_itc
+
+/* Used by signaling code. */
+#define	cpu_getstack(td)	((td)->td_frame->tf_special.sp)
+#define	cpu_spinwait()		/* nothing */
+
+void	cpu_halt(void);
+void	cpu_reset(void);
+void	fork_trampoline(void);				/* MAGIC */
+void	swi_vm(void *);
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_CPU_H_ */


Property changes on: trunk/sys/ia64/include/cpu.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/cpufunc.h
===================================================================
--- trunk/sys/ia64/include/cpufunc.h	                        (rev 0)
+++ trunk/sys/ia64/include/cpufunc.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,92 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/cpufunc.h 205713 2010-03-26 21:22:02Z marcel $
+ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define _MACHINE_CPUFUNC_H_
+
+#ifdef _KERNEL
+
+#include <sys/types.h>
+#include <machine/ia64_cpu.h>
+#include <machine/vmparam.h>
+
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+struct thread;
+
+#define	IA64_FIXED_BREAK	0x84B5D
+
+#ifdef __GNUCLIKE_ASM
+
+/* Enter the debugger by issuing a break instruction with the fixed
+ * break immediate IA64_FIXED_BREAK. */
+static __inline void
+breakpoint(void)
+{
+	__asm __volatile("break.m %0" :: "i"(IA64_FIXED_BREAK));
+}
+
+#define	HAVE_INLINE_FFS
+#define	ffs(x)	__builtin_ffs(x)
+
+
+/* Disable interrupts by clearing psr.i.  NOTE(review): no srlz follows
+ * the rsm here — presumably unnecessary for disabling; confirm against
+ * the Itanium architecture manual. */
+static __inline void
+ia64_disable_intr(void)
+{
+	__asm __volatile ("rsm psr.i");
+}
+
+/* Enable interrupts: set psr.i, then data-serialize (srlz.d) so the
+ * new setting takes effect before subsequent instructions. */
+static __inline void
+ia64_enable_intr(void)
+{
+	__asm __volatile ("ssm psr.i;; srlz.d");
+}
+
+/*
+ * Disable interrupts and return the previous interrupt state:
+ * 1 if psr.i was set (interrupts were enabled), 0 otherwise.
+ * The return value is passed back to intr_restore().
+ */
+static __inline register_t
+intr_disable(void)
+{
+	register_t psr;
+
+	__asm __volatile ("mov %0=psr;;" : "=r"(psr));
+	ia64_disable_intr();
+	return ((psr & IA64_PSR_I) ? 1 : 0);
+}
+
+/*
+ * Restore the interrupt state saved by intr_disable(): re-enable only
+ * if interrupts were enabled before (ie != 0).
+ */
+static __inline void
+intr_restore(register_t ie)
+{
+	if (ie)
+		ia64_enable_intr();
+}
+
+#endif /* __GNUCLIKE_ASM */
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_CPUFUNC_H_ */


Property changes on: trunk/sys/ia64/include/cpufunc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/db_machdep.h
===================================================================
--- trunk/sys/ia64/include/db_machdep.h	                        (rev 0)
+++ trunk/sys/ia64/include/db_machdep.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,81 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/db_machdep.h 147745 2005-07-02 23:52:37Z marcel $
+ */
+
+#ifndef	_MACHINE_DB_MACHDEP_H_
+#define	_MACHINE_DB_MACHDEP_H_
+
+#include <machine/ia64_cpu.h>
+
+/* We define some of our own commands. */
+#define	DB_MACHINE_COMMANDS
+
+/* We use Elf64 symbols in DDB. */
+#define	DB_ELFSIZE	64
+
+/* Pretty arbitrary. */
+#define	DB_SMALL_VALUE_MAX	0x7fffffff
+#define	DB_SMALL_VALUE_MIN	(-0x400001)
+
+typedef	vm_offset_t	db_addr_t;	/* address - unsigned */
+typedef	long		db_expr_t;	/* expression - signed */
+
+#define	PC_REGS()	((kdb_thrctx->pcb_special.__spare == 0) ?	\
+	kdb_thrctx->pcb_special.rp :					\
+	kdb_thrctx->pcb_special.iip + ((kdb_thrctx->pcb_special.psr>>41) & 3))
+
+#define BKPT_WRITE(addr, storage)	db_bkpt_write(addr, storage)
+#define BKPT_CLEAR(addr, storage)	db_bkpt_clear(addr, storage)
+#define BKPT_SKIP			db_bkpt_skip()
+#define BKPT_INST_TYPE			uint64_t
+
+void db_bkpt_write(db_addr_t, BKPT_INST_TYPE *storage);
+void db_bkpt_clear(db_addr_t, uint64_t *storage);
+void db_bkpt_skip(void);
+
+#define db_clear_single_step		kdb_cpu_clear_singlestep
+#define db_set_single_step		kdb_cpu_set_singlestep
+
+#define	IS_BREAKPOINT_TRAP(type, code)	(type == IA64_VEC_BREAK)
+#define	IS_WATCHPOINT_TRAP(type, code)	0
+
+#define	inst_trap_return(ins)	(ins & 0)
+#define	inst_return(ins)	(ins & 0)
+#define	inst_call(ins)		(ins & 0)
+#define	inst_branch(ins)	(ins & 0)
+#define	inst_load(ins)		(ins & 0)
+#define	inst_store(ins)		(ins & 0)
+#define	inst_unconditional_flow_transfer(ins) (ins & 0)
+
+#define	branch_taken(ins, pc, regs) pc
+
+/* Function call support. */
+#define	DB_MAXARGS	8	/* Only support arguments in registers. */
+#define	DB_CALL		db_fncall_ia64
+
+#endif	/* _MACHINE_DB_MACHDEP_H_ */


Property changes on: trunk/sys/ia64/include/db_machdep.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/dig64.h
===================================================================
--- trunk/sys/ia64/include/dig64.h	                        (rev 0)
+++ trunk/sys/ia64/include/dig64.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,91 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/dig64.h 139790 2005-01-06 22:18:23Z imp $
+ */
+
+#ifndef _MACHINE_DIG64_H_
+#define	_MACHINE_DIG64_H_
+
+/*
+ * Generic address structure used by the HCDP table entries below.
+ * The 64-bit address is split into two 32-bit halves (see XXX note).
+ */
+struct dig64_gas {
+	uint8_t		addr_space;
+	uint8_t		bit_width;
+	uint8_t		bit_offset;
+	uint8_t		_reserved_;
+	/*
+	 * XXX using a 64-bit type for the address would cause padding and
+	 * using __packed would cause unaligned accesses...
+	 */
+	uint32_t	addr_low;
+	uint32_t	addr_high;
+};
+
+/*
+ * One entry of the DIG64 HCDP (Headless Console and Debug Port) table,
+ * describing a console or debug port device.
+ * NOTE(review): field semantics follow the DIG64 HCDP specification;
+ * confirm bitfield layout against the spec revision in use.
+ */
+struct dig64_hcdp_entry {
+	uint8_t		type;
+#define	DIG64_HCDP_CONSOLE	0
+#define	DIG64_HCDP_DBGPORT	1
+	uint8_t		databits;
+	uint8_t		parity;
+	uint8_t		stopbits;
+	uint8_t		pci_segment;
+	uint8_t		pci_bus;
+	uint8_t		pci_device:5;
+	uint8_t		_reserved1_:3;
+	uint8_t		pci_function:3;
+	uint8_t		_reserved2_:3;
+	uint8_t		interrupt:1;
+	uint8_t		pci_flag:1;
+	/*
+	 * XXX using a 64-bit type for the baudrate would cause padding and
+	 * using __packed would cause unaligned accesses...
+	 */
+	uint32_t	baud_low;
+	uint32_t	baud_high;
+	struct dig64_gas address;
+	uint16_t	pci_devid;
+	uint16_t	pci_vendor;
+	uint32_t	irq;
+	uint32_t	pclock;
+	uint8_t		pci_interface;
+	uint8_t		_reserved3_[7];
+};
+
+/*
+ * HCDP table header.  entry[] is a variable-length trailing array
+ * (declared with one element, pre-C99 style); the 'entries' field
+ * presumably gives the actual element count — confirm against the
+ * DIG64 HCDP specification.
+ */
+struct dig64_hcdp_table {
+	char		signature[4];
+#define	HCDP_SIGNATURE	"HCDP"
+	uint32_t	length;
+	uint8_t		revision;
+	uint8_t		checksum;
+	char		oem_id[6];
+	char		oem_tbl_id[8];
+	uint32_t	oem_rev;
+	char		creator_id[4];
+	uint32_t	creator_rev;
+	uint32_t	entries;
+	struct dig64_hcdp_entry entry[1];
+};
+
+#endif


Property changes on: trunk/sys/ia64/include/dig64.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/elf.h
===================================================================
--- trunk/sys/ia64/include/elf.h	                        (rev 0)
+++ trunk/sys/ia64/include/elf.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,156 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1996-1997 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/elf.h 237430 2012-06-22 06:38:31Z kib $
+ */
+
+#ifndef _MACHINE_ELF_H_
+#define	_MACHINE_ELF_H_ 1
+
+/*
+ * ELF definitions for the IA-64 architecture.
+ */
+
+#ifndef __ELF_WORD_SIZE
+#define	__ELF_WORD_SIZE	64
+#endif
+
+#include <sys/elf64.h>	/* Definitions common to all 64 bit architectures. */
+#include <sys/elf32.h>	/* Definitions common to all 32 bit architectures. */
+
+#include <sys/elf_generic.h>
+
+#define	ELF_ARCH	EM_IA_64
+#define	ELF_ARCH32	EM_386
+
+#define	ELF_MACHINE_OK(x) ((x) == EM_IA_64)
+
+/*
+ * Auxiliary vector entries for passing information to the interpreter.
+ *
+ * The i386 supplement to the SVR4 ABI specification names this "auxv_t",
+ * but POSIX lays claim to all symbols ending with "_t".
+ */
+
+typedef struct {	/* Auxiliary vector entry on initial stack */
+	int	a_type;			/* Entry type. */
+	union {
+		int	a_val;		/* Integer value. */
+	} a_un;
+} Elf32_Auxinfo;
+
+typedef struct {	/* Auxiliary vector entry on initial stack */
+	int	a_type;			/* Entry type. */
+	union {
+		long	a_val;		/* Integer value. */
+		void	*a_ptr;		/* Address. */
+		void	(*a_fcn)(void);	/* Function pointer (not used). */
+	} a_un;
+} Elf64_Auxinfo;
+
+__ElfType(Auxinfo);
+
+/* Values for a_type. */
+#define	AT_NULL		0	/* Terminates the vector. */
+#define	AT_IGNORE	1	/* Ignored entry. */
+#define	AT_EXECFD	2	/* File descriptor of program to load. */
+#define	AT_PHDR		3	/* Program header of program already loaded. */
+#define	AT_PHENT	4	/* Size of each program header entry. */
+#define	AT_PHNUM	5	/* Number of program header entries. */
+#define	AT_PAGESZ	6	/* Page size in bytes. */
+#define	AT_BASE		7	/* Interpreter's base address. */
+#define	AT_FLAGS	8	/* Flags (unused for i386). */
+#define	AT_ENTRY	9	/* Where interpreter should transfer control. */
+#define	AT_NOTELF	10	/* Program is not ELF ?? */
+#define	AT_UID		11	/* Real uid. */
+#define	AT_EUID		12	/* Effective uid. */
+#define	AT_GID		13	/* Real gid. */
+#define	AT_EGID		14	/* Effective gid. */
+#define	AT_EXECPATH	15	/* Path to the executable. */
+#define	AT_CANARY	16	/* Canary for SSP */
+#define	AT_CANARYLEN	17	/* Length of the canary. */
+#define	AT_OSRELDATE	18	/* OSRELDATE. */
+#define	AT_NCPUS	19	/* Number of CPUs. */
+#define	AT_PAGESIZES	20	/* Pagesizes. */
+#define	AT_PAGESIZESLEN	21	/* Number of pagesizes. */
+#define	AT_TIMEKEEP	22	/* Pointer to timehands. */
+#define	AT_STACKPROT	23	/* Initial stack protection. */
+
+#define	AT_COUNT	24	/* Count of defined aux entry types. */
+
+/*
+ * Values for e_flags.
+ */
+#define	EF_IA_64_MASKOS		0x00ff000f
+#define	EF_IA_64_ABI64		0x00000010
+#define	EF_IA_64_REDUCEDFP	0x00000020
+#define	EF_IA_64_CONS_GP	0x00000040
+#define	EF_IA_64_NOFUNCDESC_CONS_GP 0x00000080
+#define	EF_IA_64_ABSOLUTE	0x00000100
+#define	EF_IA_64_ARCH		0xff000000
+
+/*
+ * Segment types.
+ */
+#define	PT_IA_64_ARCHEXT	0x70000000
+#define	PT_IA_64_UNWIND		0x70000001
+
+/*
+ * Segment attributes.
+ */
+#define	PF_IA_64_NORECOV	0x80000000
+
+/*
+ * Section types.
+ */
+#define	SHT_IA_64_EXT		0x70000000
+#define	SHT_IA_64_UNWIND	0x70000001
+#define	SHT_IA_64_LOPSREG	0x78000000
+#define	SHT_IA_64_HIPSREG	0x7fffffff
+
+/*
+ * Section attribute flags.
+ */
+#define	SHF_IA_64_SHORT		0x10000000
+#define	SHF_IA_64_NORECOV	0x20000000
+
+/* Define "machine" characteristics */
+#if __ELF_WORD_SIZE == 32
+#define	ELF_TARG_CLASS	ELFCLASS32
+#else
+#define	ELF_TARG_CLASS	ELFCLASS64
+#endif
+#define	ELF_TARG_DATA	ELFDATA2LSB
+#define	ELF_TARG_MACH	EM_IA_64
+#define	ELF_TARG_VER	1
+
+/* Processor specific dynamic section tags. */
+
+#define	DT_IA_64_PLT_RESERVE	0x70000000
+
+#define	ET_DYN_LOAD_ADDR 0x2500000000000000
+
+#endif /* !_MACHINE_ELF_H_ */


Property changes on: trunk/sys/ia64/include/elf.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/endian.h
===================================================================
--- trunk/sys/ia64/include/endian.h	                        (rev 0)
+++ trunk/sys/ia64/include/endian.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,110 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1987, 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)endian.h	8.1 (Berkeley) 6/10/93
+ *	$NetBSD: endian.h,v 1.5 1997/10/09 15:42:19 bouyer Exp $
+ * $FreeBSD: stable/10/sys/ia64/include/endian.h 143063 2005-03-02 21:33:29Z joerg $
+ */
+
+#ifndef _MACHINE_ENDIAN_H_
+#define	_MACHINE_ENDIAN_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+/*
+ * Define the order of 32-bit words in 64-bit words.
+ */
+#define	_QUAD_HIGHWORD 1
+#define	_QUAD_LOWWORD 0
+
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define	_LITTLE_ENDIAN	1234	/* LSB first: i386, vax */
+#define	_BIG_ENDIAN	4321	/* MSB first: 68000, ibm, net */
+#define	_PDP_ENDIAN	3412	/* LSB first in word, MSW first in long */
+
+#define	_BYTE_ORDER	_LITTLE_ENDIAN
+
+/*
+ * Deprecated variants that don't have enough underscores to be useful in more
+ * strict namespaces.
+ */
+#if __BSD_VISIBLE
+#define	LITTLE_ENDIAN	_LITTLE_ENDIAN
+#define	BIG_ENDIAN	_BIG_ENDIAN
+#define	PDP_ENDIAN	_PDP_ENDIAN
+#define	BYTE_ORDER	_BYTE_ORDER
+#endif
+
+#if defined(__CC_SUPPORTS___INLINE) && defined(__GNUCLIKE_ASM)
+
+static __inline __uint64_t
+__bswap64(__uint64_t _x)
+{
+	__uint64_t __r;
+
+	__asm __volatile("mux1 %0=%1,@rev"
+			 : "=r" (__r) : "r"(_x));
+	return __r;
+}
+
+static __inline __uint32_t
+__bswap32(__uint32_t _x)
+{
+
+	return (__bswap64(_x) >> 32);
+}
+
+static __inline __uint16_t
+__bswap16(__uint16_t _x)
+{
+
+	return (__bswap64(_x) >> 48);
+}
+
+#define	__htonl(x)	__bswap32(x)
+#define	__htons(x)	__bswap16(x)
+#define	__ntohl(x)	__bswap32(x)
+#define	__ntohs(x)	__bswap16(x)
+
+#else /* !(__CC_SUPPORTS___INLINE && __GNUCLIKE_ASM) */
+
+/*
+ * No optimizations are available for this compiler.  Fall back to
+ * non-optimized functions by defining the constant usually used to prevent
+ * redefinition.
+ */
+#define	_BYTEORDER_FUNC_DEFINED
+
+#endif /* __CC_SUPPORTS___INLINE && __GNUCLIKE_ASM */
+
+#endif /* !_MACHINE_ENDIAN_H_ */


Property changes on: trunk/sys/ia64/include/endian.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/exec.h
===================================================================
--- trunk/sys/ia64/include/exec.h	                        (rev 0)
+++ trunk/sys/ia64/include/exec.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,38 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/exec.h 165967 2007-01-12 07:26:21Z imp $
+ */
+
+#ifndef	_MACHINE_EXEC_H_
+#define	_MACHINE_EXEC_H_
+
+#define	__LDPGSZ	4096
+
+#endif /* !_MACHINE_EXEC_H_ */


Property changes on: trunk/sys/ia64/include/exec.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/float.h
===================================================================
--- trunk/sys/ia64/include/float.h	                        (rev 0)
+++ trunk/sys/ia64/include/float.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,95 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/float.h 230475 2012-01-23 06:36:41Z das $ */
+/* From: NetBSD: float.h,v 1.6 1997/07/17 21:36:03 thorpej Exp */
+
+/*-
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_FLOAT_H_
+#define _MACHINE_FLOAT_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern int __flt_rounds(void);
+__END_DECLS
+
+#define FLT_RADIX	2		/* b */
+#define FLT_ROUNDS	__flt_rounds()
+#if __ISO_C_VISIBLE >= 1999
+#define	FLT_EVAL_METHOD	0		/* no promotions */
+#define	DECIMAL_DIG	21		/* max precision in decimal digits */
+#endif
+
+#define FLT_MANT_DIG	24		/* p */
+#define FLT_EPSILON	1.19209290E-07F	/* b**(1-p) */
+#define FLT_DIG		6		/* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP	(-125)		/* emin */
+#define FLT_MIN		1.17549435E-38F	/* b**(emin-1) */
+#define FLT_MIN_10_EXP	(-37)		/* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP	128		/* emax */
+#define FLT_MAX		3.40282347E+38F	/* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP	38		/* floor(log10((1-b**(-p))*b**emax)) */
+#if __ISO_C_VISIBLE >= 2011
+#define	FLT_TRUE_MIN	1.40129846E-45F	/* b**(emin-p) */
+#define	FLT_DECIMAL_DIG	9		/* ceil(1+p*log10(b)) */
+#define	FLT_HAS_SUBNORM	1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#define DBL_MANT_DIG	53
+#define DBL_EPSILON	2.2204460492503131E-16
+#define DBL_DIG		15
+#define DBL_MIN_EXP	(-1021)
+#define DBL_MIN		2.2250738585072014E-308
+#define DBL_MIN_10_EXP	(-307)
+#define DBL_MAX_EXP	1024
+#define DBL_MAX		1.7976931348623157E+308
+#define DBL_MAX_10_EXP	308
+#if __ISO_C_VISIBLE >= 2011
+#define	DBL_TRUE_MIN	4.9406564584124654E-324
+#define	DBL_DECIMAL_DIG	17
+#define	DBL_HAS_SUBNORM	1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#define LDBL_MANT_DIG	64
+#define LDBL_EPSILON	1.0842021724855044340E-19L
+#define LDBL_DIG	18
+#define LDBL_MIN_EXP	(-16381)
+#define LDBL_MIN	3.3621031431120935063E-4932L
+#define LDBL_MIN_10_EXP	(-4931)
+#define LDBL_MAX_EXP	16384
+#define LDBL_MAX	1.1897314953572317650E+4932L
+#define LDBL_MAX_10_EXP	4932
+#if __ISO_C_VISIBLE >= 2011
+#define	LDBL_TRUE_MIN	3.6451995318824746025E-4951L
+#define	LDBL_DECIMAL_DIG 21
+#define	LDBL_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#endif	/* _MACHINE_FLOAT_H_ */


Property changes on: trunk/sys/ia64/include/float.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/floatingpoint.h
===================================================================
--- trunk/sys/ia64/include/floatingpoint.h	                        (rev 0)
+++ trunk/sys/ia64/include/floatingpoint.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,33 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/floatingpoint.h 165967 2007-01-12 07:26:21Z imp $
+ */
+
+#include <machine/ieeefp.h>


Property changes on: trunk/sys/ia64/include/floatingpoint.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/fpu.h
===================================================================
--- trunk/sys/ia64/include/fpu.h	                        (rev 0)
+++ trunk/sys/ia64/include/fpu.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,110 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/fpu.h 138674 2004-12-11 06:15:12Z marcel $
+ */
+
+#ifndef _MACHINE_FPU_H_
+#define _MACHINE_FPU_H_
+
+/*
+ * Floating point status register bits.
+ */
+
+#define IA64_FPSR_TRAP_VD	0x0000000000000001L
+#define IA64_FPSR_TRAP_DD	0x0000000000000002L
+#define IA64_FPSR_TRAP_ZD	0x0000000000000004L
+#define IA64_FPSR_TRAP_OD	0x0000000000000008L
+#define IA64_FPSR_TRAP_UD	0x0000000000000010L
+#define IA64_FPSR_TRAP_ID	0x0000000000000020L
+#define IA64_FPSR_SF(i,v)	((v) << ((i)*13+6))
+
+#define IA64_SF_FTZ		0x0001L
+#define IA64_SF_WRE		0x0002L
+#define IA64_SF_PC		0x000cL
+#define IA64_SF_PC_0		0x0000L
+#define IA64_SF_PC_1		0x0004L
+#define IA64_SF_PC_2		0x0008L
+#define IA64_SF_PC_3		0x000cL
+#define IA64_SF_RC		0x0030L
+#define IA64_SF_RC_NEAREST	0x0000L
+#define IA64_SF_RC_NEGINF	0x0010L
+#define IA64_SF_RC_POSINF	0x0020L
+#define IA64_SF_RC_TRUNC	0x0030L
+#define IA64_SF_TD		0x0040L
+#define IA64_SF_V		0x0080L
+#define IA64_SF_D		0x0100L
+#define IA64_SF_Z		0x0200L
+#define IA64_SF_O		0x0400L
+#define IA64_SF_U		0x0800L
+#define IA64_SF_I		0x1000L
+
+#define IA64_SF_DEFAULT		(IA64_SF_PC_3 | IA64_SF_RC_NEAREST)
+
+#define IA64_FPSR_DEFAULT	(IA64_FPSR_TRAP_VD			\
+				 | IA64_FPSR_TRAP_DD			\
+				 | IA64_FPSR_TRAP_ZD			\
+				 | IA64_FPSR_TRAP_OD			\
+				 | IA64_FPSR_TRAP_UD			\
+				 | IA64_FPSR_TRAP_ID			\
+				 | IA64_FPSR_SF(0, IA64_SF_DEFAULT)	\
+				 | IA64_FPSR_SF(1, (IA64_SF_DEFAULT	\
+						    | IA64_SF_TD	\
+						    | IA64_SF_WRE))	\
+				 | IA64_FPSR_SF(2, (IA64_SF_DEFAULT	\
+						    | IA64_SF_TD))	\
+				 | IA64_FPSR_SF(3, (IA64_SF_DEFAULT	\
+						    | IA64_SF_TD)))
+
+struct fpswa_ret {
+	unsigned long	status;
+	unsigned long	err1;
+	unsigned long	err2;
+	unsigned long	err3;
+};
+
+struct fpswa_bundle {
+	long double	bits;		/* Force 16-byte alignment. */
+};
+
+struct fpswa_fpctx {
+	unsigned long	mask_low;			/* f63 - f2 */
+	unsigned long	mask_high;			/* f127 - f64 */
+	union _ia64_fpreg *fp_low_preserved;		/* f2 - f5 */
+	union _ia64_fpreg *fp_low_volatile;		/* f6 - f15 */
+	union _ia64_fpreg *fp_high_preserved;		/* f16 - f31 */
+	union _ia64_fpreg *fp_high_volatile;		/* f32 - f127 */
+};
+
+struct fpswa_iface {
+	unsigned int	if_rev;
+	unsigned int	__res;
+	struct fpswa_ret (*if_fpswa)(unsigned long, struct fpswa_bundle *,
+	    unsigned long *, unsigned long *, unsigned long *, unsigned long *,
+	    unsigned long *, struct fpswa_fpctx *);
+};
+
+#endif /* ! _MACHINE_FPU_H_ */


Property changes on: trunk/sys/ia64/include/fpu.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/frame.h
===================================================================
--- trunk/sys/ia64/include/frame.h	                        (rev 0)
+++ trunk/sys/ia64/include/frame.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,49 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	$FreeBSD: stable/10/sys/ia64/include/frame.h 205428 2010-03-21 22:33:09Z marcel $
+ */
+
+#ifndef _MACHINE_FRAME_H_
+#define	_MACHINE_FRAME_H_
+
+#ifndef _MACHINE_REGSET_H_
+#include <machine/_regset.h>
+#endif
+
+/*
+ * Software trap, exception, and syscall frame.
+ */
+struct trapframe {
+	uint64_t		tf_length;
+	uint64_t		tf_flags;
+#define FRAME_SYSCALL		1	/* syscalls use a partial trapframe */
+	struct _special		tf_special;
+	struct _caller_saved	tf_scratch;
+	struct _caller_saved_fp	tf_scratch_fp;
+};
+
+#endif /* _MACHINE_FRAME_H_ */


Property changes on: trunk/sys/ia64/include/frame.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/gdb_machdep.h
===================================================================
--- trunk/sys/ia64/include/gdb_machdep.h	                        (rev 0)
+++ trunk/sys/ia64/include/gdb_machdep.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,49 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/gdb_machdep.h 139790 2005-01-06 22:18:23Z imp $
+ */
+
+#ifndef _MACHINE_GDB_MACHDEP_H_
+#define	_MACHINE_GDB_MACHDEP_H_
+
+#define GDB_NREGS	462
+#define	GDB_REG_PC	331
+
+#define	GDB_BUFSZ	(GDB_NREGS*16+128*16)
+
+static __inline size_t
+gdb_cpu_regsz(int regnum)
+{
+	return ((regnum >= 128 && regnum < 256) ? 16 : 8);
+}
+
+void *gdb_cpu_getreg(int, size_t *);
+void gdb_cpu_setreg(int, void *);
+int gdb_cpu_signal(int, int);
+int gdb_cpu_query(void);
+
+#endif /* !_MACHINE_GDB_MACHDEP_H_ */


Property changes on: trunk/sys/ia64/include/gdb_machdep.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/ia64_cpu.h
===================================================================
--- trunk/sys/ia64/include/ia64_cpu.h	                        (rev 0)
+++ trunk/sys/ia64/include/ia64_cpu.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,533 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2007 Marcel Moolenaar
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/ia64_cpu.h 223170 2011-06-17 04:26:03Z marcel $
+ */
+
+#ifndef _MACHINE_IA64_CPU_H_
+#define _MACHINE_IA64_CPU_H_
+
+/*
+ * Local Interrupt ID.
+ */
+#define	IA64_LID_GET_SAPIC_ID(x)	((u_int)((x) >> 16) & 0xffff)
+#define	IA64_LID_SET_SAPIC_ID(x)	((u_int)((x) & 0xffff) << 16)
+
+/*
+ * Definition of DCR bits.
+ */
+#define	IA64_DCR_PP		0x0000000000000001
+#define	IA64_DCR_BE		0x0000000000000002
+#define	IA64_DCR_LC		0x0000000000000004
+#define	IA64_DCR_DM		0x0000000000000100
+#define	IA64_DCR_DP		0x0000000000000200
+#define	IA64_DCR_DK		0x0000000000000400
+#define	IA64_DCR_DX		0x0000000000000800
+#define	IA64_DCR_DR		0x0000000000001000
+#define	IA64_DCR_DA		0x0000000000002000
+#define	IA64_DCR_DD		0x0000000000004000
+
+#define	IA64_DCR_DEFAULT					\
+    (IA64_DCR_DM | IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX |	\
+     IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD)
+
+/*
+ * Definition of PSR and IPSR bits.
+ */
+#define IA64_PSR_BE		0x0000000000000002
+#define IA64_PSR_UP		0x0000000000000004
+#define IA64_PSR_AC		0x0000000000000008
+#define IA64_PSR_MFL		0x0000000000000010
+#define IA64_PSR_MFH		0x0000000000000020
+#define IA64_PSR_IC		0x0000000000002000
+#define IA64_PSR_I		0x0000000000004000
+#define IA64_PSR_PK		0x0000000000008000
+#define IA64_PSR_DT		0x0000000000020000
+#define IA64_PSR_DFL		0x0000000000040000
+#define IA64_PSR_DFH		0x0000000000080000
+#define IA64_PSR_SP		0x0000000000100000
+#define IA64_PSR_PP		0x0000000000200000
+#define IA64_PSR_DI		0x0000000000400000
+#define IA64_PSR_SI		0x0000000000800000
+#define IA64_PSR_DB		0x0000000001000000
+#define IA64_PSR_LP		0x0000000002000000
+#define IA64_PSR_TB		0x0000000004000000
+#define IA64_PSR_RT		0x0000000008000000
+#define IA64_PSR_CPL		0x0000000300000000
+#define IA64_PSR_CPL_KERN	0x0000000000000000
+#define IA64_PSR_CPL_1		0x0000000100000000
+#define IA64_PSR_CPL_2		0x0000000200000000
+#define IA64_PSR_CPL_USER	0x0000000300000000
+#define IA64_PSR_IS		0x0000000400000000
+#define IA64_PSR_MC		0x0000000800000000
+#define IA64_PSR_IT		0x0000001000000000
+#define IA64_PSR_ID		0x0000002000000000
+#define IA64_PSR_DA		0x0000004000000000
+#define IA64_PSR_DD		0x0000008000000000
+#define IA64_PSR_SS		0x0000010000000000
+#define IA64_PSR_RI		0x0000060000000000
+#define IA64_PSR_RI_0		0x0000000000000000
+#define IA64_PSR_RI_1		0x0000020000000000
+#define IA64_PSR_RI_2		0x0000040000000000
+#define IA64_PSR_ED		0x0000080000000000
+#define IA64_PSR_BN		0x0000100000000000
+#define IA64_PSR_IA		0x0000200000000000
+
+/*
+ * Definition of ISR bits.
+ */
+#define IA64_ISR_CODE		0x000000000000ffff
+#define IA64_ISR_VECTOR		0x0000000000ff0000
+#define IA64_ISR_X		0x0000000100000000
+#define IA64_ISR_W		0x0000000200000000
+#define IA64_ISR_R		0x0000000400000000
+#define IA64_ISR_NA		0x0000000800000000
+#define IA64_ISR_SP		0x0000001000000000
+#define IA64_ISR_RS		0x0000002000000000
+#define IA64_ISR_IR		0x0000004000000000
+#define IA64_ISR_NI		0x0000008000000000
+#define IA64_ISR_SO		0x0000010000000000
+#define IA64_ISR_EI		0x0000060000000000
+#define IA64_ISR_EI_0		0x0000000000000000
+#define IA64_ISR_EI_1		0x0000020000000000
+#define IA64_ISR_EI_2		0x0000040000000000
+#define IA64_ISR_ED		0x0000080000000000
+
+/*
+ * Vector numbers for various ia64 interrupts.
+ */
+#define IA64_VEC_VHPT			0
+#define IA64_VEC_ITLB			1
+#define IA64_VEC_DTLB			2
+#define IA64_VEC_ALT_ITLB		3
+#define IA64_VEC_ALT_DTLB		4
+#define IA64_VEC_NESTED_DTLB		5
+#define IA64_VEC_IKEY_MISS		6
+#define IA64_VEC_DKEY_MISS		7
+#define IA64_VEC_DIRTY_BIT		8
+#define IA64_VEC_INST_ACCESS		9
+#define IA64_VEC_DATA_ACCESS		10
+#define IA64_VEC_BREAK			11
+#define IA64_VEC_EXT_INTR		12
+#define IA64_VEC_PAGE_NOT_PRESENT	20
+#define IA64_VEC_KEY_PERMISSION		21
+#define IA64_VEC_INST_ACCESS_RIGHTS	22
+#define IA64_VEC_DATA_ACCESS_RIGHTS	23
+#define IA64_VEC_GENERAL_EXCEPTION	24
+#define IA64_VEC_DISABLED_FP		25
+#define IA64_VEC_NAT_CONSUMPTION	26
+#define IA64_VEC_SPECULATION		27
+#define IA64_VEC_DEBUG			29
+#define IA64_VEC_UNALIGNED_REFERENCE	30
+#define IA64_VEC_UNSUPP_DATA_REFERENCE	31
+#define IA64_VEC_FLOATING_POINT_FAULT	32
+#define IA64_VEC_FLOATING_POINT_TRAP	33
+#define IA64_VEC_LOWER_PRIVILEGE_TRANSFER 34
+#define IA64_VEC_TAKEN_BRANCH_TRAP	35
+#define IA64_VEC_SINGLE_STEP_TRAP	36
+#define IA64_VEC_IA32_EXCEPTION		45
+#define IA64_VEC_IA32_INTERCEPT		46
+#define IA64_VEC_IA32_INTERRUPT		47
+
+/*
+ * IA-32 exceptions.
+ */
+#define IA32_EXCEPTION_DIVIDE		0
+#define IA32_EXCEPTION_DEBUG		1
+#define IA32_EXCEPTION_BREAK		3
+#define IA32_EXCEPTION_OVERFLOW		4
+#define IA32_EXCEPTION_BOUND		5
+#define IA32_EXCEPTION_DNA		7
+#define IA32_EXCEPTION_NOT_PRESENT	11
+#define IA32_EXCEPTION_STACK_FAULT	12
+#define IA32_EXCEPTION_GPFAULT		13
+#define IA32_EXCEPTION_FPERROR		16
+#define IA32_EXCEPTION_ALIGNMENT_CHECK	17
+#define IA32_EXCEPTION_STREAMING_SIMD	19
+
+#define IA32_INTERCEPT_INSTRUCTION	0
+#define IA32_INTERCEPT_GATE		1
+#define IA32_INTERCEPT_SYSTEM_FLAG	2
+#define IA32_INTERCEPT_LOCK		4
+
+#ifndef LOCORE
+
+/*
+ * Various special ia64 instructions.
+ */
+
+/*
+ * Memory Fence.
+ */
+static __inline void
+ia64_mf(void)
+{
+	__asm __volatile("mf");
+}
+
+static __inline void
+ia64_mf_a(void)
+{
+	__asm __volatile("mf.a");
+}
+
+/*
+ * Flush Cache.
+ */
+static __inline void
+ia64_fc(uint64_t va)
+{
+	__asm __volatile("fc %0" :: "r"(va));
+}
+
+static __inline void
+ia64_fc_i(uint64_t va)
+{
+	__asm __volatile("fc.i %0" :: "r"(va));
+}
+
+/*
+ * Sync instruction stream.
+ */
+static __inline void
+ia64_sync_i(void)
+{
+	__asm __volatile("sync.i");
+}
+
+/*
+ * Calculate address in VHPT for va.
+ */
+static __inline uint64_t
+ia64_thash(uint64_t va)
+{
+	uint64_t result;
+	__asm __volatile("thash %0=%1" : "=r" (result) : "r" (va));
+	return result;
+}
+
+/*
+ * Calculate VHPT tag for va.
+ */
+static __inline uint64_t
+ia64_ttag(uint64_t va)
+{
+	uint64_t result;
+	__asm __volatile("ttag %0=%1" : "=r" (result) : "r" (va));
+	return result;
+}
+
+/*
+ * Convert virtual address to physical.
+ */
+static __inline uint64_t
+ia64_tpa(uint64_t va)
+{
+	uint64_t result;
+	__asm __volatile("tpa %0=%1" : "=r" (result) : "r" (va));
+	return result;
+}
+
+/*
+ * Generate a ptc.e instruction.
+ */
+static __inline void
+ia64_ptc_e(uint64_t v)
+{
+	__asm __volatile("ptc.e %0;; srlz.i;;" :: "r"(v));
+}
+
+/*
+ * Generate a ptc.g instruction.
+ */
+static __inline void
+ia64_ptc_g(uint64_t va, uint64_t log2size)
+{
+	__asm __volatile("ptc.g %0,%1;;" :: "r"(va), "r"(log2size));
+}
+
+/*
+ * Generate a ptc.ga instruction.
+ */
+static __inline void
+ia64_ptc_ga(uint64_t va, uint64_t log2size)
+{
+	__asm __volatile("ptc.ga %0,%1;;" :: "r"(va), "r"(log2size));
+}
+
+/*
+ * Generate a ptc.l instruction.
+ */
+static __inline void
+ia64_ptc_l(uint64_t va, uint64_t log2size)
+{
+	__asm __volatile("ptc.l %0,%1;; srlz.i;;" :: "r"(va), "r"(log2size));
+}
+
+/*
+ * Invalidate the ALAT on the local processor.
+ */
+static __inline void
+ia64_invala(void)
+{
+	__asm __volatile("invala;;");
+}
+
+/*
+ * Unordered memory load.
+ */
+
+static __inline uint8_t
+ia64_ld1(uint8_t *p)
+{
+	uint8_t v;
+
+	__asm __volatile("ld1 %0=[%1];;" : "=r"(v) : "r"(p));
+	return (v);
+}
+
+static __inline uint16_t
+ia64_ld2(uint16_t *p)        
+{
+	uint16_t v;
+
+	__asm __volatile("ld2 %0=[%1];;" : "=r"(v) : "r"(p));
+	return (v);
+}
+
+static __inline uint32_t
+ia64_ld4(uint32_t *p)        
+{
+	uint32_t v;
+
+	__asm __volatile("ld4 %0=[%1];;" : "=r"(v) : "r"(p));
+	return (v);
+}
+
+static __inline uint64_t
+ia64_ld8(uint64_t *p)        
+{
+	uint64_t v;
+
+	__asm __volatile("ld8 %0=[%1];;" : "=r"(v) : "r"(p));
+	return (v);
+}
+
+/*
+ * Unordered memory store.
+ */
+
+static __inline void
+ia64_st1(uint8_t *p, uint8_t v)
+{
+	__asm __volatile("st1 [%0]=%1;;" :: "r"(p), "r"(v));
+}
+
+static __inline void
+ia64_st2(uint16_t *p, uint16_t v)
+{
+	__asm __volatile("st2 [%0]=%1;;" :: "r"(p), "r"(v));
+}
+
+static __inline void
+ia64_st4(uint32_t *p, uint32_t v)
+{
+	__asm __volatile("st4 [%0]=%1;;" :: "r"(p), "r"(v));
+}
+
+static __inline void
+ia64_st8(uint64_t *p, uint64_t v)
+{
+	__asm __volatile("st8 [%0]=%1;;" :: "r"(p), "r"(v));
+}
+
+/*
+ * Read the value of psr.
+ */
+static __inline uint64_t
+ia64_get_psr(void)
+{
+	uint64_t result;
+	__asm __volatile("mov %0=psr;;" : "=r" (result));
+	return result;
+}
+
+/*
+ * Define accessors for application registers.
+ */
+
+#define IA64_AR(name)						\
+								\
+static __inline uint64_t					\
+ia64_get_##name(void)						\
+{								\
+	uint64_t result;					\
+	__asm __volatile("mov %0=ar." #name : "=r" (result));	\
+	return result;						\
+}								\
+								\
+static __inline void						\
+ia64_set_##name(uint64_t v)					\
+{								\
+	__asm __volatile("mov ar." #name "=%0;;" :: "r" (v));	\
+}
+
+IA64_AR(k0)
+IA64_AR(k1)
+IA64_AR(k2)
+IA64_AR(k3)
+IA64_AR(k4)
+IA64_AR(k5)
+IA64_AR(k6)
+IA64_AR(k7)
+
+IA64_AR(rsc)
+IA64_AR(bsp)
+IA64_AR(bspstore)
+IA64_AR(rnat)
+
+IA64_AR(fcr)
+
+IA64_AR(eflag)
+IA64_AR(csd)
+IA64_AR(ssd)
+IA64_AR(cflg)
+IA64_AR(fsr)
+IA64_AR(fir)
+IA64_AR(fdr)
+
+IA64_AR(ccv)
+
+IA64_AR(unat)
+
+IA64_AR(fpsr)
+
+IA64_AR(itc)
+
+IA64_AR(pfs)
+IA64_AR(lc)
+IA64_AR(ec)
+
+/*
+ * Define accessors for control registers.
+ */
+
+#define IA64_CR(name)						\
+								\
+static __inline uint64_t					\
+ia64_get_##name(void)						\
+{								\
+	uint64_t result;					\
+	__asm __volatile("mov %0=cr." #name : "=r" (result));	\
+	return result;						\
+}								\
+								\
+static __inline void						\
+ia64_set_##name(uint64_t v)					\
+{								\
+	__asm __volatile("mov cr." #name "=%0;;" :: "r" (v));	\
+}
+
+IA64_CR(dcr)
+IA64_CR(itm)
+IA64_CR(iva)
+
+IA64_CR(pta)
+
+IA64_CR(ipsr)
+IA64_CR(isr)
+
+IA64_CR(iip)
+IA64_CR(ifa)
+IA64_CR(itir)
+IA64_CR(iipa)
+IA64_CR(ifs)
+IA64_CR(iim)
+IA64_CR(iha)
+
+IA64_CR(lid)
+IA64_CR(ivr)
+IA64_CR(tpr)
+IA64_CR(eoi)
+IA64_CR(irr0)
+IA64_CR(irr1)
+IA64_CR(irr2)
+IA64_CR(irr3)
+IA64_CR(itv)
+IA64_CR(pmv)
+IA64_CR(cmcv)
+
+IA64_CR(lrr0)
+IA64_CR(lrr1)
+
+/*
+ * Write a region register.
+ */
+static __inline void
+ia64_set_rr(uint64_t rrbase, uint64_t v)
+{
+	__asm __volatile("mov rr[%0]=%1"
+			 :: "r"(rrbase), "r"(v) : "memory");
+}
+
+/*
+ * Read a CPUID register.
+ */
+static __inline uint64_t
+ia64_get_cpuid(int i)
+{
+	uint64_t result;
+	__asm __volatile("mov %0=cpuid[%1]"
+			 : "=r" (result) : "r"(i));
+	return result;
+}
+
+static __inline void
+ia64_disable_highfp(void)
+{
+	__asm __volatile("ssm psr.dfh;; srlz.d");
+}
+
+static __inline void
+ia64_enable_highfp(void)
+{
+	__asm __volatile("rsm psr.dfh;; srlz.d");
+}
+
+/*
+ * Avoid inline functions for the following so that they still work
+ * correctly when inlining is not enabled (e.g. -O0). Function calls
+ * need data serialization after setting psr, which results in a
+ * hazard.
+ */
+#define	ia64_srlz_d()	__asm __volatile("srlz.d")
+#define	ia64_srlz_i()	__asm __volatile("srlz.i;;")
+
+#endif /* !LOCORE */
+
+#endif /* _MACHINE_IA64_CPU_H_ */
+


Property changes on: trunk/sys/ia64/include/ia64_cpu.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/ieee.h
===================================================================
--- trunk/sys/ia64/include/ieee.h	                        (rev 0)
+++ trunk/sys/ia64/include/ieee.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,116 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/ieee.h 139790 2005-01-06 22:18:23Z imp $ */
+/* From: NetBSD: ieee.h,v 1.2 1997/04/06 08:47:27 cgd Exp */
+
+/*-
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)ieee.h	8.1 (Berkeley) 6/11/93
+ *
+ * from: Header: ieee.h,v 1.7 92/11/26 02:04:37 torek Exp 
+ */
+
+/*
+ * ieee.h defines the machine-dependent layout of the machine's IEEE
+ * floating point.  It does *not* define (yet?) any of the rounding
+ * mode bits, exceptions, and so forth.
+ */
+
+/*
+ * Define the number of bits in each fraction and exponent.
+ *
+ *		     k	         k+1
+ * Note that  1.0 x 2  == 0.1 x 2      and that denorms are represented
+ *
+ *					  (-exp_bias+1)
+ * as fractions that look like 0.fffff x 2             .  This means that
+ *
+ *			 -126
+ * the number 0.10000 x 2    , for instance, is the same as the normalized
+ *
+ *		-127			   -128
+ * float 1.0 x 2    .  Thus, to represent 2    , we need one leading zero
+ *
+ *				  -129
+ * in the fraction; to represent 2    , we need two, and so on.  This
+ *
+ *						     (-exp_bias-fracbits+1)
+ * implies that the smallest denormalized number is 2
+ *
+ * for whichever format we are talking about: for single precision, for
+ *
+ *						-126		-149
+ * instance, we get .00000000000000000000001 x 2    , or 1.0 x 2    , and
+ *
+ * -149 == -127 - 23 + 1.
+ */
+#define	SNG_EXPBITS	8
+#define	SNG_FRACBITS	23
+
+#define	DBL_EXPBITS	11
+#define	DBL_FRACBITS	52
+
+struct ieee_single {
+	u_int	sng_frac:23;
+	u_int	sng_exp:8;
+	u_int	sng_sign:1;
+};
+
+struct ieee_double {
+	u_int	dbl_fracl;
+	u_int	dbl_frach:20;
+	u_int	dbl_exp:11;
+	u_int	dbl_sign:1;
+};
+
+/*
+ * Floats whose exponent is in [1..INFNAN) (of whatever type) are
+ * `normal'.  Floats whose exponent is INFNAN are either Inf or NaN.
+ * Floats whose exponent is zero are either zero (iff all fraction
+ * bits are zero) or subnormal values.
+ *
+ * A NaN is a `signalling NaN' if its QUIETNAN bit is clear in its
+ * high fraction; if the bit is set, it is a `quiet NaN'.
+ */
+#define	SNG_EXP_INFNAN	255
+#define	DBL_EXP_INFNAN	2047
+
+#if 0
+#define	SNG_QUIETNAN	(1 << 22)
+#define	DBL_QUIETNAN	(1 << 19)
+#endif
+
+/*
+ * Exponent biases.
+ */
+#define	SNG_EXP_BIAS	127
+#define	DBL_EXP_BIAS	1023


Property changes on: trunk/sys/ia64/include/ieee.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/ieeefp.h
===================================================================
--- trunk/sys/ia64/include/ieeefp.h	                        (rev 0)
+++ trunk/sys/ia64/include/ieeefp.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,58 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/ieeefp.h 226607 2011-10-21 06:41:46Z das $
+ */
+
+#ifndef _MACHINE_IEEEFP_H_
+#define _MACHINE_IEEEFP_H_
+
+/* Deprecated historical FPU control interface */
+
+#include <machine/fpu.h>
+
+typedef int fp_except_t;
+#define	FP_X_INV	IA64_FPSR_TRAP_VD /* invalid operation exception */
+#define	FP_X_DZ		IA64_FPSR_TRAP_ZD /* divide-by-zero exception */
+#define	FP_X_OFL	IA64_FPSR_TRAP_OD /* overflow exception */
+#define	FP_X_UFL	IA64_FPSR_TRAP_UD /* underflow exception */
+#define	FP_X_IMP	IA64_FPSR_TRAP_ID /* imprecise(inexact) exception */
+
+typedef enum {
+	FP_RN = 0,		/* round to nearest */
+	FP_RM,			/* round toward minus infinity */
+	FP_RP,			/* round toward plus infinity */
+	FP_RZ			/* round toward zero */
+} fp_rnd_t;
+
+__BEGIN_DECLS
+extern fp_rnd_t    fpgetround(void);
+extern fp_rnd_t    fpsetround(fp_rnd_t);
+extern fp_except_t fpgetmask(void);
+extern fp_except_t fpsetmask(fp_except_t);
+__END_DECLS
+
+#endif /* !_MACHINE_IEEEFP_H_ */


Property changes on: trunk/sys/ia64/include/ieeefp.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/in_cksum.h
===================================================================
--- trunk/sys/ia64/include/in_cksum.h	                        (rev 0)
+++ trunk/sys/ia64/include/in_cksum.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,81 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from tahoe:	in_cksum.c	1.2	86/01/05
+ *	from:		@(#)in_cksum.c	1.3 (Berkeley) 1/19/91
+ *	from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD: stable/10/sys/ia64/include/in_cksum.h 235941 2012-05-24 22:00:48Z bz $
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define	_MACHINE_IN_CKSUM_H_	1
+
+#include <sys/cdefs.h>
+
+#define in_cksum(m, len)	in_cksum_skip(m, len, 0)
+
+#if defined(IPVERSION) && (IPVERSION == 4)
+/*
+ * It is useful to have an Internet checksum routine which is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#ifdef __CC_SUPPORTS___INLINE
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+	int __tmpsum;
+	__tmpsum = (int)ntohs(ip->ip_sum) + 256;
+	ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+#else
+
+#define	in_cksum_update(ip) \
+	do { \
+		int __tmpsum; \
+		__tmpsum = (int)ntohs(ip->ip_sum) + 256; \
+		ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16)); \
+	} while(0)
+
+#endif
+#endif
+
+#ifdef _KERNEL
+#if defined(IPVERSION) && (IPVERSION == 4)
+u_int in_cksum_hdr(const struct ip *ip);
+#endif
+u_short	in_addword(u_short sum, u_short b);
+u_short	in_pseudo(u_int sum, u_int b, u_int c);
+u_short	in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif
+
+#endif /* _MACHINE_IN_CKSUM_H_ */


Property changes on: trunk/sys/ia64/include/in_cksum.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/intr.h
===================================================================
--- trunk/sys/ia64/include/intr.h	                        (rev 0)
+++ trunk/sys/ia64/include/intr.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,94 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2007-2010 Marcel Moolenaar
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/intr.h 205726 2010-03-27 05:40:50Z marcel $
+ */
+
+#ifndef _MACHINE_INTR_H_
+#define	_MACHINE_INTR_H_
+
+#define	IA64_NXIVS		256	/* External Interrupt Vectors */
+#define	IA64_MIN_XIV		16
+
+#define	IA64_MAX_HWPRIO		14
+
+struct pcpu;
+struct sapic;
+struct thread;
+struct trapframe;
+
+/*
+ * Layout of the Processor Interrupt Block.
+ */
+struct ia64_pib
+{
+	uint64_t	ib_ipi[65536][2];	/* 64K-way IPIs (1MB area). */
+	uint8_t		_rsvd1[0xe0000];
+	uint8_t		ib_inta;		/* Generate INTA cycle. */
+	uint8_t		_rsvd2[7];
+	uint8_t		ib_xtp;			/* External Task Priority. */
+	uint8_t		_rsvd3[7];
+	uint8_t		_rsvd4[0x1fff0];
+};
+
+enum ia64_xiv_use {
+	IA64_XIV_FREE,
+	IA64_XIV_ARCH,		/* Architecturally defined. */
+	IA64_XIV_PLAT,		/* Platform defined. */
+	IA64_XIV_IPI,		/* Used for IPIs. */
+	IA64_XIV_IRQ		/* Used for external interrupts. */
+};
+
+typedef u_int (ia64_ihtype)(struct thread *, u_int, struct trapframe *);
+
+extern struct ia64_pib *ia64_pib;
+
+void	ia64_bind_intr(void);
+void	ia64_handle_intr(struct trapframe *);
+int	ia64_setup_intr(const char *, int, driver_filter_t, driver_intr_t,
+	    void *, enum intr_type, void **);
+int	ia64_teardown_intr(void *);
+
+void	ia64_xiv_init(void);
+u_int	ia64_xiv_alloc(u_int, enum ia64_xiv_use, ia64_ihtype);
+int	ia64_xiv_free(u_int, enum ia64_xiv_use);
+int	ia64_xiv_reserve(u_int, enum ia64_xiv_use, ia64_ihtype);
+
+int	sapic_bind_intr(u_int, struct pcpu *);
+int	sapic_config_intr(u_int, enum intr_trigger, enum intr_polarity);
+struct sapic *sapic_create(u_int, u_int, uint64_t);
+int	sapic_enable(struct sapic *, u_int, u_int);
+void	sapic_eoi(struct sapic *, u_int);
+struct sapic *sapic_lookup(u_int, u_int *);
+void	sapic_mask(struct sapic *, u_int);
+void	sapic_unmask(struct sapic *, u_int);
+
+#ifdef DDB
+void	sapic_print(struct sapic *, u_int);
+#endif
+
+#endif /* !_MACHINE_INTR_H_ */


Property changes on: trunk/sys/ia64/include/intr.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/intrcnt.h
===================================================================
--- trunk/sys/ia64/include/intrcnt.h	                        (rev 0)
+++ trunk/sys/ia64/include/intrcnt.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,41 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/intrcnt.h 205234 2010-03-17 00:37:15Z marcel $ */
+/* $NetBSD: intrcnt.h,v 1.17 1998/11/19 01:48:04 ross Exp $ */
+
+/*-
+ * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution at CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#define	INTRCNT_CLOCK		0
+#define	INTRCNT_COUNT		256
+
+/*
+ * Maximum name length in intrnames table (including terminating '\0').
+ * Since vmstat(8) assumes a maximum length of 13 (including '\0'), we're
+ * pretty much limited to that (unless we don't care about the alignment
+ * of the columns :-)
+ */
+#define INTRNAME_LEN		13


Property changes on: trunk/sys/ia64/include/intrcnt.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/iodev.h
===================================================================
--- trunk/sys/ia64/include/iodev.h	                        (rev 0)
+++ trunk/sys/ia64/include/iodev.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,69 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2010 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/iodev.h 207329 2010-04-28 15:38:01Z attilio $
+ */
+
+#ifndef _MACHINE_IODEV_H_
+#define	_MACHINE_IODEV_H_
+
+#include <sys/uuid.h>
+
+#ifdef _KERNEL
+#include <machine/bus.h>
+#endif
+
+#define	IODEV_EFIVAR_GETVAR	0
+#define	IODEV_EFIVAR_NEXTNAME	1
+#define	IODEV_EFIVAR_SETVAR	2
+
+struct iodev_efivar_req {
+	u_int	access;
+	u_int	result;			/* errno value */
+	size_t	namesize;
+	u_short	*name;			/* UCS-2 */
+	struct uuid vendor;
+	uint32_t attrib;
+	size_t	datasize;
+	void	*data;
+};
+
+#define	IODEV_EFIVAR	_IOWR('I', 1, struct iodev_efivar_req)
+
+#ifdef _KERNEL
+#define	iodev_read_1	bus_space_read_io_1
+#define	iodev_read_2	bus_space_read_io_2
+#define	iodev_read_4	bus_space_read_io_4
+#define	iodev_write_1	bus_space_write_io_1
+#define	iodev_write_2	bus_space_write_io_2
+#define	iodev_write_4	bus_space_write_io_4
+
+int	 iodev_open(struct thread *td);
+int	 iodev_close(struct thread *td);
+int	 iodev_ioctl(u_long, caddr_t data);
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_IODEV_H_ */


Property changes on: trunk/sys/ia64/include/iodev.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/kdb.h
===================================================================
--- trunk/sys/ia64/include/kdb.h	                        (rev 0)
+++ trunk/sys/ia64/include/kdb.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,68 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/kdb.h 268199 2014-07-02 23:37:14Z marcel $
+ */
+
+#ifndef _MACHINE_KDB_H_
+#define _MACHINE_KDB_H_
+
+#include <machine/cpufunc.h>
+#include <machine/frame.h>
+#include <machine/ia64_cpu.h>
+
+#define	KDB_STOPPEDPCB(pc)	(&(pc)->pc_md.pcb)
+
+void kdb_cpu_trap(int, int);
+
+static __inline void
+kdb_cpu_clear_singlestep(void)
+{
+	kdb_frame->tf_special.psr &= ~IA64_PSR_SS;
+}
+
+static __inline void
+kdb_cpu_set_singlestep(void)
+{
+	kdb_frame->tf_special.psr |= IA64_PSR_SS;
+}
+
+static __inline void
+kdb_cpu_sync_icache(unsigned char *addr, size_t size)
+{
+	vm_offset_t cacheline;
+
+	cacheline = (uintptr_t)addr & ~31;
+	size += (uintptr_t)addr - cacheline;
+	size = (size + 31) & ~31;
+	while (size > 0) {
+		__asm __volatile("fc %0;; sync.i;; srlz.i;;" :: "r"(cacheline));
+		cacheline += 32;
+		size -= 32;
+	}
+}
+
+#endif /* _MACHINE_KDB_H_ */


Property changes on: trunk/sys/ia64/include/kdb.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/limits.h
===================================================================
--- trunk/sys/ia64/include/limits.h	                        (rev 0)
+++ trunk/sys/ia64/include/limits.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,47 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/limits.h 143063 2005-03-02 21:33:29Z joerg $ */
+/* From: NetBSD: limits.h,v 1.3 1997/04/06 08:47:31 cgd Exp */
+
+/*-
+ * Copyright (c) 1988, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)limits.h	8.3 (Berkeley) 1/4/94
+ */
+
+#ifndef _MACHINE_LIMITS_H_
+#define	_MACHINE_LIMITS_H_
+
+#include <sys/cdefs.h>
+
+#ifdef __CC_SUPPORTS_WARNING
+#warning "machine/limits.h is deprecated.  Include sys/limits.h instead."
+#endif
+
+#include <sys/limits.h>
+
+#endif /* !_MACHINE_LIMITS_H_ */


Property changes on: trunk/sys/ia64/include/limits.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/mca.h
===================================================================
--- trunk/sys/ia64/include/mca.h	                        (rev 0)
+++ trunk/sys/ia64/include/mca.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,249 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002-2010 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/mca.h 209671 2010-07-03 20:19:20Z marcel $
+ */
+
+#ifndef _MACHINE_MCA_H_
+#define _MACHINE_MCA_H_
+
+struct mca_record_header {
+	uint64_t	rh_seqnr;		/* Record id. */
+	uint8_t		rh_major;		/* BCD (=02). */
+	uint8_t		rh_minor;		/* BCD (=00). */
+	uint8_t		rh_error;		/* Error severity. */
+#define	MCA_RH_ERROR_RECOVERABLE	0
+#define	MCA_RH_ERROR_FATAL		1
+#define	MCA_RH_ERROR_CORRECTED		2
+	uint8_t		rh_flags;
+#define	MCA_RH_FLAGS_PLATFORM_ID	0x01	/* Platform_id present. */
+#define	MCA_RH_FLAGS_TIME_STAMP		0x02	/* Timestamp invalid. */
+	uint32_t	rh_length;		/* Size including header. */
+	uint8_t		rh_time[8];
+#define	MCA_RH_TIME_SEC		0
+#define	MCA_RH_TIME_MIN		1
+#define	MCA_RH_TIME_HOUR	2
+#define	MCA_RH_TIME_MDAY	4
+#define	MCA_RH_TIME_MON		5
+#define	MCA_RH_TIME_YEAR	6
+#define	MCA_RH_TIME_CENT	7
+	struct uuid	rh_platform;
+};
+
+struct mca_section_header {
+	struct uuid	sh_uuid;
+	uint8_t		sh_major;		/* BCD (=02). */
+	uint8_t		sh_minor;		/* BCD (=00). */
+	uint8_t		sh_flags;
+#define	MCA_SH_FLAGS_CORRECTED	0x01		/* Error has been corrected. */
+#define	MCA_SH_FLAGS_PROPAGATE	0x02		/* Possible propagation. */
+#define	MCA_SH_FLAGS_RESET	0x04		/* Reset device before use. */
+#define	MCA_SH_FLAGS_VALID	0x80		/* Flags are valid. */
+	uint8_t		__reserved;
+	uint32_t	sh_length;		/* Size including header. */
+};
+
+struct mca_cpu_record {
+	uint64_t	cpu_flags;
+#define	MCA_CPU_FLAGS_ERRMAP		(1ULL << 0)
+#define	MCA_CPU_FLAGS_STATE		(1ULL << 1)
+#define	MCA_CPU_FLAGS_CR_LID		(1ULL << 2)
+#define	MCA_CPU_FLAGS_PSI_STRUCT	(1ULL << 3)
+#define	MCA_CPU_FLAGS_CACHE(x)		(((x) >> 4) & 15)
+#define	MCA_CPU_FLAGS_TLB(x)		(((x) >> 8) & 15)
+#define	MCA_CPU_FLAGS_BUS(x)		(((x) >> 12) & 15)
+#define	MCA_CPU_FLAGS_REG(x)		(((x) >> 16) & 15)
+#define	MCA_CPU_FLAGS_MS(x)		(((x) >> 20) & 15)
+#define	MCA_CPU_FLAGS_CPUID		(1ULL << 24)
+	uint64_t	cpu_errmap;
+	uint64_t	cpu_state;
+	uint64_t	cpu_cr_lid;
+	/* Nx cpu_mod (cache). */
+	/* Nx cpu_mod (TLB). */
+	/* Nx cpu_mod (bus). */
+	/* Nx cpu_mod (reg). */
+	/* Nx cpu_mod (MS). */
+	/* cpu_cpuid. */
+	/* cpu_psi. */
+};
+
+struct mca_cpu_cpuid {
+	uint64_t	cpuid[6];
+};
+
+struct mca_cpu_mod {
+	uint64_t	cpu_mod_flags;
+#define	MCA_CPU_MOD_FLAGS_INFO	(1ULL << 0)
+#define	MCA_CPU_MOD_FLAGS_REQID	(1ULL << 1)
+#define	MCA_CPU_MOD_FLAGS_RSPID	(1ULL << 2)
+#define	MCA_CPU_MOD_FLAGS_TGTID	(1ULL << 3)
+#define	MCA_CPU_MOD_FLAGS_IP	(1ULL << 4)
+	uint64_t	cpu_mod_info;
+	uint64_t	cpu_mod_reqid;
+	uint64_t	cpu_mod_rspid;
+	uint64_t	cpu_mod_tgtid;
+	uint64_t	cpu_mod_ip;
+};
+
+struct mca_cpu_psi {
+	uint64_t	cpu_psi_flags;
+#define	MCA_CPU_PSI_FLAGS_STATE	(1ULL << 0)
+#define	MCA_CPU_PSI_FLAGS_BR	(1ULL << 1)
+#define	MCA_CPU_PSI_FLAGS_CR	(1ULL << 2)
+#define	MCA_CPU_PSI_FLAGS_AR	(1ULL << 3)
+#define	MCA_CPU_PSI_FLAGS_RR	(1ULL << 4)
+#define	MCA_CPU_PSI_FLAGS_FR	(1ULL << 5)
+	uint8_t		cpu_psi_state[1024];	/* XXX variable? */
+	uint64_t	cpu_psi_br[8];
+	uint64_t	cpu_psi_cr[128];	/* XXX variable? */
+	uint64_t	cpu_psi_ar[128];	/* XXX variable? */
+	uint64_t	cpu_psi_rr[8];
+	uint64_t	cpu_psi_fr[256];	/* 16 bytes per register! */
+};
+
+struct mca_mem_record {
+	uint64_t	mem_flags;
+#define	MCA_MEM_FLAGS_STATUS		(1ULL << 0)
+#define	MCA_MEM_FLAGS_ADDR		(1ULL << 1)
+#define	MCA_MEM_FLAGS_ADDRMASK		(1ULL << 2)
+#define	MCA_MEM_FLAGS_NODE		(1ULL << 3)
+#define	MCA_MEM_FLAGS_CARD		(1ULL << 4)
+#define	MCA_MEM_FLAGS_MODULE		(1ULL << 5)
+#define	MCA_MEM_FLAGS_BANK		(1ULL << 6)
+#define	MCA_MEM_FLAGS_DEVICE		(1ULL << 7)
+#define	MCA_MEM_FLAGS_ROW		(1ULL << 8)
+#define	MCA_MEM_FLAGS_COLUMN		(1ULL << 9)
+#define	MCA_MEM_FLAGS_BITPOS		(1ULL << 10)
+#define	MCA_MEM_FLAGS_REQID		(1ULL << 11)
+#define	MCA_MEM_FLAGS_RSPID		(1ULL << 12)
+#define	MCA_MEM_FLAGS_TGTID		(1ULL << 13)
+#define	MCA_MEM_FLAGS_BUSDATA		(1ULL << 14)
+#define	MCA_MEM_FLAGS_OEM_ID		(1ULL << 15)
+#define	MCA_MEM_FLAGS_OEM_DATA		(1ULL << 16)
+	uint64_t	mem_status;
+	uint64_t	mem_addr;
+	uint64_t	mem_addrmask;
+	uint16_t	mem_node;
+	uint16_t	mem_card;
+	uint16_t	mem_module;
+	uint16_t	mem_bank;
+	uint16_t	mem_device;
+	uint16_t	mem_row;
+	uint16_t	mem_column;
+	uint16_t	mem_bitpos;
+	uint64_t	mem_reqid;
+	uint64_t	mem_rspid;
+	uint64_t	mem_tgtid;
+	uint64_t	mem_busdata;
+	struct uuid	mem_oem_id;
+	uint16_t	mem_oem_length;		/* Size of OEM data. */
+	/* N bytes of OEM platform data. */
+};
+
+struct mca_pcibus_record {
+	uint64_t	pcibus_flags;
+#define	MCA_PCIBUS_FLAGS_STATUS		(1ULL << 0)
+#define	MCA_PCIBUS_FLAGS_ERROR		(1ULL << 1)
+#define	MCA_PCIBUS_FLAGS_BUS		(1ULL << 2)
+#define	MCA_PCIBUS_FLAGS_ADDR		(1ULL << 3)
+#define	MCA_PCIBUS_FLAGS_DATA		(1ULL << 4)
+#define	MCA_PCIBUS_FLAGS_CMD		(1ULL << 5)
+#define	MCA_PCIBUS_FLAGS_REQID		(1ULL << 6)
+#define	MCA_PCIBUS_FLAGS_RSPID		(1ULL << 7)
+#define	MCA_PCIBUS_FLAGS_TGTID		(1ULL << 8)
+#define	MCA_PCIBUS_FLAGS_OEM_ID		(1ULL << 9)
+#define	MCA_PCIBUS_FLAGS_OEM_DATA	(1ULL << 10)
+	uint64_t	pcibus_status;
+	uint16_t	pcibus_error;
+	uint16_t	pcibus_bus;
+	uint32_t	__reserved;
+	uint64_t	pcibus_addr;
+	uint64_t	pcibus_data;
+	uint64_t	pcibus_cmd;
+	uint64_t	pcibus_reqid;
+	uint64_t	pcibus_rspid;
+	uint64_t	pcibus_tgtid;
+	struct uuid	pcibus_oem_id;
+	uint16_t	pcibus_oem_length;	/* Size of OEM data. */
+	/* N bytes of OEM platform data. */
+};
+
+struct mca_pcidev_record {
+	uint64_t	pcidev_flags;
+#define	MCA_PCIDEV_FLAGS_STATUS		(1ULL << 0)
+#define	MCA_PCIDEV_FLAGS_INFO		(1ULL << 1)
+#define	MCA_PCIDEV_FLAGS_REG_MEM	(1ULL << 2)
+#define	MCA_PCIDEV_FLAGS_REG_IO		(1ULL << 3)
+#define	MCA_PCIDEV_FLAGS_REG_DATA	(1ULL << 4)
+#define	MCA_PCIDEV_FLAGS_OEM_DATA	(1ULL << 5)
+	uint64_t	pcidev_status;
+	struct {
+		uint16_t	info_vendor;
+		uint16_t	info_device;
+		uint32_t	info_ccfn;	/* Class code & funct. nr. */
+#define	MCA_PCIDEV_INFO_CLASS(x)	((x) & 0xffffff)
+#define	MCA_PCIDEV_INFO_FUNCTION(x)	(((x) >> 24) & 0xff)
+		uint8_t		info_slot;
+		uint8_t		info_bus;
+		uint8_t		info_segment;
+		uint8_t		__res0;
+		uint32_t	__res1;
+	} pcidev_info;
+	uint32_t	pcidev_reg_mem;
+	uint32_t	pcidev_reg_io;
+	/* Nx pcidev_reg. */
+	/* M bytes of OEM platform data. */
+};
+
+struct mca_pcidev_reg {
+	uint64_t	pcidev_reg_addr;
+	uint64_t	pcidev_reg_data;
+};
+
+#define	MCA_UUID_CPU		\
+	{0xe429faf1,0x3cb7,0x11d4,0xbc,0xa7,{0x00,0x80,0xc7,0x3c,0x88,0x81}}
+#define	MCA_UUID_MEMORY		\
+	{0xe429faf2,0x3cb7,0x11d4,0xbc,0xa7,{0x00,0x80,0xc7,0x3c,0x88,0x81}}
+#define	MCA_UUID_SEL		\
+	{0xe429faf3,0x3cb7,0x11d4,0xbc,0xa7,{0x00,0x80,0xc7,0x3c,0x88,0x81}}
+#define	MCA_UUID_PCI_BUS	\
+	{0xe429faf4,0x3cb7,0x11d4,0xbc,0xa7,{0x00,0x80,0xc7,0x3c,0x88,0x81}}
+#define	MCA_UUID_SMBIOS		\
+	{0xe429faf5,0x3cb7,0x11d4,0xbc,0xa7,{0x00,0x80,0xc7,0x3c,0x88,0x81}}
+#define	MCA_UUID_PCI_DEV	\
+	{0xe429faf6,0x3cb7,0x11d4,0xbc,0xa7,{0x00,0x80,0xc7,0x3c,0x88,0x81}}
+#define	MCA_UUID_GENERIC	\
+	{0xe429faf7,0x3cb7,0x11d4,0xbc,0xa7,{0x00,0x80,0xc7,0x3c,0x88,0x81}}
+
+#ifdef _KERNEL
+
+void ia64_mca_init(void);
+void ia64_mca_init_ap(void);
+void ia64_mca_save_state(int);
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_MCA_H_ */


Property changes on: trunk/sys/ia64/include/mca.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/md_var.h
===================================================================
--- trunk/sys/ia64/include/md_var.h	                        (rev 0)
+++ trunk/sys/ia64/include/md_var.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,125 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/md_var.h 310508 2016-12-24 13:28:39Z avg $
+ */
+
+#ifndef _MACHINE_MD_VAR_H_
+#define	_MACHINE_MD_VAR_H_
+
+/*
+ * Miscellaneous machine-dependent declarations.
+ */
+
+struct ia64_fdesc {
+	uint64_t	func;
+	uint64_t	gp;
+};
+
+#define FDESC_FUNC(fn)  (((struct ia64_fdesc *) fn)->func)
+#define FDESC_GP(fn)    (((struct ia64_fdesc *) fn)->gp)
+
+/* Convenience macros to decompose CFM & ar.pfs. */
+#define	IA64_CFM_SOF(x)		((x) & 0x7f)
+#define	IA64_CFM_SOL(x)		(((x) >> 7) & 0x7f)
+#define	IA64_CFM_SOR(x)		(((x) >> 14) & 0x0f)
+#define	IA64_CFM_RRB_GR(x)	(((x) >> 18) & 0x7f)
+#define	IA64_CFM_RRB_FR(x)	(((x) >> 25) & 0x7f)
+#define	IA64_CFM_RRB_PR(x)	(((x) >> 32) & 0x3f)
+
+/* Convenience function (inline) to adjust backingstore pointers. */
+static __inline uint64_t
+ia64_bsp_adjust(uint64_t bsp, int nslots)
+{
+	int bias = ((unsigned int)bsp & 0x1f8) >> 3;
+	nslots += (nslots + bias + 63*8) / 63 - 8;
+	return bsp + (nslots << 3);
+}
+
+#ifdef _KERNEL
+
+struct _special;
+struct pcpu;
+struct thread;
+struct trapframe;
+
+/*
+ * Return value from ia64_init. Describes stack to switch to.
+ */
+struct ia64_init_return {
+	uint64_t	bspstore;
+	uint64_t	sp;
+};
+
+extern uint64_t ia64_lapic_addr;
+extern vm_paddr_t paddr_max;
+extern long Maxmem;
+extern u_int busdma_swi_pending;
+
+void	*acpi_find_table(const char *sig);
+void	busdma_swi(void);
+int	copyout_regstack(struct thread *, uint64_t *, uint64_t *);
+void	cpu_mp_add(u_int, u_int, u_int);
+void	cpu_pcpu_setup(struct pcpu *, u_int, u_int);
+int	do_ast(struct trapframe *);
+void	ia32_trap(int, struct trapframe *);
+int	ia64_count_cpus(void);
+int	ia64_emulate(struct trapframe *, struct thread *);
+int	ia64_flush_dirty(struct thread *, struct _special *);
+uint64_t ia64_get_hcdp(void);
+int	ia64_highfp_drop(struct thread *);
+int	ia64_highfp_enable(struct thread *, struct trapframe *);
+int	ia64_highfp_save(struct thread *);
+int	ia64_highfp_save_ipi(void);
+struct ia64_init_return ia64_init(void);
+u_int	ia64_itc_freq(void);
+int	ia64_physmem_add(vm_paddr_t, vm_size_t);
+void	*ia64_physmem_alloc(vm_size_t, vm_size_t);
+int	ia64_physmem_delete(vm_paddr_t, vm_size_t);
+int	ia64_physmem_fini(void);
+int	ia64_physmem_init(void);
+int	ia64_physmem_track(vm_paddr_t, vm_size_t);
+void	ia64_probe_sapics(void);
+void	ia64_sync_icache(vm_offset_t, vm_size_t);
+void	*ia64_xtrace_alloc(void);
+void	ia64_xtrace_init_ap(void *);
+void	ia64_xtrace_init_bsp(void);
+void	ia64_xtrace_save(void);
+void	ia64_xtrace_stop(void);
+void	interrupt(struct trapframe *);
+void	map_gateway_page(void);
+void	map_pal_code(void);
+void	map_vhpt(uintptr_t);
+void	os_boot_rendez(void);
+void	os_mca(void);
+int	syscall(struct trapframe *);
+void	trap(int, struct trapframe *);
+void	trap_panic(int, struct trapframe *);
+int	unaligned_fixup(struct trapframe *, struct thread *);
+
+#endif	/* _KERNEL */
+
+#endif /* !_MACHINE_MD_VAR_H_ */


Property changes on: trunk/sys/ia64/include/md_var.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/memdev.h
===================================================================
--- trunk/sys/ia64/include/memdev.h	                        (rev 0)
+++ trunk/sys/ia64/include/memdev.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,41 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2004 Mark R V Murray
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer
+ *    in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/memdev.h 217515 2011-01-17 22:58:28Z jkim $
+ */
+
+#ifndef _MACHINE_MEMDEV_H_
+#define	_MACHINE_MEMDEV_H_
+
+#define	CDEV_MINOR_MEM	0
+#define	CDEV_MINOR_KMEM	1
+
+d_open_t	memopen;
+d_read_t	memrw;
+#define		memioctl	(d_ioctl_t *)NULL
+d_mmap_t	memmmap;
+
+#endif /* _MACHINE_MEMDEV_H_ */


Property changes on: trunk/sys/ia64/include/memdev.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/pal.h
===================================================================
--- trunk/sys/ia64/include/pal.h	                        (rev 0)
+++ trunk/sys/ia64/include/pal.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,123 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	$FreeBSD: stable/10/sys/ia64/include/pal.h 219841 2011-03-21 18:20:53Z marcel $
+ */
+
+#ifndef _MACHINE_PAL_H_
+#define _MACHINE_PAL_H_
+
+/*
+ * Architected static calling convention procedures.
+ */
+#define PAL_CACHE_FLUSH		1
+#define PAL_CACHE_INFO		2
+#define PAL_CACHE_INIT		3
+#define PAL_CACHE_SUMMARY	4
+#define PAL_MEM_ATTRIB		5
+#define PAL_PTCE_INFO		6
+#define PAL_VM_INFO		7
+#define PAL_VM_SUMMARY		8
+#define PAL_BUS_GET_FEATURES	9
+#define PAL_BUS_SET_FEATURES	10
+#define PAL_DEBUG_INFO		11
+#define PAL_FIXED_ADDR		12
+#define PAL_FREQ_BASE		13
+#define PAL_FREQ_RATIOS		14
+#define PAL_PERF_MON_INFO	15
+#define PAL_PLATFORM_ADDR	16
+#define PAL_PROC_GET_FEATURE	17
+#define PAL_PROC_SET_FEATURE	18
+#define PAL_RSE_INFO		19
+#define PAL_VERSION		20
+#define PAL_MC_CLEAR_LOG	21
+#define PAL_MC_DRAIN		22
+#define	PAL_MC_EXPECTED		23
+#define PAL_MC_DYNAMIC_STATE	24
+#define PAL_MC_ERROR_INFO	25
+#define	PAL_MC_RESUME		26
+#define PAL_MC_REGISTER_MEM	27
+#define PAL_HALT		28
+#define PAL_HALT_LIGHT		29
+#define PAL_COPY_INFO		30
+#define PAL_CACHE_LINE_INIT	31
+#define PAL_PMI_ENTRYPOINT	32
+#define PAL_ENTER_IA_32_ENV	33
+#define PAL_VM_PAGE_SIZE	34
+#define	PAL_TEST_INFO		37
+#define PAL_CACHE_PROT_INFO	38
+#define PAL_REGISTER_INFO	39
+#define PAL_PREFETCH_VISIBILITY	41
+#define	PAL_LOGICAL_TO_PHYSICAL	42
+#define	PAL_CACHE_SHARED_INFO	43
+#define	PAL_PSTATE_INFO		44
+#define	PAL_SHUTDOWN		45
+#define	PAL_GET_HW_POLICY	48
+#define	PAL_SET_HW_POLICY	49
+
+/*
+ * Architected stacked calling convention procedures.
+ */
+#define PAL_COPY_PAL		256
+#define PAL_HALT_INFO		257
+#define PAL_TEST_PROC		258
+#define PAL_CACHE_READ		259
+#define PAL_CACHE_WRITE		260
+#define PAL_VM_TR_READ		261
+#define	PAL_GET_PSTATE		262
+#define	PAL_SET_PSTATE		263
+#define	PAL_VP_CREATE		265
+#define	PAL_VP_ENV_INFO		266
+#define	PAL_VP_EXIT_ENV		267
+#define	PAL_VP_INIT_ENV		268
+#define	PAL_VP_REGISTER		269
+#define	PAL_VP_RESTORE		270
+#define	PAL_VP_SAVE		271
+#define	PAL_VP_TERMINATE	272
+#define	PAL_BRAND_INFO		274
+#define	PAL_MC_ERROR_INJECT	276
+#define	PAL_MEMORY_BUFFER	277
+
+/*
+ * Default physical address of the Processor Interrupt Block (PIB).
+ * See also: IA-64 SDM, rev 1.1, volume 2, page 5-31.
+ */
+#define	PAL_PIB_DEFAULT_ADDR	0x00000000FEE00000L
+
+struct ia64_pal_result {
+	int64_t		pal_status;
+	uint64_t	pal_result[3];
+};
+
+struct ia64_pal_result ia64_pal_physical(u_long, u_long, u_long, u_long);
+
+struct ia64_pal_result ia64_call_pal_static(uint64_t proc, uint64_t arg1,
+    uint64_t arg2, uint64_t arg3);
+
+struct ia64_pal_result ia64_call_pal_stacked(uint64_t proc, uint64_t arg1,
+    uint64_t arg2, uint64_t arg3);
+
+#endif /* _MACHINE_PAL_H_ */


Property changes on: trunk/sys/ia64/include/pal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/param.h
===================================================================
--- trunk/sys/ia64/include/param.h	                        (rev 0)
+++ trunk/sys/ia64/include/param.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,129 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/param.h 274648 2014-11-18 12:53:32Z kib $ */
+/* From: NetBSD: param.h,v 1.20 1997/09/19 13:52:53 leo Exp */
+
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: machparam.h 1.11 89/08/14$
+ *
+ *	@(#)param.h	8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _IA64_INCLUDE_PARAM_H_
+#define	_IA64_INCLUDE_PARAM_H_
+
+/*
+ * Machine dependent constants for the IA64.
+ */
+
+#include <machine/_align.h>
+
+#define __HAVE_ACPI			/* Platform supports ACPI. */
+#define __PCI_REROUTE_INTERRUPT		/* PCI interrupts may be rerouted. */
+
+#ifndef MACHINE
+#define	MACHINE		"ia64"
+#endif
+#ifndef MACHINE_ARCH
+#define	MACHINE_ARCH	"ia64"
+#endif
+#ifndef MACHINE_ARCH32
+#define	MACHINE_ARCH32	"i386"	/* 32-bit (ia32) compat binaries report i386. */
+#endif
+
+#if defined(SMP) || defined(KLD_MODULE)
+#ifndef MAXCPU
+#define	MAXCPU		64	/* Upper bound on CPUs for SMP kernels/modules. */
+#endif
+#else
+#define MAXCPU		1	/* Uniprocessor kernel. */
+#endif
+
+#ifndef MAXMEMDOM
+#define	MAXMEMDOM	1	/* Number of memory (NUMA) domains. */
+#endif
+
+#define	ALIGNBYTES		_ALIGNBYTES
+#define	ALIGN(p)		_ALIGN(p)
+/*
+ * ALIGNED_POINTER is a boolean macro that checks whether an address
+ * is valid to fetch data elements of type t from on this architecture.
+ * This does not reflect the optimal alignment, just the possibility
+ * (within reasonable limits). 
+ */
+#define	ALIGNED_POINTER(p,t)	((((u_long)(p)) & (sizeof(t)-1)) == 0)
+
+/*
+ * CACHE_LINE_SIZE is the compile-time maximum cache line size for an
+ * architecture.  It should be used with appropriate caution.
+ */
+#define	CACHE_LINE_SHIFT	7
+#define	CACHE_LINE_SIZE		(1 << CACHE_LINE_SHIFT)	/* 128 bytes. */
+
+#ifndef LOG2_PAGE_SIZE
+#define	LOG2_PAGE_SIZE		13		/* 8K pages by default. */
+#endif
+#define	PAGE_SHIFT	(LOG2_PAGE_SIZE)	/* log2 of the page size. */
+#define	PAGE_SIZE	(1<<(LOG2_PAGE_SIZE))	/* Bytes per page. */
+#define PAGE_MASK	(PAGE_SIZE-1)		/* Offset-within-page mask. */
+#define NPTEPG		(PAGE_SIZE/(sizeof (pt_entry_t)))	/* PTEs per page. */
+
+#define	MAXPAGESIZES	1		/* maximum number of supported page sizes */
+
+#ifndef	KSTACK_PAGES
+#define	KSTACK_PAGES	4		/* pages of kernel stack */
+#endif
+#define	KSTACK_GUARD_PAGES 0		/* pages of kstack guard; 0 disables */
+
+/* The default size of identity mappings in region 6 & 7. */
+#ifndef LOG2_ID_PAGE_SIZE
+#define	LOG2_ID_PAGE_SIZE	16	/* 64K identity pages. */
+#endif
+
+/*
+ * Mach derived conversion macros
+ */
+#define	round_page(x)	((((unsigned long)(x)) + PAGE_MASK) & ~(PAGE_MASK))	/* Round up to a page boundary. */
+#define	trunc_page(x)	((unsigned long)(x) & ~(PAGE_MASK))	/* Round down to a page boundary. */
+
+#define atop(x)			((unsigned long)(x) >> PAGE_SHIFT)	/* Address to page number. */
+#define ptoa(x)			((unsigned long)(x) << PAGE_SHIFT)	/* Page number to address. */
+
+#define pgtok(x)                ((x) * (PAGE_SIZE / 1024)) 	/* Pages to kilobytes. */
+
+#ifdef _KERNEL
+#define	NO_FUEWORD	1	/* No fueword(9) variants; NOTE(review): confirm mapping in sys/systm.h. */
+#endif
+
+#endif	/* !_IA64_INCLUDE_PARAM_H_ */


Property changes on: trunk/sys/ia64/include/param.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/pc/display.h
===================================================================
--- trunk/sys/ia64/include/pc/display.h	                        (rev 0)
+++ trunk/sys/ia64/include/pc/display.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,47 @@
+/* $MidnightBSD$ */
+/*
+ * IBM PC display definitions
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/pc/display.h 66459 2000-09-29 13:48:14Z dfr $
+ *	from: i386/include/pc display.h,v 1.4
+ */
+
+/* Color attributes for foreground text */
+
+#define	FG_BLACK		   0
+#define	FG_BLUE			   1
+#define	FG_GREEN		   2
+#define	FG_CYAN			   3
+#define	FG_RED			   4
+#define	FG_MAGENTA		   5
+#define	FG_BROWN		   6
+#define	FG_LIGHTGREY		   7
+#define	FG_DARKGREY		   8
+#define	FG_LIGHTBLUE		   9
+#define	FG_LIGHTGREEN		  10
+#define	FG_LIGHTCYAN		  11
+#define	FG_LIGHTRED		  12
+#define	FG_LIGHTMAGENTA		  13
+#define	FG_YELLOW		  14
+#define	FG_WHITE		  15
+#define	FG_BLINK		0x80	/* Attribute bit 7: blinking text. */
+
+/* Color attributes for text background */
+
+#define	BG_BLACK		0x00
+#define	BG_BLUE			0x10
+#define	BG_GREEN		0x20
+#define	BG_CYAN			0x30
+#define	BG_RED			0x40
+#define	BG_MAGENTA		0x50
+#define	BG_BROWN		0x60
+#define	BG_LIGHTGREY		0x70
+
+/* Monochrome attributes for foreground text */
+
+#define	FG_UNDERLINE		0x01	/* Underlined text (MDA). */
+#define	FG_INTENSE		0x08	/* Bright foreground. */
+
+/* Monochrome attributes for text background */
+
+#define	BG_INTENSE		0x10	/* Bright background. */


Property changes on: trunk/sys/ia64/include/pc/display.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/pcb.h
===================================================================
--- trunk/sys/ia64/include/pcb.h	                        (rev 0)
+++ trunk/sys/ia64/include/pcb.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,76 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003,2004 Marcel Moolenaar
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	$FreeBSD: stable/10/sys/ia64/include/pcb.h 234785 2012-04-29 11:04:31Z dim $
+ */
+
+#ifndef _MACHINE_PCB_H_
+#define _MACHINE_PCB_H_
+
+#ifndef _MACHINE_REGSET_H_
+#include <machine/_regset.h>
+#endif
+
+/*
+ * PCB: process control block
+ */
+struct pmap;
+struct pcb {
+	struct _special		pcb_special;	/* Special (control) register set. */
+	struct _callee_saved	pcb_preserved;	/* Callee-saved integer registers. */
+	struct _callee_saved_fp	pcb_preserved_fp;	/* Callee-saved FP registers. */
+	struct _high_fp		pcb_high_fp;	/* High FP register partition. */
+	struct pcpu		*pcb_fpcpu;	/* CPU holding high FP state; NOTE(review): confirm. */
+	struct pmap 		*pcb_current_pmap;	/* Address space of this thread. */
+
+	uint64_t		pcb_onfault;	/* for copy faults */
+
+	/* IA32 specific registers. */
+	uint64_t		pcb_ia32_cflg;
+	uint64_t		pcb_ia32_eflag;
+	uint64_t		pcb_ia32_fcr;
+	uint64_t		pcb_ia32_fdr;
+	uint64_t		pcb_ia32_fir;
+	uint64_t		pcb_ia32_fsr;
+};
+
+#ifdef _KERNEL
+
+#define	savectx(p)	swapctx(p, NULL)	/* Save-only: swapctx() with no new context. */
+
+struct trapframe;
+
+void makectx(struct trapframe *, struct pcb *);	/* Build a PCB from a trapframe. */
+void restorectx(struct pcb *) __dead2;	/* Resume saved context; never returns. */
+int swapctx(struct pcb *old, struct pcb *new) __returns_twice;	/* Save 'old', load 'new'. */
+
+void ia32_restorectx(struct pcb *);
+void ia32_savectx(struct pcb *) __returns_twice;
+
+#endif
+
+#endif /* _MACHINE_PCB_H_ */


Property changes on: trunk/sys/ia64/include/pcb.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/pci_cfgreg.h
===================================================================
--- trunk/sys/ia64/include/pci_cfgreg.h	                        (rev 0)
+++ trunk/sys/ia64/include/pci_cfgreg.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,39 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2010 Marcel Moolenaar <marcel at FreeBSD.org>
+ * Copyright (c) 1997, Stefan Esser <se at freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/pci_cfgreg.h 203884 2010-02-14 17:03:20Z marcel $
+ */
+
+#ifndef _MACHINE_PCI_CFGREG_H_
+#define	_MACHINE_PCI_CFGREG_H_
+
+int	pci_cfgregopen(void);	/* Init config access; NOTE(review): nonzero on success by convention. */
+uint32_t pci_cfgregread(int bus, int slot, int func, int reg, int len);	/* 'len': access width in bytes. */
+void	pci_cfgregwrite(int bus, int slot, int func, int reg, uint32_t data,
+    int bytes);	/* 'bytes': access width. */
+
+#endif /* _MACHINE_PCI_CFGREG_H_ */


Property changes on: trunk/sys/ia64/include/pci_cfgreg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/pcpu.h
===================================================================
--- trunk/sys/ia64/include/pcpu.h	                        (rev 0)
+++ trunk/sys/ia64/include/pcpu.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,136 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi at freebsd.org>
+ * Copyright (c) Peter Wemm <peter at netplex.com.au>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/pcpu.h 271239 2014-09-07 21:40:14Z marcel $
+ */
+
+#ifndef	_MACHINE_PCPU_H_
+#define	_MACHINE_PCPU_H_
+
+#include <sys/sysctl.h>
+#include <machine/pcb.h>
+
+struct pcpu_stats {
+	u_long		pcs_nasts;		/* IPI_AST counter. */
+	u_long		pcs_nclks;		/* Clock interrupt counter. */
+	u_long		pcs_nextints;		/* ExtINT counter. */
+	u_long		pcs_nhardclocks;	/* IPI_HARDCLOCK counter. */
+	u_long		pcs_nhighfps;		/* IPI_HIGH_FP counter. */
+	u_long		pcs_nhwints;		/* Hardware int. counter. */
+	u_long		pcs_npreempts;		/* IPI_PREEMPT counter. */
+	u_long		pcs_nrdvs;		/* IPI_RENDEZVOUS counter. */
+	u_long		pcs_nstops;		/* IPI_STOP counter. */
+	u_long		pcs_nstrays;		/* Stray interrupt counter. */
+};
+
+struct pcpu_md {
+	struct pcb	pcb;			/* Used by IPI_STOP */
+	struct pmap	*current_pmap;		/* active pmap */
+	vm_offset_t	vhpt;			/* Address of VHPT */
+	uint64_t	lid;			/* local CPU ID */
+	uint64_t	clock;			/* Clock counter. */
+	uint64_t	clock_load;		/* Clock reload value. */
+	uint32_t	clock_mode;		/* Clock ET mode */
+	uint32_t	awake:1;		/* CPU is awake? */
+	struct pcpu_stats stats;		/* Interrupt stats. */
+	void		*xtrace_buffer;		/* Trace buffer; NOTE(review): consumer not visible here. */
+	uint64_t	xtrace_tail;		/* Tail offset into xtrace_buffer; NOTE(review): confirm. */
+#ifdef _KERNEL
+	struct sysctl_ctx_list sysctl_ctx;
+	struct sysctl_oid *sysctl_tree;
+#endif
+};
+
+#define	PCPU_MD_FIELDS							\
+	uint32_t	pc_acpi_id;		/* ACPI CPU id. */	\
+	struct pcpu_md	pc_md;			/* MD fields. */	\
+	char		__pad[10*128]	/* Padding to a fixed struct pcpu size. */
+
+#ifdef _KERNEL
+
+#include <sys/systm.h>
+
+struct pcpu;
+
+register struct pcpu * volatile pcpup __asm__("r13");	/* Per-CPU data pointer lives in r13. */
+
+static __inline __pure2 struct thread *
+__curthread(void)
+{
+	struct thread *td;
+
+	__asm("ld8.acq %0=[r13]" : "=r"(td));	/* Relies on pc_curthread being first in struct pcpu. */
+	return (td);
+}
+#define	curthread	(__curthread())
+
+#define	__pcpu_offset(name)	__offsetof(struct pcpu, name)
+#define	__pcpu_type(name)	__typeof(((struct pcpu *)0)->name)
+
+#define	PCPU_ADD(name, val)	/* pc_<name> += val */		\
+    do {							\
+	__pcpu_type(pc_ ## name) *nmp;				\
+	critical_enter();					\
+	__asm __volatile("add %0=%1,r13;;" :			\
+	    "=r"(nmp) : "i"(__pcpu_offset(pc_ ## name)));	\
+	*nmp += val;						\
+	critical_exit();					\
+    } while (0)
+
+#define	PCPU_GET(name)	/* Read pc_<name> of this CPU. */	\
+    ({	__pcpu_type(pc_ ## name) *nmp;				\
+	__pcpu_type(pc_ ## name) res;				\
+	critical_enter();					\
+	__asm __volatile("add %0=%1,r13;;" :			\
+	    "=r"(nmp) : "i"(__pcpu_offset(pc_ ## name)));	\
+	res = *nmp;						\
+	critical_exit();					\
+	res;							\
+    })
+
+#define	PCPU_INC(member)	PCPU_ADD(member, 1)
+
+#define	PCPU_PTR(name)	/* Address of pc_<name>; caller must prevent migration. */	\
+    ({	__pcpu_type(pc_ ## name) *nmp;				\
+	__asm __volatile("add %0=%1,r13;;" :			\
+	    "=r"(nmp) : "i"(__pcpu_offset(pc_ ## name)));	\
+	nmp;							\
+    })
+
+#define	PCPU_SET(name, val)	/* pc_<name> = val */		\
+    do {							\
+	__pcpu_type(pc_ ## name) *nmp;				\
+	critical_enter();					\
+	__asm __volatile("add %0=%1,r13;;" :			\
+	    "=r"(nmp) : "i"(__pcpu_offset(pc_ ## name)));	\
+	*nmp = val;						\
+	critical_exit();					\
+    } while (0)
+
+#endif	/* _KERNEL */
+
+#endif	/* !_MACHINE_PCPU_H_ */


Property changes on: trunk/sys/ia64/include/pcpu.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/pmap.h
===================================================================
--- trunk/sys/ia64/include/pmap.h	                        (rev 0)
+++ trunk/sys/ia64/include/pmap.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,144 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Derived from hp300 version by Mike Hibler, this version by William
+ * Jolitz uses a recursive map [a pde points to the page directory] to
+ * map the page tables using the pagetables themselves. This is done to
+ * reduce the impact on kernel virtual memory for lots of sparse address
+ * space, and to reduce the cost of memory to each process.
+ *
+ *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
+ *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
+ *	from: i386 pmap.h,v 1.54 1997/11/20 19:30:35 bde Exp
+ * $FreeBSD: stable/10/sys/ia64/include/pmap.h 268201 2014-07-02 23:57:55Z marcel $
+ */
+
+#ifndef _MACHINE_PMAP_H_
+#define	_MACHINE_PMAP_H_
+
+#include <sys/queue.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+#include <machine/atomic.h>
+#include <machine/pte.h>
+#include <machine/vmparam.h>
+
+#ifdef _KERNEL
+
+#define MAXKPT		(PAGE_SIZE/sizeof(vm_offset_t))	/* vm_offset_t slots per page. */
+
+#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))	/* VA to PA. */
+
+#endif /* _KERNEL */
+
+/*
+ * Pmap stuff
+ */
+struct	pv_entry;
+struct	pv_chunk;
+
+struct md_page {
+	TAILQ_HEAD(,pv_entry)	pv_list;	/* All mappings of the page. */
+	vm_memattr_t		memattr;	/* Cache attribute for mappings. */
+};
+
+struct pmap {
+	struct mtx		pm_mtx;
+	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
+	uint32_t		pm_rid[IA64_VM_MINKERN_REGION];	/* RID per non-kernel region. */
+	struct pmap_statistics	pm_stats;	/* pmap statistics */
+};
+
+typedef struct pmap	*pmap_t;
+
+#ifdef _KERNEL
+extern struct pmap	kernel_pmap_store;
+#define kernel_pmap	(&kernel_pmap_store)
+
+#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
+#define	PMAP_LOCK_ASSERT(pmap, type) \
+				mtx_assert(&(pmap)->pm_mtx, (type))
+#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
+#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
+				    NULL, MTX_DEF)
+#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
+#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
+#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
+#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
+#endif
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
+ */
+typedef struct pv_entry {
+	vm_offset_t	pv_va;		/* virtual address for mapping */
+	TAILQ_ENTRY(pv_entry)	pv_list;
+} *pv_entry_t;
+
+#ifdef	_KERNEL
+
+extern vm_paddr_t phys_avail[];
+extern vm_offset_t virtual_avail;
+extern vm_offset_t virtual_end;
+
+extern uint64_t pmap_vhpt_base[];	/* VHPT base; NOTE(review): layout defined in pmap.c. */
+extern int pmap_vhpt_log2size;		/* log2 of the VHPT size. */
+
+#define	pmap_mapbios(pa,sz)	pmap_mapdev_attr(pa,sz,VM_MEMATTR_UNCACHEABLE)
+#define	pmap_mapdev(pa,sz)	pmap_mapdev_attr(pa,sz,VM_MEMATTR_UNCACHEABLE)
+#define	pmap_unmapbios(va,sz)	pmap_unmapdev(va,sz)
+
+#define	pmap_page_get_memattr(m)	((m)->md.memattr)
+#define	pmap_page_is_mapped(m)		(!TAILQ_EMPTY(&(m)->md.pv_list))
+#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
+
+void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
+vm_paddr_t pmap_kextract(vm_offset_t va);
+void	pmap_kremove(vm_offset_t);
+void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
+void	pmap_page_set_memattr(vm_page_t, vm_memattr_t);
+void	pmap_unmapdev(vm_offset_t, vm_size_t);
+
+/* Machine-architecture private */
+vm_offset_t pmap_alloc_vhpt(void);
+void	pmap_bootstrap(void);
+void	pmap_invalidate_all(void);
+vm_offset_t pmap_mapdev_priv(vm_paddr_t, vm_size_t, vm_memattr_t);
+vm_offset_t pmap_page_to_va(vm_page_t);
+vm_offset_t pmap_steal_memory(vm_size_t);
+struct pmap *pmap_switch(struct pmap *pmap);	/* Activate 'pmap'; returns the previous one. */
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PMAP_H_ */


Property changes on: trunk/sys/ia64/include/pmap.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/pmc_mdep.h
===================================================================
--- trunk/sys/ia64/include/pmc_mdep.h	                        (rev 0)
+++ trunk/sys/ia64/include/pmc_mdep.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,29 @@
+/* $MidnightBSD$ */
+/*-
+ * This file is in the public domain.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/pmc_mdep.h 174405 2007-12-07 13:45:47Z jkoshy $
+ */
+
+#ifndef _MACHINE_PMC_MDEP_H_
+#define	_MACHINE_PMC_MDEP_H_
+
+union pmc_md_op_pmcallocate {
+	uint64_t		__pad[4];	/* MD allocation args; unused on ia64. */
+};
+
+/* Logging */
+#define	PMCLOG_READADDR		PMCLOG_READ64
+#define	PMCLOG_EMITADDR		PMCLOG_EMIT64
+
+#ifdef	_KERNEL	/* Was "#if _KERNEL": fails if _KERNEL is defined empty; siblings use #ifdef. */
+union pmc_md_pmc {
+};
+
+#define	PMC_TRAPFRAME_TO_PC(TF)	(0)	/* Stubs */
+#define	PMC_TRAPFRAME_TO_FP(TF)	(0)
+#define	PMC_TRAPFRAME_TO_SP(TF)	(0)
+
+#endif
+
+#endif /* !_MACHINE_PMC_MDEP_H_ */


Property changes on: trunk/sys/ia64/include/pmc_mdep.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/proc.h
===================================================================
--- trunk/sys/ia64/include/proc.h	                        (rev 0)
+++ trunk/sys/ia64/include/proc.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,55 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003 The FreeBSD Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/proc.h 246714 2013-02-12 17:24:41Z marcel $
+ */
+
+#ifndef _MACHINE_PROC_H_
+#define	_MACHINE_PROC_H_
+
+struct mdthread {
+	int	md_spinlock_count;	/* (k) Spinlock nesting depth. */
+	int	md_saved_intr;		/* (k) Intr state at first spinlock_enter(); NOTE(review): confirm. */
+};
+
+struct mdproc {
+	int		__dummy;	/* Avoid having an empty struct. */
+};
+
+#define	KINFO_PROC_SIZE 1088	/* Must equal sizeof(struct kinfo_proc). */
+#define	KINFO_PROC32_SIZE 768	/* 32-bit (ia32) variant. */
+
+#ifdef _KERNEL
+struct syscall_args {
+	u_int code;		/* Syscall number. */
+	int narg;		/* Number of arguments. */
+	struct sysent *callp;	/* Resolved sysent entry. */
+	register_t *args;	/* Native argument vector. */
+	register_t args32[8];	/* Staging for 32-bit args; NOTE(review): confirm use. */
+};
+#endif
+
+#endif /* !_MACHINE_PROC_H_ */


Property changes on: trunk/sys/ia64/include/proc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/profile.h
===================================================================
--- trunk/sys/ia64/include/profile.h	                        (rev 0)
+++ trunk/sys/ia64/include/profile.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,66 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/profile.h 209617 2010-06-30 22:29:02Z marcel $
+ */
+
+#ifndef _MACHINE_PROFILE_H_
+#define	_MACHINE_PROFILE_H_
+
+#define	_MCOUNT_DECL	void __mcount
+#define	MCOUNT	/* Intentionally empty: no assembly wrapper. */
+
+#define	FUNCTION_ALIGNMENT	16	/* Bytes: one instruction bundle. */
+
+typedef unsigned long	fptrdiff_t;
+
+#ifdef _KERNEL
+/*
+ * The following two macros do splhigh and splx respectively.
+ */
+#define	MCOUNT_ENTER(s)	s = intr_disable()
+#define	MCOUNT_EXIT(s)	intr_restore(s)
+#define	MCOUNT_DECL(s)	register_t s;
+
+void bintr(void);	/* Profiling boundary symbols; NOTE(review): defined in MD assembly. */
+void btrap(void);
+void eintr(void);
+void user(void);
+
+#define	MCOUNT_FROMPC_USER(pc)		\
+	((pc < (uintfptr_t)VM_MAXUSER_ADDRESS) ? ~0UL : pc)	/* User PCs collapse to ~0UL. */
+
+#define	MCOUNT_FROMPC_INTR(pc)		(~0UL)	/* Interrupt PCs are not attributed. */
+
+_MCOUNT_DECL(uintfptr_t, uintfptr_t);
+
+#else /* !_KERNEL */
+
+typedef unsigned long	uintfptr_t;
+
+#endif
+
+#endif /* _MACHINE_PROFILE_H_ */


Property changes on: trunk/sys/ia64/include/profile.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/pte.h
===================================================================
--- trunk/sys/ia64/include/pte.h	                        (rev 0)
+++ trunk/sys/ia64/include/pte.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,107 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/pte.h 137978 2004-11-21 21:40:08Z marcel $
+ */
+
+#ifndef _MACHINE_PTE_H_
+#define	_MACHINE_PTE_H_
+
+#define	PTE_PRESENT	0x0000000000000001
+#define	PTE__RV1_	0x0000000000000002
+#define	PTE_MA_MASK	0x000000000000001C
+#define	PTE_MA_WB	0x0000000000000000
+#define	PTE_MA_UC	0x0000000000000010
+#define	PTE_MA_UCE	0x0000000000000014
+#define	PTE_MA_WC	0x0000000000000018
+#define	PTE_MA_NATPAGE	0x000000000000001C
+#define	PTE_ACCESSED	0x0000000000000020
+#define	PTE_DIRTY	0x0000000000000040
+#define	PTE_PL_MASK	0x0000000000000180
+#define	PTE_PL_KERN	0x0000000000000000
+#define	PTE_PL_USER	0x0000000000000180
+#define	PTE_AR_MASK	0x0000000000000E00
+#define	PTE_AR_R	0x0000000000000000
+#define	PTE_AR_RX	0x0000000000000200
+#define	PTE_AR_RW	0x0000000000000400
+#define	PTE_AR_RWX	0x0000000000000600
+#define	PTE_AR_R_RW	0x0000000000000800
+#define	PTE_AR_RX_RWX	0x0000000000000A00
+#define	PTE_AR_RWX_RW	0x0000000000000C00
+#define	PTE_AR_X_RX	0x0000000000000E00
+#define	PTE_PPN_MASK	0x0003FFFFFFFFF000
+#define	PTE__RV2_	0x000C000000000000
+#define	PTE_ED		0x0010000000000000
+#define	PTE_IG_MASK	0xFFE0000000000000
+#define	PTE_WIRED	0x0020000000000000
+#define	PTE_MANAGED	0x0040000000000000
+#define	PTE_PROT_MASK	0x0700000000000000
+
+#define	ITIR__RV1_	0x0000000000000003
+#define	ITIR_PS_MASK	0x00000000000000FC
+#define	ITIR_KEY_MASK	0x00000000FFFFFF00
+#define	ITIR__RV2_	0xFFFFFFFF00000000
+
+#ifndef LOCORE
+
+typedef uint64_t pt_entry_t;
+
+static __inline pt_entry_t
+pte_atomic_clear(pt_entry_t *ptep, uint64_t val)
+{
+	return (atomic_clear_64(ptep, val));
+}
+
+static __inline pt_entry_t
+pte_atomic_set(pt_entry_t *ptep, uint64_t val)
+{
+	return (atomic_set_64(ptep, val));
+}
+
+/*
+ * A long-format VHPT entry.
+ */
+struct ia64_lpte {
+	pt_entry_t	pte;
+	uint64_t	itir;
+	uint64_t	tag;		/* includes ti */
+	uint64_t	chain;		/* pa of collision chain */
+};
+
+/*
+ * Layout of rr[x].
+ */
+struct ia64_rr {
+	uint64_t	rr_ve	:1;	/* bit 0 */
+	uint64_t	__rv1__	:1;	/* bit 1 */
+	uint64_t	rr_ps	:6;	/* bits 2..7 */
+	uint64_t	rr_rid	:24;	/* bits 8..31 */
+	uint64_t	__rv2__	:32;	/* bits 32..63 */
+};
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_PTE_H_ */


Property changes on: trunk/sys/ia64/include/pte.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/ptrace.h
===================================================================
--- trunk/sys/ia64/include/ptrace.h	                        (rev 0)
+++ trunk/sys/ia64/include/ptrace.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,43 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)ptrace.h	8.1 (Berkeley) 6/11/93
+ * $FreeBSD: stable/10/sys/ia64/include/ptrace.h 139790 2005-01-06 22:18:23Z imp $
+ */
+
+#ifndef _MACHINE_PTRACE_H_
+#define	_MACHINE_PTRACE_H_
+
+#define	__HAVE_PTRACE_MACHDEP
+
+/* Fetch/store dirty registers on the kernel stack. */
+#define	PT_GETKSTACK	(PT_FIRSTMACH + 0)
+#define	PT_SETKSTACK	(PT_FIRSTMACH + 1)
+
+#endif /* _MACHINE_PTRACE_H_ */


Property changes on: trunk/sys/ia64/include/ptrace.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/reg.h
===================================================================
--- trunk/sys/ia64/include/reg.h	                        (rev 0)
+++ trunk/sys/ia64/include/reg.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,107 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	$FreeBSD: stable/10/sys/ia64/include/reg.h 283910 2015-06-02 14:54:53Z jhb $
+ */
+
+#ifndef _MACHINE_REG_H_
+#define _MACHINE_REG_H_
+
+#include <machine/_regset.h>
+
+struct reg32 {
+	unsigned int	r_fs;
+	unsigned int	r_es;
+	unsigned int	r_ds;
+	unsigned int	r_edi;
+	unsigned int	r_esi;
+	unsigned int	r_ebp;
+	unsigned int	r_isp;
+	unsigned int	r_ebx;
+	unsigned int	r_edx;
+	unsigned int	r_ecx;
+	unsigned int	r_eax;
+	unsigned int	r_trapno;
+	unsigned int	r_err;
+	unsigned int	r_eip;
+	unsigned int	r_cs;
+	unsigned int	r_eflags;
+	unsigned int	r_esp;
+	unsigned int	r_ss;
+	unsigned int	r_gs;
+};
+
+struct reg {
+	struct _special		r_special;
+	struct _callee_saved	r_preserved;
+	struct _caller_saved	r_scratch;
+};
+
+struct fpreg32 {
+	unsigned int	fpr_env[7];
+	unsigned char	fpr_acc[8][10];
+	unsigned int	fpr_ex_sw;
+	unsigned char	fpr_pad[64];
+};
+
+struct fpreg {
+	struct _callee_saved_fp	fpr_preserved;
+	struct _caller_saved_fp	fpr_scratch;
+	struct _high_fp		fpr_high;
+};
+
+struct dbreg32 {
+	unsigned int	dr[8];
+};
+
+struct dbreg {
+	unsigned long	dbr_data[8];
+	unsigned long	dbr_inst[8];
+};
+
+#define __HAVE_REG32
+
+#ifdef _KERNEL
+struct thread;
+
+/* XXX these interfaces are MI, so they should be declared in a MI place. */
+int	fill_regs(struct thread *, struct reg *);
+int	set_regs(struct thread *, struct reg *);
+int	fill_fpregs(struct thread *, struct fpreg *);
+int	set_fpregs(struct thread *, struct fpreg *);
+int	fill_dbregs(struct thread *, struct dbreg *);
+int	set_dbregs(struct thread *, struct dbreg *);
+#ifdef COMPAT_FREEBSD32
+int	fill_regs32(struct thread *, struct reg32 *);
+int	set_regs32(struct thread *, struct reg32 *);
+int	fill_fpregs32(struct thread *, struct fpreg32 *);
+int	set_fpregs32(struct thread *, struct fpreg32 *);
+int	fill_dbregs32(struct thread *, struct dbreg32 *);
+int	set_dbregs32(struct thread *, struct dbreg32 *);
+#endif
+#endif
+
+#endif /* _MACHINE_REG_H_ */


Property changes on: trunk/sys/ia64/include/reg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/reloc.h
===================================================================
--- trunk/sys/ia64/include/reloc.h	                        (rev 0)
+++ trunk/sys/ia64/include/reloc.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,31 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 John Birrell <jb at cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/reloc.h 165967 2007-01-12 07:26:21Z imp $
+ */


Property changes on: trunk/sys/ia64/include/reloc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/resource.h
===================================================================
--- trunk/sys/ia64/include/resource.h	                        (rev 0)
+++ trunk/sys/ia64/include/resource.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,45 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/resource.h 139790 2005-01-06 22:18:23Z imp $ */
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.  M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose.  It is provided "as is" without express or implied
+ * warranty.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_RESOURCE_H_
+#define	_MACHINE_RESOURCE_H_	1
+
+/*
+ * Definitions of resource types for Intel Architecture machines
+ * with support for legacy ISA devices and drivers.
+ */
+
+#define	SYS_RES_IRQ	1	/* interrupt lines */
+#define	SYS_RES_DRQ	2	/* isa dma lines */
+#define	SYS_RES_MEMORY	3	/* i/o memory */
+#define	SYS_RES_IOPORT	4	/* i/o ports */
+
+#endif /* !_MACHINE_RESOURCE_H_ */


Property changes on: trunk/sys/ia64/include/resource.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/runq.h
===================================================================
--- trunk/sys/ia64/include/runq.h	                        (rev 0)
+++ trunk/sys/ia64/include/runq.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,66 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 Jake Burkholder <jake at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/runq.h 208283 2010-05-19 00:23:10Z marcel $
+ */
+
+#ifndef	_MACHINE_RUNQ_H_
+#define	_MACHINE_RUNQ_H_
+
+#define	RQB_LEN		(1UL)		/* Number of priority status words. */
+#define	RQB_L2BPW	(6UL)		/* Log2(sizeof(rqb_word_t) * NBBY)). */
+#define	RQB_BPW		(1UL<<RQB_L2BPW)	/* Bits in an rqb_word_t. */
+
+#define	RQB_BIT(pri)	(1UL << ((pri) & (RQB_BPW - 1)))
+#define	RQB_WORD(pri)	((pri) >> RQB_L2BPW)
+
+#define	RQB_FFS(word)	(__ffsl(word) - 1)
+
+/*
+ * Type of run queue status word.
+ */
+typedef	uint64_t	rqb_word_t;
+
+static __inline uint64_t
+__popcnt(uint64_t bits)
+{
+        uint64_t result;
+
+	__asm __volatile("popcnt %0=%1" : "=r" (result) : "r" (bits));
+	return result;
+}
+
+
+static __inline int
+__ffsl(u_long mask)
+{
+
+	if (__predict_false(mask == 0ul))
+		return (0);
+	return (__popcnt(mask ^ (mask - 1)));
+}
+
+#endif


Property changes on: trunk/sys/ia64/include/runq.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/sal.h
===================================================================
--- trunk/sys/ia64/include/sal.h	                        (rev 0)
+++ trunk/sys/ia64/include/sal.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,142 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/sal.h 208283 2010-05-19 00:23:10Z marcel $
+ */
+
+#ifndef _MACHINE_SAL_H_
+#define _MACHINE_SAL_H_
+
+struct sal_system_table {
+	char		sal_signature[4];
+#define	SAL_SIGNATURE	"SST_"
+	uint32_t	sal_length;
+	uint8_t		sal_rev[2];
+	uint16_t	sal_entry_count;
+	uint8_t		sal_checksum;
+	uint8_t		sal_reserved1[7];
+	uint8_t		sal_a_version[2];
+	uint8_t		sal_b_version[2];
+	char		sal_oem_id[32];
+	char		sal_product_id[32];
+	uint8_t		sal_reserved2[8];
+};
+
+struct sal_entrypoint_descriptor {
+	uint8_t		sale_type;	/* == 0 */
+	uint8_t		sale_reserved1[7];
+	uint64_t	sale_pal_proc;
+	uint64_t	sale_sal_proc;
+	uint64_t	sale_sal_gp;
+	uint8_t		sale_reserved2[16];
+};
+
+struct sal_memory_descriptor {
+	uint8_t		sale_type;	/* == 1 */
+	uint8_t		sale_need_virtual;
+	uint8_t		sale_current_attribute;
+	uint8_t		sale_access_rights;
+	uint8_t		sale_supported_attributes;
+	uint8_t		sale_reserved1;
+	uint8_t		sale_memory_type[2];
+	uint64_t	sale_physical_address;
+	uint32_t	sale_length;
+	uint8_t		sale_reserved2[12];
+};
+
+struct sal_platform_descriptor {
+	uint8_t		sale_type;	/* == 2 */
+	uint8_t		sale_features;
+	uint8_t		sale_reserved[14];
+};
+
+struct sal_tr_descriptor {
+	uint8_t		sale_type;	/* == 3 */
+	uint8_t		sale_register_type;
+	uint8_t		sale_register_number;
+	uint8_t		sale_reserved1[5];
+	uint64_t	sale_virtual_address;
+	uint64_t	sale_page_size;
+	uint8_t		sale_reserved2[8];
+};
+
+struct sal_ptc_cache_descriptor {
+	uint8_t		sale_type;	/* == 4 */
+	uint8_t		sale_reserved[3];
+	uint32_t	sale_domains;
+	uint64_t	sale_address;
+};
+
+struct sal_ap_wakeup_descriptor {
+	uint8_t		sale_type;	/* == 5 */
+	uint8_t		sale_mechanism;
+	uint8_t		sale_reserved[6];
+	uint64_t	sale_vector;
+};
+
+/*
+ * SAL Procedure numbers.
+ */
+
+#define SAL_SET_VECTORS		0x01000000
+#define SAL_GET_STATE_INFO	0x01000001
+#define SAL_GET_STATE_INFO_SIZE	0x01000002
+#define SAL_CLEAR_STATE_INFO	0x01000003
+#define SAL_MC_RENDEZ		0x01000004
+#define SAL_MC_SET_PARAMS	0x01000005
+#define SAL_REGISTER_PHYSICAL_ADDR 0x01000006
+#define SAL_CACHE_FLUSH		0x01000008
+#define SAL_CACHE_INIT		0x01000009
+#define SAL_PCI_CONFIG_READ	0x01000010
+#define SAL_PCI_CONFIG_WRITE	0x01000011
+#define SAL_FREQ_BASE		0x01000012
+#define SAL_UPDATE_PAL		0x01000020
+
+/* SAL_SET_VECTORS event handler types */
+#define	SAL_OS_MCA		0
+#define	SAL_OS_INIT		1
+#define	SAL_OS_BOOT_RENDEZ	2
+
+/* SAL_GET_STATE_INFO, SAL_GET_STATE_INFO_SIZE types */
+#define	SAL_INFO_MCA		0
+#define	SAL_INFO_INIT		1
+#define	SAL_INFO_CMC		2
+#define	SAL_INFO_CPE		3
+#define	SAL_INFO_TYPES		4	/* number of types we know about */
+
+struct ia64_sal_result {
+	int64_t		sal_status;
+	uint64_t	sal_result[3];
+};
+
+typedef struct ia64_sal_result sal_entry_t(uint64_t, uint64_t, uint64_t,
+    uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
+
+extern sal_entry_t *ia64_sal_entry;
+
+extern void ia64_sal_init(void);
+
+#endif /* _MACHINE_SAL_H_ */


Property changes on: trunk/sys/ia64/include/sal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/setjmp.h
===================================================================
--- trunk/sys/ia64/include/setjmp.h	                        (rev 0)
+++ trunk/sys/ia64/include/setjmp.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,131 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2000
+ * Intel Corporation.
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ * 
+ *    This product includes software developed by Intel Corporation and
+ *    its contributors.
+ * 
+ * 4. Neither the name of Intel Corporation or its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL INTEL CORPORATION OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ * $FreeBSD: stable/10/sys/ia64/include/setjmp.h 118048 2003-07-26 08:03:43Z marcel $
+ */
+
+#ifndef _MACHINE_SETJMP_H_
+#define	_MACHINE_SETJMP_H_
+
+#include <sys/cdefs.h>
+
+#if __BSD_VISIBLE
+#define	JMPBUF_ADDR_OF(buf, item)	((unsigned long)((char *)buf + item))
+
+#define	J_UNAT		0
+#define	J_NATS		0x8
+#define	J_PFS		0x10
+#define	J_BSP		0x18
+#define	J_RNAT		0x20
+#define	J_PREDS		0x28
+#define	J_LC		0x30
+#define	J_R4		0x38
+#define	J_R5		0x40
+#define	J_R6		0x48
+#define	J_R7		0x50
+#define	J_SP		0x58
+#define	J_F2		0x60
+#define	J_F3		0x70
+#define	J_F4		0x80
+#define	J_F5		0x90
+#define	J_F16		0xa0
+#define	J_F17		0xb0
+#define	J_F18		0xc0
+#define	J_F19		0xd0
+#define	J_F20		0xe0
+#define	J_F21		0xf0
+#define	J_F22		0x100
+#define	J_F23		0x110
+#define	J_F24		0x120
+#define	J_F25		0x130
+#define	J_F26		0x140
+#define	J_F27		0x150
+#define	J_F28		0x160
+#define	J_F29		0x170
+#define	J_F30		0x180
+#define	J_F31		0x190
+#define	J_FPSR		0x1a0
+#define	J_B0		0x1a8
+#define	J_B1		0x1b0
+#define	J_B2		0x1b8
+#define	J_B3		0x1c0
+#define	J_B4		0x1c8
+#define	J_B5		0x1d0
+#define	J_SIGMASK	0x1d8
+#define	J_SIGSET	0x1e0
+#endif /* __BSD_VISIBLE */
+
+#define	_JBLEN		0x20			/* Size in long doubles */
+
+/*
+ * XXX this check is wrong, since LOCORE is in the application namespace and
+ * applications shouldn't be able to affect the implementation.  One workaround
+ * would be to only check LOCORE if _KERNEL is defined, but unfortunately
+ * LOCORE is used outside of the kernel.  The best solution would be to rename
+ * LOCORE to _LOCORE, so that it can be used in userland to safely affect the
+ * implementation.
+ */
+#ifndef LOCORE
+
+/*
+ * jmp_buf and sigjmp_buf are encapsulated in different structs to force
+ * compile-time diagnostics for mismatches.  The structs are the same
+ * internally to avoid some run-time errors for mismatches.
+ */
+#if __BSD_VISIBLE || __POSIX_VISIBLE || __XSI_VISIBLE
+struct _sigjmp_buf {
+	long double buf[_JBLEN];
+};
+typedef struct _sigjmp_buf sigjmp_buf[1];
+#endif
+
+struct _jmp_buf {
+	long double buf[_JBLEN];
+};
+typedef struct _jmp_buf	jmp_buf[1];
+
+#ifdef _KERNEL
+#ifdef CTASSERT
+CTASSERT(sizeof(struct _jmp_buf) == 512);
+#endif
+#endif
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_SETJMP_H_ */


Property changes on: trunk/sys/ia64/include/setjmp.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/sf_buf.h
===================================================================
--- trunk/sys/ia64/include/sf_buf.h	                        (rev 0)
+++ trunk/sys/ia64/include/sf_buf.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,73 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003 Alan L. Cox <alc at cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/sf_buf.h 255289 2013-09-06 05:37:49Z glebius $
+ */
+
+#ifndef _MACHINE_SF_BUF_H_
+#define _MACHINE_SF_BUF_H_
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_page.h>
+
+/*
+ * On this machine, the only purpose for which sf_buf is used is to implement
+ * an opaque pointer required by the machine-independent parts of the kernel.
+ * That pointer references the vm_page that is "mapped" by the sf_buf.  The
+ * actual mapping is provided by the direct virtual-to-physical mapping.  
+ */
+struct sf_buf;
+
+static inline struct sf_buf *
+sf_buf_alloc(struct vm_page *m, int pri)
+{
+
+	return ((struct sf_buf *)m);
+}
+
+static inline void
+sf_buf_free(struct sf_buf *sf)
+{
+}
+
+static __inline vm_page_t
+sf_buf_page(struct sf_buf *sf)
+{
+ 
+	return ((vm_page_t)sf);
+}
+
+static __inline vm_offset_t
+sf_buf_kva(struct sf_buf *sf)
+{
+	vm_page_t m;
+
+	m = sf_buf_page(sf);
+	return (pmap_page_to_va(m));
+}
+
+#endif /* !_MACHINE_SF_BUF_H_ */


Property changes on: trunk/sys/ia64/include/sf_buf.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/sigframe.h
===================================================================
--- trunk/sys/ia64/include/sigframe.h	                        (rev 0)
+++ trunk/sys/ia64/include/sigframe.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,40 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1999 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer 
+ *    in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/sigframe.h 105950 2002-10-25 19:10:58Z peter $
+ */
+
+#ifndef _MACHINE_SIGFRAME_H_
+#define _MACHINE_SIGFRAME_H_ 1
+
+struct sigframe {
+	ucontext_t	sf_uc;
+	siginfo_t	sf_si;
+};
+
+#endif /* _MACHINE_SIGFRAME_H_ */


Property changes on: trunk/sys/ia64/include/sigframe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/signal.h
===================================================================
--- trunk/sys/ia64/include/signal.h	                        (rev 0)
+++ trunk/sys/ia64/include/signal.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,82 @@
+/* $MidnightBSD$ */
+/* $FreeBSD: stable/10/sys/ia64/include/signal.h 149337 2005-08-20 16:44:41Z stefanf $ */
+/* From: NetBSD: signal.h,v 1.3 1997/04/06 08:47:43 cgd Exp */
+
+/*-
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ * 
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ * 
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ * 
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution at CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _MACHINE_SIGNAL_H_
+#define	_MACHINE_SIGNAL_H_
+
+#include <sys/cdefs.h>
+#include <sys/_sigset.h>
+
+typedef long	sig_atomic_t;
+
+#if __BSD_VISIBLE
+/* portable macros for SIGFPE/ARITHTRAP */
+#define FPE_INTOVF	1	/* integer overflow */
+#define FPE_INTDIV	2	/* integer divide by zero */
+#define FPE_FLTDIV	3	/* floating point divide by zero */
+#define FPE_FLTOVF	4	/* floating point overflow */
+#define FPE_FLTUND	5	/* floating point underflow */
+#define FPE_FLTRES	6	/* floating point inexact result */
+#define FPE_FLTINV	7	/* invalid floating point operation */
+#define FPE_FLTSUB	8	/* subscript out of range */
+
+#define BUS_SEGM_FAULT	30	/* segment protection base */
+#endif
+
+/*
+ * Information pushed on stack when a signal is delivered.
+ * This is used by the kernel to restore state following
+ * execution of the signal handler.  It is also made available
+ * to the handler to allow it to restore state properly if
+ * a non-standard exit is performed.
+ */
+
+#if __BSD_VISIBLE
+#include <machine/_regset.h>
+
+/*
+ * The sequence of the fields should match those in
+ * mcontext_t. Keep them in sync!
+ */
+struct sigcontext {
+	struct __sigset		sc_mask;	/* signal mask to restore */
+	unsigned long		sc_onstack;
+	unsigned long		sc_flags;
+	struct _special		sc_special;
+	struct _callee_saved	sc_preserved;
+	struct _callee_saved_fp	sc_preserved_fp;
+	struct _caller_saved	sc_scratch;
+	struct _caller_saved_fp	sc_scratch_fp;
+	struct _high_fp		sc_high_fp;
+};
+#endif /* __BSD_VISIBLE */
+
+#endif /* !_MACHINE_SIGNAL_H_ */


Property changes on: trunk/sys/ia64/include/signal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/smp.h
===================================================================
--- trunk/sys/ia64/include/smp.h	                        (rev 0)
+++ trunk/sys/ia64/include/smp.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,58 @@
+/* $MidnightBSD$ */
+/*
+ * $FreeBSD: stable/10/sys/ia64/include/smp.h 268200 2014-07-02 23:47:43Z marcel $
+ */
+#ifndef _MACHINE_SMP_H_
+#define _MACHINE_SMP_H_
+
+#ifdef _KERNEL
+
+#define	IPI_AST			ia64_ipi_ast
+#define	IPI_HARDCLOCK		ia64_ipi_hardclock
+#define	IPI_PREEMPT		ia64_ipi_preempt
+#define	IPI_RENDEZVOUS		ia64_ipi_rndzvs
+#define	IPI_STOP		ia64_ipi_stop
+#define	IPI_STOP_HARD		ia64_ipi_nmi
+
+#ifndef LOCORE
+
+#include <sys/_cpuset.h>
+
+struct pcpu;
+
+struct ia64_ap_state {
+	uint64_t	as_trace;
+	uint64_t	as_pgtbl_pte;
+	uint64_t	as_pgtbl_itir;
+	uint64_t	as_text_va;
+	uint64_t	as_text_pte;
+	uint64_t	as_text_itir;
+	uint64_t	as_data_va;
+	uint64_t	as_data_pte;
+	uint64_t	as_data_itir;
+	void		*as_kstack;
+	void		*as_kstack_top;
+	struct pcpu	*as_pcpu;
+	void		*as_xtrace_buffer;
+	volatile int	as_delay;
+	volatile u_int	as_awake;
+	volatile u_int	as_spin;
+};
+
+extern int ia64_ipi_ast;
+extern int ia64_ipi_hardclock;
+extern int ia64_ipi_highfp;
+extern int ia64_ipi_nmi;
+extern int ia64_ipi_preempt;
+extern int ia64_ipi_rndzvs;
+extern int ia64_ipi_stop;
+extern int ia64_ipi_wakeup;
+
+void	ipi_all_but_self(int ipi);
+void	ipi_cpu(int cpu, u_int ipi);
+void	ipi_selected(cpuset_t cpus, int ipi);
+void	ipi_send(struct pcpu *, int ipi);
+
+#endif /* !LOCORE */
+#endif /* _KERNEL */
+#endif /* !_MACHINE_SMP_H_ */


Property changes on: trunk/sys/ia64/include/smp.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/stdarg.h
===================================================================
--- trunk/sys/ia64/include/stdarg.h	                        (rev 0)
+++ trunk/sys/ia64/include/stdarg.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,68 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002 David E. O'Brien.  All rights reserved.
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)stdarg.h	8.1 (Berkeley) 6/10/93
+ * $FreeBSD: stable/10/sys/ia64/include/stdarg.h 162487 2006-09-21 01:37:02Z kan $
+ */
+
+#ifndef _MACHINE_STDARG_H_
+#define	_MACHINE_STDARG_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+#ifndef _VA_LIST_DECLARED
+#define	_VA_LIST_DECLARED
+typedef	__va_list	va_list;
+#endif
+
+#if defined(__GNUCLIKE_BUILTIN_STDARG)
+
+#define	va_start(ap, last) \
+	__builtin_va_start((ap), (last))
+
+#define	va_arg(ap, type) \
+	__builtin_va_arg((ap), type)
+
+#if __ISO_C_VISIBLE >= 1999
+#define	va_copy(dest, src) \
+	__builtin_va_copy((dest), (src))
+#endif
+
+#define	va_end(ap) \
+	__builtin_va_end(ap)
+
+#endif
+
+#endif /* !_MACHINE_STDARG_H_ */


Property changes on: trunk/sys/ia64/include/stdarg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/sysarch.h
===================================================================
--- trunk/sys/ia64/include/sysarch.h	                        (rev 0)
+++ trunk/sys/ia64/include/sysarch.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,44 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/sysarch.h 202097 2010-01-11 18:10:13Z marcel $
+ */
+
+#ifndef _MACHINE_SYSARCH_H_
+#define	_MACHINE_SYSARCH_H_
+
+#ifndef _KERNEL
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+int	sysarch(int, void *);
+__END_DECLS
+#endif
+
+#endif /* !_MACHINE_SYSARCH_H_ */


Property changes on: trunk/sys/ia64/include/sysarch.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/ucontext.h
===================================================================
--- trunk/sys/ia64/include/ucontext.h	                        (rev 0)
+++ trunk/sys/ia64/include/ucontext.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,93 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1999, 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer 
+ *    in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/ucontext.h 177091 2008-03-12 10:12:01Z jeff $
+ */
+
+#ifndef _MACHINE_UCONTEXT_H_
+#define	_MACHINE_UCONTEXT_H_
+
+#include <machine/_regset.h>
+
+/*
+ * The mc_flags field provides the necessary clues when dealing with the gory
+ * details of ia64 specific contexts. A comprehensive explanation is added for
+ * everybody's sanity, including the author's.
+ *
+ * The first and foremost variation in the context is synchronous contexts
+ * (= synctx) versus asynchronous contexts (= asynctx). A synctx is created
+ * synchronously WRT program execution and has the advantage that none of the
+ * scratch registers have to be saved. They are assumed to be clobbered by the
+ * call to the function that creates the context. An asynctx needs to have the
+ * scratch registers preserved because it can describe any point in a thread's
+ * (or process') execution.
+ * The second variation is for synchronous contexts. When the kernel creates
+ * a synchronous context if needs to preserve the scratch registers, because
+ * the syscall argument and return values are stored there in the trapframe
+ * and they need to be preserved in order to restart a syscall or return the
+ * proper return values. Also, the IIP and CFM fields need to be preserved
+ * as they point to the syscall stub, which the kernel saves as a favor to
+ * userland (it keeps the stubs small and simple).
+ *
+ * Below a description of the flags and their meaning:
+ *
+ *	_MC_FLAGS_ASYNC_CONTEXT
+ *		If set, indicates that mc_scratch and mc_scratch_fp are both
+ *		valid. IFF not set, _MC_FLAGS_SYSCALL_CONTEXT indicates if the
+ *		synchronous context is one corresponding to a syscall or not.
+ *		Only the kernel is expected to create such a context and it's
+ *		probably wise to let the kernel restore it.
+ *	_MC_FLAGS_HIGHFP_VALID
+ *		If set, indicates that the high FP registers (f32-f127) are
+ *		valid. This flag is very likely not going to be set for any
+ *		sensible synctx, but is not explicitly disallowed. Any synctx
+ *		that has this flag may or may not have the high FP registers
+ *		restored. In short: don't do it.
+ *	_MC_FLAGS_SYSCALL_CONTEXT
+ *		If set (hence _MC_FLAGS_ASYNC_CONTEXT is not set) indicates
+ *		that the scratch registers contain syscall arguments and
+ *		return values and that additionally IIP and CFM are valid.
+ *		Only the kernel is expected to create such a context. It's
+ *		probably wise to let the kernel restore it.
+ */
+
+typedef struct __mcontext {
+	unsigned long		mc_flags;
+#define	_MC_FLAGS_ASYNC_CONTEXT		0x0001
+#define	_MC_FLAGS_HIGHFP_VALID		0x0002
+#define	_MC_FLAGS_SYSCALL_CONTEXT	0x0008
+	unsigned long		_reserved_;
+	struct _special		mc_special;
+	struct _callee_saved	mc_preserved;
+	struct _callee_saved_fp	mc_preserved_fp;
+	struct _caller_saved	mc_scratch;
+	struct _caller_saved_fp	mc_scratch_fp;
+	struct _high_fp		mc_high_fp;
+} mcontext_t;
+
+#endif /* !_MACHINE_UCONTEXT_H_ */


Property changes on: trunk/sys/ia64/include/ucontext.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/unwind.h
===================================================================
--- trunk/sys/ia64/include/unwind.h	                        (rev 0)
+++ trunk/sys/ia64/include/unwind.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,57 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/unwind.h 139790 2005-01-06 22:18:23Z imp $
+ */
+
+#ifndef _MACHINE_UNWIND_H_
+#define	_MACHINE_UNWIND_H_
+
+struct pcb;
+struct trapframe;
+struct uwx_env;
+
+struct unw_regstate {
+	struct pcb	*pcb;
+	struct trapframe *frame;
+	struct uwx_env	*env;
+	uint64_t	keyval[8];
+};
+
+int unw_create_from_pcb(struct unw_regstate *s, struct pcb *pcb);
+int unw_create_from_frame(struct unw_regstate *s, struct trapframe *tf);
+void unw_delete(struct unw_regstate *s);
+int unw_step(struct unw_regstate *s);
+
+int unw_get_bsp(struct unw_regstate *s, uint64_t *r);
+int unw_get_cfm(struct unw_regstate *s, uint64_t *r);
+int unw_get_ip(struct unw_regstate *s, uint64_t *r);
+int unw_get_sp(struct unw_regstate *s, uint64_t *r);
+
+int unw_table_add(uint64_t, uint64_t, uint64_t);
+void unw_table_remove(uint64_t);
+
+#endif /* _MACHINE_UNWIND_H_ */


Property changes on: trunk/sys/ia64/include/unwind.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/varargs.h
===================================================================
--- trunk/sys/ia64/include/varargs.h	                        (rev 0)
+++ trunk/sys/ia64/include/varargs.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,49 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2002 David E. O'Brien.  All rights reserved.
+ * Copyright (c) 1990, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)varargs.h	8.2 (Berkeley) 3/22/94
+ * $FreeBSD: stable/10/sys/ia64/include/varargs.h 120540 2003-09-28 05:34:07Z marcel $
+ */
+
+#ifndef _MACHINE_VARARGS_H_
+#define	_MACHINE_VARARGS_H_
+
+#error "<varargs.h> is obsolete on ia64. Use <stdarg.h> instead."
+
+#endif /* !_MACHINE_VARARGS_H_ */


Property changes on: trunk/sys/ia64/include/varargs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/vdso.h
===================================================================
--- trunk/sys/ia64/include/vdso.h	                        (rev 0)
+++ trunk/sys/ia64/include/vdso.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,42 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright 2012 Konstantin Belousov <kib at FreeBSD.ORG>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/vdso.h 237433 2012-06-22 07:06:40Z kib $
+ */
+
+#ifndef _IA64_VDSO_H
+#define	_IA64_VDSO_H
+
+#define	VDSO_TIMEHANDS_MD			\
+	uint32_t	th_res[8];
+
+#ifdef _KERNEL
+#ifdef COMPAT_FREEBSD32
+
+#define	VDSO_TIMEHANDS_MD32	VDSO_TIMEHANDS_MD
+
+#endif
+#endif
+#endif


Property changes on: trunk/sys/ia64/include/vdso.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/vm.h
===================================================================
--- trunk/sys/ia64/include/vm.h	                        (rev 0)
+++ trunk/sys/ia64/include/vm.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,45 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2009 Alan L. Cox <alc at cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/vm.h 195649 2009-07-12 23:31:20Z alc $
+ */
+
+#ifndef _MACHINE_VM_H_
+#define	_MACHINE_VM_H_
+
+#include <machine/atomic.h>
+#include <machine/pte.h>
+
+/* Memory attributes. */
+#define	VM_MEMATTR_WRITE_BACK		((vm_memattr_t)PTE_MA_WB)
+#define	VM_MEMATTR_UNCACHEABLE		((vm_memattr_t)PTE_MA_UC)
+#define	VM_MEMATTR_UNCACHEABLE_EXPORTED	((vm_memattr_t)PTE_MA_UCE)
+#define	VM_MEMATTR_WRITE_COMBINING	((vm_memattr_t)PTE_MA_WC)
+#define	VM_MEMATTR_NATPAGE		((vm_memattr_t)PTE_MA_NATPAGE)
+
+#define	VM_MEMATTR_DEFAULT		VM_MEMATTR_WRITE_BACK
+
+#endif /* !_MACHINE_VM_H_ */


Property changes on: trunk/sys/ia64/include/vm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/include/vmparam.h
===================================================================
--- trunk/sys/ia64/include/vmparam.h	                        (rev 0)
+++ trunk/sys/ia64/include/vmparam.h	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,207 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: vmparam.h 1.16 91/01/18$
+ *
+ *	@(#)vmparam.h	8.2 (Berkeley) 4/22/94
+ *
+ * $FreeBSD: stable/10/sys/ia64/include/vmparam.h 266204 2014-05-16 01:30:30Z ian $
+ */
+
+#ifndef	_MACHINE_VMPARAM_H_
+#define	_MACHINE_VMPARAM_H_
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#ifndef MAXTSIZ
+#define	MAXTSIZ		(1<<30)			/* max text size (1G) */
+#endif
+#ifndef DFLDSIZ
+#define	DFLDSIZ		(1<<27)			/* initial data size (128M) */
+#endif
+#ifndef MAXDSIZ
+#define	MAXDSIZ		(1<<30)			/* max data size (1G) */
+#endif
+#ifndef	DFLSSIZ
+#define	DFLSSIZ		(1<<21)			/* initial stack size (2M) */
+#endif
+#ifndef	MAXSSIZ
+#define	MAXSSIZ		(1<<28)			/* max stack size (256M) */
+#endif
+#ifndef SGROWSIZ
+#define SGROWSIZ	(128UL*1024)		/* amount to grow stack */
+#endif
+
+/*
+ * We need region 7 virtual addresses for pagetables.
+ */
+#define UMA_MD_SMALL_ALLOC
+
+/*
+ * The physical address space is sparsely populated.
+ */
+#define	VM_PHYSSEG_SPARSE
+
+/*
+ * The number of PHYSSEG entries is equal to the number of phys_avail
+ * entries.
+ */
+#define	VM_PHYSSEG_MAX		49
+
+/*
+ * Create three free page pools: VM_FREEPOOL_DEFAULT is the default pool
+ * from which physical pages are allocated, VM_FREEPOOL_DIRECT is
+ * the pool from which physical pages for small UMA objects are
+ * allocated, and VM_FREEPOOL_CACHE is the third pool (its use is
+ * defined by the machine-independent vm_phys code).
+ */
+#define	VM_NFREEPOOL		3
+#define	VM_FREEPOOL_CACHE	2
+#define	VM_FREEPOOL_DEFAULT	0
+#define	VM_FREEPOOL_DIRECT	1
+
+/*
+ * Create one free page list.
+ */
+#define	VM_NFREELIST		1
+#define	VM_FREELIST_DEFAULT	0
+
+/*
+ * An allocation size of 256MB is supported in order to optimize the
+ * use of the identity mappings in region 7 by UMA.
+ */
+#define	VM_NFREEORDER		16
+
+/*
+ * Disable superpage reservations.
+ */
+#ifndef	VM_NRESERVLEVEL
+#define	VM_NRESERVLEVEL		0
+#endif
+
+/*
+ * Lowest region number used by the kernel; user space occupies the
+ * regions below it (see VM_MAXUSER_ADDRESS below).
+ */
+#define	IA64_VM_MINKERN_REGION	4
+
+/*
+ * Manipulating region bits of an address.
+ */
+#define IA64_RR_BASE(n)         (((uint64_t) (n)) << 61)
+#define IA64_RR_MASK(x)         ((x) & ((1L << 61) - 1))
+
+#define	IA64_PHYS_TO_RR6(x)	((x) | IA64_RR_BASE(6))
+#define	IA64_PHYS_TO_RR7(x)	((x) | IA64_RR_BASE(7))
+
+/*
+ * The Itanium architecture defines that all implementations support at
+ * least 51 virtual address bits (i.e. IMPL_VA_MSB=50). The unimplemented
+ * bits are sign-extended from VA{IMPL_VA_MSB}. As such, there's a gap in
+ * the virtual address range, which extends at most from 0x0004000000000000
+ * to 0x1ffbffffffffffff. We define the top half of a region in terms of
+ * this worst-case gap.
+ */
+#define	IA64_REGION_GAP_START	0x0004000000000000
+#define	IA64_REGION_GAP_EXTEND	0x1ffc000000000000
+
+/*
+ * Parameters for Pre-Boot Virtual Memory (PBVM).
+ * The kernel, its modules and metadata are loaded in the PBVM by the loader.
+ * The PBVM consists of pages for which the mapping is maintained in a page
+ * table. The page table is at least 1 EFI page large (i.e. 4KB), but can be
+ * larger to accommodate more PBVM. The maximum page table size is 1MB. With
+ * 8 bytes per page table entry, this means that the PBVM has at least 512
+ * pages and at most 128K pages.
+ * The GNU toolchain (in particular GNU ld) does not support an alignment
+ * larger than 64K. This means that we cannot guarantee page alignment for
+ * a page size that's larger than 64K. We do want to have text and data in
+ * different pages, which means that the maximum usable page size is 64KB.
+ * Consequently:
+ * The maximum total PBVM size is 8GB -- enough for a DVD image. A page table
+ * of a single EFI page (4KB) allows for 32MB of PBVM.
+ *
+ * The kernel is given the PA and size of the page table that provides the
+ * mapping of the PBVM. The page table itself is assumed to be mapped at a
+ * known virtual address and using a single translation wired into the CPU.
+ * As such, the page table is assumed to be a power of 2 and naturally aligned.
+ * The kernel also assumes that a good portion of the kernel text is mapped
+ * and wired into the CPU, but does not assume that the mapping covers the
+ * whole of PBVM.
+ */
+#define	IA64_PBVM_RR		IA64_VM_MINKERN_REGION
+#define	IA64_PBVM_BASE		\
+		(IA64_RR_BASE(IA64_PBVM_RR) + IA64_REGION_GAP_EXTEND)
+
+#define	IA64_PBVM_PGTBL_MAXSZ	1048576
+#define	IA64_PBVM_PGTBL		\
+		(IA64_RR_BASE(IA64_PBVM_RR + 1) - IA64_PBVM_PGTBL_MAXSZ)
+
+#define	IA64_PBVM_PAGE_SHIFT	16	/* 64KB */
+#define	IA64_PBVM_PAGE_SIZE	(1 << IA64_PBVM_PAGE_SHIFT)
+#define	IA64_PBVM_PAGE_MASK	(IA64_PBVM_PAGE_SIZE - 1)
+
+/*
+ * Mach derived constants
+ */
+
+/* user/kernel map constants */
+#define	VM_MIN_ADDRESS		0
+#define	VM_MAXUSER_ADDRESS	IA64_RR_BASE(IA64_VM_MINKERN_REGION)
+#define	VM_MIN_KERNEL_ADDRESS	VM_MAXUSER_ADDRESS
+#define	VM_INIT_KERNEL_ADDRESS	IA64_RR_BASE(IA64_VM_MINKERN_REGION + 1)
+#define	VM_MAX_KERNEL_ADDRESS	(IA64_RR_BASE(IA64_VM_MINKERN_REGION + 2) - 1)
+#define	VM_MAX_ADDRESS		~0UL
+
+/* We link the kernel at IA64_PBVM_BASE. */
+#define	KERNBASE		IA64_PBVM_BASE
+
+/*
+ * USRSTACK is the top (end) of the user stack.  Immediately above the user
+ * stack resides the syscall gateway page.
+ */
+#define	USRSTACK		VM_MAXUSER_ADDRESS
+/* Backing store base: below the stack by twice MAXSSIZ plus one page. */
+#define	IA64_BACKINGSTORE	(USRSTACK - (2 * MAXSSIZ) - PAGE_SIZE)
+
+/*
+ * How many physical pages per kmem arena virtual page.
+ */
+#ifndef VM_KMEM_SIZE_SCALE
+#define	VM_KMEM_SIZE_SCALE	(4)
+#endif
+
+/* initial pagein size of beginning of executable file */
+#ifndef VM_INITIAL_PAGEIN
+#define	VM_INITIAL_PAGEIN	16
+#endif
+
+#define	ZERO_REGION_SIZE	(2 * 1024 * 1024)	/* 2MB */
+
+#endif	/* !_MACHINE_VMPARAM_H_ */


Property changes on: trunk/sys/ia64/include/vmparam.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/isa/isa.c
===================================================================
--- trunk/sys/ia64/isa/isa.c	                        (rev 0)
+++ trunk/sys/ia64/isa/isa.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,138 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/isa/isa.c 221526 2011-05-06 13:48:53Z jhb $
+ */
+
+/*
+ * Modifications for Intel architecture by Garrett A. Wollman.
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.  M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose.  It is provided "as is" without express or implied
+ * warranty.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+
+#include <machine/resource.h>
+
+#include <isa/isareg.h>
+#include <isa/isavar.h>
+#include <isa/isa_common.h>
+
+/*
+ * Per-platform ISA bus initialization hook called by the MI ISA code;
+ * nothing is needed on this platform.
+ */
+void
+isa_init(device_t dev)
+{
+}
+
+/*
+ * This implementation simply passes the request up to the parent
+ * bus, which in our case is the special i386 nexus, substituting any
+ * configured values if the caller defaulted.  We can get away with
+ * this because there is no special mapping for ISA resources on an Intel
+ * platform.  When porting this code to another architecture, it may be
+ * necessary to interpose a mapping layer here.
+ */
+struct resource *
+isa_alloc_resource(device_t bus, device_t child, int type, int *rid,
+		   u_long start, u_long end, u_long count, u_int flags)
+{
+	/*
+	 * Consider adding a resource definition.
+	 */
+	/* Request routed through another bus on behalf of a grandchild? */
+	int passthrough = (device_get_parent(child) != bus);
+	/* Caller gave no explicit range, i.e. wants the configured value. */
+	int isdefault = (start == 0UL && end == ~0UL);
+	struct isa_device* idev = DEVTOISA(child);
+	struct resource_list *rl = &idev->id_resources;
+	struct resource_list_entry *rle;
+	
+	if (!passthrough && !isdefault) {
+		rle = resource_list_find(rl, type, *rid);
+		if (!rle) {
+			/*
+			 * No entry yet for this rid: validate the rid
+			 * against the per-type limits before recording
+			 * the caller-supplied range in the list.
+			 */
+			if (*rid < 0)
+				return 0;
+			switch (type) {
+			case SYS_RES_IRQ:
+				if (*rid >= ISA_NIRQ)
+					return 0;
+				break;
+			case SYS_RES_DRQ:
+				if (*rid >= ISA_NDRQ)
+					return 0;
+				break;
+			case SYS_RES_MEMORY:
+				if (*rid >= ISA_NMEM)
+					return 0;
+				break;
+			case SYS_RES_IOPORT:
+				if (*rid >= ISA_NPORT)
+					return 0;
+				break;
+			default:
+				return 0;
+			}
+			resource_list_add(rl, type, *rid, start, end, count);
+		}
+	}
+
+	/* Delegate the actual allocation to the resource list code. */
+	return resource_list_alloc(rl, bus, child, type, rid,
+				   start, end, count, flags);
+}
+
+/*
+ * Release a previously allocated ISA resource back to the child's
+ * resource list.  Returns the resource_list_release() status.
+ */
+int
+isa_release_resource(device_t bus, device_t child, int type, int rid,
+		     struct resource *r)
+{
+	struct isa_device* idev = DEVTOISA(child);
+	struct resource_list *rl = &idev->id_resources;
+	return resource_list_release(rl, bus, child, type, rid, r);
+}


Property changes on: trunk/sys/ia64/isa/isa.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/isa/isa_dma.c
===================================================================
--- trunk/sys/ia64/isa/isa_dma.c	                        (rev 0)
+++ trunk/sys/ia64/isa/isa_dma.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,509 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from: @(#)isa.c	7.2 (Berkeley) 5/13/91
+ *	from: isa_dma.c,v 1.3 1999/05/09 23:56:00 peter Exp $
+ * $FreeBSD: stable/10/sys/ia64/isa/isa_dma.c 177215 2008-03-15 06:44:45Z imp $
+ */
+
+/*
+ * code to manage AT bus
+ *
+ * 92/08/18  Frank P. MacLachlan (fpm at crash.cts.com):
+ * Fixed uninitialized variable problem and added code to deal
+ * with DMA page boundaries in isa_dmarangecheck().  Fixed word
+ * mode DMA count computation and reorganized DMA setup code in
+ * isa_dmastart()
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <isa/isareg.h>
+#include <isa/isavar.h>
+#include <isa/isa_dmareg.h>
+#include <machine/bus.h>
+
+/* Per-channel busdma state; index 0..7 matches the 8237 channel number. */
+static bus_dma_tag_t dma_tag[8];
+static bus_dmamap_t dma_map[8];
+/* Bitmasks over channels 0..7 (bit n == channel n). */
+static u_int8_t	dma_busy = 0;		/* Used in isa_dmastart() */
+static u_int8_t	dma_inuse = 0;		/* Used for acquire/release */
+static u_int8_t dma_auto_mode = 0;
+static u_int8_t dma_bounced = 0;
+
+#define VALID_DMA_MASK (7)
+
+/* high byte of address is stored in this port for i-th dma channel */
+static int dmapageport[8] = { 0x87, 0x83, 0x81, 0x82, 0x8f, 0x8b, 0x89, 0x8a };
+
+/*
+ * Setup a DMA channel's bounce buffer.
+ */
+int
+isa_dma_init(int chan, u_int bouncebufsize, int flag __unused)
+{
+	static int initted = 0;
+	/* Word-mode channels (4..7) cross at 128K; byte-mode at 64K. */
+	bus_addr_t boundary = chan >= 4 ? 0x20000 : 0x10000;
+
+	if (!initted) {
+		/*
+		 * Reset the DMA hardware.
+		 */
+		outb(DMA1_RESET, 0);
+		outb(DMA2_RESET, 0);
+		/* Channel 4 cascades controller 1 into controller 2. */
+		isa_dmacascade(4);
+	    
+		initted = 1;
+	}
+
+#ifdef DIAGNOSTIC
+	if (chan & ~VALID_DMA_MASK)
+		panic("isa_dma_init: channel out of range");
+
+	if (dma_tag[chan] || dma_map[chan])
+		panic("isa_dma_init: impossible request"); 
+#endif
+
+	/*
+	 * Create a tag restricted to the ISA-reachable 24-bit address
+	 * space so busdma bounces any buffer above 16MB for us.
+	 */
+	if (bus_dma_tag_create(/*parent*/NULL,
+			       /*alignment*/2,
+			       /*boundary*/boundary,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       /*maxsize*/bouncebufsize,
+			       /*nsegments*/1, /*maxsegz*/0x3ffff,
+			       /*flags*/0,
+			       /*lockfunc*/busdma_lock_mutex,
+			       /*lockarg*/&Giant,
+			       &dma_tag[chan]) != 0) {
+		panic("isa_dma_init: unable to create dma tag\n");
+	}
+	
+	if (bus_dmamap_create(dma_tag[chan], 0, &dma_map[chan])) {
+		panic("isa_dma_init: unable to create dma map\n");
+	}
+
+	return (0);
+}
+
+/*
+ * Register a DMA channel's usage.  Usually called from a device driver
+ * in open() or during its initialization.
+ */
+int
+isa_dma_acquire(chan)
+	int chan;
+{
+#ifdef DIAGNOSTIC
+	if (chan & ~VALID_DMA_MASK)
+		panic("isa_dma_acquire: channel out of range");
+#endif
+
+	/* Refuse double acquisition; callers must release first. */
+	if (dma_inuse & (1 << chan)) {
+		printf("isa_dma_acquire: channel %d already in use\n", chan);
+		return (EBUSY);
+	}
+	dma_inuse |= (1 << chan);
+	dma_auto_mode &= ~(1 << chan);
+
+	return (0);
+}
+
+/*
+ * Unregister a DMA channel's usage.  Usually called from a device driver
+ * during close() or during its shutdown.
+ */
+void
+isa_dma_release(chan)
+	int chan;
+{
+#ifdef DIAGNOSTIC
+	if (chan & ~VALID_DMA_MASK)
+		panic("isa_dma_release: channel out of range");
+
+	if ((dma_inuse & (1 << chan)) == 0)
+		printf("isa_dma_release: channel %d not in use\n", chan);
+#endif
+
+	/* If a transfer was still marked active, just drop the busy bit. */
+	if (dma_busy & (1 << chan)) {
+		dma_busy &= ~(1 << chan);
+		/* 
+		 * XXX We should also do "dma_bounced &= (1 << chan);"
+		 * because we are acting on behalf of isa_dmadone() which
+		 * was not called to end the last DMA operation.  This does
+		 * not matter now, but it may in the future.
+		 */
+	}
+
+	dma_inuse &= ~(1 << chan);
+	dma_auto_mode &= ~(1 << chan);
+}
+
+/*
+ * isa_dmacascade(): program 8237 DMA controller channel to accept
+ * external dma control by a board.
+ */
+void
+isa_dmacascade(chan)
+	int chan;
+{
+#ifdef DIAGNOSTIC
+	if (chan & ~VALID_DMA_MASK)
+		panic("isa_dmacascade: channel out of range");
+#endif
+
+	/* set dma channel mode, and set dma channel mode */
+	/* Channels 0..3 live on controller 1; 4..7 on controller 2. */
+	if ((chan & 4) == 0) {
+		outb(DMA1_MODE, DMA37MD_CASCADE | chan);
+		outb(DMA1_SMSK, chan);
+	} else {
+		outb(DMA2_MODE, DMA37MD_CASCADE | (chan & 3));
+		outb(DMA2_SMSK, chan & 3);
+	}
+}
+
+/*
+ * isa_dmastart(): program 8237 DMA controller channel.
+ */
+
+/* Arguments forwarded from isa_dmastart() to the busdma load callback. */
+struct isa_dmastart_arg {
+	caddr_t addr;
+	int 	chan;
+	int 	flags;
+};
+
+/*
+ * busdma load callback: programs the 8237 with the physical address and
+ * byte count of the (single) mapped segment.  The register write order
+ * (mode, flip-flop clear, address, count, unmask) is mandated by the
+ * controller and must not be changed.
+ */
+static void isa_dmastart_cb(void *arg, bus_dma_segment_t *segs, int nseg,
+			    int error)
+{
+#if 0
+	caddr_t addr = ((struct isa_dmastart_arg *) arg)->addr;
+#endif
+	int chan = ((struct isa_dmastart_arg *) arg)->chan;
+	int flags = ((struct isa_dmastart_arg *) arg)->flags;
+	bus_addr_t phys = segs->ds_addr;
+	int nbytes = segs->ds_len;
+	int waport;
+
+	if (nseg != 1)
+		panic("isa_dmastart: transfer mapping not contiguous");
+
+#if 0
+	if ((chipset.sgmap == NULL) && 
+	    (pmap_extract(kernel_pmap, (vm_offset_t)addr)
+		> BUS_SPACE_MAXADDR_24BIT)) { 
+		/* we bounced */
+		dma_bounced |= (1 << chan);
+                /* copy bounce buffer on write */
+                if (!(flags & ISADMA_READ)) 
+                        bus_dmamap_sync(dma_tag[chan], dma_map[chan], 
+			                  BUS_DMASYNC_PREWRITE);
+	}
+#endif
+	
+	if ((chan & 4) == 0) {
+		/*
+		 * Program one of DMA channels 0..3.  These are
+		 * byte mode channels.
+		 */
+		/* set dma channel mode, and reset address ff */
+
+		/* If ISADMA_RAW flag is set, then use autoinitialise mode */
+		if (flags & ISADMA_RAW) {
+		  if (flags & ISADMA_READ)
+			outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_WRITE|chan);
+		  else
+			outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_READ|chan);
+		}
+		else
+		if (flags & ISADMA_READ)
+			outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|chan);
+		else
+			outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_READ|chan);
+		outb(DMA1_FFC, 0);
+
+		/* send start address */
+		waport =  DMA1_CHN(chan);
+		outb(waport, phys);
+		outb(waport, phys>>8);
+		outb(dmapageport[chan], phys>>16);
+
+		/* send count */
+		/* The 8237 count register holds (transfers - 1). */
+		outb(waport + 1, --nbytes);
+		outb(waport + 1, nbytes>>8);
+
+		/* unmask channel */
+		outb(DMA1_SMSK, chan);
+	} else {
+		/*
+		 * Program one of DMA channels 4..7.  These are
+		 * word mode channels.
+		 */
+		/* set dma channel mode, and reset address ff */
+
+		/* If ISADMA_RAW flag is set, then use autoinitialise mode */
+		if (flags & ISADMA_RAW) {
+		  if (flags & ISADMA_READ)
+			outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_WRITE|(chan&3));
+		  else
+			outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_READ|(chan&3));
+		}
+		else
+		if (flags & ISADMA_READ)
+			outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|(chan&3));
+		else
+			outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_READ|(chan&3));
+		outb(DMA2_FFC, 0);
+
+		/* send start address */
+		/* Word-mode channels take word addresses: shift phys by 1. */
+		waport = DMA2_CHN(chan - 4);
+		outb(waport, phys>>1);
+		outb(waport, phys>>9);
+		outb(dmapageport[chan], phys>>16);
+
+		/* send count */
+		nbytes >>= 1;
+		outb(waport + 2, --nbytes);
+		outb(waport + 2, nbytes>>8);
+
+		/* unmask channel */
+		outb(DMA2_SMSK, chan & 3);
+	}
+}
+
+/*
+ * Begin a DMA transfer of nbytes from/to addr on the given channel.
+ * The actual controller programming happens in isa_dmastart_cb() once
+ * busdma has produced the physical segment (bouncing if necessary).
+ */
+void
+isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
+{
+	struct isa_dmastart_arg args;
+
+#ifdef DIAGNOSTIC
+	if (chan & ~VALID_DMA_MASK)
+		panic("isa_dmastart: channel out of range");
+
+	/* Byte channels max 64K; word channels max 128K and even address. */
+	if ((chan < 4 && nbytes > (1<<16))
+	    || (chan >= 4 && (nbytes > (1<<17) || (uintptr_t)addr & 1)))
+		panic("isa_dmastart: impossible request");
+
+	if ((dma_inuse & (1 << chan)) == 0)
+		printf("isa_dmastart: channel %d not acquired\n", chan);
+#endif
+
+#if 0
+	/*
+	 * XXX This should be checked, but drivers like ad1848 only call
+	 * isa_dmastart() once because they use Auto DMA mode.  If we
+	 * leave this in, drivers that do this will print this continuously.
+	 */
+	if (dma_busy & (1 << chan))
+		printf("isa_dmastart: channel %d busy\n", chan);
+#endif
+
+	if (!dma_tag[chan] || !dma_map[chan])
+		panic("isa_dmastart: called without isa_dma_init");
+
+	dma_busy |= (1 << chan);
+
+	if (flags & ISADMA_RAW) {
+		dma_auto_mode |= (1 << chan);
+	} else { 
+		dma_auto_mode &= ~(1 << chan);
+	}
+
+	/*
+	 * Freeze dma while updating registers.
+	 */
+	outb(chan & 4 ? DMA2_SMSK : DMA1_SMSK, (chan & 3) | 4);
+
+        args.addr = addr;
+	args.chan = chan;
+	args.flags = flags;
+	bus_dmamap_load(dma_tag[chan], dma_map[chan], addr, nbytes,
+			isa_dmastart_cb, &args, 0);
+}
+
+/*
+ * Complete a DMA transfer: sync the bounce buffer back on reads,
+ * mask the channel and unload the busdma map (unless the channel is
+ * running in auto-init mode), and clear the busy bit.
+ */
+void
+isa_dmadone(int flags, caddr_t addr, int nbytes, int chan)
+{  
+#ifdef DIAGNOSTIC
+	if (chan & ~VALID_DMA_MASK)
+		panic("isa_dmadone: channel out of range");
+
+	if ((dma_inuse & (1 << chan)) == 0)
+		printf("isa_dmadone: channel %d not acquired\n", chan);
+#endif
+
+	if (((dma_busy & (1 << chan)) == 0) && 
+	    (dma_auto_mode & (1 << chan)) == 0 )
+		printf("isa_dmadone: channel %d not busy\n", chan);
+
+	if (dma_bounced & (1 << chan)) {
+		/* copy bounce buffer on read */
+		if (flags & ISADMA_READ) {
+			bus_dmamap_sync(dma_tag[chan], dma_map[chan],
+			                  BUS_DMASYNC_POSTREAD);
+		}
+		dma_bounced &= ~(1 << chan);
+	}
+
+	if ((dma_auto_mode & (1 << chan)) == 0) {
+		outb(chan & 4 ? DMA2_SMSK : DMA1_SMSK, (chan & 3) | 4);
+		bus_dmamap_unload(dma_tag[chan], dma_map[chan]);
+	}
+
+	dma_busy &= ~(1 << chan);
+}
+
+/*
+ * Query the progress of a transfer on a DMA channel.
+ *
+ * To avoid having to interrupt a transfer in progress, we sample
+ * each of the high and low databytes twice, and apply the following
+ * logic to determine the correct count.
+ *
+ * Reads are performed with interrupts disabled, thus it is to be
+ * expected that the time between reads is very small.  At most
+ * one rollover in the low count byte can be expected within the
+ * four reads that are performed.
+ *
+ * There are three gaps in which a rollover can occur :
+ *
+ * - read low1
+ *              gap1
+ * - read high1
+ *              gap2
+ * - read low2
+ *              gap3
+ * - read high2
+ *
+ * If a rollover occurs in gap1 or gap2, the low2 value will be
+ * greater than the low1 value.  In this case, low2 and high2 are a
+ * corresponding pair. 
+ *
+ * In any other case, low1 and high1 can be considered to be correct.
+ *
+ * The function returns the number of bytes remaining in the transfer,
+ * or -1 if the channel requested is not active.
+ *
+ */
+int
+isa_dmastatus(int chan)
+{
+	u_long	cnt = 0;
+	int	ffport, waport;
+	u_long	low1, high1, low2, high2;
+	int s;
+
+	/* channel active? */
+	if ((dma_inuse & (1 << chan)) == 0) {
+		printf("isa_dmastatus: channel %d not active\n", chan);
+		return(-1);
+	}
+	/* channel busy? */
+
+	if (((dma_busy & (1 << chan)) == 0) &&
+	    (dma_auto_mode & (1 << chan)) == 0 ) {
+	    printf("chan %d not busy\n", chan);
+	    return -2 ;
+	}	
+	if (chan < 4) {			/* low DMA controller */
+		ffport = DMA1_FFC;
+		waport = DMA1_CHN(chan) + 1;
+	} else {			/* high DMA controller */
+		ffport = DMA2_FFC;
+		waport = DMA2_CHN(chan - 4) + 2;
+	}
+
+	s = splhigh();			/* no interrupts Mr Jones! */
+	outb(ffport, 0);		/* clear register LSB flipflop */
+	low1 = inb(waport);
+	high1 = inb(waport);
+	outb(ffport, 0);		/* clear again */
+	low2 = inb(waport);
+	high2 = inb(waport);
+	splx(s);			/* enable interrupts again */
+
+	/* 
+	 * Now decide if a wrap has tried to skew our results.
+	 * Note that after TC, the count will read 0xffff, while we want 
+	 * to return zero, so we add and then mask to compensate.
+	 */
+	if (low1 >= low2) {
+		cnt = (low1 + (high1 << 8) + 1) & 0xffff;
+	} else {
+		cnt = (low2 + (high2 << 8) + 1) & 0xffff;
+	}
+
+	if (chan >= 4)			/* high channels move words */
+		cnt *= 2;
+	return(cnt);
+}
+
+/*
+ * Reached terminal count yet ?
+ */
+/*
+ * Return nonzero iff the channel's terminal-count bit is set in the
+ * relevant controller's status register.
+ */
+int
+isa_dmatc(int chan)
+{
+
+	if (chan < 4)
+		return(inb(DMA1_STATUS) & (1 << chan));
+	else
+		return(inb(DMA2_STATUS) & (1 << (chan & 3)));
+}
+
+/*
+ * Stop a DMA transfer currently in progress.
+ */
+int
+isa_dmastop(int chan) 
+{
+	if ((dma_inuse & (1 << chan)) == 0)
+		printf("isa_dmastop: channel %d not acquired\n", chan);  
+
+	if (((dma_busy & (1 << chan)) == 0) &&
+	    ((dma_auto_mode & (1 << chan)) == 0)) {
+		printf("chan %d not busy\n", chan);
+		return -2 ;
+	}
+    
+	/* Mask (disable) the channel, then report the remaining count. */
+	if ((chan & 4) == 0) {
+		outb(DMA1_SMSK, (chan & 3) | 4 /* disable mask */);
+	} else {
+		outb(DMA2_SMSK, (chan & 3) | 4 /* disable mask */);
+	}
+	return(isa_dmastatus(chan));
+}


Property changes on: trunk/sys/ia64/isa/isa_dma.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/ia64/pci/pci_cfgreg.c
===================================================================
--- trunk/sys/ia64/pci/pci_cfgreg.c	                        (rev 0)
+++ trunk/sys/ia64/pci/pci_cfgreg.c	2018-05-26 22:00:12 UTC (rev 9990)
@@ -0,0 +1,105 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2010 Marcel Moolenaar
+ * Copyright (c) 2001 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/ia64/pci/pci_cfgreg.c 253560 2013-07-23 03:03:17Z marcel $
+ */
+
+#include <sys/param.h>
+#include <machine/cpufunc.h>
+#include <machine/pci_cfgreg.h>
+#include <machine/sal.h>
+
+/*
+ * Encode (domain, bus, slot, function, register) into the address
+ * format expected by the SAL PCI config procedures:
+ * dom<<24 | bus<<16 | slot<<11 | func<<8 | reg.
+ * Returns ~0ul if any component is outside its architected range.
+ */
+static u_long
+pci_sal_address(int dom, int bus, int slot, int func, int reg)
+{
+	u_long addr;
+
+	addr = ~0ul;
+	if (dom >= 0 && dom <= 255 && bus >= 0 && bus <= 255 &&
+	    slot >= 0 && slot <= 31 && func >= 0 && func <= 7 &&
+	    reg >= 0 && reg <= 255) {
+		addr = ((u_long)dom << 24) | ((u_long)bus << 16) |
+		    ((u_long)slot << 11) | ((u_long)func << 8) | (u_long)reg;
+	}
+	return (addr);
+}
+
+/*
+ * A config access is valid when it is 1, 2 or 4 bytes wide and the
+ * register offset is naturally aligned to that width.
+ */
+static int
+pci_valid_access(int reg, int len)
+{
+	int ok;
+
+	ok = ((len == 1 || len == 2 || len == 4) && (reg & (len - 1)) == 0)
+	    ? 1 : 0;
+	return (ok);
+}
+
+/*
+ * Config-space access is always available through SAL; report success
+ * unconditionally.
+ */
+int
+pci_cfgregopen(void)
+{
+	return (1);
+}
+
+/*
+ * Read len bytes from config space via SAL_PCI_CONFIG_READ.  The domain
+ * is carried in the upper bits of 'bus' (bus >> 8).  Returns ~0 for an
+ * invalid address, an invalid access width/alignment, or a SAL error.
+ * Interrupts are disabled across the SAL call.
+ */
+uint32_t
+pci_cfgregread(int bus, int slot, int func, int reg, int len)
+{
+	struct ia64_sal_result res;
+	register_t is;
+	u_long addr;
+
+	addr = pci_sal_address(bus >> 8, bus & 0xff, slot, func, reg);
+	if (addr == ~0ul)
+		return (~0);
+
+	if (!pci_valid_access(reg, len))
+		return (~0);
+
+	is = intr_disable();
+	res = ia64_sal_entry(SAL_PCI_CONFIG_READ, addr, len, 0, 0, 0, 0, 0);
+	intr_restore(is);
+
+	return ((res.sal_status < 0) ? ~0 : res.sal_result[0]);
+}
+
+/*
+ * Write len bytes of 'data' to config space via SAL_PCI_CONFIG_WRITE.
+ * The domain is carried in the upper bits of 'bus' (bus >> 8).  Invalid
+ * addresses or access widths are silently ignored; SAL status is not
+ * checked (the call provides no way to report the error to the caller).
+ * Interrupts are disabled across the SAL call.
+ */
+void
+pci_cfgregwrite(int bus, int slot, int func, int reg, uint32_t data, int len)
+{
+	struct ia64_sal_result res;
+	register_t is;
+	u_long addr;
+
+	addr = pci_sal_address(bus >> 8, bus & 0xff, slot, func, reg);
+	if (addr == ~0ul)
+		return;
+
+	if (!pci_valid_access(reg, len))
+		return;
+
+	is = intr_disable();
+	res = ia64_sal_entry(SAL_PCI_CONFIG_WRITE, addr, len, data, 0, 0, 0, 0);
+	intr_restore(is);
+}


Property changes on: trunk/sys/ia64/pci/pci_cfgreg.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property


More information about the Midnightbsd-cvs mailing list