[Midnightbsd-cvs] src [10163] trunk/sys/cddl/dev: sync
laffer1 at midnightbsd.org
Fri Jun 1 18:42:25 EDT 2018
Revision: 10163
http://svnweb.midnightbsd.org/src/?rev=10163
Author: laffer1
Date: 2018-06-01 18:42:24 -0400 (Fri, 01 Jun 2018)
Log Message:
-----------
sync
Modified Paths:
--------------
trunk/sys/cddl/dev/dtmalloc/dtmalloc.c
trunk/sys/cddl/dev/fbt/fbt.c
trunk/sys/cddl/dev/lockstat/lockstat.c
trunk/sys/cddl/dev/profile/profile.c
trunk/sys/cddl/dev/prototype.c
trunk/sys/cddl/dev/sdt/sdt.c
trunk/sys/cddl/dev/systrace/systrace.c
Added Paths:
-----------
trunk/sys/cddl/dev/fbt/fbt_powerpc.c
Modified: trunk/sys/cddl/dev/dtmalloc/dtmalloc.c
===================================================================
--- trunk/sys/cddl/dev/dtmalloc/dtmalloc.c 2018-06-01 22:42:15 UTC (rev 10162)
+++ trunk/sys/cddl/dev/dtmalloc/dtmalloc.c 2018-06-01 22:42:24 UTC (rev 10163)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
/*
* CDDL HEADER START
*
@@ -20,7 +21,7 @@
*
* Portions Copyright 2006-2008 John Birrell jb at freebsd.org
*
- * $FreeBSD: release/9.2.0/sys/cddl/dev/dtmalloc/dtmalloc.c 252860 2013-07-06 03:39:40Z markj $
+ * $FreeBSD: stable/10/sys/cddl/dev/dtmalloc/dtmalloc.c 252325 2013-06-28 03:14:40Z markj $
*
*/
Modified: trunk/sys/cddl/dev/fbt/fbt.c
===================================================================
--- trunk/sys/cddl/dev/fbt/fbt.c 2018-06-01 22:42:15 UTC (rev 10162)
+++ trunk/sys/cddl/dev/fbt/fbt.c 2018-06-01 22:42:24 UTC (rev 10163)
@@ -21,7 +21,7 @@
*
* Portions Copyright 2006-2008 John Birrell jb at freebsd.org
*
- * $FreeBSD: src/sys/cddl/dev/fbt/fbt.c,v 1.1.2.1.2.1 2008/11/25 02:59:29 kensmith Exp $
+ * $FreeBSD: stable/10/sys/cddl/dev/fbt/fbt.c 282748 2015-05-11 07:54:39Z avg $
*
*/
@@ -429,13 +429,6 @@
return;
/*
- * The cyclic timer subsystem can be built as a module and DTrace
- * depends on that, so it is ineligible too.
- */
- if (strcmp(modname, "cyclic") == 0)
- return;
-
- /*
* To register with DTrace, a module must list 'dtrace' as a
* dependency in order for the kernel linker to resolve
* symbols like dtrace_register(). All modules with such a
@@ -575,7 +568,6 @@
fbt_ctfoff_init(modctl_t *lf, linker_ctf_t *lc)
{
const Elf_Sym *symp = lc->symtab;;
- const char *name;
const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
const uint8_t *ctfdata = lc->ctftab + sizeof(ctf_header_t);
int i;
@@ -607,11 +599,6 @@
continue;
}
- if (symp->st_name < lc->strcnt)
- name = lc->strtab + symp->st_name;
- else
- name = "(?)";
-
switch (ELF_ST_TYPE(symp->st_info)) {
case STT_OBJECT:
if (objtoff >= hp->cth_funcoff ||
@@ -1336,6 +1323,15 @@
return;
}
+static int
+fbt_linker_file_cb(linker_file_t lf, void *arg)
+{
+
+ fbt_provide_module(arg, lf);
+
+ return (0);
+}
+
static void
fbt_load(void *dummy)
{
@@ -1360,9 +1356,11 @@
if (dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_USER,
NULL, &fbt_pops, NULL, &fbt_id) != 0)
return;
+
+ /* Create probes for the kernel and already-loaded modules. */
+ linker_file_foreach(fbt_linker_file_cb, NULL);
}
-
static int
fbt_unload()
{
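The fbt.c hunks above drop the old "cyclic" special case and, at load time, walk every linker file that is already loaded, so probes exist for the kernel and any modules loaded before fbt itself. A minimal sketch of the same linker_file_foreach() callback pattern (kernel context; the counting callback and count_loaded_files() are hypothetical, only linker_file_foreach() and linker_file_t come from <sys/linker.h>):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>

/* Hypothetical predicate: count the linker files currently loaded. */
static int
count_linker_file_cb(linker_file_t lf __unused, void *arg)
{
	int *count = arg;

	(*count)++;
	return (0);		/* returning non-zero stops the iteration */
}

static void
count_loaded_files(void)
{
	int count = 0;

	linker_file_foreach(count_linker_file_cb, &count);
	printf("%d linker files loaded\n", count);
}
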
Added: trunk/sys/cddl/dev/fbt/fbt_powerpc.c
===================================================================
--- trunk/sys/cddl/dev/fbt/fbt_powerpc.c (rev 0)
+++ trunk/sys/cddl/dev/fbt/fbt_powerpc.c 2018-06-01 22:42:24 UTC (rev 10163)
@@ -0,0 +1,1352 @@
+/* $MidnightBSD$ */
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * Portions Copyright 2006-2008 John Birrell jb at freebsd.org
+ * Portions Copyright 2013 Justin Hibbits jhibbits at freebsd.org
+ *
+ * $FreeBSD: stable/10/sys/cddl/dev/fbt/fbt_powerpc.c 260670 2014-01-15 05:19:37Z jhibbits $
+ *
+ */
+
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/cpuvar.h>
+#include <sys/fcntl.h>
+#include <sys/filio.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/kmem.h>
+#include <sys/kthread.h>
+#include <sys/limits.h>
+#include <sys/linker.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/poll.h>
+#include <sys/proc.h>
+#include <sys/selinfo.h>
+#include <sys/smp.h>
+#include <sys/syscall.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <sys/uio.h>
+#include <sys/unistd.h>
+#include <machine/md_var.h>
+#include <machine/stdarg.h>
+
+#include <sys/dtrace.h>
+#include <sys/dtrace_bsd.h>
+
+static MALLOC_DEFINE(M_FBT, "fbt", "Function Boundary Tracing");
+
+#define FBT_PATCHVAL 0x7c810808
+#define FBT_MFLR_R0 0x7c0802a6
+#define FBT_MTLR_R0 0x7c0803a6
+#define FBT_BLR 0x4e800020
+#define FBT_BCTR 0x4e800030
+#define FBT_BRANCH 0x48000000
+#define FBT_BR_MASK 0x03fffffc
+#define FBT_IS_JUMP(instr) ((instr & ~FBT_BR_MASK) == FBT_BRANCH)
+
+static d_open_t fbt_open;
+static int fbt_unload(void);
+static void fbt_getargdesc(void *, dtrace_id_t, void *, dtrace_argdesc_t *);
+static void fbt_provide_module(void *, modctl_t *);
+static void fbt_destroy(void *, dtrace_id_t, void *);
+static void fbt_enable(void *, dtrace_id_t, void *);
+static void fbt_disable(void *, dtrace_id_t, void *);
+static void fbt_load(void *);
+static void fbt_suspend(void *, dtrace_id_t, void *);
+static void fbt_resume(void *, dtrace_id_t, void *);
+
+#define FBT_ENTRY "entry"
+#define FBT_RETURN "return"
+#define FBT_ADDR2NDX(addr) ((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)
+#define FBT_PROBETAB_SIZE 0x8000 /* 32k entries -- 128K total */
+
+static struct cdevsw fbt_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = fbt_open,
+ .d_name = "fbt",
+};
+
+static dtrace_pattr_t fbt_attr = {
+{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
+{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
+};
+
+static dtrace_pops_t fbt_pops = {
+ NULL,
+ fbt_provide_module,
+ fbt_enable,
+ fbt_disable,
+ fbt_suspend,
+ fbt_resume,
+ fbt_getargdesc,
+ NULL,
+ NULL,
+ fbt_destroy
+};
+
+typedef struct fbt_probe {
+ struct fbt_probe *fbtp_hashnext;
+ uint32_t *fbtp_patchpoint;
+ int8_t fbtp_rval;
+ uint32_t fbtp_patchval;
+ uint32_t fbtp_savedval;
+ uintptr_t fbtp_roffset;
+ dtrace_id_t fbtp_id;
+ const char *fbtp_name;
+ modctl_t *fbtp_ctl;
+ int fbtp_loadcnt;
+ int fbtp_primary;
+ int fbtp_invop_cnt;
+ int fbtp_symindx;
+ struct fbt_probe *fbtp_next;
+} fbt_probe_t;
+
+static struct cdev *fbt_cdev;
+static dtrace_provider_id_t fbt_id;
+static fbt_probe_t **fbt_probetab;
+static int fbt_probetab_size;
+static int fbt_probetab_mask;
+static int fbt_verbose = 0;
+
+static int
+fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
+{
+ struct trapframe *frame = (struct trapframe *)stack;
+ solaris_cpu_t *cpu = &solaris_cpu[curcpu];
+ fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];
+ uintptr_t tmp;
+
+ for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
+ if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
+ fbt->fbtp_invop_cnt++;
+ if (fbt->fbtp_roffset == 0) {
+ cpu->cpu_dtrace_caller = addr;
+
+ dtrace_probe(fbt->fbtp_id, frame->fixreg[3],
+ frame->fixreg[4], frame->fixreg[5],
+ frame->fixreg[6], frame->fixreg[7]);
+
+ cpu->cpu_dtrace_caller = 0;
+ } else {
+
+ dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset,
+ rval, 0, 0, 0);
+ /*
+ * The caller doesn't have the fbt item, so
+ * fixup tail calls here.
+ */
+ if (fbt->fbtp_rval == DTRACE_INVOP_JUMP) {
+ frame->srr0 = (uintptr_t)fbt->fbtp_patchpoint;
+ tmp = fbt->fbtp_savedval & FBT_BR_MASK;
+ /* Sign extend. */
+ if (tmp & 0x02000000)
+#ifdef __powerpc64__
+ tmp |= 0xfffffffffc000000ULL;
+#else
+ tmp |= 0xfc000000UL;
+#endif
+ frame->srr0 += tmp;
+ }
+ cpu->cpu_dtrace_caller = 0;
+ }
+
+ return (fbt->fbtp_rval);
+ }
+ }
+
+ return (0);
+}
+
+static int
+fbt_provide_module_function(linker_file_t lf, int symindx,
+ linker_symval_t *symval, void *opaque)
+{
+ char *modname = opaque;
+ const char *name = symval->name;
+ fbt_probe_t *fbt, *retfbt;
+ int j;
+ u_int32_t *instr, *limit;
+
+ /* PowerPC64 prefixes symbol names with '.'; ignore the prefix. */
+ if (name[0] == '.')
+ name++;
+
+ if (strncmp(name, "dtrace_", 7) == 0 &&
+ strncmp(name, "dtrace_safe_", 12) != 0) {
+ /*
+ * Anything beginning with "dtrace_" may be called
+ * from probe context unless it explicitly indicates
+ * that it won't be called from probe context by
+ * using the prefix "dtrace_safe_".
+ */
+ return (0);
+ }
+
+ if (name[0] == '_' && name[1] == '_')
+ return (0);
+
+ instr = (u_int32_t *) symval->value;
+ limit = (u_int32_t *) (symval->value + symval->size);
+
+ for (; instr < limit; instr++)
+ if (*instr == FBT_MFLR_R0)
+ break;
+
+ if (*instr != FBT_MFLR_R0)
+ return (0);
+
+ fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
+ fbt->fbtp_name = name;
+ fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
+ name, FBT_ENTRY, 3, fbt);
+ fbt->fbtp_patchpoint = instr;
+ fbt->fbtp_ctl = lf;
+ fbt->fbtp_loadcnt = lf->loadcnt;
+ fbt->fbtp_savedval = *instr;
+ fbt->fbtp_patchval = FBT_PATCHVAL;
+ fbt->fbtp_rval = DTRACE_INVOP_MFLR_R0;
+ fbt->fbtp_symindx = symindx;
+
+ fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
+ fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
+
+ lf->fbt_nentries++;
+
+ retfbt = NULL;
+again:
+ if (instr >= limit)
+ return (0);
+
+ /*
+ * We (desperately) want to avoid erroneously instrumenting a
+ * jump table. To determine if we're looking at a true instruction
+ * sequence or an inline jump table that happens to contain the same
+ * byte sequences, we resort to some heuristic sleaze: we treat this
+ * instruction as being contained within a pointer, and see if that
+ * pointer points to within the body of the function. If it does, we
+ * refuse to instrument it.
+ */
+ {
+ uint32_t *ptr;
+
+ ptr = *(uint32_t **)instr;
+
+ if (ptr >= (uint32_t *) symval->value && ptr < limit) {
+ instr++;
+ goto again;
+ }
+ }
+
+ if (*instr != FBT_MTLR_R0) {
+ instr++;
+ goto again;
+ }
+
+ instr++;
+
+ for (j = 0; j < 12 && instr < limit; j++, instr++) {
+ if ((*instr == FBT_BCTR) || (*instr == FBT_BLR) ||
+ FBT_IS_JUMP(*instr))
+ break;
+ }
+
+ if (!(*instr == FBT_BCTR || *instr == FBT_BLR || FBT_IS_JUMP(*instr)))
+ goto again;
+
+ /*
+ * We have a winner!
+ */
+ fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
+ fbt->fbtp_name = name;
+
+ if (retfbt == NULL) {
+ fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
+ name, FBT_RETURN, 5, fbt);
+ } else {
+ retfbt->fbtp_next = fbt;
+ fbt->fbtp_id = retfbt->fbtp_id;
+ }
+
+ retfbt = fbt;
+ fbt->fbtp_patchpoint = instr;
+ fbt->fbtp_ctl = lf;
+ fbt->fbtp_loadcnt = lf->loadcnt;
+ fbt->fbtp_symindx = symindx;
+
+ if (*instr == FBT_BCTR)
+ fbt->fbtp_rval = DTRACE_INVOP_BCTR;
+ else if (*instr == FBT_BLR)
+ fbt->fbtp_rval = DTRACE_INVOP_RET;
+ else
+ fbt->fbtp_rval = DTRACE_INVOP_JUMP;
+
+ fbt->fbtp_savedval = *instr;
+ fbt->fbtp_patchval = FBT_PATCHVAL;
+ fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
+ fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
+
+ lf->fbt_nentries++;
+
+ instr += 4;
+ goto again;
+}
+
+static void
+fbt_provide_module(void *arg, modctl_t *lf)
+{
+ char modname[MAXPATHLEN];
+ int i;
+ size_t len;
+
+ strlcpy(modname, lf->filename, sizeof(modname));
+ len = strlen(modname);
+ if (len > 3 && strcmp(modname + len - 3, ".ko") == 0)
+ modname[len - 3] = '\0';
+
+ /*
+ * Employees of dtrace and their families are ineligible. Void
+ * where prohibited.
+ */
+ if (strcmp(modname, "dtrace") == 0)
+ return;
+
+ /*
+ * The cyclic timer subsystem can be built as a module and DTrace
+ * depends on that, so it is ineligible too.
+ */
+ if (strcmp(modname, "cyclic") == 0)
+ return;
+
+ /*
+ * To register with DTrace, a module must list 'dtrace' as a
+ * dependency in order for the kernel linker to resolve
+ * symbols like dtrace_register(). All modules with such a
+ * dependency are ineligible for FBT tracing.
+ */
+ for (i = 0; i < lf->ndeps; i++)
+ if (strncmp(lf->deps[i]->filename, "dtrace", 6) == 0)
+ return;
+
+ if (lf->fbt_nentries) {
+ /*
+ * This module has some FBT entries allocated; we're afraid
+ * to screw with it.
+ */
+ return;
+ }
+
+ /*
+ * List the functions in the module and the symbol values.
+ */
+ (void) linker_file_function_listall(lf, fbt_provide_module_function, modname);
+}
+
+static void
+fbt_destroy(void *arg, dtrace_id_t id, void *parg)
+{
+ fbt_probe_t *fbt = parg, *next, *hash, *last;
+ modctl_t *ctl;
+ int ndx;
+
+ do {
+ ctl = fbt->fbtp_ctl;
+
+ ctl->fbt_nentries--;
+
+ /*
+ * Now we need to remove this probe from the fbt_probetab.
+ */
+ ndx = FBT_ADDR2NDX(fbt->fbtp_patchpoint);
+ last = NULL;
+ hash = fbt_probetab[ndx];
+
+ while (hash != fbt) {
+ ASSERT(hash != NULL);
+ last = hash;
+ hash = hash->fbtp_hashnext;
+ }
+
+ if (last != NULL) {
+ last->fbtp_hashnext = fbt->fbtp_hashnext;
+ } else {
+ fbt_probetab[ndx] = fbt->fbtp_hashnext;
+ }
+
+ next = fbt->fbtp_next;
+ free(fbt, M_FBT);
+
+ fbt = next;
+ } while (fbt != NULL);
+}
+
+static void
+fbt_enable(void *arg, dtrace_id_t id, void *parg)
+{
+ fbt_probe_t *fbt = parg;
+ modctl_t *ctl = fbt->fbtp_ctl;
+
+ ctl->nenabled++;
+
+ /*
+ * Now check that our modctl has the expected load count. If it
+ * doesn't, this module must have been unloaded and reloaded -- and
+ * we're not going to touch it.
+ */
+ if (ctl->loadcnt != fbt->fbtp_loadcnt) {
+ if (fbt_verbose) {
+ printf("fbt is failing for probe %s "
+ "(module %s reloaded)",
+ fbt->fbtp_name, ctl->filename);
+ }
+
+ return;
+ }
+
+ for (; fbt != NULL; fbt = fbt->fbtp_next) {
+ *fbt->fbtp_patchpoint = fbt->fbtp_patchval;
+ __syncicache(fbt->fbtp_patchpoint, 4);
+ }
+}
+
+static void
+fbt_disable(void *arg, dtrace_id_t id, void *parg)
+{
+ fbt_probe_t *fbt = parg;
+ modctl_t *ctl = fbt->fbtp_ctl;
+
+ ASSERT(ctl->nenabled > 0);
+ ctl->nenabled--;
+
+ if ((ctl->loadcnt != fbt->fbtp_loadcnt))
+ return;
+
+ for (; fbt != NULL; fbt = fbt->fbtp_next) {
+ *fbt->fbtp_patchpoint = fbt->fbtp_savedval;
+ __syncicache(fbt->fbtp_patchpoint, 4);
+ }
+}
+
+static void
+fbt_suspend(void *arg, dtrace_id_t id, void *parg)
+{
+ fbt_probe_t *fbt = parg;
+ modctl_t *ctl = fbt->fbtp_ctl;
+
+ ASSERT(ctl->nenabled > 0);
+
+ if ((ctl->loadcnt != fbt->fbtp_loadcnt))
+ return;
+
+ for (; fbt != NULL; fbt = fbt->fbtp_next) {
+ *fbt->fbtp_patchpoint = fbt->fbtp_savedval;
+ __syncicache(fbt->fbtp_patchpoint, 4);
+ }
+}
+
+static void
+fbt_resume(void *arg, dtrace_id_t id, void *parg)
+{
+ fbt_probe_t *fbt = parg;
+ modctl_t *ctl = fbt->fbtp_ctl;
+
+ ASSERT(ctl->nenabled > 0);
+
+ if ((ctl->loadcnt != fbt->fbtp_loadcnt))
+ return;
+
+ for (; fbt != NULL; fbt = fbt->fbtp_next) {
+ *fbt->fbtp_patchpoint = fbt->fbtp_patchval;
+ __syncicache(fbt->fbtp_patchpoint, 4);
+ }
+}
+
+static int
+fbt_ctfoff_init(modctl_t *lf, linker_ctf_t *lc)
+{
+ const Elf_Sym *symp = lc->symtab;
+ const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
+ const uint8_t *ctfdata = lc->ctftab + sizeof(ctf_header_t);
+ int i;
+ uint32_t *ctfoff;
+ uint32_t objtoff = hp->cth_objtoff;
+ uint32_t funcoff = hp->cth_funcoff;
+ ushort_t info;
+ ushort_t vlen;
+
+ /* Sanity check. */
+ if (hp->cth_magic != CTF_MAGIC) {
+ printf("Bad magic value in CTF data of '%s'\n",lf->pathname);
+ return (EINVAL);
+ }
+
+ if (lc->symtab == NULL) {
+ printf("No symbol table in '%s'\n",lf->pathname);
+ return (EINVAL);
+ }
+
+ if ((ctfoff = malloc(sizeof(uint32_t) * lc->nsym, M_LINKER, M_WAITOK)) == NULL)
+ return (ENOMEM);
+
+ *lc->ctfoffp = ctfoff;
+
+ for (i = 0; i < lc->nsym; i++, ctfoff++, symp++) {
+ if (symp->st_name == 0 || symp->st_shndx == SHN_UNDEF) {
+ *ctfoff = 0xffffffff;
+ continue;
+ }
+
+ switch (ELF_ST_TYPE(symp->st_info)) {
+ case STT_OBJECT:
+ if (objtoff >= hp->cth_funcoff ||
+ (symp->st_shndx == SHN_ABS && symp->st_value == 0)) {
+ *ctfoff = 0xffffffff;
+ break;
+ }
+
+ *ctfoff = objtoff;
+ objtoff += sizeof (ushort_t);
+ break;
+
+ case STT_FUNC:
+ if (funcoff >= hp->cth_typeoff) {
+ *ctfoff = 0xffffffff;
+ break;
+ }
+
+ *ctfoff = funcoff;
+
+ info = *((const ushort_t *)(ctfdata + funcoff));
+ vlen = CTF_INFO_VLEN(info);
+
+ /*
+ * If we encounter a zero pad at the end, just skip it.
+ * Otherwise skip over the function and its return type
+ * (+2) and the argument list (vlen).
+ */
+ if (CTF_INFO_KIND(info) == CTF_K_UNKNOWN && vlen == 0)
+ funcoff += sizeof (ushort_t); /* skip pad */
+ else
+ funcoff += sizeof (ushort_t) * (vlen + 2);
+ break;
+
+ default:
+ *ctfoff = 0xffffffff;
+ break;
+ }
+ }
+
+ return (0);
+}
+
+static ssize_t
+fbt_get_ctt_size(uint8_t version, const ctf_type_t *tp, ssize_t *sizep,
+ ssize_t *incrementp)
+{
+ ssize_t size, increment;
+
+ if (version > CTF_VERSION_1 &&
+ tp->ctt_size == CTF_LSIZE_SENT) {
+ size = CTF_TYPE_LSIZE(tp);
+ increment = sizeof (ctf_type_t);
+ } else {
+ size = tp->ctt_size;
+ increment = sizeof (ctf_stype_t);
+ }
+
+ if (sizep)
+ *sizep = size;
+ if (incrementp)
+ *incrementp = increment;
+
+ return (size);
+}
+
+static int
+fbt_typoff_init(linker_ctf_t *lc)
+{
+ const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
+ const ctf_type_t *tbuf;
+ const ctf_type_t *tend;
+ const ctf_type_t *tp;
+ const uint8_t *ctfdata = lc->ctftab + sizeof(ctf_header_t);
+ int ctf_typemax = 0;
+ uint32_t *xp;
+ ulong_t pop[CTF_K_MAX + 1] = { 0 };
+
+
+ /* Sanity check. */
+ if (hp->cth_magic != CTF_MAGIC)
+ return (EINVAL);
+
+ tbuf = (const ctf_type_t *) (ctfdata + hp->cth_typeoff);
+ tend = (const ctf_type_t *) (ctfdata + hp->cth_stroff);
+
+ int child = hp->cth_parname != 0;
+
+ /*
+ * We make two passes through the entire type section. In this first
+ * pass, we count the number of each type and the total number of types.
+ */
+ for (tp = tbuf; tp < tend; ctf_typemax++) {
+ ushort_t kind = CTF_INFO_KIND(tp->ctt_info);
+ ulong_t vlen = CTF_INFO_VLEN(tp->ctt_info);
+ ssize_t size, increment;
+
+ size_t vbytes;
+ uint_t n;
+
+ (void) fbt_get_ctt_size(hp->cth_version, tp, &size, &increment);
+
+ switch (kind) {
+ case CTF_K_INTEGER:
+ case CTF_K_FLOAT:
+ vbytes = sizeof (uint_t);
+ break;
+ case CTF_K_ARRAY:
+ vbytes = sizeof (ctf_array_t);
+ break;
+ case CTF_K_FUNCTION:
+ vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
+ break;
+ case CTF_K_STRUCT:
+ case CTF_K_UNION:
+ if (size < CTF_LSTRUCT_THRESH) {
+ ctf_member_t *mp = (ctf_member_t *)
+ ((uintptr_t)tp + increment);
+
+ vbytes = sizeof (ctf_member_t) * vlen;
+ for (n = vlen; n != 0; n--, mp++)
+ child |= CTF_TYPE_ISCHILD(mp->ctm_type);
+ } else {
+ ctf_lmember_t *lmp = (ctf_lmember_t *)
+ ((uintptr_t)tp + increment);
+
+ vbytes = sizeof (ctf_lmember_t) * vlen;
+ for (n = vlen; n != 0; n--, lmp++)
+ child |=
+ CTF_TYPE_ISCHILD(lmp->ctlm_type);
+ }
+ break;
+ case CTF_K_ENUM:
+ vbytes = sizeof (ctf_enum_t) * vlen;
+ break;
+ case CTF_K_FORWARD:
+ /*
+ * For forward declarations, ctt_type is the CTF_K_*
+ * kind for the tag, so bump that population count too.
+ * If ctt_type is unknown, treat the tag as a struct.
+ */
+ if (tp->ctt_type == CTF_K_UNKNOWN ||
+ tp->ctt_type >= CTF_K_MAX)
+ pop[CTF_K_STRUCT]++;
+ else
+ pop[tp->ctt_type]++;
+ /*FALLTHRU*/
+ case CTF_K_UNKNOWN:
+ vbytes = 0;
+ break;
+ case CTF_K_POINTER:
+ case CTF_K_TYPEDEF:
+ case CTF_K_VOLATILE:
+ case CTF_K_CONST:
+ case CTF_K_RESTRICT:
+ child |= CTF_TYPE_ISCHILD(tp->ctt_type);
+ vbytes = 0;
+ break;
+ default:
+ printf("%s(%d): detected invalid CTF kind -- %u\n", __func__, __LINE__, kind);
+ return (EIO);
+ }
+ tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
+ pop[kind]++;
+ }
+
+ /* account for a sentinel value below */
+ ctf_typemax++;
+ *lc->typlenp = ctf_typemax;
+
+ if ((xp = malloc(sizeof(uint32_t) * ctf_typemax, M_LINKER, M_ZERO | M_WAITOK)) == NULL)
+ return (ENOMEM);
+
+ *lc->typoffp = xp;
+
+ /* type id 0 is used as a sentinel value */
+ *xp++ = 0;
+
+ /*
+ * In the second pass, fill in the type offset.
+ */
+ for (tp = tbuf; tp < tend; xp++) {
+ ushort_t kind = CTF_INFO_KIND(tp->ctt_info);
+ ulong_t vlen = CTF_INFO_VLEN(tp->ctt_info);
+ ssize_t size, increment;
+
+ size_t vbytes;
+ uint_t n;
+
+ (void) fbt_get_ctt_size(hp->cth_version, tp, &size, &increment);
+
+ switch (kind) {
+ case CTF_K_INTEGER:
+ case CTF_K_FLOAT:
+ vbytes = sizeof (uint_t);
+ break;
+ case CTF_K_ARRAY:
+ vbytes = sizeof (ctf_array_t);
+ break;
+ case CTF_K_FUNCTION:
+ vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
+ break;
+ case CTF_K_STRUCT:
+ case CTF_K_UNION:
+ if (size < CTF_LSTRUCT_THRESH) {
+ ctf_member_t *mp = (ctf_member_t *)
+ ((uintptr_t)tp + increment);
+
+ vbytes = sizeof (ctf_member_t) * vlen;
+ for (n = vlen; n != 0; n--, mp++)
+ child |= CTF_TYPE_ISCHILD(mp->ctm_type);
+ } else {
+ ctf_lmember_t *lmp = (ctf_lmember_t *)
+ ((uintptr_t)tp + increment);
+
+ vbytes = sizeof (ctf_lmember_t) * vlen;
+ for (n = vlen; n != 0; n--, lmp++)
+ child |=
+ CTF_TYPE_ISCHILD(lmp->ctlm_type);
+ }
+ break;
+ case CTF_K_ENUM:
+ vbytes = sizeof (ctf_enum_t) * vlen;
+ break;
+ case CTF_K_FORWARD:
+ case CTF_K_UNKNOWN:
+ vbytes = 0;
+ break;
+ case CTF_K_POINTER:
+ case CTF_K_TYPEDEF:
+ case CTF_K_VOLATILE:
+ case CTF_K_CONST:
+ case CTF_K_RESTRICT:
+ vbytes = 0;
+ break;
+ default:
+ printf("%s(%d): detected invalid CTF kind -- %u\n", __func__, __LINE__, kind);
+ return (EIO);
+ }
+ *xp = (uint32_t)((uintptr_t) tp - (uintptr_t) ctfdata);
+ tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
+ }
+
+ return (0);
+}
+
+/*
+ * CTF Declaration Stack
+ *
+ * In order to implement ctf_type_name(), we must convert a type graph back
+ * into a C type declaration. Unfortunately, a type graph represents a storage
+ * class ordering of the type whereas a type declaration must obey the C rules
+ * for operator precedence, and the two orderings are frequently in conflict.
+ * For example, consider these CTF type graphs and their C declarations:
+ *
+ * CTF_K_POINTER -> CTF_K_FUNCTION -> CTF_K_INTEGER : int (*)()
+ * CTF_K_POINTER -> CTF_K_ARRAY -> CTF_K_INTEGER : int (*)[]
+ *
+ * In each case, parentheses are used to raise operator * to higher lexical
+ * precedence, so the string form of the C declaration cannot be constructed by
+ * walking the type graph links and forming the string from left to right.
+ *
+ * The functions in this file build a set of stacks from the type graph nodes
+ * corresponding to the C operator precedence levels in the appropriate order.
+ * The code in ctf_type_name() can then iterate over the levels and nodes in
+ * lexical precedence order and construct the final C declaration string.
+ */
+typedef struct ctf_list {
+ struct ctf_list *l_prev; /* previous pointer or tail pointer */
+ struct ctf_list *l_next; /* next pointer or head pointer */
+} ctf_list_t;
+
+#define ctf_list_prev(elem) ((void *)(((ctf_list_t *)(elem))->l_prev))
+#define ctf_list_next(elem) ((void *)(((ctf_list_t *)(elem))->l_next))
+
+typedef enum {
+ CTF_PREC_BASE,
+ CTF_PREC_POINTER,
+ CTF_PREC_ARRAY,
+ CTF_PREC_FUNCTION,
+ CTF_PREC_MAX
+} ctf_decl_prec_t;
+
+typedef struct ctf_decl_node {
+ ctf_list_t cd_list; /* linked list pointers */
+ ctf_id_t cd_type; /* type identifier */
+ uint_t cd_kind; /* type kind */
+ uint_t cd_n; /* type dimension if array */
+} ctf_decl_node_t;
+
+typedef struct ctf_decl {
+ ctf_list_t cd_nodes[CTF_PREC_MAX]; /* declaration node stacks */
+ int cd_order[CTF_PREC_MAX]; /* storage order of decls */
+ ctf_decl_prec_t cd_qualp; /* qualifier precision */
+ ctf_decl_prec_t cd_ordp; /* ordered precision */
+ char *cd_buf; /* buffer for output */
+ char *cd_ptr; /* buffer location */
+ char *cd_end; /* buffer limit */
+ size_t cd_len; /* buffer space required */
+ int cd_err; /* saved error value */
+} ctf_decl_t;
+
+/*
+ * Simple doubly-linked list append routine. This implementation assumes that
+ * each list element contains an embedded ctf_list_t as the first member.
+ * An additional ctf_list_t is used to store the head (l_next) and tail
+ * (l_prev) pointers. The current head and tail list elements have their
+ * previous and next pointers set to NULL, respectively.
+ */
+static void
+ctf_list_append(ctf_list_t *lp, void *new)
+{
+ ctf_list_t *p = lp->l_prev; /* p = tail list element */
+ ctf_list_t *q = new; /* q = new list element */
+
+ lp->l_prev = q;
+ q->l_prev = p;
+ q->l_next = NULL;
+
+ if (p != NULL)
+ p->l_next = q;
+ else
+ lp->l_next = q;
+}
+
+/*
+ * Prepend the specified existing element to the given ctf_list_t. The
+ * existing pointer should be pointing at a struct with embedded ctf_list_t.
+ */
+static void
+ctf_list_prepend(ctf_list_t *lp, void *new)
+{
+ ctf_list_t *p = new; /* p = new list element */
+ ctf_list_t *q = lp->l_next; /* q = head list element */
+
+ lp->l_next = p;
+ p->l_prev = NULL;
+ p->l_next = q;
+
+ if (q != NULL)
+ q->l_prev = p;
+ else
+ lp->l_prev = p;
+}
+
+static void
+ctf_decl_init(ctf_decl_t *cd, char *buf, size_t len)
+{
+ int i;
+
+ bzero(cd, sizeof (ctf_decl_t));
+
+ for (i = CTF_PREC_BASE; i < CTF_PREC_MAX; i++)
+ cd->cd_order[i] = CTF_PREC_BASE - 1;
+
+ cd->cd_qualp = CTF_PREC_BASE;
+ cd->cd_ordp = CTF_PREC_BASE;
+
+ cd->cd_buf = buf;
+ cd->cd_ptr = buf;
+ cd->cd_end = buf + len;
+}
+
+static void
+ctf_decl_fini(ctf_decl_t *cd)
+{
+ ctf_decl_node_t *cdp, *ndp;
+ int i;
+
+ for (i = CTF_PREC_BASE; i < CTF_PREC_MAX; i++) {
+ for (cdp = ctf_list_next(&cd->cd_nodes[i]);
+ cdp != NULL; cdp = ndp) {
+ ndp = ctf_list_next(cdp);
+ free(cdp, M_FBT);
+ }
+ }
+}
+
+static const ctf_type_t *
+ctf_lookup_by_id(linker_ctf_t *lc, ctf_id_t type)
+{
+ const ctf_type_t *tp;
+ uint32_t offset;
+ uint32_t *typoff = *lc->typoffp;
+
+ if (type >= *lc->typlenp) {
+ printf("%s(%d): type %d exceeds max %ld\n",__func__,__LINE__,(int) type,*lc->typlenp);
+ return(NULL);
+ }
+
+ /* Check if the type isn't cross-referenced. */
+ if ((offset = typoff[type]) == 0) {
+ printf("%s(%d): type %d isn't cross referenced\n",__func__,__LINE__, (int) type);
+ return(NULL);
+ }
+
+ tp = (const ctf_type_t *)(lc->ctftab + offset + sizeof(ctf_header_t));
+
+ return (tp);
+}
+
+static void
+fbt_array_info(linker_ctf_t *lc, ctf_id_t type, ctf_arinfo_t *arp)
+{
+ const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
+ const ctf_type_t *tp;
+ const ctf_array_t *ap;
+ ssize_t increment;
+
+ bzero(arp, sizeof(*arp));
+
+ if ((tp = ctf_lookup_by_id(lc, type)) == NULL)
+ return;
+
+ if (CTF_INFO_KIND(tp->ctt_info) != CTF_K_ARRAY)
+ return;
+
+ (void) fbt_get_ctt_size(hp->cth_version, tp, NULL, &increment);
+
+ ap = (const ctf_array_t *)((uintptr_t)tp + increment);
+ arp->ctr_contents = ap->cta_contents;
+ arp->ctr_index = ap->cta_index;
+ arp->ctr_nelems = ap->cta_nelems;
+}
+
+static const char *
+ctf_strptr(linker_ctf_t *lc, int name)
+{
+ const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
+ const char *strp = "";
+
+ if (name < 0 || name >= hp->cth_strlen)
+ return(strp);
+
+ strp = (const char *)(lc->ctftab + hp->cth_stroff + name + sizeof(ctf_header_t));
+
+ return (strp);
+}
+
+static void
+ctf_decl_push(ctf_decl_t *cd, linker_ctf_t *lc, ctf_id_t type)
+{
+ ctf_decl_node_t *cdp;
+ ctf_decl_prec_t prec;
+ uint_t kind, n = 1;
+ int is_qual = 0;
+
+ const ctf_type_t *tp;
+ ctf_arinfo_t ar;
+
+ if ((tp = ctf_lookup_by_id(lc, type)) == NULL) {
+ cd->cd_err = ENOENT;
+ return;
+ }
+
+ switch (kind = CTF_INFO_KIND(tp->ctt_info)) {
+ case CTF_K_ARRAY:
+ fbt_array_info(lc, type, &ar);
+ ctf_decl_push(cd, lc, ar.ctr_contents);
+ n = ar.ctr_nelems;
+ prec = CTF_PREC_ARRAY;
+ break;
+
+ case CTF_K_TYPEDEF:
+ if (ctf_strptr(lc, tp->ctt_name)[0] == '\0') {
+ ctf_decl_push(cd, lc, tp->ctt_type);
+ return;
+ }
+ prec = CTF_PREC_BASE;
+ break;
+
+ case CTF_K_FUNCTION:
+ ctf_decl_push(cd, lc, tp->ctt_type);
+ prec = CTF_PREC_FUNCTION;
+ break;
+
+ case CTF_K_POINTER:
+ ctf_decl_push(cd, lc, tp->ctt_type);
+ prec = CTF_PREC_POINTER;
+ break;
+
+ case CTF_K_VOLATILE:
+ case CTF_K_CONST:
+ case CTF_K_RESTRICT:
+ ctf_decl_push(cd, lc, tp->ctt_type);
+ prec = cd->cd_qualp;
+ is_qual++;
+ break;
+
+ default:
+ prec = CTF_PREC_BASE;
+ }
+
+ if ((cdp = malloc(sizeof (ctf_decl_node_t), M_FBT, M_WAITOK)) == NULL) {
+ cd->cd_err = EAGAIN;
+ return;
+ }
+
+ cdp->cd_type = type;
+ cdp->cd_kind = kind;
+ cdp->cd_n = n;
+
+ if (ctf_list_next(&cd->cd_nodes[prec]) == NULL)
+ cd->cd_order[prec] = cd->cd_ordp++;
+
+ /*
+ * Reset cd_qualp to the highest precedence level that we've seen so
+ * far that can be qualified (CTF_PREC_BASE or CTF_PREC_POINTER).
+ */
+ if (prec > cd->cd_qualp && prec < CTF_PREC_ARRAY)
+ cd->cd_qualp = prec;
+
+ /*
+ * C array declarators are ordered inside out so prepend them. Also by
+ * convention qualifiers of base types precede the type specifier (e.g.
+ * const int vs. int const) even though the two forms are equivalent.
+ */
+ if (kind == CTF_K_ARRAY || (is_qual && prec == CTF_PREC_BASE))
+ ctf_list_prepend(&cd->cd_nodes[prec], cdp);
+ else
+ ctf_list_append(&cd->cd_nodes[prec], cdp);
+}
+
+static void
+ctf_decl_sprintf(ctf_decl_t *cd, const char *format, ...)
+{
+ size_t len = (size_t)(cd->cd_end - cd->cd_ptr);
+ va_list ap;
+ size_t n;
+
+ va_start(ap, format);
+ n = vsnprintf(cd->cd_ptr, len, format, ap);
+ va_end(ap);
+
+ cd->cd_ptr += MIN(n, len);
+ cd->cd_len += n;
+}
+
+static ssize_t
+fbt_type_name(linker_ctf_t *lc, ctf_id_t type, char *buf, size_t len)
+{
+ ctf_decl_t cd;
+ ctf_decl_node_t *cdp;
+ ctf_decl_prec_t prec, lp, rp;
+ int ptr, arr;
+ uint_t k;
+
+ if (lc == NULL && type == CTF_ERR)
+ return (-1); /* simplify caller code by permitting CTF_ERR */
+
+ ctf_decl_init(&cd, buf, len);
+ ctf_decl_push(&cd, lc, type);
+
+ if (cd.cd_err != 0) {
+ ctf_decl_fini(&cd);
+ return (-1);
+ }
+
+ /*
+ * If the type graph's order conflicts with lexical precedence order
+ * for pointers or arrays, then we need to surround the declarations at
+ * the corresponding lexical precedence with parentheses. This can
+ * result in either a parenthesized pointer (*) as in int (*)() or
+ * int (*)[], or in a parenthesized pointer and array as in int (*[])().
+ */
+ ptr = cd.cd_order[CTF_PREC_POINTER] > CTF_PREC_POINTER;
+ arr = cd.cd_order[CTF_PREC_ARRAY] > CTF_PREC_ARRAY;
+
+ rp = arr ? CTF_PREC_ARRAY : ptr ? CTF_PREC_POINTER : -1;
+ lp = ptr ? CTF_PREC_POINTER : arr ? CTF_PREC_ARRAY : -1;
+
+ k = CTF_K_POINTER; /* avoid leading whitespace (see below) */
+
+ for (prec = CTF_PREC_BASE; prec < CTF_PREC_MAX; prec++) {
+ for (cdp = ctf_list_next(&cd.cd_nodes[prec]);
+ cdp != NULL; cdp = ctf_list_next(cdp)) {
+
+ const ctf_type_t *tp =
+ ctf_lookup_by_id(lc, cdp->cd_type);
+ const char *name = ctf_strptr(lc, tp->ctt_name);
+
+ if (k != CTF_K_POINTER && k != CTF_K_ARRAY)
+ ctf_decl_sprintf(&cd, " ");
+
+ if (lp == prec) {
+ ctf_decl_sprintf(&cd, "(");
+ lp = -1;
+ }
+
+ switch (cdp->cd_kind) {
+ case CTF_K_INTEGER:
+ case CTF_K_FLOAT:
+ case CTF_K_TYPEDEF:
+ ctf_decl_sprintf(&cd, "%s", name);
+ break;
+ case CTF_K_POINTER:
+ ctf_decl_sprintf(&cd, "*");
+ break;
+ case CTF_K_ARRAY:
+ ctf_decl_sprintf(&cd, "[%u]", cdp->cd_n);
+ break;
+ case CTF_K_FUNCTION:
+ ctf_decl_sprintf(&cd, "()");
+ break;
+ case CTF_K_STRUCT:
+ case CTF_K_FORWARD:
+ ctf_decl_sprintf(&cd, "struct %s", name);
+ break;
+ case CTF_K_UNION:
+ ctf_decl_sprintf(&cd, "union %s", name);
+ break;
+ case CTF_K_ENUM:
+ ctf_decl_sprintf(&cd, "enum %s", name);
+ break;
+ case CTF_K_VOLATILE:
+ ctf_decl_sprintf(&cd, "volatile");
+ break;
+ case CTF_K_CONST:
+ ctf_decl_sprintf(&cd, "const");
+ break;
+ case CTF_K_RESTRICT:
+ ctf_decl_sprintf(&cd, "restrict");
+ break;
+ }
+
+ k = cdp->cd_kind;
+ }
+
+ if (rp == prec)
+ ctf_decl_sprintf(&cd, ")");
+ }
+
+ ctf_decl_fini(&cd);
+ return (cd.cd_len);
+}
+
+static void
+fbt_getargdesc(void *arg __unused, dtrace_id_t id __unused, void *parg, dtrace_argdesc_t *desc)
+{
+ const ushort_t *dp;
+ fbt_probe_t *fbt = parg;
+ linker_ctf_t lc;
+ modctl_t *ctl = fbt->fbtp_ctl;
+ int ndx = desc->dtargd_ndx;
+ int symindx = fbt->fbtp_symindx;
+ uint32_t *ctfoff;
+ uint32_t offset;
+ ushort_t info, kind, n;
+
+ if (fbt->fbtp_roffset != 0 && desc->dtargd_ndx == 0) {
+ (void) strcpy(desc->dtargd_native, "int");
+ return;
+ }
+
+ desc->dtargd_ndx = DTRACE_ARGNONE;
+
+ /* Get a pointer to the CTF data and its length. */
+ if (linker_ctf_get(ctl, &lc) != 0)
+ /* No CTF data? Something wrong? *shrug* */
+ return;
+
+ /* Check if this module hasn't been initialised yet. */
+ if (*lc.ctfoffp == NULL) {
+ /*
+ * Initialise the CTF object and function symindx to
+ * byte offset array.
+ */
+ if (fbt_ctfoff_init(ctl, &lc) != 0)
+ return;
+
+ /* Initialise the CTF type to byte offset array. */
+ if (fbt_typoff_init(&lc) != 0)
+ return;
+ }
+
+ ctfoff = *lc.ctfoffp;
+
+ if (ctfoff == NULL || *lc.typoffp == NULL)
+ return;
+
+ /* Check if the symbol index is out of range. */
+ if (symindx >= lc.nsym)
+ return;
+
+ /* Check if the symbol isn't cross-referenced. */
+ if ((offset = ctfoff[symindx]) == 0xffffffff)
+ return;
+
+ dp = (const ushort_t *)(lc.ctftab + offset + sizeof(ctf_header_t));
+
+ info = *dp++;
+ kind = CTF_INFO_KIND(info);
+ n = CTF_INFO_VLEN(info);
+
+ if (kind == CTF_K_UNKNOWN && n == 0) {
+ printf("%s(%d): Unknown function!\n",__func__,__LINE__);
+ return;
+ }
+
+ if (kind != CTF_K_FUNCTION) {
+ printf("%s(%d): Expected a function!\n",__func__,__LINE__);
+ return;
+ }
+
+ if (fbt->fbtp_roffset != 0) {
+ /* Only return type is available for args[1] in return probe. */
+ if (ndx > 1)
+ return;
+ ASSERT(ndx == 1);
+ } else {
+ /* Check if the requested argument doesn't exist. */
+ if (ndx >= n)
+ return;
+
+ /* Skip the return type and arguments up to the one requested. */
+ dp += ndx + 1;
+ }
+
+ if (fbt_type_name(&lc, *dp, desc->dtargd_native, sizeof(desc->dtargd_native)) > 0)
+ desc->dtargd_ndx = ndx;
+
+ return;
+}
+
+static int
+fbt_linker_file_cb(linker_file_t lf, void *arg)
+{
+
+ fbt_provide_module(arg, lf);
+
+ return (0);
+}
+
+static void
+fbt_load(void *dummy)
+{
+ /* Create the /dev/dtrace/fbt entry. */
+ fbt_cdev = make_dev(&fbt_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
+ "dtrace/fbt");
+
+ /* Default the probe table size if not specified. */
+ if (fbt_probetab_size == 0)
+ fbt_probetab_size = FBT_PROBETAB_SIZE;
+
+ /* Choose the hash mask for the probe table. */
+ fbt_probetab_mask = fbt_probetab_size - 1;
+
+ /* Allocate memory for the probe table. */
+ fbt_probetab =
+ malloc(fbt_probetab_size * sizeof (fbt_probe_t *), M_FBT, M_WAITOK | M_ZERO);
+
+ dtrace_invop_add(fbt_invop);
+
+ if (dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_USER,
+ NULL, &fbt_pops, NULL, &fbt_id) != 0)
+ return;
+
+ /* Create probes for the kernel and already-loaded modules. */
+ linker_file_foreach(fbt_linker_file_cb, NULL);
+}
+
+
+static int
+fbt_unload()
+{
+ int error = 0;
+
+ /* De-register the invalid opcode handler. */
+ dtrace_invop_remove(fbt_invop);
+
+ /* De-register this DTrace provider. */
+ if ((error = dtrace_unregister(fbt_id)) != 0)
+ return (error);
+
+ /* Free the probe table. */
+ free(fbt_probetab, M_FBT);
+ fbt_probetab = NULL;
+ fbt_probetab_mask = 0;
+
+ destroy_dev(fbt_cdev);
+
+ return (error);
+}
+
+static int
+fbt_modevent(module_t mod __unused, int type, void *data __unused)
+{
+ int error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ break;
+
+ case MOD_UNLOAD:
+ break;
+
+ case MOD_SHUTDOWN:
+ break;
+
+ default:
+ error = EOPNOTSUPP;
+ break;
+
+ }
+
+ return (error);
+}
+
+static int
+fbt_open(struct cdev *dev __unused, int oflags __unused, int devtype __unused, struct thread *td __unused)
+{
+ return (0);
+}
+
+SYSINIT(fbt_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fbt_load, NULL);
+SYSUNINIT(fbt_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fbt_unload, NULL);
+
+DEV_MODULE(fbt, fbt_modevent, NULL);
+MODULE_VERSION(fbt, 1);
+MODULE_DEPEND(fbt, dtrace, 1, 1, 1);
+MODULE_DEPEND(fbt, opensolaris, 1, 1, 1);
Property changes on: trunk/sys/cddl/dev/fbt/fbt_powerpc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
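Of the new PowerPC bits above, the trickiest is the DTRACE_INVOP_JUMP case in fbt_invop(): the return site of a traced function ends in a plain branch (a tail call), so the handler recomputes the branch target itself by masking the saved instruction with FBT_BR_MASK, sign-extending the 26-bit displacement, and adding it to srr0. A standalone userland sketch of that displacement decoding, reusing the constants from fbt_powerpc.c (the example instruction word is made up):

#include <stdint.h>
#include <stdio.h>

#define FBT_BRANCH	0x48000000
#define FBT_BR_MASK	0x03fffffc

/* Byte displacement of a PowerPC I-form branch ("b target"), sign-extended. */
static int64_t
branch_displacement(uint32_t instr)
{
	int64_t disp = instr & FBT_BR_MASK;

	if (disp & 0x02000000)			/* sign bit of the 26-bit field */
		disp |= 0xfffffffffc000000ULL;	/* extend, as fbt_invop() does */
	return (disp);
}

int
main(void)
{
	/* Made-up saved instruction: "b" with a displacement of -32 bytes. */
	uint32_t instr = FBT_BRANCH | (0x03ffffe0 & FBT_BR_MASK);

	printf("displacement = %lld\n", (long long)branch_displacement(instr));
	return (0);
}
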
Modified: trunk/sys/cddl/dev/lockstat/lockstat.c
===================================================================
--- trunk/sys/cddl/dev/lockstat/lockstat.c 2018-06-01 22:42:15 UTC (rev 10162)
+++ trunk/sys/cddl/dev/lockstat/lockstat.c 2018-06-01 22:42:24 UTC (rev 10163)
@@ -21,7 +21,7 @@
*
* Portions Copyright (c) 2008-2009 Stacey Son <sson at FreeBSD.org>
*
- * $FreeBSD: release/9.2.0/sys/cddl/dev/lockstat/lockstat.c 192853 2009-05-26 20:28:22Z sson $
+ * $FreeBSD: stable/10/sys/cddl/dev/lockstat/lockstat.c 285759 2015-07-21 17:16:37Z markj $
*
*/
@@ -46,7 +46,8 @@
#include <sys/dtrace.h>
#include <sys/lockstat.h>
-#if defined(__i386__) || defined(__amd64__)
+#if defined(__i386__) || defined(__amd64__) || \
+ defined(__mips__) || defined(__powerpc__)
#define LOCKSTAT_AFRAMES 1
#else
#error "architecture not supported"
@@ -66,12 +67,12 @@
char *lsp_name;
int lsp_probe;
dtrace_id_t lsp_id;
-#ifdef __MidnightBSD__
+#ifdef __FreeBSD__
int lsp_frame;
#endif
} lockstat_probe_t;
-#ifdef __MidnightBSD__
+#ifdef __FreeBSD__
lockstat_probe_t lockstat_probes[] =
{
/* Spin Locks */
@@ -161,6 +162,8 @@
ASSERT(!lockstat_probemap[probe->lsp_probe]);
+ lockstat_enabled++;
+
lockstat_probemap[probe->lsp_probe] = id;
#ifdef DOODAD
membar_producer();
@@ -184,6 +187,8 @@
ASSERT(lockstat_probemap[probe->lsp_probe]);
+ lockstat_enabled--;
+
lockstat_probemap[probe->lsp_probe] = 0;
#ifdef DOODAD
lockstat_hot_patch();
@@ -228,7 +233,7 @@
continue;
ASSERT(!probe->lsp_id);
-#ifdef __MidnightBSD__
+#ifdef __FreeBSD__
probe->lsp_id = dtrace_probe_create(lockstat_id,
"kernel", probe->lsp_func, probe->lsp_name,
probe->lsp_frame, probe);
Modified: trunk/sys/cddl/dev/profile/profile.c
===================================================================
--- trunk/sys/cddl/dev/profile/profile.c 2018-06-01 22:42:15 UTC (rev 10162)
+++ trunk/sys/cddl/dev/profile/profile.c 2018-06-01 22:42:24 UTC (rev 10163)
@@ -21,7 +21,7 @@
*
* Portions Copyright 2006-2008 John Birrell jb at freebsd.org
*
- * $FreeBSD: src/sys/cddl/dev/profile/profile.c,v 1.1.2.1.2.1 2008/11/25 02:59:29 kensmith Exp $
+ * $FreeBSD: stable/10/sys/cddl/dev/profile/profile.c 314667 2017-03-04 13:03:31Z avg $
*
*/
@@ -53,9 +53,9 @@
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/unistd.h>
+#include <machine/cpu.h>
#include <machine/stdarg.h>
-#include <sys/cyclic.h>
#include <sys/dtrace.h>
#include <sys/dtrace_bsd.h>
@@ -98,7 +98,7 @@
* allow for a manual override in case we get it completely wrong.
*/
#ifdef __amd64
-#define PROF_ARTIFICIAL_FRAMES 7
+#define PROF_ARTIFICIAL_FRAMES 10
#else
#ifdef __i386
#define PROF_ARTIFICIAL_FRAMES 6
@@ -113,12 +113,35 @@
#endif
#endif
+#ifdef __mips
+/*
+ * This value is bogus just to make module compilable on mips
+ */
+#define PROF_ARTIFICIAL_FRAMES 3
+#endif
+
+#ifdef __powerpc__
+/*
+ * This value is bogus just to make module compilable on powerpc
+ */
+#define PROF_ARTIFICIAL_FRAMES 3
+#endif
+
+struct profile_probe_percpu;
+
typedef struct profile_probe {
char prof_name[PROF_NAMELEN];
dtrace_id_t prof_id;
int prof_kind;
+#ifdef illumos
hrtime_t prof_interval;
cyclic_id_t prof_cyclic;
+#else
+ sbintime_t prof_interval;
+ struct callout prof_cyclic;
+ sbintime_t prof_expected;
+ struct profile_probe_percpu **prof_pcpus;
+#endif
} profile_probe_t;
typedef struct profile_probe_percpu {
@@ -125,6 +148,9 @@
hrtime_t profc_expected;
hrtime_t profc_interval;
profile_probe_t *profc_probe;
+#ifdef __FreeBSD__
+ struct callout profc_cyclic;
+#endif
} profile_probe_percpu_t;
static d_open_t profile_open;
@@ -193,6 +219,28 @@
static hrtime_t profile_interval_min = NANOSEC / 5000; /* 5000 hz */
static int profile_aframes = 0; /* override */
+static sbintime_t
+nsec_to_sbt(hrtime_t nsec)
+{
+ time_t sec;
+
+ /*
+ * We need to calculate nsec * 2^32 / 10^9
+ * Seconds and nanoseconds are split to avoid overflow.
+ */
+ sec = nsec / NANOSEC;
+ nsec = nsec % NANOSEC;
+ return (((sbintime_t)sec << 32) | ((sbintime_t)nsec << 32) / NANOSEC);
+}
+
+static hrtime_t
+sbt_to_nsec(sbintime_t sbt)
+{
+
+ return ((sbt >> 32) * NANOSEC +
+ (((uint32_t)sbt * (hrtime_t)NANOSEC) >> 32));
+}
+
static void
profile_fire(void *arg)
{
@@ -199,13 +247,35 @@
profile_probe_percpu_t *pcpu = arg;
profile_probe_t *prof = pcpu->profc_probe;
hrtime_t late;
- solaris_cpu_t *c = &solaris_cpu[curcpu];
+ struct trapframe *frame;
+ uintfptr_t pc, upc;
+#ifdef illumos
late = gethrtime() - pcpu->profc_expected;
+#else
+ late = sbt_to_nsec(sbinuptime() - pcpu->profc_expected);
+#endif
+
+ pc = 0;
+ upc = 0;
+
+ /*
+ * td_intr_frame can be unset if this is a catch up event
+ * after waking up from idle sleep.
+ * This can only happen on a CPU idle thread.
+ */
+ frame = curthread->td_intr_frame;
+ if (frame != NULL) {
+ if (TRAPF_USERMODE(frame))
+ upc = TRAPF_PC(frame);
+ else
+ pc = TRAPF_PC(frame);
+ }
+ dtrace_probe(prof->prof_id, pc, upc, late, 0, 0);
+
pcpu->profc_expected += pcpu->profc_interval;
-
- dtrace_probe(prof->prof_id, c->cpu_profile_pc,
- c->cpu_profile_upc, late, 0, 0);
+ callout_schedule_sbt_curcpu(&pcpu->profc_cyclic,
+ pcpu->profc_expected, 0, C_DIRECT_EXEC | C_ABSOLUTE);
}
static void
@@ -212,10 +282,29 @@
profile_tick(void *arg)
{
profile_probe_t *prof = arg;
- solaris_cpu_t *c = &solaris_cpu[curcpu];
+ struct trapframe *frame;
+ uintfptr_t pc, upc;
- dtrace_probe(prof->prof_id, c->cpu_profile_pc,
- c->cpu_profile_upc, 0, 0, 0);
+ pc = 0;
+ upc = 0;
+
+ /*
+ * td_intr_frame can be unset if this is a catch up event
+ * after waking up from idle sleep.
+ * This can only happen on a CPU idle thread.
+ */
+ frame = curthread->td_intr_frame;
+ if (frame != NULL) {
+ if (TRAPF_USERMODE(frame))
+ upc = TRAPF_PC(frame);
+ else
+ pc = TRAPF_PC(frame);
+ }
+ dtrace_probe(prof->prof_id, pc, upc, 0, 0, 0);
+
+ prof->prof_expected += prof->prof_interval;
+ callout_schedule_sbt(&prof->prof_cyclic,
+ prof->prof_expected, 0, C_DIRECT_EXEC | C_ABSOLUTE);
}
static void
@@ -237,8 +326,13 @@
prof = kmem_zalloc(sizeof (profile_probe_t), KM_SLEEP);
(void) strcpy(prof->prof_name, name);
+#ifdef illumos
prof->prof_interval = interval;
prof->prof_cyclic = CYCLIC_NONE;
+#else
+ prof->prof_interval = nsec_to_sbt(interval);
+ callout_init(&prof->prof_cyclic, 1);
+#endif
prof->prof_kind = kind;
prof->prof_id = dtrace_probe_create(profile_id,
NULL, NULL, name,
@@ -383,7 +477,11 @@
{
profile_probe_t *prof = parg;
+#ifdef illumos
ASSERT(prof->prof_cyclic == CYCLIC_NONE);
+#else
+ ASSERT(!callout_active(&prof->prof_cyclic) && prof->prof_pcpus == NULL);
+#endif
kmem_free(prof, sizeof (profile_probe_t));
ASSERT(profile_total >= 1);
@@ -390,6 +488,7 @@
atomic_add_32(&profile_total, -1);
}
+#ifdef illumos
/*ARGSUSED*/
static void
profile_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
@@ -465,7 +564,82 @@
prof->prof_cyclic = CYCLIC_NONE;
}
+#else
+
static void
+profile_enable_omni(profile_probe_t *prof)
+{
+ profile_probe_percpu_t *pcpu;
+ int cpu;
+
+ prof->prof_pcpus = kmem_zalloc((mp_maxid + 1) * sizeof(pcpu), KM_SLEEP);
+ CPU_FOREACH(cpu) {
+ pcpu = kmem_zalloc(sizeof(profile_probe_percpu_t), KM_SLEEP);
+ prof->prof_pcpus[cpu] = pcpu;
+ pcpu->profc_probe = prof;
+ pcpu->profc_expected = sbinuptime() + prof->prof_interval;
+ pcpu->profc_interval = prof->prof_interval;
+ callout_init(&pcpu->profc_cyclic, 1);
+ callout_reset_sbt_on(&pcpu->profc_cyclic,
+ pcpu->profc_expected, 0, profile_fire, pcpu,
+ cpu, C_DIRECT_EXEC | C_ABSOLUTE);
+ }
+}
+
+static void
+profile_disable_omni(profile_probe_t *prof)
+{
+ profile_probe_percpu_t *pcpu;
+ int cpu;
+
+ ASSERT(prof->prof_pcpus != NULL);
+ CPU_FOREACH(cpu) {
+ pcpu = prof->prof_pcpus[cpu];
+ ASSERT(pcpu->profc_probe == prof);
+ ASSERT(callout_active(&pcpu->profc_cyclic));
+ callout_stop(&pcpu->profc_cyclic);
+ callout_drain(&pcpu->profc_cyclic);
+ kmem_free(pcpu, sizeof(profile_probe_percpu_t));
+ }
+ kmem_free(prof->prof_pcpus, (mp_maxid + 1) * sizeof(pcpu));
+ prof->prof_pcpus = NULL;
+}
+
+/* ARGSUSED */
+static void
+profile_enable(void *arg, dtrace_id_t id, void *parg)
+{
+ profile_probe_t *prof = parg;
+
+ if (prof->prof_kind == PROF_TICK) {
+ prof->prof_expected = sbinuptime() + prof->prof_interval;
+ callout_reset_sbt(&prof->prof_cyclic,
+ prof->prof_expected, 0, profile_tick, prof,
+ C_DIRECT_EXEC | C_ABSOLUTE);
+ } else {
+ ASSERT(prof->prof_kind == PROF_PROFILE);
+ profile_enable_omni(prof);
+ }
+}
+
+/* ARGSUSED */
+static void
+profile_disable(void *arg, dtrace_id_t id, void *parg)
+{
+ profile_probe_t *prof = parg;
+
+ if (prof->prof_kind == PROF_TICK) {
+ ASSERT(callout_active(&prof->prof_cyclic));
+ callout_stop(&prof->prof_cyclic);
+ callout_drain(&prof->prof_cyclic);
+ } else {
+ ASSERT(prof->prof_kind == PROF_PROFILE);
+ profile_disable_omni(prof);
+ }
+}
+#endif
+
+static void
profile_load(void *dummy)
{
/* Create the /dev/dtrace/profile entry. */
@@ -528,5 +702,4 @@
DEV_MODULE(profile, profile_modevent, NULL);
MODULE_VERSION(profile, 1);
MODULE_DEPEND(profile, dtrace, 1, 1, 1);
-MODULE_DEPEND(profile, cyclic, 1, 1, 1);
MODULE_DEPEND(profile, opensolaris, 1, 1, 1);
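With cyclic gone, profile.c above now schedules its ticks through callout(9), whose deadlines are sbintime_t values (whole seconds in the upper 32 bits, fractional seconds in the lower 32). A userland round-trip sketch of the two new conversion helpers, with the kernel types stubbed out for illustration:

#include <stdint.h>
#include <stdio.h>

#define NANOSEC	1000000000LL

typedef int64_t sbintime_t;	/* 32.32 fixed-point seconds */
typedef int64_t hrtime_t;	/* nanoseconds */

static sbintime_t
nsec_to_sbt(hrtime_t nsec)
{
	int64_t sec;

	/* Split seconds and nanoseconds so nsec << 32 cannot overflow. */
	sec = nsec / NANOSEC;
	nsec = nsec % NANOSEC;
	return (((sbintime_t)sec << 32) | ((sbintime_t)nsec << 32) / NANOSEC);
}

static hrtime_t
sbt_to_nsec(sbintime_t sbt)
{
	return ((sbt >> 32) * NANOSEC +
	    (((uint32_t)sbt * (hrtime_t)NANOSEC) >> 32));
}

int
main(void)
{
	hrtime_t interval = NANOSEC / 997;	/* a 997 Hz profile rate */

	printf("%lld ns -> sbt %#llx -> %lld ns\n",
	    (long long)interval,
	    (unsigned long long)nsec_to_sbt(interval),
	    (long long)sbt_to_nsec(nsec_to_sbt(interval)));
	return (0);
}
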
Modified: trunk/sys/cddl/dev/prototype.c
===================================================================
--- trunk/sys/cddl/dev/prototype.c 2018-06-01 22:42:15 UTC (rev 10162)
+++ trunk/sys/cddl/dev/prototype.c 2018-06-01 22:42:24 UTC (rev 10163)
@@ -3,7 +3,7 @@
* This file is freeware. You are free to use it and add your own
* license.
*
- * $FreeBSD: src/sys/cddl/dev/prototype.c,v 1.1.2.1.2.1 2008/11/25 02:59:29 kensmith Exp $
+ * $FreeBSD: stable/10/sys/cddl/dev/prototype.c 179237 2008-05-23 05:59:42Z jb $
*
*/
Modified: trunk/sys/cddl/dev/sdt/sdt.c
===================================================================
--- trunk/sys/cddl/dev/sdt/sdt.c 2018-06-01 22:42:15 UTC (rev 10162)
+++ trunk/sys/cddl/dev/sdt/sdt.c 2018-06-01 22:42:24 UTC (rev 10163)
@@ -21,47 +21,63 @@
*
* Portions Copyright 2006-2008 John Birrell jb at freebsd.org
*
- * $FreeBSD: release/9.2.0/sys/cddl/dev/sdt/sdt.c 252858 2013-07-06 02:49:56Z markj $
+ * $FreeBSD: stable/10/sys/cddl/dev/sdt/sdt.c 299001 2016-05-03 19:42:58Z markj $
*
*/
-#ifndef KDTRACE_HOOKS
-#define KDTRACE_HOOKS
-#endif
+/*
+ * This file contains a reimplementation of the statically-defined tracing (SDT)
+ * framework for DTrace. Probes and SDT providers are defined using the macros
+ * in sys/sdt.h, which append all the needed structures to linker sets. When
+ * this module is loaded, it iterates over all of the loaded modules and
+ * registers probes and providers with the DTrace framework based on the
+ * contents of these linker sets.
+ *
+ * A list of SDT providers is maintained here since a provider may span multiple
+ * modules. When a kernel module is unloaded, a provider defined in that module
+ * is unregistered only if no other modules refer to it. The DTrace framework is
+ * responsible for destroying individual probes when a kernel module is
+ * unloaded; in particular, probes may not span multiple kernel modules.
+ */
+#include "opt_kdtrace.h"
+
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
+
#include <sys/conf.h>
+#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/limits.h>
+#include <sys/linker.h>
+#include <sys/linker_set.h>
#include <sys/lock.h>
-#include <sys/linker.h>
+#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/sdt.h>
#include <sys/dtrace.h>
-#include <sys/sdt.h>
+#include <sys/dtrace_bsd.h>
-#define SDT_ADDR2NDX(addr) (((uintptr_t)(addr)) >> 4)
-
-static d_open_t sdt_open;
-static int sdt_unload(void);
+/* DTrace methods. */
static void sdt_getargdesc(void *, dtrace_id_t, void *, dtrace_argdesc_t *);
static void sdt_provide_probes(void *, dtrace_probedesc_t *);
static void sdt_destroy(void *, dtrace_id_t, void *);
static void sdt_enable(void *, dtrace_id_t, void *);
static void sdt_disable(void *, dtrace_id_t, void *);
-static void sdt_load(void *);
-static int sdt_provider_unreg_callback(struct sdt_provider *prov,
- void *arg);
-static struct cdevsw sdt_cdevsw = {
- .d_version = D_VERSION,
- .d_open = sdt_open,
- .d_name = "sdt",
-};
+static void sdt_load(void);
+static int sdt_unload(void);
+static void sdt_create_provider(struct sdt_provider *);
+static void sdt_create_probe(struct sdt_probe *);
+static void sdt_kld_load(void *, struct linker_file *);
+static void sdt_kld_unload_try(void *, struct linker_file *, int *);
+static MALLOC_DEFINE(M_SDT, "SDT", "DTrace SDT providers");
+
static dtrace_pattr_t sdt_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
@@ -80,175 +96,303 @@
sdt_getargdesc,
NULL,
NULL,
- sdt_destroy
+ sdt_destroy,
};
-static struct cdev *sdt_cdev;
+static TAILQ_HEAD(, sdt_provider) sdt_prov_list;
-static int
-sdt_argtype_callback(struct sdt_argtype *argtype, void *arg)
+static eventhandler_tag sdt_kld_load_tag;
+static eventhandler_tag sdt_kld_unload_try_tag;
+
+static void
+sdt_create_provider(struct sdt_provider *prov)
{
- dtrace_argdesc_t *desc = arg;
+ struct sdt_provider *curr, *newprov;
- if (desc->dtargd_ndx == argtype->ndx) {
- desc->dtargd_mapping = desc->dtargd_ndx; /* XXX */
- strlcpy(desc->dtargd_native, argtype->type,
- sizeof(desc->dtargd_native));
- desc->dtargd_xlate[0] = '\0'; /* XXX */
- }
+ TAILQ_FOREACH(curr, &sdt_prov_list, prov_entry)
+ if (strcmp(prov->name, curr->name) == 0) {
+ /* The provider has already been defined. */
+ curr->sdt_refs++;
+ return;
+ }
- return (0);
+ /*
+ * Make a copy of prov so that we don't lose fields if its module is
+ * unloaded but the provider isn't destroyed. This could happen with
+ * a provider that spans multiple modules.
+ */
+ newprov = malloc(sizeof(*newprov), M_SDT, M_WAITOK | M_ZERO);
+ newprov->name = strdup(prov->name, M_SDT);
+ prov->sdt_refs = newprov->sdt_refs = 1;
+
+ TAILQ_INSERT_TAIL(&sdt_prov_list, newprov, prov_entry);
+
+ (void)dtrace_register(newprov->name, &sdt_attr, DTRACE_PRIV_USER, NULL,
+ &sdt_pops, NULL, (dtrace_provider_id_t *)&newprov->id);
+ prov->id = newprov->id;
}
static void
-sdt_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc)
+sdt_create_probe(struct sdt_probe *probe)
{
- struct sdt_probe *probe = parg;
+ struct sdt_provider *prov;
+ char mod[DTRACE_MODNAMELEN];
+ char func[DTRACE_FUNCNAMELEN];
+ char name[DTRACE_NAMELEN];
+ const char *from;
+ char *to;
+ size_t len;
- if (desc->dtargd_ndx < probe->n_args)
- (void) (sdt_argtype_listall(probe, sdt_argtype_callback, desc));
- else
- desc->dtargd_ndx = DTRACE_ARGNONE;
+ if (probe->version != (int)sizeof(*probe)) {
+ printf("ignoring probe %p, version %u expected %u\n",
+ probe, probe->version, (int)sizeof(*probe));
+ return;
+ }
- return;
-}
+ TAILQ_FOREACH(prov, &sdt_prov_list, prov_entry)
+ if (strcmp(prov->name, probe->prov->name) == 0)
+ break;
-static int
-sdt_probe_callback(struct sdt_probe *probe, void *arg __unused)
-{
- struct sdt_provider *prov = probe->prov;
- char mod[64];
- char func[64];
- char name[64];
+ KASSERT(prov != NULL, ("probe defined without a provider"));
+ /* If no module name was specified, use the module filename. */
+ if (*probe->mod == 0) {
+ len = strlcpy(mod, probe->sdtp_lf->filename, sizeof(mod));
+ if (len > 3 && strcmp(mod + len - 3, ".ko") == 0)
+ mod[len - 3] = '\0';
+ } else
+ strlcpy(mod, probe->mod, sizeof(mod));
+
/*
* Unfortunately this is necessary because the Solaris DTrace
* code mixes consts and non-consts with casts to override
* the incompatibilities. On FreeBSD, we use strict warnings
- * in gcc, so we have to respect const vs non-const.
+ * in the C compiler, so we have to respect const vs non-const.
*/
- strlcpy(mod, probe->mod, sizeof(mod));
strlcpy(func, probe->func, sizeof(func));
- strlcpy(name, probe->name, sizeof(name));
+ if (func[0] == '\0')
+ strcpy(func, "none");
- if (dtrace_probe_lookup(prov->id, mod, func, name) != 0)
- return (0);
+ from = probe->name;
+ to = name;
+ for (len = 0; len < (sizeof(name) - 1) && *from != '\0';
+ len++, from++, to++) {
+ if (from[0] == '_' && from[1] == '_') {
+ *to = '-';
+ from++;
+ } else
+ *to = *from;
+ }
+ *to = '\0';
- (void) dtrace_probe_create(prov->id, probe->mod, probe->func,
- probe->name, 1, probe);
+ if (dtrace_probe_lookup(prov->id, mod, func, name) != DTRACE_IDNONE)
+ return;
- return (0);
+ (void)dtrace_probe_create(prov->id, mod, func, name, 1, probe);
}
-static int
-sdt_provider_entry(struct sdt_provider *prov, void *arg)
+/*
+ * Probes are created through the SDT module load/unload hook, so this function
+ * has nothing to do. It only exists because the DTrace provider framework
+ * requires one of provide_probes and provide_module to be defined.
+ */
+static void
+sdt_provide_probes(void *arg, dtrace_probedesc_t *desc)
{
- return (sdt_probe_listall(prov, sdt_probe_callback, NULL));
}
static void
-sdt_provide_probes(void *arg, dtrace_probedesc_t *desc)
+sdt_enable(void *arg __unused, dtrace_id_t id, void *parg)
{
- if (desc != NULL)
+ struct sdt_probe *probe = parg;
+
+ probe->id = id;
+ probe->sdtp_lf->nenabled++;
+}
+
+static void
+sdt_disable(void *arg __unused, dtrace_id_t id, void *parg)
+{
+ struct sdt_probe *probe = parg;
+
+ KASSERT(probe->sdtp_lf->nenabled > 0, ("no probes enabled"));
+
+ probe->id = 0;
+ probe->sdtp_lf->nenabled--;
+}
+
+static void
+sdt_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc)
+{
+ struct sdt_argtype *argtype;
+ struct sdt_probe *probe = parg;
+
+ if (desc->dtargd_ndx >= probe->n_args) {
+ desc->dtargd_ndx = DTRACE_ARGNONE;
return;
+ }
- (void) sdt_provider_listall(sdt_provider_entry, NULL);
+ TAILQ_FOREACH(argtype, &probe->argtype_list, argtype_entry) {
+ if (desc->dtargd_ndx == argtype->ndx) {
+ desc->dtargd_mapping = desc->dtargd_ndx;
+ if (argtype->type == NULL) {
+ desc->dtargd_native[0] = '\0';
+ desc->dtargd_xlate[0] = '\0';
+ continue;
+ }
+ strlcpy(desc->dtargd_native, argtype->type,
+ sizeof(desc->dtargd_native));
+ if (argtype->xtype != NULL)
+ strlcpy(desc->dtargd_xlate, argtype->xtype,
+ sizeof(desc->dtargd_xlate));
+ }
+ }
}
static void
sdt_destroy(void *arg, dtrace_id_t id, void *parg)
{
- /* Nothing to do here. */
}
+/*
+ * Called from the kernel linker when a module is loaded, before
+ * dtrace_module_loaded() is called. This is done so that it's possible to
+ * register new providers when modules are loaded. The DTrace framework
+ * explicitly disallows calling into the framework from the provide_module
+ * provider method, so we cannot do this there.
+ */
static void
-sdt_enable(void *arg, dtrace_id_t id, void *parg)
+sdt_kld_load(void *arg __unused, struct linker_file *lf)
{
- struct sdt_probe *probe = parg;
+ struct sdt_provider **prov, **begin, **end;
+ struct sdt_probe **probe, **p_begin, **p_end;
+ struct sdt_argtype **argtype, **a_begin, **a_end;
- probe->id = id;
+ if (linker_file_lookup_set(lf, "sdt_providers_set", &begin, &end,
+ NULL) == 0) {
+ for (prov = begin; prov < end; prov++)
+ sdt_create_provider(*prov);
+ }
+
+ if (linker_file_lookup_set(lf, "sdt_probes_set", &p_begin, &p_end,
+ NULL) == 0) {
+ for (probe = p_begin; probe < p_end; probe++) {
+ (*probe)->sdtp_lf = lf;
+ sdt_create_probe(*probe);
+ TAILQ_INIT(&(*probe)->argtype_list);
+ }
+ }
+
+ if (linker_file_lookup_set(lf, "sdt_argtypes_set", &a_begin, &a_end,
+ NULL) == 0) {
+ for (argtype = a_begin; argtype < a_end; argtype++) {
+ (*argtype)->probe->n_args++;
+ TAILQ_INSERT_TAIL(&(*argtype)->probe->argtype_list,
+ *argtype, argtype_entry);
+ }
+ }
}
static void
-sdt_disable(void *arg, dtrace_id_t id, void *parg)
+sdt_kld_unload_try(void *arg __unused, struct linker_file *lf, int *error)
{
- struct sdt_probe *probe = parg;
+ struct sdt_provider *prov, **curr, **begin, **end, *tmp;
- probe->id = 0;
+ if (*error != 0)
+ /* We already have an error, so don't do anything. */
+ return;
+ else if (linker_file_lookup_set(lf, "sdt_providers_set", &begin, &end,
+ NULL))
+ /* No DTrace providers are declared in this file. */
+ return;
+
+ /*
+ * Go through all the providers declared in this linker file and
+ * unregister any that aren't declared in another loaded file.
+ */
+ for (curr = begin; curr < end; curr++) {
+ TAILQ_FOREACH_SAFE(prov, &sdt_prov_list, prov_entry, tmp) {
+ if (strcmp(prov->name, (*curr)->name) != 0)
+ continue;
+
+ if (prov->sdt_refs == 1) {
+ if (dtrace_unregister(prov->id) != 0) {
+ *error = 1;
+ return;
+ }
+ TAILQ_REMOVE(&sdt_prov_list, prov, prov_entry);
+ free(prov->name, M_SDT);
+ free(prov, M_SDT);
+ } else
+ prov->sdt_refs--;
+ break;
+ }
+ }
}
static int
-sdt_provider_reg_callback(struct sdt_provider *prov, void *arg __unused)
+sdt_linker_file_cb(linker_file_t lf, void *arg __unused)
{
- return (dtrace_register(prov->name, &sdt_attr, DTRACE_PRIV_USER,
- NULL, &sdt_pops, NULL, (dtrace_provider_id_t *) &prov->id));
+
+ sdt_kld_load(NULL, lf);
+
+ return (0);
}
static void
-sdt_load(void *dummy)
+sdt_load()
{
- /* Create the /dev/dtrace/sdt entry. */
- sdt_cdev = make_dev(&sdt_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
- "dtrace/sdt");
+ TAILQ_INIT(&sdt_prov_list);
+
sdt_probe_func = dtrace_probe;
- sdt_register_callbacks(sdt_provider_reg_callback, NULL,
- sdt_provider_unreg_callback, NULL, sdt_probe_callback, NULL);
-}
+ sdt_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, sdt_kld_load, NULL,
+ EVENTHANDLER_PRI_ANY);
+ sdt_kld_unload_try_tag = EVENTHANDLER_REGISTER(kld_unload_try,
+ sdt_kld_unload_try, NULL, EVENTHANDLER_PRI_ANY);
-static int
-sdt_provider_unreg_callback(struct sdt_provider *prov, void *arg __unused)
-{
- return (dtrace_unregister(prov->id));
+ /* Pick up probes from the kernel and already-loaded linker files. */
+ linker_file_foreach(sdt_linker_file_cb, NULL);
}
static int
sdt_unload()
{
- int error = 0;
+ struct sdt_provider *prov, *tmp;
+ int ret;
+ EVENTHANDLER_DEREGISTER(kld_load, sdt_kld_load_tag);
+ EVENTHANDLER_DEREGISTER(kld_unload_try, sdt_kld_unload_try_tag);
+
sdt_probe_func = sdt_probe_stub;
- sdt_deregister_callbacks();
-
- destroy_dev(sdt_cdev);
+ TAILQ_FOREACH_SAFE(prov, &sdt_prov_list, prov_entry, tmp) {
+ ret = dtrace_unregister(prov->id);
+ if (ret != 0)
+ return (ret);
+ TAILQ_REMOVE(&sdt_prov_list, prov, prov_entry);
+ free(prov->name, M_SDT);
+ free(prov, M_SDT);
+ }
- return (error);
+ return (0);
}
-/* ARGSUSED */
static int
sdt_modevent(module_t mod __unused, int type, void *data __unused)
{
- int error = 0;
switch (type) {
case MOD_LOAD:
- break;
-
case MOD_UNLOAD:
- break;
-
case MOD_SHUTDOWN:
- break;
-
+ return (0);
default:
- error = EOPNOTSUPP;
- break;
-
+ return (EOPNOTSUPP);
}
-
- return (error);
}
-/* ARGSUSED */
-static int
-sdt_open(struct cdev *dev __unused, int oflags __unused, int devtype __unused, struct thread *td __unused)
-{
- return (0);
-}
-
SYSINIT(sdt_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, sdt_load, NULL);
SYSUNINIT(sdt_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, sdt_unload, NULL);
@@ -255,4 +399,3 @@
DEV_MODULE(sdt, sdt_modevent, NULL);
MODULE_VERSION(sdt, 1);
MODULE_DEPEND(sdt, dtrace, 1, 1, 1);
-MODULE_DEPEND(sdt, opensolaris, 1, 1, 1);
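For context on the three linker sets that sdt_kld_load() walks above (sdt_providers_set, sdt_probes_set, sdt_argtypes_set): they are populated by the declaration macros in <sys/sdt.h>. The fragment below is a hypothetical consumer, not code from this commit; the provider "example", the probe "request__start" and the function example_do_request() are made up for illustration, and the sketch assumes the SDT_PROVIDER_DEFINE / SDT_PROBE_DEFINE2 / SDT_PROBE2 macro forms of this era.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sdt.h>

/*
 * Defining a provider emits a record into the sdt_providers_set linker
 * set, which sdt_kld_load() turns into a call to sdt_create_provider().
 */
SDT_PROVIDER_DEFINE(example);

/*
 * A probe with two typed arguments.  The probe record lands in
 * sdt_probes_set and each argument type in sdt_argtypes_set, which is
 * the data sdt_getargdesc() later reports back to the framework.  The
 * double underscore becomes a dash, so dtrace(1) sees
 * example:::request-start.
 */
SDT_PROBE_DEFINE2(example, , , request__start, "int", "int");

static void
example_do_request(int unit, int flags)
{
	/*
	 * Cheap when disabled: the macro only calls through
	 * sdt_probe_func once sdt_enable() has stored a non-zero id.
	 */
	SDT_PROBE2(example, , , request__start, unit, flags);
}

Loading a module built this way now fires the kld_load event handler registered in sdt_load(), so its probes appear without the module calling into the SDT code itself, and sdt_kld_unload_try() tears the provider down again once the last linker file referencing it goes away.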
Modified: trunk/sys/cddl/dev/systrace/systrace.c
===================================================================
--- trunk/sys/cddl/dev/systrace/systrace.c 2018-06-01 22:42:15 UTC (rev 10162)
+++ trunk/sys/cddl/dev/systrace/systrace.c 2018-06-01 22:42:24 UTC (rev 10163)
@@ -21,7 +21,7 @@
*
* Portions Copyright 2006-2008 John Birrell jb at freebsd.org
*
- * $FreeBSD: release/9.2.0/sys/cddl/dev/systrace/systrace.c 220437 2011-04-08 06:27:43Z art $
+ * $FreeBSD: stable/10/sys/cddl/dev/systrace/systrace.c 294368 2016-01-20 01:09:53Z jhb $
*
*/
@@ -61,24 +61,37 @@
#ifdef LINUX_SYSTRACE
#if defined(__amd64__)
-#include <amd64/linux32/linux.h>
-#include <amd64/linux32/linux32_proto.h>
-#include <amd64/linux32/linux32_syscalls.c>
-#include <amd64/linux32/linux32_systrace_args.c>
-#define MODNAME "linux32"
+#include <amd64/linux/linux.h>
+#include <amd64/linux/linux_proto.h>
+#include <amd64/linux/linux_syscalls.c>
+#include <amd64/linux/linux_systrace_args.c>
#elif defined(__i386__)
#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <i386/linux/linux_syscalls.c>
#include <i386/linux/linux_systrace_args.c>
-#define MODNAME "linux"
#else
#error Only i386 and amd64 are supported.
#endif
+#define MODNAME "linux"
extern struct sysent linux_sysent[];
#define MAXSYSCALL LINUX_SYS_MAXSYSCALL
#define SYSCALLNAMES linux_syscallnames
#define SYSENT linux_sysent
+#elif defined(LINUX32_SYSTRACE)
+#if defined(__amd64__)
+#include <amd64/linux32/linux.h>
+#include <amd64/linux32/linux32_proto.h>
+#include <amd64/linux32/linux32_syscalls.c>
+#include <amd64/linux32/linux32_systrace_args.c>
+#else
+#error Only amd64 is supported.
+#endif
+#define MODNAME "linux32"
+extern struct sysent linux32_sysent[];
+#define MAXSYSCALL LINUX32_SYS_MAXSYSCALL
+#define SYSCALLNAMES linux32_syscallnames
+#define SYSENT linux32_sysent
#elif defined(FREEBSD32_SYSTRACE)
/*
* The syscall arguments are processed into a DTrace argument array
@@ -104,6 +117,7 @@
#define MAXSYSCALL SYS_MAXSYSCALL
#define SYSCALLNAMES syscallnames
#define SYSENT sysent
+#define NATIVE_ABI
#endif
#define PROVNAME "syscall"
@@ -133,7 +147,7 @@
static struct cdevsw systrace_cdevsw = {
.d_version = D_VERSION,
.d_open = systrace_open,
-#ifdef LINUX_SYSTRACE
+#ifndef NATIVE_ABI
.d_name = "systrace_" MODNAME,
#else
.d_name = "systrace",
@@ -169,7 +183,10 @@
static struct cdev *systrace_cdev;
static dtrace_provider_id_t systrace_id;
-#if !defined(LINUX_SYSTRACE)
+typedef void (*systrace_dtrace_probe_t)(dtrace_id_t, uintptr_t, uintptr_t,
+ uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+
+#ifdef NATIVE_ABI
/*
* Probe callback function.
*
@@ -181,6 +198,7 @@
systrace_probe(u_int32_t id, int sysnum, struct sysent *sysent, void *params,
int ret)
{
+ systrace_dtrace_probe_t probe;
int n_args = 0;
u_int64_t uargs[8];
@@ -212,8 +230,11 @@
}
/* Process the probe using the converted arguments. */
- dtrace_probe(id, uargs[0], uargs[1], uargs[2], uargs[3], uargs[4]);
+ probe = (systrace_dtrace_probe_t)dtrace_probe;
+ probe(id, uargs[0], uargs[1], uargs[2], uargs[3], uargs[4], uargs[5],
+ uargs[6], uargs[7]);
}
+
#endif
static void
@@ -221,8 +242,12 @@
{
int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg);
- systrace_setargdesc(sysnum, desc->dtargd_ndx, desc->dtargd_native,
- sizeof(desc->dtargd_native));
+ if (SYSTRACE_ISENTRY((uintptr_t)parg))
+ systrace_entry_setargdesc(sysnum, desc->dtargd_ndx,
+ desc->dtargd_native, sizeof(desc->dtargd_native));
+ else
+ systrace_return_setargdesc(sysnum, desc->dtargd_ndx,
+ desc->dtargd_native, sizeof(desc->dtargd_native));
if (desc->dtargd_native[0] == '\0')
desc->dtargd_ndx = DTRACE_ARGNONE;
@@ -304,7 +329,7 @@
NULL, &systrace_pops, NULL, &systrace_id) != 0)
return;
-#if !defined(LINUX_SYSTRACE)
+#ifdef NATIVE_ABI
systrace_probe_func = systrace_probe;
#endif
}
@@ -318,7 +343,7 @@
if ((error = dtrace_unregister(systrace_id)) != 0)
return (error);
-#if !defined(LINUX_SYSTRACE)
+#ifdef NATIVE_ABI
systrace_probe_func = NULL;
#endif
@@ -360,6 +385,16 @@
SYSUNINIT(systrace_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, systrace_unload, NULL);
#ifdef LINUX_SYSTRACE
+DEV_MODULE(systrace_linux, systrace_modevent, NULL);
+MODULE_VERSION(systrace_linux, 1);
+#ifdef __amd64__
+MODULE_DEPEND(systrace_linux, linux64, 1, 1, 1);
+#else
+MODULE_DEPEND(systrace_linux, linux, 1, 1, 1);
+#endif
+MODULE_DEPEND(systrace_linux, dtrace, 1, 1, 1);
+MODULE_DEPEND(systrace_linux, opensolaris, 1, 1, 1);
+#elif defined(LINUX32_SYSTRACE)
DEV_MODULE(systrace_linux32, systrace_modevent, NULL);
MODULE_VERSION(systrace_linux32, 1);
MODULE_DEPEND(systrace_linux32, linux, 1, 1, 1);
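One remark on the widened probe call added to systrace_probe(): dtrace_probe() is declared with five uintptr_t argument slots, but system calls can carry up to eight converted arguments, so the function is now invoked through the eight-slot systrace_dtrace_probe_t pointer type. The sketch below is a stand-alone user-space illustration of that pointer-cast pattern, not code from this commit; calling through a mismatched function-pointer type is outside strict ISO C and works here because the i386/amd64 calling conventions simply ignore the surplus arguments.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for dtrace_probe(): consumes only five argument values. */
static void
narrow_probe(uint32_t id, uintptr_t a0, uintptr_t a1, uintptr_t a2,
    uintptr_t a3, uintptr_t a4)
{
	printf("id=%u args=%lu %lu %lu %lu %lu\n", id,
	    (unsigned long)a0, (unsigned long)a1, (unsigned long)a2,
	    (unsigned long)a3, (unsigned long)a4);
}

/* Wider pointer type, analogous to systrace_dtrace_probe_t. */
typedef void (*wide_probe_t)(uint32_t, uintptr_t, uintptr_t, uintptr_t,
    uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);

int
main(void)
{
	wide_probe_t probe;

	/* Extra trailing arguments are passed but never read. */
	probe = (wide_probe_t)narrow_probe;
	probe(1, 10, 11, 12, 13, 14, 15, 16, 17);
	return (0);
}

The same trade-off applies in the kernel: the prototype mismatch looks alarming, but on the supported architectures the additional values only occupy otherwise unused argument registers or stack slots.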