[Midnightbsd-cvs] src [9949] trunk/sys/kern: sync with freebsd
laffer1 at midnightbsd.org
Fri May 25 16:59:47 EDT 2018
Revision: 9949
http://svnweb.midnightbsd.org/src/?rev=9949
Author: laffer1
Date: 2018-05-25 16:59:46 -0400 (Fri, 25 May 2018)
Log Message:
-----------
sync with freebsd
Modified Paths:
--------------
trunk/sys/kern/kern_syscalls.c
trunk/sys/kern/kern_sysctl.c
trunk/sys/kern/kern_tc.c
trunk/sys/kern/kern_thr.c
trunk/sys/kern/kern_thread.c
Property Changed:
----------------
trunk/sys/kern/clock_if.m
trunk/sys/kern/genassym.sh
Index: trunk/sys/kern/clock_if.m
===================================================================
--- trunk/sys/kern/clock_if.m 2018-05-25 20:58:03 UTC (rev 9948)
+++ trunk/sys/kern/clock_if.m 2018-05-25 20:59:46 UTC (rev 9949)
Property changes on: trunk/sys/kern/clock_if.m
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Index: trunk/sys/kern/genassym.sh
===================================================================
--- trunk/sys/kern/genassym.sh 2018-05-25 20:58:03 UTC (rev 9948)
+++ trunk/sys/kern/genassym.sh 2018-05-25 20:59:46 UTC (rev 9949)
Property changes on: trunk/sys/kern/genassym.sh
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Modified: trunk/sys/kern/kern_syscalls.c
===================================================================
--- trunk/sys/kern/kern_syscalls.c 2018-05-25 20:58:03 UTC (rev 9948)
+++ trunk/sys/kern/kern_syscalls.c 2018-05-25 20:59:46 UTC (rev 9949)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
/*-
* Copyright (c) 1999 Assar Westerlund
* All rights reserved.
@@ -25,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/kern/kern_syscalls.c 214181 2010-10-21 20:31:50Z delphij $");
#include <sys/param.h>
#include <sys/kernel.h>
Modified: trunk/sys/kern/kern_sysctl.c
===================================================================
--- trunk/sys/kern/kern_sysctl.c 2018-05-25 20:58:03 UTC (rev 9948)
+++ trunk/sys/kern/kern_sysctl.c 2018-05-25 20:59:46 UTC (rev 9949)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
/*-
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
@@ -36,7 +37,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/kern/kern_sysctl.c 324749 2017-10-19 08:00:34Z avg $");
#include "opt_capsicum.h"
#include "opt_compat.h"
@@ -45,7 +46,7 @@
#include <sys/param.h>
#include <sys/fail.h>
#include <sys/systm.h>
-#include <sys/capability.h>
+#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
@@ -142,6 +143,8 @@
struct sysctl_oid_list *parent = oidp->oid_parent;
struct sysctl_oid *p;
struct sysctl_oid *q;
+ int oid_number;
+ int timeout = 2;
/*
* First check if another oid with the same name already
@@ -158,37 +161,66 @@
return;
}
}
+ /* get current OID number */
+ oid_number = oidp->oid_number;
+
+#if (OID_AUTO >= 0)
+#error "OID_AUTO is expected to be a negative value"
+#endif
/*
- * If this oid has a number OID_AUTO, give it a number which
- * is greater than any current oid.
+ * Any negative OID number qualifies as OID_AUTO. Valid OID
+ * numbers should always be positive.
+ *
* NOTE: DO NOT change the starting value here, change it in
* <sys/sysctl.h>, and make sure it is at least 256 to
- * accomodate e.g. net.inet.raw as a static sysctl node.
+ * accommodate e.g. net.inet.raw as a static sysctl node.
*/
- if (oidp->oid_number == OID_AUTO) {
- static int newoid = CTL_AUTO_START;
+ if (oid_number < 0) {
+ static int newoid;
- oidp->oid_number = newoid++;
- if (newoid == 0x7fffffff)
- panic("out of oids");
+ /*
+ * By decrementing the next OID number we spend less
+ * time inserting the OIDs into a sorted list.
+ */
+ if (--newoid < CTL_AUTO_START)
+ newoid = 0x7fffffff;
+
+ oid_number = newoid;
}
-#if 0
- else if (oidp->oid_number >= CTL_AUTO_START) {
- /* do not panic; this happens when unregistering sysctl sets */
- printf("static sysctl oid too high: %d", oidp->oid_number);
- }
-#endif
/*
- * Insert the oid into the parent's list in order.
+ * Insert the OID into the parent's list sorted by OID number.
*/
+retry:
q = NULL;
SLIST_FOREACH(p, parent, oid_link) {
- if (oidp->oid_number < p->oid_number)
+ /* check if the current OID number is in use */
+ if (oid_number == p->oid_number) {
+ /* get the next valid OID number */
+ if (oid_number < CTL_AUTO_START ||
+ oid_number == 0x7fffffff) {
+ /* wraparound - restart */
+ oid_number = CTL_AUTO_START;
+ /* don't loop forever */
+ if (!timeout--)
+ panic("sysctl: Out of OID numbers\n");
+ goto retry;
+ } else {
+ oid_number++;
+ }
+ } else if (oid_number < p->oid_number)
break;
q = p;
}
- if (q)
+ /* check for non-auto OID number collision */
+ if (oidp->oid_number >= 0 && oidp->oid_number < CTL_AUTO_START &&
+ oid_number >= CTL_AUTO_START) {
+ printf("sysctl: OID number(%d) is already in use for '%s'\n",
+ oidp->oid_number, oidp->oid_name);
+ }
+ /* update the OID number, if any */
+ oidp->oid_number = oid_number;
+ if (q != NULL)
SLIST_INSERT_AFTER(q, oidp, oid_link);
else
SLIST_INSERT_HEAD(parent, oidp, oid_link);
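
For reference, the OID_AUTO scheme in the hunk above can be modeled in isolation: any negative oid_number draws from a static counter that counts down through [CTL_AUTO_START, 0x7fffffff], wrapping back to the top when it reaches the floor, and the sorted-insert loop then bumps past any collisions. A minimal stand-alone sketch of the counter (the function name is illustrative, not kernel API):

#include <limits.h>

#define CTL_AUTO_START	0x100		/* same floor <sys/sysctl.h> uses */

/* Hypothetical model of the decrementing OID_AUTO counter. */
static int
next_auto_oid(void)
{
	static int newoid;		/* starts at 0, as in the kernel */

	if (--newoid < CTL_AUTO_START)
		newoid = INT_MAX;	/* 0x7fffffff: wrap to the top */
	return (newoid);
}

The first call yields 0x7fffffff and later calls count down from there, which is the cheaper sorted insertion the comment in the hunk refers to.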
@@ -195,6 +227,37 @@
}
void
+sysctl_register_disabled_oid(struct sysctl_oid *oidp)
+{
+
+ /*
+ * Mark the leaf as dormant if it's not to be immediately enabled.
+ * We do not disable nodes as they can be shared between modules
+ * and it is always safe to access a node.
+ */
+ KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0,
+ ("internal flag is set in oid_kind"));
+ if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
+ oidp->oid_kind |= CTLFLAG_DORMANT;
+ sysctl_register_oid(oidp);
+}
+
+void
+sysctl_enable_oid(struct sysctl_oid *oidp)
+{
+
+ SYSCTL_ASSERT_XLOCKED();
+ if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
+ KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0,
+ ("sysctl node is marked as dormant"));
+ return;
+ }
+ KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) != 0,
+ ("enabling already enabled sysctl oid"));
+ oidp->oid_kind &= ~CTLFLAG_DORMANT;
+}
+
+void
sysctl_unregister_oid(struct sysctl_oid *oidp)
{
struct sysctl_oid *p;
@@ -264,7 +327,7 @@
}
/*
* Restore deregistered entries, either from the end,
- * or from the place where error occured.
+ * or from the place where error occurred.
* e contains the entry that was not unregistered
*/
if (error)
@@ -398,7 +461,8 @@
if (oidp == NULL)
return(EINVAL);
if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
- printf("can't remove non-dynamic nodes!\n");
+ printf("Warning: can't remove non-dynamic nodes (%s)!\n",
+ oidp->oid_name);
return (EINVAL);
}
/*
@@ -412,8 +476,12 @@
if (oidp->oid_refcnt == 1) {
SLIST_FOREACH_SAFE(p,
SYSCTL_CHILDREN(oidp), oid_link, tmp) {
- if (!recurse)
+ if (!recurse) {
+ printf("Warning: failed attempt to "
+ "remove oid %s with child %s\n",
+ oidp->oid_name, p->oid_name);
return (ENOTEMPTY);
+ }
error = sysctl_remove_oid_locked(p, del,
recurse);
if (error)
@@ -732,7 +800,7 @@
*next = oidp->oid_number;
*oidpp = oidp;
- if (oidp->oid_kind & CTLFLAG_SKIP)
+ if ((oidp->oid_kind & (CTLFLAG_SKIP | CTLFLAG_DORMANT)) != 0)
continue;
if (!namelen) {
@@ -1384,6 +1452,8 @@
}
lsp = SYSCTL_CHILDREN(oid);
} else if (indx == namelen) {
+ if ((oid->oid_kind & CTLFLAG_DORMANT) != 0)
+ return (ENOENT);
*noid = oid;
if (nindx != NULL)
*nindx = indx;
@@ -1487,7 +1557,10 @@
#endif
oid->oid_running++;
SYSCTL_XUNLOCK();
-
+#ifdef VIMAGE
+ if ((oid->oid_kind & CTLFLAG_VNET) && arg1 != NULL)
+ arg1 = (void *)(curvnet->vnet_data_base + (uintptr_t)arg1);
+#endif
if (!(oid->oid_kind & CTLFLAG_MPSAFE))
mtx_lock(&Giant);
error = oid->oid_handler(oid, arg1, arg2, req);
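
The sysctl_register_disabled_oid()/sysctl_enable_oid() pair added above lets a subsystem publish a leaf only once it is ready to be queried: dormant leaves are skipped by the iteration code (the CTLFLAG_SKIP | CTLFLAG_DORMANT test) and name lookup returns ENOENT for them. A hedged sketch of the intended call sequence with a placeholder leaf; note that sysctl_enable_oid() asserts the sysctl exclusive lock, which kern_sysctl.c manages internally:

/* Illustrative only; not an in-tree consumer of this API. */
static struct sysctl_oid foo_oid;	/* a fully initialized non-node leaf */

static void
foo_attach(void)
{
	/* Registered, but invisible to sysctl(8) and lookups for now. */
	sysctl_register_disabled_oid(&foo_oid);
}

static void
foo_ready(void)
{
	/* SYSCTL_ASSERT_XLOCKED() must hold here. */
	sysctl_enable_oid(&foo_oid);	/* clears CTLFLAG_DORMANT */
}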
Modified: trunk/sys/kern/kern_tc.c
===================================================================
--- trunk/sys/kern/kern_tc.c 2018-05-25 20:58:03 UTC (rev 9948)
+++ trunk/sys/kern/kern_tc.c 2018-05-25 20:59:46 UTC (rev 9949)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
/*-
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
@@ -5,19 +6,30 @@
* can do whatever you want with this stuff. If we meet some day, and you think
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
+ *
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Julien Ridoux at the University
+ * of Melbourne under sponsorship from the FreeBSD Foundation.
*/
#include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/kern/kern_tc.c 302234 2016-06-27 21:50:30Z bdrewery $");
#include "opt_compat.h"
#include "opt_ntp.h"
+#include "opt_ffclock.h"
#include <sys/param.h>
#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
+#include <sys/timeffc.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>
@@ -107,6 +119,23 @@
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
&timestepwarnings, 0, "Log time steps");
+struct bintime bt_timethreshold;
+struct bintime bt_tickthreshold;
+sbintime_t sbt_timethreshold;
+sbintime_t sbt_tickthreshold;
+struct bintime tc_tick_bt;
+sbintime_t tc_tick_sbt;
+int tc_precexp;
+int tc_timepercentage = TC_DEFAULTPERC;
+TUNABLE_INT("kern.timecounter.alloweddeviation", &tc_timepercentage);
+static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
+SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
+ sysctl_kern_timecounter_adjprecision, "I",
+ "Allowed time interval deviation in percents");
+
+static int tc_chosen; /* Non-zero if a specific tc was chosen via sysctl. */
+
static void tc_windup(void);
static void cpu_tick_calibrate(int);
@@ -115,6 +144,7 @@
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
+#ifndef __mips__
#ifdef SCTL_MASK32
int tv[2];
@@ -124,6 +154,7 @@
return SYSCTL_OUT(req, tv, sizeof(tv));
} else
#endif
+#endif
return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}
@@ -167,7 +198,145 @@
* the comment in <sys/time.h> for a description of these 12 functions.
*/
+#ifdef FFCLOCK
void
+fbclock_binuptime(struct bintime *bt)
+{
+ struct timehands *th;
+ unsigned int gen;
+
+ do {
+ th = timehands;
+ gen = th->th_generation;
+ *bt = th->th_offset;
+ bintime_addx(bt, th->th_scale * tc_delta(th));
+ } while (gen == 0 || gen != th->th_generation);
+}
+
+void
+fbclock_nanouptime(struct timespec *tsp)
+{
+ struct bintime bt;
+
+ fbclock_binuptime(&bt);
+ bintime2timespec(&bt, tsp);
+}
+
+void
+fbclock_microuptime(struct timeval *tvp)
+{
+ struct bintime bt;
+
+ fbclock_binuptime(&bt);
+ bintime2timeval(&bt, tvp);
+}
+
+void
+fbclock_bintime(struct bintime *bt)
+{
+
+ fbclock_binuptime(bt);
+ bintime_add(bt, &boottimebin);
+}
+
+void
+fbclock_nanotime(struct timespec *tsp)
+{
+ struct bintime bt;
+
+ fbclock_bintime(&bt);
+ bintime2timespec(&bt, tsp);
+}
+
+void
+fbclock_microtime(struct timeval *tvp)
+{
+ struct bintime bt;
+
+ fbclock_bintime(&bt);
+ bintime2timeval(&bt, tvp);
+}
+
+void
+fbclock_getbinuptime(struct bintime *bt)
+{
+ struct timehands *th;
+ unsigned int gen;
+
+ do {
+ th = timehands;
+ gen = th->th_generation;
+ *bt = th->th_offset;
+ } while (gen == 0 || gen != th->th_generation);
+}
+
+void
+fbclock_getnanouptime(struct timespec *tsp)
+{
+ struct timehands *th;
+ unsigned int gen;
+
+ do {
+ th = timehands;
+ gen = th->th_generation;
+ bintime2timespec(&th->th_offset, tsp);
+ } while (gen == 0 || gen != th->th_generation);
+}
+
+void
+fbclock_getmicrouptime(struct timeval *tvp)
+{
+ struct timehands *th;
+ unsigned int gen;
+
+ do {
+ th = timehands;
+ gen = th->th_generation;
+ bintime2timeval(&th->th_offset, tvp);
+ } while (gen == 0 || gen != th->th_generation);
+}
+
+void
+fbclock_getbintime(struct bintime *bt)
+{
+ struct timehands *th;
+ unsigned int gen;
+
+ do {
+ th = timehands;
+ gen = th->th_generation;
+ *bt = th->th_offset;
+ } while (gen == 0 || gen != th->th_generation);
+ bintime_add(bt, &boottimebin);
+}
+
+void
+fbclock_getnanotime(struct timespec *tsp)
+{
+ struct timehands *th;
+ unsigned int gen;
+
+ do {
+ th = timehands;
+ gen = th->th_generation;
+ *tsp = th->th_nanotime;
+ } while (gen == 0 || gen != th->th_generation);
+}
+
+void
+fbclock_getmicrotime(struct timeval *tvp)
+{
+ struct timehands *th;
+ unsigned int gen;
+
+ do {
+ th = timehands;
+ gen = th->th_generation;
+ *tvp = th->th_microtime;
+ } while (gen == 0 || gen != th->th_generation);
+}
+#else /* !FFCLOCK */
+void
binuptime(struct bintime *bt)
{
struct timehands *th;
@@ -303,8 +472,513 @@
*tvp = th->th_microtime;
} while (gen == 0 || gen != th->th_generation);
}
+#endif /* FFCLOCK */
+#ifdef FFCLOCK
/*
+ * Support for feed-forward synchronization algorithms. This is heavily inspired
+ * by the timehands mechanism but kept independent from it. *_windup() functions
+ * have some connection to avoid accessing the timecounter hardware more than
+ * necessary.
+ */
+
+/* Feed-forward clock estimates kept updated by the synchronization daemon. */
+struct ffclock_estimate ffclock_estimate;
+struct bintime ffclock_boottime; /* Feed-forward boot time estimate. */
+uint32_t ffclock_status; /* Feed-forward clock status. */
+int8_t ffclock_updated; /* New estimates are available. */
+struct mtx ffclock_mtx; /* Mutex on ffclock_estimate. */
+
+struct fftimehands {
+ struct ffclock_estimate cest;
+ struct bintime tick_time;
+ struct bintime tick_time_lerp;
+ ffcounter tick_ffcount;
+ uint64_t period_lerp;
+ volatile uint8_t gen;
+ struct fftimehands *next;
+};
+
+#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))
+
+static struct fftimehands ffth[10];
+static struct fftimehands *volatile fftimehands = ffth;
+
+static void
+ffclock_init(void)
+{
+ struct fftimehands *cur;
+ struct fftimehands *last;
+
+ memset(ffth, 0, sizeof(ffth));
+
+ last = ffth + NUM_ELEMENTS(ffth) - 1;
+ for (cur = ffth; cur < last; cur++)
+ cur->next = cur + 1;
+ last->next = ffth;
+
+ ffclock_updated = 0;
+ ffclock_status = FFCLOCK_STA_UNSYNC;
+ mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
+}
+
+/*
+ * Reset the feed-forward clock estimates. Called from inittodr() to get things
+ * kick started and uses the timecounter nominal frequency as a first period
+ * estimate. Note: this function may be called several times just after boot.
+ * Note: this is the only function that sets the value of boot time for the
+ * monotonic (i.e. uptime) version of the feed-forward clock.
+ */
+void
+ffclock_reset_clock(struct timespec *ts)
+{
+ struct timecounter *tc;
+ struct ffclock_estimate cest;
+
+ tc = timehands->th_counter;
+ memset(&cest, 0, sizeof(struct ffclock_estimate));
+
+ timespec2bintime(ts, &ffclock_boottime);
+ timespec2bintime(ts, &(cest.update_time));
+ ffclock_read_counter(&cest.update_ffcount);
+ cest.leapsec_next = 0;
+ cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
+ cest.errb_abs = 0;
+ cest.errb_rate = 0;
+ cest.status = FFCLOCK_STA_UNSYNC;
+ cest.leapsec_total = 0;
+ cest.leapsec = 0;
+
+ mtx_lock(&ffclock_mtx);
+ bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
+ ffclock_updated = INT8_MAX;
+ mtx_unlock(&ffclock_mtx);
+
+ printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
+ (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
+ (unsigned long)ts->tv_nsec);
+}
+
+/*
+ * Sub-routine to convert a time interval measured in RAW counter units to time
+ * in seconds stored in bintime format.
+ * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
+ * larger than the max value of u_int (on 32 bit architecture). Loop to consume
+ * extra cycles.
+ */
+static void
+ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
+{
+ struct bintime bt2;
+ ffcounter delta, delta_max;
+
+ delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
+ bintime_clear(bt);
+ do {
+ if (ffdelta > delta_max)
+ delta = delta_max;
+ else
+ delta = ffdelta;
+ bt2.sec = 0;
+ bt2.frac = period;
+ bintime_mul(&bt2, (unsigned int)delta);
+ bintime_add(bt, &bt2);
+ ffdelta -= delta;
+ } while (ffdelta > 0);
+}
+
+/*
+ * Update the fftimehands.
+ * Push the tick ffcount and time(s) forward based on current clock estimate.
+ * The conversion from ffcounter to bintime relies on the difference clock
+ * principle, whose accuracy relies on computing small time intervals. If a new
+ * clock estimate has been passed by the synchronisation daemon, make it
+ * current, and compute the linear interpolation for monotonic time if needed.
+ */
+static void
+ffclock_windup(unsigned int delta)
+{
+ struct ffclock_estimate *cest;
+ struct fftimehands *ffth;
+ struct bintime bt, gap_lerp;
+ ffcounter ffdelta;
+ uint64_t frac;
+ unsigned int polling;
+ uint8_t forward_jump, ogen;
+
+ /*
+ * Pick the next timehand, copy current ffclock estimates and move tick
+ * times and counter forward.
+ */
+ forward_jump = 0;
+ ffth = fftimehands->next;
+ ogen = ffth->gen;
+ ffth->gen = 0;
+ cest = &ffth->cest;
+ bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
+ ffdelta = (ffcounter)delta;
+ ffth->period_lerp = fftimehands->period_lerp;
+
+ ffth->tick_time = fftimehands->tick_time;
+ ffclock_convert_delta(ffdelta, cest->period, &bt);
+ bintime_add(&ffth->tick_time, &bt);
+
+ ffth->tick_time_lerp = fftimehands->tick_time_lerp;
+ ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
+ bintime_add(&ffth->tick_time_lerp, &bt);
+
+ ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;
+
+ /*
+ * Assess the status of the clock, if the last update is too old, it is
+ * likely the synchronisation daemon is dead and the clock is free
+ * running.
+ */
+ if (ffclock_updated == 0) {
+ ffdelta = ffth->tick_ffcount - cest->update_ffcount;
+ ffclock_convert_delta(ffdelta, cest->period, &bt);
+ if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
+ ffclock_status |= FFCLOCK_STA_UNSYNC;
+ }
+
+ /*
+ * If available, grab updated clock estimates and make them current.
+ * Recompute time at this tick using the updated estimates. The clock
+ * estimates passed by the feed-forward synchronisation daemon may result
+ * in time conversion that is not monotonically increasing (just after
+ * the update). time_lerp is a particular linear interpolation over the
+ * synchronisation algo polling period that ensures monotonicity for the
+ * clock ids requesting it.
+ */
+ if (ffclock_updated > 0) {
+ bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
+ ffdelta = ffth->tick_ffcount - cest->update_ffcount;
+ ffth->tick_time = cest->update_time;
+ ffclock_convert_delta(ffdelta, cest->period, &bt);
+ bintime_add(&ffth->tick_time, &bt);
+
+ /* ffclock_reset sets ffclock_updated to INT8_MAX */
+ if (ffclock_updated == INT8_MAX)
+ ffth->tick_time_lerp = ffth->tick_time;
+
+ if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
+ forward_jump = 1;
+ else
+ forward_jump = 0;
+
+ bintime_clear(&gap_lerp);
+ if (forward_jump) {
+ gap_lerp = ffth->tick_time;
+ bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
+ } else {
+ gap_lerp = ffth->tick_time_lerp;
+ bintime_sub(&gap_lerp, &ffth->tick_time);
+ }
+
+ /*
+ * The reset from the RTC clock may be far from accurate, and
+ * reducing the gap between real time and interpolated time
+ * could take a very long time if the interpolated clock insists
+ * on strict monotonicity. The clock is reset under very strict
+ * conditions (kernel time is known to be wrong and
+ * synchronization daemon has been restarted recently).
+ * ffclock_boottime absorbs the jump to ensure boot time is
+ * correct and uptime functions stay consistent.
+ */
+ if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
+ ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
+ ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
+ if (forward_jump)
+ bintime_add(&ffclock_boottime, &gap_lerp);
+ else
+ bintime_sub(&ffclock_boottime, &gap_lerp);
+ ffth->tick_time_lerp = ffth->tick_time;
+ bintime_clear(&gap_lerp);
+ }
+
+ ffclock_status = cest->status;
+ ffth->period_lerp = cest->period;
+
+ /*
+ * Compute corrected period used for the linear interpolation of
+ * time. The rate of linear interpolation is capped to 5000PPM
+ * (5ms/s).
+ */
+ if (bintime_isset(&gap_lerp)) {
+ ffdelta = cest->update_ffcount;
+ ffdelta -= fftimehands->cest.update_ffcount;
+ ffclock_convert_delta(ffdelta, cest->period, &bt);
+ polling = bt.sec;
+ bt.sec = 0;
+ bt.frac = 5000000 * (uint64_t)18446744073LL;
+ bintime_mul(&bt, polling);
+ if (bintime_cmp(&gap_lerp, &bt, >))
+ gap_lerp = bt;
+
+ /* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
+ frac = 0;
+ if (gap_lerp.sec > 0) {
+ frac -= 1;
+ frac /= ffdelta / gap_lerp.sec;
+ }
+ frac += gap_lerp.frac / ffdelta;
+
+ if (forward_jump)
+ ffth->period_lerp += frac;
+ else
+ ffth->period_lerp -= frac;
+ }
+
+ ffclock_updated = 0;
+ }
+ if (++ogen == 0)
+ ogen = 1;
+ ffth->gen = ogen;
+ fftimehands = ffth;
+}
+
+/*
+ * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
+ * the old and new hardware counter cannot be read simultaneously. tc_windup()
+ * does read the two counters 'back to back', but a few cycles are effectively
+ * lost, and not accumulated in tick_ffcount. This is a fairly radical
+ * operation for a feed-forward synchronization daemon, and it is its job not to
+ * push irrelevant data to the kernel. Because there is no locking here, we
+ * simply ignore any pending or next update to give the daemon a chance to
+ * realize the counter has changed.
+ */
+static void
+ffclock_change_tc(struct timehands *th)
+{
+ struct fftimehands *ffth;
+ struct ffclock_estimate *cest;
+ struct timecounter *tc;
+ uint8_t ogen;
+
+ tc = th->th_counter;
+ ffth = fftimehands->next;
+ ogen = ffth->gen;
+ ffth->gen = 0;
+
+ cest = &ffth->cest;
+ bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
+ cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
+ cest->errb_abs = 0;
+ cest->errb_rate = 0;
+ cest->status |= FFCLOCK_STA_UNSYNC;
+
+ ffth->tick_ffcount = fftimehands->tick_ffcount;
+ ffth->tick_time_lerp = fftimehands->tick_time_lerp;
+ ffth->tick_time = fftimehands->tick_time;
+ ffth->period_lerp = cest->period;
+
+ /* Do not lock but ignore next update from synchronization daemon. */
+ ffclock_updated--;
+
+ if (++ogen == 0)
+ ogen = 1;
+ ffth->gen = ogen;
+ fftimehands = ffth;
+}
+
+/*
+ * Retrieve feed-forward counter and time of last kernel tick.
+ */
+void
+ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
+{
+ struct fftimehands *ffth;
+ uint8_t gen;
+
+ /*
+ * No locking but check generation has not changed. Also need to make
+ * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
+ */
+ do {
+ ffth = fftimehands;
+ gen = ffth->gen;
+ if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
+ *bt = ffth->tick_time_lerp;
+ else
+ *bt = ffth->tick_time;
+ *ffcount = ffth->tick_ffcount;
+ } while (gen == 0 || gen != ffth->gen);
+}
+
+/*
+ * Absolute clock conversion. Low level function to convert ffcounter to
+ * bintime. The ffcounter is converted using the current ffclock period estimate
+ * or the "interpolated period" to ensure monotonicity.
+ * NOTE: this conversion may have been deferred, and the clock updated since the
+ * hardware counter has been read.
+ */
+void
+ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
+{
+ struct fftimehands *ffth;
+ struct bintime bt2;
+ ffcounter ffdelta;
+ uint8_t gen;
+
+ /*
+ * No locking but check generation has not changed. Also need to make
+ * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
+ */
+ do {
+ ffth = fftimehands;
+ gen = ffth->gen;
+ if (ffcount > ffth->tick_ffcount)
+ ffdelta = ffcount - ffth->tick_ffcount;
+ else
+ ffdelta = ffth->tick_ffcount - ffcount;
+
+ if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
+ *bt = ffth->tick_time_lerp;
+ ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
+ } else {
+ *bt = ffth->tick_time;
+ ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
+ }
+
+ if (ffcount > ffth->tick_ffcount)
+ bintime_add(bt, &bt2);
+ else
+ bintime_sub(bt, &bt2);
+ } while (gen == 0 || gen != ffth->gen);
+}
+
+/*
+ * Difference clock conversion.
+ * Low level function to convert a time interval measured in RAW counter units
+ * into bintime. The difference clock allows measuring small intervals much more
+ * reliably than the absolute clock.
+ */
+void
+ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
+{
+ struct fftimehands *ffth;
+ uint8_t gen;
+
+ /* No locking but check generation has not changed. */
+ do {
+ ffth = fftimehands;
+ gen = ffth->gen;
+ ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
+ } while (gen == 0 || gen != ffth->gen);
+}
+
+/*
+ * Access to current ffcounter value.
+ */
+void
+ffclock_read_counter(ffcounter *ffcount)
+{
+ struct timehands *th;
+ struct fftimehands *ffth;
+ unsigned int gen, delta;
+
+ /*
+ * ffclock_windup() called from tc_windup(), safe to rely on
+ * th->th_generation only, for correct delta and ffcounter.
+ */
+ do {
+ th = timehands;
+ gen = th->th_generation;
+ ffth = fftimehands;
+ delta = tc_delta(th);
+ *ffcount = ffth->tick_ffcount;
+ } while (gen == 0 || gen != th->th_generation);
+
+ *ffcount += delta;
+}
+
+void
+binuptime(struct bintime *bt)
+{
+
+ binuptime_fromclock(bt, sysclock_active);
+}
+
+void
+nanouptime(struct timespec *tsp)
+{
+
+ nanouptime_fromclock(tsp, sysclock_active);
+}
+
+void
+microuptime(struct timeval *tvp)
+{
+
+ microuptime_fromclock(tvp, sysclock_active);
+}
+
+void
+bintime(struct bintime *bt)
+{
+
+ bintime_fromclock(bt, sysclock_active);
+}
+
+void
+nanotime(struct timespec *tsp)
+{
+
+ nanotime_fromclock(tsp, sysclock_active);
+}
+
+void
+microtime(struct timeval *tvp)
+{
+
+ microtime_fromclock(tvp, sysclock_active);
+}
+
+void
+getbinuptime(struct bintime *bt)
+{
+
+ getbinuptime_fromclock(bt, sysclock_active);
+}
+
+void
+getnanouptime(struct timespec *tsp)
+{
+
+ getnanouptime_fromclock(tsp, sysclock_active);
+}
+
+void
+getmicrouptime(struct timeval *tvp)
+{
+
+ getmicrouptime_fromclock(tvp, sysclock_active);
+}
+
+void
+getbintime(struct bintime *bt)
+{
+
+ getbintime_fromclock(bt, sysclock_active);
+}
+
+void
+getnanotime(struct timespec *tsp)
+{
+
+ getnanotime_fromclock(tsp, sysclock_active);
+}
+
+void
+getmicrotime(struct timeval *tvp)
+{
+
+ getmicrotime_fromclock(tvp, sysclock_active);
+}
+
+#endif /* FFCLOCK */
+
+/*
* This is a clone of getnanotime and used for walltimestamps.
* The dtrace_ prefix prevents fbt from creating probes for
* it so walltimestamp can be safely used in all fbt probes.
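
A note on the arithmetic used throughout the feed-forward code above: cest.period holds seconds-per-tick as a 64-bit binary fraction, computed as ((1ULL << 63) / tc_frequency) << 1 because the natural 2^64/freq would overflow a 64-bit expression, and ffclock_convert_delta() effectively forms the 128-bit product delta * period in u_int-sized chunks. A stand-alone sketch of the same conversion, assuming a compiler with __int128 (the kernel loops instead to stay portable):

#include <stdint.h>
#include <stdio.h>

struct bintime {		/* mirrors the layout in <sys/time.h> */
	int64_t sec;
	uint64_t frac;		/* 64-bit binary fraction of a second */
};

int
main(void)
{
	uint64_t freq = 1000000000;	/* assumed 1 GHz counter */
	/* period = 2^64 / freq, dodging the 1 << 64 overflow. */
	uint64_t period = ((1ULL << 63) / freq) << 1;
	uint64_t delta = 1500000000;	/* 1.5e9 ticks = 1.5 s */
	unsigned __int128 prod = (unsigned __int128)delta * period;
	struct bintime bt;

	bt.sec = (int64_t)(prod >> 64);
	bt.frac = (uint64_t)prod;
	/* Prints roughly "1 + 0.500000000 s" for the 1.5 s interval. */
	printf("%jd + %.9f s\n", (intmax_t)bt.sec,
	    (double)bt.frac / 18446744073709551616.0);
	return (0);
}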
@@ -323,6 +997,146 @@
}
/*
+ * System clock currently providing time to the system. Modifiable via sysctl
+ * when the FFCLOCK option is defined.
+ */
+int sysclock_active = SYSCLOCK_FBCK;
+
+/* Internal NTP status and error estimates. */
+extern int time_status;
+extern long time_esterror;
+
+/*
+ * Take a snapshot of sysclock data which can be used to compare system clocks
+ * and generate timestamps after the fact.
+ */
+void
+sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
+{
+ struct fbclock_info *fbi;
+ struct timehands *th;
+ struct bintime bt;
+ unsigned int delta, gen;
+#ifdef FFCLOCK
+ ffcounter ffcount;
+ struct fftimehands *ffth;
+ struct ffclock_info *ffi;
+ struct ffclock_estimate cest;
+
+ ffi = &clock_snap->ff_info;
+#endif
+
+ fbi = &clock_snap->fb_info;
+ delta = 0;
+
+ do {
+ th = timehands;
+ gen = th->th_generation;
+ fbi->th_scale = th->th_scale;
+ fbi->tick_time = th->th_offset;
+#ifdef FFCLOCK
+ ffth = fftimehands;
+ ffi->tick_time = ffth->tick_time_lerp;
+ ffi->tick_time_lerp = ffth->tick_time_lerp;
+ ffi->period = ffth->cest.period;
+ ffi->period_lerp = ffth->period_lerp;
+ clock_snap->ffcount = ffth->tick_ffcount;
+ cest = ffth->cest;
+#endif
+ if (!fast)
+ delta = tc_delta(th);
+ } while (gen == 0 || gen != th->th_generation);
+
+ clock_snap->delta = delta;
+ clock_snap->sysclock_active = sysclock_active;
+
+ /* Record feedback clock status and error. */
+ clock_snap->fb_info.status = time_status;
+ /* XXX: Very crude estimate of feedback clock error. */
+ bt.sec = time_esterror / 1000000;
+ bt.frac = ((time_esterror - bt.sec) * 1000000) *
+ (uint64_t)18446744073709ULL;
+ clock_snap->fb_info.error = bt;
+
+#ifdef FFCLOCK
+ if (!fast)
+ clock_snap->ffcount += delta;
+
+ /* Record feed-forward clock leap second adjustment. */
+ ffi->leapsec_adjustment = cest.leapsec_total;
+ if (clock_snap->ffcount > cest.leapsec_next)
+ ffi->leapsec_adjustment -= cest.leapsec;
+
+ /* Record feed-forward clock status and error. */
+ clock_snap->ff_info.status = cest.status;
+ ffcount = clock_snap->ffcount - cest.update_ffcount;
+ ffclock_convert_delta(ffcount, cest.period, &bt);
+ /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
+ bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
+ /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
+ bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
+ clock_snap->ff_info.error = bt;
+#endif
+}
+
+/*
+ * Convert a sysclock snapshot into a struct bintime based on the specified
+ * clock source and flags.
+ */
+int
+sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
+ int whichclock, uint32_t flags)
+{
+#ifdef FFCLOCK
+ struct bintime bt2;
+ uint64_t period;
+#endif
+
+ switch (whichclock) {
+ case SYSCLOCK_FBCK:
+ *bt = cs->fb_info.tick_time;
+
+ /* If snapshot was created with !fast, delta will be >0. */
+ if (cs->delta > 0)
+ bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
+
+ if ((flags & FBCLOCK_UPTIME) == 0)
+ bintime_add(bt, &boottimebin);
+ break;
+#ifdef FFCLOCK
+ case SYSCLOCK_FFWD:
+ if (flags & FFCLOCK_LERP) {
+ *bt = cs->ff_info.tick_time_lerp;
+ period = cs->ff_info.period_lerp;
+ } else {
+ *bt = cs->ff_info.tick_time;
+ period = cs->ff_info.period;
+ }
+
+ /* If snapshot was created with !fast, delta will be >0. */
+ if (cs->delta > 0) {
+ ffclock_convert_delta(cs->delta, period, &bt2);
+ bintime_add(bt, &bt2);
+ }
+
+ /* Leap second adjustment. */
+ if (flags & FFCLOCK_LEAPSEC)
+ bt->sec -= cs->ff_info.leapsec_adjustment;
+
+ /* Boot time adjustment, for uptime/monotonic clocks. */
+ if (flags & FFCLOCK_UPTIME)
+ bintime_sub(bt, &ffclock_boottime);
+ break;
+#endif
+ default:
+ return (EINVAL);
+ break;
+ }
+
+ return (0);
+}
+
+/*
* Initialize a new timecounter and possibly use it.
*/
void
@@ -369,10 +1183,13 @@
"quality", CTLFLAG_RD, &(tc->tc_quality), 0,
"goodness of time counter");
/*
- * Never automatically use a timecounter with negative quality.
+ * Do not automatically switch if the current tc was specifically
+ * chosen. Never automatically use a timecounter with negative quality.
* Even though we run on the dummy counter, switching here may be
- * worse since this timecounter may not be monotonous.
+ * worse since this timecounter may not be monotonic.
*/
+ if (tc_chosen)
+ return;
if (tc->tc_quality < 0)
return;
if (tc->tc_quality < timecounter->tc_quality)
@@ -462,6 +1279,9 @@
ncount = timecounter->tc_get_timecount(timecounter);
else
ncount = 0;
+#ifdef FFCLOCK
+ ffclock_windup(delta);
+#endif
th->th_offset_count += delta;
th->th_offset_count &= th->th_counter->tc_counter_mask;
while (delta > th->th_counter->tc_frequency) {
@@ -516,14 +1336,17 @@
if (th->th_counter != timecounter) {
#ifndef __arm__
if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
- cpu_disable_deep_sleep++;
+ cpu_disable_c2_sleep++;
if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
- cpu_disable_deep_sleep--;
+ cpu_disable_c2_sleep--;
#endif
th->th_counter = timecounter;
th->th_offset_count = ncount;
tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
(((uint64_t)timecounter->tc_counter_mask + 1) / 3));
+#ifdef FFCLOCK
+ ffclock_change_tc(th);
+#endif
}
/*-
@@ -563,8 +1386,21 @@
th->th_generation = ogen;
/* Go live with the new struct timehands. */
- time_second = th->th_microtime.tv_sec;
- time_uptime = th->th_offset.sec;
+#ifdef FFCLOCK
+ switch (sysclock_active) {
+ case SYSCLOCK_FBCK:
+#endif
+ time_second = th->th_microtime.tv_sec;
+ time_uptime = th->th_offset.sec;
+#ifdef FFCLOCK
+ break;
+ case SYSCLOCK_FFWD:
+ time_second = fftimehands->tick_time_lerp.sec;
+ time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
+ break;
+ }
+#endif
+
timehands = th;
timekeep_push_vdso();
}
@@ -581,9 +1417,12 @@
strlcpy(newname, tc->tc_name, sizeof(newname));
error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
- if (error != 0 || req->newptr == NULL ||
- strcmp(newname, tc->tc_name) == 0)
+ if (error != 0 || req->newptr == NULL)
return (error);
+ /* Record that the tc in use now was specifically chosen. */
+ tc_chosen = 1;
+ if (strcmp(newname, tc->tc_name) == 0)
+ return (0);
for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
if (strcmp(newname, newtc->tc_name) != 0)
continue;
@@ -604,7 +1443,7 @@
"Timecounter hardware selected");
-/* Report or change the active timecounter hardware. */
+/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
@@ -630,11 +1469,83 @@
* RFC 2783 PPS-API implementation.
*/
+/*
+ * Return true if the driver is aware of the abi version extensions in the
+ * pps_state structure, and it supports at least the given abi version number.
+ */
+static inline int
+abi_aware(struct pps_state *pps, int vers)
+{
+
+ return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
+}
+
+static int
+pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
+{
+ int err, timo;
+ pps_seq_t aseq, cseq;
+ struct timeval tv;
+
+ if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
+ return (EINVAL);
+
+ /*
+ * If no timeout is requested, immediately return whatever values were
+ * most recently captured. If timeout seconds is -1, that's a request
+ * to block without a timeout. WITNESS won't let us sleep forever
+ * without a lock (we really don't need a lock), so just repeatedly
+ * sleep a long time.
+ */
+ if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
+ if (fapi->timeout.tv_sec == -1)
+ timo = 0x7fffffff;
+ else {
+ tv.tv_sec = fapi->timeout.tv_sec;
+ tv.tv_usec = fapi->timeout.tv_nsec / 1000;
+ timo = tvtohz(&tv);
+ }
+ aseq = pps->ppsinfo.assert_sequence;
+ cseq = pps->ppsinfo.clear_sequence;
+ while (aseq == pps->ppsinfo.assert_sequence &&
+ cseq == pps->ppsinfo.clear_sequence) {
+ if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
+ if (pps->flags & PPSFLAG_MTX_SPIN) {
+ err = msleep_spin(pps, pps->driver_mtx,
+ "ppsfch", timo);
+ } else {
+ err = msleep(pps, pps->driver_mtx, PCATCH,
+ "ppsfch", timo);
+ }
+ } else {
+ err = tsleep(pps, PCATCH, "ppsfch", timo);
+ }
+ if (err == EWOULDBLOCK) {
+ if (fapi->timeout.tv_sec == -1) {
+ continue;
+ } else {
+ return (ETIMEDOUT);
+ }
+ } else if (err != 0) {
+ return (err);
+ }
+ }
+ }
+
+ pps->ppsinfo.current_mode = pps->ppsparam.mode;
+ fapi->pps_info_buf = pps->ppsinfo;
+
+ return (0);
+}
+
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
pps_params_t *app;
struct pps_fetch_args *fapi;
+#ifdef FFCLOCK
+ struct pps_fetch_ffc_args *fapi_ffc;
+#endif
#ifdef PPS_SYNC
struct pps_kcbind_args *kapi;
#endif
@@ -649,6 +1560,11 @@
app = (pps_params_t *)data;
if (app->mode & ~pps->ppscap)
return (EINVAL);
+#ifdef FFCLOCK
+ /* Ensure only a single clock is selected for ffc timestamp. */
+ if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
+ return (EINVAL);
+#endif
pps->ppsparam = *app;
return (0);
case PPS_IOC_GETPARAMS:
@@ -661,13 +1577,32 @@
return (0);
case PPS_IOC_FETCH:
fapi = (struct pps_fetch_args *)data;
- if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
+ return (pps_fetch(fapi, pps));
+#ifdef FFCLOCK
+ case PPS_IOC_FETCH_FFCOUNTER:
+ fapi_ffc = (struct pps_fetch_ffc_args *)data;
+ if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
+ PPS_TSFMT_TSPEC)
return (EINVAL);
- if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
+ if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
return (EOPNOTSUPP);
- pps->ppsinfo.current_mode = pps->ppsparam.mode;
- fapi->pps_info_buf = pps->ppsinfo;
+ pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
+ fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
+ /* Overwrite timestamps if feedback clock selected. */
+ switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
+ case PPS_TSCLK_FBCK:
+ fapi_ffc->pps_info_buf_ffc.assert_timestamp =
+ pps->ppsinfo.assert_timestamp;
+ fapi_ffc->pps_info_buf_ffc.clear_timestamp =
+ pps->ppsinfo.clear_timestamp;
+ break;
+ case PPS_TSCLK_FFWD:
+ break;
+ default:
+ break;
+ }
return (0);
+#endif /* FFCLOCK */
case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
kapi = (struct pps_kcbind_args *)data;
@@ -678,7 +1613,8 @@
return (EINVAL);
if (kapi->edge & ~pps->ppscap)
return (EINVAL);
- pps->kcmode = kapi->edge;
+ pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
+ (pps->kcmode & KCMODE_ABIFLAG);
return (0);
#else
return (EOPNOTSUPP);
@@ -691,14 +1627,29 @@
void
pps_init(struct pps_state *pps)
{
- pps->ppscap |= PPS_TSFMT_TSPEC;
+ pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
if (pps->ppscap & PPS_CAPTUREASSERT)
pps->ppscap |= PPS_OFFSETASSERT;
if (pps->ppscap & PPS_CAPTURECLEAR)
pps->ppscap |= PPS_OFFSETCLEAR;
+#ifdef FFCLOCK
+ pps->ppscap |= PPS_TSCLK_MASK;
+#endif
+ pps->kcmode &= ~KCMODE_ABIFLAG;
}
void
+pps_init_abi(struct pps_state *pps)
+{
+
+ pps_init(pps);
+ if (pps->driver_abi > 0) {
+ pps->kcmode |= KCMODE_ABIFLAG;
+ pps->kernel_abi = PPS_ABI_VERSION;
+ }
+}
+
+void
pps_capture(struct pps_state *pps)
{
struct timehands *th;
@@ -707,6 +1658,9 @@
th = timehands;
pps->capgen = th->th_generation;
pps->capth = th;
+#ifdef FFCLOCK
+ pps->capffth = fftimehands;
+#endif
pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
if (pps->capgen != th->th_generation)
pps->capgen = 0;
@@ -720,8 +1674,16 @@
u_int tcount, *pcount;
int foff, fhard;
pps_seq_t *pseq;
+#ifdef FFCLOCK
+ struct timespec *tsp_ffc;
+ pps_seq_t *pseq_ffc;
+ ffcounter *ffcount;
+#endif
KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
+ /* Nothing to do if not currently set to capture this event type. */
+ if ((event & pps->ppsparam.mode) == 0)
+ return;
/* If the timecounter was wound up underneath us, bail out. */
if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
return;
@@ -734,6 +1696,11 @@
fhard = pps->kcmode & PPS_CAPTUREASSERT;
pcount = &pps->ppscount[0];
pseq = &pps->ppsinfo.assert_sequence;
+#ifdef FFCLOCK
+ ffcount = &pps->ppsinfo_ffc.assert_ffcount;
+ tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
+ pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
+#endif
} else {
tsp = &pps->ppsinfo.clear_timestamp;
osp = &pps->ppsparam.clear_offset;
@@ -741,6 +1708,11 @@
fhard = pps->kcmode & PPS_CAPTURECLEAR;
pcount = &pps->ppscount[1];
pseq = &pps->ppsinfo.clear_sequence;
+#ifdef FFCLOCK
+ ffcount = &pps->ppsinfo_ffc.clear_ffcount;
+ tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
+ pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
+#endif
}
/*
@@ -777,6 +1749,17 @@
tsp->tv_sec -= 1;
}
}
+
+#ifdef FFCLOCK
+ *ffcount = pps->capffth->tick_ffcount + tcount;
+ bt = pps->capffth->tick_time;
+ ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
+ bintime_add(&bt, &pps->capffth->tick_time);
+ bintime2timespec(&bt, &ts);
+ (*pseq_ffc)++;
+ *tsp_ffc = ts;
+#endif
+
#ifdef PPS_SYNC
if (fhard) {
uint64_t scale;
@@ -799,6 +1782,9 @@
hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
}
#endif
+
+ /* Wakeup anyone sleeping in pps_fetch(). */
+ wakeup(pps);
}
/*
@@ -824,16 +1810,53 @@
tc_windup();
}
+static void __inline
+tc_adjprecision(void)
+{
+ int t;
+
+ if (tc_timepercentage > 0) {
+ t = (99 + tc_timepercentage) / tc_timepercentage;
+ tc_precexp = fls(t + (t >> 1)) - 1;
+ FREQ2BT(hz / tc_tick, &bt_timethreshold);
+ FREQ2BT(hz, &bt_tickthreshold);
+ bintime_shift(&bt_timethreshold, tc_precexp);
+ bintime_shift(&bt_tickthreshold, tc_precexp);
+ } else {
+ tc_precexp = 31;
+ bt_timethreshold.sec = INT_MAX;
+ bt_timethreshold.frac = ~(uint64_t)0;
+ bt_tickthreshold = bt_timethreshold;
+ }
+ sbt_timethreshold = bttosbt(bt_timethreshold);
+ sbt_tickthreshold = bttosbt(bt_tickthreshold);
+}
+
+static int
+sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
+{
+ int error, val;
+
+ val = tc_timepercentage;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ tc_timepercentage = val;
+ tc_adjprecision();
+ return (0);
+}
+
static void
inittimecounter(void *dummy)
{
u_int p;
+ int tick_rate;
/*
* Set the initial timeout to
* max(1, <approx. number of hardclock ticks in a millisecond>).
* People should probably not use the sysctl to set the timeout
- * to smaller than its inital value, since that value is the
+ * to smaller than its initial value, since that value is the
* smallest reasonable one. If they want better timestamps they
* should use the non-"get"* functions.
*/
@@ -841,9 +1864,18 @@
tc_tick = (hz + 500) / 1000;
else
tc_tick = 1;
+ tc_adjprecision();
+ FREQ2BT(hz, &tick_bt);
+ tick_sbt = bttosbt(tick_bt);
+ tick_rate = hz / tc_tick;
+ FREQ2BT(tick_rate, &tc_tick_bt);
+ tc_tick_sbt = bttosbt(tc_tick_bt);
p = (tc_tick * 1000000) / hz;
printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
+#ifdef FFCLOCK
+ ffclock_init();
+#endif
/* warm up new timecounter (again) and get rolling. */
(void)timecounter->tc_get_timecount(timecounter);
(void)timecounter->tc_get_timecount(timecounter);
@@ -857,20 +1889,27 @@
static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;
+static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
+static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);
+
static uint64_t
tc_cpu_ticks(void)
{
- static uint64_t base;
- static unsigned last;
- unsigned u;
struct timecounter *tc;
+ uint64_t res, *base;
+ unsigned u, *last;
+ critical_enter();
+ base = DPCPU_PTR(tc_cpu_ticks_base);
+ last = DPCPU_PTR(tc_cpu_ticks_last);
tc = timehands->th_counter;
u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
- if (u < last)
- base += (uint64_t)tc->tc_counter_mask + 1;
- last = u;
- return (u + base);
+ if (u < *last)
+ *base += (uint64_t)tc->tc_counter_mask + 1;
+ *last = u;
+ res = u + *base;
+ critical_exit();
+ return (res);
}
void
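
All of the clock readers in this file (binuptime(), the fbclock_*() variants, ffclock_read_counter(), sysclock_getsnapshot()) share one lockless pattern: the writer zeroes a generation number, updates the standby snapshot, then publishes a new non-zero generation, while readers retry until they observe the same non-zero generation before and after copying. A compressed sketch of the idea outside the kernel (names are illustrative; explicit memory barriers, which real code needs on weakly ordered CPUs, are elided):

#include <stdint.h>

struct snap {
	volatile unsigned gen;	/* 0 means "update in progress" */
	uint64_t value;
};

/* Slot 0 starts valid, like the statically initialized boot timehands. */
static struct snap snaps[2] = { { .gen = 1 } };
static struct snap *volatile cur = &snaps[0];

/* Reader: retry until a stable, non-zero generation brackets the copy. */
uint64_t
snap_read(void)
{
	struct snap *s;
	unsigned gen;
	uint64_t v;

	do {
		s = cur;
		gen = s->gen;
		v = s->value;
	} while (gen == 0 || gen != s->gen);
	return (v);
}

/* Writer: update the standby slot, then flip readers over to it. */
void
snap_write(uint64_t v)
{
	struct snap *s = (cur == &snaps[0]) ? &snaps[1] : &snaps[0];
	unsigned ogen = s->gen;

	s->gen = 0;		/* readers landing here will spin */
	s->value = v;
	if (++ogen == 0)	/* generation 0 is reserved */
		ogen = 1;
	s->gen = ogen;
	cur = s;
}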
Modified: trunk/sys/kern/kern_thr.c
===================================================================
--- trunk/sys/kern/kern_thr.c 2018-05-25 20:58:03 UTC (rev 9948)
+++ trunk/sys/kern/kern_thr.c 2018-05-25 20:59:46 UTC (rev 9949)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
/*-
* Copyright (c) 2003, Jeffrey Roberson <jeff at freebsd.org>
* All rights reserved.
@@ -25,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/kern/kern_thr.c 315949 2017-03-25 13:33:23Z badger $");
#include "opt_compat.h"
#include "opt_posix.h"
@@ -36,6 +37,7 @@
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
+#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
@@ -63,11 +65,11 @@
static int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
- &max_threads_per_proc, 0, "Limit on threads per proc");
+ &max_threads_per_proc, 0, "Limit on threads per proc");
static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
- &max_threads_hits, 0, "");
+ &max_threads_hits, 0, "kern.threads.max_threads_per_proc hit count");
#ifdef COMPAT_FREEBSD32
@@ -87,29 +89,39 @@
#define suword_lwpid suword
#endif
-static int create_thread(struct thread *td, mcontext_t *ctx,
- void (*start_func)(void *), void *arg,
- char *stack_base, size_t stack_size,
- char *tls_base,
- long *child_tid, long *parent_tid,
- int flags, struct rtprio *rtp);
-
/*
* System call interface.
*/
+
+struct thr_create_initthr_args {
+ ucontext_t ctx;
+ long *tid;
+};
+
+static int
+thr_create_initthr(struct thread *td, void *thunk)
+{
+ struct thr_create_initthr_args *args;
+
+ /* Copy out the child tid. */
+ args = thunk;
+ if (args->tid != NULL && suword_lwpid(args->tid, td->td_tid))
+ return (EFAULT);
+
+ return (set_mcontext(td, &args->ctx.uc_mcontext));
+}
+
int
sys_thr_create(struct thread *td, struct thr_create_args *uap)
/* ucontext_t *ctx, long *id, int flags */
{
- ucontext_t ctx;
+ struct thr_create_initthr_args args;
int error;
- if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
+ if ((error = copyin(uap->ctx, &args.ctx, sizeof(args.ctx))))
return (error);
-
- error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
- NULL, 0, NULL, uap->id, NULL, uap->flags, NULL);
- return (error);
+ args.tid = uap->id;
+ return (thread_create(td, NULL, thr_create_initthr, &args));
}
int
@@ -127,6 +139,35 @@
return (kern_thr_new(td, &param));
}
+static int
+thr_new_initthr(struct thread *td, void *thunk)
+{
+ stack_t stack;
+ struct thr_param *param;
+
+ /*
+ * Here we copy out tid to two places, one for child and one
+ * for parent, because pthread can create a detached thread,
+ * if parent wants to safely access child tid, it has to provide
+ * its storage, because child thread may exit quickly and
+ * memory is freed before parent thread can access it.
+ */
+ param = thunk;
+ if ((param->child_tid != NULL &&
+ suword_lwpid(param->child_tid, td->td_tid)) ||
+ (param->parent_tid != NULL &&
+ suword_lwpid(param->parent_tid, td->td_tid)))
+ return (EFAULT);
+
+ /* Set up our machine context. */
+ stack.ss_sp = param->stack_base;
+ stack.ss_size = param->stack_size;
+ /* Set upcall address to user thread entry function. */
+ cpu_set_upcall_kse(td, param->start_func, param->arg, &stack);
+ /* Setup user TLS address and TLS pointer register. */
+ return (cpu_set_user_tls(td, param->tls_base));
+}
+
int
kern_thr_new(struct thread *td, struct thr_param *param)
{
@@ -140,22 +181,13 @@
return (error);
rtpp = &rtp;
}
- error = create_thread(td, NULL, param->start_func, param->arg,
- param->stack_base, param->stack_size, param->tls_base,
- param->child_tid, param->parent_tid, param->flags,
- rtpp);
- return (error);
+ return (thread_create(td, rtpp, thr_new_initthr, param));
}
-static int
-create_thread(struct thread *td, mcontext_t *ctx,
- void (*start_func)(void *), void *arg,
- char *stack_base, size_t stack_size,
- char *tls_base,
- long *child_tid, long *parent_tid,
- int flags, struct rtprio *rtp)
+int
+thread_create(struct thread *td, struct rtprio *rtp,
+ int (*initialize_thread)(struct thread *, void *), void *thunk)
{
- stack_t stack;
struct thread *newtd;
struct proc *p;
int error;
@@ -162,12 +194,6 @@
p = td->td_proc;
- /* Have race condition but it is cheap. */
- if (p->p_numthreads >= max_threads_per_proc) {
- ++max_threads_hits;
- return (EPROCLIM);
- }
-
if (rtp != NULL) {
switch(rtp->type) {
case RTP_PRIO_REALTIME:
@@ -195,64 +221,32 @@
#endif
/* Initialize our td */
- newtd = thread_alloc(0);
- if (newtd == NULL) {
- error = ENOMEM;
+ error = kern_thr_alloc(p, 0, &newtd);
+ if (error)
goto fail;
- }
cpu_set_upcall(newtd, td);
- /*
- * Try the copyout as soon as we allocate the td so we don't
- * have to tear things down in a failure case below.
- * Here we copy out tid to two places, one for child and one
- * for parent, because pthread can create a detached thread,
- * if parent wants to safely access child tid, it has to provide
- * its storage, because child thread may exit quickly and
- * memory is freed before parent thread can access it.
- */
- if ((child_tid != NULL &&
- suword_lwpid(child_tid, newtd->td_tid)) ||
- (parent_tid != NULL &&
- suword_lwpid(parent_tid, newtd->td_tid))) {
- thread_free(newtd);
- error = EFAULT;
- goto fail;
- }
-
bzero(&newtd->td_startzero,
__rangeof(struct thread, td_startzero, td_endzero));
+ newtd->td_su = NULL;
+ newtd->td_sleeptimo = 0;
bcopy(&td->td_startcopy, &newtd->td_startcopy,
__rangeof(struct thread, td_startcopy, td_endcopy));
newtd->td_proc = td->td_proc;
newtd->td_ucred = crhold(td->td_ucred);
+ newtd->td_dbg_sc_code = td->td_dbg_sc_code;
+ newtd->td_dbg_sc_narg = td->td_dbg_sc_narg;
- if (ctx != NULL) { /* old way to set user context */
- error = set_mcontext(newtd, ctx);
- if (error != 0) {
- thread_free(newtd);
- crfree(td->td_ucred);
- goto fail;
- }
- } else {
- /* Set up our machine context. */
- stack.ss_sp = stack_base;
- stack.ss_size = stack_size;
- /* Set upcall address to user thread entry function. */
- cpu_set_upcall_kse(newtd, start_func, arg, &stack);
- /* Setup user TLS address and TLS pointer register. */
- error = cpu_set_user_tls(newtd, tls_base);
- if (error != 0) {
- thread_free(newtd);
- crfree(td->td_ucred);
- goto fail;
- }
+ error = initialize_thread(newtd, thunk);
+ if (error != 0) {
+ thread_free(newtd);
+ crfree(td->td_ucred);
+ goto fail;
}
PROC_LOCK(td->td_proc);
td->td_proc->p_flag |= P_HADTHREADS;
- newtd->td_sigmask = td->td_sigmask;
thread_link(newtd, p);
bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
thread_lock(td);
@@ -261,6 +255,8 @@
thread_unlock(td);
if (P_SHOULDSTOP(p))
newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
+ if (p->p_ptevents & PTRACE_LWP)
+ newtd->td_dbgflags |= TDB_BORN;
PROC_UNLOCK(p);
tidhash_add(newtd);
@@ -281,9 +277,11 @@
fail:
#ifdef RACCT
- PROC_LOCK(p);
- racct_sub(p, RACCT_NTHR, 1);
- PROC_UNLOCK(p);
+ if (racct_enable) {
+ PROC_LOCK(p);
+ racct_sub(p, RACCT_NTHR, 1);
+ PROC_UNLOCK(p);
+ }
#endif
return (error);
}
@@ -304,10 +302,7 @@
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
/* long *state */
{
- struct proc *p;
- p = td->td_proc;
-
/* Signal userland that it can free the stack. */
if ((void *)uap->state != NULL) {
suword_lwpid(uap->state, 1);
@@ -314,27 +309,64 @@
kern_umtx_wake(td, uap->state, INT_MAX, 0);
}
- rw_wlock(&tidhash_lock);
+ return (kern_thr_exit(td));
+}
- PROC_LOCK(p);
+int
+kern_thr_exit(struct thread *td)
+{
+ struct proc *p;
+ p = td->td_proc;
+
/*
- * Shutting down last thread in the proc. This will actually
- * call exit() in the trampoline when it returns.
+ * If all of the threads in a process call this routine to
+ * exit (e.g. all threads call pthread_exit()), exactly one
+ * thread should return to the caller to terminate the process
+ * instead of the thread.
+ *
+ * Checking p_numthreads alone is not sufficient since threads
+ * might be committed to terminating while the PROC_LOCK is
+ * dropped in either ptracestop() or while removing this thread
+ * from the tidhash. Instead, the p_pendingexits field holds
+ * the count of threads in either of those states and a thread
+ * is considered the "last" thread if all of the other threads
+ * in a process are already terminating.
*/
- if (p->p_numthreads != 1) {
- racct_sub(p, RACCT_NTHR, 1);
- LIST_REMOVE(td, td_hash);
- rw_wunlock(&tidhash_lock);
- tdsigcleanup(td);
- PROC_SLOCK(p);
- thread_stopped(p);
- thread_exit();
- /* NOTREACHED */
+ PROC_LOCK(p);
+ if (p->p_numthreads == p->p_pendingexits + 1) {
+ /*
+ * Ignore attempts to shut down last thread in the
+ * proc. This will actually call _exit(2) in the
+ * usermode trampoline when it returns.
+ */
+ PROC_UNLOCK(p);
+ return (0);
}
+
+ p->p_pendingexits++;
+ td->td_dbgflags |= TDB_EXIT;
+ if (p->p_ptevents & PTRACE_LWP)
+ ptracestop(td, SIGTRAP, NULL);
PROC_UNLOCK(p);
- rw_wunlock(&tidhash_lock);
- return (0);
+ tidhash_remove(td);
+ PROC_LOCK(p);
+ p->p_pendingexits--;
+
+ /*
+ * The check above should prevent all other threads from this
+ * process from exiting while the PROC_LOCK is dropped, so
+ * there must be at least one other thread other than the
+ * current thread.
+ */
+ KASSERT(p->p_numthreads > 1, ("too few threads"));
+ racct_sub(p, RACCT_NTHR, 1);
+ tdsigcleanup(td);
+ umtx_thread_exit(td);
+ PROC_SLOCK(p);
+ thread_stopped(p);
+ thread_exit();
+ /* NOTREACHED */
}
int
@@ -538,8 +570,11 @@
error = 0;
name[0] = '\0';
if (uap->name != NULL) {
- error = copyinstr(uap->name, name, sizeof(name),
- NULL);
+ error = copyinstr(uap->name, name, sizeof(name), NULL);
+ if (error == ENAMETOOLONG) {
+ error = copyin(uap->name, name, sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+ }
if (error)
return (error);
}
@@ -554,3 +589,20 @@
PROC_UNLOCK(p);
return (error);
}
+
+int
+kern_thr_alloc(struct proc *p, int pages, struct thread **ntd)
+{
+
+ /* Have race condition but it is cheap. */
+ if (p->p_numthreads >= max_threads_per_proc) {
+ ++max_threads_hits;
+ return (EPROCLIM);
+ }
+
+ *ntd = thread_alloc(pages);
+ if (*ntd == NULL)
+ return (ENOMEM);
+
+ return (0);
+}
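
The kern_thr.c refactor above funnels both thr_create(2) and thr_new(2) through a generic thread_create(td, rtp, initialize_thread, thunk): the common path allocates, links, and schedules the thread, while a per-syscall callback finishes machine-context setup, so failures unwind in one place. A schematic of how a consumer plugs in (the struct and function names here are illustrative):

/* Arguments carried from the syscall into the callback. */
struct my_initthr_args {
	void (*func)(void *);
	void *arg;
};

/* Invoked on the half-built thread; return 0 or an errno to abort. */
static int
my_initthr(struct thread *td, void *thunk)
{
	struct my_initthr_args *args = thunk;

	/* e.g. set up the new thread's upcall/TLS from args here. */
	(void)td;
	(void)args;
	return (0);
}

static int
my_thr_variant(struct thread *td)
{
	struct my_initthr_args args = { 0 };

	/* thread_create() handles racct, limits, linking, and scheduling,
	 * calling my_initthr() before the new thread becomes runnable. */
	return (thread_create(td, NULL, my_initthr, &args));
}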
Modified: trunk/sys/kern/kern_thread.c
===================================================================
--- trunk/sys/kern/kern_thread.c 2018-05-25 20:58:03 UTC (rev 9948)
+++ trunk/sys/kern/kern_thread.c 2018-05-25 20:59:46 UTC (rev 9949)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
/*-
* Copyright (C) 2001 Julian Elischer <julian at freebsd.org>.
* All rights reserved.
@@ -31,7 +32,7 @@
#include "opt_hwpmc_hooks.h"
#include <sys/cdefs.h>
-__FBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/kern/kern_thread.c 315837 2017-03-23 08:02:29Z avg $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -46,6 +47,8 @@
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
+#include <sys/syscallsubr.h>
+#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
@@ -63,9 +66,8 @@
#include <sys/eventhandler.h>
SDT_PROVIDER_DECLARE(proc);
-SDT_PROBE_DEFINE(proc, , , lwp_exit, lwp-exit);
+SDT_PROBE_DEFINE(proc, , , lwp__exit);
-
/*
* thread related storage.
*/
@@ -76,6 +78,8 @@
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
static void thread_zombie(struct thread *);
+static int thread_unsuspend_one(struct thread *td, struct proc *p,
+ bool boundary);
#define TID_BUFFER_SIZE 1024
@@ -207,11 +211,11 @@
td->td_sleepqueue = sleepq_alloc();
td->td_turnstile = turnstile_alloc();
td->td_rlqe = NULL;
- td->td_vp_reserv = 0;
EVENTHANDLER_INVOKE(thread_init, td);
td->td_sched = (struct td_sched *)&td[1];
umtx_thread_init(td);
td->td_kstack = 0;
+ td->td_sel = NULL;
return (0);
}
@@ -279,7 +283,7 @@
thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
thread_ctor, thread_dtor, thread_init, thread_fini,
- 16 - 1, 0);
+ 16 - 1, UMA_ZONE_NOFREE);
tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
rw_init(&tidhash_lock, "tidhash");
}
@@ -316,7 +320,7 @@
/*
* Don't even bother to lock if none at this instant,
- * we really don't care about the next instant..
+ * we really don't care about the next instant.
*/
if (!TAILQ_EMPTY(&zombie_threads)) {
mtx_lock_spin(&zombie_lock);
@@ -380,6 +384,7 @@
cpu_thread_free(td);
if (td->td_kstack != 0)
vm_thread_dispose(td);
+ callout_drain(&td->td_slpcallout);
uma_zfree(thread_zone, td);
}
@@ -411,18 +416,18 @@
KASSERT(p != NULL, ("thread exiting without a process"));
CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
(long)p->p_pid, td->td_name);
+ SDT_PROBE0(proc, , , lwp__exit);
KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
#ifdef AUDIT
AUDIT_SYSCALL_EXIT(0, td);
#endif
- umtx_thread_exit(td);
/*
* drop FPU & debug register state storage, or any other
* architecture specific resources that
* would not be on a new untouched process.
*/
- cpu_thread_exit(td); /* XXXSMP */
+ cpu_thread_exit(td);
/*
* The last thread is left attached to the process
@@ -434,6 +439,7 @@
*/
if (p->p_flag & P_HADTHREADS) {
if (p->p_numthreads > 1) {
+ atomic_add_int(&td->td_proc->p_exitthreads, 1);
thread_unlink(td);
td2 = FIRST_THREAD_IN_PROC(p);
sched_exit_thread(td2, td);
@@ -447,7 +453,7 @@
if (p->p_numthreads == p->p_suspcount) {
thread_lock(p->p_singlethread);
wakeup_swapper = thread_unsuspend_one(
- p->p_singlethread);
+ p->p_singlethread, p, false);
thread_unlock(p->p_singlethread);
if (wakeup_swapper)
kick_proc0();
@@ -454,7 +460,6 @@
}
}
- atomic_add_int(&td->td_proc->p_exitthreads, 1);
PCPU_SET(deadthread, td);
} else {
/*
@@ -472,6 +477,9 @@
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
PROC_UNLOCK(p);
+ PROC_STATLOCK(p);
+ thread_lock(td);
+ PROC_SUNLOCK(p);
/* Do the same timestamp bookkeeping that mi_switch() would do. */
new_switchtime = cpu_ticks();
@@ -486,9 +494,8 @@
td->td_ru.ru_nvcsw++;
ruxagg(p, td);
rucollect(&p->p_ru, &td->td_ru);
+ PROC_STATUNLOCK(p);
- thread_lock(td);
- PROC_SUNLOCK(p);
td->td_state = TDS_INACTIVE;
#ifdef WITNESS
witness_thread_exit(td);
@@ -509,19 +516,18 @@
struct thread *td;
mtx_assert(&Giant, MA_NOTOWNED);
- KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
+ KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
+ KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
td = FIRST_THREAD_IN_PROC(p);
/* Lock the last thread so we spin until it exits cpu_throw(). */
thread_lock(td);
thread_unlock(td);
- /* Wait for any remaining threads to exit cpu_throw(). */
- while (p->p_exitthreads)
- sched_relinquish(curthread);
lock_profile_thread_exit(td);
cpuset_rel(td->td_cpuset);
td->td_cpuset = NULL;
cpu_thread_clean(td);
crfree(td->td_ucred);
+ callout_drain(&td->td_slpcallout);
thread_reap(); /* check for zombie threads etc. */
}
@@ -547,24 +553,12 @@
LIST_INIT(&td->td_lprof[0]);
LIST_INIT(&td->td_lprof[1]);
sigqueue_init(&td->td_sigqueue, p);
- callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
+ callout_init(&td->td_slpcallout, 1);
TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
p->p_numthreads++;
}
/*
- * Convert a process with one thread to an unthreaded process.
- */
-void
-thread_unthread(struct thread *td)
-{
- struct proc *p = td->td_proc;
-
- KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
- p->p_flag &= ~P_HADTHREADS;
-}
-
-/*
* Called from:
* thread_exit()
*/
@@ -591,7 +585,7 @@
remaining = p->p_numthreads;
else if (mode == SINGLE_BOUNDARY)
remaining = p->p_numthreads - p->p_boundary_count;
- else if (mode == SINGLE_NO_EXIT)
+ else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
remaining = p->p_numthreads - p->p_suspcount;
else
panic("calc_remaining: wrong mode %d", mode);
@@ -598,6 +592,62 @@
return (remaining);
}
+static int
+remain_for_mode(int mode)
+{
+
+ return (mode == SINGLE_ALLPROC ? 0 : 1);
+}
+
+static int
+weed_inhib(int mode, struct thread *td2, struct proc *p)
+{
+ int wakeup_swapper;
+
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ PROC_SLOCK_ASSERT(p, MA_OWNED);
+ THREAD_LOCK_ASSERT(td2, MA_OWNED);
+
+ wakeup_swapper = 0;
+ switch (mode) {
+ case SINGLE_EXIT:
+ if (TD_IS_SUSPENDED(td2))
+ wakeup_swapper |= thread_unsuspend_one(td2, p, true);
+ if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
+ wakeup_swapper |= sleepq_abort(td2, EINTR);
+ break;
+ case SINGLE_BOUNDARY:
+ case SINGLE_NO_EXIT:
+ if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
+ wakeup_swapper |= thread_unsuspend_one(td2, p, false);
+ if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
+ wakeup_swapper |= sleepq_abort(td2, ERESTART);
+ break;
+ case SINGLE_ALLPROC:
+ /*
+ * ALLPROC suspend tries to avoid spurious EINTR for
+ * threads sleeping interruptibly, by suspending the
+ * thread directly, similarly to sig_suspend_threads().
+ * Since such sleep is not performed at the user
+ * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
+ * is used to avoid immediate un-suspend.
+ */
+ if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
+ TDF_ALLPROCSUSP)) == 0)
+ wakeup_swapper |= thread_unsuspend_one(td2, p, false);
+ if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
+ if ((td2->td_flags & TDF_SBDRY) == 0) {
+ thread_suspend_one(td2);
+ td2->td_flags |= TDF_ALLPROCSUSP;
+ } else {
+ wakeup_swapper |= sleepq_abort(td2, ERESTART);
+ }
+ }
+ break;
+ }
+ return (wakeup_swapper);
+}
+
/*
* Enforce single-threading.
*
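[Note: weed_inhib() itself never wakes the swapper; it only reports whether
a wakeup became necessary, which lets the caller fold the results of a whole
sweep into one kick_proc0(). A minimal sketch of that caller shape, condensed
from the loop rewritten in the hunks below (SMP signal forwarding omitted);
this is an illustration, not part of the patch:

	wakeup_swapper = 0;
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2 == td)
			continue;
		thread_lock(td2);
		if (TD_IS_INHIBITED(td2))
			wakeup_swapper |= weed_inhib(mode, td2, p);
		thread_unlock(td2);
	}
	if (wakeup_swapper)
		kick_proc0();		/* one wakeup after the sweep */

Note also the TDF_ALLPROCSUSP round trip: weed_inhib() sets the flag when it
parks an interruptibly sleeping thread directly, and thread_unsuspend_one()
(further down) clears it again, so a later sweep does not immediately
un-suspend the thread it just parked.]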
@@ -612,19 +662,29 @@
* any sleeping threads that are interruptible (PCATCH).
*/
int
-thread_single(int mode)
+thread_single(struct proc *p, int mode)
{
struct thread *td;
struct thread *td2;
- struct proc *p;
int remaining, wakeup_swapper;
td = curthread;
- p = td->td_proc;
+ KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
+ mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
+ ("invalid mode %d", mode));
+ /*
+ * If allowing non-ALLPROC singlethreading for non-curproc
+ * callers, calc_remaining() and remain_for_mode() should be
+ * adjusted to also account for td->td_proc != p. For now
+ * this is not implemented because it is not used.
+ */
+ KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
+ (mode != SINGLE_ALLPROC && td->td_proc == p),
+ ("mode %d proc %p curproc %p", mode, p, td->td_proc));
mtx_assert(&Giant, MA_NOTOWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
- if ((p->p_flag & P_HADTHREADS) == 0)
+ if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
return (0);
/* Is someone already single threading? */
@@ -641,11 +701,13 @@
else
p->p_flag &= ~P_SINGLE_BOUNDARY;
}
+ if (mode == SINGLE_ALLPROC)
+ p->p_flag |= P_TOTAL_STOP;
p->p_flag |= P_STOPPED_SINGLE;
PROC_SLOCK(p);
p->p_singlethread = td;
remaining = calc_remaining(p, mode);
- while (remaining != 1) {
+ while (remaining != remain_for_mode(mode)) {
if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
goto stopme;
wakeup_swapper = 0;
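[Note: as a concrete check of the new termination target, remain_for_mode()
returns 1 for the curproc modes, since the initiating thread stays runnable,
and 0 for SINGLE_ALLPROC, since the initiator belongs to a different process
and is not counted among p's threads. With p_numthreads == 4, SINGLE_NO_EXIT
loops until p_suspcount reaches 3 (remaining == 4 - 3 == 1), while
SINGLE_ALLPROC loops until all four threads are suspended (remaining == 0).]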
@@ -655,45 +717,12 @@
thread_lock(td2);
td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
if (TD_IS_INHIBITED(td2)) {
- switch (mode) {
- case SINGLE_EXIT:
- if (TD_IS_SUSPENDED(td2))
- wakeup_swapper |=
- thread_unsuspend_one(td2);
- if (TD_ON_SLEEPQ(td2) &&
- (td2->td_flags & TDF_SINTR))
- wakeup_swapper |=
- sleepq_abort(td2, EINTR);
- break;
- case SINGLE_BOUNDARY:
- if (TD_IS_SUSPENDED(td2) &&
- !(td2->td_flags & TDF_BOUNDARY))
- wakeup_swapper |=
- thread_unsuspend_one(td2);
- if (TD_ON_SLEEPQ(td2) &&
- (td2->td_flags & TDF_SINTR))
- wakeup_swapper |=
- sleepq_abort(td2, ERESTART);
- break;
- case SINGLE_NO_EXIT:
- if (TD_IS_SUSPENDED(td2) &&
- !(td2->td_flags & TDF_BOUNDARY))
- wakeup_swapper |=
- thread_unsuspend_one(td2);
- if (TD_ON_SLEEPQ(td2) &&
- (td2->td_flags & TDF_SINTR))
- wakeup_swapper |=
- sleepq_abort(td2, ERESTART);
- break;
- default:
- break;
- }
- }
+ wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
- else if (TD_IS_RUNNING(td2) && td != td2) {
+ } else if (TD_IS_RUNNING(td2) && td != td2) {
forward_signal(td2);
+#endif
}
-#endif
thread_unlock(td2);
}
if (wakeup_swapper)
@@ -703,7 +732,7 @@
/*
* Maybe we suspended some threads.. was it enough?
*/
- if (remaining == 1)
+ if (remaining == remain_for_mode(mode))
break;
stopme:
@@ -711,24 +740,70 @@
* Wake us up when everyone else has suspended.
* In the mean time we suspend as well.
*/
- thread_suspend_switch(td);
+ thread_suspend_switch(td, p);
remaining = calc_remaining(p, mode);
}
if (mode == SINGLE_EXIT) {
/*
- * We have gotten rid of all the other threads and we
- * are about to either exit or exec. In either case,
- * we try our utmost to revert to being a non-threaded
- * process.
+ * Convert the process to an unthreaded process. SINGLE_EXIT
+ * is requested by exit1() or execve(); in both cases the
+ * other threads must be retired.
*/
+ KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
p->p_singlethread = NULL;
- p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
- thread_unthread(td);
+ p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);
+
+ /*
+ * Wait for any remaining threads to exit cpu_throw().
+ */
+ while (p->p_exitthreads != 0) {
+ PROC_SUNLOCK(p);
+ PROC_UNLOCK(p);
+ sched_relinquish(td);
+ PROC_LOCK(p);
+ PROC_SLOCK(p);
+ }
+ } else if (mode == SINGLE_BOUNDARY) {
+ /*
+ * Wait until all suspended threads are removed from
+ * the processors. The thread_suspend_check()
+ * increments p_boundary_count while it is still
+ * running, so without this wait execve() could
+ * destroy the vmspace while our other threads were
+ * still using the address space.
+ *
+ * We lock the thread, which is only allowed to
+ * succeed after context switch code finished using
+ * the address space.
+ */
+ FOREACH_THREAD_IN_PROC(p, td2) {
+ if (td2 == td)
+ continue;
+ thread_lock(td2);
+ KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
+ ("td %p not on boundary", td2));
+ KASSERT(TD_IS_SUSPENDED(td2),
+ ("td %p is not suspended", td2));
+ thread_unlock(td2);
+ }
}
PROC_SUNLOCK(p);
return (0);
}
+bool
+thread_suspend_check_needed(void)
+{
+ struct proc *p;
+ struct thread *td;
+
+ td = curthread;
+ p = td->td_proc;
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
+ (td->td_dbgflags & TDB_SUSPEND) != 0));
+}
+
/*
* Called in from locations that can safely check to see
* whether we have to suspend or at least throttle for a
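[Note: the SINGLE_BOUNDARY branch above leans on a property of the thread
lock: the context-switch code holds it until the switched-out thread is truly
off the CPU, so a bare lock/unlock pair doubles as a rendezvous. A minimal
sketch of the idiom in isolation (the helper name is illustrative, not part
of the patch):

static void
wait_until_off_cpu(struct thread *td2)
{

	/* Cannot be acquired until cpu_switch() drops td2's lock. */
	thread_lock(td2);
	/* Taking the lock was the rendezvous; nothing else to do. */
	thread_unlock(td2);
}

thread_wait() uses the same trick on the last thread before recycling it.]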
@@ -749,10 +824,10 @@
* P_SINGLE_EXIT | return_instead == 0| return_instead != 0
*---------------+--------------------+---------------------
* 0 | returns 0 | returns 0 or 1
- * | when ST ends | immediatly
+ * | when ST ends | immediately
*---------------+--------------------+---------------------
* 1 | thread exits | returns 1
- * | | immediatly
+ * | | immediately
* 0 = thread_exit() or suspension ok,
* other = return error instead of stopping the thread.
*
@@ -773,8 +848,7 @@
p = td->td_proc;
mtx_assert(&Giant, MA_NOTOWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
- while (P_SHOULDSTOP(p) ||
- ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
+ while (thread_suspend_check_needed()) {
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
KASSERT(p->p_singlethread != NULL,
("singlethread not set"));
@@ -781,8 +855,8 @@
/*
* The only suspension in action is a
* single-threading. Single threader need not stop.
- * XXX Should be safe to access unlocked
- * as it can only be set to be true by us.
+ * It is safe to access p->p_singlethread unlocked
+ * because it can only be set to our address by us.
*/
if (p->p_singlethread == td)
return (0); /* Exempt from stopping. */
@@ -796,11 +870,9 @@
return (ERESTART);
/*
- * Ignore suspend requests for stop signals if they
- * are deferred.
+ * Ignore suspend requests if they are deferred.
*/
- if (P_SHOULDSTOP(p) == P_STOPPED_SIG &&
- td->td_flags & TDF_SBDRY) {
+ if ((td->td_flags & TDF_SBDRY) != 0) {
KASSERT(return_instead,
("TDF_SBDRY set for unsafe thread_suspend_check"));
return (0);
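[Note: per the return-value table above, a caller that passes
return_instead != 0 must be prepared to bail out with the returned errno
rather than suspend. A minimal sketch of such a call site, assuming the proc
lock is held as the function requires; not part of the patch:

	PROC_LOCK(p);
	error = thread_suspend_check(1);	/* return_instead != 0 */
	PROC_UNLOCK(p);
	if (error != 0)
		return (error);		/* EINTR or ERESTART */
]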
@@ -813,12 +885,15 @@
*/
if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
PROC_UNLOCK(p);
- tidhash_remove(td);
- PROC_LOCK(p);
- tdsigcleanup(td);
- PROC_SLOCK(p);
- thread_stopped(p);
- thread_exit();
+
+ /*
+ * Allow the Linux emulation layer to do some work
+ * before thread suicide.
+ */
+ if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
+ (p->p_sysent->sv_thread_detach)(td);
+ kern_thr_exit(td);
+ panic("stopped thread did not exit");
}
PROC_SLOCK(p);
@@ -826,8 +901,8 @@
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount + 1) {
thread_lock(p->p_singlethread);
- wakeup_swapper =
- thread_unsuspend_one(p->p_singlethread);
+ wakeup_swapper = thread_unsuspend_one(
+ p->p_singlethread, p, false);
thread_unlock(p->p_singlethread);
if (wakeup_swapper)
kick_proc0();
@@ -846,25 +921,16 @@
}
PROC_SUNLOCK(p);
mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
- if (return_instead == 0)
- td->td_flags &= ~TDF_BOUNDARY;
thread_unlock(td);
PROC_LOCK(p);
- if (return_instead == 0) {
- PROC_SLOCK(p);
- p->p_boundary_count--;
- PROC_SUNLOCK(p);
- }
}
return (0);
}
void
-thread_suspend_switch(struct thread *td)
+thread_suspend_switch(struct thread *td, struct proc *p)
{
- struct proc *p;
- p = td->td_proc;
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
@@ -872,8 +938,10 @@
* We implement thread_suspend_one in stages here to avoid
* dropping the proc lock while the thread lock is owned.
*/
- thread_stopped(p);
- p->p_suspcount++;
+ if (p == td->td_proc) {
+ thread_stopped(p);
+ p->p_suspcount++;
+ }
PROC_UNLOCK(p);
thread_lock(td);
td->td_flags &= ~TDF_NEEDSUSPCHK;
@@ -891,8 +959,9 @@
void
thread_suspend_one(struct thread *td)
{
- struct proc *p = td->td_proc;
+ struct proc *p;
+ p = td->td_proc;
PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
@@ -902,16 +971,22 @@
sched_sleep(td, 0);
}
-int
-thread_unsuspend_one(struct thread *td)
+static int
+thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{
- struct proc *p = td->td_proc;
- PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
TD_CLR_SUSPENDED(td);
- p->p_suspcount--;
+ td->td_flags &= ~TDF_ALLPROCSUSP;
+ if (td->td_proc == p) {
+ PROC_SLOCK_ASSERT(p, MA_OWNED);
+ p->p_suspcount--;
+ if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
+ td->td_flags &= ~TDF_BOUNDARY;
+ p->p_boundary_count--;
+ }
+ }
return (setrunnable(td));
}
@@ -931,20 +1006,24 @@
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (TD_IS_SUSPENDED(td)) {
- wakeup_swapper |= thread_unsuspend_one(td);
+ wakeup_swapper |= thread_unsuspend_one(td, p,
+ true);
}
thread_unlock(td);
}
- } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
- (p->p_numthreads == p->p_suspcount)) {
+ } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
+ p->p_numthreads == p->p_suspcount) {
/*
* Stopping everything also did the job for the single
* threading request. Now we've downgraded to single-threaded,
* let it continue.
*/
- thread_lock(p->p_singlethread);
- wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
- thread_unlock(p->p_singlethread);
+ if (p->p_singlethread->td_proc == p) {
+ thread_lock(p->p_singlethread);
+ wakeup_swapper = thread_unsuspend_one(
+ p->p_singlethread, p, false);
+ thread_unlock(p->p_singlethread);
+ }
}
if (wakeup_swapper)
kick_proc0();
@@ -954,16 +1033,26 @@
* End the single threading mode.
*/
void
-thread_single_end(void)
+thread_single_end(struct proc *p, int mode)
{
struct thread *td;
- struct proc *p;
int wakeup_swapper;
- td = curthread;
- p = td->td_proc;
+ KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
+ mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
+ ("invalid mode %d", mode));
PROC_LOCK_ASSERT(p, MA_OWNED);
- p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
+ KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
+ (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
+ ("mode %d does not match P_TOTAL_STOP", mode));
+ KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
+ ("thread_single_end from other thread %p %p",
+ curthread, p->p_singlethread));
+ KASSERT(mode != SINGLE_BOUNDARY ||
+ (p->p_flag & P_SINGLE_BOUNDARY) != 0,
+ ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
+ p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
+ P_TOTAL_STOP);
PROC_SLOCK(p);
p->p_singlethread = NULL;
wakeup_swapper = 0;
@@ -973,15 +1062,18 @@
* on the process. The single threader must be allowed
* to continue however as this is a bad place to stop.
*/
- if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
+ if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (TD_IS_SUSPENDED(td)) {
- wakeup_swapper |= thread_unsuspend_one(td);
+ wakeup_swapper |= thread_unsuspend_one(td, p,
+ mode == SINGLE_BOUNDARY);
}
thread_unlock(td);
}
}
+ KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
+ ("inconsistent boundary count %d", p->p_boundary_count));
PROC_SUNLOCK(p);
if (wakeup_swapper)
kick_proc0();
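[Note: with both entry points now taking the target process and the mode
explicitly, the KASSERTs above can enforce that begin and end are used as a
matched pair. A minimal sketch of the intended pairing, assuming a caller
that holds the proc lock throughout; illustrative only, not part of the
patch:

	PROC_LOCK(p);
	error = thread_single(p, SINGLE_BOUNDARY);
	if (error == 0) {
		/* Other threads are parked at the user boundary. */
		thread_single_end(p, SINGLE_BOUNDARY);
	}
	PROC_UNLOCK(p);
]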