[Midnightbsd-cvs] src [9961] trunk/sys/kern: sync with freebsd 10-stable
laffer1 at midnightbsd.org
Sat May 26 10:32:34 EDT 2018
Revision: 9961
http://svnweb.midnightbsd.org/src/?rev=9961
Author: laffer1
Date: 2018-05-26 10:32:33 -0400 (Sat, 26 May 2018)
Log Message:
-----------
sync with freebsd 10-stable
Modified Paths:
--------------
trunk/sys/kern/kern_sx.c
trunk/sys/kern/kern_synch.c
Modified: trunk/sys/kern/kern_sx.c
===================================================================
--- trunk/sys/kern/kern_sx.c 2018-05-26 14:31:56 UTC (rev 9960)
+++ trunk/sys/kern/kern_sx.c 2018-05-26 14:32:33 UTC (rev 9961)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
/*-
* Copyright (c) 2007 Attilio Rao <attilio at freebsd.org>
* Copyright (c) 2001 Jason Evans <jasone at freebsd.org>
@@ -42,16 +43,20 @@
#include "opt_no_adaptive_sx.h"
#include <sys/cdefs.h>
-__FBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/kern/kern_sx.c 323870 2017-09-21 19:24:11Z marius $");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
@@ -77,11 +82,6 @@
#define SQ_EXCLUSIVE_QUEUE 0
#define SQ_SHARED_QUEUE 1
-#ifdef ADAPTIVE_SX
-#define ASX_RETRIES 10
-#define ASX_LOOPS 10000
-#endif
-
/*
* Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We
* drop Giant anytime we have to sleep or if we adaptively spin.
@@ -116,15 +116,15 @@
#define sx_recurse lock_object.lo_data
#define sx_recursed(sx) ((sx)->sx_recurse != 0)
-static void assert_sx(struct lock_object *lock, int what);
+static void assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
-static void db_show_sx(struct lock_object *lock);
+static void db_show_sx(const struct lock_object *lock);
#endif
-static void lock_sx(struct lock_object *lock, int how);
+static void lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
-static int owner_sx(struct lock_object *lock, struct thread **owner);
+static int owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
-static int unlock_sx(struct lock_object *lock);
+static uintptr_t unlock_sx(struct lock_object *lock);
struct lock_class lock_class_sx = {
.lc_name = "sx",
@@ -144,26 +144,61 @@
#define _sx_assert(sx, what, file, line)
#endif
+#ifdef ADAPTIVE_SX
+static u_int asx_retries = 10;
+static u_int asx_loops = 10000;
+static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
+SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
+SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
+
+static struct lock_delay_config sx_delay = {
+ .initial = 1000,
+ .step = 500,
+ .min = 100,
+ .max = 5000,
+};
+
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_initial, CTLFLAG_RW, &sx_delay.initial,
+ 0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_step, CTLFLAG_RW, &sx_delay.step,
+ 0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_min, CTLFLAG_RW, &sx_delay.min,
+ 0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
+ 0, "");
+
+static void
+sx_delay_sysinit(void *dummy)
+{
+
+ sx_delay.initial = mp_ncpus * 25;
+ sx_delay.step = (mp_ncpus * 25) / 2;
+ sx_delay.min = mp_ncpus * 5;
+ sx_delay.max = mp_ncpus * 25 * 10;
+}
+LOCK_DELAY_SYSINIT(sx_delay_sysinit);
+#endif
+
void
-assert_sx(struct lock_object *lock, int what)
+assert_sx(const struct lock_object *lock, int what)
{
- sx_assert((struct sx *)lock, what);
+ sx_assert((const struct sx *)lock, what);
}
void
-lock_sx(struct lock_object *lock, int how)
+lock_sx(struct lock_object *lock, uintptr_t how)
{
struct sx *sx;
sx = (struct sx *)lock;
if (how)
+ sx_slock(sx);
+ else
sx_xlock(sx);
- else
- sx_slock(sx);
}
-int
+uintptr_t
unlock_sx(struct lock_object *lock)
{
struct sx *sx;
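
The hunk above replaces the compile-time ASX_RETRIES/ASX_LOOPS constants with
the debug.sx.retries and debug.sx.loops sysctl tunables, and routes
exclusive-owner spinning through a lock_delay() backoff whose parameters are
rescaled by mp_ncpus at boot. A rough userspace sketch of that bounded-backoff
pattern, using hypothetical names (delay_cfg, delay_arg, backoff_spin) in
place of the kernel's lock_delay machinery and the diff's default values:

    #include <stdatomic.h>

    struct delay_cfg { int initial, step, min, max; };
    struct delay_arg { const struct delay_cfg *cfg; int delay; };

    static volatile int spin_sink;          /* keeps the busy loop alive */
    static atomic_flag lk = ATOMIC_FLAG_INIT;

    static void
    delay_init(struct delay_arg *la, const struct delay_cfg *cfg)
    {
        la->cfg = cfg;
        la->delay = cfg->initial;   /* min is a floor for retuning; unused here */
    }

    static void
    backoff_spin(struct delay_arg *la)
    {
        for (int i = 0; i < la->delay; i++)
            spin_sink++;                    /* stands in for cpu_spinwait() */
        la->delay += la->cfg->step;         /* spin longer on each retry... */
        if (la->delay > la->cfg->max)
            la->delay = la->cfg->max;       /* ...up to the configured cap */
    }

    static void
    acquire(const struct delay_cfg *cfg)
    {
        struct delay_arg la;

        delay_init(&la, cfg);
        while (atomic_flag_test_and_set_explicit(&lk, memory_order_acquire))
            backoff_spin(&la);              /* contended: back off, retry */
    }

    int
    main(void)
    {
        static const struct delay_cfg cfg = {
            .initial = 1000, .step = 500, .min = 100, .max = 5000
        };

        acquire(&cfg);                      /* uncontended here: no spinning */
        atomic_flag_clear_explicit(&lk, memory_order_release);
        return (0);
    }
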
@@ -172,18 +207,18 @@
sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
if (sx_xlocked(sx)) {
sx_xunlock(sx);
- return (1);
+ return (0);
} else {
sx_sunlock(sx);
- return (0);
+ return (1);
}
}
#ifdef KDTRACE_HOOKS
int
-owner_sx(struct lock_object *lock, struct thread **owner)
+owner_sx(const struct lock_object *lock, struct thread **owner)
{
- struct sx *sx = (struct sx *)lock;
+ const struct sx *sx = (const struct sx *)lock;
uintptr_t x = sx->sx_lock;
*owner = (struct thread *)SX_OWNER(x);
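
Note the mode-encoding flip in the hunks above: lc_lock()/lc_unlock() now
treat a nonzero "how" as shared (and the prototypes widen from int to
uintptr_t), so sleep code that drops a lock can feed the value returned by
lc_unlock() straight back into lc_lock() and retake the lock in its original
mode. A toy model of that round-trip convention, with hypothetical toy_*
names standing in for the lock-class methods:

    #include <assert.h>
    #include <stdint.h>

    static int mode;                        /* 0 = exclusive, 1 = shared */

    static uintptr_t
    toy_unlock(void)
    {
        return (mode);                      /* nonzero means "was shared" */
    }

    static void
    toy_lock(uintptr_t how)
    {
        mode = (how != 0);                  /* nonzero how => relock shared */
    }

    int
    main(void)
    {
        mode = 1;                           /* lock starts out shared */
        toy_lock(toy_unlock());             /* drop around a sleep, retake */
        assert(mode == 1);                  /* same mode as before */
        return (0);
    }
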
@@ -206,7 +241,7 @@
int flags;
MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
- SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
+ SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
("%s: sx_lock not aligned for %s: %p", __func__, description,
&sx->sx_lock));
@@ -222,11 +257,13 @@
flags |= LO_RECURSABLE;
if (opts & SX_QUIET)
flags |= LO_QUIET;
+ if (opts & SX_NEW)
+ flags |= LO_NEW;
flags |= opts & SX_NOADAPTIVE;
+ lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
sx->sx_lock = SX_LOCK_UNLOCKED;
sx->sx_recurse = 0;
- lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
}
void
@@ -246,6 +283,9 @@
if (SCHEDULER_STOPPED())
return (0);
+ KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
+ ("sx_slock() by idle thread %p on sx %s @ %s:%d",
+ curthread, sx->lock_object.lo_name, file, line));
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_slock() of destroyed sx @ %s:%d", file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
@@ -260,7 +300,7 @@
}
int
-_sx_try_slock(struct sx *sx, const char *file, int line)
+sx_try_slock_(struct sx *sx, const char *file, int line)
{
uintptr_t x;
@@ -267,6 +307,10 @@
if (SCHEDULER_STOPPED())
return (1);
+ KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
+ ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
+ curthread, sx->lock_object.lo_name, file, line));
+
for (;;) {
x = sx->sx_lock;
KASSERT(x != SX_LOCK_DESTROYED,
@@ -276,6 +320,8 @@
if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
+ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE,
+ sx, 0, 0, file, line);
curthread->td_locks++;
return (1);
}
@@ -292,6 +338,9 @@
if (SCHEDULER_STOPPED())
return (0);
+ KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
+ ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
+ curthread, sx->lock_object.lo_name, file, line));
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_xlock() of destroyed sx @ %s:%d", file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
@@ -308,7 +357,7 @@
}
int
-_sx_try_xlock(struct sx *sx, const char *file, int line)
+sx_try_xlock_(struct sx *sx, const char *file, int line)
{
int rval;
@@ -315,6 +364,9 @@
if (SCHEDULER_STOPPED())
return (1);
+ KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
+ ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
+ curthread, sx->lock_object.lo_name, file, line));
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
@@ -330,6 +382,9 @@
if (rval) {
WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
+ if (!sx_recursed(sx))
+ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
+ sx, 0, 0, file, line);
curthread->td_locks++;
}
@@ -345,11 +400,11 @@
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_sunlock() of destroyed sx @ %s:%d", file, line));
_sx_assert(sx, SA_SLOCKED, file, line);
- curthread->td_locks--;
WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
__sx_sunlock(sx, file, line);
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
+ curthread->td_locks--;
}
void
@@ -361,7 +416,6 @@
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_xunlock() of destroyed sx @ %s:%d", file, line));
_sx_assert(sx, SA_XLOCKED, file, line);
- curthread->td_locks--;
WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
line);
@@ -368,6 +422,7 @@
if (!sx_recursed(sx))
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
__sx_xunlock(sx, curthread, file, line);
+ curthread->td_locks--;
}
/*
@@ -376,7 +431,7 @@
 * Return 1 if the upgrade succeeds, 0 otherwise.
*/
int
-_sx_try_upgrade(struct sx *sx, const char *file, int line)
+sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
uintptr_t x;
int success;
@@ -409,7 +464,7 @@
* Downgrade an unrecursed exclusive lock into a single shared lock.
*/
void
-_sx_downgrade(struct sx *sx, const char *file, int line)
+sx_downgrade_(struct sx *sx, const char *file, int line)
{
uintptr_t x;
int wakeup_swapper;
@@ -493,15 +548,25 @@
int contested = 0;
#endif
int error = 0;
+#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+ struct lock_delay_arg lda;
+#endif
#ifdef KDTRACE_HOOKS
- uint64_t spin_cnt = 0;
- uint64_t sleep_cnt = 0;
+ uintptr_t state;
+ u_int sleep_cnt = 0;
int64_t sleep_time = 0;
+ int64_t all_time = 0;
#endif
if (SCHEDULER_STOPPED())
return (0);
+#if defined(ADAPTIVE_SX)
+ lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+ lock_delay_arg_init(&lda, NULL);
+#endif
+
/* If we already hold an exclusive lock, then recurse. */
if (sx_xlocked(sx)) {
KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -518,10 +583,17 @@
CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
- while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
#ifdef KDTRACE_HOOKS
- spin_cnt++;
+ all_time -= lockstat_nsecs(&sx->lock_object);
+ state = sx->sx_lock;
#endif
+ for (;;) {
+ if (sx->sx_lock == SX_LOCK_UNLOCKED &&
+ atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
+ break;
+#ifdef KDTRACE_HOOKS
+ lda.spin_cnt++;
+#endif
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -543,20 +615,25 @@
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, sx, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(curthread), "spinning",
+ "lockname:\"%s\"",
+ sx->lock_object.lo_name);
GIANT_SAVE();
while (SX_OWNER(sx->sx_lock) == x &&
- TD_IS_RUNNING(owner)) {
- cpu_spinwait();
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
- }
+ TD_IS_RUNNING(owner))
+ lock_delay(&lda);
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(curthread), "running");
continue;
}
- } else if (SX_SHARERS(x) && spintries < ASX_RETRIES) {
+ } else if (SX_SHARERS(x) && spintries < asx_retries) {
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(curthread), "spinning",
+ "lockname:\"%s\"", sx->lock_object.lo_name);
GIANT_SAVE();
spintries++;
- for (i = 0; i < ASX_LOOPS; i++) {
+ for (i = 0; i < asx_loops; i++) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR4(KTR_LOCK,
"%s: shared spinning on %p with %u and %u",
@@ -567,10 +644,12 @@
break;
cpu_spinwait();
#ifdef KDTRACE_HOOKS
- spin_cnt++;
+ lda.spin_cnt++;
#endif
}
- if (i != ASX_LOOPS)
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(curthread), "running");
+ if (i != asx_loops)
continue;
}
}
@@ -654,7 +733,7 @@
__func__, sx);
#ifdef KDTRACE_HOOKS
- sleep_time -= lockstat_nsecs();
+ sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
GIANT_SAVE();
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
@@ -665,7 +744,7 @@
else
error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
- sleep_time += lockstat_nsecs();
+ sleep_time += lockstat_nsecs(&sx->lock_object);
sleep_cnt++;
#endif
if (error) {
@@ -679,17 +758,21 @@
CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
__func__, sx);
}
-
- GIANT_RESTORE();
+#ifdef KDTRACE_HOOKS
+ all_time += lockstat_nsecs(&sx->lock_object);
+ if (sleep_time)
+ LOCKSTAT_RECORD4(LS_SX_XLOCK_BLOCK, sx, sleep_time,
+ LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
+ (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
+ if (lda.spin_cnt > sleep_cnt)
+ LOCKSTAT_RECORD4(LS_SX_XLOCK_SPIN, sx, all_time - sleep_time,
+ LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
+ (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
+#endif
if (!error)
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
contested, waittime, file, line);
-#ifdef KDTRACE_HOOKS
- if (sleep_time)
- LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
- if (spin_cnt > sleep_cnt)
- LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
-#endif
+ GIANT_RESTORE();
return (error);
}
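
The reworked hard path above also changes the lockstat bookkeeping: instead of
counting loop iterations, it brackets the whole contended acquire with
lockstat_nsecs() and brackets each sleep separately, so the spin component
falls out as all_time - sleep_time. The -=/+= idiom saves a separate "start"
timestamp per interval. A self-contained illustration of the same pattern,
with clock_gettime() standing in for lockstat_nsecs():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t
    now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ((int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
    }

    int
    main(void)
    {
        int64_t all_time = 0, sleep_time = 0;
        struct timespec ms = { 0, 1000000 };    /* 1 ms */

        all_time -= now_ns();               /* open the outer interval */
        for (int i = 0; i < 3; i++) {       /* pretend three failed tries */
            sleep_time -= now_ns();         /* open a sleep interval */
            nanosleep(&ms, NULL);           /* the blocked portion */
            sleep_time += now_ns();         /* close it */
        }
        all_time += now_ns();               /* close the outer interval */
        printf("blocked %lld ns, spun %lld ns\n", (long long)sleep_time,
            (long long)(all_time - sleep_time));
        return (0);
    }
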
@@ -774,15 +857,29 @@
#endif
uintptr_t x;
int error = 0;
+#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+ struct lock_delay_arg lda;
+#endif
#ifdef KDTRACE_HOOKS
- uint64_t spin_cnt = 0;
- uint64_t sleep_cnt = 0;
+ uintptr_t state;
+ u_int sleep_cnt = 0;
int64_t sleep_time = 0;
+ int64_t all_time = 0;
#endif
if (SCHEDULER_STOPPED())
return (0);
+#if defined(ADAPTIVE_SX)
+ lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+ lock_delay_arg_init(&lda, NULL);
+#endif
+#ifdef KDTRACE_HOOKS
+ state = sx->sx_lock;
+ all_time -= lockstat_nsecs(&sx->lock_object);
+#endif
+
/*
* As with rwlocks, we don't make any attempt to try to block
* shared locks once there is an exclusive waiter.
@@ -789,7 +886,7 @@
*/
for (;;) {
#ifdef KDTRACE_HOOKS
- spin_cnt++;
+ lda.spin_cnt++;
#endif
x = sx->sx_lock;
@@ -832,14 +929,15 @@
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, sx, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(curthread), "spinning",
+ "lockname:\"%s\"", sx->lock_object.lo_name);
GIANT_SAVE();
while (SX_OWNER(sx->sx_lock) == x &&
- TD_IS_RUNNING(owner)) {
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
- cpu_spinwait();
- }
+ TD_IS_RUNNING(owner))
+ lock_delay(&lda);
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(curthread), "running");
continue;
}
}
@@ -902,7 +1000,7 @@
__func__, sx);
#ifdef KDTRACE_HOOKS
- sleep_time -= lockstat_nsecs();
+ sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
GIANT_SAVE();
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
@@ -913,7 +1011,7 @@
else
error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
- sleep_time += lockstat_nsecs();
+ sleep_time += lockstat_nsecs(&sx->lock_object);
sleep_cnt++;
#endif
if (error) {
@@ -927,15 +1025,20 @@
CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
__func__, sx);
}
+#ifdef KDTRACE_HOOKS
+ all_time += lockstat_nsecs(&sx->lock_object);
+ if (sleep_time)
+ LOCKSTAT_RECORD4(LS_SX_SLOCK_BLOCK, sx, sleep_time,
+ LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
+ (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
+ if (lda.spin_cnt > sleep_cnt)
+ LOCKSTAT_RECORD4(LS_SX_SLOCK_SPIN, sx, all_time - sleep_time,
+ LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
+ (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
+#endif
if (error == 0)
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
contested, waittime, file, line);
-#ifdef KDTRACE_HOOKS
- if (sleep_time)
- LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
- if (spin_cnt > sleep_cnt)
- LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
-#endif
GIANT_RESTORE();
return (error);
}
@@ -1041,7 +1144,7 @@
* thread owns an slock.
*/
void
-_sx_assert(struct sx *sx, int what, const char *file, int line)
+_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
int slocked = 0;
@@ -1124,12 +1227,12 @@
#ifdef DDB
static void
-db_show_sx(struct lock_object *lock)
+db_show_sx(const struct lock_object *lock)
{
struct thread *td;
- struct sx *sx;
+ const struct sx *sx;
- sx = (struct sx *)lock;
+ sx = (const struct sx *)lock;
db_printf(" state: ");
if (sx->sx_lock == SX_LOCK_UNLOCKED)
Modified: trunk/sys/kern/kern_synch.c
===================================================================
--- trunk/sys/kern/kern_synch.c 2018-05-26 14:31:56 UTC (rev 9960)
+++ trunk/sys/kern/kern_synch.c 2018-05-26 14:32:33 UTC (rev 9961)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
/*-
* Copyright (c) 1982, 1986, 1990, 1991, 1993
* The Regents of the University of California. All rights reserved.
@@ -35,7 +36,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/kern/kern_synch.c 316843 2017-04-14 14:45:44Z avg $");
#include "opt_kdtrace.h"
#include "opt_ktrace.h"
@@ -73,19 +74,12 @@
#include <vm/pmap.h>
#endif
-#define KTDSTATE(td) \
- (((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \
- ((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \
- ((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" : \
- ((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \
- ((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")
-
static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
NULL);
int hogticks;
-static int pause_wchan;
+static uint8_t pause_wchan[MAXCPU];
static struct callout loadav_callout;
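
The pause_wchan change above gives pause() one wait-channel byte per CPU
rather than a single global channel, so pause() sleepers on different CPUs no
longer all collide on the same sleepqueue chain. _sleep() then recognizes a
pause by checking whether the identifier points into the array, mirrored in
this small standalone version of the test (the MAXCPU value is illustrative;
the real one is per-architecture):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MAXCPU 256                      /* illustrative */

    static uint8_t pause_wchan[MAXCPU];

    static bool
    is_pause_wchan(const void *ident)       /* same range test as _sleep() */
    {
        const uint8_t *p = ident;

        return (p >= &pause_wchan[0] && p <= &pause_wchan[MAXCPU - 1]);
    }

    int
    main(void)
    {
        uint8_t other;

        assert(is_pause_wchan(&pause_wchan[3]));
        assert(!is_pause_wchan(&other));
        return (0);
    }
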
@@ -102,13 +96,12 @@
};
/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
-static int fscale __unused = FSCALE;
-SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
+SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, FSCALE, "");
static void loadav(void *arg);
SDT_PROVIDER_DECLARE(sched);
-SDT_PROBE_DEFINE(sched, , , preempt, preempt);
+SDT_PROBE_DEFINE(sched, , , preempt);
/*
* These probes reference Solaris features that are not implemented in FreeBSD.
@@ -115,14 +108,14 @@
* Create the probes anyway for compatibility with existing D scripts; they'll
* just never fire.
*/
-SDT_PROBE_DEFINE(sched, , , cpucaps_sleep, cpucaps-sleep);
-SDT_PROBE_DEFINE(sched, , , cpucaps_wakeup, cpucaps-wakeup);
-SDT_PROBE_DEFINE(sched, , , schedctl_nopreempt, schedctl-nopreempt);
-SDT_PROBE_DEFINE(sched, , , schedctl_preempt, schedctl-preempt);
-SDT_PROBE_DEFINE(sched, , , schedctl_yield, schedctl-yield);
+SDT_PROBE_DEFINE(sched, , , cpucaps__sleep);
+SDT_PROBE_DEFINE(sched, , , cpucaps__wakeup);
+SDT_PROBE_DEFINE(sched, , , schedctl__nopreempt);
+SDT_PROBE_DEFINE(sched, , , schedctl__preempt);
+SDT_PROBE_DEFINE(sched, , , schedctl__yield);
-void
-sleepinit(void)
+static void
+sleepinit(void *unused)
{
hogticks = (hz / 10) * 2; /* Default only. */
@@ -130,13 +123,19 @@
}
/*
+ * vmem tries to lock the sleepq mutexes when free'ing kva, so make sure
+ * it is available.
+ */
+SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, 0);
+
+/*
* General sleep call. Suspends the current thread until a wakeup is
* performed on the specified identifier. The thread will then be made
- * runnable with the specified priority. Sleeps at most timo/hz seconds
- * (0 means no timeout). If pri includes PCATCH flag, signals are checked
- * before and after sleeping, else signals are not checked. Returns 0 if
+ * runnable with the specified priority. Sleeps at most sbt units of time
+ * (0 means no timeout). If pri includes the PCATCH flag, let signals
+ * interrupt the sleep, otherwise ignore them while sleeping. Returns 0 if
* awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
- * signal needs to be delivered, ERESTART is returned if the current system
+ * signal becomes pending, ERESTART is returned if the current system
* call should be restarted if possible, and EINTR is returned if the system
* call should be interrupted by the signal (return EINTR).
*
@@ -146,12 +145,13 @@
*/
int
_sleep(void *ident, struct lock_object *lock, int priority,
- const char *wmesg, int timo)
+ const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
struct thread *td;
struct proc *p;
struct lock_class *class;
- int catch, flags, lock_state, pri, rval;
+ uintptr_t lock_state;
+ int catch, pri, rval, sleepq_flags;
WITNESS_SAVE_DECL(lock_witness);
td = curthread;
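
This signature change is the heart of the kern_synch.c sync: sleep timeouts
move from integer ticks (timo) to an sbintime_t plus a precision and callout
flags, following the FreeBSD 10 callout(9) rework. As a hedged sketch of a
caller under the new KPI (SBT_1MS is assumed to exist alongside the SBT_1S
and SBT_1US constants used elsewhere in this diff):

    /* Kernel-context sketch: back off ~50 ms, tolerating ~1 ms of slop. */
    error = pause_sbt("backoff", 50 * SBT_1MS, SBT_1MS, 0);
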
@@ -162,7 +162,7 @@
#endif
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
"Sleeping on \"%s\"", wmesg);
- KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
+ KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
("sleeping without a lock"));
KASSERT(p != NULL, ("msleep1"));
KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
@@ -190,22 +190,15 @@
catch = priority & PCATCH;
pri = priority & PRIMASK;
- /*
- * If we are already on a sleep queue, then remove us from that
- * sleep queue first. We have to do this to handle recursive
- * sleeps.
- */
- if (TD_ON_SLEEPQ(td))
- sleepq_remove(td, td->td_wchan);
+ KASSERT(!TD_ON_SLEEPQ(td), ("recursive sleep"));
- if (ident == &pause_wchan)
- flags = SLEEPQ_PAUSE;
+ if ((uint8_t *)ident >= &pause_wchan[0] &&
+ (uint8_t *)ident <= &pause_wchan[MAXCPU - 1])
+ sleepq_flags = SLEEPQ_PAUSE;
else
- flags = SLEEPQ_SLEEP;
+ sleepq_flags = SLEEPQ_SLEEP;
if (catch)
- flags |= SLEEPQ_INTERRUPTIBLE;
- if (priority & PBDRY)
- flags |= SLEEPQ_STOP_ON_BDRY;
+ sleepq_flags |= SLEEPQ_INTERRUPTIBLE;
sleepq_lock(ident);
CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
@@ -231,9 +224,9 @@
* stopped, then td will no longer be on a sleep queue upon
* return from cursig().
*/
- sleepq_add(ident, lock, wmesg, flags, 0);
- if (timo)
- sleepq_set_timeout(ident, timo);
+ sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
+ if (sbt != 0)
+ sleepq_set_timeout_sbt(ident, sbt, pr, flags);
if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
sleepq_release(ident);
WITNESS_SAVE(lock, lock_witness);
@@ -240,9 +233,9 @@
lock_state = class->lc_unlock(lock);
sleepq_lock(ident);
}
- if (timo && catch)
+ if (sbt != 0 && catch)
rval = sleepq_timedwait_sig(ident, pri);
- else if (timo)
+ else if (sbt != 0)
rval = sleepq_timedwait(ident, pri);
else if (catch)
rval = sleepq_wait_sig(ident, pri);
@@ -263,7 +256,8 @@
}
int
-msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
+msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
+ sbintime_t sbt, sbintime_t pr, int flags)
{
struct thread *td;
struct proc *p;
@@ -301,8 +295,8 @@
* We put ourselves on the sleep queue and start our timeout.
*/
sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
- if (timo)
- sleepq_set_timeout(ident, timo);
+ if (sbt != 0)
+ sleepq_set_timeout_sbt(ident, sbt, pr, flags);
/*
* Can't call ktrace with any spin locks held so it can lock the
@@ -324,7 +318,7 @@
wmesg);
sleepq_lock(ident);
#endif
- if (timo)
+ if (sbt != 0)
rval = sleepq_timedwait(ident, 0);
else {
sleepq_wait(ident, 0);
@@ -348,28 +342,30 @@
* to a "timo" value of one.
*/
int
-pause(const char *wmesg, int timo)
+pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
- KASSERT(timo >= 0, ("pause: timo must be >= 0"));
+ KASSERT(sbt >= 0, ("pause: timeout must be >= 0"));
/* silently convert invalid timeouts */
- if (timo < 1)
- timo = 1;
+ if (sbt == 0)
+ sbt = tick_sbt;
- if (cold) {
+ if (cold || kdb_active || SCHEDULER_STOPPED()) {
/*
- * We delay one HZ at a time to avoid overflowing the
+ * We delay one second at a time to avoid overflowing the
* system specific DELAY() function(s):
*/
- while (timo >= hz) {
+ while (sbt >= SBT_1S) {
DELAY(1000000);
- timo -= hz;
+ sbt -= SBT_1S;
}
- if (timo > 0)
- DELAY(timo * tick);
+ /* Do the delay remainder, if any */
+ sbt = (sbt + SBT_1US - 1) / SBT_1US;
+ if (sbt > 0)
+ DELAY(sbt);
return (0);
}
- return (tsleep(&pause_wchan, 0, wmesg, timo));
+ return (_sleep(&pause_wchan[curcpu], NULL, 0, wmesg, sbt, pr, flags));
}
/*
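
An sbintime_t, as used by the converted pause_sbt() above, is a signed 32.32
fixed-point count of seconds: SBT_1S is 1 << 32 and SBT_1US is SBT_1S /
1000000, truncated. The cold-path remainder conversion rounds up to whole
microseconds before handing the value to DELAY(); a standalone check of that
arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t sbintime_t;             /* 32.32 fixed-point seconds */
    #define SBT_1S  ((sbintime_t)1 << 32)
    #define SBT_1US (SBT_1S / 1000000)      /* 4294 after truncation */

    int
    main(void)
    {
        sbintime_t sbt = SBT_1S;            /* one second left to delay */
        int64_t us = (sbt + SBT_1US - 1) / SBT_1US;     /* round up */

        /* Prints 1000226: a hair over 10^6 since SBT_1US truncates. */
        printf("%lld us\n", (long long)us);
        return (0);
    }
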
@@ -424,11 +420,9 @@
{
uint64_t runtime, new_switchtime;
struct thread *td;
- struct proc *p;
td = curthread; /* XXX */
THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
- p = td->td_proc; /* XXX */
KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
@@ -468,26 +462,18 @@
PCPU_INC(cnt.v_swtch);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
- td->td_tid, td->td_sched, p->p_pid, td->td_name);
-#if (KTR_COMPILE & KTR_SCHED) != 0
- if (TD_IS_IDLETHREAD(td))
- KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
- "prio:%d", td->td_priority);
- else
- KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
- "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
- "lockname:\"%s\"", td->td_lockname);
+ td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
+#ifdef KDTRACE_HOOKS
+ if ((flags & SW_PREEMPT) != 0 || ((flags & SW_INVOL) != 0 &&
+ (flags & SW_TYPE_MASK) == SWT_NEEDRESCHED))
+ SDT_PROBE0(sched, , , preempt);
#endif
- SDT_PROBE0(sched, , , preempt);
#ifdef XEN
PT_UPDATES_FLUSH();
#endif
sched_switch(td, newtd, flags);
- KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
- "prio:%d", td->td_priority);
-
CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
- td->td_tid, td->td_sched, p->p_pid, td->td_name);
+ td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
/*
* If the last thread was exiting, finish cleaning it up.
@@ -560,8 +546,9 @@
* random variation to avoid synchronisation with processes that
* run at regular intervals.
*/
- callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
- loadav, NULL);
+ callout_reset_sbt(&loadav_callout,
+ SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
+ loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
}
/* ARGSUSED */
@@ -568,7 +555,7 @@
static void
synch_setup(void *dummy)
{
- callout_init(&loadav_callout, CALLOUT_MPSAFE);
+ callout_init(&loadav_callout, 1);
/* Kick off timeout driven events by calling first time. */
loadav(NULL);
@@ -578,7 +565,7 @@
should_yield(void)
{
- return (ticks - curthread->td_swvoltick >= hogticks);
+ return ((u_int)ticks - (u_int)curthread->td_swvoltick >= hogticks);
}
void
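
The should_yield() fix above matters once the ticks counter wraps: with plain
int operands the subtraction can overflow, which is undefined behavior and in
practice goes hugely negative, letting a thread dodge yielding until ticks
catches back up. Casting both operands to u_int keeps the difference well
defined modulo 2^32. A small demonstration:

    #include <assert.h>
    #include <limits.h>

    int
    main(void)
    {
        int then = INT_MAX - 5;     /* td_swvoltick sampled before the wrap */
        int now = INT_MIN + 10;     /* ticks shortly after wrapping */

        /* Unsigned subtraction still yields the true elapsed count. */
        assert((unsigned)now - (unsigned)then == 16);
        return (0);
    }
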