[Midnightbsd-cvs] src [8163] trunk/sys/kern/sched_4bsd.c: The 4BSD scheduler's quantum mechanism has been broken for a while.

laffer1 at midnightbsd.org
Fri Sep 16 22:26:43 EDT 2016


Revision: 8163
          http://svnweb.midnightbsd.org/src/?rev=8163
Author:   laffer1
Date:     2016-09-16 22:26:43 -0400 (Fri, 16 Sep 2016)
Log Message:
-----------
The 4BSD scheduler's quantum mechanism has been broken for a while.
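
For context on the change: the old code in sched_clock() compared
ticks - PCPU_GET(switchticks) against a single global sched_quantum,
which is the mechanism that had broken; the new code gives each thread
its own ts_slice counter that is decremented on every stat clock tick
and refilled (with TDF_NEEDRESCHED set) when it reaches zero or when
the thread wakes up.  The following is only a minimal userland sketch
of that countdown pattern, not kernel code; the toy_* names are made
up for illustration.

#include <stdio.h>

/* Illustrative stand-ins for the per-thread scheduling state. */
struct toy_thread {
	int slice;		/* remaining stat clock ticks, like ts_slice */
	int need_resched;	/* like TDF_NEEDRESCHED */
};

static int toy_slice = 10;	/* like sched_slice: stathz / 10, ~100ms */

/* Called once per stat clock tick for the currently running thread. */
static void
toy_clock(struct toy_thread *td)
{
	if (--td->slice <= 0) {
		td->slice = toy_slice;	/* refill the slice */
		td->need_resched = 1;	/* request a context switch */
	}
}

int
main(void)
{
	struct toy_thread td = { toy_slice, 0 };
	int tick;

	for (tick = 1; tick <= 25; tick++) {
		toy_clock(&td);
		if (td.need_resched) {
			printf("tick %d: reschedule\n", tick);
			td.need_resched = 0;
		}
	}
	return (0);
}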

Modified Paths:
--------------
    trunk/sys/kern/sched_4bsd.c

Modified: trunk/sys/kern/sched_4bsd.c
===================================================================
--- trunk/sys/kern/sched_4bsd.c	2016-09-17 02:24:38 UTC (rev 8162)
+++ trunk/sys/kern/sched_4bsd.c	2016-09-17 02:26:43 UTC (rev 8163)
@@ -94,6 +94,7 @@
 	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
 	int		ts_cpticks;	/* (j) Ticks of cpu time. */
 	int		ts_slptime;	/* (j) Seconds !RUNNING. */
+	int		ts_slice;	/* Remaining part of time slice. */
 	int		ts_flags;
 	struct runq	*ts_runq;	/* runq the thread is currently on */
 #ifdef KTR
@@ -117,9 +118,9 @@
 static struct td_sched td_sched0;
 struct mtx sched_lock;
 
+static int	realstathz;	/* stathz is sometimes 0 and run off of hz. */
 static int	sched_tdcnt;	/* Total runnable threads in the system. */
-static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
-#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */
+static int	sched_slice = 1; /* Thread run time before rescheduling. */
 
 static void	setup_runqs(void);
 static void	schedcpu(void);
@@ -145,6 +146,10 @@
     &sched_kp);
 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
 
+static void sched_initticks(void *dummy);
+SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
+    NULL);
+
 /*
  * Global run queue.
  */
@@ -179,31 +184,12 @@
 	runq_init(&runq);
 }
 
-static int
-sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
-{
-	int error, new_val;
-
-	new_val = sched_quantum * tick;
-	error = sysctl_handle_int(oidp, &new_val, 0, req);
-        if (error != 0 || req->newptr == NULL)
-		return (error);
-	if (new_val < tick)
-		return (EINVAL);
-	sched_quantum = new_val / tick;
-	hogticks = 2 * sched_quantum;
-	return (0);
-}
-
 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
 
 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
     "Scheduler name");
-
-SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
-    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
-    "Roundrobin scheduling quantum in microseconds");
-
+SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
+    "Slice size for timeshare threads");
 #ifdef SMP
 /* Enable forwarding of wakeups to all other cpus */
 SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
@@ -470,9 +456,8 @@
 	struct thread *td;
 	struct proc *p;
 	struct td_sched *ts;
-	int awake, realstathz;
+	int awake;
 
-	realstathz = stathz ? stathz : hz;
 	sx_slock(&allproc_lock);
 	FOREACH_PROC_IN_SYSTEM(p) {
 		PROC_LOCK(p);
@@ -644,14 +629,28 @@
 {
 	setup_runqs();
 
-	if (sched_quantum == 0)
-		sched_quantum = SCHED_QUANTUM;
-	hogticks = 2 * sched_quantum;
+	/*
+	 * To avoid divide-by-zero, we set realstathz to a dummy value
+	 * in case sched_clock() is called before sched_initticks().
+	 */
+	realstathz = hz;
+	sched_slice = realstathz / 10;	/* ~100ms */
 
 	/* Account for thread0. */
 	sched_load_add();
 }
 
+/*
+ * This routine determines the sched_slice after stathz and hz are set up.
+ */
+static void
+sched_initticks(void *dummy)
+{
+
+	realstathz = stathz ? stathz : hz;
+	sched_slice = realstathz / 10;	/* ~100ms */
+}
+
 /* External interfaces start here */
 
 /*
@@ -669,6 +668,7 @@
 	proc0.p_sched = NULL; /* XXX */
 	thread0.td_sched = &td_sched0;
 	thread0.td_lock = &sched_lock;
+	td_sched0.ts_slice = sched_slice;
 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
 }
 
@@ -685,9 +685,9 @@
 int
 sched_rr_interval(void)
 {
-	if (sched_quantum == 0)
-		sched_quantum = SCHED_QUANTUM;
-	return (sched_quantum);
+
+	/* Convert sched_slice from stathz to hz. */
+	return (hz / (realstathz / sched_slice));
 }
 
 /*
@@ -724,9 +724,10 @@
 	 * Force a context switch if the current thread has used up a full
 	 * quantum (default quantum is 100ms).
 	 */
-	if (!TD_IS_IDLETHREAD(td) &&
-	    ticks - PCPU_GET(switchticks) >= sched_quantum)
+	if (!TD_IS_IDLETHREAD(td) && (--ts->ts_slice <= 0)) {
+		ts->ts_slice = sched_slice;
 		td->td_flags |= TDF_NEEDRESCHED;
+	}
 
 	stat = DPCPU_PTR(idlestat);
 	stat->oldidlecalls = stat->idlecalls;
@@ -780,6 +781,7 @@
 	ts = childtd->td_sched;
 	bzero(ts, sizeof(*ts));
 	ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
+	ts->ts_slice = 1;
 }
 
 void
@@ -1076,6 +1078,7 @@
 	}
 	td->td_slptick = 0;
 	ts->ts_slptime = 0;
+	ts->ts_slice = sched_slice;
 	sched_add(td, SRQ_BORING);
 }
 

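A note on the sched_rr_interval() change near the end of the diff:
sched_slice is counted in stathz ticks, while callers of
sched_rr_interval() expect a value in hz ticks, so the new return
expression converts between the two as hz / (realstathz / sched_slice).
The numbers below are assumed, typical-looking values (not taken from
this commit), just to sanity-check the integer arithmetic:

#include <stdio.h>

int
main(void)
{
	/* Assumed example values, not from the commit. */
	int hz = 1000;
	int realstathz = 128;
	int sched_slice = realstathz / 10;		/* 12 stathz ticks */
	int rr = hz / (realstathz / sched_slice);	/* 1000 / 10 = 100 */

	printf("sched_rr_interval() ~= %d hz ticks (~%d ms)\n",
	    rr, rr * 1000 / hz);
	return (0);
}

Because both divisions are integer divisions, the result is only an
approximation of the ~100ms slice, which matches the old default
quantum of hz / 10.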

