[Midnightbsd-cvs] src [8234] trunk/sys: add a KPI to allow reserving some amount of space in the numvnodes counter without actually allocating vnodes
laffer1 at midnightbsd.org
Sat Sep 17 17:26:57 EDT 2016
Revision: 8234
http://svnweb.midnightbsd.org/src/?rev=8234
Author: laffer1
Date: 2016-09-17 17:26:57 -0400 (Sat, 17 Sep 2016)
Log Message:
-----------
add a KPI to allow reserving some amount of space in the numvnodes counter without actually allocating vnodes
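
The new interface pairs getnewvnode_reserve() with getnewvnode_drop_reserve(): the caller charges numvnodes up front (possibly sleeping there), subsequent getnewvnode() calls consume the per-thread td_vp_reserv count instead of waiting on the vnode limit, and the drop call returns any unused slots. A minimal caller-side sketch under those assumptions follows; the filesystem name, vop vector, and error handling are hypothetical, and only the getnewvnode_* calls come from this change.

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/* Hypothetical vop vector for the example filesystem. */
extern struct vop_vector example_vops;

static int
example_create_vnodes(struct mount *mp, struct vnode **vpp, u_int n)
{
	u_int i;
	int error;

	/*
	 * Charge numvnodes for up to n vnodes now; any sleeping on the
	 * vnode limit happens here rather than inside getnewvnode().
	 */
	getnewvnode_reserve(n);

	error = 0;
	for (i = 0; i < n; i++) {
		/* Consumes one unit of td_vp_reserv, skips the limit wait. */
		error = getnewvnode("example", mp, &example_vops, &vpp[i]);
		if (error != 0)
			break;
	}

	/* Give back whatever part of the reservation was not consumed. */
	getnewvnode_drop_reserve();
	return (error);
}

Because userret() now asserts that td_vp_reserv is zero, any reservation must be dropped before the thread returns to user mode.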
Modified Paths:
--------------
trunk/sys/kern/kern_thread.c
trunk/sys/kern/subr_trap.c
trunk/sys/kern/vfs_subr.c
trunk/sys/sys/proc.h
trunk/sys/sys/vnode.h
Modified: trunk/sys/kern/kern_thread.c
===================================================================
--- trunk/sys/kern/kern_thread.c 2016-09-17 21:24:37 UTC (rev 8233)
+++ trunk/sys/kern/kern_thread.c 2016-09-17 21:26:57 UTC (rev 8234)
@@ -207,6 +207,7 @@
td->td_sleepqueue = sleepq_alloc();
td->td_turnstile = turnstile_alloc();
td->td_rlqe = NULL;
+ td->td_vp_reserv = 0;
EVENTHANDLER_INVOKE(thread_init, td);
td->td_sched = (struct td_sched *)&td[1];
umtx_thread_init(td);
Modified: trunk/sys/kern/subr_trap.c
===================================================================
--- trunk/sys/kern/subr_trap.c 2016-09-17 21:24:37 UTC (rev 8233)
+++ trunk/sys/kern/subr_trap.c 2016-09-17 21:26:57 UTC (rev 8234)
@@ -139,6 +139,8 @@
sched_userret(td);
KASSERT(td->td_locks == 0,
("userret: Returning with %d locks held.", td->td_locks));
+ KASSERT(td->td_vp_reserv == 0,
+ ("userret: Returning while holding vnode reservation"));
#ifdef VIMAGE
/* Unfortunately td_vnet_lpush needs VNET_DEBUG. */
VNET_ASSERT(curvnet == NULL,
Modified: trunk/sys/kern/vfs_subr.c
===================================================================
--- trunk/sys/kern/vfs_subr.c 2016-09-17 21:24:37 UTC (rev 8233)
+++ trunk/sys/kern/vfs_subr.c 2016-09-17 21:26:57 UTC (rev 8234)
@@ -935,27 +935,15 @@
}
/*
- * Return the next vnode from the free list.
+ * Wait for available vnodes.
*/
-int
-getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
- struct vnode **vpp)
+static int
+getnewvnode_wait(int suspended)
{
- struct vnode *vp = NULL;
- struct bufobj *bo;
- CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
- mtx_lock(&vnode_free_list_mtx);
- /*
- * Lend our context to reclaim vnodes if they've exceeded the max.
- */
- if (freevnodes > wantfreevnodes)
- vnlru_free(1);
- /*
- * Wait for available vnodes.
- */
+ mtx_assert(&vnode_free_list_mtx, MA_OWNED);
if (numvnodes > desiredvnodes) {
- if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
+ if (suspended) {
/*
* File system is being suspended, we cannot risk a
* deadlock here, so allocate new vnode anyway.
@@ -962,7 +950,7 @@
*/
if (freevnodes > wantfreevnodes)
vnlru_free(freevnodes - wantfreevnodes);
- goto alloc;
+ return (0);
}
if (vnlruproc_sig == 0) {
vnlruproc_sig = 1; /* avoid unnecessary wakeups */
@@ -970,16 +958,76 @@
}
msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
"vlruwk", hz);
+ }
+ return (numvnodes > desiredvnodes ? ENFILE : 0);
+}
+
+void
+getnewvnode_reserve(u_int count)
+{
+ struct thread *td;
+
+ td = curthread;
+ mtx_lock(&vnode_free_list_mtx);
+ while (count > 0) {
+ if (getnewvnode_wait(0) == 0) {
+ count--;
+ td->td_vp_reserv++;
+ numvnodes++;
+ }
+ }
+ mtx_unlock(&vnode_free_list_mtx);
+}
+
+void
+getnewvnode_drop_reserve(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ mtx_lock(&vnode_free_list_mtx);
+ KASSERT(numvnodes >= td->td_vp_reserv, ("reserve too large"));
+ numvnodes -= td->td_vp_reserv;
+ mtx_unlock(&vnode_free_list_mtx);
+ td->td_vp_reserv = 0;
+}
+
+/*
+ * Return the next vnode from the free list.
+ */
+int
+getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
+ struct vnode **vpp)
+{
+ struct vnode *vp;
+ struct bufobj *bo;
+ struct thread *td;
+ int error;
+
+ CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
+ vp = NULL;
+ td = curthread;
+ if (td->td_vp_reserv > 0) {
+ td->td_vp_reserv -= 1;
+ goto alloc;
+ }
+ mtx_lock(&vnode_free_list_mtx);
+ /*
+ * Lend our context to reclaim vnodes if they've exceeded the max.
+ */
+ if (freevnodes > wantfreevnodes)
+ vnlru_free(1);
+ error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
+ MNTK_SUSPEND));
#if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */
- if (numvnodes > desiredvnodes) {
- mtx_unlock(&vnode_free_list_mtx);
- return (ENFILE);
- }
+ if (error != 0) {
+ mtx_unlock(&vnode_free_list_mtx);
+ return (error);
+ }
#endif
- }
-alloc:
numvnodes++;
mtx_unlock(&vnode_free_list_mtx);
+alloc:
vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
/*
* Setup locks.
Modified: trunk/sys/sys/proc.h
===================================================================
--- trunk/sys/sys/proc.h 2016-09-17 21:24:37 UTC (rev 8233)
+++ trunk/sys/sys/proc.h 2016-09-17 21:26:57 UTC (rev 8234)
@@ -316,6 +316,7 @@
struct vm_page **td_ma; /* (k) uio pages held */
int td_ma_cnt; /* (k) size of *td_ma */
struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */
+ u_int td_vp_reserv; /* (k) Count of reserved vnodes. */
void *td_machdata; /* (k) mach state. */
};
Modified: trunk/sys/sys/vnode.h
===================================================================
--- trunk/sys/sys/vnode.h 2016-09-17 21:24:37 UTC (rev 8233)
+++ trunk/sys/sys/vnode.h 2016-09-17 21:26:57 UTC (rev 8234)
@@ -603,6 +603,8 @@
void cvtnstat(struct stat *sb, struct nstat *nsb);
int getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
struct vnode **vpp);
+void getnewvnode_reserve(u_int count);
+void getnewvnode_drop_reserve(void);
int insmntque1(struct vnode *vp, struct mount *mp,
void (*dtr)(struct vnode *, void *), void *dtr_arg);
int insmntque(struct vnode *vp, struct mount *mp);