[Midnightbsd-cvs] src [12332] trunk/sys/sys/sx.h: sync with FreeBSD 11-stable

laffer1 at midnightbsd.org
Sat Feb 8 15:01:04 EST 2020


Revision: 12332
          http://svnweb.midnightbsd.org/src/?rev=12332
Author:   laffer1
Date:     2020-02-08 15:01:03 -0500 (Sat, 08 Feb 2020)
Log Message:
-----------
sync with FreeBSD 11-stable

Modified Paths:
--------------
    trunk/sys/sys/sx.h

Modified: trunk/sys/sys/sx.h
===================================================================
--- trunk/sys/sys/sx.h	2020-02-08 20:00:09 UTC (rev 12331)
+++ trunk/sys/sys/sx.h	2020-02-08 20:01:03 UTC (rev 12332)
@@ -27,7 +27,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  * DAMAGE.
  *
- * $FreeBSD: stable/10/sys/sys/sx.h 323870 2017-09-21 19:24:11Z marius $
+ * $FreeBSD: stable/11/sys/sys/sx.h 331722 2018-03-29 02:50:57Z eadler $
  */
 
 #ifndef	_SYS_SX_H_
@@ -87,6 +87,13 @@
 
 #ifdef _KERNEL
 
+#define	sx_recurse	lock_object.lo_data
+
+#define	SX_READ_VALUE(sx)	((sx)->sx_lock)
+
+#define	lv_sx_owner(v) \
+	((v & SX_LOCK_SHARED) ? NULL : (struct thread *)SX_OWNER(v))
+
 /*
  * Function prototipes.  Routines that start with an underscore are not part
  * of the public interface and are wrappered with a macro.
@@ -95,20 +102,22 @@
 #define	sx_init(sx, desc)	sx_init_flags((sx), (desc), 0)
 void	sx_init_flags(struct sx *sx, const char *description, int opts);
 void	sx_destroy(struct sx *sx);
+int	sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
 int	sx_try_slock_(struct sx *sx, const char *file, int line);
+int	sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
 int	sx_try_xlock_(struct sx *sx, const char *file, int line);
+int	sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
 int	sx_try_upgrade_(struct sx *sx, const char *file, int line);
+void	sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
 void	sx_downgrade_(struct sx *sx, const char *file, int line);
+int	_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF);
 int	_sx_slock(struct sx *sx, int opts, const char *file, int line);
 int	_sx_xlock(struct sx *sx, int opts, const char *file, int line);
+void	_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
 void	_sx_sunlock(struct sx *sx, const char *file, int line);
 void	_sx_xunlock(struct sx *sx, const char *file, int line);
-int	_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts,
-	    const char *file, int line);
-int	_sx_slock_hard(struct sx *sx, int opts, const char *file, int line);
-void	_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int
-	    line);
-void	_sx_sunlock_hard(struct sx *sx, const char *file, int line);
+int	_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF);
+void	_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF);
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void	_sx_assert(const struct sx *sx, int what, const char *file, int line);
 #endif
@@ -141,6 +150,7 @@
  * deferred to 'tougher' functions.
  */
 
+#if	(LOCK_DEBUG == 0)
 /* Acquire an exclusive lock. */
 static __inline int
 __sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
@@ -147,14 +157,12 @@
     int line)
 {
 	uintptr_t tid = (uintptr_t)td;
+	uintptr_t v = SX_LOCK_UNLOCKED;
 	int error = 0;
 
-	if (sx->sx_lock != SX_LOCK_UNLOCKED ||
-	    !atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
-		error = _sx_xlock_hard(sx, tid, opts, file, line);
-	else 
-		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
-		    sx, 0, 0, file, line);
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
+	    !atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid)))
+		error = _sx_xlock_hard(sx, v, opts);
 
 	return (error);
 }
@@ -163,48 +171,15 @@
 static __inline void
 __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
 {
-	uintptr_t tid = (uintptr_t)td;
+	uintptr_t x = (uintptr_t)td;
 
-	if (sx->sx_lock != tid ||
-	    !atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
-		_sx_xunlock_hard(sx, tid, file, line);
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
+	    !atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, SX_LOCK_UNLOCKED)))
+		_sx_xunlock_hard(sx, x);
 }
+#endif
 
-/* Acquire a shared lock. */
-static __inline int
-__sx_slock(struct sx *sx, int opts, const char *file, int line)
-{
-	uintptr_t x = sx->sx_lock;
-	int error = 0;
-
-	if (!(x & SX_LOCK_SHARED) ||
-	    !atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
-		error = _sx_slock_hard(sx, opts, file, line);
-	else
-		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx, 0,
-		    0, file, line);
-
-	return (error);
-}
-
 /*
- * Release a shared lock.  We can just drop a single shared lock so
- * long as we aren't trying to drop the last shared lock when other
- * threads are waiting for an exclusive lock.  This takes advantage of
- * the fact that an unlocked lock is encoded as a shared lock with a
- * count of 0.
- */
-static __inline void
-__sx_sunlock(struct sx *sx, const char *file, int line)
-{
-	uintptr_t x = sx->sx_lock;
-
-	if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
-	    !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
-		_sx_sunlock_hard(sx, file, line);
-}
-
-/*
  * Public interface for lock operations.
  */
 #ifndef LOCK_DEBUG
@@ -217,12 +192,6 @@
 	_sx_xlock((sx), SX_INTERRUPTIBLE, (file), (line))
 #define	sx_xunlock_(sx, file, line)					\
 	_sx_xunlock((sx), (file), (line))
-#define	sx_slock_(sx, file, line)					\
-	(void)_sx_slock((sx), 0, (file), (line))
-#define	sx_slock_sig_(sx, file, line)					\
-	_sx_slock((sx), SX_INTERRUPTIBLE, (file) , (line))
-#define	sx_sunlock_(sx, file, line)					\
-	_sx_sunlock((sx), (file), (line))
 #else
 #define	sx_xlock_(sx, file, line)					\
 	(void)__sx_xlock((sx), curthread, 0, (file), (line))
@@ -230,17 +199,30 @@
 	__sx_xlock((sx), curthread, SX_INTERRUPTIBLE, (file), (line))
 #define	sx_xunlock_(sx, file, line)					\
 	__sx_xunlock((sx), curthread, (file), (line))
+#endif	/* LOCK_DEBUG > 0 || SX_NOINLINE */
+#if	(LOCK_DEBUG > 0)
 #define	sx_slock_(sx, file, line)					\
-	(void)__sx_slock((sx), 0, (file), (line))
+	(void)_sx_slock((sx), 0, (file), (line))
 #define	sx_slock_sig_(sx, file, line)					\
-	__sx_slock((sx), SX_INTERRUPTIBLE, (file), (line))
+	_sx_slock((sx), SX_INTERRUPTIBLE, (file) , (line))
 #define	sx_sunlock_(sx, file, line)					\
-	__sx_sunlock((sx), (file), (line))
-#endif	/* LOCK_DEBUG > 0 || SX_NOINLINE */
+	_sx_sunlock((sx), (file), (line))
 #define	sx_try_slock(sx)	sx_try_slock_((sx), LOCK_FILE, LOCK_LINE)
 #define	sx_try_xlock(sx)	sx_try_xlock_((sx), LOCK_FILE, LOCK_LINE)
 #define	sx_try_upgrade(sx)	sx_try_upgrade_((sx), LOCK_FILE, LOCK_LINE)
 #define	sx_downgrade(sx)	sx_downgrade_((sx), LOCK_FILE, LOCK_LINE)
+#else
+#define	sx_slock_(sx, file, line)					\
+	(void)_sx_slock_int((sx), 0)
+#define	sx_slock_sig_(sx, file, line)					\
+	_sx_slock_int((sx), SX_INTERRUPTIBLE)
+#define	sx_sunlock_(sx, file, line)					\
+	_sx_sunlock_int((sx))
+#define	sx_try_slock(sx)	sx_try_slock_int((sx))
+#define	sx_try_xlock(sx)	sx_try_xlock_int((sx))
+#define	sx_try_upgrade(sx)	sx_try_upgrade_int((sx))
+#define	sx_downgrade(sx)	sx_downgrade_int((sx))
+#endif
 #ifdef INVARIANTS
 #define	sx_assert_(sx, what, file, line)				\
 	_sx_assert((sx), (what), (file), (line))
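
A note on the main behavioral change in this sync: the inline exclusive-lock fast paths __sx_xlock()/__sx_xunlock() are now compiled only when LOCK_DEBUG == 0, check the lockstat probe up front, and switch from atomic_cmpset_*_ptr() to atomic_fcmpset_*_ptr(). The fcmpset variant writes the value it actually observed back into its "expected" argument on failure, so that value can be handed straight to _sx_xlock_hard()/_sx_xunlock_hard() without re-reading the lock word. Below is a minimal user-space sketch of that pattern using C11 atomics, whose atomic_compare_exchange_* functions update "expected" the same way; every toy_* name is invented for illustration and the slow path is only a spinning placeholder, not the real kern_sx.c logic.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define	TOY_UNLOCKED	((uintptr_t)0)

struct toy_lock {
	_Atomic uintptr_t word;		/* 0 == unlocked, else owner "tid" */
};

/*
 * Placeholder slow path: spin until the word can be swung from unlocked
 * to tid.  The real _sx_xlock_hard() queues the thread, may spin
 * adaptively, and honors SX_INTERRUPTIBLE; none of that is modeled here.
 */
static void
toy_xlock_hard(struct toy_lock *lk, uintptr_t tid, uintptr_t v)
{
	for (;;) {
		if (v == TOY_UNLOCKED &&
		    atomic_compare_exchange_weak(&lk->word, &v, tid))
			return;
		v = atomic_load(&lk->word);
	}
}

static void
toy_xlock(struct toy_lock *lk, uintptr_t tid)
{
	uintptr_t v = TOY_UNLOCKED;

	/*
	 * One CAS attempt; on failure v already holds the value that was
	 * observed, so the slow path gets it without another load.
	 */
	if (!atomic_compare_exchange_strong(&lk->word, &v, tid))
		toy_xlock_hard(lk, tid, v);
}

static void
toy_xunlock(struct toy_lock *lk, uintptr_t tid)
{
	uintptr_t v = tid;

	/*
	 * If the word were no longer plain "tid" (waiter bits set), the
	 * real code would call _sx_xunlock_hard(); the toy has no waiter
	 * bits, so this CAS is expected to succeed.
	 */
	(void)atomic_compare_exchange_strong(&lk->word, &v, TOY_UNLOCKED);
}

int
main(void)
{
	struct toy_lock lk = { .word = TOY_UNLOCKED };

	toy_xlock(&lk, (uintptr_t)1);
	toy_xunlock(&lk, (uintptr_t)1);
	printf("fast-path lock/unlock done\n");
	return (0);
}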

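The header also gains SX_READ_VALUE() and lv_sx_owner(): given a snapshot of the lock word, lv_sx_owner() yields NULL when the shared bit is set and otherwise masks the flag bits off to recover the owning thread pointer. Here is a stand-alone sketch of that tagged-pointer decoding; the TOY_* flag values are assumptions for the example, not the real SX_LOCK_* constants from sx.h.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Tagged lock word: either the "shared" bit is set (no single owner),
 * or the word is the owning thread pointer with low flag bits OR'd in.
 * TOY_* values are placeholders, not the real sx.h definitions.
 */
#define	TOY_LOCK_SHARED		0x01UL
#define	TOY_LOCK_FLAGMASK	0x0fUL
#define	TOY_OWNER(v)		((v) & ~TOY_LOCK_FLAGMASK)

struct toy_thread {
	int	id;
};

static struct toy_thread *
toy_lv_owner(uintptr_t v)
{
	/* Shared (or unlocked): there is no exclusive owner to report. */
	if (v & TOY_LOCK_SHARED)
		return (NULL);
	return ((struct toy_thread *)TOY_OWNER(v));
}

int
main(void)
{
	/* Over-aligned so the low flag bits of the address are free. */
	static _Alignas(16) struct toy_thread td = { .id = 7 };
	uintptr_t v = (uintptr_t)&td | 0x04UL;	/* owner + a waiter flag */

	printf("owner id: %d\n", toy_lv_owner(v)->id);
	printf("shared word owner: %p\n", (void *)toy_lv_owner(TOY_LOCK_SHARED));
	return (0);
}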

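Finally, the inline shared-lock fast paths __sx_slock()/__sx_sunlock() disappear from the header; shared acquire and release now always call into _sx_slock_int()/_sx_sunlock_int(). The encoding they depended on is unchanged and worth keeping in mind: the sharer count lives in the upper bits of the lock word, "unlocked" is simply a shared word with a count of zero, and dropping the last sharer while exclusive waiters are queued must take the slow path. A toy version of that counting scheme follows (the shift and flag values are assumptions, not the real sx.h definitions):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: low bits are flags, sharer count starts at bit 4. */
#define	TOY_SHARED		0x01UL	/* word is in shared (or unlocked) state */
#define	TOY_EXCL_WAITERS	0x04UL	/* an exclusive waiter is queued */
#define	TOY_SHARERS_SHIFT	4
#define	TOY_ONE_SHARER		(1UL << TOY_SHARERS_SHIFT)
#define	TOY_SHARERS(v)		((v) >> TOY_SHARERS_SHIFT)

/* "Unlocked" is just a shared word with a count of zero. */
#define	TOY_UNLOCKED		TOY_SHARED

static bool
toy_slock_try(_Atomic uintptr_t *word)
{
	uintptr_t v = atomic_load(word);

	/* Fast path: still shared, bump the sharer count by one. */
	if ((v & TOY_SHARED) != 0 &&
	    atomic_compare_exchange_strong(word, &v, v + TOY_ONE_SHARER))
		return (true);
	return (false);		/* real code retries or takes the hard path */
}

static bool
toy_sunlock_try(_Atomic uintptr_t *word)
{
	uintptr_t v = atomic_load(word);

	/*
	 * Dropping the last sharer while exclusive waiters are queued
	 * must go to the slow path so a waiter gets woken up.
	 */
	if (v == (TOY_SHARED | TOY_ONE_SHARER | TOY_EXCL_WAITERS))
		return (false);
	return (atomic_compare_exchange_strong(word, &v, v - TOY_ONE_SHARER));
}

int
main(void)
{
	_Atomic uintptr_t word = TOY_UNLOCKED;

	toy_slock_try(&word);
	toy_slock_try(&word);
	printf("sharers: %lu\n", (unsigned long)TOY_SHARERS(atomic_load(&word)));
	toy_sunlock_try(&word);
	toy_sunlock_try(&word);
	printf("sharers: %lu\n", (unsigned long)TOY_SHARERS(atomic_load(&word)));
	return (0);
}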
