
Comparing trunk/sys/sys/seq.h (file contents):
Revision 12360 by laffer1, Thu May 24 22:43:29 2018 UTC vs.
Revision 12361 by laffer1, Sun Feb 9 18:31:04 2020 UTC

# Line 23 | Line 23
23   * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24   * SUCH DAMAGE.
25   *
26 < * $FreeBSD: stable/10/sys/sys/seq.h 273109 2014-10-14 21:19:23Z mjg $
26 > * $FreeBSD: stable/11/sys/sys/seq.h 312714 2017-01-24 19:39:24Z mjg $
27   */
28  
29   #ifndef _SYS_SEQ_H_
# Line 60 | Line 60 | typedef uint32_t seq_t;
60   *              lobj = gobj;
61   *              if (seq_consistent(&gobj->seq, seq))
62   *                      break;
63 *              cpu_spinwait();
63   *      }
64   *      foo(lobj);
65   */            
# Line 70 | Line 69 | typedef uint32_t seq_t;
69  
70   #include <machine/cpu.h>
71  
73 /*
74 * This is a temporary hack until memory barriers are cleaned up.
75 *
76 * atomic_load_acq_int at least on amd64 provides a full memory barrier,
77 * in a way which affects performance.
78 *
79 * Hack below covers all architectures and avoids most of the penalty at least
80 * on amd64.
81 */
82 static __inline int
83 atomic_load_acq_rmb_int(volatile u_int *p)
84 {
85        volatile u_int v;
86
87        v = *p;
88        atomic_load_acq_int(&v);
89        return (v);
90 }
91
72   static __inline bool
73   seq_in_modify(seq_t seqp)
74   {
# Line 101 | Line 81 | seq_write_begin(seq_t *seqp)
81   {
82  
83          MPASS(!seq_in_modify(*seqp));
84 <        atomic_add_acq_int(seqp, 1);
84 >        *seqp += 1;
85 >        atomic_thread_fence_rel();
86   }
87  
88   static __inline void
89   seq_write_end(seq_t *seqp)
90   {
91  
92 <        atomic_add_rel_int(seqp, 1);
92 >        atomic_store_rel_int(seqp, *seqp + 1);
93          MPASS(!seq_in_modify(*seqp));
94   }
95  
96   static __inline seq_t
97 < seq_read(seq_t *seqp)
97 > seq_read(const seq_t *seqp)
98   {
99          seq_t ret;
100  
101          for (;;) {
102 <                ret = atomic_load_acq_rmb_int(seqp);
102 >                ret = atomic_load_acq_int(__DECONST(seq_t *, seqp));
103                  if (seq_in_modify(ret)) {
104                          cpu_spinwait();
105                          continue;
# Line 130 | Line 111 | seq_read(seq_t *seqp)
111   }
112  
113   static __inline seq_t
114 < seq_consistent(seq_t *seqp, seq_t oldseq)
114 > seq_consistent_nomb(const seq_t *seqp, seq_t oldseq)
115   {
116  
117 <        return (atomic_load_acq_rmb_int(seqp) == oldseq);
117 >        return (*seqp == oldseq);
118   }
119  
120   static __inline seq_t
121 < seq_consistent_nomb(seq_t *seqp, seq_t oldseq)
121 > seq_consistent(const seq_t *seqp, seq_t oldseq)
122   {
123  
124 <        return (*seqp == oldseq);
124 >        atomic_thread_fence_acq();
125 >        return (seq_consistent_nomb(seqp, oldseq));
126   }
127  
128   #endif  /* _KERNEL */
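
For orientation: this revision drops the temporary atomic_load_acq_rmb_int() hack in favor of explicit atomic_thread_fence_acq()/atomic_thread_fence_rel() fences, and constifies the reader-side functions. Below is a minimal usage sketch of the API as it stands after the change; the struct, mutex, and field names (obj, o_mtx, o_val, obj_set, obj_get) are hypothetical, and the reader loop mirrors the pattern from the header's own comment. Writers must still be serialized externally (here by a mutex); the sequence counter only protects readers.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/seq.h>

struct obj {
	struct mtx	o_mtx;	/* serializes writers */
	seq_t		o_seq;	/* even: stable, odd: write in progress */
	int		o_val;
};

/* Writer: take the lock, bump the counter to odd, modify, bump to even. */
static void
obj_set(struct obj *o, int val)
{

	mtx_lock(&o->o_mtx);
	seq_write_begin(&o->o_seq);
	o->o_val = val;
	seq_write_end(&o->o_seq);
	mtx_unlock(&o->o_mtx);
}

/* Lockless reader: retry until the counter is unchanged across the read. */
static int
obj_get(struct obj *o)
{
	seq_t seq;
	int val;

	for (;;) {
		seq = seq_read(&o->o_seq);	/* waits out in-progress writes */
		val = o->o_val;
		if (seq_consistent(&o->o_seq, seq))
			break;
		cpu_spinwait();
	}
	return (val);
}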

Diff Legend

  Removed lines (shown with the old line number only)
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)