 23     * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 24     * SUCH DAMAGE.
 25     *
 26 <   * $FreeBSD: stable/10/sys/sys/seq.h 273109 2014-10-14 21:19:23Z mjg $
 26 >   * $FreeBSD: stable/11/sys/sys/seq.h 312714 2017-01-24 19:39:24Z mjg $
 27     */
 28
 29    #ifndef _SYS_SEQ_H_
 60     *		lobj = gobj;
 61     *		if (seq_consistent(&gobj->seq, seq))
 62     *			break;
 63 -   *		cpu_spinwait();
 63     *	}
 64     *	foo(lobj);
 65     */
 69
 70    #include <machine/cpu.h>
 71
 73 -  /*
 74 -   * This is a temporary hack until memory barriers are cleaned up.
 75 -   *
 76 -   * atomic_load_acq_int at least on amd64 provides a full memory barrier,
 77 -   * in a way which affects performance.
 78 -   *
 79 -   * Hack below covers all architectures and avoids most of the penalty at least
 80 -   * on amd64.
 81 -   */
 82 -  static __inline int
 83 -  atomic_load_acq_rmb_int(volatile u_int *p)
 84 -  {
 85 -  	volatile u_int v;
 86 -
 87 -  	v = *p;
 88 -  	atomic_load_acq_int(&v);
 89 -  	return (v);
 90 -  }
 91 -
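For context on the deletion above: atomic_load_acq_rmb_int() faked an acquire load by reading the shared word with a plain load and then issuing atomic_load_acq_int() on a stack-local copy, so only the barrier effect remained and no locked instruction touched the shared location. stable/11 can drop the hack because the kernel gained explicit fence primitives (atomic_thread_fence_acq()/atomic_thread_fence_rel(), used in the hunks below). A minimal C11 sketch of the same idea, not the kernel code; the function name is made up for illustration:

#include <stdatomic.h>

/*
 * Sketch only: a relaxed load followed by an acquire fence, the
 * portable version of what the removed hack approximated.  On amd64
 * the acquire fence should cost nothing beyond a compiler barrier.
 */
static inline unsigned int
load_acq_via_fence(const volatile atomic_uint *p)
{
	unsigned int v;

	v = atomic_load_explicit(p, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);
	return (v);
}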
 72    static __inline bool
 73    seq_in_modify(seq_t seqp)
 74    {
 81    {
 82
 83    	MPASS(!seq_in_modify(*seqp));
 84 <   	atomic_add_acq_int(seqp, 1);
 84 >   	*seqp += 1;
 85 >   	atomic_thread_fence_rel();
 86    }
 87
 88    static __inline void
 89    seq_write_end(seq_t *seqp)
 90    {
 91
 92 <   	atomic_add_rel_int(seqp, 1);
 92 >   	atomic_store_rel_int(seqp, *seqp + 1);
 93    	MPASS(!seq_in_modify(*seqp));
 94    }
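Taken together, seq_write_begin() bumps the counter from even to odd (the MPASS asserts it was even on entry) and seq_write_end() bumps it back to even, so readers can tell a write is in flight. The change replaces atomic read-modify-write operations with plain increments plus release ordering, which suffices because writers must already be serialized externally. A hedged writer-side sketch; struct obj, its fields, and obj_update() are hypothetical, only seq_write_begin()/seq_write_end() come from this header:

struct obj {
	seq_t	seq;
	int	a, b;
};

static void
obj_update(struct obj *o, int a, int b)
{

	/* Assumes the caller holds a lock serializing all writers. */
	seq_write_begin(&o->seq);	/* counter becomes odd */
	o->a = a;
	o->b = b;
	seq_write_end(&o->seq);		/* counter becomes even again */
}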
 95
 96    static __inline seq_t
 97 <  seq_read(seq_t *seqp)
 97 >  seq_read(const seq_t *seqp)
 98    {
 99    	seq_t ret;
100
101    	for (;;) {
102 <   		ret = atomic_load_acq_rmb_int(seqp);
102 >   		ret = atomic_load_acq_int(__DECONST(seq_t *, seqp));
103    		if (seq_in_modify(ret)) {
104    			cpu_spinwait();
105    			continue;
111    }
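seq_read() now takes a const pointer, hence the __DECONST() when handing it to atomic_load_acq_int(), which wants a plain volatile pointer; the loop spins (with cpu_spinwait()) until it observes a value with no write in flight. A C11 sketch of the same loop, not the kernel code, assuming the usual seqlock convention that an odd count marks an in-progress write:

#include <stdatomic.h>

static inline unsigned int
seq_read_sketch(const volatile atomic_uint *seqp)
{
	unsigned int ret;

	for (;;) {
		ret = atomic_load_explicit(seqp, memory_order_acquire);
		if (!(ret & 1))		/* even: no write in flight */
			break;
		/* a pause hint such as cpu_spinwait() belongs here */
	}
	return (ret);
}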
112
113    static __inline bool
114 <  seq_consistent(seq_t *seqp, seq_t oldseq)
114 >  seq_consistent_nomb(const seq_t *seqp, seq_t oldseq)
115    {
116
117 <   	return (atomic_load_acq_rmb_int(seqp) == oldseq);
117 >   	return (*seqp == oldseq);
118    }
119
120    static __inline bool
121 <  seq_consistent_nomb(seq_t *seqp, seq_t oldseq)
121 >  seq_consistent(const seq_t *seqp, seq_t oldseq)
122    {
123
124 <   	return (*seqp == oldseq);
124 >   	atomic_thread_fence_acq();
125 >   	return (seq_consistent_nomb(seqp, oldseq));
126    }
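With the constified prototypes, a read-side caller can keep the shared object const throughout. A reader sketch mirroring the example from the header comment (which, per the hunk near the top, also lost its cpu_spinwait() call, since seq_read() itself already spin-waits); struct obj and foo() are placeholders reused from the writer sketch above:

void foo(const struct obj *);		/* hypothetical consumer */

static void
foo_read(const struct obj *gobj)
{
	struct obj lobj;
	seq_t seq;

	for (;;) {
		seq = seq_read(&gobj->seq);
		lobj = *gobj;		/* take a local snapshot */
		if (seq_consistent(&gobj->seq, seq))
			break;		/* snapshot saw no torn write */
	}
	foo(&lobj);
}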
127
128    #endif /* _KERNEL */