1 |
/*- |
2 |
* Copyright (c) 2005-2007 Joseph Koshy |
3 |
* Copyright (c) 2007 The FreeBSD Foundation |
4 |
* All rights reserved. |
5 |
* |
6 |
* Portions of this software were developed by A. Joseph Koshy under |
7 |
* sponsorship from the FreeBSD Foundation and Google, Inc. |
8 |
* |
9 |
* Redistribution and use in source and binary forms, with or without |
10 |
* modification, are permitted provided that the following conditions |
11 |
* are met: |
12 |
* 1. Redistributions of source code must retain the above copyright |
13 |
* notice, this list of conditions and the following disclaimer. |
14 |
* 2. Redistributions in binary form must reproduce the above copyright |
15 |
* notice, this list of conditions and the following disclaimer in the |
16 |
* documentation and/or other materials provided with the distribution. |
17 |
* |
18 |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
19 |
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
22 |
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 |
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
24 |
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
25 |
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
26 |
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
27 |
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
28 |
* SUCH DAMAGE. |
29 |
* |
30 |
*/ |
31 |
|
32 |
/* |
33 |
* Logging code for hwpmc(4) |
34 |
*/ |
35 |
|
36 |
#include <sys/cdefs.h> |
37 |
__MBSDID("$MidnightBSD$"); |
38 |
|
39 |
#include <sys/param.h> |
40 |
#include <sys/capability.h> |
41 |
#include <sys/file.h> |
42 |
#include <sys/kernel.h> |
43 |
#include <sys/kthread.h> |
44 |
#include <sys/lock.h> |
45 |
#include <sys/module.h> |
46 |
#include <sys/mutex.h> |
47 |
#include <sys/pmc.h> |
48 |
#include <sys/pmckern.h> |
49 |
#include <sys/pmclog.h> |
50 |
#include <sys/proc.h> |
51 |
#include <sys/signalvar.h> |
52 |
#include <sys/sysctl.h> |
53 |
#include <sys/systm.h> |
54 |
#include <sys/uio.h> |
55 |
#include <sys/unistd.h> |
56 |
#include <sys/vnode.h> |
57 |
|
58 |
/*
 * Sysctl tunables
 */

SYSCTL_DECL(_kern_hwpmc);

/*
 * kern.hwpmc.logbuffersize -- size of the per-cpu owner buffers.
 */

static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_TUN|CTLFLAG_RD,
    &pmclog_buffer_size, 0, "size of log buffers in kilobytes");

/*
 * kern.hwpmc.nbuffer -- number of global log buffers
 */

static int pmc_nlogbuffers = PMC_NLOGBUFFERS;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_TUN|CTLFLAG_RD,
    &pmc_nlogbuffers, 0, "number of global log buffers");

/*
 * Global log buffer list and associated spin lock.
 */

TAILQ_HEAD(, pmclog_buffer) pmc_bufferlist =
    TAILQ_HEAD_INITIALIZER(pmc_bufferlist);
static struct mtx pmc_bufferlist_mtx;       /* spin lock */
static struct mtx pmc_kthread_mtx;          /* sleep lock */

/*
 * Reset buffer descriptor 'D' to its pristine state: 'plb_fence'
 * points one byte past the end of the 1024*pmclog_buffer_size byte
 * region, while 'plb_base' and 'plb_ptr' both point at the first
 * usable byte following the (uint32_t-aligned) descriptor header.
 * The descriptor lives at the start of its own buffer memory.
 */
#define PMCLOG_INIT_BUFFER_DESCRIPTOR(D) do {                           \
        const int __roundup = roundup(sizeof(*D),                       \
            sizeof(uint32_t));                                          \
        (D)->plb_fence = ((char *) (D)) +                               \
            1024*pmclog_buffer_size;                                    \
        (D)->plb_base = (D)->plb_ptr = ((char *) (D)) +                 \
            __roundup;                                                  \
    } while (0)
99 |
|
100 |
|
101 |
/*
 * Log file record constructors.
 */

/*
 * Build a 32-bit record header word: magic in the top byte, record
 * type 'T' in the next byte, record length 'L' (bytes) in the low
 * 16 bits.
 */
#define _PMCLOG_TO_HEADER(T,L)                                          \
    ((PMCLOG_HEADER_MAGIC << 24) |                                      \
     (PMCLOG_TYPE_ ## T << 16) |                                        \
     ((L) & 0xFFFF))

/* reserve LEN bytes of space and initialize the entry header */
/*
 * NOTE: this macro deliberately opens a 'do {' that is closed by
 * PMCLOG_DESPATCH() below; the two must always be paired.  On a
 * successful reservation, '_le' is left pointing past the header
 * and timestamp words, and the owner's spin mutex is held until
 * PMCLOG_DESPATCH() releases it.  'ACTION' runs when no space is
 * available (e.g. 'return' or an error goto).
 */
#define _PMCLOG_RESERVE(PO,TYPE,LEN,ACTION) do {                        \
        uint32_t *_le;                                                  \
        int _len = roundup((LEN), sizeof(uint32_t));                    \
        if ((_le = pmclog_reserve((PO), _len)) == NULL) {               \
            ACTION;                                                     \
        }                                                               \
        *_le = _PMCLOG_TO_HEADER(TYPE,_len);                            \
        _le += 3 /* skip over timestamp */

#define PMCLOG_RESERVE(P,T,L)           _PMCLOG_RESERVE(P,T,L,return)
#define PMCLOG_RESERVE_WITH_ERROR(P,T,L) _PMCLOG_RESERVE(P,T,L,         \
    error=ENOMEM;goto error)

/* append one 32-bit word to the reserved region */
#define PMCLOG_EMIT32(V)    do { *_le++ = (V); } while (0)
/* append a 64-bit value as two 32-bit words, low word first */
#define PMCLOG_EMIT64(V)    do {                                        \
        *_le++ = (uint32_t) ((V) & 0xFFFFFFFF);                         \
        *_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF);                 \
    } while (0)


/* Emit a string. Caution: does NOT update _le, so needs to be last */
#define PMCLOG_EMITSTRING(S,L)   do { bcopy((S), _le, (L)); } while (0)
#define PMCLOG_EMITNULLSTRING(L) do { bzero(_le, (L)); } while (0)

/*
 * Close the 'do {' opened by _PMCLOG_RESERVE() and release the
 * owner's log state (possibly scheduling an I/O).
 */
#define PMCLOG_DESPATCH(PO)                                             \
        pmclog_release((PO));                                           \
    } while (0)
137 |
|
138 |
|
139 |
/*
 * Assertions about the log file format.
 *
 * These compile-time checks pin the on-disk record sizes and the
 * offsets of variable-length pathname fields, so that accidental
 * structure padding or field reordering breaks the build instead of
 * silently changing the log format consumed by pmclog(3).
 */

CTASSERT(sizeof(struct pmclog_callchain) == 6*4 +
    PMC_CALLCHAIN_DEPTH_MAX*sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_closelog) == 3*4);
CTASSERT(sizeof(struct pmclog_dropnotify) == 3*4);
CTASSERT(sizeof(struct pmclog_map_in) == PATH_MAX +
    4*4 + sizeof(uintfptr_t));
CTASSERT(offsetof(struct pmclog_map_in,pl_pathname) ==
    4*4 + sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_map_out) == 4*4 + 2*sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_pcsample) == 6*4 + sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_pmcallocate) == 6*4);
CTASSERT(sizeof(struct pmclog_pmcattach) == 5*4 + PATH_MAX);
CTASSERT(offsetof(struct pmclog_pmcattach,pl_pathname) == 5*4);
CTASSERT(sizeof(struct pmclog_pmcdetach) == 5*4);
CTASSERT(sizeof(struct pmclog_proccsw) == 5*4 + 8);
CTASSERT(sizeof(struct pmclog_procexec) == 5*4 + PATH_MAX +
    sizeof(uintfptr_t));
CTASSERT(offsetof(struct pmclog_procexec,pl_pathname) == 5*4 +
    sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_procexit) == 5*4 + 8);
CTASSERT(sizeof(struct pmclog_procfork) == 5*4);
CTASSERT(sizeof(struct pmclog_sysexit) == 4*4);
CTASSERT(sizeof(struct pmclog_userdata) == 4*4);
166 |
|
167 |
/*
 * Log buffer structure
 *
 * A descriptor embedded at the start of each log buffer's memory
 * (see PMCLOG_INIT_BUFFER_DESCRIPTOR).  Data accumulates between
 * 'plb_base' and 'plb_fence'; 'plb_ptr' marks the current fill point.
 */

struct pmclog_buffer {
    TAILQ_ENTRY(pmclog_buffer) plb_next;    /* global pool or per-owner queue linkage */
    char        *plb_base;                  /* first usable byte */
    char        *plb_ptr;                   /* next free byte; base <= ptr <= fence */
    char        *plb_fence;                 /* one past the end of the buffer */
};
177 |
|
178 |
/*
 * Prototypes
 */

static int  pmclog_get_buffer(struct pmc_owner *po);
static void pmclog_loop(void *arg);
static void pmclog_release(struct pmc_owner *po);
static uint32_t *pmclog_reserve(struct pmc_owner *po, int length);
static void pmclog_schedule_io(struct pmc_owner *po);
static void pmclog_stop_kthread(struct pmc_owner *po);
188 |
|
189 |
/* |
190 |
* Helper functions |
191 |
*/ |
192 |
|
193 |
/*
 * Get a log buffer
 *
 * Take a buffer from the global free pool and install it as owner
 * 'po's current buffer.  Returns 0 on success or ENOMEM if the pool
 * is empty (po->po_curbuf is then left NULL).  Called with the
 * owner's spin mutex held and with no current buffer installed.
 */

static int
pmclog_get_buffer(struct pmc_owner *po)
{
    struct pmclog_buffer *plb;

    mtx_assert(&po->po_mtx, MA_OWNED);

    KASSERT(po->po_curbuf == NULL,
        ("[pmclog,%d] po=%p current buffer still valid", __LINE__, po));

    /* unlink the first free buffer, if any */
    mtx_lock_spin(&pmc_bufferlist_mtx);
    if ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL)
        TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
    mtx_unlock_spin(&pmc_bufferlist_mtx);

    PMCDBG(LOG,GTB,1, "po=%p plb=%p", po, plb);

#ifdef DEBUG
    /* a freshly-allocated buffer must be empty and well-formed */
    if (plb)
        KASSERT(plb->plb_ptr == plb->plb_base &&
            plb->plb_base < plb->plb_fence,
            ("[pmclog,%d] po=%p buffer invariants: ptr=%p "
            "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
            plb->plb_base, plb->plb_fence));
#endif

    po->po_curbuf = plb;    /* NULL if the pool was exhausted */

    /* update stats */
    atomic_add_int(&pmc_stats.pm_buffer_requests, 1);
    if (plb == NULL)
        atomic_add_int(&pmc_stats.pm_buffer_requests_failed, 1);

    return (plb ? 0 : ENOMEM);
}
232 |
|
233 |
/*
 * Log handler loop.
 *
 * This function is executed by each pmc owner's helper thread.
 *
 * It dequeues filled buffers from the owner's po_logbuffers queue,
 * writes each one to the configured log file with the owner process'
 * credentials, and returns the drained buffer to the global free
 * pool.  The loop exits when the log file is deconfigured (the
 * PMC_PO_OWNS_LOGFILE flag is cleared), or after a write error.
 */

static void
pmclog_loop(void *arg)
{
    int error;
    struct pmc_owner *po;
    struct pmclog_buffer *lb;
    struct proc *p;
    struct ucred *ownercred;
    struct ucred *mycred;
    struct thread *td;
    struct uio auio;
    struct iovec aiov;
    size_t nbytes;

    po = (struct pmc_owner *) arg;
    p = po->po_owner;
    td = curthread;
    mycred = td->td_ucred;

    /* hold a reference to the owner's credentials for the writes below */
    PROC_LOCK(p);
    ownercred = crhold(p->p_ucred);
    PROC_UNLOCK(p);

    PMCDBG(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
    KASSERT(po->po_kthread == curthread->td_proc,
        ("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
        po, po->po_kthread, curthread->td_proc));

    lb = NULL;


    /*
     * Loop waiting for I/O requests to be added to the owner
     * struct's queue. The loop is exited when the log file
     * is deconfigured.
     */

    mtx_lock(&pmc_kthread_mtx);

    for (;;) {

        /* check if we've been asked to exit */
        if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
            break;

        if (lb == NULL) { /* look for a fresh buffer to write */
            mtx_lock_spin(&po->po_mtx);
            if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
                mtx_unlock_spin(&po->po_mtx);

                /* No more buffers and shutdown required. */
                if (po->po_flags & PMC_PO_SHUTDOWN) {
                    mtx_unlock(&pmc_kthread_mtx);
                    /*
                     * Close the file to get PMCLOG_EOF
                     * error in pmclog(3).
                     */
                    fo_close(po->po_file, curthread);
                    mtx_lock(&pmc_kthread_mtx);
                    break;
                }

                /* sleep until a producer queues a buffer */
                (void) msleep(po, &pmc_kthread_mtx, PWAIT,
                    "pmcloop", 0);
                continue;
            }

            TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
            mtx_unlock_spin(&po->po_mtx);
        }

        /* drop the sleep mutex for the duration of the write */
        mtx_unlock(&pmc_kthread_mtx);

        /* process the request */
        PMCDBG(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
            lb->plb_base, lb->plb_ptr);
        /* change our thread's credentials before issuing the I/O */

        aiov.iov_base = lb->plb_base;
        aiov.iov_len  = nbytes = lb->plb_ptr - lb->plb_base;

        auio.uio_iov    = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = -1;
        auio.uio_resid  = nbytes;
        auio.uio_rw     = UIO_WRITE;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_td     = td;

        /* switch thread credentials -- see kern_ktrace.c */
        td->td_ucred = ownercred;
        error = fo_write(po->po_file, &auio, ownercred, 0, td);
        td->td_ucred = mycred;

        if (error) {
            /* XXX some errors are recoverable */
            /* send a SIGIO to the owner and exit */
            PROC_LOCK(p);
            kern_psignal(p, SIGIO);
            PROC_UNLOCK(p);

            mtx_lock(&pmc_kthread_mtx);

            po->po_error = error; /* save for flush log */

            PMCDBG(LOG,WRI,2, "po=%p error=%d", po, error);

            break;
        }

        mtx_lock(&pmc_kthread_mtx);

        /* put the used buffer back into the global pool */
        PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

        mtx_lock_spin(&pmc_bufferlist_mtx);
        TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
        mtx_unlock_spin(&pmc_bufferlist_mtx);

        lb = NULL;
    }

    /* wake up anyone sleeping in pmclog_stop_kthread() */
    wakeup_one(po->po_kthread);
    po->po_kthread = NULL;

    mtx_unlock(&pmc_kthread_mtx);

    /* return the current I/O buffer to the global pool */
    if (lb) {
        PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

        mtx_lock_spin(&pmc_bufferlist_mtx);
        TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
        mtx_unlock_spin(&pmc_bufferlist_mtx);
    }

    /*
     * Exit this thread, signalling the waiter
     */

    crfree(ownercred);

    kproc_exit(0);
}
383 |
|
384 |
/*
 * Release a log entry and schedule an I/O if needed.
 *
 * Counterpart of pmclog_reserve(): called (via PMCLOG_DESPATCH) after
 * a record has been written into the current buffer.  If the buffer
 * is now full, hand it to the helper thread.  Drops the owner's spin
 * mutex that pmclog_reserve() left held.
 */

static void
pmclog_release(struct pmc_owner *po)
{
    KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
        ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
        po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
    KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
        ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
        po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));

    /* schedule an I/O if we've filled a buffer */
    if (po->po_curbuf->plb_ptr >= po->po_curbuf->plb_fence)
        pmclog_schedule_io(po);

    mtx_unlock_spin(&po->po_mtx);

    PMCDBG(LOG,REL,1, "po=%p", po);
}
406 |
|
407 |
|
408 |
/* |
409 |
* Attempt to reserve 'length' bytes of space in an owner's log |
410 |
* buffer. The function returns a pointer to 'length' bytes of space |
411 |
* if there was enough space or returns NULL if no space was |
412 |
* available. Non-null returns do so with the po mutex locked. The |
413 |
* caller must invoke pmclog_release() on the pmc owner structure |
414 |
* when done. |
415 |
*/ |
416 |
|
417 |
static uint32_t * |
418 |
pmclog_reserve(struct pmc_owner *po, int length) |
419 |
{ |
420 |
uintptr_t newptr, oldptr; |
421 |
uint32_t *lh; |
422 |
struct timespec ts; |
423 |
|
424 |
PMCDBG(LOG,ALL,1, "po=%p len=%d", po, length); |
425 |
|
426 |
KASSERT(length % sizeof(uint32_t) == 0, |
427 |
("[pmclog,%d] length not a multiple of word size", __LINE__)); |
428 |
|
429 |
mtx_lock_spin(&po->po_mtx); |
430 |
|
431 |
/* No more data when shutdown in progress. */ |
432 |
if (po->po_flags & PMC_PO_SHUTDOWN) { |
433 |
mtx_unlock_spin(&po->po_mtx); |
434 |
return (NULL); |
435 |
} |
436 |
|
437 |
if (po->po_curbuf == NULL) |
438 |
if (pmclog_get_buffer(po) != 0) { |
439 |
mtx_unlock_spin(&po->po_mtx); |
440 |
return (NULL); |
441 |
} |
442 |
|
443 |
KASSERT(po->po_curbuf != NULL, |
444 |
("[pmclog,%d] po=%p no current buffer", __LINE__, po)); |
445 |
|
446 |
KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base && |
447 |
po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence, |
448 |
("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p", |
449 |
__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base, |
450 |
po->po_curbuf->plb_fence)); |
451 |
|
452 |
oldptr = (uintptr_t) po->po_curbuf->plb_ptr; |
453 |
newptr = oldptr + length; |
454 |
|
455 |
KASSERT(oldptr != (uintptr_t) NULL, |
456 |
("[pmclog,%d] po=%p Null log buffer pointer", __LINE__, po)); |
457 |
|
458 |
/* |
459 |
* If we have space in the current buffer, return a pointer to |
460 |
* available space with the PO structure locked. |
461 |
*/ |
462 |
if (newptr <= (uintptr_t) po->po_curbuf->plb_fence) { |
463 |
po->po_curbuf->plb_ptr = (char *) newptr; |
464 |
goto done; |
465 |
} |
466 |
|
467 |
/* |
468 |
* Otherwise, schedule the current buffer for output and get a |
469 |
* fresh buffer. |
470 |
*/ |
471 |
pmclog_schedule_io(po); |
472 |
|
473 |
if (pmclog_get_buffer(po) != 0) { |
474 |
mtx_unlock_spin(&po->po_mtx); |
475 |
return (NULL); |
476 |
} |
477 |
|
478 |
KASSERT(po->po_curbuf != NULL, |
479 |
("[pmclog,%d] po=%p no current buffer", __LINE__, po)); |
480 |
|
481 |
KASSERT(po->po_curbuf->plb_ptr != NULL, |
482 |
("[pmclog,%d] null return from pmc_get_log_buffer", __LINE__)); |
483 |
|
484 |
KASSERT(po->po_curbuf->plb_ptr == po->po_curbuf->plb_base && |
485 |
po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence, |
486 |
("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p", |
487 |
__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base, |
488 |
po->po_curbuf->plb_fence)); |
489 |
|
490 |
oldptr = (uintptr_t) po->po_curbuf->plb_ptr; |
491 |
|
492 |
done: |
493 |
lh = (uint32_t *) oldptr; |
494 |
lh++; /* skip header */ |
495 |
getnanotime(&ts); /* fill in the timestamp */ |
496 |
*lh++ = ts.tv_sec & 0xFFFFFFFF; |
497 |
*lh++ = ts.tv_nsec & 0xFFFFFFF; |
498 |
return ((uint32_t *) oldptr); |
499 |
} |
500 |
|
501 |
/*
 * Schedule an I/O.
 *
 * Transfer the current buffer to the helper kthread.
 *
 * Called with the owner's spin mutex held; leaves po->po_curbuf NULL
 * so that the next pmclog_reserve() fetches a fresh buffer.
 */

static void
pmclog_schedule_io(struct pmc_owner *po)
{
    KASSERT(po->po_curbuf != NULL,
        ("[pmclog,%d] schedule_io with null buffer po=%p", __LINE__, po));

    KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
        ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
        po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
    KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
        ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
        po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));

    PMCDBG(LOG,SIO, 1, "po=%p", po);

    mtx_assert(&po->po_mtx, MA_OWNED);

    /*
     * Add the current buffer to the tail of the buffer list and
     * wakeup the helper.
     */
    TAILQ_INSERT_TAIL(&po->po_logbuffers, po->po_curbuf, plb_next);
    po->po_curbuf = NULL;
    wakeup_one(po);         /* helper sleeps on 'po' in pmclog_loop() */
}
532 |
|
533 |
/*
 * Stop the helper kthread.
 *
 * Clears PMC_PO_OWNS_LOGFILE (the helper's loop condition), wakes the
 * helper, and sleeps until the helper clears po->po_kthread on exit.
 */

static void
pmclog_stop_kthread(struct pmc_owner *po)
{
    /*
     * Close the file to force the thread out of fo_write,
     * unset flag, wakeup the helper thread,
     * wait for it to exit
     */

    if (po->po_file != NULL)
        fo_close(po->po_file, curthread);

    mtx_lock(&pmc_kthread_mtx);
    po->po_flags &= ~PMC_PO_OWNS_LOGFILE;
    wakeup_one(po);
    if (po->po_kthread)
        msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0);
    mtx_unlock(&pmc_kthread_mtx);
}
556 |
|
557 |
/* |
558 |
* Public functions |
559 |
*/ |
560 |
|
561 |
/*
 * Configure a log file for pmc owner 'po'.
 *
 * Parameter 'logfd' is a file handle referencing an open file in the
 * owner process. This file needs to have been opened for writing.
 *
 * On success a helper kthread is running, the owner is flagged as
 * owning a log file, and an INITIALIZE record has been emitted.
 * Returns 0, EBUSY if a log file is already configured, or the error
 * from fget_write()/kproc_create()/buffer reservation.  On failure
 * the kthread (if any) is stopped and the file reference dropped.
 */

int
pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
{
    int error;
    struct proc *p;

    /*
     * As long as it is possible to get a LOR between pmc_sx lock and
     * proctree/allproc sx locks used for adding a new process, assure
     * the former is not held here.
     */
    sx_assert(&pmc_sx, SA_UNLOCKED);
    PMCDBG(LOG,CFG,1, "config po=%p logfd=%d", po, logfd);

    p = po->po_owner;

    /* return EBUSY if a log file was already present */
    if (po->po_flags & PMC_PO_OWNS_LOGFILE)
        return (EBUSY);

    KASSERT(po->po_kthread == NULL,
        ("[pmclog,%d] po=%p kthread (%p) already present", __LINE__, po,
        po->po_kthread));
    KASSERT(po->po_file == NULL,
        ("[pmclog,%d] po=%p file (%p) already present", __LINE__, po,
        po->po_file));

    /* get a reference to the file state */
    error = fget_write(curthread, logfd, CAP_WRITE, &po->po_file);
    if (error)
        goto error;

    /* mark process as owning a log file */
    po->po_flags |= PMC_PO_OWNS_LOGFILE;
    error = kproc_create(pmclog_loop, po, &po->po_kthread,
        RFHIGHPID, 0, "hwpmc: proc(%d)", p->p_pid);
    if (error)
        goto error;

    /* mark process as using HWPMCs */
    PROC_LOCK(p);
    p->p_flag |= P_HWPMC;
    PROC_UNLOCK(p);

    /* create a log initialization entry */
    PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE,
        sizeof(struct pmclog_initialize));
    PMCLOG_EMIT32(PMC_VERSION);
    PMCLOG_EMIT32(md->pmd_cputype);
    PMCLOG_DESPATCH(po);

    return (0);

 error:
    /* shutdown the thread */
    if (po->po_kthread)
        pmclog_stop_kthread(po);

    KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not "
        "stopped", __LINE__, po));

    if (po->po_file)
        (void) fdrop(po->po_file, curthread);
    po->po_file  = NULL;    /* clear file and error state */
    po->po_error = 0;

    return (error);
}
636 |
|
637 |
|
638 |
/*
 * De-configure a log file. This will throw away any buffers queued
 * for this owner process.
 *
 * Stops the helper kthread, returns all queued buffers plus the
 * current one to the global pool, and drops the file reference.
 * Returns EINVAL if no log file is configured, otherwise the result
 * of fdrop().
 */

int
pmclog_deconfigure_log(struct pmc_owner *po)
{
    int error;
    struct pmclog_buffer *lb;

    PMCDBG(LOG,CFG,1, "de-config po=%p", po);

    if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
        return (EINVAL);

    KASSERT(po->po_sscount == 0,
        ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po));
    KASSERT(po->po_file != NULL,
        ("[pmclog,%d] po=%p no log file", __LINE__, po));

    /* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
    pmclog_stop_kthread(po);

    KASSERT(po->po_kthread == NULL,
        ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po));

    /* return all queued log buffers to the global pool */
    while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
        TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
        PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
        mtx_lock_spin(&pmc_bufferlist_mtx);
        TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
        mtx_unlock_spin(&pmc_bufferlist_mtx);
    }

    /* return the 'current' buffer to the global pool */
    if ((lb = po->po_curbuf) != NULL) {
        PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
        mtx_lock_spin(&pmc_bufferlist_mtx);
        TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
        mtx_unlock_spin(&pmc_bufferlist_mtx);
    }

    /* drop a reference to the fd */
    error = fdrop(po->po_file, curthread);
    po->po_file  = NULL;
    po->po_error = 0;

    return (error);
}
689 |
|
690 |
/*
 * Flush a process' log buffer.
 *
 * Hands the current (partially-filled) buffer to the helper thread
 * for writing.  Returns a previously-recorded write error if one is
 * pending, EINVAL if no log file is configured, or ENOBUFS when the
 * current buffer is absent or empty (nothing to flush).
 */

int
pmclog_flush(struct pmc_owner *po)
{
    int error;
    struct pmclog_buffer *lb;

    PMCDBG(LOG,FLS,1, "po=%p", po);

    /*
     * If there is a pending error recorded by the logger thread,
     * return that.
     */
    if (po->po_error)
        return (po->po_error);

    error = 0;

    /*
     * Check that we do have an active log file.
     */
    mtx_lock(&pmc_kthread_mtx);
    if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
        error = EINVAL;
        goto error;
    }

    /*
     * Schedule the current buffer if any and not empty.
     */
    mtx_lock_spin(&po->po_mtx);
    lb = po->po_curbuf;
    if (lb && lb->plb_ptr != lb->plb_base) {
        pmclog_schedule_io(po);
    } else
        error = ENOBUFS;
    mtx_unlock_spin(&po->po_mtx);

 error:
    mtx_unlock(&pmc_kthread_mtx);

    return (error);
}
736 |
|
737 |
/*
 * Begin an orderly shutdown of an owner's log: schedule any pending
 * data and set PMC_PO_SHUTDOWN so no new records are accepted; the
 * helper thread closes the file after draining its queue (see
 * pmclog_loop()).  Always returns 0.
 */
int
pmclog_close(struct pmc_owner *po)
{

    PMCDBG(LOG,CLO,1, "po=%p", po);

    mtx_lock(&pmc_kthread_mtx);

    /*
     * Schedule the current buffer.
     */
    mtx_lock_spin(&po->po_mtx);
    if (po->po_curbuf)
        pmclog_schedule_io(po);
    else
        wakeup_one(po);     /* nothing queued; nudge the helper anyway */
    mtx_unlock_spin(&po->po_mtx);

    /*
     * Initiate shutdown: no new data queued,
     * thread will close file on last block.
     */
    po->po_flags |= PMC_PO_SHUTDOWN;

    mtx_unlock(&pmc_kthread_mtx);

    return (0);
}
765 |
|
766 |
/*
 * Emit a CALLCHAIN record for sample 'ps' of pmc 'pm': pid, pmc id,
 * cpu/flags word, then ps_nsamples program-counter values.  The
 * record is variable-length; returns silently if no buffer space is
 * available (PMCLOG_RESERVE's failure action).
 */
void
pmclog_process_callchain(struct pmc *pm, struct pmc_sample *ps)
{
    int n, recordlen;
    uint32_t flags;
    struct pmc_owner *po;

    PMCDBG(LOG,SAM,1,"pm=%p pid=%d n=%d", pm, ps->ps_pid,
        ps->ps_nsamples);

    /* variable-length record: fixed prefix + one address per sample */
    recordlen = offsetof(struct pmclog_callchain, pl_pc) +
        ps->ps_nsamples * sizeof(uintfptr_t);
    po = pm->pm_owner;
    flags = PMC_CALLCHAIN_TO_CPUFLAGS(ps->ps_cpu,ps->ps_flags);
    PMCLOG_RESERVE(po, CALLCHAIN, recordlen);
    PMCLOG_EMIT32(ps->ps_pid);
    PMCLOG_EMIT32(pm->pm_id);
    PMCLOG_EMIT32(flags);
    for (n = 0; n < ps->ps_nsamples; n++)
        PMCLOG_EMITADDR(ps->ps_pc[n]);
    PMCLOG_DESPATCH(po);
}
788 |
|
789 |
/*
 * Emit a CLOSELOG record (header and timestamp only) to owner 'po's
 * log.  Returns silently if no buffer space is available.
 */
void
pmclog_process_closelog(struct pmc_owner *po)
{
    PMCLOG_RESERVE(po,CLOSELOG,sizeof(struct pmclog_closelog));
    PMCLOG_DESPATCH(po);
}
795 |
|
796 |
/*
 * Emit a DROPNOTIFY record (header and timestamp only), informing the
 * log consumer that data was dropped.  Returns silently if no buffer
 * space is available.
 */
void
pmclog_process_dropnotify(struct pmc_owner *po)
{
    PMCLOG_RESERVE(po,DROPNOTIFY,sizeof(struct pmclog_dropnotify));
    PMCLOG_DESPATCH(po);
}
802 |
|
803 |
/*
 * Emit a MAP_IN record: pid, mapping start address and the
 * NUL-terminated pathname of the mapped object.  'path' must be
 * non-NULL.  Returns silently if no buffer space is available.
 */
void
pmclog_process_map_in(struct pmc_owner *po, pid_t pid, uintfptr_t start,
    const char *path)
{
    int pathlen, recordlen;

    KASSERT(path != NULL, ("[pmclog,%d] map-in, null path", __LINE__));

    pathlen = strlen(path) + 1;     /* #bytes for path name */
    recordlen = offsetof(struct pmclog_map_in, pl_pathname) +
        pathlen;

    PMCLOG_RESERVE(po, MAP_IN, recordlen);
    PMCLOG_EMIT32(pid);
    PMCLOG_EMITADDR(start);
    PMCLOG_EMITSTRING(path,pathlen);    /* must be last: does not move _le */
    PMCLOG_DESPATCH(po);
}
821 |
|
822 |
/*
 * Emit a MAP_OUT record: pid plus the [start, end] address range of
 * the unmapped region.  Returns silently if no buffer space is
 * available.
 */
void
pmclog_process_map_out(struct pmc_owner *po, pid_t pid, uintfptr_t start,
    uintfptr_t end)
{
    KASSERT(start <= end, ("[pmclog,%d] start > end", __LINE__));

    PMCLOG_RESERVE(po, MAP_OUT, sizeof(struct pmclog_map_out));
    PMCLOG_EMIT32(pid);
    PMCLOG_EMITADDR(start);
    PMCLOG_EMITADDR(end);
    PMCLOG_DESPATCH(po);
}
834 |
|
835 |
/*
 * Emit a record describing a newly-allocated pmc.  Soft-class pmcs
 * get a PMCALLOCATEDYN record carrying the event name (looked up via
 * pmc_soft_ev_acquire(); a zeroed name is emitted if the lookup
 * fails); all other classes get a plain PMCALLOCATE record with id,
 * event, and flags.  Returns silently if no buffer space is
 * available.
 */
void
pmclog_process_pmcallocate(struct pmc *pm)
{
    struct pmc_owner *po;
    struct pmc_soft *ps;

    po = pm->pm_owner;

    PMCDBG(LOG,ALL,1, "pm=%p", pm);

    if (PMC_TO_CLASS(pm) == PMC_CLASS_SOFT) {
        PMCLOG_RESERVE(po, PMCALLOCATEDYN,
            sizeof(struct pmclog_pmcallocatedyn));
        PMCLOG_EMIT32(pm->pm_id);
        PMCLOG_EMIT32(pm->pm_event);
        PMCLOG_EMIT32(pm->pm_flags);
        ps = pmc_soft_ev_acquire(pm->pm_event);
        if (ps != NULL)
            PMCLOG_EMITSTRING(ps->ps_ev.pm_ev_name,PMC_NAME_MAX);
        else
            PMCLOG_EMITNULLSTRING(PMC_NAME_MAX);
        pmc_soft_ev_release(ps);
        PMCLOG_DESPATCH(po);
    } else {
        PMCLOG_RESERVE(po, PMCALLOCATE,
            sizeof(struct pmclog_pmcallocate));
        PMCLOG_EMIT32(pm->pm_id);
        PMCLOG_EMIT32(pm->pm_event);
        PMCLOG_EMIT32(pm->pm_flags);
        PMCLOG_DESPATCH(po);
    }
}
867 |
|
868 |
/*
 * Emit a PMCATTACH record: pmc id, target pid, and the
 * NUL-terminated pathname of the target's executable.  Returns
 * silently if no buffer space is available.
 */
void
pmclog_process_pmcattach(struct pmc *pm, pid_t pid, char *path)
{
    int pathlen, recordlen;
    struct pmc_owner *po;

    PMCDBG(LOG,ATT,1,"pm=%p pid=%d", pm, pid);

    po = pm->pm_owner;

    pathlen = strlen(path) + 1;     /* #bytes for the string */
    recordlen = offsetof(struct pmclog_pmcattach, pl_pathname) + pathlen;

    PMCLOG_RESERVE(po, PMCATTACH, recordlen);
    PMCLOG_EMIT32(pm->pm_id);
    PMCLOG_EMIT32(pid);
    PMCLOG_EMITSTRING(path, pathlen);   /* must be last: does not move _le */
    PMCLOG_DESPATCH(po);
}
887 |
|
888 |
/*
 * Emit a PMCDETACH record: pmc id and the pid it was detached from.
 * Returns silently if no buffer space is available.
 */
void
pmclog_process_pmcdetach(struct pmc *pm, pid_t pid)
{
    struct pmc_owner *po;

    PMCDBG(LOG,ATT,1,"!pm=%p pid=%d", pm, pid);

    po = pm->pm_owner;

    PMCLOG_RESERVE(po, PMCDETACH, sizeof(struct pmclog_pmcdetach));
    PMCLOG_EMIT32(pm->pm_id);
    PMCLOG_EMIT32(pid);
    PMCLOG_DESPATCH(po);
}
902 |
|
903 |
/*
 * Log a context switch event to the log file.
 *
 * Emits a PROCCSW record: pmc id, 64-bit accumulated pmc value 'v',
 * and the pid of the switching process.  Only valid for pmcs with
 * PMC_F_LOG_PROCCSW set.  Returns silently if no buffer space is
 * available.
 */

void
pmclog_process_proccsw(struct pmc *pm, struct pmc_process *pp, pmc_value_t v)
{
    struct pmc_owner *po;

    KASSERT(pm->pm_flags & PMC_F_LOG_PROCCSW,
        ("[pmclog,%d] log-process-csw called gratuitously", __LINE__));

    PMCDBG(LOG,SWO,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
        v);

    po = pm->pm_owner;

    PMCLOG_RESERVE(po, PROCCSW, sizeof(struct pmclog_proccsw));
    PMCLOG_EMIT32(pm->pm_id);
    PMCLOG_EMIT64(v);
    PMCLOG_EMIT32(pp->pp_proc->p_pid);
    PMCLOG_DESPATCH(po);
}
926 |
|
927 |
/*
 * Emit a PROCEXEC record: pid, executable start address, pmc id, and
 * the NUL-terminated pathname of the new image.  Returns silently if
 * no buffer space is available.
 */
void
pmclog_process_procexec(struct pmc_owner *po, pmc_id_t pmid, pid_t pid,
    uintfptr_t startaddr, char *path)
{
    int pathlen, recordlen;

    PMCDBG(LOG,EXC,1,"po=%p pid=%d path=\"%s\"", po, pid, path);

    pathlen   = strlen(path) + 1;   /* #bytes for the path */
    recordlen = offsetof(struct pmclog_procexec, pl_pathname) + pathlen;

    PMCLOG_RESERVE(po, PROCEXEC, recordlen);
    PMCLOG_EMIT32(pid);
    PMCLOG_EMITADDR(startaddr);
    PMCLOG_EMIT32(pmid);
    PMCLOG_EMITSTRING(path,pathlen);    /* must be last: does not move _le */
    PMCLOG_DESPATCH(po);
}
945 |
|
946 |
/*
 * Log a process exit event (and accumulated pmc value) to the log file.
 *
 * Emits a PROCEXIT record: pmc id, the 64-bit accumulated value for
 * this pmc's row index in 'pp', and the exiting pid.  Returns
 * silently if no buffer space is available.
 */

void
pmclog_process_procexit(struct pmc *pm, struct pmc_process *pp)
{
    int ri;
    struct pmc_owner *po;

    ri = PMC_TO_ROWINDEX(pm);
    PMCDBG(LOG,EXT,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
        pp->pp_pmcs[ri].pp_pmcval);

    po = pm->pm_owner;

    PMCLOG_RESERVE(po, PROCEXIT, sizeof(struct pmclog_procexit));
    PMCLOG_EMIT32(pm->pm_id);
    PMCLOG_EMIT64(pp->pp_pmcs[ri].pp_pmcval);
    PMCLOG_EMIT32(pp->pp_proc->p_pid);
    PMCLOG_DESPATCH(po);
}
968 |
|
969 |
/*
 * Log a fork event.
 *
 * Emits a PROCFORK record with the parent and child pids.  Returns
 * silently if no buffer space is available.
 */

void
pmclog_process_procfork(struct pmc_owner *po, pid_t oldpid, pid_t newpid)
{
    PMCLOG_RESERVE(po, PROCFORK, sizeof(struct pmclog_procfork));
    PMCLOG_EMIT32(oldpid);
    PMCLOG_EMIT32(newpid);
    PMCLOG_DESPATCH(po);
}
981 |
|
982 |
/*
 * Log a process exit event of the form suitable for system-wide PMCs.
 *
 * Emits a SYSEXIT record carrying only the exiting pid.  Returns
 * silently if no buffer space is available.
 */

void
pmclog_process_sysexit(struct pmc_owner *po, pid_t pid)
{
    PMCLOG_RESERVE(po, SYSEXIT, sizeof(struct pmclog_sysexit));
    PMCLOG_EMIT32(pid);
    PMCLOG_DESPATCH(po);
}
993 |
|
994 |
/*
 * Write a user log entry.
 *
 * Emits a USERDATA record carrying the caller-supplied 32-bit value
 * from 'wl'.  Returns 0 on success or ENOMEM if no buffer space was
 * available (via PMCLOG_RESERVE_WITH_ERROR).
 */

int
pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl)
{
    int error;

    PMCDBG(LOG,WRI,1, "writelog po=%p ud=0x%x", po, wl->pm_userdata);

    error = 0;

    PMCLOG_RESERVE_WITH_ERROR(po, USERDATA,
        sizeof(struct pmclog_userdata));
    PMCLOG_EMIT32(wl->pm_userdata);
    PMCLOG_DESPATCH(po);

 error:
    return (error);
}
1015 |
|
1016 |
/* |
1017 |
* Initialization. |
1018 |
* |
1019 |
* Create a pool of log buffers and initialize mutexes. |
1020 |
*/ |
1021 |
|
1022 |
void |
1023 |
pmclog_initialize() |
1024 |
{ |
1025 |
int n; |
1026 |
struct pmclog_buffer *plb; |
1027 |
|
1028 |
if (pmclog_buffer_size <= 0) { |
1029 |
(void) printf("hwpmc: tunable logbuffersize=%d must be " |
1030 |
"greater than zero.\n", pmclog_buffer_size); |
1031 |
pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; |
1032 |
} |
1033 |
|
1034 |
if (pmc_nlogbuffers <= 0) { |
1035 |
(void) printf("hwpmc: tunable nlogbuffers=%d must be greater " |
1036 |
"than zero.\n", pmc_nlogbuffers); |
1037 |
pmc_nlogbuffers = PMC_NLOGBUFFERS; |
1038 |
} |
1039 |
|
1040 |
/* create global pool of log buffers */ |
1041 |
for (n = 0; n < pmc_nlogbuffers; n++) { |
1042 |
plb = malloc(1024 * pmclog_buffer_size, M_PMC, |
1043 |
M_WAITOK|M_ZERO); |
1044 |
PMCLOG_INIT_BUFFER_DESCRIPTOR(plb); |
1045 |
TAILQ_INSERT_HEAD(&pmc_bufferlist, plb, plb_next); |
1046 |
} |
1047 |
mtx_init(&pmc_bufferlist_mtx, "pmc-buffer-list", "pmc-leaf", |
1048 |
MTX_SPIN); |
1049 |
mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF); |
1050 |
} |
1051 |
|
1052 |
/* |
1053 |
* Shutdown logging. |
1054 |
* |
1055 |
* Destroy mutexes and release memory back the to free pool. |
1056 |
*/ |
1057 |
|
1058 |
void |
1059 |
pmclog_shutdown() |
1060 |
{ |
1061 |
struct pmclog_buffer *plb; |
1062 |
|
1063 |
mtx_destroy(&pmc_kthread_mtx); |
1064 |
mtx_destroy(&pmc_bufferlist_mtx); |
1065 |
|
1066 |
while ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL) { |
1067 |
TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next); |
1068 |
free(plb, M_PMC); |
1069 |
} |
1070 |
} |