Project

General

Profile

Actions

Bug #859

closed

hwpmc [6/13]

Added by aoiko almost 17 years ago. Updated over 14 years ago.

Status:
Closed
Priority:
Normal
Assignee:
-
Category:
-
Target version:
-
Start date:
Due date:
% Done:

0%

Estimated time:

Description

Port to DragonFly. WIP.

diff -upr dev/misc/hwpmc.freebsd/hwpmc_amd.c dev/misc/hwpmc/hwpmc_amd.c
--- dev/misc/hwpmc.freebsd/hwpmc_amd.c
+++ dev/misc/hwpmc/hwpmc_amd.c
@@ -26,16 +26,14 @@
*/

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_amd.c,v 1.13 2005/12/05 11:58:32 ru Exp $");
+/* __FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_amd.c,v 1.13 2005/12/05 11:58:32 ru Exp $"); */
/* Support for the AMD K7 and later processors */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/cpufunc.h>
@ -265,7 +263,7 @ amd_read_pmc(int cpu, int ri, pmc_value_
const struct pmc_hw *phw;
pmc_value_t tmp;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
@ -320,7 +318,7 @ amd_write_pmc(int cpu, int ri, pmc_value
const struct pmc_hw *phw;
enum pmc_mode mode;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
@ -367,7 +365,7 @ amd_config_pmc(int cpu, int ri, struct p

PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
@ -449,7 +447,7 @ amd_allocate_pmc(int cpu, int ri, struct

(void) cpu;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row index %d", __LINE__, ri));
@ -543,7 +541,7 @ amd_release_pmc(int cpu, int ri, struct

(void) pmc;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
@ -575,7 +573,7 @ amd_start_pmc(int cpu, int ri)
struct pmc_hw *phw;
const struct amd_descr *pd;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
@ -624,7 +622,7 @ amd_stop_pmc(int cpu, int ri)
const struct amd_descr *pd;
uint64_t config;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
@ -676,7 +674,7 @ amd_intr(int cpu, uintptr_t eip, int use
struct pmc_hw *phw;
pmc_value_t v;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] out of range CPU %d", __LINE__, cpu));

PMCDBG(MDP,INT,1, "cpu=%d eip=%p um=%d", cpu, (void *) eip,
@ -756,7 +754,7 @ amd_describe(int cpu, int ri, struct pmc
const struct amd_descr *pd;
struct pmc_hw *phw;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] row-index %d out of range", __LINE__, ri));
@ -825,7 +823,7 @ amd_init(int cpu)
struct amd_cpu *pcs;
struct pmc_hw *phw;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] insane cpu number %d", __LINE__, cpu));

PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);
@ -868,7 +866,7 @ amd_cleanup(int cpu)
uint32_t evsel;
struct pmc_cpu *pcs;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
	    ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));

PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);
@ -944,7 +942,7 @ pmc_amd_initialize(void)
}
 	if ((int) cputype == -1) {
-		(void) printf("pmc: Unknown AMD CPU.\n");
+		(void) kprintf("pmc: Unknown AMD CPU.\n");
return NULL;
}

@ -976,7 +974,7 @ pmc_amd_initialize(void)

/* fill in the correct pmc name and class */
for (i = 1; i < AMD_NPMCS; i++) {
- (void) snprintf(amd_pmcdesc[i].pm_descr.pd_name,
+ (void) ksnprintf(amd_pmcdesc[i].pm_descr.pd_name,
sizeof(amd_pmcdesc[i].pm_descr.pd_name), "%s-%d",
name, i-1);
amd_pmcdesc[i].pm_descr.pd_class = class;
diff -upr dev/misc/hwpmc.freebsd/hwpmc_logging.c dev/misc/hwpmc/hwpmc_logging.c
--- dev/misc/hwpmc.freebsd/hwpmc_logging.c
+++ dev/misc/hwpmc/hwpmc_logging.c
@@ -30,19 +30,21 @@
*/
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_logging.c,v 1.7 2007/04/19 08:02:51 jkoshy Exp $");
+/* __FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_logging.c,v 1.7 2007/04/19 08:02:51 jkoshy Exp $"); */
 #include <sys/param.h>
 #include <sys/file.h>
 #include <sys/file2.h>
 #include <sys/kernel.h>
 #include <sys/kthread.h>
 #include <sys/lock.h>
 #include <sys/module.h>
-#include <sys/mutex.h>
 #include <sys/pmc.h>
 #include <sys/pmclog.h>
 #include <sys/proc.h>
 #include <sys/signalvar.h>
 #include <sys/spinlock.h>
+#include <sys/spinlock2.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/uio.h>
@@ -50,6 +52,15 @@ __FBSDID("$FreeBSD: src/sys/dev/hwpmc/hw
 #include <sys/vnode.h>

+/*
+ * There will probably be a corresponding lock in dragonfly,
+ * until then keep the macros around as markers. If the
+ * semantics are not too different for our lock, we might
+ * reuse them.
+ */
+#define PROC_LOCK(e) do { (void)(e); } while(0)
+#define PROC_UNLOCK(e) do { (void)(e); } while(0)

 /*
* Sysctl tunables
*/

@ -61,7 +72,7 @ SYSCTL_DECL(_kern_hwpmc);

static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size);
-SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_TUN|CTLFLAG_RD,
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_RD,
&pmclog_buffer_size, 0, "size of log buffers in kilobytes");

@ -71,7 +82,7 @ SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuf

static int pmc_nlogbuffers = PMC_NLOGBUFFERS;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers);
-SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_TUN|CTLFLAG_RD,
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_RD,
&pmc_nlogbuffers, 0, "number of global log buffers");
/*
@ -80,8 +91,8 @ SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffe
TAILQ_HEAD(, pmclog_buffer) pmc_bufferlist =
TAILQ_HEAD_INITIALIZER(pmc_bufferlist);
-static struct mtx pmc_bufferlist_mtx;	/* spin lock */
-static struct mtx pmc_kthread_mtx;	/* sleep lock */
+static struct spinlock pmc_bufferlist_lock;	/* spin lock */
+static struct lock pmc_kthread_lock;	/* sleep lock */
#define    PMCLOG_INIT_BUFFER_DESCRIPTOR(D) do {                \
const int __roundup = roundup(sizeof(D), \
@@ -130,7 +141,6 @@ static struct mtx pmc_kthread_mtx; /* sl
pmclog_release((PO)); \
} while (0)

-
/* * Assertions about the log file format.
*/
@@ -192,15 +202,13 @@ pmclog_get_buffer(struct pmc_owner *po)
 {
struct pmclog_buffer *plb;

-	mtx_assert(&po->po_mtx, MA_OWNED);

 	KASSERT(po->po_curbuf == NULL,
 	    ("[pmc,%d] po=%p current buffer still valid", __LINE__, po));

- mtx_lock_spin(&pmc_bufferlist_mtx);
+ spin_lock_wr(&pmc_bufferlist_lock);
if ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL)
TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
- mtx_unlock_spin(&pmc_bufferlist_mtx);
+ spin_unlock_wr(&pmc_bufferlist_lock);

PMCDBG(LOG,GTB,1, "po=%p plb=%p", po, plb);

@ -236,7 +244,6 @ pmclog_loop(void *arg)
struct pmc_owner *po;
struct pmclog_buffer *lb;
struct ucred *ownercred;
- struct ucred *mycred;
struct thread *td;
struct uio auio;
struct iovec aiov;
@ -244,17 +251,12 @ pmclog_loop(void *arg)

po = (struct pmc_owner *) arg;
td = curthread;
- mycred = td->td_ucred;

PROC_LOCK(po->po_owner);
ownercred = crhold(po->po_owner->p_ucred);
- PROC_UNLOCK(po->po_owner);
PMCDBG(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
-	KASSERT(po->po_kthread == curthread->td_proc,
-	    ("[pmc,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
-	    po, po->po_kthread, curthread->td_proc));
+	KASSERT(po->po_kthread == curthread,
+	    ("[pmc,%d] proc mismatch po=%p po/kt=%p curthread=%p", __LINE__,
+	    po, po->po_kthread, curthread));
lb = NULL;

@@ -264,7 +266,7 @@ pmclog_loop(void *arg)
	 * is deconfigured.
*/

- mtx_lock(&pmc_kthread_mtx);
+ lockmgr(&pmc_kthread_lock, LK_EXCLUSIVE);

for (;;) {

@ -273,9 +275,9 @ pmclog_loop(void *arg)
break;

 		if (lb == NULL) { /* look for a fresh buffer to write */
- mtx_lock_spin(&po->po_mtx);
+ spin_lock_wr(&po->po_lock);
 			if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
- mtx_unlock_spin(&po->po_mtx);
+ spin_unlock_wr(&po->po_lock);
/* wakeup any processes waiting for a FLUSH */
if (po->po_flags & PMC_PO_IN_FLUSH) {
@ -283,16 +285,30 @ pmclog_loop(void *arg)
wakeup_one(po->po_kthread);
}

- (void) msleep(po, &pmc_kthread_mtx, PWAIT,
- "pmcloop", 0);
+				/*
+				 * drop pmc_kthread_lock and go to sleep,
+				 * using a critical section to ensure
+				 * there's no race with a wakeup.
+				 */
+				crit_enter();
+				tsleep_interlock(po);
+				lockmgr(&pmc_kthread_lock, LK_RELEASE);
+				/* XXX: fbsd flags PWAIT -- agg */
+				tsleep(po, 0, "pmcloop", 0);
+				crit_exit();
+				/*
+				 * lock must be acquired at the beginning
+				 * of the loop
+				 */
+				lockmgr(&pmc_kthread_lock, LK_EXCLUSIVE);
continue;
}

TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
- mtx_unlock_spin(&po->po_mtx);
+ spin_unlock_wr(&po->po_lock);
}

- mtx_unlock(&pmc_kthread_mtx);
+ lockmgr(&pmc_kthread_lock, LK_RELEASE);

/* process the request */
PMCDBG(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
@ -310,12 +326,9 @ pmclog_loop(void *arg)
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_td = td;

- /* switch thread credentials -- see kern_ktrace.c */
- td->td_ucred = ownercred;
- error = fo_write(po->po_file, &auio, ownercred, 0, td);
- td->td_ucred = mycred;
+ error = fo_write(po->po_file, &auio, ownercred, 0);

- mtx_lock(&pmc_kthread_mtx);
+ lockmgr(&pmc_kthread_lock, LK_EXCLUSIVE);

if (error) {
/* XXX some errors are recoverable */
@ -323,7 +336,7 @ pmclog_loop(void *arg)
/* send a SIGIO to the owner and exit */
PROC_LOCK(po->po_owner);
- psignal(po->po_owner, SIGIO);
+ lwpsignal(po->po_owner, NULL, SIGIO);
PROC_UNLOCK(po->po_owner);
 			po->po_error = error; /* save for flush log */
@@ -336,24 +349,24 @@ pmclog_loop(void *arg)
 		/* put the used buffer back into the global pool */
PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

- mtx_lock_spin(&pmc_bufferlist_mtx);
+ spin_lock_wr(&pmc_bufferlist_lock);
TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
- mtx_unlock_spin(&pmc_bufferlist_mtx);
+ spin_unlock_wr(&pmc_bufferlist_lock);

lb = NULL;
}
po->po_kthread = NULL;

- mtx_unlock(&pmc_kthread_mtx);
+ lockmgr(&pmc_kthread_lock, LK_RELEASE);

/* return the current I/O buffer to the global pool */
if (lb) {
PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

- mtx_lock_spin(&pmc_bufferlist_mtx);
+ spin_lock_wr(&pmc_bufferlist_lock);
TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
- mtx_unlock_spin(&pmc_bufferlist_mtx);
+ spin_unlock_wr(&pmc_bufferlist_lock);
}

/*
@ -362,7 +375,11 @ pmclog_loop(void *arg)
crfree(ownercred);

- kthread_exit(0);
+ /*
+ * XXX: maybe do a wakeup in kthread_exit() or lwkt_exit()?
+ */
+ wakeup(td);
+ kthread_exit();
}

/*
@ -383,7 +400,7 @ pmclog_release(struct pmc_owner *po)
if (po->po_curbuf->plb_ptr >= po->po_curbuf->plb_fence)
pmclog_schedule_io(po);

- mtx_unlock_spin(&po->po_mtx);
+ spin_unlock_wr(&po->po_lock);

PMCDBG(LOG,REL,1, "po=%p", po);
}
@ -410,11 +427,11 @ pmclog_reserve(struct pmc_owner *po, int
 	KASSERT(length % sizeof(uint32_t) == 0,
 	    ("[pmclog,%d] length not a multiple of word size", __LINE__));

- mtx_lock_spin(&po->po_mtx);
+ spin_lock_wr(&po->po_lock);

 	if (po->po_curbuf == NULL)
if (pmclog_get_buffer(po) != 0) {
- mtx_unlock_spin(&po->po_mtx);
+ spin_unlock_wr(&po->po_lock);
return NULL;
}

@ -449,7 +466,7 @ pmclog_reserve(struct pmc_owner *po, int
pmclog_schedule_io(po);

if (pmclog_get_buffer(po) != 0) {
- mtx_unlock_spin(&po->po_mtx);
+ spin_unlock_wr(&po->po_lock);
return NULL;
}

@ -497,8 +514,6 @ pmclog_schedule_io(struct pmc_owner *po)

PMCDBG(LOG,SIO, 1, "po=%p", po);

- mtx_assert(&po->po_mtx, MA_OWNED);
-
/* * Add the current buffer to the tail of the buffer list and * wakeup the helper.
@ -520,11 +535,18 @ pmclog_stop_kthread(struct pmc_owner *po * wait for it to exit
*/

- mtx_assert(&pmc_kthread_mtx, MA_OWNED);
+	KKASSERT(lockstatus(&pmc_kthread_lock, curthread) == LK_EXCLUSIVE);
po->po_flags &= ~PMC_PO_OWNS_LOGFILE;
wakeup_one(po);
- if (po->po_kthread)
- msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0);
+ if (po->po_kthread) {
+ crit_enter();
+ tsleep_interlock(po->po_kthread);
+ lockmgr(&pmc_kthread_lock, LK_RELEASE);
+ /* XXX: fbsd flags PPAUSE -- agg */
+ tsleep(po->po_kthread, 0, "pmckstp", 0);
+ crit_exit();
+ lockmgr(&pmc_kthread_lock, LK_EXCLUSIVE);
+ }
}

/*
@ -559,15 +581,14 @ pmclog_configure_log(struct pmc_owner *p
("[pmc,%d] po=%p file (%p) already present", LINE, po,
po->po_file));

- /* get a reference to the file state */
- error = fget_write(curthread, logfd, &po->po_file);
+ error = holdvnode(curthread->td_proc->p_fd, logfd, &po->po_file);
if (error)
goto error;

/* mark process as owning a log file */
po->po_flags |= PMC_PO_OWNS_LOGFILE;
error = kthread_create(pmclog_loop, po, &po->po_kthread,
- RFHIGHPID, 0, "hwpmc: proc(%d)", p->p_pid);
+ "hwpmc: proc(%d)", p->p_pid);
if (error)
goto error;

@ -587,16 +608,16 @ pmclog_configure_log(struct pmc_owner *p

error:
/* shutdown the thread */
- mtx_lock(&pmc_kthread_mtx);
+ lockmgr(&pmc_kthread_lock, LK_EXCLUSIVE);
if (po->po_kthread)
pmclog_stop_kthread(po);
- mtx_unlock(&pmc_kthread_mtx);
+ lockmgr(&pmc_kthread_lock, LK_RELEASE);
KASSERT(po->po_kthread == NULL, ("[pmc,%d] po=%p kthread not stopped",
LINE, po));
if (po->po_file)
- (void) fdrop(po->po_file, curthread);
+ (void) fdrop(po->po_file);
po->po_file = NULL; /* clear file and error state */
po->po_error = 0;

@ -626,10 +647,10 @ pmclog_deconfigure_log(struct pmc_owner
("[pmc,%d] po=%p no log file", LINE, po));

/* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
- mtx_lock(&pmc_kthread_mtx);
+ lockmgr(&pmc_kthread_lock, LK_EXCLUSIVE);
if (po->po_kthread)
pmclog_stop_kthread(po);
- mtx_unlock(&pmc_kthread_mtx);
+ lockmgr(&pmc_kthread_lock, LK_RELEASE);
KASSERT(po->po_kthread == NULL,
("[pmc,%d] po=%p kthread not stopped", LINE, po));
@ -638,21 +659,21 @ pmclog_deconfigure_log(struct pmc_owner
while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
- mtx_lock_spin(&pmc_bufferlist_mtx);
+ spin_lock_wr(&pmc_bufferlist_lock);
TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
- mtx_unlock_spin(&pmc_bufferlist_mtx);
+ spin_unlock_wr(&pmc_bufferlist_lock);
}
/* return the 'current' buffer to the global pool */
if ((lb = po->po_curbuf) != NULL) {
PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
- mtx_lock_spin(&pmc_bufferlist_mtx);
+ spin_lock_wr(&pmc_bufferlist_lock);
TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
- mtx_unlock_spin(&pmc_bufferlist_mtx);
+ spin_unlock_wr(&pmc_bufferlist_lock);
}
/* drop a reference to the fd */
- error = fdrop(po->po_file, curthread);
+ error = fdrop(po->po_file);
po->po_file = NULL;
po->po_error = 0;

@ -666,7 +687,7 @ pmclog_deconfigure_log(struct pmc_owner
int
pmclog_flush(struct pmc_owner *po) {
- int error, has_pending_buffers;
+ int error, has_pending_buffers = 0;

PMCDBG(LOG,FLS,1, "po=%p", po);

@ -682,7 +703,7 @ pmclog_flush(struct pmc_owner po)
/
* Check that we do have an active log file.
/
- mtx_lock(&pmc_kthread_mtx);
+ lockmgr(&pmc_kthread_lock, LK_EXCLUSIVE);
if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
error = EINVAL;
goto error;
@ -691,20 +712,22 @ pmclog_flush(struct pmc_owner *po)
/
* Schedule the current buffer if any.
*/
- mtx_lock_spin(&po->po_mtx);
+ spin_lock_wr(&po->po_lock);
if (po->po_curbuf)
pmclog_schedule_io(po);
has_pending_buffers = !TAILQ_EMPTY(&po->po_logbuffers);
- mtx_unlock_spin(&po->po_mtx);
+ spin_unlock_wr(&po->po_lock);

if (has_pending_buffers) {
 		po->po_flags |= PMC_PO_IN_FLUSH; /* ask for a wakeup */
-		error = msleep(po->po_kthread, &pmc_kthread_mtx, PWAIT,
-		    "pmcflush", 0);
+		lockmgr(&pmc_kthread_lock, LK_RELEASE);
+		/* XXX: fbsd flags PWAIT -- agg */
+		error = tsleep(po->po_kthread, 0, "pmcflush", 0);
}
error:
- mtx_unlock(&pmc_kthread_mtx);
+ if (!has_pending_buffers)
+ lockmgr(&pmc_kthread_lock, LK_RELEASE);
return error;
}
@ -955,13 +978,13 @ pmclog_initialize()
struct pmclog_buffer *plb;
if (pmclog_buffer_size <= 0) {
- (void) printf("hwpmc: tunable logbuffersize=%d must be greater "
+ (void) kprintf("hwpmc: tunable logbuffersize=%d must be greater "
"than zero.\n", pmclog_buffer_size);
pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
}
if (pmc_nlogbuffers <= 0) {
- (void) printf("hwpmc: tunable nlogbuffers=%d must be greater "
+ (void) kprintf("hwpmc: tunable nlogbuffers=%d must be greater "
"than zero.\n", pmc_nlogbuffers);
pmc_nlogbuffers = PMC_NLOGBUFFERS;
}
@ -973,9 +996,8 @ pmclog_initialize()
PMCLOG_INIT_BUFFER_DESCRIPTOR(plb);
TAILQ_INSERT_HEAD(&pmc_bufferlist, plb, plb_next);
}
- mtx_init(&pmc_bufferlist_mtx, "pmc-buffer-list", "pmc-leaf",
- MTX_SPIN);
- mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF);
+ spin_init(&pmc_bufferlist_lock);
+ lockinit(&pmc_kthread_lock, "pmc-sleep", 0, 0);
}
/*
@ -989,8 +1011,8 @ pmclog_shutdown() {
struct pmclog_buffer *plb;

- mtx_destroy(&pmc_kthread_mtx);
- mtx_destroy(&pmc_bufferlist_mtx);
+ lockuninit(&pmc_kthread_lock);
+ spin_uninit(&pmc_bufferlist_lock);

while ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL) {
TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
diff -upr dev/misc/hwpmc.freebsd/hwpmc_mod.c dev/misc/hwpmc/hwpmc_mod.c
--- dev/misc/hwpmc.freebsd/hwpmc_mod.c
+++ dev/misc/hwpmc/hwpmc_mod.c
@@ -26,29 +26,26 @@
*/
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_mod.c,v 1.29 2007/06/05 00:00:50 jeff Exp $");
+/* __FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_mod.c,v 1.29 2007/06/05 00:00:50 jeff Exp $"); */
 #include <sys/param.h>
 #include <sys/eventhandler.h>
 #include <sys/jail.h>
 #include <sys/kernel.h>
 #include <sys/kthread.h>
 #include <sys/limits.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/module.h>
 #include <sys/mutex.h>
 #include <sys/pmc.h>
 #include <sys/pmckern.h>
 #include <sys/pmclog.h>
 #include <sys/priv.h>
 #include <sys/proc.h>
 #include <sys/queue.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
 #include <sys/signalvar.h>
 #include <sys/smp.h>
-#include <sys/sx.h>
 #include <sys/spinlock.h>
 #include <sys/spinlock2.h>
 #include <sys/sysctl.h>
 #include <sys/sysent.h>
 #include <sys/systm.h>
@@ -57,7 +54,52 @@ __FBSDID("$FreeBSD: src/sys/dev/hwpmc/hw
 #include <sys/linker.h>		/* needs to be after <sys/malloc.h> */
 #include <machine/atomic.h>
 #include <machine/limits.h>
 #include <machine/md_var.h>
 #include <machine/smp.h>
+#include <sys/ktr.h>		/* XXX */

KTR_INFO_MASTER(hwpmctr);
+KTR_INFO(!0, hwpmctr, regval, 0, "r=%lld", sizeof(int64_t));

#define PROC_LOCK(p) do { (void)p; } while(0)
#define PROC_UNLOCK(p) do { (void)p; } while(0)

+static __inline void
+critical_enter(void)
+{
+	/* no preemption in the dragonfly kernel */
+}
+
+static __inline void
+critical_exit(void)
+{
+}

+/* I think we need giant for process manipulation etc -- agg */
+#define DROP_GIANT() do {} while(0)
+#define PICKUP_GIANT() do {} while(0)

#define PRIV_PMC_MANAGE 0
#define PRIV_PMC_SYSTEM 0
+static __inline int
+priv_check(struct thread *td, int priv)
+{
+	/*
+	 * FIXME: don't have privs in dfly, just use root for now -- agg
+	 * XXX: jail?
+	 */
+	return suser(td);
+}

+static __inline int
+p_candebug(struct thread *td, struct proc *p)
+{
+	/*
+	 * FIXME: need to bring in some stuff from freebsd which will
+	 * require some code reorganization. TBD -- agg
+	 */
+	return 0;
+}
/*
 * Types
@ -79,7 +121,6 @ pmc_value_t pmc_pcpu_saved; / saved P
#define    PMC_PCPU_SAVED(C,R)    pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]

-struct mtx_pool pmc_mtxpool;
 static int *pmc_pmcdisp;	/* PMC row dispositions */

#define    PMC_ROW_DISP_IS_FREE(R)        (pmc_pmcdisp[(R)] == 0)
@ -94,7 +135,7 @ static int pmc_pmcdisp; / PMC row di
KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
LINE)); \
atomic_add_int(&pmc_pmcdisp[(R)], 1); \
-	KASSERT(pmc_pmcdisp[(R)] >= (-mp_ncpus), ("[pmc,%d] row " \
+ KASSERT(pmc_pmcdisp[(R)] >= (-ncpus), ("[pmc,%d] row " \
"disposition error", LINE)); \
} while (0)

@ -130,7 +171,7 @ struct pmc_mdep *md; * Hash tables mapping owner processes and target threads to PMCs.
*/

-struct mtx pmc_processhash_mtx;		/* spin mutex */
+struct spinlock pmc_processhash_lock;	/* spin lock */
static u_long pmc_processhashmask;
static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;

@ -183,8 +224,8 @ static void pmc_link_target_process(stru
static void pmc_maybe_remove_owner(struct pmc_owner *po);
static void pmc_process_csw_in(struct thread *td);
static void pmc_process_csw_out(struct thread *td);
-static void pmc_process_exit(void *arg, struct proc *p);
-static void pmc_process_fork(void *arg, struct proc *p1,
+static void pmc_process_exit(struct thread *td);
+static void pmc_process_fork(struct proc *p1,
struct proc *p2, int n);
static void pmc_process_samples(int cpu);
static void pmc_release_pmc_descriptor(struct pmc *pmc);
@ -195,7 +236,7 @ static void pmc_save_cpu_binding(struct
static void pmc_select_cpu(int cpu);
static int pmc_start(struct pmc *pm);
static int pmc_stop(struct pmc *pm);
-static int pmc_syscall_handler(struct thread *td, void *syscall_args);
+static int pmc_syscall_handler(void *syscall_args);
static void pmc_unlink_target_process(struct pmc *pmc,
struct pmc_process *pp);

@ -206,12 +247,25 @ static void pmc_unlink_target_process(st
SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");

#ifdef    DEBUG
#ifndef agg
#undef PMC_DEBUG_DEFAULT_FLAGS
#define PMC_DEBUG_DEFAULT_FLAGS { \
.pdb_CPU = 0x00000000, \
+ .pdb_CSW = 0x00000000, \
+ .pdb_LOG = 0xffffffff, \
+ .pdb_MDP = 0x00000000, \
+ .pdb_MOD = (1 << PMC_DEBUG_MIN_PMS), \
+ .pdb_OWN = 0xffffffff, \
+ .pdb_PMC = 0xffffffff, \
+ .pdb_PRC = 0xffffffff, \
+ .pdb_SAM = 0xffffffff,}
#endif
struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
char pmc_debugstr[PMC_DEBUG_STRSIZE];
TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
-    CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
+    CTLTYPE_STRING|CTLFLAG_RW,
0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
#endif

@ -222,7 +276,7 @ SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debug

static int pmc_hashsize = PMC_HASH_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
-SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RD,
&pmc_hashsize, 0, "rows in hash tables");
/*
@ -231,20 +285,10 @ SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsi
static int pmc_nsamples = PMC_NSAMPLES;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
-SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RD,
&pmc_nsamples, 0, "number of PC samples per CPU");
 /*
- * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
- */
-
-static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
-TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
-SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
-    &pmc_mtxpool_size, 0, "size of spin mutex pool");
-
-/*
  * security.bsd.unprivileged_syspmcs -- allow non-root processes to
  * allocate system-wide PMCs.
  *
@ -254,8 +298,8 @ SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoo
*/
static int pmc_unprivileged_syspmcs = 0;
-TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
-SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
+TUNABLE_INT("kern.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
+SYSCTL_INT(_kern, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
&pmc_unprivileged_syspmcs, 0,
"allow unprivileged process to allocate system PMCs");

@ -282,7 +326,8 @ SYSCTL_INT(_security_bsd, OID_AUTO, unpr
 /* The `sysent' for the new syscall */
 static struct sysent pmc_sysent = {
 	2,			/* sy_narg */
-	pmc_syscall_handler	/* sy_call */
+	pmc_syscall_handler,	/* sy_call */
+	NULL
 };

static struct syscall_module_data pmc_syscall_mod = {
@ -299,7 +344,7 @ static moduledata_t pmc_mod = {
&pmc_syscall_mod
};

-DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
+DECLARE_MODULE(pmc, pmc_mod, SI_SUB_CONFIGURE, SI_ORDER_ANY);
MODULE_VERSION(pmc, PMC_VERSION);

#ifdef    DEBUG
@ -495,7 +540,7 @ pmc_debugflags_sysctl_handler(SYSCTL_HAN * * The driver uses four locking strategies for its operation: *
- * - The global SX lock "pmc_sx" is used to protect internal
+ * - The global lockmgr lock "pmc_lock" is used to protect internal * data structures. * * Calls into the module by syscall() start with this lock being
@ -511,15 +556,15 @ pmc_debugflags_sysctl_handler(SYSCTL_HAN * It is only examined with the sx lock held in exclusive mode. The * kernel module is allowed to be unloaded only with the sx lock held * in exclusive mode. In normal syscall handling, after acquiring the
- * pmc_sx lock we first check that 'pmc_hook' is non-null before
+ * pmc_lock lock we first check that 'pmc_hook' is non-null before * proceeding. This prevents races between the thread unloading the module * and other threads seeking to use the module. * * - Lookups of target process structures and owner process structures
- * cannot use the global "pmc_sx" SX lock because these lookups need
+ * cannot use the global "pmc_lock" lock because these lookups need * to happen during context switches and in other critical sections * where sleeping is not allowed. We protect these lookup tables
- * with their own private spin-mutexes, "pmc_processhash_mtx" and
+ * with their own private spin-mutexes, "pmc_processhash_lock" and * "pmc_ownerhash_mtx". * * - Interrupt handlers work in a lock free manner. At interrupt
@ -587,14 +632,41 @ pmc_debugflags_sysctl_handler(SYSCTL_HAN * save the cpu binding of the current kthread
*/

+/*
+ * count set bits
+ */
+static int
+csb(cpumask_t mask)
+{
+	unsigned i, cnt;
+
+	for (i = 0, cnt = 0; i < (sizeof(cpumask_t) * 8); ++i, mask >>= 1) {
+		if (mask & 1) {
+			++cnt;
+		}
+	}
+	return cnt;
+}
+
+static int
+sched_is_bound(struct thread *td)
+{
+	struct lwp *lwp;
+
+	lwp = td->td_lwp;
+	if (!lwp) {
+		panic("can't happen\n");
+	}
+	/* FIXME: could be better -- agg */
+	return csb(lwp->lwp_cpumask) == ((sizeof(cpumask_t) * 8) - 1);
+}

static void
pmc_save_cpu_binding(struct pmc_binding *pb) {
PMCDBG;
- thread_lock(curthread);
pb->pb_bound = sched_is_bound(curthread);
- pb->pb_cpu = curthread->td_oncpu;
- thread_unlock(curthread);
+ pb->pb_cpu = mycpu->gd_cpuid;
PMCDBG;
}

@ -606,13 +678,11 @ static void
pmc_restore_cpu_binding(struct pmc_binding *pb) {
PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
- curthread->td_oncpu, pb->pb_cpu);
- thread_lock(curthread);
- if (pb->pb_bound)
- sched_bind(curthread, pb->pb_cpu);
- else
- sched_unbind(curthread);
- thread_unlock(curthread);
+ mycpu->gd_cpuid, pb->pb_cpu);

/* unconditionally return to original cpu */
+ lwkt_migratecpu(pb->pb_cpu);
+
PMCDBG;
}

@ -623,7 +693,7 @ pmc_restore_cpu_binding(struct pmc_bindi
static void
pmc_select_cpu(int cpu) {
-	KASSERT(cpu >= 0 && cpu < mp_ncpus,
+	KASSERT(CPUMASK(cpu) & smp_active_mask,
("[pmc,%d] bad cpu number %d", LINE, cpu));

/* never move to a disabled CPU */
@ -631,13 +701,12 @ pmc_select_cpu(int cpu)
"disabled CPU %d", LINE, cpu));
PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
- thread_lock(curthread);
- sched_bind(curthread, cpu);
- thread_unlock(curthread);

-	KASSERT(curthread->td_oncpu == cpu,
+	lwkt_migratecpu(cpu);
+
+	KASSERT(mycpu->gd_cpuid == cpu,
 	    ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
-	    cpu, curthread->td_oncpu));
+	    cpu, mycpu->gd_cpuid));

PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
}
@ -652,8 +721,8 @ pmc_select_cpu(int cpu)
static void
pmc_force_context_switch(void) {

-	pause("pmcctx", 1);
+ static int pillow;
+ tsleep(&pillow, 0, "pmcctx", 1);
}
/*
@ -669,9 +738,9 @ pmc_getfilename(struct vnode v, char *
td = curthread;
*fullpath = "unknown";
*freepath = NULL;
- vn_lock(v, LK_CANRECURSE | LK_EXCLUSIVE | LK_RETRY, td);
- vn_fullpath(td, v, fullpath, freepath);
- VOP_UNLOCK(v, 0, td);
+ vn_lock(v, LK_CANRECURSE | LK_EXCLUSIVE | LK_RETRY);
+ vn_fullpath(td->td_proc, v, fullpath, freepath);
+ vn_unlock(v);
}
/*
@ -683,7 +752,7 @ pmc_remove_owner(struct pmc_owner *po) {
struct pmc *pm, *tmp;

- sx_assert(&pmc_sx, SX_XLOCKED);
+	KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);

@ -691,7 +760,7 @ pmc_remove_owner(struct pmc_owner *po)
LIST_REMOVE(po, po_next);

/* release all owned PMC descriptors */
- LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
+ LIST_FOREACH_MUTABLE(pm, &po->po_pmcs, pm_next, tmp) {
PMCDBG(OWN,ORM,2, "pmc=%p", pm);
KASSERT(pm->pm_owner == po,
("[pmc,%d] owner %p != po %p", LINE, pm->pm_owner, po));
@ -742,7 +811,7 @ pmc_link_target_process(struct pmc *pm,
int ri;
struct pmc_target *pt;

- sx_assert(&pmc_sx, SX_XLOCKED);
+	KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

KASSERT(pm != NULL && pp != NULL,
("[pmc,%d] Null pm %p or pp %p", LINE, pm, pp));
@ -799,7 +868,7 @ pmc_unlink_target_process(struct pmc *pm
struct proc *p;
struct pmc_target *ptgt;

- sx_assert(&pmc_sx, SX_XLOCKED);
+	KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

KASSERT(pm != NULL && pp != NULL,
("[pmc,%d] Null pm %p or pp %p", LINE, pm, pp));
@ -843,7 +912,7 @ pmc_unlink_target_process(struct pmc *pm
if (LIST_EMPTY(&pm->pm_targets)) {
p = pm->pm_owner->po_owner;
PROC_LOCK(p);
- psignal(p, SIGIO);
+ lwpsignal(p, NULL, SIGIO);
PROC_UNLOCK(p);
PMCDBG(PRC,SIG,2, "signalling proc=%p signal=%d", p,
@ -917,8 +986,9 @ pmc_attach_one_process(struct proc *p, s
char *fullpath, *freepath;
struct pmc_process *pp;

- sx_assert(&pmc_sx, SX_XLOCKED);
+	KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

+ kprintf("FTLOD\n");
PMCDBG", pm,
PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

@ -976,7 +1046,7 @ pmc_attach_process(struct proc *p, struc
int error;
struct proc *top;

- sx_assert(&pmc_sx, SX_XLOCKED);
+	KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
@ -998,11 +1068,10 @ pmc_attach_process(struct proc *p, struc * this PMC.
*/

- sx_slock(&proctree_lock);
-
top = p;

for (;;) {
+ kprintf("attaching: p %lu\n", p->p_pid);
if ((error = pmc_attach_one_process(p, pm)) != 0)
break;
if (!LIST_EMPTY(&p->p_children))
@ -1022,7 +1091,6 @ pmc_attach_process(struct proc *p, struc
(void) pmc_detach_process(top, pm);
done:
- sx_sunlock(&proctree_lock);
return error;
}

@ -1038,7 +1106,7 @ pmc_detach_one_process(struct proc *p, s
int ri;
struct pmc_process *pp;

- sx_assert(&pmc_sx, SX_XLOCKED);
+	KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

KASSERT(pm != NULL,
("[pmc,%d] null pm pointer", LINE));
@ -1093,7 +1161,7 @ pmc_detach_process(struct proc *p, struc {
struct proc *top;

- sx_assert(&pmc_sx, SX_XLOCKED);
+	KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
@ -1107,8 +1175,6 @ pmc_detach_process(struct proc *p, struc * partially attached proc tree.
*/

- sx_slock(&proctree_lock);
-
top = p;

for (;;) {
@ -1128,8 +1194,6 @ pmc_detach_process(struct proc *p, struc
}
done:
- sx_sunlock(&proctree_lock);

 	if (LIST_EMPTY(&pm->pm_targets))
pm->pm_flags &= ~PMC_F_ATTACH_DONE;

@ -1163,12 +1227,12 @ pmc_process_csw_in(struct thread *td)

critical_enter(); /* no preemption from this point */

- cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
+ cpu = mycpu->gd_cpuid;

PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
p->p_pid, p->p_comm, pp);

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[pmc,%d] wierd CPU id %d", LINE, cpu));

pc = pmc_pcpu[cpu];
@ -1217,18 +1281,18 @ pmc_process_csw_in(struct thread *td) * inherited across descendants.
*/
if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
- mtx_pool_lock_spin(pmc_mtxpool, pm);
+ spin_lock_wr(&pm->lock);
newvalue = PMC_PCPU_SAVED(cpu,ri) =
pp->pp_pmcs[ri].pp_pmcval;
- mtx_pool_unlock_spin(pmc_mtxpool, pm);
+ spin_unlock_wr(&pm->lock);
} else {
KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
("[pmc,%d] illegal mode=%d", LINE,
PMC_TO_MODE(pm)));
- mtx_pool_lock_spin(pmc_mtxpool, pm);
+ spin_lock_wr(&pm->lock);
newvalue = PMC_PCPU_SAVED(cpu, ri) =
pm->pm_gv.pm_savedvalue;
- mtx_pool_unlock_spin(pmc_mtxpool, pm);
+ spin_unlock_wr(&pm->lock);
}
PMCDBG(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
@ -1265,6 +1329,7 @ pmc_process_csw_out(struct thread *td)
int64_t tmp;
pmc_value_t newvalue;

+
/* * Locate our process descriptor; this may be NULL if * this process is exiting and we have already removed
@ -1288,12 +1353,12 @ pmc_process_csw_out(struct thread *td)

critical_enter();

- cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
+ cpu = mycpu->gd_cpuid;

PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
p->p_pid, p->p_comm, pp);

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[pmc,%d wierd CPU id %d", LINE, cpu));

pc = pmc_pcpu[cpu];
@ -1366,12 +1431,12 @ pmc_process_csw_out(struct thread *td)
*/
if (tmp < 0)
tmp = pm->pm_sc.pm_reloadcount;
- mtx_pool_lock_spin(pmc_mtxpool, pm);
spin_lock_wr(&pm->lock);
pp->pp_pmcs[ri].pp_pmcval = tmp;
if ((int64_t) pp->pp_pmcs[ri].pp_pmcval < 0)
pp->pp_pmcs[ri].pp_pmcval =
pm->pm_sc.pm_reloadcount;
- mtx_pool_unlock_spin(pmc_mtxpool, pm);
spin_unlock_wr(&pm->lock);
} else {

@ -1381,16 +1446,18 @ pmc_process_csw_out(struct thread *td) * increasing monotonically, modulo a 64 * bit wraparound.
*/

KTR_LOG(hwpmctr_regval, PMC_PCPU_SAVED(cpu, ri));
KASSERT((int64_t) tmp >= 0,
("[pmc,%d] negative increment cpu=%d "
"ri=%d newvalue=%jx saved=%jx "
"incr=%jx", LINE, cpu, ri,
newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));

- mtx_pool_lock_spin(pmc_mtxpool, pm);
+ spin_lock_wr(&pm->lock);
pm->pm_gv.pm_savedvalue = tmp;
pp->pp_pmcs[ri].pp_pmcval += tmp;
- mtx_pool_unlock_spin(pmc_mtxpool, pm);
spin_unlock_wr(&pm->lock);

if (pm->pm_flags & PMC_F_LOG_PROCCSW)
pmclog_process_proccsw(pm, pp, tmp);
@ -1420,7 +1487,7 @ pmc_process_kld_load(struct pmckern_map_ {
struct pmc_owner *po;

- sx_assert(&pmc_sx, SX_LOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_SHARED);

/*
     * Notify owners of system sampling PMCs about KLD operations.
@ -1443,7 +1510,7 @ pmc_process_kld_unload(struct pmckern_ma {
struct pmc_owner *po;

- sx_assert(&pmc_sx, SX_LOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_SHARED);

LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
if (po->po_flags & PMC_PO_OWNS_LOGFILE)
@ -1572,7 +1639,7 @ pmc_hook_handler(struct thread *td, int
struct pmc_process *pp;
struct pmckern_procexec *pk;

- sx_assert(&pmc_sx, SX_XLOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

p = td->td_proc;
pmc_getfilename(p->p_textvp, &fullpath, &freepath);
@ -1701,28 +1768,28 @ pmc_hook_handler(struct thread *td, int * had already processed the interrupt). We don't * lose the interrupt sample.
*/
- atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
- pmc_process_samples(PCPU_GET(cpuid));
+ atomic_clear_int(&pmc_cpumask, (1 << mycpu->gd_cpuid));
+ pmc_process_samples(mycpu->gd_cpuid);
break;
case PMC_FN_KLD_LOAD:
- sx_assert(&pmc_sx, SX_LOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_SHARED);
pmc_process_kld_load((struct pmckern_map_in *) arg);
break;
case PMC_FN_KLD_UNLOAD:
- sx_assert(&pmc_sx, SX_LOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_SHARED);
pmc_process_kld_unload((struct pmckern_map_out *) arg);
break;
case PMC_FN_MMAP:
- sx_assert(&pmc_sx, SX_LOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_SHARED);
pmc_process_mmap(td, (struct pmckern_map_in *) arg);
break;
case PMC_FN_MUNMAP:
- sx_assert(&pmc_sx, SX_LOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_SHARED);
pmc_process_munmap(td, (struct pmckern_map_out *) arg);
break;

@ -1763,7 +1830,7 @ pmc_allocate_owner_descriptor(struct pro
LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */

TAILQ_INIT(&po->po_logbuffers);
- mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
+ spin_init(&po->po_lock);
PMCDBG(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
p, p->p_pid, p->p_comm, po);
@ -1778,7 +1845,7 @ pmc_destroy_owner_descriptor(struct pmc_
PMCDBG(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);

- mtx_destroy(&po->po_mtx);
+ spin_uninit(&po->po_lock);
FREE(po, M_PMC);
}

@ -1811,7 +1878,7 @ pmc_find_process_descriptor(struct proc
sizeof(struct pmc_targetstate), M_PMC, M_ZERO|M_WAITOK);
}

- mtx_lock_spin(&pmc_processhash_mtx);
+ spin_lock_wr(&pmc_processhash_lock);
LIST_FOREACH(pp, pph, pp_next)
if (pp->pp_proc == p)
break;
@ -1826,7 +1893,7 @ pmc_find_process_descriptor(struct proc
pp = ppnew;
ppnew = NULL;
}
- mtx_unlock_spin(&pmc_processhash_mtx);
+ spin_unlock_wr(&pmc_processhash_lock);

if (pp != NULL && ppnew != NULL)
FREE(ppnew, M_PMC);
@ -1845,9 +1912,9 @ pmc_remove_process_descriptor(struct pmc
("[pmc,%d] Removing process descriptor %p with count %d",
LINE, pp, pp->pp_refcnt));

- mtx_lock_spin(&pmc_processhash_mtx);
+ spin_lock_wr(&pmc_processhash_lock);
LIST_REMOVE(pp, pp_next);
- mtx_unlock_spin(&pmc_processhash_mtx);
+ spin_unlock_wr(&pmc_processhash_lock);
}

@ -1929,7 +1996,7 @ pmc_wait_for_pmc_idle(struct pmc *pm)
#ifdef DEBUG
volatile int maxloop;

- maxloop = 100 * mp_ncpus;
+ maxloop = 100 * ncpus;
#endif

/*
@ -1971,7 +2038,7 @ pmc_release_pmc_descriptor(struct pmc *p
struct pmc_target *ptgt, *tmp;
struct pmc_binding pb;

- sx_assert(&pmc_sx, SX_XLOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

KASSERT(pm, ("[pmc,%d] null pmc", LINE));

@ -2065,7 +2132,7 @ pmc_release_pmc_descriptor(struct pmc *p * it from the hash table. The module-wide SX lock * protects us from races.
/
- LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
+ LIST_FOREACH_MUTABLE(ptgt, &pm->pm_targets, pt_next, tmp) {
pp = ptgt->pt_process;
pmc_unlink_target_process(pm, pp); /
frees 'ptgt' */

@ -2082,7 +2149,7 @ pmc_release_pmc_descriptor(struct pmc *p
}
}

- cpu = curthread->td_oncpu; /* setup cpu for pmd_release() /
+ cpu = curthread->td_gd->gd_cpuid; /
setup cpu for pmd_release() */

}

@ -2119,7 +2186,7 @ pmc_register_owner(struct proc *p, struc {
struct pmc_owner *po;

- sx_assert(&pmc_sx, SX_XLOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

if ((po = pmc_find_owner_descriptor(p)) == NULL)
if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
@ -2222,7 +2289,7 @ pmc_can_allocate_row(int ri, enum pmc_mo {
enum pmc_disp disp;

- sx_assert(&pmc_sx, SX_XLOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);

@ -2456,7 +2523,7 @ pmc_stop(struct pmc *pm)

cpu = PMC_TO_CPU(pm);

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[pmc,%d] illegal cpu=%d", LINE, cpu));

if (pmc_cpu_is_disabled(cpu))
@ -2503,25 +2570,27 @ static const char *pmc_op_to_name[] = {
*/
#define    PMC_GET_SX_XLOCK(...) do {        \
- sx_xlock(&pmc_sx); \
+ lockmgr(&pmc_lock, LK_EXCLUSIVE); \
if (pmc_hook == NULL) { \
- sx_xunlock(&pmc_sx); \
+ lockmgr(&pmc_lock, LK_RELEASE); \
return VA_ARGS; \
} \
} while (0)
#define    PMC_DOWNGRADE_SX() do {            \
- sx_downgrade(&pmc_sx); \
+ lockmgr(&pmc_lock, LK_DOWNGRADE); \
is_sx_downgraded = 1; \
} while (0)
static int
-pmc_syscall_handler(struct thread *td, void *syscall_args)
+pmc_syscall_handler(void *syscall_args) {
int error, is_sx_downgraded, op;
struct pmc_syscall_args *c;
void *arg;
+ struct thread *td;

+ td = curthread;
PMC_GET_SX_XLOCK(ENOSYS);

DROP_GIANT();
@ -2557,7 +2626,7 @ pmc_syscall_handler(struct thread *td, v
struct pmckern_map_in *km, *kmbase;
struct pmc_op_configurelog cl;

- sx_assert(&pmc_sx, SX_XLOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
break;
@ -2617,7 +2686,7 @ pmc_syscall_handler(struct thread *td, v {
struct pmc_owner *po;

- sx_assert(&pmc_sx, SX_XLOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
error = EINVAL;
@ -2637,7 +2706,7 @ pmc_syscall_handler(struct thread *td, v
struct pmc_op_getcpuinfo gci;
gci.pm_cputype = md->pmd_cputype;
- gci.pm_ncpu = mp_ncpus;
+ gci.pm_ncpu = ncpus;
gci.pm_npmc = md->pmd_npmc;
gci.pm_nclass = md->pmd_nclass;
bcopy(md->pmd_classes, &gci.pm_classes,
@ -2705,7 +2774,7 @ pmc_syscall_handler(struct thread *td, v
if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
break;

- if (cpu >= (unsigned int) mp_ncpus) {
+ if (cpu >= (unsigned int) ncpus) {
error = EINVAL;
break;
}
@ -2785,7 +2854,7 @ pmc_syscall_handler(struct thread *td, v
struct pmc_op_pmcadmin pma;
struct pmc_binding pb;

- sx_assert(&pmc_sx, SX_XLOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

KASSERT(td == curthread,
("[pmc,%d] td != curthread", LINE));
@ -2799,7 +2868,7 @ pmc_syscall_handler(struct thread *td, v
cpu = pma.pm_cpu;

- if (cpu < 0 || cpu >= mp_ncpus) {
+ if (cpu < 0 || cpu >= ncpus) {
error = EINVAL;
break;
}
@ -2892,7 +2961,7 @ pmc_syscall_handler(struct thread *td, v

if ((mode != PMC_MODE_SS  &&  mode != PMC_MODE_SC  &&
mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
- (cpu != (u_int) PMC_CPU_ANY && cpu >= (u_int) mp_ncpus)) {
+ (cpu != (u_int) PMC_CPU_ANY && cpu >= (u_int) ncpus)) {
error = EINVAL;
break;
}
@ -2925,7 +2994,7 @ pmc_syscall_handler(struct thread *td, v
*/
if (PMC_IS_SYSTEM_MODE(mode)) {
- if (jailed(curthread->td_ucred)) {
+ if (jailed(curthread->td_proc->p_ucred)) {
error = EPERM;
break;
}
@ -3017,7 +3086,7 @ pmc_syscall_handler(struct thread *td, v
pmc_can_allocate_rowindex(
curthread->td_proc, n,
PMC_CPU_ANY) == 0 &&
- md->pmd_allocate_pmc(curthread->td_oncpu,
+ md->pmd_allocate_pmc(curthread->td_gd->gd_cpuid,
n, pmc, &pa) == 0)
break;
}
@ -3119,7 +3188,7 @ pmc_syscall_handler(struct thread *td, v
struct proc *p;
struct pmc_op_pmcattach a;

- sx_assert(&pmc_sx, SX_XLOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

if ((error = copyin(arg, &a, sizeof(a))) != 0)
break;
@ -3405,8 +3474,8 @ pmc_syscall_handler(struct thread *td, v
ri = PMC_TO_ROWINDEX(pm);

- mtx_pool_lock_spin(pmc_mtxpool, pm);
- cpu = curthread->td_oncpu;
+ spin_lock_wr(&pm->lock);
+ cpu = curthread->td_gd->gd_cpuid;

if (prw.pm_flags & PMC_F_OLDVALUE) {
if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
@ -3419,7 +3488,7 @ pmc_syscall_handler(struct thread *td, v
if (prw.pm_flags & PMC_F_NEWVALUE)
pm->pm_gv.pm_savedvalue = prw.pm_value;

- mtx_pool_unlock_spin(pmc_mtxpool, pm);
+ spin_unlock_wr(&pm->lock);

} else { /* System mode PMCs */
cpu = PMC_TO_CPU(pm);
@ -3512,7 +3581,7 @ pmc_syscall_handler(struct thread *td, v
struct pmc *pm;
struct pmc_op_simple sp;

- sx_assert(&pmc_sx, SX_XLOCKED);
+ KKASSERT(lockstatus(&pmc_lock, curthread) == LK_EXCLUSIVE);

if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
break;
@ -3614,10 +3683,7 @ pmc_syscall_handler(struct thread *td, v
break;
}

- if (is_sx_downgraded)
- sx_sunlock(&pmc_sx);
- else
- sx_xunlock(&pmc_sx);
+ lockmgr(&pmc_lock, LK_RELEASE);

if (error)
atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
@ -3708,14 +3774,13 @ pmc_process_samples(int cpu) {
int n, ri;
struct pmc *pm;
- struct thread *td;
struct pmc_owner *po;
struct pmc_sample *ps;
struct pmc_samplebuffer *psb;

- KASSERT(PCPU_GET(cpuid) == cpu,
+ KASSERT(mycpu->gd_cpuid == cpu,
("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", LINE,
- PCPU_GET(cpuid), cpu));
+ mycpu->gd_cpuid, cpu));

psb = pmc_pcpu[cpu]->pc_sb;

@ -3749,8 +3814,7 @ pmc_process_samples(int cpu)
*/
if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
if (ps->ps_usermode) {
- td = FIRST_THREAD_IN_PROC(po->po_owner);
- addupc_intr(td, ps->ps_pc, 1);
+ addupc_intr(po->po_owner, ps->ps_pc, 1);
}
goto entrydone;
}
@ -3824,7 +3888,7 @ pmc_process_samples(int cpu)
*/

static void
-pmc_process_exit(void *arg __unused, struct proc *p)
+pmc_process_exit(struct thread *td) {
int is_using_hwpmcs;
int cpu;
@ -3833,6 +3897,10 @ pmc_process_exit(void *arg __unused, str
struct pmc_process *pp;
struct pmc_owner *po;
pmc_value_t newvalue, tmp;
+ struct proc *p;

p = td->td_proc;
+ KKASSERT(p);
PROC_LOCK(p);
is_using_hwpmcs = p->p_flag & P_HWPMC;
@ -3869,7 +3937,7 @ pmc_process_exit(void *arg __unused, str
critical_enter(); /* no preemption */

- cpu = curthread->td_oncpu;
+ cpu = curthread->td_gd->gd_cpuid;

if ((pp = pmc_find_process_descriptor(p,
PMC_FLAG_REMOVE)) != NULL) {
@ -3925,10 +3993,10 @ pmc_process_exit(void *arg __unused, str
tmp = newvalue -
PMC_PCPU_SAVED(cpu,ri);

- mtx_pool_lock_spin(pmc_mtxpool, pm);
+ spin_lock_wr(&pm->lock);
pm->pm_gv.pm_savedvalue = tmp;
pp->pp_pmcs[ri].pp_pmcval += tmp;
- mtx_pool_unlock_spin(pmc_mtxpool, pm);
spin_unlock_wr(&pm->lock);
}

atomic_subtract_rel_32(&pm->pm_runcount,1);
@ -3976,7 +4044,7 @ pmc_process_exit(void *arg __unused, str
pmc_destroy_owner_descriptor(po);
}

- sx_xunlock(&pmc_sx);
+ lockmgr(&pmc_lock, LK_RELEASE);
}

/*
@ -3987,7 +4055,7 @ pmc_process_exit(void *arg __unused, str
*/
static void
-pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
+pmc_process_fork(struct proc *p1, struct proc *newproc,
int flags) {
int is_using_hwpmcs;
@ -4068,7 +4136,7 @ pmc_process_fork(void *arg __unused, str
PROC_UNLOCK(newproc);
done:
- sx_xunlock(&pmc_sx);
+ lockmgr(&pmc_lock, LK_RELEASE);
}

@ -4105,10 +4173,10 @ pmc_initialize(void)
/* check kernel version */
if (pmc_kernel_version != PMC_VERSION) {
if (pmc_kernel_version == 0)
- printf("hwpmc: this kernel has not been compiled with "
+ kprintf("hwpmc: this kernel has not been compiled with "
"'options HWPMC_HOOKS'.\n");
else
- printf("hwpmc: kernel version (0x%x) does not match "
+ kprintf("hwpmc: kernel version (0x%x) does not match "
"module version (0x%x).\n", pmc_kernel_version,
PMC_VERSION);
return EPROGMISMATCH;
@ -4119,13 +4187,13 @ pmc_initialize(void)
*/

if (pmc_hashsize <= 0) {
- (void) printf("hwpmc: tunable hashsize=%d must be greater "
+ (void) kprintf("hwpmc: tunable hashsize=%d must be greater "
"than zero.\n", pmc_hashsize);
pmc_hashsize = PMC_HASH_SIZE;
}
if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
- (void) printf("hwpmc: tunable nsamples=%d out of range.\n",
+ (void) kprintf("hwpmc: tunable nsamples=%d out of range.\n",
pmc_nsamples);
pmc_nsamples = PMC_NSAMPLES;
}
@ -4136,16 +4204,16 @ pmc_initialize(void)
return ENOSYS;
/* allocate space for the per-cpu array /
- MALLOC(pmc_pcpu, struct pmc_cpu **, mp_ncpus * sizeof(struct pmc_cpu *),
+ MALLOC(pmc_pcpu, struct pmc_cpu **, ncpus * sizeof(struct pmc_cpu *),
M_PMC, M_WAITOK|M_ZERO);
/* per-cpu 'saved values' for managing process-mode PMCs */
MALLOC(pmc_pcpu_saved, pmc_value_t *,
- sizeof(pmc_value_t) * mp_ncpus * md->pmd_npmc, M_PMC, M_WAITOK);
+ sizeof(pmc_value_t) * ncpus * md->pmd_npmc, M_PMC, M_WAITOK);
/* perform cpu dependent initialization */
pmc_save_cpu_binding(&pb);
- for (cpu = 0; cpu < mp_ncpus; cpu++) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
if (pmc_cpu_is_disabled(cpu))
continue;
pmc_select_cpu(cpu);
@ -4158,7 +4226,7 @ pmc_initialize(void)
return error;
/* allocate space for the sample array */
- for (cpu = 0; cpu < mp_ncpus; cpu++) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
if (pmc_cpu_is_disabled(cpu))
continue;
MALLOC(sb, struct pmc_samplebuffer *,
@ -4175,7 +4243,7 @ pmc_initialize(void)
}
/* allocate space for the row disposition array */
- pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
+ pmc_pmcdisp = kmalloc(sizeof(enum pmc_mode) * md->pmd_npmc,
M_PMC, M_WAITOK|M_ZERO);
KASSERT(pmc_pmcdisp != NULL,
@ -4191,25 +4259,22 @ pmc_initialize(void)
pmc_processhash = hashinit(pmc_hashsize, M_PMC,
&pmc_processhashmask);
- mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
- MTX_SPIN);
+ spin_init(&pmc_processhash_lock);
LIST_INIT(&pmc_ss_owners);
pmc_ss_count = 0;

- /* allocate a pool of spin mutexes */
- pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
- MTX_SPIN);
-
PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
"targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
pmc_processhash, pmc_processhashmask);

/* register process {exit,fork,exec} handlers */
- pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
- pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
- pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
- pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
+ if (at_exit(pmc_process_exit)) {
+ panic("cannot register exit() handler\n");
+ }
+ if (at_fork(pmc_process_fork)) {
+ panic("cannot register fork() handler\n");
+ }
/* initialize logging */
pmclog_initialize();
@ -4219,9 +4284,9 @ pmc_initialize(void)
pmc_hook = pmc_hook_handler;
if (error == 0) {
- printf(PMC_MODULE_NAME ":");
+ kprintf(PMC_MODULE_NAME ":");
for (n = 0; n < (int) md->pmd_nclass; n++) {
- printf(" %s/%d/0x%b",
+ kprintf(" %s/%d/0x%b",
pmc_name_of_pmcclass[md->pmd_classes[n].pm_class],
md->pmd_nclasspmcs[n],
md->pmd_classes[n].pm_caps,
@ -4230,7 +4295,7 @ pmc_initialize(void)
"\6REA\7WRI\10INV\11QUA\12PRC"
"\13TAG\14CSC");
}
- printf("\n");
+ kprintf("\n");
}
return error;
@ -4254,9 +4319,9 @ pmc_cleanup(void)
atomic_store_rel_int(&pmc_cpumask, 0);
pmc_intr = NULL;

- sx_xlock(&pmc_sx);
+ lockmgr(&pmc_lock, LK_EXCLUSIVE);
if (pmc_hook == NULL) { /* being unloaded already */
- sx_xunlock(&pmc_sx);
+ lockmgr(&pmc_lock, LK_RELEASE);
return;
}

@ -4271,7 +4336,7 @ pmc_cleanup(void)
for (ph = pmc_ownerhash;
ph <= &pmc_ownerhash[pmc_ownerhashmask];
ph++) {
- LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
+ LIST_FOREACH_MUTABLE(po, ph, po_next, tmp) {
pmc_remove_owner(po);

/* send SIGBUS to owner processes */
@ -4281,18 +4346,14 @ pmc_cleanup(void)
po->po_owner->p_comm);
PROC_LOCK(po->po_owner);
- psignal(po->po_owner, SIGBUS);
+ lwpsignal(po->po_owner, NULL, SIGBUS);
PROC_UNLOCK(po->po_owner);
pmc_destroy_owner_descriptor(po);
}
}

- /* reclaim allocated data structures */
- if (pmc_mtxpool)
- mtx_pool_destroy(&pmc_mtxpool);

mtx_destroy(&pmc_processhash_mtx);
+ spin_uninit(&pmc_processhash_lock);
if (pmc_processhash) {
#ifdef DEBUG
struct pmc_process *pp;
@ -4321,7 +4382,7 @ pmc_cleanup(void)
("[pmc,%d] Global SS count not empty", LINE));

/* free the per-cpu sample buffers */
- for (cpu = 0; cpu < mp_ncpus; cpu++) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
if (pmc_cpu_is_disabled(cpu))
continue;
KASSERT(pmc_pcpu[cpu]->pc_sb != NULL,
@ -4335,7 +4396,7 @ pmc_cleanup(void)
PMCDBG(MOD,INI,3, "%s", "md cleanup");
if (md) {
pmc_save_cpu_binding(&pb);
- for (cpu = 0; cpu < mp_ncpus; cpu++) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
cpu, pmc_pcpu[cpu]);
if (pmc_cpu_is_disabled(cpu))
@ -4363,7 +4424,7 @ pmc_cleanup(void)
pmclog_shutdown();

- sx_xunlock(&pmc_sx); /* we are done /
+ lockmgr(&pmc_lock, LK_RELEASE); /
we are done */
}

/*
@ -4384,7 +4445,7 @ load (struct module *module __unused, in
if (error != 0)
break;
PMCDBG(MOD,INI,1, "syscall=%d ncpus=%d",
- pmc_syscall_num, mp_ncpus);
+ pmc_syscall_num, ncpus);
break;

diff upr dev/misc/hwpmc.freebsd/hwpmc_pentium.c dev/misc/hwpmc/hwpmc_pentium.c
--
dev/misc/hwpmc.freebsd/hwpmc_pentium.c
+++ dev/misc/hwpmc/hwpmc_pentium.c
@ -25,14 +25,12 @
*/

#include &lt;sys/cdefs.h&gt;
-_FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_pentium.c,v 1.4 2005/06/09 19:45:07 jkoshy Exp $");
+/*
_FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_pentium.c,v 1.4 2005/06/09 19:45:07 jkoshy Exp $");*/
#include &lt;sys/param.h&gt;
#include &lt;sys/lock.h&gt;
#include &lt;sys/mutex.h&gt;
#include &lt;sys/pmc.h&gt;
#include &lt;sys/pmckern.h&gt;
#include &lt;sys/smp.h&gt;
#include &lt;sys/systm.h&gt;
#include &lt;machine/cpufunc.h&gt;
diff upr dev/misc/hwpmc.freebsd/hwpmc_piv.c dev/misc/hwpmc/hwpmc_piv.c
--
dev/misc/hwpmc.freebsd/hwpmc_piv.c
+++ dev/misc/hwpmc/hwpmc_piv.c
@ -25,14 +25,14 @
*/
#include &lt;sys/cdefs.h&gt;
-_FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_piv.c,v 1.14 2007/04/19 08:02:51 jkoshy Exp $");
+/*
_FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_piv.c,v 1.14 2007/04/19 08:02:51 jkoshy Exp $");*/
#include &lt;sys/param.h&gt;
#include &lt;sys/lock.h&gt;
#include &lt;sys/mutex.h&gt;
#include &lt;sys/pmc.h&gt;
#include &lt;sys/pmckern.h&gt;
#include &lt;sys/smp.h&gt;
#include &lt;sys/spinlock.h&gt;
#include &lt;sys/spinlock2.h&gt;
#include &lt;sys/systm.h&gt;
#include &lt;machine/cpufunc.h&gt;
@ -447,7 +447,7 @ struct p4_cpu {
struct pmc_hw pc_hwpmcs[P4_NPMCS];
struct pmc_hw pc_p4pmcs[P4_NPMCS];
char pc_escrs[P4_NESCR];
- struct mtx pc_mtx; /
spin lock /
+ struct spinlock pc_lock; /
spin lock /
uint32_t pc_intrflag; /
NMI handler flags /
unsigned int pc_intrlock; /
NMI handler spin lock /
unsigned char pc_flags[P4_NPMCS]; /
4 bits each: {cfg,run}count */
@ -511,7 +511,7 @ struct p4_logicalcpu {
*/
#define P4_PCPU_ACQ_INTR_SPINLOCK(PC) do { \
while (!atomic_cmpset_acq_int(&pc->pc_intrlock, 0, 1)) \
- ia32_pause(); \
+ cpu_pause(); \
} while (0)
#define P4_PCPU_REL_INTR_SPINLOCK(PC) \
atomic_store_rel_int(&pc->pc_intrlock, 0);
@ -527,7 +527,7 @ static int p4_escrdisp[P4_NESCR];
KASSERT] <= 0, ("[p4,%d] row disposition error",\
LINE)); \
atomic_add_int(&p4_escrdisp[(E)], 1); \
- KASSERT(p4_escrdisp[(E)] >= (-mp_ncpus), ("[p4,%d] row " \
+ KASSERT(p4_escrdisp[(E)] >= (-ncpus), ("[p4,%d] row " \
"disposition error", LINE)); \
} while (0)

@ -591,7 +591,7 @ p4_init(int cpu)
struct p4_logicalcpu *plcs;
struct pmc_hw *phw;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p4,%d] insane cpu number %d", LINE, cpu));

PMCDBG(MDP,INI,0, "p4-init cpu=%d logical=%d", cpu,
@ -662,7 +662,7 @ p4_init(int cpu)
*pescr++ = P4_INVALID_PMC_INDEX;
pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;

- mtx_init(&pcs->pc_mtx, "p4-pcpu", "pmc-leaf", MTX_SPIN);
+ spin_init(&pcs->pc_lock);

return 0;
}
@ -692,7 +692,7 @ p4_cleanup(int cpu) * full MD state.
*/
if (!P4_CPU_IS_HTT_SECONDARY(cpu))
- mtx_destroy(&pcs->pc_mtx);
+ spin_uninit(&pcs->pc_lock);
FREE(pcs, M_PMC);

@ -756,7 +756,7 @ p4_read_pmc(int cpu, int ri, pmc_value_t
struct pmc_hw *phw;
pmc_value_t tmp;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p4,%d] illegal CPU value d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P4_NPMCS,
("[p4,%d] illegal row-index %d", LINE, ri));
@ -834,7 +834,7 @ p4_write_pmc(int cpu, int ri, pmc_value_
const struct pmc_hw *phw;
const struct p4pmc_descr *pd;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[amd,%d] illegal CPU value d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P4_NPMCS,
("[amd,%d] illegal row-index %d", LINE, ri));
@ -908,7 +908,7 @ p4_config_pmc(int cpu, int ri, struct pm
struct p4_cpu *pc;
int cfgflags, cpuflag;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p4,%d] illegal CPU d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P4_NPMCS,
("[p4,%d] illegal row-index d", LINE, ri));
@ -934,7 +934,7 @ p4_config_pmc(int cpu, int ri, struct pm
(p4_system_has_htt &x%x
phw->phw_pmc == pm),
("[p4,%d] hwpmc not unconfigured before re-config", LINE));

- mtx_lock_spin(&pc->pc_mtx);
+ spin_lock_wr(&pc->pc_lock);
cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri);

KASSERT(cfgflags >= 0 || cfgflags <= 3,
@ -969,7 +969,7 @ p4_config_pmc(int cpu, int ri, struct pm
P4_PCPU_SET_CFGFLAGS(pc,ri,cfgflags);

- mtx_unlock_spin(&pc->pc_mtx);
+ spin_unlock_wr(&pc->pc_lock);

return 0;
}
@ -988,9 +988,9 @ p4_get_config(int cpu, int ri, struct pm
pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)];
phw = pc->pc_hwpmcs[ri];

- mtx_lock_spin(&pc->pc_mtx);
+ spin_lock_wr(&pc->pc_lock);
cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri);
- mtx_unlock_spin(&pc->pc_mtx);
+ spin_unlock_wr(&pc->pc_lock);

if (cfgflags & P4_CPU_TO_FLAG(cpu))
ppm = phw->phw_pmc; / PMC config'ed on this CPU */
@ -1045,7 +1045,7 @ p4_allocate_pmc(int cpu, int ri, struct
struct p4_event_descr *pevent;
const struct p4pmc_descr *pd;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p4,%d] illegal CPU d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P4_NPMCS,
("[p4,%d] illegal row-index value %d", LINE, ri));
@ -1292,7 +1292,7 @ p4_start_pmc(int cpu, int ri)
struct pmc_hw *phw;
struct p4pmc_descr *pd;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p4,%d] illegal CPU value d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P4_NPMCS,
("[p4,%d] illegal row-index %d", LINE, ri));
@ -1348,7 +1348,7 @ p4_start_pmc(int cpu, int ri) *
*/

- mtx_lock_spin(&pc->pc_mtx);
+ spin_lock_wr(&pc->pc_lock);

rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
KASSERT(rc == 0 || rc == 1,
@ -1419,7 +1419,7 @ p4_start_pmc(int cpu, int ri)
++rc;
P4_PCPU_SET_RUNCOUNT(pc,ri,rc);

- mtx_unlock_spin(&pc->pc_mtx);
+ spin_unlock_wr(&pc->pc_lock);

PMCDBG(MDP,STA,2,"p4-start cpu=%d rc=%d ri=%d escr=%d " 
"escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x v=%jx", cpu, rc,
@ -1444,7 +1444,7 @ p4_stop_pmc(int cpu, int ri)
struct p4pmc_descr *pd;
pmc_value_t tmp;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p4,%d] illegal CPU value d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P4_NPMCS,
("[p4,%d] illegal row index %d", LINE, ri));
@ -1492,7 +1492,7 @ p4_stop_pmc(int cpu, int ri)
escrtbits >>= 2;
}

- mtx_lock_spin(&pc->pc_mtx);
+ spin_lock_wr(&pc->pc_lock);

rc = P4_PCPU_GET_RUNCOUNT(pc,ri);

@ -1531,7 +1531,7 @ p4_stop_pmc(int cpu, int ri)
wrmsr(pd->pm_cccr_msr, cccrvalue);
}

- mtx_unlock_spin(&pc->pc_mtx);
+ spin_unlock_wr(&pc->pc_lock);

PMCDBG(MDP,STO,2, "p4-stop cpu=%d rc=%d ri=%d escrmsr=0x%x " 
"escrval=0x%x cccrval=0x%x v=%jx", cpu, rc, ri, escrmsr,
@ -1714,7 +1714,7 @ p4_describe(int cpu, int ri, struct pmc_
struct pmc_hw *phw;
const struct p4pmc_descr *pd;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p4,%d] illegal CPU d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P4_NPMCS,
("[p4,%d] row-index %d out of range", LINE, ri));
diff upr dev/misc/hwpmc.freebsd/hwpmc_ppro.c dev/misc/hwpmc/hwpmc_ppro.c
--
dev/misc/hwpmc.freebsd/hwpmc_ppro.c
+++ dev/misc/hwpmc/hwpmc_ppro.c
@ -25,14 +25,12 @
*/

#include &lt;sys/cdefs.h&gt;
-_FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_ppro.c,v 1.9 2005/07/14 15:09:14 jkoshy Exp $");
+/*
_FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_ppro.c,v 1.9 2005/07/14 15:09:14 jkoshy Exp $");*/
#include &lt;sys/param.h&gt;
#include &lt;sys/lock.h&gt;
#include &lt;sys/mutex.h&gt;
#include &lt;sys/pmc.h&gt;
#include &lt;sys/pmckern.h&gt;
#include &lt;sys/smp.h&gt;
#include &lt;sys/systm.h&gt;
#include &lt;machine/cpufunc.h&gt;
@ -331,7 +329,7 @ p6_init(int cpu)
struct p6_cpu *pcs;
struct pmc_hw *phw;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p6,%d] bad cpu %d", LINE, cpu));

PMCDBG(MDP,INI,0,"p6-init cpu=%d", cpu);
@ -361,7 +359,7 @ p6_cleanup(int cpu) {
struct pmc_cpu *pcs;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p6,%d] bad cpu %d", LINE, cpu));

PMCDBG(MDP,INI,0,"p6-cleanup cpu=%d", cpu);
@ -507,7 +505,7 @ p6_allocate_pmc(int cpu, int ri, struct
(void) cpu;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p4,%d] illegal CPU d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P6_NPMCS,
("[p4,%d] illegal row-index value %d", LINE, ri));
@ -611,7 +609,7 @ p6_release_pmc(int cpu, int ri, struct p

PMCDBG(MDP,REL,1, "p6-release cpu=%d ri=%d pm=%p", cpu, ri, pm);

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p6,%d] illegal CPU value d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P6_NPMCS,
("[p6,%d] illegal row-index %d", LINE, ri));
@ -633,7 +631,7 @ p6_start_pmc(int cpu, int ri)
struct pmc_hw *phw;
const struct p6pmc_descr *pd;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p6,%d] illegal CPU value d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P6_NPMCS,
("[p6,%d] illegal row-index %d", LINE, ri));
@ -677,7 +675,7 @ p6_stop_pmc(int cpu, int ri)
struct pmc_hw *phw;
struct p6pmc_descr *pd;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p6,%d] illegal cpu value d", LINE, cpu));
KASSERT(ri >= 0 &x%x
ri < P6_NPMCS,
("[p6,%d] illegal row index %d", LINE, ri));
@ -719,7 +717,7 @ p6_intr(int cpu, uintptr_t eip, int user
struct pmc_hw *phw;
pmc_value_t v;

- KASSERT(cpu >= 0 && cpu < mp_ncpus,
+ KASSERT(cpu >= 0 && cpu < ncpus,
("[p6,%d] CPU %d out of range", LINE, cpu));

retval = 0;
diff upr dev/misc/hwpmc.freebsd/hwpmc_x86.c dev/misc/hwpmc/hwpmc_x86.c
--
dev/misc/hwpmc.freebsd/hwpmc_x86.c
+++ dev/misc/hwpmc/hwpmc_x86.c
@ -25,30 +25,24 @
*/
#include &lt;sys/cdefs.h&gt;
-_FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_x86.c,v 1.5 2006/04/04 02:36:04 ps Exp $");
+/*
_FBSDID("$FreeBSD: src/sys/dev/hwpmc/hwpmc_x86.c,v 1.5 2006/04/04 02:36:04 ps Exp $");*/
#include &lt;sys/param.h&gt;
#include &lt;sys/bus.h&gt;
-#include &lt;sys/pmc.h&gt;
#include &lt;sys/spinlock.h&gt;
#include &lt;sys/systm.h&gt;
#include &lt;sys/pmc.h&gt;

#include <machine/apicreg.h>
#include <machine/pmc_mdep.h>
#include <machine/md_var.h>
-
-extern volatile lapic_t *lapic;
#include <machine/pmc_mdep.h>
#include <machine/smp.h>

void
pmc_x86_lapic_enable_pmc_interrupt(void) {
- uint32_t value;

value = lapic->lvt_pcint;
- value &= ~APIC_LVT_M;
- lapic->lvt_pcint = value;
+ lapic_enable_pmc_interrupt();
}

-
static struct pmc_mdep *
pmc_intel_initialize(void) {
@ -98,7 +92,7 @ pmc_intel_initialize(void)
#endif

if ((int) cputype == -1) {
- printf("pmc: Unknown Intel CPU.\n");
+ kprintf("pmc: Unknown Intel CPU.\n");
return NULL;
}

Index: sys/proc.h
=================================================================
RCS file: /home/aggelos/imports/vcs/dcvs/src/sys/sys/proc.h,v
retrieving revision 1.114
diff u -p -r1.114 proc.h
--
sys/proc.h 30 Aug 2007 20:41:00 -0000 1.114
++ sys/proc.h 24 Nov 2007 00:59:16 -0000
@ -332,6 +332,7 @ struct proc {
#define P_WEXIT 0x02000 /* Working on exiting (master exit) /
#define P_EXEC 0x04000 /
Process called exec. /
#define P_CONTINUED 0x08000 /
Proc has continued from a stopped state. /
#define P_HWPMC 0x10000 / Process is using HWPMCs */

/* Should probably be changed into a hold count. /
/
was P_NOSWAP 0x08000 was: Do not swap upages; p->p_hold */
Actions

Also available in: Atom PDF