Submit #2714 » patch-spinlock.txt

dclink, 08/16/2014 03:03 PM
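
Summary of the interface change for reviewers: spin_init() gains a second argument, const char *descr, which is stored in a new descr field of struct spinlock, and SPINLOCK_INITIALIZER() takes the description as an additional first argument; every in-tree caller is updated to pass a short identifying string. Below is a minimal usage sketch of the new API (the "mydev" names are hypothetical, for illustration only, and are not part of the patch):

/*
 * Usage sketch (not part of the patch): a made-up "mydev" driver showing
 * the two-argument spin_init() and the extended SPINLOCK_INITIALIZER().
 */
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

/* Statically allocated lock: the first macro argument names the lock. */
static struct spinlock mydev_list_spin =
	SPINLOCK_INITIALIZER("mydev_list_spin", &mydev_list_spin);

struct mydev_softc {
	struct spinlock	sc_lock;
	int		sc_count;
};

static void
mydev_init_locks(struct mydev_softc *sc)
{
	/*
	 * Old API: spin_init(&sc->sc_lock);
	 * New API: the description is stored in sc_lock.descr.
	 */
	spin_init(&sc->sc_lock, "mydevattach");

	spin_lock(&sc->sc_lock);
	sc->sc_count = 0;		/* critical section */
	spin_unlock(&sc->sc_lock);
}

With DEBUG_LOCKS enabled, the kprintf() calls added to sys/sys/spinlock2.h print these descriptions as the locks are acquired and released, which is the point of carrying the name in the structure.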

 
diff --git a/share/man/man9/spinlock.9 b/share/man/man9/spinlock.9
index 2c2a115..f8e3aee 100644
--- a/share/man/man9/spinlock.9
+++ b/share/man/man9/spinlock.9
@@ -46,7 +46,7 @@
.In sys/spinlock.h
.In sys/spinlock2.h
.Ft void
-.Fn spin_init "struct spinlock *mtx"
+.Fn spin_init "struct spinlock *mtx" "const char *descr"
.Ft void
.Fn spin_uninit "struct spinlock *mtx"
.Ft void
diff --git a/sys/bus/cam/cam_sim.c b/sys/bus/cam/cam_sim.c
index 85c7dba..5b3c704 100644
--- a/sys/bus/cam/cam_sim.c
+++ b/sys/bus/cam/cam_sim.c
@@ -211,7 +211,7 @@ cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll,
SLIST_INIT(&sim->ccb_freeq);
TAILQ_INIT(&sim->sim_doneq);
- spin_init(&sim->sim_spin);
+ spin_init(&sim->sim_spin, "cam_sim_alloc");
return (sim);
}
diff --git a/sys/bus/cam/cam_xpt.c b/sys/bus/cam/cam_xpt.c
index fc0f2f9..075113c 100644
--- a/sys/bus/cam/cam_xpt.c
+++ b/sys/bus/cam/cam_xpt.c
@@ -1452,13 +1452,13 @@ xpt_init(void *dummy)
STAILQ_INIT(&xsoftc.highpowerq);
xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
- spin_init(&cam_simq_spin);
+ spin_init(&cam_simq_spin, "cam_simq_spin");
lockinit(&xsoftc.xpt_lock, "XPT lock", 0, LK_CANRECURSE);
lockinit(&xsoftc.xpt_topo_lock, "XPT topology lock", 0, LK_CANRECURSE);
SLIST_INIT(&cam_dead_sim.ccb_freeq);
TAILQ_INIT(&cam_dead_sim.sim_doneq);
- spin_init(&cam_dead_sim.sim_spin);
+ spin_init(&cam_dead_sim.sim_spin, "cam_dead_sim");
cam_dead_sim.sim_action = dead_sim_action;
cam_dead_sim.sim_poll = dead_sim_poll;
cam_dead_sim.sim_name = "dead_sim";
diff --git a/sys/bus/pci/i386/pci_cfgreg.c b/sys/bus/pci/i386/pci_cfgreg.c
index 0aea827..003eac5 100644
--- a/sys/bus/pci/i386/pci_cfgreg.c
+++ b/sys/bus/pci/i386/pci_cfgreg.c
@@ -52,7 +52,7 @@
#include <machine/pmap.h>
#if defined(__DragonFly__)
-#define mtx_init(a, b, c, d) spin_init(a)
+#define mtx_init(a, b, c, d) spin_init(a, b)
#define mtx_lock_spin(a) spin_lock(a)
#define mtx_unlock_spin(a) spin_unlock(a)
#endif
diff --git a/sys/bus/pci/x86_64/pci_cfgreg.c b/sys/bus/pci/x86_64/pci_cfgreg.c
index 0d12a73..9da2043 100644
--- a/sys/bus/pci/x86_64/pci_cfgreg.c
+++ b/sys/bus/pci/x86_64/pci_cfgreg.c
@@ -89,7 +89,7 @@ pci_cfgregopen(void)
if (!inited) {
inited = 1;
- spin_init(&pcicfg_spin);
+ spin_init(&pcicfg_spin, "pcicfg");
}
if (cfgmech != CFGMECH_NONE)
diff --git a/sys/dev/acpica/Osd/OsdSynch.c b/sys/dev/acpica/Osd/OsdSynch.c
index 863536a..2912004 100644
--- a/sys/dev/acpica/Osd/OsdSynch.c
+++ b/sys/dev/acpica/Osd/OsdSynch.c
@@ -95,7 +95,7 @@ AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
as = kmalloc(sizeof(*as), M_ACPISEM, M_INTWAIT | M_ZERO);
- spin_init(&as->as_spin);
+ spin_init(&as->as_spin, "AcpiOsSem");
as->as_units = InitialUnits;
as->as_maxunits = MaxUnits;
as->as_pendings = as->as_resetting = as->as_timeouts = 0;
@@ -345,7 +345,7 @@ AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
if (OutHandle == NULL)
return (AE_BAD_PARAMETER);
spin = kmalloc(sizeof(*spin), M_ACPISEM, M_INTWAIT|M_ZERO);
- spin_init(&spin->lock);
+ spin_init(&spin->lock, "AcpiOsLock");
#ifdef ACPI_DEBUG_LOCKS
spin->owner = NULL;
spin->func = "";
diff --git a/sys/dev/crypto/aesni/aesni.c b/sys/dev/crypto/aesni/aesni.c
index a4919d5..3c4b7d2 100644
--- a/sys/dev/crypto/aesni/aesni.c
+++ b/sys/dev/crypto/aesni/aesni.c
@@ -92,7 +92,7 @@ aesni_attach(device_t dev)
return (ENOMEM);
}
- spin_init(&sc->lock);
+ spin_init(&sc->lock, "aesniattach");
crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
return (0);
diff --git a/sys/dev/crypto/glxsb/glxsb.c b/sys/dev/crypto/glxsb/glxsb.c
index 17f15d7..40f012f 100644
--- a/sys/dev/crypto/glxsb/glxsb.c
+++ b/sys/dev/crypto/glxsb/glxsb.c
@@ -508,8 +508,8 @@ glxsb_crypto_setup(struct glxsb_softc *sc)
TAILQ_INIT(&sc->sc_sessions);
sc->sc_sid = 1;
- spin_init(&sc->sc_sessions_lock);
- spin_init(&sc->sc_task_mtx);
+ spin_init(&sc->sc_sessions_lock, "glxsb_sessions");
+ spin_init(&sc->sc_task_mtx, "glxsb_task");
if (crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0) != 0)
goto crypto_fail;
diff --git a/sys/dev/crypto/padlock/padlock.c b/sys/dev/crypto/padlock/padlock.c
index 96e663e..b203cf2 100644
--- a/sys/dev/crypto/padlock/padlock.c
+++ b/sys/dev/crypto/padlock/padlock.c
@@ -126,7 +126,7 @@ padlock_attach(device_t dev)
return (ENOMEM);
}
- spin_init(&sc->sc_sessions_lock);
+ spin_init(&sc->sc_sessions_lock, "padlock");
crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
diff --git a/sys/dev/disk/nata/ata-all.c b/sys/dev/disk/nata/ata-all.c
index 5a84352..0bd005b 100644
--- a/sys/dev/disk/nata/ata-all.c
+++ b/sys/dev/disk/nata/ata-all.c
@@ -122,8 +122,8 @@ ata_attach(device_t dev)
/* initialize the softc basics */
ch->dev = dev;
ch->state = ATA_IDLE;
- spin_init(&ch->state_mtx);
- spin_init(&ch->queue_mtx);
+ spin_init(&ch->state_mtx, "ataattach_state");
+ spin_init(&ch->queue_mtx, "ataattach_queue");
ata_queue_init(ch);
/* reset the controller HW, the channel and device(s) */
diff --git a/sys/dev/disk/nata/ata-chipset.c b/sys/dev/disk/nata/ata-chipset.c
index 287ab1e..3a1344f 100644
--- a/sys/dev/disk/nata/ata-chipset.c
+++ b/sys/dev/disk/nata/ata-chipset.c
@@ -3548,7 +3548,7 @@ ata_promise_chipinit(device_t dev)
/* setup host packet controls */
hpkt = kmalloc(sizeof(struct ata_promise_sx4),
M_TEMP, M_INTWAIT | M_ZERO);
- spin_init(&hpkt->mtx);
+ spin_init(&hpkt->mtx, "chipinit");
TAILQ_INIT(&hpkt->queue);
hpkt->busy = 0;
device_set_ivars(dev, hpkt);
@@ -5815,7 +5815,7 @@ ata_serialize(device_t dev, int flags)
if (!inited) {
serial = kmalloc(sizeof(struct ata_serialize),
M_TEMP, M_INTWAIT | M_ZERO);
- spin_init(&serial->locked_mtx);
+ spin_init(&serial->locked_mtx, "ataserialize");
serial->locked_ch = -1;
serial->restart_ch = -1;
device_set_ivars(ctlr->dev, serial);
diff --git a/sys/dev/disk/nata/ata-queue.c b/sys/dev/disk/nata/ata-queue.c
index ff7fda3..9ca13f2 100644
--- a/sys/dev/disk/nata/ata-queue.c
+++ b/sys/dev/disk/nata/ata-queue.c
@@ -92,7 +92,7 @@ ata_queue_request(struct ata_request *request)
ch = device_get_softc(request->parent);
callout_init_mp(&request->callout); /* serialization done via state_mtx */
if (!request->callback && !(request->flags & ATA_R_REQUEUE))
- spin_init(&request->done);
+ spin_init(&request->done, "ataqueuerqdone");
/* in ATA_STALL_QUEUE state we call HW directly */
if ((ch->state & ATA_STALL_QUEUE) && (request->flags & ATA_R_CONTROL)) {
diff --git a/sys/dev/disk/nata/ata-raid.c b/sys/dev/disk/nata/ata-raid.c
index c32111f..ff4d909 100644
--- a/sys/dev/disk/nata/ata-raid.c
+++ b/sys/dev/disk/nata/ata-raid.c
@@ -138,7 +138,7 @@ ata_raid_attach(struct ar_softc *rdp, int writeback)
char buffer[32];
int disk;
- spin_init(&rdp->lock);
+ spin_init(&rdp->lock, "ataraidattach");
ata_raid_config_changed(rdp, writeback);
/* sanitize arrays total_size % (width * interleave) == 0 */
@@ -498,7 +498,7 @@ ata_raid_strategy(struct dev_strategy_args *ap)
rebuild->dev = rdp->disks[this].dev;
rebuild->flags &= ~ATA_R_READ;
rebuild->flags |= ATA_R_WRITE;
- spin_init(&composite->lock);
+ spin_init(&composite->lock, "ardfspare");
composite->residual = request->bytecount;
composite->rd_needed |= (1 << drv);
composite->wr_depend |= (1 << drv);
@@ -557,7 +557,7 @@ ata_raid_strategy(struct dev_strategy_args *ap)
sizeof(struct ata_request));
mirror->this = this;
mirror->dev = rdp->disks[this].dev;
- spin_init(&composite->lock);
+ spin_init(&composite->lock, "ardfonline");
composite->residual = request->bytecount;
composite->wr_needed |= (1 << drv);
composite->wr_needed |= (1 << this);
diff --git a/sys/dev/disk/nata/ata-usb.c b/sys/dev/disk/nata/ata-usb.c
index 478a536..f081196 100644
--- a/sys/dev/disk/nata/ata-usb.c
+++ b/sys/dev/disk/nata/ata-usb.c
@@ -223,7 +223,7 @@ atausb_attach(device_t dev)
sc->timeout = 5000;
sc->locked_ch = NULL;
sc->restart_ch = NULL;
- spin_init(&sc->locked_mtx);
+ spin_init(&sc->locked_mtx, "atausbattach");
id = usbd_get_interface_descriptor(sc->iface);
switch (id->bInterfaceProtocol) {
@@ -880,8 +880,8 @@ ata_usbchannel_attach(device_t dev)
ch->hw.end_transaction = ata_usbchannel_end_transaction;
ch->hw.status = NULL;
ch->hw.command = NULL;
- spin_init(&ch->state_mtx);
- spin_init(&ch->queue_mtx);
+ spin_init(&ch->state_mtx, "usbattach_state");
+ spin_init(&ch->queue_mtx, "usbattach_queue");
ata_queue_init(ch);
/* XXX SOS reset the controller HW, the channel and device(s) */
diff --git a/sys/dev/disk/nata/atapi-cam.c b/sys/dev/disk/nata/atapi-cam.c
index 8fadcfa..d522f22 100644
--- a/sys/dev/disk/nata/atapi-cam.c
+++ b/sys/dev/disk/nata/atapi-cam.c
@@ -195,7 +195,7 @@ atapi_cam_attach(device_t dev)
return ENOMEM;
}
- spin_init(&scp->state_lock);
+ spin_init(&scp->state_lock, "atapicamattach");
scp->dev = dev;
scp->parent = device_get_parent(dev);
diff --git a/sys/dev/drm/drm_dma.c b/sys/dev/drm/drm_dma.c
index 75c528a..651c15f 100644
--- a/sys/dev/drm/drm_dma.c
+++ b/sys/dev/drm/drm_dma.c
@@ -46,7 +46,7 @@ int drm_dma_setup(struct drm_device *dev)
if (dev->dma == NULL)
return ENOMEM;
- spin_init(&dev->dma_lock);
+ spin_init(&dev->dma_lock, "drmdma_lock");
return 0;
}
diff --git a/sys/dev/drm/drm_mm.c b/sys/dev/drm/drm_mm.c
index 26fd87f..7dcdf90 100644
--- a/sys/dev/drm/drm_mm.c
+++ b/sys/dev/drm/drm_mm.c
@@ -643,7 +643,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mm->scanned_blocks = 0;
- spin_init(&mm->unused_lock);
+ spin_init(&mm->unused_lock, "drmmminit");
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
diff --git a/sys/dev/drm/i915/i915_dma.c b/sys/dev/drm/i915/i915_dma.c
index b82a4fe..a073a2b 100644
--- a/sys/dev/drm/i915/i915_dma.c
+++ b/sys/dev/drm/i915/i915_dma.c
@@ -1438,8 +1438,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
lockinit(&dev_priv->error_lock, "915err", 0, LK_CANRECURSE);
- spin_init(&dev_priv->rps.lock);
- spin_init(&dev_priv->dpio_lock);
+ spin_init(&dev_priv->rps.lock, "i915initrps");
+ spin_init(&dev_priv->dpio_lock, "i915initdpio");
lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
@@ -1565,7 +1565,7 @@ i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
i915_file_priv = kmalloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
M_WAITOK | M_ZERO);
- spin_init(&i915_file_priv->mm.lock);
+ spin_init(&i915_file_priv->mm.lock, "i915priv");
INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
file_priv->driver_priv = i915_file_priv;
diff --git a/sys/dev/drm/include/linux/wait.h b/sys/dev/drm/include/linux/wait.h
index 886fa8f..b974077 100644
--- a/sys/dev/drm/include/linux/wait.h
+++ b/sys/dev/drm/include/linux/wait.h
@@ -37,7 +37,7 @@ typedef struct {
static inline void
init_waitqueue_head(wait_queue_head_t *eq)
{
- spin_init(&eq->lock);
+ spin_init(&eq->lock, "linux_waitqueue");
}
#define wake_up(eq) wakeup_one(eq)
diff --git a/sys/dev/drm/radeon/radeon_device.c b/sys/dev/drm/radeon/radeon_device.c
index 6663847..4514025 100644
--- a/sys/dev/drm/radeon/radeon_device.c
+++ b/sys/dev/drm/radeon/radeon_device.c
@@ -1031,10 +1031,10 @@ int radeon_device_init(struct radeon_device *rdev,
lockinit(&rdev->dc_hw_i2c_mutex,
"drm__radeon_device__dc_hw_i2c_mutex", 0, LK_CANRECURSE);
atomic_set(&rdev->ih.lock, 0);
- spin_init(&rdev->gem.mutex);
+ spin_init(&rdev->gem.mutex, "radeon_gemmtx");
lockinit(&rdev->pm.mutex, "drm__radeon_device__pm__mutex", 0,
LK_CANRECURSE);
- spin_init(&rdev->gpu_clock_mutex);
+ spin_init(&rdev->gpu_clock_mutex, "radeon_clockmtx");
lockinit(&rdev->pm.mclk_lock, "drm__radeon_device__pm__mclk_lock", 0,
LK_CANRECURSE);
lockinit(&rdev->exclusive_lock, "drm__radeon_device__exclusive_lock",
@@ -1101,7 +1101,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
- spin_init(&rdev->mmio_idx_lock);
+ spin_init(&rdev->mmio_idx_lock, "radeon_mmio");
rdev->rmmio_rid = PCIR_BAR(2);
rdev->rmmio = bus_alloc_resource_any(rdev->dev, SYS_RES_MEMORY,
&rdev->rmmio_rid, RF_ACTIVE | RF_SHAREABLE);
diff --git a/sys/dev/drm/ttm/ttm_memory.c b/sys/dev/drm/ttm/ttm_memory.c
index bb7c37a..3b48ee1 100644
--- a/sys/dev/drm/ttm/ttm_memory.c
+++ b/sys/dev/drm/ttm/ttm_memory.c
@@ -253,7 +253,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
int i;
struct ttm_mem_zone *zone;
- spin_init(&glob->spin);
+ spin_init(&glob->spin, "ttmemglob");
glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
taskqueue_thread_enqueue, &glob->swap_queue);
taskqueue_start_threads(&glob->swap_queue, 1, 0, -1, "ttm swap");
diff --git a/sys/dev/misc/putter/putter.c b/sys/dev/misc/putter/putter.c
index 868115c..fffe062 100644
--- a/sys/dev/misc/putter/putter.c
+++ b/sys/dev/misc/putter/putter.c
@@ -170,7 +170,7 @@ int putterdebug = 0;
*/
/* protects both the list and the contents of the list elements */
-static struct spinlock pi_mtx = SPINLOCK_INITIALIZER(&pi_mtx);
+static struct spinlock pi_mtx = SPINLOCK_INITIALIZER("pi_mtx", &pi_mtx);
/*
* fd routines, for cloner
diff --git a/sys/dev/netif/ath/ath_hal/ah_osdep.c b/sys/dev/netif/ath/ath_hal/ah_osdep.c
index e5b27dd..fa57d63 100644
--- a/sys/dev/netif/ath/ath_hal/ah_osdep.c
+++ b/sys/dev/netif/ath/ath_hal/ah_osdep.c
@@ -78,7 +78,7 @@
* XXX This is a global lock for now; it should be pushed to
* a per-device lock in some platform-independent fashion.
*/
-struct spinlock ah_regser_spin = SPINLOCK_INITIALIZER(ah_regser_spin);
+struct spinlock ah_regser_spin = SPINLOCK_INITIALIZER("ah_regser_spin", ah_regser_spin);
extern void ath_hal_printf(struct ath_hal *, const char*, ...)
__printflike(2,3);
diff --git a/sys/dev/raid/mrsas/mrsas.c b/sys/dev/raid/mrsas/mrsas.c
index a26d9da..1c6051a 100644
--- a/sys/dev/raid/mrsas/mrsas.c
+++ b/sys/dev/raid/mrsas/mrsas.c
@@ -776,7 +776,7 @@ static int mrsas_attach(device_t dev)
lockinit(&sc->pci_lock, "mrsas_pci_lock", 0, LK_CANRECURSE);
lockinit(&sc->io_lock, "mrsas_io_lock", 0, LK_CANRECURSE);
lockinit(&sc->aen_lock, "mrsas_aen_lock", 0, LK_CANRECURSE);
- spin_init(&sc->ioctl_lock);
+ spin_init(&sc->ioctl_lock, "mrsasioctl");
lockinit(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", 0,
LK_CANRECURSE);
lockinit(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", 0,
diff --git a/sys/dev/raid/twa/tw_osl_freebsd.c b/sys/dev/raid/twa/tw_osl_freebsd.c
index 679fc3a..4c5f1ab 100644
--- a/sys/dev/raid/twa/tw_osl_freebsd.c
+++ b/sys/dev/raid/twa/tw_osl_freebsd.c
@@ -306,9 +306,9 @@ twa_attach(device_t dev)
/* Initialize the mutexes right here. */
sc->io_lock = &(sc->io_lock_handle);
- spin_init(sc->io_lock);
+ spin_init(sc->io_lock, "twa_iolock");
sc->q_lock = &(sc->q_lock_handle);
- spin_init(sc->q_lock);
+ spin_init(sc->q_lock, "twa_qlock");
sc->sim_lock = &(sc->sim_lock_handle);
lockinit(sc->sim_lock, "tw_osl_sim_lock", 0, LK_CANRECURSE);
diff --git a/sys/dev/raid/twa/tw_osl_inline.h b/sys/dev/raid/twa/tw_osl_inline.h
index 5a2d2e1..1fb926f 100644
--- a/sys/dev/raid/twa/tw_osl_inline.h
+++ b/sys/dev/raid/twa/tw_osl_inline.h
@@ -60,7 +60,7 @@
* Return value: None
*/
#define tw_osl_init_lock(ctlr_handle, lock_name, lock) \
- spin_init(lock)
+ spin_init(lock, lock_name)
diff --git a/sys/emulation/linux/linux_futex.c b/sys/emulation/linux/linux_futex.c
index 93f7140..fd693f7 100644
--- a/sys/emulation/linux/linux_futex.c
+++ b/sys/emulation/linux/linux_futex.c
@@ -89,7 +89,7 @@ struct futex_list futex_list;
#if 0
#define FUTEX_LOCK(f) spin_lock(&(f)->f_lck)
#define FUTEX_UNLOCK(f) spin_unlock(&(f)->f_lck)
-#define FUTEX_INIT(f) spin_init(&(f)->f_lck)
+#define FUTEX_INIT(f) spin_init(&(f)->f_lck, "futex")
#define FUTEX_SLEEP(f, id, flag, wm, timo) ssleep((id), &(f)->f_lck, (flag), (wm), (timo))
#endif
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 742c391..b1c20a3 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -164,7 +164,7 @@ mi_proc0init(struct globaldata *gd, struct user *proc0paddr)
lwkt_init_thread(&thread0, proc0paddr, LWKT_THREAD_STACK, 0, gd);
lwkt_set_comm(&thread0, "thread0");
RB_INIT(&proc0.p_lwp_tree);
- spin_init(&proc0.p_spin);
+ spin_init(&proc0.p_spin, "iproc_proc0");
lwkt_token_init(&proc0.p_token, "iproc");
proc0.p_lasttid = 0; /* +1 = next TID */
lwp_rb_tree_RB_INSERT(&proc0.p_lwp_tree, &lwp0);
@@ -173,7 +173,7 @@ mi_proc0init(struct globaldata *gd, struct user *proc0paddr)
proc0.p_usched = usched_init();
CPUMASK_ASSALLONES(lwp0.lwp_cpumask);
lwkt_token_init(&lwp0.lwp_token, "lwp_token");
- spin_init(&lwp0.lwp_spin);
+ spin_init(&lwp0.lwp_spin, "iproc_lwp0");
varsymset_init(&proc0.p_varsymset, NULL);
thread0.td_flags |= TDF_RUNNING;
thread0.td_proc = &proc0;
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 8ec0c3e..a01344d 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -8,7 +8,7 @@ cv_init(struct cv *c, const char *desc)
{
c->cv_desc = desc;
c->cv_waiters = 0;
- spin_init(&c->cv_lock);
+ spin_init(&c->cv_lock, "cvinit");
}
void
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 5925185..7b857e4 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -126,7 +126,7 @@ static struct dev_ops fildesc_ops = {
* Descriptor management.
*/
static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead);
-static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin);
+static struct spinlock filehead_spin = SPINLOCK_INITIALIZER("filehead_spin", &filehead_spin);
static int nfiles; /* actual number of open files */
extern int cmask;
@@ -1551,7 +1551,7 @@ falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
* Allocate a new file descriptor.
*/
fp = kmalloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
- spin_init(&fp->f_spin);
+ spin_init(&fp->f_spin, "falloc");
SLIST_INIT(&fp->f_klist);
fp->f_count = 1;
fp->f_ops = &badfileops;
@@ -1774,7 +1774,7 @@ fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
fdp0->fd_files = fdp0->fd_builtin_files;
fdp0->fd_nfiles = NDFILE;
fdp0->fd_lastfile = -1;
- spin_init(&fdp0->fd_spin);
+ spin_init(&fdp0->fd_spin, "fdinitbootstrap");
}
/*
@@ -1818,7 +1818,7 @@ fdinit(struct proc *p)
newfdp->fd_files = newfdp->fd_builtin_files;
newfdp->fd_nfiles = NDFILE;
newfdp->fd_lastfile = -1;
- spin_init(&newfdp->fd_spin);
+ spin_init(&newfdp->fd_spin, "fdinit");
return (newfdp);
}
@@ -1934,7 +1934,7 @@ again:
newfdp->fd_lastfile = fdp->fd_lastfile;
newfdp->fd_freefile = fdp->fd_freefile;
newfdp->fd_cmask = fdp->fd_cmask;
- spin_init(&newfdp->fd_spin);
+ spin_init(&newfdp->fd_spin, "fdcopy");
/*
* Copy the descriptor table through (i). This also copies the
diff --git a/sys/kern/kern_environment.c b/sys/kern/kern_environment.c
index abda37d..094e19d 100644
--- a/sys/kern/kern_environment.c
+++ b/sys/kern/kern_environment.c
@@ -479,7 +479,7 @@ kenv_init(void *dummy)
}
kenv_dynp[i] = NULL;
- spin_init(&kenv_dynlock);
+ spin_init(&kenv_dynlock, "kenvdynlock");
kenv_isdynamic = 1;
}
SYSINIT(kenv, SI_BOOT1_POST, SI_ORDER_ANY, kenv_init, NULL);
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 8065ccf4..b5fbc4b 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -383,7 +383,7 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
p2->p_stat = SIDL;
RB_INIT(&p2->p_lwp_tree);
- spin_init(&p2->p_spin);
+ spin_init(&p2->p_spin, "procfork1");
lwkt_token_init(&p2->p_token, "proc");
lwkt_gettoken(&p2->p_token);
@@ -652,7 +652,7 @@ lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
crit_exit();
CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
lwkt_token_init(&lp->lwp_token, "lwp_token");
- spin_init(&lp->lwp_spin);
+ spin_init(&lp->lwp_spin, "lwptoken");
/*
* Assign the thread to the current cpu to begin with so we
diff --git a/sys/kern/kern_ktr.c b/sys/kern/kern_ktr.c
index 5cc1e79..21c144f 100644
--- a/sys/kern/kern_ktr.c
+++ b/sys/kern/kern_ktr.c
@@ -274,7 +274,7 @@ ktr_resync_callback(void *dummy __unused)
if (ktr_testspincnt) {
struct spinlock spin;
- spin_init(&spin);
+ spin_init(&spin, "ktrresync");
spin_lock(&spin);
spin_unlock(&spin);
logtest_noargs(spin_beg);
diff --git a/sys/kern/kern_nrandom.c b/sys/kern/kern_nrandom.c
index 944b5f7..0b1408f 100644
--- a/sys/kern/kern_nrandom.c
+++ b/sys/kern/kern_nrandom.c
@@ -468,7 +468,7 @@ rand_initialize(void)
#endif
- spin_init(&rand_spin);
+ spin_init(&rand_spin, "randinit");
/* Initialize IBAA. */
IBAA_Init();
diff --git a/sys/kern/kern_objcache.c b/sys/kern/kern_objcache.c
index a678cb2..6ac88ff 100644
--- a/sys/kern/kern_objcache.c
+++ b/sys/kern/kern_objcache.c
@@ -226,7 +226,7 @@ objcache_create(const char *name, int cluster_limit, int nom_cache,
*/
depot = &oc->depot[0];
- spin_init(&depot->spin);
+ spin_init(&depot->spin, "objcachedepot");
SLIST_INIT(&depot->fullmagazines);
SLIST_INIT(&depot->emptymagazines);
@@ -989,7 +989,7 @@ objcache_timer(void *dummy)
static void
objcache_init(void)
{
- spin_init(&objcachelist_spin);
+ spin_init(&objcachelist_spin, "objcachelist");
magazine_capmin = mag_capacity_align(MAGAZINE_CAPACITY_MIN);
magazine_capmax = mag_capacity_align(MAGAZINE_CAPACITY_MAX);
diff --git a/sys/kern/kern_plimit.c b/sys/kern/kern_plimit.c
index 4bcc2db..d4d702a 100644
--- a/sys/kern/kern_plimit.c
+++ b/sys/kern/kern_plimit.c
@@ -111,7 +111,7 @@ plimit_init0(struct plimit *limit)
limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
limit->p_cpulimit = RLIM_INFINITY;
limit->p_refcnt = 1;
- spin_init(&limit->p_spin);
+ spin_init(&limit->p_spin, "plimitinit");
}
/*
@@ -515,7 +515,7 @@ plimit_copy(struct plimit *olimit, struct plimit *nlimit)
{
*nlimit = *olimit;
- spin_init(&nlimit->p_spin);
+ spin_init(&nlimit->p_spin, "plimitcopy");
nlimit->p_refcnt = 1;
nlimit->p_exclusive = 0;
}
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 0d90ef0..19d3e68 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -915,7 +915,7 @@ ruadd(struct rusage *ru, struct rusage *ru2)
void
uihashinit(void)
{
- spin_init(&uihash_lock);
+ spin_init(&uihash_lock, "uihashinit");
uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}
@@ -957,7 +957,7 @@ uicreate(uid_t uid)
/*
* Initialize structure and enter it into the hash table
*/
- spin_init(&uip->ui_lock);
+ spin_init(&uip->ui_lock, "uicreate");
uip->ui_uid = uid;
uip->ui_ref = 1; /* we're returning a ref */
varsymset_init(&uip->ui_varsymset, NULL);
diff --git a/sys/kern/kern_sensors.c b/sys/kern/kern_sensors.c
index 7d29e81..c2fe087 100644
--- a/sys/kern/kern_sensors.c
+++ b/sys/kern/kern_sensors.c
@@ -38,7 +38,7 @@
static int sensor_task_lock_inited = 0;
static struct lock sensor_task_lock;
-static struct spinlock sensor_dev_lock = SPINLOCK_INITIALIZER(sensor_dev_lock);
+static struct spinlock sensor_dev_lock = SPINLOCK_INITIALIZER("sensor_dev_lock", sensor_dev_lock);
int sensordev_count = 0;
SLIST_HEAD(, ksensordev) sensordev_list = SLIST_HEAD_INITIALIZER(sensordev_list);
diff --git a/sys/kern/kern_spinlock.c b/sys/kern/kern_spinlock.c
index a2764d8..4027cd0 100644
--- a/sys/kern/kern_spinlock.c
+++ b/sys/kern/kern_spinlock.c
@@ -70,7 +70,7 @@
#include <pthread.h>
#endif
-struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin);
+struct spinlock pmap_spin = SPINLOCK_INITIALIZER("pmap_spin", pmap_spin);
struct indefinite_info {
sysclock_t base;
@@ -199,6 +199,7 @@ _spin_lock_contested(struct spinlock *spin, const char *ident)
atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
#ifdef DEBUG_LOCKS_LATENCY
+ kprintf("spinlock %s : _spin_lock_contested", spin->descr);
long j;
for (j = spinlocks_add_latency; j > 0; --j)
cpu_ccfence();
@@ -433,7 +434,7 @@ sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
* Indefinite wait test
*/
if (value == 1) {
- spin_init(&spin);
+ spin_init(&spin, "sysctllock");
spin_lock(&spin); /* force an indefinite wait */
spin_lock_test_mode = 1;
spin_lock(&spin);
@@ -448,7 +449,7 @@ sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
if (value == 2) {
globaldata_t gd = mycpu;
- spin_init(&spin);
+ spin_init(&spin, "sysctllocktest");
for (i = spin_test_count; i > 0; --i) {
_spin_lock_quick(gd, &spin, "test");
spin_unlock_quick(gd, &spin);
diff --git a/sys/kern/kern_sysref.c b/sys/kern/kern_sysref.c
index 97aed42..f9811d6 100644
--- a/sys/kern/kern_sysref.c
+++ b/sys/kern/kern_sysref.c
@@ -80,7 +80,7 @@ sysrefbootinit(void *dummy __unused)
for (i = 0; i < ncpus; ++i) {
sa = &sysref_array[i];
- spin_init(&sa->spin);
+ spin_init(&sa->spin, "sysrefbootinit");
RB_INIT(&sa->rbtree);
}
}
diff --git a/sys/kern/kern_wdog.c b/sys/kern/kern_wdog.c
index 129407f..ef3f4c6 100644
--- a/sys/kern/kern_wdog.c
+++ b/sys/kern/kern_wdog.c
@@ -207,7 +207,7 @@ static struct dev_ops wdog_ops = {
static void
wdog_init(void)
{
- spin_init(&wdogmtx);
+ spin_init(&wdogmtx, "wdog");
make_dev(&wdog_ops, 0,
UID_ROOT, GID_WHEEL, 0600, "wdog");
callout_init_mp(&wdog_callout);
diff --git a/sys/kern/lwkt_msgport.c b/sys/kern/lwkt_msgport.c
index 36c08a7..48943f3 100644
--- a/sys/kern/lwkt_msgport.c
+++ b/sys/kern/lwkt_msgport.c
@@ -370,7 +370,7 @@ lwkt_initport_spin(lwkt_port_t port, thread_t td, boolean_t fixed_cpuid)
lwkt_spin_replyport,
dmsgfn,
pportfn_oncpu);
- spin_init(&port->mpu_spin);
+ spin_init(&port->mpu_spin, "lwktinitport");
port->mpu_td = td;
if (fixed_cpuid)
port->mp_cpuid = td->td_gd->gd_cpuid;
diff --git a/sys/kern/lwkt_token.c b/sys/kern/lwkt_token.c
index fdec0d0..44773a4 100644
--- a/sys/kern/lwkt_token.c
+++ b/sys/kern/lwkt_token.c
@@ -87,7 +87,7 @@ extern int lwkt_sched_debug;
#endif
static lwkt_token pool_tokens[LWKT_NUM_POOL_TOKENS];
-struct spinlock tok_debug_spin = SPINLOCK_INITIALIZER(&tok_debug_spin);
+struct spinlock tok_debug_spin = SPINLOCK_INITIALIZER("tok_debug_spin", &tok_debug_spin);
#define TOKEN_STRING "REF=%p TOK=%p TD=%p"
#define TOKEN_ARGS lwkt_tokref_t ref, lwkt_token_t tok, struct thread *td
diff --git a/sys/kern/subr_csprng.c b/sys/kern/subr_csprng.c
index 0af44e3..02380de 100644
--- a/sys/kern/subr_csprng.c
+++ b/sys/kern/subr_csprng.c
@@ -52,7 +52,7 @@
/* Lock macros */
#define POOL_LOCK_INIT(pool) \
- spin_init(&(pool)->lock)
+ spin_init(&(pool)->lock, "csprng_poollock")
#define POOL_LOCK(pool) \
spin_lock(&pool->lock)
@@ -65,7 +65,7 @@
#define STATE_LOCK_INIT(state) \
- spin_init(&state->lock)
+ spin_init(&state->lock, "csprng_statelock")
#define STATE_LOCK(state) \
spin_lock(&state->lock)
@@ -329,8 +329,10 @@ csprng_add_entropy(struct csprng_state *state, int src_id,
* of spinning until we get it, return if we
* can't get a hold of the lock right now.
*/
- if (!POOL_TRYLOCK(pool))
+ if (!POOL_TRYLOCK(pool)) {
+ kprintf("POOL_TRYLOCK %s failed", pool->lock.descr);
return -1;
+ }
} else {
POOL_LOCK(pool);
}
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index f1a5570..273bd07 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -103,7 +103,7 @@ static void snprintf_func (int ch, void *arg);
static int consintr = 1; /* Ok to handle console interrupts? */
static int msgbufmapped; /* Set when safe to use msgbuf */
-static struct spinlock cons_spin = SPINLOCK_INITIALIZER(cons_spin);
+static struct spinlock cons_spin = SPINLOCK_INITIALIZER("cons_spin", cons_spin);
static thread_t constty_td = NULL;
int msgbuftrigger;
@@ -892,7 +892,7 @@ done:
void
kvcreinitspin(void)
{
- spin_init(&cons_spin);
+ spin_init(&cons_spin, "kvcre");
atomic_clear_long(&mycpu->gd_flags, GDF_KPRINTF);
}
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
index 31e60e3..2b99148 100644
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -84,7 +84,7 @@ static void taskqueue_run(struct taskqueue *queue, int lock_held);
static __inline void
TQ_LOCK_INIT(struct taskqueue *tq)
{
- spin_init(&tq->tq_lock);
+ spin_init(&tq->tq_lock, "tqlock");
}
static __inline void
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 18ce8bd..2165ccc 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -134,7 +134,7 @@ mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
struct mbuf_rb_tree mbuf_track_root;
-static struct spinlock mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);
+static struct spinlock mbuf_track_spin = SPINLOCK_INITIALIZER("mbuf_track_spin", mbuf_track_spin);
static void
mbuftrack(struct mbuf *m)
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index e277295..3e56b35 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -180,7 +180,7 @@ soalloc(int waitok, struct protosw *pr)
TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
- spin_init(&so->so_rcvd_spin);
+ spin_init(&so->so_rcvd_spin, "soalloc");
netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
MSGF_DROPABLE | MSGF_PRIORITY,
so->so_proto->pr_usrreqs->pru_rcvd);
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index 1129791..404cec8 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -83,7 +83,7 @@ static unp_defdiscard_t unp_defdiscard_base;
*/
static struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
static ino_t unp_ino = 1; /* prototype for fake inode numbers */
-static struct spinlock unp_ino_spin = SPINLOCK_INITIALIZER(&unp_ino_spin);
+static struct spinlock unp_ino_spin = SPINLOCK_INITIALIZER("unp_ino_spin", &unp_ino_spin);
static int unp_attach (struct socket *, struct pru_attach_info *);
static void unp_detach (struct unpcb *);
@@ -787,7 +787,7 @@ static u_long unpdg_sendspace = 2*1024; /* really max datagram size */
static u_long unpdg_recvspace = 4*1024;
static int unp_rights; /* file descriptors in flight */
-static struct spinlock unp_spin = SPINLOCK_INITIALIZER(&unp_spin);
+static struct spinlock unp_spin = SPINLOCK_INITIALIZER("unp_spin", &unp_spin);
SYSCTL_DECL(_net_local_seqpacket);
SYSCTL_DECL(_net_local_stream);
@@ -1413,7 +1413,7 @@ unp_init(void)
{
LIST_INIT(&unp_dhead);
LIST_INIT(&unp_shead);
- spin_init(&unp_spin);
+ spin_init(&unp_spin, "unpinit");
}
static int
diff --git a/sys/kern/usched_bsd4.c b/sys/kern/usched_bsd4.c
index 5856e42..b63574d 100644
--- a/sys/kern/usched_bsd4.c
+++ b/sys/kern/usched_bsd4.c
@@ -298,7 +298,7 @@ bsd4_rqinit(void *dummy)
{
int i;
- spin_init(&bsd4_spin);
+ spin_init(&bsd4_spin, "bsd4rq");
for (i = 0; i < NQS; i++) {
TAILQ_INIT(&bsd4_queues[i]);
TAILQ_INIT(&bsd4_rtqueues[i]);
diff --git a/sys/kern/usched_dfly.c b/sys/kern/usched_dfly.c
index 2b03f56..8386a2a 100644
--- a/sys/kern/usched_dfly.c
+++ b/sys/kern/usched_dfly.c
@@ -816,6 +816,7 @@ dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
if (nlp == NULL)
spin_unlock(&dd->spin);
} else {
+ kprintf("choose worst queue rdd spin_trylock %s failed\n",
spin_unlock(&dd->spin);
nlp = NULL;
}
@@ -2071,9 +2072,15 @@ dfly_helper_thread(void *dummy)
* 4 cores).
*/
rdd = dfly_choose_worst_queue(dd);
- if (rdd && spin_trylock(&rdd->spin)) {
+ if (rdd) {
+ if (spin_trylock(&rdd->spin)) {
nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
spin_unlock(&rdd->spin);
+ } else {
+ kprintf("choose worst queue rdd spin_trylock %s failed\n",
+ rdd->spin.descr);
+ nlp = NULL;
+ }
} else {
nlp = NULL;
}
@@ -2160,7 +2167,7 @@ usched_dfly_cpu_init(void)
if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
continue;
- spin_init(&dd->spin);
+ spin_init(&dd->spin, "uschedcpuinit");
dd->cpunode = get_cpu_node_by_cpuid(i);
dd->cpuid = i;
CPUMASK_ASSBIT(dd->cpumask, i);
diff --git a/sys/kern/usched_dummy.c b/sys/kern/usched_dummy.c
index e747c8f..cb40224 100644
--- a/sys/kern/usched_dummy.c
+++ b/sys/kern/usched_dummy.c
@@ -120,7 +120,7 @@ static void
dummyinit(void *dummy)
{
TAILQ_INIT(&dummy_runq);
- spin_init(&dummy_spin);
+ spin_init(&dummy_spin, "uscheddummy");
ATOMIC_CPUMASK_NANDBIT(dummy_curprocmask, 0);
}
SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, dummyinit, NULL)
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index cb0f6f9..45ef3db 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -635,7 +635,7 @@ bufinit(void *dummy __unused)
/* next, make a null set of free lists */
for (i = 0; i < ncpus; ++i) {
pcpu = &bufpcpu[i];
- spin_init(&pcpu->spin);
+ spin_init(&pcpu->spin, "bufinit");
for (j = 0; j < BUFFER_QUEUES; j++)
TAILQ_INIT(&pcpu->bufqueues[j]);
}
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index bc4c00b..bb5fa7f 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -3627,16 +3627,16 @@ nchinit(void)
gd->gd_nchstats = &nchstats[i];
}
TAILQ_INIT(&ncneglist);
- spin_init(&ncspin);
+ spin_init(&ncspin, "nchinit");
nchashtbl = hashinit_ext(desiredvnodes / 2,
sizeof(struct nchash_head),
M_VFSCACHE, &nchash);
for (i = 0; i <= (int)nchash; ++i) {
LIST_INIT(&nchashtbl[i].list);
- spin_init(&nchashtbl[i].spin);
+ spin_init(&nchashtbl[i].spin, "nchinit_hash");
}
for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
- spin_init(&ncmount_cache[i].spin);
+ spin_init(&ncmount_cache[i].spin, "nchinit_cache");
nclockwarn = 5 * hz;
}
diff --git a/sys/kern/vfs_lock.c b/sys/kern/vfs_lock.c
index 95f1e05..cc33d6c 100644
--- a/sys/kern/vfs_lock.c
+++ b/sys/kern/vfs_lock.c
@@ -86,7 +86,7 @@ TAILQ_HEAD(freelst, vnode);
static struct freelst vnode_active_list;
static struct freelst vnode_inactive_list;
static struct vnode vnode_active_rover;
-static struct spinlock vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);
+static struct spinlock vfs_spin = SPINLOCK_INITIALIZER("vfs_spin", vfs_spin);
int activevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, activevnodes, CTLFLAG_RD,
@@ -115,7 +115,7 @@ vfs_lock_init(void)
TAILQ_INIT(&vnode_inactive_list);
TAILQ_INIT(&vnode_active_list);
TAILQ_INSERT_TAIL(&vnode_active_list, &vnode_active_rover, v_list);
- spin_init(&vfs_spin);
+ spin_init(&vfs_spin, "vfslock");
kmalloc_raise_limit(M_VNODE, 0); /* unlimited */
}
@@ -875,7 +875,7 @@ allocvnode(int lktimeout, int lkflags)
RB_INIT(&vp->v_rbclean_tree);
RB_INIT(&vp->v_rbdirty_tree);
RB_INIT(&vp->v_rbhash_tree);
- spin_init(&vp->v_spin);
+ spin_init(&vp->v_spin, "allocvnode");
lockmgr(&vp->v_lock, LK_EXCLUSIVE);
atomic_add_int(&numvnodes, 1);
diff --git a/sys/kern/vfs_quota.c b/sys/kern/vfs_quota.c
index 1467944..a5fadbe 100644
--- a/sys/kern/vfs_quota.c
+++ b/sys/kern/vfs_quota.c
@@ -125,7 +125,7 @@ vq_init(struct mount *mp)
/* initialize the rb trees */
RB_INIT(&mp->mnt_acct.ac_uroot);
RB_INIT(&mp->mnt_acct.ac_groot);
- spin_init(&mp->mnt_acct.ac_spin);
+ spin_init(&mp->mnt_acct.ac_spin, "vqinit");
mp->mnt_acct.ac_bytes = 0;
diff --git a/sys/net/netmap/netmap_mbq.c b/sys/net/netmap/netmap_mbq.c
index 62abe4d..88de31d 100644
--- a/sys/net/netmap/netmap_mbq.c
+++ b/sys/net/netmap/netmap_mbq.c
@@ -40,7 +40,7 @@ static inline void __mbq_init(struct mbq *q)
void mbq_safe_init(struct mbq *q)
{
- spin_init(&q->lock);
+ spin_init(&q->lock, "mbq");
__mbq_init(q);
}
diff --git a/sys/net/pf/pf.c b/sys/net/pf/pf.c
index b49cf47..8dc9f73 100644
--- a/sys/net/pf/pf.c
+++ b/sys/net/pf/pf.c
@@ -117,7 +117,7 @@ extern int debug_pfugidhack;
struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);
struct lwkt_token pf_gtoken = LWKT_TOKEN_INITIALIZER(pf_gtoken);
#if __SIZEOF_LONG__ != 8
-struct spinlock pf_spin = SPINLOCK_INITIALIZER(pf_spin);
+struct spinlock pf_spin = SPINLOCK_INITIALIZER("pf_spin", pf_spin);
#endif
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x
diff --git a/sys/netproto/smb/smb_subr.h b/sys/netproto/smb/smb_subr.h
index 5f0cbc7..fdd45d1 100644
--- a/sys/netproto/smb/smb_subr.h
+++ b/sys/netproto/smb/smb_subr.h
@@ -82,7 +82,7 @@ void m_dumpm(struct mbuf *m);
#define lockdestroy(lock)
#define smb_slock spinlock
-#define smb_sl_init(sl, desc) spin_init(sl)
+#define smb_sl_init(sl, desc) spin_init(sl, desc)
#define smb_sl_destroy(sl)
#define smb_sl_lock(sl) spin_lock(sl)
#define smb_sl_unlock(sl) spin_unlock(sl)
diff --git a/sys/opencrypto/cryptosoft.c b/sys/opencrypto/cryptosoft.c
index 23623a7..85a2f2b 100644
--- a/sys/opencrypto/cryptosoft.c
+++ b/sys/opencrypto/cryptosoft.c
@@ -58,7 +58,7 @@ static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
static u_int32_t swcr_minsesnum = 1;
-static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin);
+static struct spinlock swcr_spin = SPINLOCK_INITIALIZER("swcr_spin", swcr_spin);
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
diff --git a/sys/platform/pc32/i386/busdma_machdep.c b/sys/platform/pc32/i386/busdma_machdep.c
index cb50f11..dcc6595 100644
--- a/sys/platform/pc32/i386/busdma_machdep.c
+++ b/sys/platform/pc32/i386/busdma_machdep.c
@@ -154,7 +154,7 @@ struct bus_dmamap {
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);
static struct spinlock bounce_map_list_spin =
- SPINLOCK_INITIALIZER(&bounce_map_list_spin);
+ SPINLOCK_INITIALIZER("bounce_map_list_spin", &bounce_map_list_spin);
static struct bus_dmamap nobounce_dmamap;
@@ -258,7 +258,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);
- spin_init(&newtag->spin);
+ spin_init(&newtag->spin, "busdmacreate");
newtag->parent = parent;
newtag->alignment = alignment;
newtag->boundary = boundary;
@@ -1103,7 +1103,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
}
bz = new_bz;
- spin_init(&bz->spin);
+ spin_init(&bz->spin, "allocbouncezone");
STAILQ_INIT(&bz->bounce_page_list);
STAILQ_INIT(&bz->bounce_map_waitinglist);
bz->free_bpages = 0;
diff --git a/sys/platform/pc32/i386/pmap.c b/sys/platform/pc32/i386/pmap.c
index bee854a..cad74b2 100644
--- a/sys/platform/pc32/i386/pmap.c
+++ b/sys/platform/pc32/i386/pmap.c
@@ -417,7 +417,7 @@ pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
kernel_pmap.pm_pteobj = NULL; /* see pmap_init */
TAILQ_INIT(&kernel_pmap.pm_pvlist);
TAILQ_INIT(&kernel_pmap.pm_pvlist_free);
- spin_init(&kernel_pmap.pm_spin);
+ spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
nkpt = NKPT;
@@ -1301,7 +1301,7 @@ pmap_pinit0(struct pmap *pmap)
pmap->pm_ptphint = NULL;
TAILQ_INIT(&pmap->pm_pvlist);
TAILQ_INIT(&pmap->pm_pvlist_free);
- spin_init(&pmap->pm_spin);
+ spin_init(&pmap->pm_spin, "pmapinit0");
lwkt_token_init(&pmap->pm_token, "pmap_tok");
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
@@ -1360,7 +1360,7 @@ pmap_pinit(struct pmap *pmap)
pmap->pm_ptphint = NULL;
TAILQ_INIT(&pmap->pm_pvlist);
TAILQ_INIT(&pmap->pm_pvlist_free);
- spin_init(&pmap->pm_spin);
+ spin_init(&pmap->pm_spin, "pmapinit");
lwkt_token_init(&pmap->pm_token, "pmap_tok");
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
pmap->pm_stats.resident_count = 1;
diff --git a/sys/platform/pc64/x86_64/busdma_machdep.c b/sys/platform/pc64/x86_64/busdma_machdep.c
index 9b7d74d..ae6e9df 100644
--- a/sys/platform/pc64/x86_64/busdma_machdep.c
+++ b/sys/platform/pc64/x86_64/busdma_machdep.c
@@ -154,7 +154,7 @@ struct bus_dmamap {
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);
static struct spinlock bounce_map_list_spin =
- SPINLOCK_INITIALIZER(&bounce_map_list_spin);
+ SPINLOCK_INITIALIZER("bounce_map_list_spin", &bounce_map_list_spin);
static struct bus_dmamap nobounce_dmamap;
@@ -258,7 +258,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);
- spin_init(&newtag->spin);
+ spin_init(&newtag->spin, "busdmacreate");
newtag->parent = parent;
newtag->alignment = alignment;
newtag->boundary = boundary;
@@ -1112,7 +1112,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
}
bz = new_bz;
- spin_init(&bz->spin);
+ spin_init(&bz->spin, "allocbouncezone");
STAILQ_INIT(&bz->bounce_page_list);
STAILQ_INIT(&bz->bounce_map_waitinglist);
bz->free_bpages = 0;
diff --git a/sys/platform/pc64/x86_64/pmap.c b/sys/platform/pc64/x86_64/pmap.c
index 45b3a29..3af4f30 100644
--- a/sys/platform/pc64/x86_64/pmap.c
+++ b/sys/platform/pc64/x86_64/pmap.c
@@ -912,7 +912,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
kernel_pmap.pm_count = 1;
CPUMASK_ASSALLONES(kernel_pmap.pm_active);
RB_INIT(&kernel_pmap.pm_pvroot);
- spin_init(&kernel_pmap.pm_spin);
+ spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
/*
@@ -1626,7 +1626,7 @@ pmap_pinit0(struct pmap *pmap)
CPUMASK_ASSZERO(pmap->pm_active);
pmap->pm_pvhint = NULL;
RB_INIT(&pmap->pm_pvroot);
- spin_init(&pmap->pm_spin);
+ spin_init(&pmap->pm_spin, "pmapinit0");
lwkt_token_init(&pmap->pm_token, "pmap_tok");
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
pmap_pinit_defaults(pmap);
@@ -1656,7 +1656,7 @@ pmap_pinit_simple(struct pmap *pmap)
if (pmap->pm_pmlpv == NULL) {
RB_INIT(&pmap->pm_pvroot);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
- spin_init(&pmap->pm_spin);
+ spin_init(&pmap->pm_spin, "pmapinitsimple");
lwkt_token_init(&pmap->pm_token, "pmap_tok");
}
}
diff --git a/sys/platform/vkernel/platform/busdma_machdep.c b/sys/platform/vkernel/platform/busdma_machdep.c
index d39594b..8212bfc 100644
--- a/sys/platform/vkernel/platform/busdma_machdep.c
+++ b/sys/platform/vkernel/platform/busdma_machdep.c
@@ -1006,7 +1006,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
}
bz = new_bz;
- spin_init(&bz->spin);
+ spin_init(&bz->spin, "allocbouncezone");
STAILQ_INIT(&bz->bounce_page_list);
STAILQ_INIT(&bz->bounce_map_waitinglist);
bz->free_bpages = 0;
diff --git a/sys/platform/vkernel/platform/pmap.c b/sys/platform/vkernel/platform/pmap.c
index cc85121..5dfa8cc 100644
--- a/sys/platform/vkernel/platform/pmap.c
+++ b/sys/platform/vkernel/platform/pmap.c
@@ -182,7 +182,7 @@ pmap_bootstrap(void)
kernel_pmap.pm_pteobj = NULL; /* see pmap_init */
TAILQ_INIT(&kernel_pmap.pm_pvlist);
TAILQ_INIT(&kernel_pmap.pm_pvlist_free);
- spin_init(&kernel_pmap.pm_spin);
+ spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
i386_protection_init();
}
@@ -253,7 +253,7 @@ pmap_pinit(struct pmap *pmap)
pmap->pm_cpucachemask = 0;
TAILQ_INIT(&pmap->pm_pvlist);
TAILQ_INIT(&pmap->pm_pvlist_free);
- spin_init(&pmap->pm_spin);
+ spin_init(&pmap->pm_spin, "pmapinit");
lwkt_token_init(&pmap->pm_token, "pmap_tok");
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
pmap->pm_stats.resident_count = 1;
diff --git a/sys/platform/vkernel64/platform/busdma_machdep.c b/sys/platform/vkernel64/platform/busdma_machdep.c
index 12a568e..d27be7c 100644
--- a/sys/platform/vkernel64/platform/busdma_machdep.c
+++ b/sys/platform/vkernel64/platform/busdma_machdep.c
@@ -1002,7 +1002,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
}
bz = new_bz;
- spin_init(&bz->spin);
+ spin_init(&bz->spin, "allocbouncezone");
STAILQ_INIT(&bz->bounce_page_list);
STAILQ_INIT(&bz->bounce_map_waitinglist);
bz->free_bpages = 0;
diff --git a/sys/platform/vkernel64/platform/pmap.c b/sys/platform/vkernel64/platform/pmap.c
index 3b9493f..5e0c929 100644
--- a/sys/platform/vkernel64/platform/pmap.c
+++ b/sys/platform/vkernel64/platform/pmap.c
@@ -569,7 +569,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr, int64_t ptov_offset)
TAILQ_INIT(&kernel_pmap.pm_pvlist);
TAILQ_INIT(&kernel_pmap.pm_pvlist_free);
lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
- spin_init(&kernel_pmap.pm_spin);
+ spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
/*
* Reserve some special page table entries/VA space for temporary
@@ -1180,7 +1180,7 @@ pmap_pinit(struct pmap *pmap)
pmap->pm_ptphint = NULL;
TAILQ_INIT(&pmap->pm_pvlist);
TAILQ_INIT(&pmap->pm_pvlist_free);
- spin_init(&pmap->pm_spin);
+ spin_init(&pmap->pm_spin, "pmapinit");
lwkt_token_init(&pmap->pm_token, "pmap_tok");
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
pmap->pm_stats.resident_count = 1;
diff --git a/sys/sys/spinlock.h b/sys/sys/spinlock.h
index 2da0267..7cf0262 100644
--- a/sys/sys/spinlock.h
+++ b/sys/sys/spinlock.h
@@ -48,11 +48,12 @@
* cache entry.
*/
struct spinlock {
+ const char *descr;
int counta;
int countb;
};
-#define SPINLOCK_INITIALIZER(head) { 0, 0 }
+#define SPINLOCK_INITIALIZER(d, head) { d, 0, 0 }
#define SPINLOCK_SHARED 0x80000000
#define SPINLOCK_EXCLWAIT 0x00100000 /* high bits counter */
diff --git a/sys/sys/spinlock2.h b/sys/sys/spinlock2.h
index e08da39..dc2efe3 100644
--- a/sys/sys/spinlock2.h
+++ b/sys/sys/spinlock2.h
@@ -80,6 +80,7 @@ spin_trylock(struct spinlock *spin)
if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
+ kprintf("spin_trylock %s\n", spin->descr);
int i;
for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
@@ -100,7 +101,11 @@ spin_trylock(struct spinlock *spin)
static __inline int
spin_held(struct spinlock *spin)
{
- return(spin->counta != 0);
+ int held = (spin->counta != 0);
+#ifdef DEBUG_LOCKS
+ kprintf("spinlock %s held : %d\n", spin->descr, held);
+#endif
+ return(held);
}
/*
@@ -116,6 +121,7 @@ _spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
if (spin->counta != 1)
_spin_lock_contested(spin, ident);
#ifdef DEBUG_LOCKS
+ kprintf("_spin_lock_quick %s\n", spin->descr);
int i;
for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
@@ -144,6 +150,7 @@ static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
+ kprintf("spin_unlock_quick %s\n", spin->descr);
int i;
for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
@@ -192,6 +199,7 @@ _spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
if (atomic_cmpset_int(&spin->counta, 0, SPINLOCK_SHARED | 1) == 0)
_spin_lock_shared_contested(spin, ident);
#ifdef DEBUG_LOCKS
+ kprintf("_spin_lock_shared_quick %s\n", spin->descr);
int i;
for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
@@ -209,6 +217,7 @@ static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
+ kprintf("spin_unlock_shared_quick %s\n", spin->descr);
int i;
for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
@@ -263,8 +272,9 @@ spin_pool_unlock(void *chan)
}
static __inline void
-spin_init(struct spinlock *spin)
+spin_init(struct spinlock *spin, const char *descr)
{
+ spin->descr = descr;
spin->counta = 0;
spin->countb = 0;
}
diff --git a/sys/sys/tree.h b/sys/sys/tree.h
index 5a9bb77..17544a4 100644
--- a/sys/sys/tree.h
+++ b/sys/sys/tree.h
@@ -313,7 +313,7 @@ struct name { \
}
#define RB_INITIALIZER(root) \
- { NULL, NULL, SPINLOCK_INITIALIZER(root.spin) }
+ { NULL, NULL, SPINLOCK_INITIALIZER("root.spin", root.spin) }
#define RB_INIT(root) do { \
(root)->rbh_root = NULL; \
diff --git a/sys/vfs/hammer2/hammer2_ccms.c b/sys/vfs/hammer2/hammer2_ccms.c
index ccd861c..d4f8101 100644
--- a/sys/vfs/hammer2/hammer2_ccms.c
+++ b/sys/vfs/hammer2/hammer2_ccms.c
@@ -74,7 +74,7 @@ void
ccms_cst_init(ccms_cst_t *cst, void *handle)
{
bzero(cst, sizeof(*cst));
- spin_init(&cst->spin);
+ spin_init(&cst->spin, "ccmscst");
cst->handle = handle;
}
diff --git a/sys/vfs/hammer2/hammer2_vfsops.c b/sys/vfs/hammer2/hammer2_vfsops.c
index 2eeb318..ec998f4 100644
--- a/sys/vfs/hammer2/hammer2_vfsops.c
+++ b/sys/vfs/hammer2/hammer2_vfsops.c
@@ -334,10 +334,10 @@ hammer2_pfsalloc(const hammer2_inode_data_t *ipdata, hammer2_tid_t alloc_tid)
kmalloc_create(&pmp->minode, "HAMMER2-inodes");
kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
lockinit(&pmp->lock, "pfslk", 0, 0);
- spin_init(&pmp->inum_spin);
+ spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
RB_INIT(&pmp->inum_tree);
TAILQ_INIT(&pmp->unlinkq);
- spin_init(&pmp->list_spin);
+ spin_init(&pmp->list_spin, "hm2pfsalloc_list");
pmp->alloc_tid = alloc_tid + 1; /* our first media transaction id */
pmp->flush_tid = pmp->alloc_tid;
@@ -528,8 +528,8 @@ hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
kmalloc_create(&hmp->mchain, "HAMMER2-chains");
TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
RB_INIT(&hmp->iotree);
- spin_init(&hmp->io_spin);
- spin_init(&hmp->list_spin);
+ spin_init(&hmp->io_spin, "hm2mount_io");
+ spin_init(&hmp->list_spin, "hm2mount_list");
TAILQ_INIT(&hmp->flushq);
lockinit(&hmp->vollk, "h2vol", 0, 0);
diff --git a/sys/vfs/ntfs/ntfs_subr.c b/sys/vfs/ntfs/ntfs_subr.c
index 82af7be..d46015c 100644
--- a/sys/vfs/ntfs/ntfs_subr.c
+++ b/sys/vfs/ntfs/ntfs_subr.c
@@ -388,7 +388,7 @@ ntfs_ntlookup(struct ntfsmount *ntmp, ino_t ino, struct ntnode **ipp)
/* init lock and lock the newborn ntnode */
lockinit(&ip->i_lock, "ntnode", 0, LK_EXCLUSIVE);
- spin_init(&ip->i_interlock);
+ spin_init(&ip->i_interlock, "ntfsntlookup");
ntfs_ntget(ip);
ntfs_nthashins(ip);
diff --git a/sys/vfs/ufs/ffs_softdep.c b/sys/vfs/ufs/ffs_softdep.c
index 7be0069..d574761 100644
--- a/sys/vfs/ufs/ffs_softdep.c
+++ b/sys/vfs/ufs/ffs_softdep.c
@@ -275,7 +275,7 @@ sema_init(struct sema *semap, char *name, int timo)
semap->value = 0;
semap->name = name;
semap->timo = timo;
- spin_init(&semap->spin);
+ spin_init(&semap->spin, "ufssema");
}
/*
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index a96cde4..98fb724 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -168,7 +168,7 @@ static int swap_async_max = 4; /* maximum in-progress async I/O's */
static int swap_burst_read = 0; /* allow burst reading */
static swblk_t swapiterator; /* linearize allocations */
-static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin);
+static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER("swapbp_spin", &swapbp_spin);
/* from vm_swap.c */
extern struct vnode *swapdev_vp;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index da6ea3d..b1e1246 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -116,7 +116,7 @@ static volatile int vm_pages_waiting;
static struct alist vm_contig_alist;
static struct almeta vm_contig_ameta[ALIST_RECORDS_65536];
-static struct spinlock vm_contig_spin = SPINLOCK_INITIALIZER(&vm_contig_spin);
+static struct spinlock vm_contig_spin = SPINLOCK_INITIALIZER("vm_contig_spin", &vm_contig_spin);
static u_long vm_dma_reserved = 0;
TUNABLE_ULONG("vm.dma_reserved", &vm_dma_reserved);
@@ -150,7 +150,7 @@ vm_page_queue_init(void)
for (i = 0; i < PQ_COUNT; i++) {
TAILQ_INIT(&vm_page_queues[i].pl);
- spin_init(&vm_page_queues[i].spin);
+ spin_init(&vm_page_queues[i].spin, "vm_page_queue_init");
}
for (i = 0; i < VMACTION_HSIZE; i++)
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 003165d..4579d45 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -184,7 +184,7 @@ static struct buf *swbuf_raw;
static vm_offset_t swapbkva; /* swap buffers kva */
static struct swqueue bswlist_raw; /* without kva */
static struct swqueue bswlist_kva; /* with kva */
-static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin);
+static struct spinlock bswspin = SPINLOCK_INITIALIZER("bswspin", &bswspin);
static int pbuf_raw_count;
static int pbuf_kva_count;
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index 4e61113..caac30e 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -237,7 +237,7 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
*/
if ((z->zflags & ZONE_BOOT) == 0) {
z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
- spin_init(&z->zlock);
+ spin_init(&z->zlock, "zinitna");
z->zfreecnt = 0;
z->ztotal = 0;
z->zmax = 0;
@@ -371,7 +371,7 @@ zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
z->zpagecount = 0;
z->zalloc = 0;
z->znalloc = 0;
- spin_init(&z->zlock);
+ spin_init(&z->zlock, "zbootinit");
bzero(item, (size_t)nitems * z->zsize);
z->zitems = NULL;