Project

General

Profile

Submit #2714 » patch-spinlock.txt

dclink, 08/16/2014 03:03 PM

 
1
diff --git a/share/man/man9/spinlock.9 b/share/man/man9/spinlock.9
2
index 2c2a115..f8e3aee 100644
3
--- a/share/man/man9/spinlock.9
4
+++ b/share/man/man9/spinlock.9
5
@@ -46,7 +46,7 @@
6
 .In sys/spinlock.h
7
 .In sys/spinlock2.h
8
 .Ft void
9
-.Fn spin_init "struct spinlock *mtx"
10
+.Fn spin_init "struct spinlock *mtx" "const char *descr"
11
 .Ft void
12
 .Fn spin_uninit "struct spinlock *mtx"
13
 .Ft void
14
diff --git a/sys/bus/cam/cam_sim.c b/sys/bus/cam/cam_sim.c
15
index 85c7dba..5b3c704 100644
16
--- a/sys/bus/cam/cam_sim.c
17
+++ b/sys/bus/cam/cam_sim.c
18
@@ -211,7 +211,7 @@ cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll,
19
 
20
 	SLIST_INIT(&sim->ccb_freeq);
21
 	TAILQ_INIT(&sim->sim_doneq);
22
-	spin_init(&sim->sim_spin);
23
+	spin_init(&sim->sim_spin, "cam_sim_alloc");
24
 
25
 	return (sim);
26
 }
27
diff --git a/sys/bus/cam/cam_xpt.c b/sys/bus/cam/cam_xpt.c
28
index fc0f2f9..075113c 100644
29
--- a/sys/bus/cam/cam_xpt.c
30
+++ b/sys/bus/cam/cam_xpt.c
31
@@ -1452,13 +1452,13 @@ xpt_init(void *dummy)
32
 	STAILQ_INIT(&xsoftc.highpowerq);
33
 	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
34
 
35
-	spin_init(&cam_simq_spin);
36
+	spin_init(&cam_simq_spin, "cam_simq_spin");
37
 	lockinit(&xsoftc.xpt_lock, "XPT lock", 0, LK_CANRECURSE);
38
 	lockinit(&xsoftc.xpt_topo_lock, "XPT topology lock", 0, LK_CANRECURSE);
39
 
40
 	SLIST_INIT(&cam_dead_sim.ccb_freeq);
41
 	TAILQ_INIT(&cam_dead_sim.sim_doneq);
42
-	spin_init(&cam_dead_sim.sim_spin);
43
+	spin_init(&cam_dead_sim.sim_spin, "cam_dead_sim");
44
 	cam_dead_sim.sim_action = dead_sim_action;
45
 	cam_dead_sim.sim_poll = dead_sim_poll;
46
 	cam_dead_sim.sim_name = "dead_sim";
47
diff --git a/sys/bus/pci/i386/pci_cfgreg.c b/sys/bus/pci/i386/pci_cfgreg.c
48
index 0aea827..003eac5 100644
49
--- a/sys/bus/pci/i386/pci_cfgreg.c
50
+++ b/sys/bus/pci/i386/pci_cfgreg.c
51
@@ -52,7 +52,7 @@
52
 #include <machine/pmap.h>
53
 
54
 #if defined(__DragonFly__)
55
-#define mtx_init(a, b, c, d) spin_init(a)
56
+#define mtx_init(a, b, c, d) spin_init(a, b)
57
 #define mtx_lock_spin(a) spin_lock(a)
58
 #define mtx_unlock_spin(a) spin_unlock(a)
59
 #endif
60
diff --git a/sys/bus/pci/x86_64/pci_cfgreg.c b/sys/bus/pci/x86_64/pci_cfgreg.c
61
index 0d12a73..9da2043 100644
62
--- a/sys/bus/pci/x86_64/pci_cfgreg.c
63
+++ b/sys/bus/pci/x86_64/pci_cfgreg.c
64
@@ -89,7 +89,7 @@ pci_cfgregopen(void)
65
 
66
 	if (!inited) {
67
 		inited = 1;
68
-		spin_init(&pcicfg_spin);
69
+		spin_init(&pcicfg_spin, "pcicfg");
70
 	}
71
 
72
 	if (cfgmech != CFGMECH_NONE)
73
diff --git a/sys/dev/acpica/Osd/OsdSynch.c b/sys/dev/acpica/Osd/OsdSynch.c
74
index 863536a..2912004 100644
75
--- a/sys/dev/acpica/Osd/OsdSynch.c
76
+++ b/sys/dev/acpica/Osd/OsdSynch.c
77
@@ -95,7 +95,7 @@ AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
78
 
79
     as = kmalloc(sizeof(*as), M_ACPISEM, M_INTWAIT | M_ZERO);
80
 
81
-    spin_init(&as->as_spin);
82
+    spin_init(&as->as_spin, "AcpiOsSem");
83
     as->as_units = InitialUnits;
84
     as->as_maxunits = MaxUnits;
85
     as->as_pendings = as->as_resetting = as->as_timeouts = 0;
86
@@ -345,7 +345,7 @@ AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
87
     if (OutHandle == NULL)
88
 	return (AE_BAD_PARAMETER);
89
     spin = kmalloc(sizeof(*spin), M_ACPISEM, M_INTWAIT|M_ZERO);
90
-    spin_init(&spin->lock);
91
+    spin_init(&spin->lock, "AcpiOsLock");
92
 #ifdef ACPI_DEBUG_LOCKS
93
     spin->owner = NULL;
94
     spin->func = "";
95
diff --git a/sys/dev/crypto/aesni/aesni.c b/sys/dev/crypto/aesni/aesni.c
96
index a4919d5..3c4b7d2 100644
97
--- a/sys/dev/crypto/aesni/aesni.c
98
+++ b/sys/dev/crypto/aesni/aesni.c
99
@@ -92,7 +92,7 @@ aesni_attach(device_t dev)
100
 		return (ENOMEM);
101
 	}
102
 
103
-	spin_init(&sc->lock);
104
+	spin_init(&sc->lock, "aesniattach");
105
 	crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
106
 	crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
107
 	return (0);
108
diff --git a/sys/dev/crypto/glxsb/glxsb.c b/sys/dev/crypto/glxsb/glxsb.c
109
index 17f15d7..40f012f 100644
110
--- a/sys/dev/crypto/glxsb/glxsb.c
111
+++ b/sys/dev/crypto/glxsb/glxsb.c
112
@@ -508,8 +508,8 @@ glxsb_crypto_setup(struct glxsb_softc *sc)
113
 
114
 	TAILQ_INIT(&sc->sc_sessions);
115
 	sc->sc_sid = 1;
116
-	spin_init(&sc->sc_sessions_lock);
117
-	spin_init(&sc->sc_task_mtx);
118
+	spin_init(&sc->sc_sessions_lock, "glxsb_sessions");
119
+	spin_init(&sc->sc_task_mtx, "glxsb_task");
120
 
121
 	if (crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0) != 0)
122
 		goto crypto_fail;
123
diff --git a/sys/dev/crypto/padlock/padlock.c b/sys/dev/crypto/padlock/padlock.c
124
index 96e663e..b203cf2 100644
125
--- a/sys/dev/crypto/padlock/padlock.c
126
+++ b/sys/dev/crypto/padlock/padlock.c
127
@@ -126,7 +126,7 @@ padlock_attach(device_t dev)
128
 		return (ENOMEM);
129
 	}
130
 
131
-	spin_init(&sc->sc_sessions_lock);
132
+	spin_init(&sc->sc_sessions_lock, "padlock");
133
 	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
134
 	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
135
 	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
136
diff --git a/sys/dev/disk/nata/ata-all.c b/sys/dev/disk/nata/ata-all.c
137
index 5a84352..0bd005b 100644
138
--- a/sys/dev/disk/nata/ata-all.c
139
+++ b/sys/dev/disk/nata/ata-all.c
140
@@ -122,8 +122,8 @@ ata_attach(device_t dev)
141
     /* initialize the softc basics */
142
     ch->dev = dev;
143
     ch->state = ATA_IDLE;
144
-    spin_init(&ch->state_mtx);
145
-    spin_init(&ch->queue_mtx);
146
+    spin_init(&ch->state_mtx, "ataattach_state");
147
+    spin_init(&ch->queue_mtx, "ataattach_queue");
148
     ata_queue_init(ch);
149
 
150
     /* reset the controller HW, the channel and device(s) */
151
diff --git a/sys/dev/disk/nata/ata-chipset.c b/sys/dev/disk/nata/ata-chipset.c
152
index 287ab1e..3a1344f 100644
153
--- a/sys/dev/disk/nata/ata-chipset.c
154
+++ b/sys/dev/disk/nata/ata-chipset.c
155
@@ -3548,7 +3548,7 @@ ata_promise_chipinit(device_t dev)
156
 	    /* setup host packet controls */
157
 	    hpkt = kmalloc(sizeof(struct ata_promise_sx4),
158
 			  M_TEMP, M_INTWAIT | M_ZERO);
159
-	    spin_init(&hpkt->mtx);
160
+	    spin_init(&hpkt->mtx, "chipinit");
161
 	    TAILQ_INIT(&hpkt->queue);
162
 	    hpkt->busy = 0;
163
 	    device_set_ivars(dev, hpkt);
164
@@ -5815,7 +5815,7 @@ ata_serialize(device_t dev, int flags)
165
     if (!inited) {
166
 	serial = kmalloc(sizeof(struct ata_serialize),
167
 			      M_TEMP, M_INTWAIT | M_ZERO);
168
-	spin_init(&serial->locked_mtx);
169
+	spin_init(&serial->locked_mtx, "ataserialize");
170
 	serial->locked_ch = -1;
171
 	serial->restart_ch = -1;
172
 	device_set_ivars(ctlr->dev, serial);
173
diff --git a/sys/dev/disk/nata/ata-queue.c b/sys/dev/disk/nata/ata-queue.c
174
index ff7fda3..9ca13f2 100644
175
--- a/sys/dev/disk/nata/ata-queue.c
176
+++ b/sys/dev/disk/nata/ata-queue.c
177
@@ -92,7 +92,7 @@ ata_queue_request(struct ata_request *request)
178
     ch = device_get_softc(request->parent);
179
     callout_init_mp(&request->callout);	/* serialization done via state_mtx */
180
     if (!request->callback && !(request->flags & ATA_R_REQUEUE))
181
-	spin_init(&request->done);
182
+	spin_init(&request->done, "ataqueuerqdone");
183
 
184
     /* in ATA_STALL_QUEUE state we call HW directly */
185
     if ((ch->state & ATA_STALL_QUEUE) && (request->flags & ATA_R_CONTROL)) {
186
diff --git a/sys/dev/disk/nata/ata-raid.c b/sys/dev/disk/nata/ata-raid.c
187
index c32111f..ff4d909 100644
188
--- a/sys/dev/disk/nata/ata-raid.c
189
+++ b/sys/dev/disk/nata/ata-raid.c
190
@@ -138,7 +138,7 @@ ata_raid_attach(struct ar_softc *rdp, int writeback)
191
     char buffer[32];
192
     int disk;
193
 
194
-    spin_init(&rdp->lock);
195
+    spin_init(&rdp->lock, "ataraidattach");
196
     ata_raid_config_changed(rdp, writeback);
197
 
198
     /* sanitize arrays total_size % (width * interleave) == 0 */
199
@@ -498,7 +498,7 @@ ata_raid_strategy(struct dev_strategy_args *ap)
200
 				rebuild->dev = rdp->disks[this].dev;
201
 				rebuild->flags &= ~ATA_R_READ;
202
 				rebuild->flags |= ATA_R_WRITE;
203
-				spin_init(&composite->lock);
204
+				spin_init(&composite->lock, "ardfspare");
205
 				composite->residual = request->bytecount;
206
 				composite->rd_needed |= (1 << drv);
207
 				composite->wr_depend |= (1 << drv);
208
@@ -557,7 +557,7 @@ ata_raid_strategy(struct dev_strategy_args *ap)
209
 				      sizeof(struct ata_request));
210
 				mirror->this = this;
211
 				mirror->dev = rdp->disks[this].dev;
212
-				spin_init(&composite->lock);
213
+				spin_init(&composite->lock, "ardfonline");
214
 				composite->residual = request->bytecount;
215
 				composite->wr_needed |= (1 << drv);
216
 				composite->wr_needed |= (1 << this);
217
diff --git a/sys/dev/disk/nata/ata-usb.c b/sys/dev/disk/nata/ata-usb.c
218
index 478a536..f081196 100644
219
--- a/sys/dev/disk/nata/ata-usb.c
220
+++ b/sys/dev/disk/nata/ata-usb.c
221
@@ -223,7 +223,7 @@ atausb_attach(device_t dev)
222
     sc->timeout = 5000;
223
     sc->locked_ch = NULL;
224
     sc->restart_ch = NULL;
225
-    spin_init(&sc->locked_mtx); 
226
+    spin_init(&sc->locked_mtx, "atausbattach"); 
227
 
228
     id = usbd_get_interface_descriptor(sc->iface);
229
     switch (id->bInterfaceProtocol) {
230
@@ -880,8 +880,8 @@ ata_usbchannel_attach(device_t dev)
231
     ch->hw.end_transaction = ata_usbchannel_end_transaction;
232
     ch->hw.status = NULL;
233
     ch->hw.command = NULL;
234
-    spin_init(&ch->state_mtx);
235
-    spin_init(&ch->queue_mtx);
236
+    spin_init(&ch->state_mtx, "usbattach_state");
237
+    spin_init(&ch->queue_mtx, "usbattach_queue");
238
     ata_queue_init(ch);
239
 
240
     /* XXX SOS reset the controller HW, the channel and device(s) */
241
diff --git a/sys/dev/disk/nata/atapi-cam.c b/sys/dev/disk/nata/atapi-cam.c
242
index 8fadcfa..d522f22 100644
243
--- a/sys/dev/disk/nata/atapi-cam.c
244
+++ b/sys/dev/disk/nata/atapi-cam.c
245
@@ -195,7 +195,7 @@ atapi_cam_attach(device_t dev)
246
 	return ENOMEM;
247
     }
248
 
249
-    spin_init(&scp->state_lock);
250
+    spin_init(&scp->state_lock, "atapicamattach");
251
 
252
     scp->dev = dev;
253
     scp->parent = device_get_parent(dev);
254
diff --git a/sys/dev/drm/drm_dma.c b/sys/dev/drm/drm_dma.c
255
index 75c528a..651c15f 100644
256
--- a/sys/dev/drm/drm_dma.c
257
+++ b/sys/dev/drm/drm_dma.c
258
@@ -46,7 +46,7 @@ int drm_dma_setup(struct drm_device *dev)
259
 	if (dev->dma == NULL)
260
 		return ENOMEM;
261
 
262
-	spin_init(&dev->dma_lock);
263
+	spin_init(&dev->dma_lock, "drmdma_lock");
264
 
265
 	return 0;
266
 }
267
diff --git a/sys/dev/drm/drm_mm.c b/sys/dev/drm/drm_mm.c
268
index 26fd87f..7dcdf90 100644
269
--- a/sys/dev/drm/drm_mm.c
270
+++ b/sys/dev/drm/drm_mm.c
271
@@ -643,7 +643,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
272
 	INIT_LIST_HEAD(&mm->unused_nodes);
273
 	mm->num_unused = 0;
274
 	mm->scanned_blocks = 0;
275
-	spin_init(&mm->unused_lock);
276
+	spin_init(&mm->unused_lock, "drmmminit");
277
 
278
 	/* Clever trick to avoid a special case in the free hole tracking. */
279
 	INIT_LIST_HEAD(&mm->head_node.node_list);
280
diff --git a/sys/dev/drm/i915/i915_dma.c b/sys/dev/drm/i915/i915_dma.c
281
index b82a4fe..a073a2b 100644
282
--- a/sys/dev/drm/i915/i915_dma.c
283
+++ b/sys/dev/drm/i915/i915_dma.c
284
@@ -1438,8 +1438,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
285
 
286
 	lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
287
 	lockinit(&dev_priv->error_lock, "915err", 0, LK_CANRECURSE);
288
-	spin_init(&dev_priv->rps.lock);
289
-	spin_init(&dev_priv->dpio_lock);
290
+	spin_init(&dev_priv->rps.lock, "i915initrps");
291
+	spin_init(&dev_priv->dpio_lock, "i915initdpio");
292
 
293
 	lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
294
 
295
@@ -1565,7 +1565,7 @@ i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
296
 	i915_file_priv = kmalloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
297
 	    M_WAITOK | M_ZERO);
298
 
299
-	spin_init(&i915_file_priv->mm.lock);
300
+	spin_init(&i915_file_priv->mm.lock, "i915priv");
301
 	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
302
 	file_priv->driver_priv = i915_file_priv;
303
 
304
diff --git a/sys/dev/drm/include/linux/wait.h b/sys/dev/drm/include/linux/wait.h
305
index 886fa8f..b974077 100644
306
--- a/sys/dev/drm/include/linux/wait.h
307
+++ b/sys/dev/drm/include/linux/wait.h
308
@@ -37,7 +37,7 @@ typedef struct {
309
 static inline void
310
 init_waitqueue_head(wait_queue_head_t *eq)
311
 {
312
-	spin_init(&eq->lock);
313
+	spin_init(&eq->lock, "linux_waitqueue");
314
 }
315
 
316
 #define wake_up(eq)		wakeup_one(eq)
317
diff --git a/sys/dev/drm/radeon/radeon_device.c b/sys/dev/drm/radeon/radeon_device.c
318
index 6663847..4514025 100644
319
--- a/sys/dev/drm/radeon/radeon_device.c
320
+++ b/sys/dev/drm/radeon/radeon_device.c
321
@@ -1031,10 +1031,10 @@ int radeon_device_init(struct radeon_device *rdev,
322
 	lockinit(&rdev->dc_hw_i2c_mutex,
323
 		 "drm__radeon_device__dc_hw_i2c_mutex", 0, LK_CANRECURSE);
324
 	atomic_set(&rdev->ih.lock, 0);
325
-	spin_init(&rdev->gem.mutex);
326
+	spin_init(&rdev->gem.mutex, "radeon_gemmtx");
327
 	lockinit(&rdev->pm.mutex, "drm__radeon_device__pm__mutex", 0,
328
 		 LK_CANRECURSE);
329
-	spin_init(&rdev->gpu_clock_mutex);
330
+	spin_init(&rdev->gpu_clock_mutex, "radeon_clockmtx");
331
 	lockinit(&rdev->pm.mclk_lock, "drm__radeon_device__pm__mclk_lock", 0,
332
 		 LK_CANRECURSE);
333
 	lockinit(&rdev->exclusive_lock, "drm__radeon_device__exclusive_lock",
334
@@ -1101,7 +1101,7 @@ int radeon_device_init(struct radeon_device *rdev,
335
 
336
 	/* Registers mapping */
337
 	/* TODO: block userspace mapping of io register */
338
-	spin_init(&rdev->mmio_idx_lock);
339
+	spin_init(&rdev->mmio_idx_lock, "radeon_mpio");
340
 	rdev->rmmio_rid = PCIR_BAR(2);
341
 	rdev->rmmio = bus_alloc_resource_any(rdev->dev, SYS_RES_MEMORY,
342
 	    &rdev->rmmio_rid, RF_ACTIVE | RF_SHAREABLE);
343
diff --git a/sys/dev/drm/ttm/ttm_memory.c b/sys/dev/drm/ttm/ttm_memory.c
344
index bb7c37a..3b48ee1 100644
345
--- a/sys/dev/drm/ttm/ttm_memory.c
346
+++ b/sys/dev/drm/ttm/ttm_memory.c
347
@@ -253,7 +253,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
348
 	int i;
349
 	struct ttm_mem_zone *zone;
350
 
351
-	spin_init(&glob->spin);
352
+	spin_init(&glob->spin, "ttmemglob");
353
 	glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
354
 	    taskqueue_thread_enqueue, &glob->swap_queue);
355
 	taskqueue_start_threads(&glob->swap_queue, 1, 0, -1, "ttm swap");
356
diff --git a/sys/dev/misc/putter/putter.c b/sys/dev/misc/putter/putter.c
357
index 868115c..fffe062 100644
358
--- a/sys/dev/misc/putter/putter.c
359
+++ b/sys/dev/misc/putter/putter.c
360
@@ -170,7 +170,7 @@ int putterdebug = 0;
361
  */
362
 
363
 /* protects both the list and the contents of the list elements */
364
-static struct spinlock pi_mtx = SPINLOCK_INITIALIZER(&pi_mtx);
365
+static struct spinlock pi_mtx = SPINLOCK_INITIALIZER("pi_mtx", &pi_mtx);
366
 
367
 /*
368
  * fd routines, for cloner
369
diff --git a/sys/dev/netif/ath/ath_hal/ah_osdep.c b/sys/dev/netif/ath/ath_hal/ah_osdep.c
370
index e5b27dd..fa57d63 100644
371
--- a/sys/dev/netif/ath/ath_hal/ah_osdep.c
372
+++ b/sys/dev/netif/ath/ath_hal/ah_osdep.c
373
@@ -78,7 +78,7 @@
374
  * XXX This is a global lock for now; it should be pushed to
375
  * a per-device lock in some platform-independent fashion.
376
  */
377
-struct spinlock ah_regser_spin = SPINLOCK_INITIALIZER(ah_regser_spin);
378
+struct spinlock ah_regser_spin = SPINLOCK_INITIALIZER("ah_regser_spin", ah_regser_spin);
379
 
380
 extern	void ath_hal_printf(struct ath_hal *, const char*, ...)
381
 		__printflike(2,3);
382
diff --git a/sys/dev/raid/mrsas/mrsas.c b/sys/dev/raid/mrsas/mrsas.c
383
index a26d9da..1c6051a 100644
384
--- a/sys/dev/raid/mrsas/mrsas.c
385
+++ b/sys/dev/raid/mrsas/mrsas.c
386
@@ -776,7 +776,7 @@ static int mrsas_attach(device_t dev)
387
     lockinit(&sc->pci_lock,  "mrsas_pci_lock", 0, LK_CANRECURSE);
388
     lockinit(&sc->io_lock,  "mrsas_io_lock", 0, LK_CANRECURSE);
389
     lockinit(&sc->aen_lock,  "mrsas_aen_lock", 0, LK_CANRECURSE);
390
-    spin_init(&sc->ioctl_lock);
391
+    spin_init(&sc->ioctl_lock, "mrsasioctl");
392
     lockinit(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", 0,
393
 	LK_CANRECURSE);
394
     lockinit(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", 0,
395
diff --git a/sys/dev/raid/twa/tw_osl_freebsd.c b/sys/dev/raid/twa/tw_osl_freebsd.c
396
index 679fc3a..4c5f1ab 100644
397
--- a/sys/dev/raid/twa/tw_osl_freebsd.c
398
+++ b/sys/dev/raid/twa/tw_osl_freebsd.c
399
@@ -306,9 +306,9 @@ twa_attach(device_t dev)
400
 
401
 	/* Initialize the mutexes right here. */
402
 	sc->io_lock = &(sc->io_lock_handle);
403
-	spin_init(sc->io_lock);
404
+	spin_init(sc->io_lock, "twa_iolock");
405
 	sc->q_lock = &(sc->q_lock_handle);
406
-	spin_init(sc->q_lock);
407
+	spin_init(sc->q_lock, "twa_qlock");
408
 	sc->sim_lock = &(sc->sim_lock_handle);
409
 	lockinit(sc->sim_lock, "tw_osl_sim_lock", 0, LK_CANRECURSE);
410
 
411
diff --git a/sys/dev/raid/twa/tw_osl_inline.h b/sys/dev/raid/twa/tw_osl_inline.h
412
index 5a2d2e1..1fb926f 100644
413
--- a/sys/dev/raid/twa/tw_osl_inline.h
414
+++ b/sys/dev/raid/twa/tw_osl_inline.h
415
@@ -60,7 +60,7 @@
416
  * Return value:	None
417
  */
418
 #define tw_osl_init_lock(ctlr_handle, lock_name, lock)	\
419
-	spin_init(lock)
420
+	spin_init(lock, lock_name)
421
 
422
 
423
 
424
diff --git a/sys/emulation/linux/linux_futex.c b/sys/emulation/linux/linux_futex.c
425
index 93f7140..fd693f7 100644
426
--- a/sys/emulation/linux/linux_futex.c
427
+++ b/sys/emulation/linux/linux_futex.c
428
@@ -89,7 +89,7 @@ struct futex_list futex_list;
429
 #if 0
430
 #define FUTEX_LOCK(f)		spin_lock(&(f)->f_lck)
431
 #define FUTEX_UNLOCK(f)		spin_unlock(&(f)->f_lck)
432
-#define FUTEX_INIT(f)		spin_init(&(f)->f_lck)
433
+#define FUTEX_INIT(f)		spin_init(&(f)->f_lck, "futex")
434
 #define	FUTEX_SLEEP(f, id, flag, wm, timo)	ssleep((id), &(f)->f_lck, (flag), (wm), (timo))
435
 #endif
436
 
437
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
438
index 742c391..b1c20a3 100644
439
--- a/sys/kern/init_main.c
440
+++ b/sys/kern/init_main.c
441
@@ -164,7 +164,7 @@ mi_proc0init(struct globaldata *gd, struct user *proc0paddr)
442
 	lwkt_init_thread(&thread0, proc0paddr, LWKT_THREAD_STACK, 0, gd);
443
 	lwkt_set_comm(&thread0, "thread0");
444
 	RB_INIT(&proc0.p_lwp_tree);
445
-	spin_init(&proc0.p_spin);
446
+	spin_init(&proc0.p_spin, "iproc_proc0");
447
 	lwkt_token_init(&proc0.p_token, "iproc");
448
 	proc0.p_lasttid = 0;	/* +1 = next TID */
449
 	lwp_rb_tree_RB_INSERT(&proc0.p_lwp_tree, &lwp0);
450
@@ -173,7 +173,7 @@ mi_proc0init(struct globaldata *gd, struct user *proc0paddr)
451
 	proc0.p_usched = usched_init();
452
 	CPUMASK_ASSALLONES(lwp0.lwp_cpumask);
453
 	lwkt_token_init(&lwp0.lwp_token, "lwp_token");
454
-	spin_init(&lwp0.lwp_spin);
455
+	spin_init(&lwp0.lwp_spin, "iproc_lwp0");
456
 	varsymset_init(&proc0.p_varsymset, NULL);
457
 	thread0.td_flags |= TDF_RUNNING;
458
 	thread0.td_proc = &proc0;
459
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
460
index 8ec0c3e..a01344d 100644
461
--- a/sys/kern/kern_condvar.c
462
+++ b/sys/kern/kern_condvar.c
463
@@ -8,7 +8,7 @@ cv_init(struct cv *c, const char *desc)
464
 {
465
 	c->cv_desc = desc;
466
 	c->cv_waiters = 0;
467
-	spin_init(&c->cv_lock);
468
+	spin_init(&c->cv_lock, "cvinit");
469
 }
470
 
471
 void
472
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
473
index 5925185..7b857e4 100644
474
--- a/sys/kern/kern_descrip.c
475
+++ b/sys/kern/kern_descrip.c
476
@@ -126,7 +126,7 @@ static struct dev_ops fildesc_ops = {
477
  * Descriptor management.
478
  */
479
 static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead);
480
-static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin);
481
+static struct spinlock filehead_spin = SPINLOCK_INITIALIZER("filehead_spin", &filehead_spin);
482
 static int nfiles;		/* actual number of open files */
483
 extern int cmask;	
484
 
485
@@ -1551,7 +1551,7 @@ falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
486
 	 * Allocate a new file descriptor.
487
 	 */
488
 	fp = kmalloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
489
-	spin_init(&fp->f_spin);
490
+	spin_init(&fp->f_spin, "falloc");
491
 	SLIST_INIT(&fp->f_klist);
492
 	fp->f_count = 1;
493
 	fp->f_ops = &badfileops;
494
@@ -1774,7 +1774,7 @@ fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
495
 	fdp0->fd_files = fdp0->fd_builtin_files;
496
 	fdp0->fd_nfiles = NDFILE;
497
 	fdp0->fd_lastfile = -1;
498
-	spin_init(&fdp0->fd_spin);
499
+	spin_init(&fdp0->fd_spin, "fdinitbootstrap");
500
 }
501
 
502
 /*
503
@@ -1818,7 +1818,7 @@ fdinit(struct proc *p)
504
 	newfdp->fd_files = newfdp->fd_builtin_files;
505
 	newfdp->fd_nfiles = NDFILE;
506
 	newfdp->fd_lastfile = -1;
507
-	spin_init(&newfdp->fd_spin);
508
+	spin_init(&newfdp->fd_spin, "fdinit");
509
 
510
 	return (newfdp);
511
 }
512
@@ -1934,7 +1934,7 @@ again:
513
 	newfdp->fd_lastfile = fdp->fd_lastfile;
514
 	newfdp->fd_freefile = fdp->fd_freefile;
515
 	newfdp->fd_cmask = fdp->fd_cmask;
516
-	spin_init(&newfdp->fd_spin);
517
+	spin_init(&newfdp->fd_spin, "fdcopy");
518
 
519
 	/*
520
 	 * Copy the descriptor table through (i).  This also copies the
521
diff --git a/sys/kern/kern_environment.c b/sys/kern/kern_environment.c
522
index abda37d..094e19d 100644
523
--- a/sys/kern/kern_environment.c
524
+++ b/sys/kern/kern_environment.c
525
@@ -479,7 +479,7 @@ kenv_init(void *dummy)
526
 	}
527
 	kenv_dynp[i] = NULL;
528
 	
529
-	spin_init(&kenv_dynlock);
530
+	spin_init(&kenv_dynlock, "kenvdynlock");
531
 	kenv_isdynamic = 1;
532
 }
533
 SYSINIT(kenv, SI_BOOT1_POST, SI_ORDER_ANY, kenv_init, NULL);
534
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
535
index 8065ccf4..b5fbc4b 100644
536
--- a/sys/kern/kern_fork.c
537
+++ b/sys/kern/kern_fork.c
538
@@ -383,7 +383,7 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
539
 	p2->p_stat = SIDL;
540
 
541
 	RB_INIT(&p2->p_lwp_tree);
542
-	spin_init(&p2->p_spin);
543
+	spin_init(&p2->p_spin, "procfork1");
544
 	lwkt_token_init(&p2->p_token, "proc");
545
 	lwkt_gettoken(&p2->p_token);
546
 
547
@@ -652,7 +652,7 @@ lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
548
 	crit_exit();
549
 	CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
550
 	lwkt_token_init(&lp->lwp_token, "lwp_token");
551
-	spin_init(&lp->lwp_spin);
552
+	spin_init(&lp->lwp_spin, "lwptoken");
553
 
554
 	/*
555
 	 * Assign the thread to the current cpu to begin with so we
556
diff --git a/sys/kern/kern_ktr.c b/sys/kern/kern_ktr.c
557
index 5cc1e79..21c144f 100644
558
--- a/sys/kern/kern_ktr.c
559
+++ b/sys/kern/kern_ktr.c
560
@@ -274,7 +274,7 @@ ktr_resync_callback(void *dummy __unused)
561
 	if (ktr_testspincnt) {
562
 		struct spinlock spin;
563
 
564
-		spin_init(&spin);
565
+		spin_init(&spin, "ktrresync");
566
 		spin_lock(&spin);
567
 		spin_unlock(&spin);
568
 		logtest_noargs(spin_beg);
569
diff --git a/sys/kern/kern_nrandom.c b/sys/kern/kern_nrandom.c
570
index 944b5f7..0b1408f 100644
571
--- a/sys/kern/kern_nrandom.c
572
+++ b/sys/kern/kern_nrandom.c
573
@@ -468,7 +468,7 @@ rand_initialize(void)
574
 #endif
575
 
576
 
577
-	spin_init(&rand_spin);
578
+	spin_init(&rand_spin, "randinit");
579
 
580
 	/* Initialize IBAA. */
581
 	IBAA_Init();
582
diff --git a/sys/kern/kern_objcache.c b/sys/kern/kern_objcache.c
583
index a678cb2..6ac88ff 100644
584
--- a/sys/kern/kern_objcache.c
585
+++ b/sys/kern/kern_objcache.c
586
@@ -226,7 +226,7 @@ objcache_create(const char *name, int cluster_limit, int nom_cache,
587
 	 */
588
 	depot = &oc->depot[0];
589
 
590
-	spin_init(&depot->spin);
591
+	spin_init(&depot->spin, "objcachedepot");
592
 	SLIST_INIT(&depot->fullmagazines);
593
 	SLIST_INIT(&depot->emptymagazines);
594
 
595
@@ -989,7 +989,7 @@ objcache_timer(void *dummy)
596
 static void
597
 objcache_init(void)
598
 {
599
-	spin_init(&objcachelist_spin);
600
+	spin_init(&objcachelist_spin, "objcachelist");
601
 
602
 	magazine_capmin = mag_capacity_align(MAGAZINE_CAPACITY_MIN);
603
 	magazine_capmax = mag_capacity_align(MAGAZINE_CAPACITY_MAX);
604
diff --git a/sys/kern/kern_plimit.c b/sys/kern/kern_plimit.c
605
index 4bcc2db..d4d702a 100644
606
--- a/sys/kern/kern_plimit.c
607
+++ b/sys/kern/kern_plimit.c
608
@@ -111,7 +111,7 @@ plimit_init0(struct plimit *limit)
609
 	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
610
 	limit->p_cpulimit = RLIM_INFINITY;
611
 	limit->p_refcnt = 1;
612
-	spin_init(&limit->p_spin);
613
+	spin_init(&limit->p_spin, "plimitinit");
614
 }
615
 
616
 /*
617
@@ -515,7 +515,7 @@ plimit_copy(struct plimit *olimit, struct plimit *nlimit)
618
 {
619
 	*nlimit = *olimit;
620
 
621
-	spin_init(&nlimit->p_spin);
622
+	spin_init(&nlimit->p_spin, "plimitcopy");
623
 	nlimit->p_refcnt = 1;
624
 	nlimit->p_exclusive = 0;
625
 }
626
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
627
index 0d90ef0..19d3e68 100644
628
--- a/sys/kern/kern_resource.c
629
+++ b/sys/kern/kern_resource.c
630
@@ -915,7 +915,7 @@ ruadd(struct rusage *ru, struct rusage *ru2)
631
 void
632
 uihashinit(void)
633
 {
634
-	spin_init(&uihash_lock);
635
+	spin_init(&uihash_lock, "uihashinit");
636
 	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
637
 }
638
 
639
@@ -957,7 +957,7 @@ uicreate(uid_t uid)
640
 	/*
641
 	 * Initialize structure and enter it into the hash table
642
 	 */
643
-	spin_init(&uip->ui_lock);
644
+	spin_init(&uip->ui_lock, "uicreate");
645
 	uip->ui_uid = uid;
646
 	uip->ui_ref = 1;	/* we're returning a ref */
647
 	varsymset_init(&uip->ui_varsymset, NULL);
648
diff --git a/sys/kern/kern_sensors.c b/sys/kern/kern_sensors.c
649
index 7d29e81..c2fe087 100644
650
--- a/sys/kern/kern_sensors.c
651
+++ b/sys/kern/kern_sensors.c
652
@@ -38,7 +38,7 @@
653
 
654
 static int		sensor_task_lock_inited = 0;
655
 static struct lock	sensor_task_lock;
656
-static struct spinlock	sensor_dev_lock = SPINLOCK_INITIALIZER(sensor_dev_lock);
657
+static struct spinlock	sensor_dev_lock = SPINLOCK_INITIALIZER("sensor_dev_lock", sensor_dev_lock);
658
 
659
 int			sensordev_count = 0;
660
 SLIST_HEAD(, ksensordev) sensordev_list = SLIST_HEAD_INITIALIZER(sensordev_list);
661
diff --git a/sys/kern/kern_spinlock.c b/sys/kern/kern_spinlock.c
662
index a2764d8..4027cd0 100644
663
--- a/sys/kern/kern_spinlock.c
664
+++ b/sys/kern/kern_spinlock.c
665
@@ -70,7 +70,7 @@
666
 #include <pthread.h>
667
 #endif
668
 
669
-struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin);
670
+struct spinlock pmap_spin = SPINLOCK_INITIALIZER("pmap_spin", pmap_spin);
671
 
672
 struct indefinite_info {
673
 	sysclock_t	base;
674
@@ -199,6 +199,7 @@ _spin_lock_contested(struct spinlock *spin, const char *ident)
675
 	atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
676
 
677
 #ifdef DEBUG_LOCKS_LATENCY
678
+        kprintf("spinlock %s : _spin_lock_contested", spin->descr);
679
 	long j;
680
 	for (j = spinlocks_add_latency; j > 0; --j)
681
 		cpu_ccfence();
682
@@ -433,7 +434,7 @@ sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
683
 	 * Indefinite wait test
684
 	 */
685
 	if (value == 1) {
686
-		spin_init(&spin);
687
+		spin_init(&spin, "sysctllock");
688
 		spin_lock(&spin);	/* force an indefinite wait */
689
 		spin_lock_test_mode = 1;
690
 		spin_lock(&spin);
691
@@ -448,7 +449,7 @@ sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
692
 	if (value == 2) {
693
 		globaldata_t gd = mycpu;
694
 
695
-		spin_init(&spin);
696
+		spin_init(&spin, "sysctllocktest");
697
 		for (i = spin_test_count; i > 0; --i) {
698
 		    _spin_lock_quick(gd, &spin, "test");
699
 		    spin_unlock_quick(gd, &spin);
700
diff --git a/sys/kern/kern_sysref.c b/sys/kern/kern_sysref.c
701
index 97aed42..f9811d6 100644
702
--- a/sys/kern/kern_sysref.c
703
+++ b/sys/kern/kern_sysref.c
704
@@ -80,7 +80,7 @@ sysrefbootinit(void *dummy __unused)
705
 
706
 	for (i = 0; i < ncpus; ++i) {
707
 		sa = &sysref_array[i];
708
-		spin_init(&sa->spin);
709
+		spin_init(&sa->spin, "sysrefbootinit");
710
 		RB_INIT(&sa->rbtree);
711
 	}
712
 }
713
diff --git a/sys/kern/kern_wdog.c b/sys/kern/kern_wdog.c
714
index 129407f..ef3f4c6 100644
715
--- a/sys/kern/kern_wdog.c
716
+++ b/sys/kern/kern_wdog.c
717
@@ -207,7 +207,7 @@ static struct dev_ops wdog_ops = {
718
 static void
719
 wdog_init(void)
720
 {
721
-	spin_init(&wdogmtx);
722
+	spin_init(&wdogmtx, "wdog");
723
 	make_dev(&wdog_ops, 0,
724
 	    UID_ROOT, GID_WHEEL, 0600, "wdog");
725
 	callout_init_mp(&wdog_callout);
726
diff --git a/sys/kern/lwkt_msgport.c b/sys/kern/lwkt_msgport.c
727
index 36c08a7..48943f3 100644
728
--- a/sys/kern/lwkt_msgport.c
729
+++ b/sys/kern/lwkt_msgport.c
730
@@ -370,7 +370,7 @@ lwkt_initport_spin(lwkt_port_t port, thread_t td, boolean_t fixed_cpuid)
731
 		   lwkt_spin_replyport,
732
 		   dmsgfn,
733
 		   pportfn_oncpu);
734
-    spin_init(&port->mpu_spin);
735
+    spin_init(&port->mpu_spin, "lwktinitport");
736
     port->mpu_td = td;
737
     if (fixed_cpuid)
738
 	port->mp_cpuid = td->td_gd->gd_cpuid;
739
diff --git a/sys/kern/lwkt_token.c b/sys/kern/lwkt_token.c
740
index fdec0d0..44773a4 100644
741
--- a/sys/kern/lwkt_token.c
742
+++ b/sys/kern/lwkt_token.c
743
@@ -87,7 +87,7 @@ extern int lwkt_sched_debug;
744
 #endif
745
 
746
 static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];
747
-struct spinlock		tok_debug_spin = SPINLOCK_INITIALIZER(&tok_debug_spin);
748
+struct spinlock		tok_debug_spin = SPINLOCK_INITIALIZER("tok_debug_spin", &tok_debug_spin);
749
 
750
 #define TOKEN_STRING	"REF=%p TOK=%p TD=%p"
751
 #define TOKEN_ARGS	lwkt_tokref_t ref, lwkt_token_t tok, struct thread *td
752
diff --git a/sys/kern/subr_csprng.c b/sys/kern/subr_csprng.c
753
index 0af44e3..02380de 100644
754
--- a/sys/kern/subr_csprng.c
755
+++ b/sys/kern/subr_csprng.c
756
@@ -52,7 +52,7 @@
757
 
758
 /* Lock macros */
759
 #define POOL_LOCK_INIT(pool) \
760
-    spin_init(&(pool)->lock)
761
+    spin_init(&(pool)->lock, "csprng_poollock")
762
 
763
 #define POOL_LOCK(pool)      \
764
     spin_lock(&pool->lock)
765
@@ -65,7 +65,7 @@
766
 
767
 
768
 #define STATE_LOCK_INIT(state)  \
769
-    spin_init(&state->lock)
770
+    spin_init(&state->lock, "csprng_statelock")
771
 
772
 #define STATE_LOCK(state)	\
773
     spin_lock(&state->lock)
774
@@ -329,8 +329,10 @@ csprng_add_entropy(struct csprng_state *state, int src_id,
775
 		 * of spinning until we get it, return if we
776
 		 * can't get a hold of the lock right now.
777
 		 */
778
-		if (!POOL_TRYLOCK(pool))
779
+		if (!POOL_TRYLOCK(pool)) {
780
+                        kprintf("POOL_TRYLOCK %s failed", pool->lock.descr);
781
 			return -1;
782
+                }
783
 	} else {
784
 		POOL_LOCK(pool);
785
 	}
786
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
787
index f1a5570..273bd07 100644
788
--- a/sys/kern/subr_prf.c
789
+++ b/sys/kern/subr_prf.c
790
@@ -103,7 +103,7 @@ static void  snprintf_func (int ch, void *arg);
791
 
792
 static int consintr = 1;		/* Ok to handle console interrupts? */
793
 static int msgbufmapped;		/* Set when safe to use msgbuf */
794
-static struct spinlock cons_spin = SPINLOCK_INITIALIZER(cons_spin);
795
+static struct spinlock cons_spin = SPINLOCK_INITIALIZER("cons_spin", cons_spin);
796
 static thread_t constty_td = NULL;
797
 
798
 int msgbuftrigger;
799
@@ -892,7 +892,7 @@ done:
800
 void
801
 kvcreinitspin(void)
802
 {
803
-	spin_init(&cons_spin);
804
+	spin_init(&cons_spin, "kvcre");
805
 	atomic_clear_long(&mycpu->gd_flags, GDF_KPRINTF);
806
 }
807
 
808
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
809
index 31e60e3..2b99148 100644
810
--- a/sys/kern/subr_taskqueue.c
811
+++ b/sys/kern/subr_taskqueue.c
812
@@ -84,7 +84,7 @@ static void taskqueue_run(struct taskqueue *queue, int lock_held);
813
 static __inline void
814
 TQ_LOCK_INIT(struct taskqueue *tq)
815
 {
816
-	spin_init(&tq->tq_lock);
817
+	spin_init(&tq->tq_lock, "tqlock");
818
 }
819
 
820
 static __inline void
821
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
822
index 18ce8bd..2165ccc 100644
823
--- a/sys/kern/uipc_mbuf.c
824
+++ b/sys/kern/uipc_mbuf.c
825
@@ -134,7 +134,7 @@ mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
826
 RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
827
 
828
 struct mbuf_rb_tree	mbuf_track_root;
829
-static struct spinlock	mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);
830
+static struct spinlock	mbuf_track_spin = SPINLOCK_INITIALIZER("mbuf_track_spin", mbuf_track_spin);
831
 
832
 static void
833
 mbuftrack(struct mbuf *m)
834
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
835
index e277295..3e56b35 100644
836
--- a/sys/kern/uipc_socket.c
837
+++ b/sys/kern/uipc_socket.c
838
@@ -180,7 +180,7 @@ soalloc(int waitok, struct protosw *pr)
839
 		TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
840
 		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
841
 		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
842
-		spin_init(&so->so_rcvd_spin);
843
+		spin_init(&so->so_rcvd_spin, "soalloc");
844
 		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
845
 		    MSGF_DROPABLE | MSGF_PRIORITY,
846
 		    so->so_proto->pr_usrreqs->pru_rcvd);
847
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
848
index 1129791..404cec8 100644
849
--- a/sys/kern/uipc_usrreq.c
850
+++ b/sys/kern/uipc_usrreq.c
851
@@ -83,7 +83,7 @@ static unp_defdiscard_t unp_defdiscard_base;
852
  */
853
 static struct	sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
854
 static ino_t	unp_ino = 1;		/* prototype for fake inode numbers */
855
-static struct spinlock unp_ino_spin = SPINLOCK_INITIALIZER(&unp_ino_spin);
856
+static struct spinlock unp_ino_spin = SPINLOCK_INITIALIZER("unp_ino_spin", &unp_ino_spin);
857
 
858
 static int     unp_attach (struct socket *, struct pru_attach_info *);
859
 static void    unp_detach (struct unpcb *);
860
@@ -787,7 +787,7 @@ static u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
861
 static u_long	unpdg_recvspace = 4*1024;
862
 
863
 static int	unp_rights;			/* file descriptors in flight */
864
-static struct spinlock unp_spin = SPINLOCK_INITIALIZER(&unp_spin);
865
+static struct spinlock unp_spin = SPINLOCK_INITIALIZER("unp_spin", &unp_spin);
866
 
867
 SYSCTL_DECL(_net_local_seqpacket);
868
 SYSCTL_DECL(_net_local_stream);
869
@@ -1413,7 +1413,7 @@ unp_init(void)
870
 {
871
 	LIST_INIT(&unp_dhead);
872
 	LIST_INIT(&unp_shead);
873
-	spin_init(&unp_spin);
874
+	spin_init(&unp_spin, "unpinit");
875
 }
876
 
877
 static int
878
diff --git a/sys/kern/usched_bsd4.c b/sys/kern/usched_bsd4.c
879
index 5856e42..b63574d 100644
880
--- a/sys/kern/usched_bsd4.c
881
+++ b/sys/kern/usched_bsd4.c
882
@@ -298,7 +298,7 @@ bsd4_rqinit(void *dummy)
883
 {
884
 	int i;
885
 
886
-	spin_init(&bsd4_spin);
887
+	spin_init(&bsd4_spin, "bsd4rq");
888
 	for (i = 0; i < NQS; i++) {
889
 		TAILQ_INIT(&bsd4_queues[i]);
890
 		TAILQ_INIT(&bsd4_rtqueues[i]);
891
diff --git a/sys/kern/usched_dfly.c b/sys/kern/usched_dfly.c
892
index 2b03f56..8386a2a 100644
893
--- a/sys/kern/usched_dfly.c
894
+++ b/sys/kern/usched_dfly.c
895
@@ -816,6 +816,7 @@ dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
896
 				if (nlp == NULL)
897
 					spin_unlock(&dd->spin);
898
 			} else {
899
+				kprintf("dfly_schedulerclock spin_trylock %s failed\n", dd->spin.descr);
900
 				spin_unlock(&dd->spin);
901
 				nlp = NULL;
902
 			}
903
@@ -2071,9 +2072,15 @@ dfly_helper_thread(void *dummy)
904
 		 *	 4 cores).
905
 		 */
906
 		rdd = dfly_choose_worst_queue(dd);
907
-		if (rdd && spin_trylock(&rdd->spin)) {
908
+		if (rdd) {
909
+                    if (spin_trylock(&rdd->spin)) {
910
 			nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
911
 			spin_unlock(&rdd->spin);
912
+                    } else {
913
+                        kprintf("choose worst queue rdd spin_trylock %s failed\n",
914
+                                rdd->spin.descr);
915
+                        nlp = NULL;
916
+                    }
917
 		} else {
918
 			nlp = NULL;
919
 		}
920
@@ -2160,7 +2167,7 @@ usched_dfly_cpu_init(void)
921
 		if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
922
 		    continue;
923
 
924
-		spin_init(&dd->spin);
925
+		spin_init(&dd->spin, "uschedcpuinit");
926
 		dd->cpunode = get_cpu_node_by_cpuid(i);
927
 		dd->cpuid = i;
928
 		CPUMASK_ASSBIT(dd->cpumask, i);
929
diff --git a/sys/kern/usched_dummy.c b/sys/kern/usched_dummy.c
930
index e747c8f..cb40224 100644
931
--- a/sys/kern/usched_dummy.c
932
+++ b/sys/kern/usched_dummy.c
933
@@ -120,7 +120,7 @@ static void
934
 dummyinit(void *dummy)
935
 {
936
 	TAILQ_INIT(&dummy_runq);
937
-	spin_init(&dummy_spin);
938
+	spin_init(&dummy_spin, "uscheddummy");
939
 	ATOMIC_CPUMASK_NANDBIT(dummy_curprocmask, 0);
940
 }
941
 SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, dummyinit, NULL)
942
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
943
index cb0f6f9..45ef3db 100644
944
--- a/sys/kern/vfs_bio.c
945
+++ b/sys/kern/vfs_bio.c
946
@@ -635,7 +635,7 @@ bufinit(void *dummy __unused)
947
 	/* next, make a null set of free lists */
948
 	for (i = 0; i < ncpus; ++i) {
949
 		pcpu = &bufpcpu[i];
950
-		spin_init(&pcpu->spin);
951
+		spin_init(&pcpu->spin, "bufinit");
952
 		for (j = 0; j < BUFFER_QUEUES; j++)
953
 			TAILQ_INIT(&pcpu->bufqueues[j]);
954
 	}
955
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
956
index bc4c00b..bb5fa7f 100644
957
--- a/sys/kern/vfs_cache.c
958
+++ b/sys/kern/vfs_cache.c
959
@@ -3627,16 +3627,16 @@ nchinit(void)
960
 		gd->gd_nchstats = &nchstats[i];
961
 	}
962
 	TAILQ_INIT(&ncneglist);
963
-	spin_init(&ncspin);
964
+	spin_init(&ncspin, "nchinit");
965
 	nchashtbl = hashinit_ext(desiredvnodes / 2,
966
 				 sizeof(struct nchash_head),
967
 				 M_VFSCACHE, &nchash);
968
 	for (i = 0; i <= (int)nchash; ++i) {
969
 		LIST_INIT(&nchashtbl[i].list);
970
-		spin_init(&nchashtbl[i].spin);
971
+		spin_init(&nchashtbl[i].spin, "nchinit_hash");
972
 	}
973
 	for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
974
-		spin_init(&ncmount_cache[i].spin);
975
+		spin_init(&ncmount_cache[i].spin, "nchinit_cache");
976
 	nclockwarn = 5 * hz;
977
 }
978
 
979
diff --git a/sys/kern/vfs_lock.c b/sys/kern/vfs_lock.c
980
index 95f1e05..cc33d6c 100644
981
--- a/sys/kern/vfs_lock.c
982
+++ b/sys/kern/vfs_lock.c
983
@@ -86,7 +86,7 @@ TAILQ_HEAD(freelst, vnode);
984
 static struct freelst	vnode_active_list;
985
 static struct freelst	vnode_inactive_list;
986
 static struct vnode	vnode_active_rover;
987
-static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);
988
+static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER("vfs_spin", vfs_spin);
989
 
990
 int  activevnodes = 0;
991
 SYSCTL_INT(_debug, OID_AUTO, activevnodes, CTLFLAG_RD,
992
@@ -115,7 +115,7 @@ vfs_lock_init(void)
993
 	TAILQ_INIT(&vnode_inactive_list);
994
 	TAILQ_INIT(&vnode_active_list);
995
 	TAILQ_INSERT_TAIL(&vnode_active_list, &vnode_active_rover, v_list);
996
-	spin_init(&vfs_spin);
997
+	spin_init(&vfs_spin, "vfslock");
998
 	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
999
 }
1000
 
1001
@@ -875,7 +875,7 @@ allocvnode(int lktimeout, int lkflags)
1002
 	RB_INIT(&vp->v_rbclean_tree);
1003
 	RB_INIT(&vp->v_rbdirty_tree);
1004
 	RB_INIT(&vp->v_rbhash_tree);
1005
-	spin_init(&vp->v_spin);
1006
+	spin_init(&vp->v_spin, "allocvnode");
1007
 
1008
 	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
1009
 	atomic_add_int(&numvnodes, 1);
1010
diff --git a/sys/kern/vfs_quota.c b/sys/kern/vfs_quota.c
1011
index 1467944..a5fadbe 100644
1012
--- a/sys/kern/vfs_quota.c
1013
+++ b/sys/kern/vfs_quota.c
1014
@@ -125,7 +125,7 @@ vq_init(struct mount *mp)
1015
 	/* initialize the rb trees */
1016
 	RB_INIT(&mp->mnt_acct.ac_uroot);
1017
 	RB_INIT(&mp->mnt_acct.ac_groot);
1018
-	spin_init(&mp->mnt_acct.ac_spin);
1019
+	spin_init(&mp->mnt_acct.ac_spin, "vqinit");
1020
 
1021
 	mp->mnt_acct.ac_bytes = 0;
1022
 
1023
diff --git a/sys/net/netmap/netmap_mbq.c b/sys/net/netmap/netmap_mbq.c
1024
index 62abe4d..88de31d 100644
1025
--- a/sys/net/netmap/netmap_mbq.c
1026
+++ b/sys/net/netmap/netmap_mbq.c
1027
@@ -40,7 +40,7 @@ static inline void __mbq_init(struct mbq *q)
1028
 
1029
 void mbq_safe_init(struct mbq *q)
1030
 {
1031
-    spin_init(&q->lock);
1032
+    spin_init(&q->lock, "mbq");
1033
     __mbq_init(q);
1034
 }
1035
 
1036
diff --git a/sys/net/pf/pf.c b/sys/net/pf/pf.c
1037
index b49cf47..8dc9f73 100644
1038
--- a/sys/net/pf/pf.c
1039
+++ b/sys/net/pf/pf.c
1040
@@ -117,7 +117,7 @@ extern int debug_pfugidhack;
1041
 struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);
1042
 struct lwkt_token pf_gtoken = LWKT_TOKEN_INITIALIZER(pf_gtoken);
1043
 #if __SIZEOF_LONG__ != 8
1044
-struct spinlock pf_spin = SPINLOCK_INITIALIZER(pf_spin);
1045
+struct spinlock pf_spin = SPINLOCK_INITIALIZER("pf_spin", pf_spin);
1046
 #endif
1047
 
1048
 #define DPFPRINTF(n, x)	if (pf_status.debug >= (n)) kprintf x
1049
diff --git a/sys/netproto/smb/smb_subr.h b/sys/netproto/smb/smb_subr.h
1050
index 5f0cbc7..fdd45d1 100644
1051
--- a/sys/netproto/smb/smb_subr.h
1052
+++ b/sys/netproto/smb/smb_subr.h
1053
@@ -82,7 +82,7 @@ void m_dumpm(struct mbuf *m);
1054
 
1055
 #define	lockdestroy(lock)
1056
 #define	smb_slock			spinlock
1057
-#define	smb_sl_init(sl, desc)		spin_init(sl)
1058
+#define	smb_sl_init(sl, desc)		spin_init(sl, desc)
1059
 #define	smb_sl_destroy(sl)
1060
 #define	smb_sl_lock(sl)			spin_lock(sl)
1061
 #define	smb_sl_unlock(sl)		spin_unlock(sl)
1062
diff --git a/sys/opencrypto/cryptosoft.c b/sys/opencrypto/cryptosoft.c
1063
index 23623a7..85a2f2b 100644
1064
--- a/sys/opencrypto/cryptosoft.c
1065
+++ b/sys/opencrypto/cryptosoft.c
1066
@@ -58,7 +58,7 @@ static	struct swcr_data **swcr_sessions = NULL;
1067
 static	u_int32_t swcr_sesnum;
1068
 static	u_int32_t swcr_minsesnum = 1;
1069
 
1070
-static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin);
1071
+static struct spinlock swcr_spin = SPINLOCK_INITIALIZER("swcr_spin", swcr_spin);
1072
 
1073
 u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
1074
 u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
1075
diff --git a/sys/platform/pc32/i386/busdma_machdep.c b/sys/platform/pc32/i386/busdma_machdep.c
1076
index cb50f11..dcc6595 100644
1077
--- a/sys/platform/pc32/i386/busdma_machdep.c
1078
+++ b/sys/platform/pc32/i386/busdma_machdep.c
1079
@@ -154,7 +154,7 @@ struct bus_dmamap {
1080
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
1081
 	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);
1082
 static struct spinlock bounce_map_list_spin =
1083
-	SPINLOCK_INITIALIZER(&bounce_map_list_spin);
1084
+	SPINLOCK_INITIALIZER("bounce_map_list_spin", &bounce_map_list_spin);
1085
 
1086
 static struct bus_dmamap nobounce_dmamap;
1087
 
1088
@@ -258,7 +258,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
1089
 
1090
 	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);
1091
 
1092
-	spin_init(&newtag->spin);
1093
+	spin_init(&newtag->spin, "busdmacreate");
1094
 	newtag->parent = parent;
1095
 	newtag->alignment = alignment;
1096
 	newtag->boundary = boundary;
1097
@@ -1103,7 +1103,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
1098
 	}
1099
 	bz = new_bz;
1100
 
1101
-	spin_init(&bz->spin);
1102
+	spin_init(&bz->spin, "allocbouncezone");
1103
 	STAILQ_INIT(&bz->bounce_page_list);
1104
 	STAILQ_INIT(&bz->bounce_map_waitinglist);
1105
 	bz->free_bpages = 0;
1106
diff --git a/sys/platform/pc32/i386/pmap.c b/sys/platform/pc32/i386/pmap.c
1107
index bee854a..cad74b2 100644
1108
--- a/sys/platform/pc32/i386/pmap.c
1109
+++ b/sys/platform/pc32/i386/pmap.c
1110
@@ -417,7 +417,7 @@ pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
1111
 	kernel_pmap.pm_pteobj = NULL;	/* see pmap_init */
1112
 	TAILQ_INIT(&kernel_pmap.pm_pvlist);
1113
 	TAILQ_INIT(&kernel_pmap.pm_pvlist_free);
1114
-	spin_init(&kernel_pmap.pm_spin);
1115
+	spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
1116
 	lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
1117
 	nkpt = NKPT;
1118
 
1119
@@ -1301,7 +1301,7 @@ pmap_pinit0(struct pmap *pmap)
1120
 	pmap->pm_ptphint = NULL;
1121
 	TAILQ_INIT(&pmap->pm_pvlist);
1122
 	TAILQ_INIT(&pmap->pm_pvlist_free);
1123
-	spin_init(&pmap->pm_spin);
1124
+	spin_init(&pmap->pm_spin, "pmapinit0");
1125
 	lwkt_token_init(&pmap->pm_token, "pmap_tok");
1126
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1127
 }
1128
@@ -1360,7 +1360,7 @@ pmap_pinit(struct pmap *pmap)
1129
 	pmap->pm_ptphint = NULL;
1130
 	TAILQ_INIT(&pmap->pm_pvlist);
1131
 	TAILQ_INIT(&pmap->pm_pvlist_free);
1132
-	spin_init(&pmap->pm_spin);
1133
+	spin_init(&pmap->pm_spin, "pmapinit");
1134
 	lwkt_token_init(&pmap->pm_token, "pmap_tok");
1135
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1136
 	pmap->pm_stats.resident_count = 1;
1137
diff --git a/sys/platform/pc64/x86_64/busdma_machdep.c b/sys/platform/pc64/x86_64/busdma_machdep.c
1138
index 9b7d74d..ae6e9df 100644
1139
--- a/sys/platform/pc64/x86_64/busdma_machdep.c
1140
+++ b/sys/platform/pc64/x86_64/busdma_machdep.c
1141
@@ -154,7 +154,7 @@ struct bus_dmamap {
1142
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
1143
 	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);
1144
 static struct spinlock bounce_map_list_spin =
1145
-	SPINLOCK_INITIALIZER(&bounce_map_list_spin);
1146
+	SPINLOCK_INITIALIZER("bounce_map_list_spin", &bounce_map_list_spin);
1147
 
1148
 static struct bus_dmamap nobounce_dmamap;
1149
 
1150
@@ -258,7 +258,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
1151
 
1152
 	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);
1153
 
1154
-	spin_init(&newtag->spin);
1155
+	spin_init(&newtag->spin, "busdmacreate");
1156
 	newtag->parent = parent;
1157
 	newtag->alignment = alignment;
1158
 	newtag->boundary = boundary;
1159
@@ -1112,7 +1112,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
1160
 	}
1161
 	bz = new_bz;
1162
 
1163
-	spin_init(&bz->spin);
1164
+	spin_init(&bz->spin, "allocbouncezone");
1165
 	STAILQ_INIT(&bz->bounce_page_list);
1166
 	STAILQ_INIT(&bz->bounce_map_waitinglist);
1167
 	bz->free_bpages = 0;
1168
diff --git a/sys/platform/pc64/x86_64/pmap.c b/sys/platform/pc64/x86_64/pmap.c
1169
index 45b3a29..3af4f30 100644
1170
--- a/sys/platform/pc64/x86_64/pmap.c
1171
+++ b/sys/platform/pc64/x86_64/pmap.c
1172
@@ -912,7 +912,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
1173
 	kernel_pmap.pm_count = 1;
1174
 	CPUMASK_ASSALLONES(kernel_pmap.pm_active);
1175
 	RB_INIT(&kernel_pmap.pm_pvroot);
1176
-	spin_init(&kernel_pmap.pm_spin);
1177
+	spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
1178
 	lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
1179
 
1180
 	/*
1181
@@ -1626,7 +1626,7 @@ pmap_pinit0(struct pmap *pmap)
1182
 	CPUMASK_ASSZERO(pmap->pm_active);
1183
 	pmap->pm_pvhint = NULL;
1184
 	RB_INIT(&pmap->pm_pvroot);
1185
-	spin_init(&pmap->pm_spin);
1186
+	spin_init(&pmap->pm_spin, "pmapinit0");
1187
 	lwkt_token_init(&pmap->pm_token, "pmap_tok");
1188
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1189
 	pmap_pinit_defaults(pmap);
1190
@@ -1656,7 +1656,7 @@ pmap_pinit_simple(struct pmap *pmap)
1191
 	if (pmap->pm_pmlpv == NULL) {
1192
 		RB_INIT(&pmap->pm_pvroot);
1193
 		bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1194
-		spin_init(&pmap->pm_spin);
1195
+		spin_init(&pmap->pm_spin, "pmapinitsimple");
1196
 		lwkt_token_init(&pmap->pm_token, "pmap_tok");
1197
 	}
1198
 }
1199
diff --git a/sys/platform/vkernel/platform/busdma_machdep.c b/sys/platform/vkernel/platform/busdma_machdep.c
1200
index d39594b..8212bfc 100644
1201
--- a/sys/platform/vkernel/platform/busdma_machdep.c
1202
+++ b/sys/platform/vkernel/platform/busdma_machdep.c
1203
@@ -1006,7 +1006,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
1204
 	}
1205
 	bz = new_bz;
1206
 
1207
-	spin_init(&bz->spin);
1208
+	spin_init(&bz->spin, "allocbouncezone");
1209
 	STAILQ_INIT(&bz->bounce_page_list);
1210
 	STAILQ_INIT(&bz->bounce_map_waitinglist);
1211
 	bz->free_bpages = 0;
1212
diff --git a/sys/platform/vkernel/platform/pmap.c b/sys/platform/vkernel/platform/pmap.c
1213
index cc85121..5dfa8cc 100644
1214
--- a/sys/platform/vkernel/platform/pmap.c
1215
+++ b/sys/platform/vkernel/platform/pmap.c
1216
@@ -182,7 +182,7 @@ pmap_bootstrap(void)
1217
 	kernel_pmap.pm_pteobj = NULL;	/* see pmap_init */
1218
 	TAILQ_INIT(&kernel_pmap.pm_pvlist);
1219
 	TAILQ_INIT(&kernel_pmap.pm_pvlist_free);
1220
-	spin_init(&kernel_pmap.pm_spin);
1221
+	spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
1222
 	lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
1223
 	i386_protection_init();
1224
 }
1225
@@ -253,7 +253,7 @@ pmap_pinit(struct pmap *pmap)
1226
 	pmap->pm_cpucachemask = 0;
1227
 	TAILQ_INIT(&pmap->pm_pvlist);
1228
 	TAILQ_INIT(&pmap->pm_pvlist_free);
1229
-	spin_init(&pmap->pm_spin);
1230
+	spin_init(&pmap->pm_spin, "pmapinit");
1231
 	lwkt_token_init(&pmap->pm_token, "pmap_tok");
1232
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1233
 	pmap->pm_stats.resident_count = 1;
1234
diff --git a/sys/platform/vkernel64/platform/busdma_machdep.c b/sys/platform/vkernel64/platform/busdma_machdep.c
1235
index 12a568e..d27be7c 100644
1236
--- a/sys/platform/vkernel64/platform/busdma_machdep.c
1237
+++ b/sys/platform/vkernel64/platform/busdma_machdep.c
1238
@@ -1002,7 +1002,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
1239
 	}
1240
 	bz = new_bz;
1241
 
1242
-	spin_init(&bz->spin);
1243
+	spin_init(&bz->spin, "allocbouncezone");
1244
 	STAILQ_INIT(&bz->bounce_page_list);
1245
 	STAILQ_INIT(&bz->bounce_map_waitinglist);
1246
 	bz->free_bpages = 0;
1247
diff --git a/sys/platform/vkernel64/platform/pmap.c b/sys/platform/vkernel64/platform/pmap.c
1248
index 3b9493f..5e0c929 100644
1249
--- a/sys/platform/vkernel64/platform/pmap.c
1250
+++ b/sys/platform/vkernel64/platform/pmap.c
1251
@@ -569,7 +569,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr, int64_t ptov_offset)
1252
 	TAILQ_INIT(&kernel_pmap.pm_pvlist);
1253
 	TAILQ_INIT(&kernel_pmap.pm_pvlist_free);
1254
 	lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
1255
-	spin_init(&kernel_pmap.pm_spin);
1256
+	spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
1257
 
1258
 	/*
1259
 	 * Reserve some special page table entries/VA space for temporary
1260
@@ -1180,7 +1180,7 @@ pmap_pinit(struct pmap *pmap)
1261
 	pmap->pm_ptphint = NULL;
1262
 	TAILQ_INIT(&pmap->pm_pvlist);
1263
 	TAILQ_INIT(&pmap->pm_pvlist_free);
1264
-	spin_init(&pmap->pm_spin);
1265
+	spin_init(&pmap->pm_spin, "pmapinit");
1266
 	lwkt_token_init(&pmap->pm_token, "pmap_tok");
1267
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1268
 	pmap->pm_stats.resident_count = 1;
1269
diff --git a/sys/sys/spinlock.h b/sys/sys/spinlock.h
1270
index 2da0267..7cf0262 100644
1271
--- a/sys/sys/spinlock.h
1272
+++ b/sys/sys/spinlock.h
1273
@@ -48,11 +48,12 @@
1274
  * cache entry.
1275
  */
1276
 struct spinlock {
1277
+        const char *descr;
1278
 	int counta;
1279
 	int countb;
1280
 };
1281
 
1282
-#define SPINLOCK_INITIALIZER(head)	{ 0, 0 }
1283
+#define SPINLOCK_INITIALIZER(d, head)	{ d, 0, 0 }
1284
 
1285
 #define SPINLOCK_SHARED			0x80000000
1286
 #define SPINLOCK_EXCLWAIT		0x00100000 /* high bits counter */
1287
diff --git a/sys/sys/spinlock2.h b/sys/sys/spinlock2.h
1288
index e08da39..dc2efe3 100644
1289
--- a/sys/sys/spinlock2.h
1290
+++ b/sys/sys/spinlock2.h
1291
@@ -80,6 +80,7 @@ spin_trylock(struct spinlock *spin)
1292
 	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
1293
 		return (spin_trylock_contested(spin));
1294
 #ifdef DEBUG_LOCKS
1295
+	kprintf("spin_trylock %s\n", spin->descr);
1296
 	int i;
1297
 	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
1298
 		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
1299
@@ -100,7 +101,11 @@ spin_trylock(struct spinlock *spin)
1300
 static __inline int
1301
 spin_held(struct spinlock *spin)
1302
 {
1303
-	return(spin->counta != 0);
1304
+	int held = (spin->counta != 0);
1305
+#ifdef DEBUG_LOCKS
1306
+	kprintf("spinlock %s held : %d\n", spin->descr, held);
1307
+#endif
1308
+	return(held);
1309
 }
1310
 
1311
 /*
1312
@@ -116,6 +121,7 @@ _spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
1313
 	if (spin->counta != 1)
1314
 		_spin_lock_contested(spin, ident);
1315
 #ifdef DEBUG_LOCKS
1316
+	kprintf("_spin_lock_quick %s\n", spin->descr);
1317
 	int i;
1318
 	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
1319
 		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
1320
@@ -144,6 +150,7 @@ static __inline void
1321
 spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
1322
 {
1323
 #ifdef DEBUG_LOCKS
1324
+	kprintf("spin_unlock_quick %s\n", spin->descr);
1325
 	int i;
1326
 	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
1327
 		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
1328
@@ -192,6 +199,7 @@ _spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
1329
 	if (atomic_cmpset_int(&spin->counta, 0, SPINLOCK_SHARED | 1) == 0)
1330
 		_spin_lock_shared_contested(spin, ident);
1331
 #ifdef DEBUG_LOCKS
1332
+	kprintf("_spin_lock_shared_quick %s\n", spin->descr);
1333
 	int i;
1334
 	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
1335
 		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
1336
@@ -209,6 +217,7 @@ static __inline void
1337
 spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
1338
 {
1339
 #ifdef DEBUG_LOCKS
1340
+	kprintf("spin_unlock_shared_quick %s\n", spin->descr);
1341
 	int i;
1342
 	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
1343
 		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
1344
@@ -263,8 +272,9 @@ spin_pool_unlock(void *chan)
1345
 }
1346
 
1347
 static __inline void
1348
-spin_init(struct spinlock *spin)
1349
+spin_init(struct spinlock *spin, const char *descr)
1350
 {
1351
+	spin->descr  = descr;
1352
         spin->counta = 0;
1353
         spin->countb = 0;
1354
 }
1355
diff --git a/sys/sys/tree.h b/sys/sys/tree.h
1356
index 5a9bb77..17544a4 100644
1357
--- a/sys/sys/tree.h
1358
+++ b/sys/sys/tree.h
1359
@@ -313,7 +313,7 @@ struct name {								\
1360
 }
1361
 
1362
 #define RB_INITIALIZER(root)						\
1363
-	{ NULL, NULL, SPINLOCK_INITIALIZER(root.spin) }
1364
+	{ NULL, NULL, SPINLOCK_INITIALIZER("root.spin", root.spin) }
1365
 
1366
 #define RB_INIT(root) do {						\
1367
 	(root)->rbh_root = NULL;					\
1368
diff --git a/sys/vfs/hammer2/hammer2_ccms.c b/sys/vfs/hammer2/hammer2_ccms.c
1369
index ccd861c..d4f8101 100644
1370
--- a/sys/vfs/hammer2/hammer2_ccms.c
1371
+++ b/sys/vfs/hammer2/hammer2_ccms.c
1372
@@ -74,7 +74,7 @@ void
1373
 ccms_cst_init(ccms_cst_t *cst, void *handle)
1374
 {
1375
 	bzero(cst, sizeof(*cst));
1376
-	spin_init(&cst->spin);
1377
+	spin_init(&cst->spin, "ccmscst");
1378
 	cst->handle = handle;
1379
 }
1380
 
1381
diff --git a/sys/vfs/hammer2/hammer2_vfsops.c b/sys/vfs/hammer2/hammer2_vfsops.c
1382
index 2eeb318..ec998f4 100644
1383
--- a/sys/vfs/hammer2/hammer2_vfsops.c
1384
+++ b/sys/vfs/hammer2/hammer2_vfsops.c
1385
@@ -334,10 +334,10 @@ hammer2_pfsalloc(const hammer2_inode_data_t *ipdata, hammer2_tid_t alloc_tid)
1386
 	kmalloc_create(&pmp->minode, "HAMMER2-inodes");
1387
 	kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
1388
 	lockinit(&pmp->lock, "pfslk", 0, 0);
1389
-	spin_init(&pmp->inum_spin);
1390
+	spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
1391
 	RB_INIT(&pmp->inum_tree);
1392
 	TAILQ_INIT(&pmp->unlinkq);
1393
-	spin_init(&pmp->list_spin);
1394
+	spin_init(&pmp->list_spin, "hm2pfsalloc_list");
1395
 
1396
 	pmp->alloc_tid = alloc_tid + 1;	  /* our first media transaction id */
1397
 	pmp->flush_tid = pmp->alloc_tid;
1398
@@ -528,8 +528,8 @@ hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
1399
 		kmalloc_create(&hmp->mchain, "HAMMER2-chains");
1400
 		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
1401
 		RB_INIT(&hmp->iotree);
1402
-		spin_init(&hmp->io_spin);
1403
-		spin_init(&hmp->list_spin);
1404
+		spin_init(&hmp->io_spin, "hm2mount_io");
1405
+		spin_init(&hmp->list_spin, "hm2mount_list");
1406
 		TAILQ_INIT(&hmp->flushq);
1407
 
1408
 		lockinit(&hmp->vollk, "h2vol", 0, 0);
1409
diff --git a/sys/vfs/ntfs/ntfs_subr.c b/sys/vfs/ntfs/ntfs_subr.c
1410
index 82af7be..d46015c 100644
1411
--- a/sys/vfs/ntfs/ntfs_subr.c
1412
+++ b/sys/vfs/ntfs/ntfs_subr.c
1413
@@ -388,7 +388,7 @@ ntfs_ntlookup(struct ntfsmount *ntmp, ino_t ino, struct ntnode **ipp)
1414
 
1415
 	/* init lock and lock the newborn ntnode */
1416
 	lockinit(&ip->i_lock, "ntnode", 0, LK_EXCLUSIVE);
1417
-	spin_init(&ip->i_interlock);
1418
+	spin_init(&ip->i_interlock, "ntfsntlookup");
1419
 	ntfs_ntget(ip);
1420
 
1421
 	ntfs_nthashins(ip);
1422
diff --git a/sys/vfs/ufs/ffs_softdep.c b/sys/vfs/ufs/ffs_softdep.c
1423
index 7be0069..d574761 100644
1424
--- a/sys/vfs/ufs/ffs_softdep.c
1425
+++ b/sys/vfs/ufs/ffs_softdep.c
1426
@@ -275,7 +275,7 @@ sema_init(struct sema *semap, char *name, int timo)
1427
 	semap->value = 0;
1428
 	semap->name = name;
1429
 	semap->timo = timo;
1430
-	spin_init(&semap->spin);
1431
+	spin_init(&semap->spin, "ufssema");
1432
 }
1433
 
1434
 /*
1435
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
1436
index a96cde4..98fb724 100644
1437
--- a/sys/vm/swap_pager.c
1438
+++ b/sys/vm/swap_pager.c
1439
@@ -168,7 +168,7 @@ static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
1440
 static int swap_burst_read = 0;	/* allow burst reading */
1441
 static swblk_t swapiterator;	/* linearize allocations */
1442
 
1443
-static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin);
1444
+static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER("swapbp_spin", &swapbp_spin);
1445
 
1446
 /* from vm_swap.c */
1447
 extern struct vnode *swapdev_vp;
1448
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
1449
index da6ea3d..b1e1246 100644
1450
--- a/sys/vm/vm_page.c
1451
+++ b/sys/vm/vm_page.c
1452
@@ -116,7 +116,7 @@ static volatile int vm_pages_waiting;
1453
 
1454
 static struct alist vm_contig_alist;
1455
 static struct almeta vm_contig_ameta[ALIST_RECORDS_65536];
1456
-static struct spinlock vm_contig_spin = SPINLOCK_INITIALIZER(&vm_contig_spin);
1457
+static struct spinlock vm_contig_spin = SPINLOCK_INITIALIZER("vm_contig_spin", &vm_contig_spin);
1458
 
1459
 static u_long vm_dma_reserved = 0;
1460
 TUNABLE_ULONG("vm.dma_reserved", &vm_dma_reserved);
1461
@@ -150,7 +150,7 @@ vm_page_queue_init(void)
1462
 
1463
 	for (i = 0; i < PQ_COUNT; i++) {
1464
 		TAILQ_INIT(&vm_page_queues[i].pl);
1465
-		spin_init(&vm_page_queues[i].spin);
1466
+		spin_init(&vm_page_queues[i].spin, "vm_page_queue_init");
1467
 	}
1468
 
1469
 	for (i = 0; i < VMACTION_HSIZE; i++)
1470
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
1471
index 003165d..4579d45 100644
1472
--- a/sys/vm/vm_pager.c
1473
+++ b/sys/vm/vm_pager.c
1474
@@ -184,7 +184,7 @@ static struct buf *swbuf_raw;
1475
 static vm_offset_t swapbkva;		/* swap buffers kva */
1476
 static struct swqueue bswlist_raw;	/* without kva */
1477
 static struct swqueue bswlist_kva;	/* with kva */
1478
-static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin);
1479
+static struct spinlock bswspin = SPINLOCK_INITIALIZER("bswspin", &bswspin);
1480
 static int pbuf_raw_count;
1481
 static int pbuf_kva_count;
1482
 
1483
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
1484
index 4e61113..caac30e 100644
1485
--- a/sys/vm/vm_zone.c
1486
+++ b/sys/vm/vm_zone.c
1487
@@ -237,7 +237,7 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
1488
 	 */
1489
 	if ((z->zflags & ZONE_BOOT) == 0) {
1490
 		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
1491
-		spin_init(&z->zlock);
1492
+		spin_init(&z->zlock, "zinitna");
1493
 		z->zfreecnt = 0;
1494
 		z->ztotal = 0;
1495
 		z->zmax = 0;
1496
@@ -371,7 +371,7 @@ zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
1497
 	z->zpagecount = 0;
1498
 	z->zalloc = 0;
1499
 	z->znalloc = 0;
1500
-	spin_init(&z->zlock);
1501
+	spin_init(&z->zlock, "zbootinit");
1502
 
1503
 	bzero(item, (size_t)nitems * z->zsize);
1504
 	z->zitems = NULL;
    (1-1/1)