ncc3.diff

csaba.henk, 03/29/2006 10:02 AM

sys/kern/vfs_cache.c Wed Mar 29 10:23:26 2006 +0200
@@ -108,6 +108,10 @@
 #define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
 #define MINNEG		1024
 
+/* Modes for shadow group traversal */
+#define SG_ALL     0 /* traverse whole group */
+#define SG_SUBTREE 1 /* traverse only subtree */
+
 MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
 
 static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
@@ -170,6 +174,15 @@
 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
 
 struct nchstats nchstats[SMP_MAXCPU];
+
+static STAILQ_HEAD(, shadowinfo) shadowinfo_freeq;
+static u_long numshadowinfo = 0;
+STATNODE(CTLFLAG_RD, numshadowinfo, &numshadowinfo);
+static long maxnumshadowinfo = -1;
+SYSCTL_LONG(_vfs_cache, OID_AUTO, maxnumshadowinfo, CTLFLAG_RW,
+            &maxnumshadowinfo, 0, "");
+MALLOC_DEFINE(M_SHADOWINFO, "shadowinfo", "VFS name cache shadowinfo");
+
 /*
  * Export VFS cache effectiveness statistics to user-land.
  *
@@ -196,6 +209,62 @@
 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
   0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
 
+/* XXX stubs for later MPSAFE work */
+#define shadowinfo_freeq_lock()
+#define shadowinfo_freeq_unlock()
+
+static struct shadowinfo *
+shadowinfo_fetch(void)
+{
+	struct shadowinfo *shinf = STAILQ_FIRST(&shadowinfo_freeq);
+
+	if (! shinf)
+		goto alloc;
+
+	shadowinfo_freeq_lock();
+	if ((shinf = STAILQ_FIRST(&shadowinfo_freeq)))
+		STAILQ_REMOVE_HEAD(&shadowinfo_freeq, sh_entry);
+	shadowinfo_freeq_unlock();
+
+	if (shinf)
+		return (shinf);
+
+alloc:
+	shinf = malloc(sizeof(*shinf), M_SHADOWINFO, M_WAITOK|M_ZERO);
+	numshadowinfo++;
+
+	return (shinf);
+}
+
+static __inline
+struct shadowinfo*
+shadowinfo_ref(struct shadowinfo *shinf)
+{
+	shinf->sh_refs++;
+
+	return (shinf);
+}
+
+static void
+shadowinfo_put(struct shadowinfo *shinf)
+{
+	if (--shinf->sh_refs > 0)
+		return;
+
+	if (maxnumshadowinfo >= 0 && numshadowinfo > maxnumshadowinfo) {
+		free(shinf, M_SHADOWINFO);
+		numshadowinfo--;
+		return;
+	}
+
+	shinf->sh_exlocks = 0;
+	shinf->sh_locktd = NULL;
+
+	shadowinfo_freeq_lock();
+	STAILQ_INSERT_TAIL(&shadowinfo_freeq, shinf, sh_entry);
+	shadowinfo_freeq_unlock();
+}
+
 static void cache_zap(struct namecache *ncp);
 
 /*
@@ -225,7 +294,7 @@
 	    (ncp->nc_flag & NCF_UNRESOLVED) &&
 	    TAILQ_EMPTY(&ncp->nc_list)
 	) {
-		KKASSERT(ncp->nc_exlocks == 0);
+		KKASSERT(ncp->nc_shadowinfo->sh_exlocks == 0);
 		cache_lock(ncp);
 		cache_zap(ncp);
 	} else {
@@ -295,6 +364,10 @@
 	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
 	ncp->nc_refs = 1;
 	ncp->nc_fsmid = 1;
+	ncp->nc_shadowinfo = &ncp->nc_shadowinfo_internal;
+	ncp->nc_shadowinfo_internal.sh_refs = 2;
+	ncp->nc_shadow_prev = NULL;
+	ncp->nc_shadow_next = NULL;
 	TAILQ_INIT(&ncp->nc_list);
 	cache_lock(ncp);
 	return(ncp);
@@ -303,7 +376,7 @@
 static void
 cache_free(struct namecache *ncp)
 {
-	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
+	KKASSERT(ncp->nc_refs == 1 && ncp->nc_shadowinfo->sh_exlocks == 1);
 	if (ncp->nc_name)
 		free(ncp->nc_name, M_VFSCACHE);
 	free(ncp, M_VFSCACHE);
@@ -322,6 +395,188 @@
 cache_drop(struct namecache *ncp)
 {
 	_cache_drop(ncp);
+}
+
+/*
+ * Iterate an "updater" function over a shadow group.
+ * All-group and subtree-only traversals are supported.
+ */
+static struct namecache *
+cache_group_walk(struct namecache *ncp,
+                 int (*updater)(struct namecache *xncp, void *param),
+                 int flags, void *param)
+{
+	struct namecache *xncp = ncp, *yncp;
+
+	for (;;) {
+		yncp = xncp->nc_shadow_next;
+		if (updater(xncp, param))
+			break;
+		if (! yncp || yncp == ncp ||
+		    (flags & SG_SUBTREE &&
+		     yncp->nc_shadowheight <= ncp->nc_shadowheight))
+			break;
+		xncp = yncp;
+	}
+
+	return(xncp);
+}
+
+struct migrate_param {
+	int heightdelta;
+	int exlocks;
+	struct shadowinfo *shadowinfo;
+};
+
+static int
+migrate_updater(struct namecache *ncp, void *param)
+{
+	struct migrate_param *mpm = param;
+	struct shadowinfo *shinf = mpm->shadowinfo;
+	struct shadowinfo *oldshinf = ncp->nc_shadowinfo;
+
+	if (! shinf)
+		shinf = &ncp->nc_shadowinfo_internal;
+
+	if (shinf == oldshinf)
+		goto out;
+
+	shinf->sh_locktd = oldshinf->sh_locktd;
+
+	ncp->nc_shadowinfo = shadowinfo_ref(shinf);
+	shadowinfo_put(oldshinf);
+
+out:
+	ncp->nc_shadowheight += mpm->heightdelta;
+	if (mpm->exlocks >= 0)
+		shinf->sh_exlocks = mpm->exlocks;
+
+	return (0);
+}
+
+static __inline
+void
+cache_shadow_link(struct namecache *sncp, struct namecache *ncp)
+{
+	struct namecache *pncp;
+	struct namecache *nsncp;
+
+	pncp = ncp->nc_shadow_prev ?: ncp;
+	nsncp = sncp->nc_shadow_next ?: sncp;
+
+	pncp->nc_shadow_next = nsncp;
+	nsncp->nc_shadow_prev = pncp;
+
+	sncp->nc_shadow_next = ncp;
+	ncp->nc_shadow_prev = sncp;
+}
+
+static __inline
+void
+cache_shadow_unlink(struct namecache *ncp)
+{
+	if (! ncp->nc_shadow_next)
+		return;
+
+	KKASSERT(ncp->nc_shadow_prev);
+
+	if (ncp->nc_shadow_prev == ncp->nc_shadow_next) {
+		ncp->nc_shadow_prev->nc_shadow_next = NULL;
+		ncp->nc_shadow_next->nc_shadow_prev = NULL;
+	} else {
+		ncp->nc_shadow_prev->nc_shadow_next = ncp->nc_shadow_next;
+		ncp->nc_shadow_next->nc_shadow_prev = ncp->nc_shadow_prev;
+	}
+
+	ncp->nc_shadow_prev = ncp->nc_shadow_next = NULL;
+}
+
+/*
+ * Join ncp into the shadow group of sncp.
+ *
+ * ncp must be unlocked on entry, while sncp must be locked on entry.
+ *
+ * The routine will fail and return ELOOP if the intended shadowing
+ * association doesn't make sense (currently this boils down to ncp being
+ * the same as sncp).
+ * It will fail with EEXIST if ncp gets resolved or acquires a shadow
+ * association from elsewhere during the attach attempt (which is possible
+ * because ncp is unlocked).
+ *
+ * - On success ncp will be a representative of the joint shadow group,
+ *   which then will be locked.
+ * - On failure the namecache entries will exist separately just as they did
+ *   before; both entries will be locked.
+ */
+int
+cache_shadow_attach(struct namecache *ncp, struct namecache *sncp)
+{
+	struct migrate_param mpm;
+
+	if (ncp == sncp)
+		return(ELOOP);
+
+	KKASSERT(ncp->nc_shadowinfo->sh_locktd != curthread);
+	KKASSERT(sncp->nc_shadowinfo->sh_locktd == curthread);
+
+	cache_lock_two(ncp, sncp);
+
+	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 || ncp->nc_shadowheight != 0)
+		return(EEXIST);
+
+	if (sncp->nc_shadowinfo == &sncp->nc_shadowinfo_internal) {
+		mpm.heightdelta = 0;
+		mpm.shadowinfo = shadowinfo_fetch();
+		mpm.exlocks = sncp->nc_shadowinfo->sh_exlocks;
+		migrate_updater(sncp, &mpm);
+	}
+
+	mpm.heightdelta = sncp->nc_shadowheight + 1;
+	mpm.shadowinfo = sncp->nc_shadowinfo;
+	mpm.exlocks = -1;
+
+	cache_group_walk(ncp, &migrate_updater, SG_ALL, &mpm);
+	cache_shadow_link(sncp, ncp);
+
+	return(0);
+}
+
+/*
+ * Remove a namecache entry from its shadow group.
+ *
+ * The shadow group must be locked upon entry.
+ *
+ * On return both the entry and its former group remain locked.
+ */
+void
+cache_shadow_detach(struct namecache *ncp)
+{
+	struct namecache *pncp, *nncp;
+	struct migrate_param mpm;
+
+	mpm.shadowinfo = NULL;
+again:
+	mpm.heightdelta = -ncp->nc_shadowheight;
+	mpm.exlocks = ncp->nc_shadowinfo->sh_exlocks;
+	pncp = ncp->nc_shadow_prev;
+	nncp = ncp->nc_shadow_next;
+
+	migrate_updater(ncp, &mpm);
+	cache_shadow_unlink(ncp);
+
+	if (nncp && nncp == pncp) {
+		ncp = nncp;
+		goto again;
+	}
+}
+
+static int
+vref_updater(struct namecache *ncp, void *param)
+{
+	if (ncp->nc_vp)
+		*(int *)param > 0 ? vhold(ncp->nc_vp) : vdrop(ncp->nc_vp);
+
+	return(0);
 }
 
 /*
@@ -349,15 +604,21 @@
 {
 	thread_t td;
 	int didwarn;
+	struct shadowinfo *shinf;
 
 	KKASSERT(ncp->nc_refs != 0);
 	didwarn = 0;
 	td = curthread;
 
 	for (;;) {
-		if (ncp->nc_exlocks == 0) {
-			ncp->nc_exlocks = 1;
-			ncp->nc_locktd = td;
+		shinf = ncp->nc_shadowinfo;
+		KKASSERT(shinf);
+		KKASSERT(shinf->sh_refs != 0);
+		if (shinf->sh_exlocks == 0) {
+			int ref = 1;
+
+			shinf->sh_exlocks = 1;
+			shinf->sh_locktd = td;
 			/*
 			 * The vp associated with a locked ncp must be held
 			 * to prevent it from being recycled (which would
@@ -365,16 +626,15 @@
 			 *
 			 * XXX loop on race for later MPSAFE work.
 			 */
-			if (ncp->nc_vp)
-				vhold(ncp->nc_vp);
+			cache_group_walk(ncp, &vref_updater, SG_ALL, &ref);
 			break;
 		}
-		if (ncp->nc_locktd == td) {
-			++ncp->nc_exlocks;
+		if (shinf->sh_locktd == td) {
+			++shinf->sh_exlocks;
 			break;
 		}
-		ncp->nc_flag |= NCF_LOCKREQ;
-		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
+		shinf->sh_lockreq = 1;
+		if (tsleep(shinf, 0, "clock", nclockwarn) == EWOULDBLOCK) {
 			if (didwarn)
 				continue;
 			didwarn = 1;
@@ -398,12 +658,17 @@
 cache_lock_nonblock(struct namecache *ncp)
 {
 	thread_t td;
+	struct shadowinfo *shinf = ncp->nc_shadowinfo;
 
 	KKASSERT(ncp->nc_refs != 0);
+	KKASSERT(shinf);
+	KKASSERT(shinf->sh_refs != 0);
 	td = curthread;
-	if (ncp->nc_exlocks == 0) {
-		ncp->nc_exlocks = 1;
-		ncp->nc_locktd = td;
+	if (shinf->sh_exlocks == 0) {
+		int ref = 1;
+
+		shinf->sh_exlocks = 1;
+		shinf->sh_locktd = td;
 		/*
 		 * The vp associated with a locked ncp must be held
 		 * to prevent it from being recycled (which would
@@ -411,8 +676,7 @@
 		 *
 		 * XXX loop on race for later MPSAFE work.
 		 */
-		if (ncp->nc_vp)
-			vhold(ncp->nc_vp);
+		cache_group_walk(ncp, &vref_updater, SG_ALL, &ref);
 		return(0);
 	} else {
 		return(EWOULDBLOCK);
@@ -423,17 +687,45 @@
 cache_unlock(struct namecache *ncp)
 {
 	thread_t td = curthread;
+	struct shadowinfo *shinf = ncp->nc_shadowinfo;
 
 	KKASSERT(ncp->nc_refs > 0);
-	KKASSERT(ncp->nc_exlocks > 0);
-	KKASSERT(ncp->nc_locktd == td);
-	if (--ncp->nc_exlocks == 0) {
-		if (ncp->nc_vp)
-			vdrop(ncp->nc_vp);
-		ncp->nc_locktd = NULL;
-		if (ncp->nc_flag & NCF_LOCKREQ) {
-			ncp->nc_flag &= ~NCF_LOCKREQ;
-			wakeup(ncp);
+	KKASSERT(shinf);
+	KKASSERT(shinf->sh_refs > 0);
+	KKASSERT(shinf->sh_exlocks > 0);
+	KKASSERT(shinf->sh_locktd == td);
+	if (shinf->sh_exlocks == 1) {
+		int ref = -1;
+		cache_group_walk(ncp, &vref_updater, SG_ALL, &ref);
+	}
+	if (--shinf->sh_exlocks == 0) {
+		shinf->sh_locktd = NULL;
+		if (shinf->sh_lockreq) {
+			shinf->sh_lockreq = 0;
+			wakeup(shinf);
+		}
+	}
+}
+
+/*
+ * Obtain locks on both uncp and lncp.
+ *
+ * On entry, uncp is assumed to be unlocked, and lncp is assumed to be
+ * locked.
+ *
+ * After this function returns, the caller is responsible for checking
+ * the state of lncp, which might have been unlocked temporarily.
+ */
+void
+cache_lock_two(struct namecache *uncp, struct namecache *lncp)
+{
+	if (cache_lock_nonblock(uncp) != 0) {
+		if (uncp > lncp)
+			cache_lock(uncp);
+		else {
+			cache_unlock(lncp);
+			cache_lock(uncp);
+			cache_lock(lncp);
 		}
 	}
 }
@@ -453,7 +745,8 @@
 cache_get_nonblock(struct namecache *ncp)
 {
 	/* XXX MP */
-	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
+	if (ncp->nc_shadowinfo->sh_exlocks == 0 ||
+	    ncp->nc_shadowinfo->sh_locktd == curthread) {
 		_cache_hold(ncp);
 		cache_lock(ncp);
 		return(0);
@@ -487,7 +780,7 @@
 		if (!TAILQ_EMPTY(&ncp->nc_list))
 			vhold(vp);
 		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
-		if (ncp->nc_exlocks)
+		if (ncp->nc_shadowinfo->sh_exlocks)
 			vhold(vp);
 
 		/*
@@ -521,6 +814,8 @@
 		ncp->nc_timeout = 1;
 }
 
+static int unresolver_updater(struct namecache *ncp, void *param);
+
 /*
  * Disassociate the vnode or negative-cache association and mark a
  * namecache entry as unresolved again.  Note that the ncp is still
@@ -541,7 +836,25 @@
 void
 cache_setunresolved(struct namecache *ncp)
 {
+	struct namecache *nncp;
+
+	cache_group_walk(ncp, &unresolver_updater, SG_SUBTREE, ncp);
+
+	nncp = ncp->nc_shadow_next;
+	if (nncp)
+		cache_hold(nncp);
+	unresolver_updater(ncp, NULL);
+	if (nncp)
+		cache_put(nncp);
+}
+
+static int
+unresolver_updater(struct namecache *ncp, void *param)
+{
 	struct vnode *vp;
+
+	if (ncp == param)
+		return(0);
 
 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
 		ncp->nc_flag |= NCF_UNRESOLVED;
@@ -563,13 +876,23 @@
 			 */
 			if (!TAILQ_EMPTY(&ncp->nc_list))
 				vdrop(vp);
-			if (ncp->nc_exlocks)
+			if (ncp->nc_shadowinfo->sh_exlocks)
 				vdrop(vp);
 		} else {
 			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
 			--numneg;
 		}
-	}
+
+		cache_shadow_detach(ncp);
+	}
+
+	if (ncp->nc_refs == 0) {
+		cache_hold(ncp);
+		cache_put(ncp);
+	}
+
+
+	return(0);
 }
 
 /*
@@ -619,7 +942,7 @@
 	struct namecache *nextkid;
 	int rcnt = 0;
 
-	KKASSERT(ncp->nc_exlocks);
+	KKASSERT(ncp->nc_shadowinfo->sh_exlocks);
 
 	cache_setunresolved(ncp);
 	if (flags & CINV_DESTROY)
@@ -715,6 +1038,7 @@
  * XXX the disconnection could pose a problem, check code paths to make
  * sure any code that blocks can handle the parent being changed out from
  * under it.  Maybe we should lock the children (watch out for deadlocks) ?
+ * [UPDATE: attempt made to lock children, see in situ explanation]
  *
  * After we return the caller has the option of calling cache_setvp() if
  * the vnode of the new target ncp is known.
@@ -726,26 +1050,62 @@
 cache_rename(struct namecache *fncp, struct namecache *tncp)
 {
 	struct namecache *scan;
-	int didwarn = 0;
-
+	int didwarn[] = { 0, 0 };
+
+	/* XXX should we rather assert non-equality here? */
+	if (fncp == tncp)
+		return;
+
+again:
 	cache_setunresolved(fncp);
 	cache_setunresolved(tncp);
+
+	/*
+	 * It seems we need to unlock fncp before calling cache_inval():
+	 * cache_inval() does a lot of lock/unlock/relock-ing (with tncp
+	 * and its children), therefore keeping fncp locked might be
+	 * deadlocky...
+	 */
+	cache_unlock(fncp);
+
 	while (cache_inval(tncp, CINV_CHILDREN) != 0) {
-		if (didwarn++ % 10 == 0) {
-			printf("Warning: cache_rename: race during "
+		if (didwarn[0]++ % 10 == 0) {
+			printf("Warning: cache_rename: race #1 during "
 				"rename %s->%s\n",
 				fncp->nc_name, tncp->nc_name);
 		}
 		tsleep(tncp, 0, "mvrace", hz / 10);
 		cache_setunresolved(tncp);
 	}
+
+	cache_unlock(tncp);
+	cache_lock(fncp);
+
 	while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
-		cache_hold(scan);
+		cache_unlock(fncp);
+		/*
+		 * We have to lock fncp's kids in order to unresolve
+		 * their shadow kids...
+		 */
+		cache_get(scan);
 		cache_unlink_parent(scan);
+		cache_group_walk(scan, &unresolver_updater, SG_SUBTREE, scan);
 		cache_link_parent(scan, tncp);
 		if (scan->nc_flag & NCF_HASHED)
 			cache_rehash(scan);
-		cache_drop(scan);
+		cache_put(scan);
+		cache_lock(fncp);
+	}
+
+	cache_lock_two(tncp, fncp);
+
+	if ((fncp->nc_flag & tncp->nc_flag & NCF_UNRESOLVED) == 0) {
+		if (didwarn[1]++ % 10 == 0) {
+			printf("Warning: cache_rename: race #2 during "
+				"rename %s->%s\n",
+				fncp->nc_name, tncp->nc_name);
+		}
+		goto again;
 	}
 }
 
@@ -1321,7 +1681,7 @@
 			cache_drop(ncp);
 			return;
 		}
-		KKASSERT(par->nc_exlocks == 0);
+		KKASSERT(par->nc_shadowinfo->sh_exlocks == 0);
 		cache_lock(ncp);
 	}
 done:
@@ -1417,7 +1777,7 @@
 		if (ncp->nc_timeout &&
 		    (int)(ncp->nc_timeout - ticks) < 0 &&
 		    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
-		    ncp->nc_exlocks == 0
+		    ncp->nc_shadowinfo->sh_exlocks == 0
 		) {
 			cache_zap(cache_get(ncp));
 			goto restart;
@@ -1738,6 +2098,7 @@
 		gd->gd_nchstats = &nchstats[i];
 	}
 	TAILQ_INIT(&ncneglist);
+	STAILQ_INIT(&shadowinfo_freeq);
 	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
 	nclockwarn = 1 * hz;
 }
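
To see the traversal semantics in isolation, here is a minimal user-space sketch of the shadow group walk added above (assumptions: struct namecache is reduced to the three fields cache_group_walk() touches; nc_label and print_updater are demo-only names; the circular list is laid out as consecutive cache_shadow_link() calls would leave it, in ascending nc_shadowheight order):

#include <stdio.h>

#define SG_ALL     0	/* traverse whole group */
#define SG_SUBTREE 1	/* traverse only subtree */

struct namecache {	/* reduced: only the shadow group fields */
	struct namecache *nc_shadow_prev;
	struct namecache *nc_shadow_next;
	int nc_shadowheight;
	const char *nc_label;	/* illustration only */
};

/* Same control flow as the patch's cache_group_walk(). */
static struct namecache *
cache_group_walk(struct namecache *ncp,
                 int (*updater)(struct namecache *xncp, void *param),
                 int flags, void *param)
{
	struct namecache *xncp = ncp, *yncp;

	for (;;) {
		yncp = xncp->nc_shadow_next;
		if (updater(xncp, param))	/* nonzero aborts the walk */
			break;
		if (yncp == NULL || yncp == ncp ||
		    ((flags & SG_SUBTREE) &&
		     yncp->nc_shadowheight <= ncp->nc_shadowheight))
			break;
		xncp = yncp;
	}
	return (xncp);
}

static int
print_updater(struct namecache *ncp, void *param)
{
	(void)param;
	printf("  %s (height %d)\n", ncp->nc_label, ncp->nc_shadowheight);
	return (0);
}

int
main(void)
{
	struct namecache low = { .nc_shadowheight = 0, .nc_label = "low" };
	struct namecache mid = { .nc_shadowheight = 1, .nc_label = "mid" };
	struct namecache top = { .nc_shadowheight = 2, .nc_label = "top" };

	/* Circular doubly-linked list: low <-> mid <-> top <-> low. */
	low.nc_shadow_next = &mid; mid.nc_shadow_prev = &low;
	mid.nc_shadow_next = &top; top.nc_shadow_prev = &mid;
	top.nc_shadow_next = &low; low.nc_shadow_prev = &top;

	printf("SG_ALL from low:\n");	/* visits low, mid, top */
	cache_group_walk(&low, print_updater, SG_ALL, NULL);
	printf("SG_SUBTREE from mid:\n");	/* visits mid, top only */
	cache_group_walk(&mid, print_updater, SG_SUBTREE, NULL);
	return (0);
}

Run standalone, the SG_ALL walk prints all three entries, while the SG_SUBTREE walk starting at mid stops before wrapping around to low, since low's height is not greater than mid's. That is the property cache_setunresolved() relies on above when it unresolves only the entries shadowing a given one.
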
sys/sys/namecache.h Wed Mar 29 10:23:26 2006 +0200
@@ -70,7 +70,20 @@
 
 struct vnode;
 
+/*
+ * Auxiliary structure for locking namecache entries,
+ * either on their own or grouped into "shadow groups".
+ */
+struct shadowinfo {
+    STAILQ_ENTRY(shadowinfo) sh_entry;  /* entry for free list */
+    int		   sh_exlocks;		/* namespace locking */
+    struct thread *sh_locktd;		/* namespace locking */
+    int            sh_refs;		/* reference count */
+    uint8_t        sh_lockreq :1;	/* lock intent flag */
+};
+
 TAILQ_HEAD(namecache_list, namecache);
+LIST_HEAD(namecache_shadow_list, namecache);
 
 /*
  * The namecache structure is used to manage the filesystem namespace.  Most
@@ -110,8 +123,12 @@
     char	*nc_name;		/* Separately allocated seg name */
     int		nc_error;
     int		nc_timeout;		/* compared against ticks, or 0 */
-    int		nc_exlocks;		/* namespace locking */
-    struct thread *nc_locktd;		/* namespace locking */
+    struct shadowinfo *nc_shadowinfo;         /* namespace locking */
+    struct shadowinfo nc_shadowinfo_internal; /* private locking information */
+    struct namecache *nc_shadow_prev;   /* previous entry in shadow group */
+    struct namecache *nc_shadow_next;   /* next entry in shadow group */
+    int         nc_shadowheight;        /* measure within shadow group */
+    struct namecache *nc_shadowed;	/* lower layer entry in layered fs */
     struct mount *nc_mount;		/* associated mount for vopops */
     int64_t	nc_fsmid;		/* filesystem modified id */
 };
@@ -127,7 +144,7 @@
 #define NCF_MOUNTPT	0x0008	/* mount point */
 #define NCF_ROOT	0x0010	/* namecache root (static) */
 #define NCF_HASHED	0x0020	/* namecache entry in hash table */
-#define NCF_LOCKREQ	0x0040
+#define NCF_UNUSED040	0x0040
 #define NCF_UNUSED080	0x0080
 #define NCF_ISSYMLINK	0x0100	/* represents a symlink */
 #define NCF_ISDIR	0x0200	/* represents a directory */
@@ -150,6 +167,9 @@
 void	cache_lock(struct namecache *ncp);
 int	cache_lock_nonblock(struct namecache *ncp);
 void	cache_unlock(struct namecache *ncp);
+void	cache_lock_two(struct namecache *uncp, struct namecache *lncp);
+int	cache_shadow_attach(struct namecache *ncp, struct namecache *sncp);
+void	cache_shadow_detach(struct namecache *ncp);
 void	cache_setvp(struct namecache *ncp, struct vnode *vp);
 void	cache_settimeout(struct namecache *ncp, int nticks);
 void	cache_setunresolved(struct namecache *ncp);
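
The shared-shadowinfo locking these declarations set up can be modeled in a few lines of user-space C. In the sketch below (assumptions: thread identity faked as an int, sleeping/wakeup and sh_refs/the free list omitted; nc_init, nc_trylock and nc_unlock are demo-only names), two entries made to share one struct shadowinfo behave as a single lockable unit, with sh_exlocks counting recursion the way cache_lock() does:

#include <assert.h>
#include <stdio.h>

/* Reduced shadowinfo: just the lock fields from the header above. */
struct shadowinfo {
	int sh_exlocks;		/* recursive exclusive-lock count */
	int sh_locktd;		/* owner "thread", faked as an int; 0 = none */
};

struct namecache {
	struct shadowinfo *nc_shadowinfo;	   /* shared when grouped */
	struct shadowinfo nc_shadowinfo_internal; /* fallback when alone */
};

static void
nc_init(struct namecache *ncp)
{
	ncp->nc_shadowinfo = &ncp->nc_shadowinfo_internal;
}

/* Lock through whichever group member is at hand. */
static int
nc_trylock(struct namecache *ncp, int td)
{
	struct shadowinfo *shinf = ncp->nc_shadowinfo;

	if (shinf->sh_exlocks == 0)
		shinf->sh_locktd = td;
	else if (shinf->sh_locktd != td)
		return (-1);		/* held by someone else */
	shinf->sh_exlocks++;		/* recursion, as in cache_lock() */
	return (0);
}

static void
nc_unlock(struct namecache *ncp, int td)
{
	struct shadowinfo *shinf = ncp->nc_shadowinfo;

	assert(shinf->sh_locktd == td && shinf->sh_exlocks > 0);
	if (--shinf->sh_exlocks == 0)
		shinf->sh_locktd = 0;
}

int
main(void)
{
	struct namecache lower, upper;
	struct shadowinfo group = { 0, 0 };

	nc_init(&lower);
	nc_init(&upper);

	/* Group the two entries, as cache_shadow_attach() would. */
	lower.nc_shadowinfo = &group;
	upper.nc_shadowinfo = &group;

	assert(nc_trylock(&upper, 1) == 0);	/* thread 1 locks via upper */
	assert(nc_trylock(&lower, 2) == -1);	/* thread 2 is refused via lower */
	assert(nc_trylock(&lower, 1) == 0);	/* thread 1 recurses via lower */
	nc_unlock(&lower, 1);
	nc_unlock(&upper, 1);
	assert(nc_trylock(&lower, 2) == 0);	/* now thread 2 gets it */
	printf("group lock shared across members: OK\n");
	return (0);
}

This is the core invariant behind cache_shadow_attach(): after the migrate_updater() pass, every member of the joint group points at the same shadowinfo, so locking any member locks them all.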