Project

General

Profile

Submit #3279 » doc.patch

falsifian, 05/28/2021 12:36 PM

View differences:

sys/platform/pc64/x86_64/pmap.c
544 544
}
545 545

  
546 546
/*
547
 * Of all the layers (PTE, PT, PD, PDP, PML4) the best one to cache is
547
 * Of all the layers (PT, PD, PDP, PML4) the best one to cache is
548 548
 * the PT layer.  This will speed up core pmap operations considerably.
549
 * We also cache the PTE layer to (hopefully) improve relative lookup
550
 * speeds.
551 549
 *
552 550
 * NOTE: The pmap spinlock does not need to be held but the passed-in pv
553 551
 *	 must be in a known associated state (typically by being locked when
......
1882 1880
 ***************************************************/
1883 1881

  
1884 1882
/*
1885
 * Routine: pmap_kenter
1886
 * Function:
1887
 *  	Add a wired page to the KVA
1888
 *  	NOTE! note that in order for the mapping to take effect -- you
1889
 *  	should do an invltlb after doing the pmap_kenter().
1883
 * Add a wired page to the KVA and invalidate the mapping on all CPUs.
1890 1884
 */
1891
void 
1885
void
1892 1886
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1893 1887
{
1894 1888
	pt_entry_t *ptep;
......
2622 2616
		goto notnew;
2623 2617

  
2624 2618
	/*
2625
	 * (isnew) is TRUE, pv is not terminal.
2619
	 * (isnew) is TRUE.
2626 2620
	 *
2627 2621
	 * (1) Add a wire count to the parent page table (pvp).
2628 2622
	 * (2) Allocate a VM page for the page table.
......
2650 2644
	pv->pv_m = m;
2651 2645

  
2652 2646
	/*
2653
	 * (isnew) is TRUE, pv is not terminal.
2647
	 * (isnew) is TRUE.
2654 2648
	 *
2655 2649
	 * Wire the page into pvp.  Bump the resident_count for the pmap.
2656 2650
	 * There is no pvp for the top level, address the pm_pml4[] array
......
2697 2691
	}
2698 2692
	vm_page_wakeup(m);
2699 2693

  
2694
notnew:
2700 2695
	/*
2701
	 * (isnew) may be TRUE or FALSE, pv may or may not be terminal.
2696
	 * (isnew) may be TRUE or FALSE.
2702 2697
	 */
2703
notnew:
2704 2698
	if (pvp) {
2705 2699
		KKASSERT(pvp->pv_m != NULL);
2706 2700
		ptep = pv_pte_lookup(pvp, ptepindex);
sys/sys/_malloc.h
65 65
	__uint32_t		orig_cpuid;	/* originally allocated on */
66 66
	__size_t		offset;		/* copied from kmalloc_mgt */
67 67
	__size_t		objsize;	/* copied from malloc_type */
68
	__size_t		ncount;		/* copied from kmalloc_mgt */
68
	__size_t		ncount;
69
	/* copy of slab_count from kmalloc_mgt */
69 70
	__size_t		aindex;		/* start of allocations */
70 71
	__size_t		findex;		/* end of frees */
71 72
	__size_t		xindex;		/* synchronizer */
72 73
	exislock_t		exis;		/* existential lock state */
73 74
	void			*unused01;
74 75
	__uint64_t		bmap[(KMALLOC_SLAB_MAXOBJS + 63) / 64];
75
	void			*fobjs[1];	/* list of free objects */
76
	void			*fobjs[1];
77
	/*
78
	 * Circular buffer listing free objects. All indices modulo ncount.
79
	 * Invariants:
80
	 * - aindex, findex and xindex never decrease
81
	 * - aindex <= xindex <= findex
82
	 *
83
 * aindex <= i < xindex: fobjs[i % ncount] is a pointer to a free object.
84
	 * xindex <= i < findex:
85
 *     Synchronization state: fobjs[i % ncount] is temporarily reserved.
86
	 */
76 87
} __cachealign;
77 88

  
78 89
/*
sys/vm/vm_map.h
326 326
typedef struct vm_map_freehint vm_map_freehint_t;
327 327

  
328 328
/*
329
 * Maps are doubly-linked lists of map entries, kept sorted by address.
330
 * A single hint is provided to start searches again from the last
331
 * successful search, insertion, or removal.
332
 *
333
 * NOTE: The lock structure cannot be the first element of vm_map
334
 *	 because this can result in a running lockup between two or more
335
 *	 system processes trying to kmem_alloc_wait() due to kmem_alloc_wait()
336
 *	 and free tsleep/waking up 'map' and the underlying lockmgr also
337
 *	 sleeping and waking up on 'map'.  The lockup occurs when the map fills
338
 *	 up.  The 'exec' map, for example.
329
 * A vm_map stores a red-black tree of map entries, indexed by address.
339 330
 *
340 331
 * NOTE: The vm_map structure can be hard-locked with the lockmgr lock
341 332
 *	 or soft-serialized with the token, or both.
......
380 371
	struct pmap vm_pmap;	/* private physical map */
381 372
	int vm_flags;
382 373
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
383
/* we copy from vm_startcopy to the end of the structure on fork */
374
/* we copy from vm_startcopy on fork */
384 375
#define vm_startcopy vm_rssize
385 376
	segsz_t vm_rssize;	/* current resident set size in pages */
386 377
	segsz_t vm_swrss;	/* resident set size before last swap */
    (1-1/1)