
Submit #3279 » doc.patch

falsifian, 05/28/2021 12:36 PM


sys/platform/pc64/x86_64/pmap.c

 }
 /*
- * Of all the layers (PTE, PT, PD, PDP, PML4) the best one to cache is
+ * Of all the layers (PT, PD, PDP, PML4) the best one to cache is
  * the PT layer. This will speed up core pmap operations considerably.
  * We also cache the PTE layer to (hopefully) improve relative lookup
  * speeds.
  *
  * NOTE: The pmap spinlock does not need to be held but the passed-in pv
  *      must be in a known associated state (typically by being locked when
......
 ***************************************************/
 /*
  * Routine: pmap_kenter
  * Function:
- *      Add a wired page to the KVA
- *      NOTE! note that in order for the mapping to take effect -- you
- *      should do an invltlb after doing the pmap_kenter().
+ *      Add a wired page to the KVA and invalidate the mapping on all CPUs.
  */
 void
 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
        pt_entry_t *ptep;
......
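The rewritten comment folds the old invltlb note into the function description: pmap_kenter() itself invalidates the mapping on all CPUs. A minimal usage sketch, not part of the patch; the helper name kmap_one_page is invented for illustration:

void
kmap_one_page(vm_offset_t va, vm_paddr_t pa)
{
        /*
         * Wires pa at va in the kernel address space and performs the
         * TLB invalidation on all CPUs internally, so the caller no
         * longer needs a separate invltlb.
         */
        pmap_kenter(va, pa);
}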
        goto notnew;
 /*
- * (isnew) is TRUE, pv is not terminal.
+ * (isnew) is TRUE.
  *
  * (1) Add a wire count to the parent page table (pvp).
  * (2) Allocate a VM page for the page table.
......
        pv->pv_m = m;
 /*
- * (isnew) is TRUE, pv is not terminal.
+ * (isnew) is TRUE.
  *
  * Wire the page into pvp. Bump the resident_count for the pmap.
  * There is no pvp for the top level, address the pm_pml4[] array
......
        }
        vm_page_wakeup(m);
-notnew:
 /*
- * (isnew) may be TRUE or FALSE, pv may or may not be terminal.
+ * (isnew) may be TRUE or FALSE.
  */
+notnew:
        if (pvp) {
                KKASSERT(pvp->pv_m != NULL);
                ptep = pv_pte_lookup(pvp, ptepindex);
sys/sys/_malloc.h

        __uint32_t orig_cpuid;  /* originally allocated on */
        __size_t offset;        /* copied from kmalloc_mgt */
        __size_t objsize;       /* copied from malloc_type */
-       __size_t ncount;        /* copied from kmalloc_mgt */
+       __size_t ncount;        /* copy of slab_count from kmalloc_mgt */
        __size_t aindex;        /* start of allocations */
        __size_t findex;        /* end of frees */
        __size_t xindex;        /* synchronizer */
        exislock_t exis;        /* existential lock state */
        void *unused01;
        __uint64_t bmap[(KMALLOC_SLAB_MAXOBJS + 63) / 64];
-       void *fobjs[1];         /* list of free objects */
+       void *fobjs[1];
+       /*
+        * Circular buffer listing free objects. All indices modulo ncount.
+        * Invariants:
+        * - aindex, findex and xindex never decrease
+        * - aindex <= xindex <= findex
+        *
+        * aindex <= i < xindex: fobjs[i % ncount] is a pointer to a free object.
+        * xindex <= i < findex:
+        *   Synchronization state: fobjs[i % ncount] is temporarily reserved.
+        */
 } __cachealign;
 /*
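The added comment specifies a circular-buffer protocol over fobjs[]. A self-contained userland model of just those invariants may make them concrete; everything below is illustrative (single-threaded, fixed NCOUNT) and is not the kernel allocator:

#include <assert.h>
#include <stddef.h>

#define NCOUNT 8                        /* stands in for ncount */

struct slab_model {
        size_t aindex;                  /* start of allocations */
        size_t findex;                  /* end of frees */
        size_t xindex;                  /* synchronizer */
        void *fobjs[NCOUNT];
};

/* Take the oldest free object; only slots in [aindex, xindex) are valid. */
static void *
model_alloc(struct slab_model *s)
{
        if (s->aindex >= s->xindex)
                return NULL;            /* no published free objects */
        return s->fobjs[s->aindex++ % NCOUNT];
}

/* Return an object: reserve a slot by bumping findex, fill it, then
 * publish it by advancing xindex, preserving aindex <= xindex <= findex. */
static void
model_free(struct slab_model *s, void *obj)
{
        size_t i = s->findex++;         /* slot in [xindex, findex) is reserved */

        assert(s->findex - s->aindex <= NCOUNT); /* must not overwrite live slots */
        s->fobjs[i % NCOUNT] = obj;
        s->xindex = s->findex;          /* publish; trivial when single-threaded */
        assert(s->aindex <= s->xindex && s->xindex <= s->findex);
}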
sys/vm/vm_map.h

 typedef struct vm_map_freehint vm_map_freehint_t;
 /*
- * Maps are doubly-linked lists of map entries, kept sorted by address.
- * A single hint is provided to start searches again from the last
- * successful search, insertion, or removal.
- *
- * NOTE: The lock structure cannot be the first element of vm_map
- *      because this can result in a running lockup between two or more
- *      system processes trying to kmem_alloc_wait() due to kmem_alloc_wait()
- *      and free tsleep/waking up 'map' and the underlying lockmgr also
- *      sleeping and waking up on 'map'. The lockup occurs when the map fills
- *      up. The 'exec' map, for example.
+ * A vm_map stores a red-black tree of map entries, indexed by address.
  *
  * NOTE: The vm_map structure can be hard-locked with the lockmgr lock
  *      or soft-serialized with the token, or both.
......
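With the entries now described as a red-black tree indexed by address, the usual way to consult it is vm_map_lookup_entry(), a long-standing BSD interface. A hedged sketch, assuming the standard vm_map read-lock macros; the wrapper find_covering_entry is invented for illustration:

static vm_map_entry_t
find_covering_entry(vm_map_t map, vm_offset_t addr)
{
        vm_map_entry_t entry;

        vm_map_lock_read(map);
        if (!vm_map_lookup_entry(map, addr, &entry))
                entry = NULL;           /* addr falls in a hole between entries */
        vm_map_unlock_read(map);
        return (entry);
}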
        struct pmap vm_pmap;    /* private physical map */
        int vm_flags;
        caddr_t vm_shm;         /* SYS5 shared memory private data XXX */
-/* we copy from vm_startcopy to the end of the structure on fork */
+/* we copy from vm_startcopy on fork */
 #define vm_startcopy vm_rssize
        segsz_t vm_rssize;      /* current resident set size in pages */
        segsz_t vm_swrss;       /* resident set size before last swap */
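The corrected wording matters because the patch drops the claim that the fork-time copy runs to the end of the structure; the idiom typically bounds the copy with start and end markers instead. A toy, self-contained illustration of that marker idiom (all names below are invented; this is not DragonFly source):

#include <string.h>

struct toy_vmspace {
        int refcnt;                     /* NOT copied on fork */
#define toy_startcopy rssize            /* first field copied on fork */
        long rssize;
        long swrss;
#define toy_endcopy unused01            /* copying stops here */
        int unused01;                   /* NOT copied either */
};

/* Copy only the [toy_startcopy, toy_endcopy) byte range, mirroring the
 * bounded vmspace fork copy the comment refers to. */
static void
toy_fork_copy(struct toy_vmspace *dst, const struct toy_vmspace *src)
{
        memcpy(&dst->toy_startcopy, &src->toy_startcopy,
               (char *)&src->toy_endcopy - (char *)&src->toy_startcopy);
}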