Bug #1020
Status: closed — Allocate lwkt threads with objcache
0%
Description
This patch uses an objcache instead of a custom per-CPU cache and a zone to
allocate and free struct thread instances.
I'll commit it in a couple of days if nobody objects.
Index: src2/sys/kern/kern_proc.c
===================================================================
--- src2.orig/sys/kern/kern_proc.c	2008-05-19 12:05:48.466196000 +0200
+++ src2/sys/kern/kern_proc.c	2008-05-19 12:10:23.000000000 +0200
@@ -51,7 +51,6 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
-#include <vm/vm_zone.h>
#include <machine/smp.h>
#include <sys/spinlock2.h>
@@ -85,7 +84,6 @@ u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
struct spinlock allproc_spin;
-vm_zone_t thread_zone;
/*
* Random component to nextpid generation. We mix in a random factor to make
@@ -128,9 +126,9 @@ procinit(void)
LIST_INIT(&allproc);
LIST_INIT(&zombproc);
spin_init(&allproc_spin);
+ lwkt_init();
pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
- thread_zone = zinit("THREAD", sizeof (struct thread), 0, 0, 5);
uihashinit();
}
Index: src2/sys/kern/lwkt_thread.c
===================================================================
--- src2.orig/sys/kern/lwkt_thread.c	2008-05-19 12:05:48.466556000 +0200
+++ src2/sys/kern/lwkt_thread.c	2008-05-19 12:11:52.000000000 +0200
@@ -66,11 +66,12 @@
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
-#include <vm/vm_zone.h>
#include <machine/stdarg.h>
#include <machine/smp.h>
static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");
static int untimely_switch = 0;
#ifdef INVARIANTS
static int panic_on_cscount = 0;
@@ -82,6 +83,7 @@ static __int64_t preempt_weird = 0;
static __int64_t token_contention_count = 0;
static __int64_t mplock_contention_count = 0;
static int lwkt_use_spin_port;
+static struct objcache *thread_cache;
/*
* We can make all thread ports use the spin backend instead of the thread
@@ -154,6 +156,40 @@ _lwkt_enqueue(thread_t td)
}
}
+static __boolean_t
+_lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
+{
+	struct thread *td = (struct thread *)obj;
+	td->td_kstack = NULL;
+	td->td_kstack_size = 0;
+	td->td_flags = TDF_ALLOCATED_THREAD;
+	return (1);
+}
+static void
+_lwkt_thread_dtor(void *obj, void *privdata)
+{
+	struct thread *td = (struct thread *)obj;
+	KASSERT(td->td_kstack != NULL &&
+	    td->td_kstack_size > 0,
+	    ("_lwkt_thread_dtor: corrupted stack"));
+	kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
+}
+/*
+ * Initialize the lwkt subsystem.
+ */
+void
+lwkt_init(void)
+{
+	/* An objcache has 2 magazines per CPU so divide cache size by 2. */
+	thread_cache = objcache_create_mbacked(M_THREAD, sizeof(struct thread), 0,
+	    CACHE_NTHREADS/2, _lwkt_thread_ctor, _lwkt_thread_dtor,
+	    NULL);
+}
/*
 * Schedule a thread to run. As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
@@ -212,25 +248,13 @@ thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
{
void *stack;
- globaldata_t gd = mycpu;
if (td == NULL) {
- crit_enter_gd(gd);
- if (gd->gd_tdfreecount > 0) {
- --gd->gd_tdfreecount;
- td = TAILQ_FIRST(&gd->gd_tdfreeq);
- KASSERT(td != NULL && (td->td_flags & TDF_RUNNING) == 0,
- ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
- TAILQ_REMOVE(&gd->gd_tdfreeq, td, td_threadq);
- crit_exit_gd(gd);
- flags |= td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
- } else {
- crit_exit_gd(gd);
- td = zalloc(thread_zone);
- td->td_kstack = NULL;
- td->td_kstack_size = 0;
- flags |= TDF_ALLOCATED_THREAD;
- }
+ td = objcache_get(thread_cache, M_WAITOK);
+ KASSERT((td->td_flags &
+ (TDF_ALLOCATED_THREAD|TDF_RUNNING)) == TDF_ALLOCATED_THREAD,
+ ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
+ flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
}
if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
if (flags & TDF_ALLOCATED_STACK) {
@@ -353,29 +377,18 @@ lwkt_wait_free(thread_t td)
void
lwkt_free_thread(thread_t td)
{
- struct globaldata *gd = mycpu;
KASSERT((td->td_flags & TDF_RUNNING) == 0,
("lwkt_free_thread: did not exit! %p", td));
- crit_enter_gd(gd);
- if (gd->gd_tdfreecount < CACHE_NTHREADS &&
- (td->td_flags & TDF_ALLOCATED_THREAD)
- ) {
- ++gd->gd_tdfreecount;
- TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq);
- crit_exit_gd(gd);
- } else {
- crit_exit_gd(gd);
- if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
- kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
- /* gd invalid */
- td->td_kstack = NULL;
- td->td_kstack_size = 0;
- }
- if (td->td_flags & TDF_ALLOCATED_THREAD) {
- zfree(thread_zone, td);
- }
+ if (td->td_flags & TDF_ALLOCATED_THREAD) {
+ objcache_put(thread_cache, td);
+ } else if (td->td_flags & TDF_ALLOCATED_STACK) {
+ /* client-allocated struct with internally allocated stack */
+ KASSERT(td->td_kstack && td->td_kstack_size > 0,
+ ("lwkt_free_thread: corrupted stack"));
+ kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
+ td->td_kstack = NULL;
+ td->td_kstack_size = 0;
}
}
@@ -1271,8 +1284,7 @@ lwkt_exit(void)
gd = mycpu;
lwkt_remove_tdallq(td);
if (td->td_flags & TDF_ALLOCATED_THREAD) {
- ++gd->gd_tdfreecount;
- TAILQ_INSERT_TAIL(&gd->gd_tdfreeq, td, td_threadq);
+ objcache_put(thread_cache, td);
}
cpu_thread_exit();
}
Index: src2/sys/kern/init_main.c
===================================================================
--- src2.orig/sys/kern/init_main.c	2008-05-19 12:05:15.759623000 +0200
+++ src2/sys/kern/init_main.c	2008-05-19 12:10:23.000000000 +0200
@@ -662,7 +662,6 @@ SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, S
void
mi_gdinit(struct globaldata *gd, int cpuid)
{
- TAILQ_INIT(&gd->gd_tdfreeq); /* for pmap_{new,dispose}_thread() */
TAILQ_INIT(&gd->gd_systimerq);
gd->gd_sysid_alloc = cpuid; /* prime low bits for cpu lookup */
gd->gd_cpuid = cpuid;
Index: src2/sys/sys/globaldata.h
===================================================================
--- src2.orig/sys/sys/globaldata.h	2008-05-19 12:05:15.760457000 +0200
+++ src2/sys/sys/globaldata.h	2008-05-19 12:10:23.000000000 +0200
@@ -125,11 +125,11 @@ struct pipe;
struct globaldata {
 struct privatespace gd_prvspace; /* self-reference */
 struct thread *gd_curthread;
- int gd_tdfreecount; /* new thread cache */
+ int gd_unused00;
 __uint32_t gd_reqflags; /* (see note above) */
- void *gd_unused00;
+ void *gd_unused00B;
 lwkt_queue gd_tdallq; /* all threads */
- lwkt_queue gd_tdfreeq; /* new thread cache */
+ lwkt_queue gd_unused00C;
 lwkt_queue gd_tdrunq[32]; /* runnable threads */
 __uint32_t gd_runqmask; /* which queues? */
__uint32_t gd_cpuid;
Index: src2/sys/sys/thread.h
===================================================================
--- src2.orig/sys/sys/thread.h	2008-05-19 12:05:15.760659000 +0200
+++ src2/sys/sys/thread.h	2008-05-19 12:10:23.000000000 +0200
@@ -267,8 +267,8 @@ struct thread {
#define TDF_TSLEEPQ 0x0080 /* on a tsleep wait queue */
 #define TDF_SYSTHREAD		0x0100	/* system thread */
-#define TDF_ALLOCATED_THREAD	0x0200	/* zalloc allocated thread */
-#define TDF_ALLOCATED_STACK	0x0400	/* zalloc allocated stack */
+#define TDF_ALLOCATED_THREAD	0x0200	/* dynamically allocated thread */
+#define TDF_ALLOCATED_STACK	0x0400	/* dynamically allocated stack */
 #define TDF_VERBOSE		0x0800	/* verbose on exit */
 #define TDF_DEADLKTREAT		0x1000	/* special lockmgr deadlock treatment */
 #define TDF_STOPREQ		0x2000	/* suspend_kproc */
@@ -321,15 +321,7 @@ struct thread {
 #define IN_CRITICAL_SECT(td)	((td)->td_pri >= TDPRI_CRIT)
 #ifdef _KERNEL
-extern struct vm_zone *thread_zone;
-#endif
-
-/*
- * Applies both to the kernel and to liblwkt.
- */
+extern void lwkt_init(void);
extern struct thread *lwkt_alloc_thread(struct thread *, int, int, int);
extern void lwkt_init_thread(struct thread *, void *, int, int,
struct globaldata *);
Updated by dillon over 16 years ago
:New submission from Nicolas Thery <nthery@gmail.com>:
:
:This patch uses an objcache instead of a custom per-CPU cache and a zone to
:allocate and free struct thread instances.
:
:I'll commit it in a couple of days if nobody objects.
I haven't tested it but it looks like a nice cleanup.
-Matt