Bug #524 » struct-by-reference.diff

cpu/i386/include/cpu.h 21 Jan 2007 08:55:49 -0000
----------------------------------------------------------------------
 extern char etext[];
 void fork_trampoline (void);
-void fork_return (struct lwp *, struct trapframe);
+void fork_return (struct lwp *, struct trapframe *);
 #endif
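
The whole patch is one mechanical conversion: trap, syscall, and interrupt handlers that used to receive struct trapframe / struct intrframe by value now take a pointer, every frame.field access becomes frame->field, and the assembly entry points push the frame's address before the call. A minimal stand-alone sketch of the C side of that pattern follows (the two-field trapframe and the fork_return_sketch name are hypothetical stand-ins, not the kernel's real definitions):

#include <stdio.h>

/* Hypothetical stand-in for the kernel's register frame; the real
 * struct trapframe in the machine headers has many more members. */
struct trapframe {
	int tf_eax;
	int tf_eflags;
};

#define PSL_C	0x1		/* carry bit in EFLAGS, as in the real PSL_C */

/*
 * New-style handler in the spirit of fork_return(): because it takes a
 * pointer, the assignments below land in the caller's frame, not in a copy.
 */
static void
fork_return_sketch(struct trapframe *frame)
{
	frame->tf_eax = 0;		/* child returns zero */
	frame->tf_eflags &= ~PSL_C;	/* clear carry: success */
}

int
main(void)
{
	struct trapframe frame = { .tf_eax = 42, .tf_eflags = PSL_C };

	fork_return_sketch(&frame);
	printf("tf_eax=%d tf_eflags=%#x\n", frame.tf_eax, frame.tf_eflags);
	return 0;
}

With the pointer form the handler updates the caller's frame directly instead of relying on the calling convention to lay the by-value copy over the real frame on the stack.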

kern/kern_intr.c 21 Jan 2007 08:22:48 -0000
----------------------------------------------------------------------
  * Must return non-zero if we do not want the vector code to re-enable
  * the interrupt (which we don't if we have to schedule the interrupt)
  */
-int ithread_fast_handler(struct intrframe frame);
+int ithread_fast_handler(struct intrframe *frame);
 int
-ithread_fast_handler(struct intrframe frame)
+ithread_fast_handler(struct intrframe *frame)
 {
 	int intr;
 	struct intr_info *info;
...
 	intrec_t rec, next_rec;
 	globaldata_t gd;
-	intr = frame.if_vec;
+	intr = frame->if_vec;
 	gd = mycpu;
 	info = &intr_info_ary[intr];
...
 		if (rec->serializer) {
 			must_schedule += lwkt_serialize_handler_try(
 					rec->serializer, rec->handler,
-					rec->argument, &frame);
+					rec->argument, frame);
 		} else {
-			rec->handler(rec->argument, &frame);
+			rec->handler(rec->argument, frame);
 		}
 	}
 }

kern/lwkt_ipiq.c 21 Jan 2007 08:37:19 -0000
----------------------------------------------------------------------
 #ifdef _KERNEL
 void
-lwkt_process_ipiq_frame(struct intrframe frame)
+lwkt_process_ipiq_frame(struct intrframe *frame)
 {
     globaldata_t gd = mycpu;
     globaldata_t sgd;
...
             sgd = globaldata_find(n);
             ip = sgd->gd_ipiq;
             if (ip != NULL) {
-                while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], &frame))
+                while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
                     ;
             }
         }
     }
     if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
-        if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, &frame)) {
+        if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
             if (gd->gd_curthread->td_cscount == 0)
                 goto again;
             need_ipiq();

platform/pc32/apic/apic_vector.s 21 Jan 2007 10:10:27 -0000
----------------------------------------------------------------------
 	/* clear pending bit, run handler */ \
 	andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
 	pushl $irq_num ; \
+	pushl %esp ; /* pass frame by reference */ \
 	call ithread_fast_handler ; /* returns 0 to unmask */ \
-	addl $4, %esp ; \
+	addl $8, %esp ; \
 	UNMASK_IRQ(irq_num) ; \
5: ; \
 	MEXITCOUNT ; \
...
 	cmpl $TDPRI_CRIT,TD_PRI(%ebx)
 	jge 1f
 	subl $8,%esp /* make same as interrupt frame */
+	pushl %esp /* pass frame by reference */
 	incl PCPU(intr_nesting_level)
 	addl $TDPRI_CRIT,TD_PRI(%ebx)
 	call lwkt_process_ipiq_frame
 	subl $TDPRI_CRIT,TD_PRI(%ebx)
 	decl PCPU(intr_nesting_level)
-	addl $8,%esp
+	addl $12,%esp
 	pushl $0 /* CPL for frame (REMOVED) */
 	MEXITCOUNT
 	jmp doreti

platform/pc32/apic/apicvar.h 21 Jan 2007 08:42:56 -0000
----------------------------------------------------------------------
 void lapic_ipi_raw(register_t icrlo, u_int dest);
 void lapic_ipi_vectored(u_int vector, int dest);
 int lapic_ipi_wait(int delay);
-void lapic_handle_intr(struct intrframe frame);
+void lapic_handle_intr(struct intrframe *frame);
 void lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id);
 int lapic_set_lvt_mask(u_int apic_id, u_int lvt, u_char masked);
 int lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode);

platform/pc32/i386/exception.s 21 Jan 2007 10:06:02 -0000
----------------------------------------------------------------------
 	FAKE_MCOUNT(btrap) /* init "from" _btrap -> calltrap */
 	incl PCPU(cnt)+V_TRAP
 	/* warning, trap frame dummy arg, no extra reg pushes */
+	pushl %esp /* pass frame by reference */
 	call trap
+	addl $4,%esp
 	/*
 	 * Return via doreti to handle ASTs. Have to change trap frame
...
 	FAKE_MCOUNT(15*4(%esp))
 	incl PCPU(cnt)+V_SYSCALL /* YYY per-cpu */
 	/* warning, trap frame dummy arg, no extra reg pushes */
+	push %esp /* pass frame by reference */
 	call syscall2
+	addl $4,%esp
 	MEXITCOUNT
 	cli /* atomic reqflags interlock w/iret */
 	cmpl $0,PCPU(reqflags)
...
 	FAKE_MCOUNT(15*4(%esp))
 	incl PCPU(cnt)+V_SYSCALL
 	/* warning, trap frame dummy arg, no extra reg pushes */
+	push %esp /* pass frame by reference */
 	call syscall2
+	addl $4,%esp
 	MEXITCOUNT
 	cli /* atomic reqflags interlock w/irq */
 	cmpl $0,PCPU(reqflags)
...
  *
  * initproc has its own fork handler, start_init(), which DOES
  * return.
  *
  * The function (set in pcb_esi) gets passed two arguments,
  * the primary parameter set in pcb_ebx and a pointer to the
  * trapframe.
  * void (func)(int arg, struct trapframe *frame);
  */
+	pushl %esp /* pass frame by reference */
 	pushl %ebx /* arg1 */
 	call *%esi /* function */
-	addl $4,%esp
+	addl $8,%esp
 	/* cut from syscall */
 	sti

platform/pc32/i386/trap.c 21 Jan 2007 08:50:43 -0000
----------------------------------------------------------------------
 int (*pmath_emulate) (struct trapframe *);
-extern void trap (struct trapframe frame);
+extern void trap (struct trapframe *frame);
 extern int trapwrite (unsigned addr);
-extern void syscall2 (struct trapframe frame);
+extern void syscall2 (struct trapframe *frame);
 static int trap_pfault (struct trapframe *, int, vm_offset_t);
 static void trap_fatal (struct trapframe *, vm_offset_t);
...
  */
 void
-trap(struct trapframe frame)
+trap(struct trapframe *frame)
 {
 	struct globaldata *gd = mycpu;
 	struct thread *td = gd->gd_curthread;
...
 	p = td->td_proc;
 #ifdef DDB
 	if (db_active) {
-		eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
+		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
 		++gd->gd_trap_nesting_level;
 		MAKEMPSAFE(have_mplock);
-		trap_fatal(&frame, eva);
+		trap_fatal(frame, eva);
 		--gd->gd_trap_nesting_level;
 		goto out2;
 	}
...
 	eva = 0;
 	++gd->gd_trap_nesting_level;
-	if (frame.tf_trapno == T_PAGEFLT) {
+	if (frame->tf_trapno == T_PAGEFLT) {
 		/*
 		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
 		 * This problem is worked around by using an interrupt
...
 	--gd->gd_trap_nesting_level;
-	if (!(frame.tf_eflags & PSL_I)) {
+	if (!(frame->tf_eflags & PSL_I)) {
 		/*
 		 * Buggy application or kernel code has disabled interrupts
 		 * and then trapped. Enabling interrupts now is wrong, but
 		 * it is better than running with interrupts disabled until
 		 * they are accidentally enabled later.
 		 */
-		type = frame.tf_trapno;
-		if (ISPL(frame.tf_cs)==SEL_UPL || (frame.tf_eflags & PSL_VM)) {
+		type = frame->tf_trapno;
+		if (ISPL(frame->tf_cs)==SEL_UPL || (frame->tf_eflags & PSL_VM)) {
 			MAKEMPSAFE(have_mplock);
 			kprintf(
 			    "pid %ld (%s): trap %d with interrupts disabled\n",
...
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
 #endif
-	type = frame.tf_trapno;
-	code = frame.tf_err;
+	type = frame->tf_trapno;
+	code = frame->tf_err;
 	if (in_vm86call) {
 		ASSERT_MP_LOCK_HELD(curthread);
-		if (frame.tf_eflags & PSL_VM &&
+		if (frame->tf_eflags & PSL_VM &&
 		    (type == T_PROTFLT || type == T_STKFLT)) {
 #ifdef SMP
 			KKASSERT(td->td_mpcount > 0);
 #endif
-			i = vm86_emulate((struct vm86frame *)&frame);
+			i = vm86_emulate((struct vm86frame *)frame);
 #ifdef SMP
 			KKASSERT(td->td_mpcount > 0);
 #endif
...
 		 * returns to original process
 		 */
 #ifdef SMP
-		vm86_trap((struct vm86frame *)&frame,
+		vm86_trap((struct vm86frame *)frame,
 			  have_mplock);
 #else
-		vm86_trap((struct vm86frame *)&frame, 0);
+		vm86_trap((struct vm86frame *)frame, 0);
 #endif
 		KKASSERT(0); /* NOT REACHED */
 	}
...
 	 */
 	case T_PROTFLT:
 	case T_SEGNPFLT:
-		trap_fatal(&frame, eva);
+		trap_fatal(frame, eva);
 		goto out2;
 	case T_TRCTRAP:
 		type = T_BPTFLT; /* kernel breakpoint */
...
 		goto kernel_trap; /* normal kernel trap handling */
 	}
-	if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
+	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
 		/* user trap */
 		userenter(td);
 		sticks = (int)td->td_sticks;
-		lp->lwp_md.md_regs = &frame;
+		lp->lwp_md.md_regs = frame;
 		switch (type) {
 		case T_PRIVINFLT: /* privileged instruction fault */
...
 		case T_BPTFLT: /* bpt instruction fault */
 		case T_TRCTRAP: /* trace trap */
-			frame.tf_eflags &= ~PSL_T;
+			frame->tf_eflags &= ~PSL_T;
 			i = SIGTRAP;
 			break;
...
 		 */
 		case T_PROTFLT: /* general protection fault */
 		case T_STKFLT: /* stack fault */
-			if (frame.tf_eflags & PSL_VM) {
-				i = vm86_emulate((struct vm86frame *)&frame);
+			if (frame->tf_eflags & PSL_VM) {
+				i = vm86_emulate((struct vm86frame *)frame);
 				if (i == 0)
 					goto out;
 				break;
...
 		case T_PAGEFLT: /* page fault */
 			MAKEMPSAFE(have_mplock);
-			i = trap_pfault(&frame, TRUE, eva);
+			i = trap_pfault(frame, TRUE, eva);
 			if (i == -1)
 				goto out;
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
...
 			 */
 			if (ddb_on_nmi) {
 				kprintf ("NMI ... going to debugger\n");
-				kdb_trap (type, 0, &frame);
+				kdb_trap (type, 0, frame);
 			}
 #endif /* DDB */
 			goto out2;
...
 				ucode = FPE_FPU_NP_TRAP;
 				break;
 			}
-			i = (*pmath_emulate)(&frame);
+			i = (*pmath_emulate)(frame);
 			if (i == 0) {
-				if (!(frame.tf_eflags & PSL_T))
+				if (!(frame->tf_eflags & PSL_T))
 					goto out2;
-				frame.tf_eflags &= ~PSL_T;
+				frame->tf_eflags &= ~PSL_T;
 				i = SIGTRAP;
 			}
 			/* else ucode = emulator_only_knows() XXX */
...
 		switch (type) {
 		case T_PAGEFLT: /* page fault */
 			MAKEMPSAFE(have_mplock);
-			trap_pfault(&frame, FALSE, eva);
+			trap_pfault(frame, FALSE, eva);
 			goto out2;
 		case T_DNA:
...
 	 */
 #define MAYBE_DORETI_FAULT(where, whereto) \
 	do { \
-		if (frame.tf_eip == (int)where) { \
-			frame.tf_eip = (int)whereto; \
+		if (frame->tf_eip == (int)where) { \
+			frame->tf_eip = (int)whereto; \
 			goto out2; \
 		} \
 	} while (0)
...
 			MAYBE_DORETI_FAULT(doreti_popl_gs,
 					   doreti_popl_gs_fault);
 			if (td->td_pcb->pcb_onfault) {
-				frame.tf_eip =
+				frame->tf_eip =
 				    (register_t)td->td_pcb->pcb_onfault;
 				goto out2;
 			}
...
 		 * problem here and not every time the kernel is
 		 * entered.
 		 */
-		if (frame.tf_eflags & PSL_NT) {
-			frame.tf_eflags &= ~PSL_NT;
+		if (frame->tf_eflags & PSL_NT) {
+			frame->tf_eflags &= ~PSL_NT;
 			goto out2;
 		}
 		break;
 	case T_TRCTRAP: /* trace trap */
-		if (frame.tf_eip == (int)IDTVEC(syscall)) {
+		if (frame->tf_eip == (int)IDTVEC(syscall)) {
 			/*
 			 * We've just entered system mode via the
 			 * syscall lcall. Continue single stepping
...
 			 */
 			goto out2;
 		}
-		if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
+		if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
 			/*
 			 * The syscall handler has now saved the
 			 * flags. Stop single stepping it.
 			 */
-			frame.tf_eflags &= ~PSL_T;
+			frame->tf_eflags &= ~PSL_T;
 			goto out2;
 		}
 		/*
...
 		 */
 #ifdef DDB
 		MAKEMPSAFE(have_mplock);
-		if (kdb_trap (type, 0, &frame))
+		if (kdb_trap (type, 0, frame))
 			goto out2;
 #endif
 		break;
...
 		 */
 		if (ddb_on_nmi) {
 			kprintf ("NMI ... going to debugger\n");
-			kdb_trap (type, 0, &frame);
+			kdb_trap (type, 0, frame);
 		}
 #endif /* DDB */
 		goto out2;
...
 	}
 	MAKEMPSAFE(have_mplock);
-	trap_fatal(&frame, eva);
+	trap_fatal(frame, eva);
 	goto out2;
 	}
...
 	 * handle it.
 	 */
 	if (p->p_vkernel && p->p_vkernel->vk_current) {
-		vkernel_trap(p, &frame);
+		vkernel_trap(p, frame);
 		goto out;
 	}
...
out:
 #ifdef SMP
-	if (ISPL(frame.tf_cs) == SEL_UPL)
-		KASSERT(td->td_mpcount == have_mplock, ("badmpcount trap/end from %p", (void *)frame.tf_eip));
+	if (ISPL(frame->tf_cs) == SEL_UPL)
+		KASSERT(td->td_mpcount == have_mplock, ("badmpcount trap/end from %p", (void *)frame->tf_eip));
 #endif
-	userret(lp, &frame, sticks);
+	userret(lp, frame, sticks);
 	userexit(lp);
out2: ;
 #ifdef SMP
...
  */
 void
-syscall2(struct trapframe frame)
+syscall2(struct trapframe *frame)
 {
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
...
 	union sysunion args;
 #ifdef DIAGNOSTIC
-	if (ISPL(frame.tf_cs) != SEL_UPL) {
+	if (ISPL(frame->tf_cs) != SEL_UPL) {
 		get_mplock();
 		panic("syscall");
 		/* NOT REACHED */
...
 #endif
 #ifdef SMP
-	KASSERT(td->td_mpcount == 0, ("badmpcount syscall2 from %p", (void *)frame.tf_eip));
+	KASSERT(td->td_mpcount == 0, ("badmpcount syscall2 from %p", (void *)frame->tf_eip));
 	if (syscall_mpsafe == 0)
 		MAKEMPSAFE(have_mplock);
 #endif
...
 	 * Misc
 	 */
 	sticks = (int)td->td_sticks;
-	orig_tf_eflags = frame.tf_eflags;
+	orig_tf_eflags = frame->tf_eflags;
 	/*
 	 * Virtual kernel intercept - if a VM context managed by a virtual
...
 	 * call. The current frame is copied out to the virtual kernel.
 	 */
 	if (p->p_vkernel && p->p_vkernel->vk_current) {
-		error = vkernel_trap(p, &frame);
-		frame.tf_eax = error;
+		error = vkernel_trap(p, frame);
+		frame->tf_eax = error;
 		if (error)
-			frame.tf_eflags |= PSL_C;
+			frame->tf_eflags |= PSL_C;
 		error = EJUSTRETURN;
 		goto out;
 	}
...
 	/*
 	 * Get the system call parameters and account for time
 	 */
-	lp->lwp_md.md_regs = &frame;
-	params = (caddr_t)frame.tf_esp + sizeof(int);
-	code = frame.tf_eax;
+	lp->lwp_md.md_regs = frame;
+	params = (caddr_t)frame->tf_esp + sizeof(int);
+	code = frame->tf_eax;
 	if (p->p_sysent->sv_prepsyscall) {
 		(*p->p_sysent->sv_prepsyscall)(
-			&frame, (int *)(&args.nosys.sysmsg + 1),
+			frame, (int *)(&args.nosys.sysmsg + 1),
 			&code, &params);
 	} else {
 		/*
...
 		 * system call returns we pre-set it here.
 		 */
 		args.sysmsg_fds[0] = 0;
-		args.sysmsg_fds[1] = frame.tf_edx;
+		args.sysmsg_fds[1] = frame->tf_edx;
 	/*
 	 * The syscall might manipulate the trap frame. If it does it
 	 * will probably return EJUSTRETURN.
 	 */
-	args.sysmsg_frame = &frame;
+	args.sysmsg_frame = frame;
 	STOPEVENT(p, S_SCE, narg); /* MP aware */
...
 	 */
 	p = curproc;
 	lp = curthread->td_lwp;
-		frame.tf_eax = args.sysmsg_fds[0];
-		frame.tf_edx = args.sysmsg_fds[1];
-		frame.tf_eflags &= ~PSL_C;
+		frame->tf_eax = args.sysmsg_fds[0];
+		frame->tf_edx = args.sysmsg_fds[1];
+		frame->tf_eflags &= ~PSL_C;
 		break;
 	case ERESTART:
 		/*
 		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
 		 * int 0x80 is 2 bytes. We saved this in tf_err.
 		 */
-		frame.tf_eip -= frame.tf_err;
+		frame->tf_eip -= frame->tf_err;
 		break;
 	case EJUSTRETURN:
 		break;
...
 		else
 			error = p->p_sysent->sv_errtbl[error];
 		}
-		frame.tf_eax = error;
-		frame.tf_eflags |= PSL_C;
+		frame->tf_eax = error;
+		frame->tf_eflags |= PSL_C;
 		break;
 	}
...
 	 */
 	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
 		MAKEMPSAFE(have_mplock);
-		frame.tf_eflags &= ~PSL_T;
+		frame->tf_eflags &= ~PSL_T;
 		trapsignal(p, SIGTRAP, 0);
 	}
 	/*
 	 * Handle reschedule and other end-of-syscall issues
 	 */
-	userret(lp, &frame, sticks);
+	userret(lp, frame, sticks);
 #ifdef KTRACE
 	if (KTRPOINT(td, KTR_SYSRET)) {
...
 	 * Release the MP lock if we had to get it
 	 */
 	KASSERT(td->td_mpcount == have_mplock,
-		("badmpcount syscall2/end from %p", (void *)frame.tf_eip));
+		("badmpcount syscall2/end from %p", (void *)frame->tf_eip));
 	if (have_mplock)
 		rel_mplock();
 #endif
...
  * trampoline code which then runs doreti.
  */
 void
-fork_return(struct lwp *lp, struct trapframe frame)
+fork_return(struct lwp *lp, struct trapframe *frame)
 {
 	struct proc *p = lp->lwp_proc;
-	frame.tf_eax = 0; /* Child returns zero */
-	frame.tf_eflags &= ~PSL_C; /* success */
-	frame.tf_edx = 1;
+	frame->tf_eax = 0; /* Child returns zero */
+	frame->tf_eflags &= ~PSL_C; /* success */
+	frame->tf_edx = 1;
 	/*
 	 * Newly forked processes are given a kernel priority. We have to
...
 	 */
 	lwkt_setpri_self(TDPRI_USER_NORM);
 	userenter(lp->lwp_thread);
-	userret(lp, &frame, 0);
+	userret(lp, frame, 0);
 #ifdef KTRACE
 	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
 		ktrsysret(p, SYS_fork, 0, 0);
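
A detail worth noting in trap() and syscall2() above: places that used to store the address of the by-value parameter, such as lp->lwp_md.md_regs = &frame, now store the pointer itself (lp->lwp_md.md_regs = frame). In portable C the two are not interchangeable, because a by-value parameter is a copy and &frame names that copy; the old style depended on the i386 entry code building the real frame at the spot where the by-value argument lives. A small sketch of the difference (hypothetical names, not kernel code):

#include <stdio.h>

struct trapframe { int tf_eax; };	/* hypothetical, one field is enough */

/* By value: 'frame' is a copy, so its address can never be the caller's. */
static int
points_at_caller_byval(struct trapframe frame, struct trapframe *caller)
{
	return &frame == caller;
}

/* By reference: the pointer is the caller's frame itself. */
static int
points_at_caller_byref(struct trapframe *frame, struct trapframe *caller)
{
	return frame == caller;
}

int
main(void)
{
	struct trapframe f = { .tf_eax = 1 };

	printf("by value:     %d\n", points_at_caller_byval(f, &f));
	printf("by reference: %d\n", points_at_caller_byref(&f, &f));
	return 0;
}

The first call prints 0 and the second prints 1: only the by-reference form hands later consumers of the saved pointer the frame that is actually restored on return to user mode.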

platform/pc32/i386/vm86.c 21 Jan 2007 09:42:18 -0000
----------------------------------------------------------------------
 	char vml_iomap_trailer;
 };
-void vm86_prepcall(struct vm86frame);
+void vm86_prepcall(struct vm86frame *);
 struct system_map {
 	int type;
...
  * called from vm86_bioscall, while in vm86 address space, to finalize setup.
  */
 void
-vm86_prepcall(struct vm86frame vmf)
+vm86_prepcall(struct vm86frame *vmf)
 {
 	uintptr_t addr[] = { 0xA00, 0x1000 }; /* code, stack */
 	u_char intcall[] = {
 		CLI, INTn, 0x00, STI, HLT
 	};
-	if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
+	if ((vmf->vmf_trapno & PAGE_MASK) <= 0xff) {
 		/* interrupt call requested */
-		intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
+		intcall[2] = (u_char)(vmf->vmf_trapno & 0xff);
 		memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
-		vmf.vmf_ip = addr[0];
-		vmf.vmf_cs = 0;
+		vmf->vmf_ip = addr[0];
+		vmf->vmf_cs = 0;
 	}
-	vmf.vmf_sp = addr[1] - 2; /* keep aligned */
-	vmf.kernel_fs = vmf.kernel_es = vmf.kernel_ds = vmf.kernel_gs = 0;
-	vmf.vmf_ss = 0;
-	vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
-	vm86_initflags(&vmf);
+	vmf->vmf_sp = addr[1] - 2; /* keep aligned */
+	vmf->kernel_fs = vmf->kernel_es = vmf->kernel_ds = vmf->kernel_gs = 0;
+	vmf->vmf_ss = 0;
+	vmf->vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
+	vm86_initflags(vmf);
 }
 /*

platform/pc32/i386/vm86bios.s 21 Jan 2007 09:43:47 -0000
----------------------------------------------------------------------
 	movl %ecx,%cr3 /* new page tables */
 	movl SCR_VMFRAME(%edx),%esp /* switch to new stack */
 
+	pushl %esp /* pass frame by reference */
 	call vm86_prepcall /* finish setup */
+	addl $4,%esp
 	movl $1,in_vm86call /* set flag for trap() */

platform/pc32/icu/icu_vector.s 21 Jan 2007 10:10:12 -0000
----------------------------------------------------------------------
 	/* clear pending bit, run handler */ \
 	andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
 	pushl $irq_num ; \
+	pushl %esp ; /* pass frame by reference */ \
 	call ithread_fast_handler ; /* returns 0 to unmask int */ \
-	addl $4,%esp ; \
+	addl $8,%esp ; \
 	UNMASK_IRQ(icu, irq_num) ; \
5: ; \
 	MEXITCOUNT ; \

platform/pc32/isa/ipl.s 21 Jan 2007 11:02:33 -0000
----------------------------------------------------------------------
 	sti
 	movl %eax,%esi /* save cpl (can't use stack) */
 	movl $T_ASTFLT,TF_TRAPNO(%esp)
+	pushl %esp /* pass frame by reference */
 	subl $TDPRI_CRIT,TD_PRI(%ebx)
 	call trap
 	addl $TDPRI_CRIT,TD_PRI(%ebx)
+	addl $4,%esp
 	movl %esi,%eax /* restore cpl for loop */
 	jmp doreti_next
...
 	incl PCPU(intr_nesting_level)
 	andl $~RQF_IPIQ,PCPU(reqflags)
 	subl $8,%esp /* add dummy vec and ppl */
+	pushl %esp /* pass frame by reference */
 	call lwkt_process_ipiq_frame
-	addl $8,%esp
+	addl $12,%esp
 	decl PCPU(intr_nesting_level)
 	movl %esi,%eax /* restore cpl for loop */
 	jmp doreti_next
...
 	PUSH_DUMMY
 	pushl %ecx /* last part of intrframe = intr */
 	incl fastunpend_count
+	pushl %esp /* pass frame by reference */
 	call ithread_fast_handler /* returns 0 to unmask */
+	addl $4,%esp /* remove pointer, now intr on top */
 	cmpl $0,%eax
 	jnz 1f
 	movl MachIntrABI + MACHINTR_INTREN, %eax

platform/vkernel/i386/fork_tramp.s 21 Jan 2007 09:38:17 -0000
----------------------------------------------------------------------
  *
  * initproc has its own fork handler, start_init(), which DOES
  * return.
  *
  * The function (set in pcb_esi) gets passed two arguments,
  * the primary parameter set in pcb_ebx and a pointer to the
  * trapframe.
  * void (func)(int arg, struct trapframe *frame);
  */
+	pushl %esp /* pass frame by reference */
 	pushl %ebx /* arg1 */
 	call *%esi /* function */
-	addl $4,%esp
+	addl $8,%esp
 	/* cut from syscall */
 	call splz
...
 	MEXITCOUNT
 	pushl $0 /* if_ppl */
 	pushl $0 /* if_vec */
+	pushl %esp /* pass by reference */
 	call go_user
 	/* NOT REACHED */

platform/vkernel/i386/trap.c 21 Jan 2007 08:53:03 -0000
----------------------------------------------------------------------
  * trampoline code which then runs doreti.
  */
 void
-fork_return(struct lwp *lp, struct trapframe frame)
+fork_return(struct lwp *lp, struct trapframe *frame)
 {
 	struct proc *p = lp->lwp_proc;
-	frame.tf_eax = 0; /* Child returns zero */
-	frame.tf_eflags &= ~PSL_C; /* success */
-	frame.tf_edx = 1;
+	frame->tf_eax = 0; /* Child returns zero */
+	frame->tf_eflags &= ~PSL_C; /* success */
+	frame->tf_edx = 1;
 	/*
 	 * Newly forked processes are given a kernel priority. We have to
...
 	 */
 	lwkt_setpri_self(TDPRI_USER_NORM);
 	userenter(lp->lwp_thread);
-	userret(lp, &frame, 0);
+	userret(lp, frame, 0);
 #ifdef KTRACE
 	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
 		ktrsysret(p, SYS_fork, 0, 0);
...
  * context we supplied or problems copying data to/from our VM space.
  */
 void
-go_user(struct intrframe frame)
+go_user(struct intrframe *frame)
 {
-	struct trapframe *tf = (void *)&frame.if_gs;
+	struct trapframe *tf = (void *)&frame->if_gs;
 	int r;
 	/*
...
 	 */
 	r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
 			tf, &curthread->td_savevext);
-	frame.if_xflags |= PGEX_U;
+	frame->if_xflags |= PGEX_U;
 #if 0
 	kprintf("GO USER %d trap %d EVA %08x EIP %08x ESP %08x XFLAGS %02x/%02x\n",
 		r, tf->tf_trapno, tf->tf_err, tf->tf_eip, tf->tf_esp,
-		tf->tf_xflags, frame.if_xflags);
+		tf->tf_xflags, frame->if_xflags);
 #endif
 	if (r < 0) {
 		if (errno == EINTR)
-			signalmailbox(&frame);
+			signalmailbox(frame);
 		else
 			panic("vmspace_ctl failed");
 	} else {
-		signalmailbox(&frame);
+		signalmailbox(frame);
 		if (tf->tf_trapno) {
 			user_trap(tf);
 		} else if (mycpu->gd_reqflags & RQF_AST_MASK) {
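
In the vkernel's go_user() above, the trapframe pointer is derived from a member of the intrframe argument: (void *)&frame->if_gs. With frame now a pointer, that derived pointer lies inside the caller's frame, so user_trap() and signalmailbox() all act on the same storage. The sketch below shows the same member-address idea; it embeds the register block as a named member (if_regs) rather than overlaying two struct layouts with a cast, so it stays portable C, and every name in it is hypothetical:

#include <stdio.h>

/*
 * Hypothetical frames: the kernel overlays struct trapframe onto part of
 * struct intrframe with a cast ((void *)&frame->if_gs); here the register
 * block is simply embedded as a member so the sketch stays portable C.
 */
struct trapframe {
	int tf_gs;
	int tf_eax;
};

struct intrframe {
	int if_vec;
	int if_ppl;
	struct trapframe if_regs;	/* hypothetical member */
};

/*
 * Mirrors the go_user() pattern: derive a trapframe pointer from the
 * intrframe parameter; because 'frame' is a pointer, 'tf' refers to
 * storage owned by the caller.
 */
static void
run_once(struct intrframe *frame)
{
	struct trapframe *tf = &frame->if_regs;

	tf->tf_eax = 7;			/* visible to the caller after return */
}

int
main(void)
{
	struct intrframe frame = { 0 };

	run_once(&frame);
	printf("if_regs.tf_eax = %d\n", frame.if_regs.tf_eax);
	return 0;
}

After run_once() returns, the write made through tf is visible in the caller's frame, which is exactly the property the by-reference conversion preserves for the vkernel's signal and trap paths.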

platform/vkernel/include/md_var.h 21 Jan 2007 08:53:10 -0000
----------------------------------------------------------------------
 void cpu_exit_switch (struct thread *next);
 void cpu_setregs (void);
 void cpu_idle (void);
-void go_user (struct intrframe frame);
+void go_user (struct intrframe *frame);
 void init_exceptions(void);
 void init_kqueue(void);

sys/thread.h 21 Jan 2007 08:37:26 -0000
----------------------------------------------------------------------
 extern int lwkt_seq_ipiq(struct globaldata *targ);
 extern void lwkt_process_ipiq(void);
 #ifdef _KERNEL
-extern void lwkt_process_ipiq_frame(struct intrframe frame);
+extern void lwkt_process_ipiq_frame(struct intrframe *frame);
 #endif
 extern void lwkt_smp_stopped(void);