From 03eac0b24146fe73d585c998eca4c1c2873f1acb Mon Sep 17 00:00:00 2001
From: bernie
Date: Sun, 10 Aug 2008 19:15:28 +0000
Subject: [PATCH] sig_wait(): Don't call proc_schedule() with interrupts
 disabled

Loosening sig_wait() locking lets us drop the requirement for
asm_switch_context() to save and restore the processor interrupt flag.

This patch also includes changes all over the place in proc to harden
consistency checks.

git-svn-id: https://src.develer.com/svnoss/bertos/trunk@1614 38d2e660-2303-0410-9eaa-f027e97ec537
---
 bertos/kern/proc.c   | 95 +++++++++++++++++++++++++-------------------
 bertos/kern/signal.c | 40 ++++++++++++++++---
 2 files changed, 89 insertions(+), 46 deletions(-)

diff --git a/bertos/kern/proc.c b/bertos/kern/proc.c
index 37b36e07..5674038c 100644
--- a/bertos/kern/proc.c
+++ b/bertos/kern/proc.c
@@ -35,19 +35,21 @@
  * Context switching is only done cooperatively.
  *
  * \version $Id$
- *
  * \author Bernie Innocenti
  * \author Stefano Fedrigo
  */
-
 #include "proc_p.h"
 #include "proc.h"
 #include "cfg/cfg_arch.h"  /* ARCH_EMUL */
 
 #include
 #include
-#include   /* ABS() */
+
+// Log settings for cfg/log.h.
+#define LOG_LEVEL   KERN_LOG_LEVEL
+#define LOG_FORMAT  KERN_LOG_FORMAT
+#include <cfg/log.h>
 
 #include
 #include
 
@@ -60,32 +62,41 @@
 /**
  * CPU dependent context switching routines.
  *
- * \note This function *MUST* preserve also the status of the interrupts.
+ * Saving and restoring the context on the stack is done by a CPU-dependent
+ * support routine which usually needs to be written in assembly.
  */
 EXTERN_C void asm_switch_context(cpustack_t **new_sp, cpustack_t **save_sp);
 
 /*
- * The scheduer tracks ready and waiting processes
- * by enqueuing them in these lists. A pointer to the currently
- * running process is stored in the CurrentProcess pointer.
+ * The scheduler tracks ready processes by enqueuing them in the
+ * ready list.
- *
- * NOTE: these variables are protected by DI/EI locking
+ * \note Access to the list must occur while interrupts are disabled.
  */
-REGISTER Process *CurrentProcess;
-REGISTER List    ProcReadyList;
+REGISTER List    ProcReadyList;
+
+/*
+ * Holds a pointer to the TCB of the currently running process.
+ *
+ * \note User applications should use proc_current() to retrieve this value.
+ */
+REGISTER Process *CurrentProcess;
 
 
 #if CONFIG_KERN_PREEMPTIVE
 /*
- * The time sharing scheduler forces a task switch when
- * the current process has consumed its quantum.
+ * The time sharing scheduler forces a task switch when the current
+ * process has exhausted its quantum.
  */
 uint16_t Quantum;
 #endif
 
-/* In Win32 we must emulate stack on the real process stack */
 #if (ARCH & ARCH_EMUL)
+/*
+ * In hosted environments, we must emulate the stack on the real process stack.
+ *
+ * Access to this list must be protected by PROC_ATOMIC().
+ */
 extern List StackFreeList;
 #endif
 
@@ -117,17 +128,19 @@ void proc_init(void)
 {
 	LIST_INIT(&ProcReadyList);
 
-#if CONFIG_KERN_MONITOR
-	monitor_init();
-#endif
-
-	/* We "promote" the current context into a real process. The only thing we have
+	/*
+	 * We "promote" the current context into a real process. The only thing we have
 	 * to do is create a PCB and make it current. We don't need to setup the stack
 	 * pointer because it will be written the first time we switch to another process.
 	 */
 	proc_init_struct(&MainProcess);
 	CurrentProcess = &MainProcess;
 
+#if CONFIG_KERN_MONITOR
+	monitor_init();
+	monitor_add(CurrentProcess, "main");
+#endif
+
 	MOD_INIT(proc);
 }
 
@@ -146,10 +159,11 @@ struct Process *proc_new_with_name(UNUSED(const char *, name), void (*entry)(voi
 #if CONFIG_KERN_HEAP
 	bool free_stack = false;
 #endif
+	TRACEMSG("name=%s", name);
 
 #if (ARCH & ARCH_EMUL)
 	/* Ignore stack provided by caller and use the large enough default instead. */
-	stack_base = (cpustack_t *)list_remHead(&StackFreeList);
+	PROC_ATOMIC(stack_base = (cpustack_t *)list_remHead(&StackFreeList));
 	stack_size = CONFIG_PROC_DEFSTACKSIZE;
 
 #elif CONFIG_KERN_HEAP
@@ -168,12 +182,13 @@ struct Process *proc_new_with_name(UNUSED(const char *, name), void (*entry)(voi
 	}
 #else
 	/* Stack must have been provided by the user */
-	ASSERT(stack_base);
+	ASSERT_VALID_PTR(stack_base);
 	ASSERT(stack_size);
 #endif
 
 #if CONFIG_KERN_MONITOR
 	/* Fill-in the stack with a special marker to help debugging */
+#warning size incorrect
 	memset(stack_base, CONFIG_KERN_STACKFILLCODE, stack_size / sizeof(cpustack_t));
 #endif
 
@@ -215,6 +230,7 @@ struct Process *proc_new_with_name(UNUSED(const char *, name), void (*entry)(voi
 
 	/* Add to ready list */
 	ATOMIC(SCHED_ENQUEUE(proc));
+	ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
 
 #if CONFIG_KERN_MONITOR
 	monitor_add(proc, name);
@@ -237,24 +253,19 @@ void proc_rename(struct Process *proc, const char *name)
 /**
  * System scheduler: pass CPU control to the next process in
  * the ready queue.
- *
- * Saving and restoring the context on the stack is done
- * by a CPU-dependent support routine which must usually be
- * written in assembly.
  */
 void proc_schedule(void)
 {
 	struct Process *old_process;
 	cpuflags_t flags;
 
+	ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
+	ASSERT_USER_CONTEXT();
+	ASSERT_IRQ_ENABLED();
+
 	/* Remember old process to save its context later */
 	old_process = CurrentProcess;
 
-#ifdef IRQ_RUNNING
-	/* Scheduling in interrupts is a nono. */
-	ASSERT(!IRQ_RUNNING());
-#endif
-
 	/* Poll on the ready queue for the first ready process */
 	IRQ_SAVE_DISABLE(flags);
 	while (!(CurrentProcess = (struct Process *)list_remHead(&ProcReadyList)))
@@ -289,10 +300,16 @@ void proc_schedule(void)
 	{
 		cpustack_t *dummy;
 
-#if CONFIG_KERN_PREEMPTIVE
-		/* Reset quantum for this process */
-		Quantum = CONFIG_KERN_QUANTUM;
-#endif
+		#if CONFIG_KERN_MONITOR
+			LOG_INFO("Switch from %p(%s) to %p(%s)\n",
+				old_process, old_process ? old_process->monitor.name : "NONE",
+				CurrentProcess, CurrentProcess->monitor.name);
+		#endif
+
+		#if CONFIG_KERN_PREEMPTIVE
+			/* Reset quantum for this process */
+			Quantum = CONFIG_KERN_QUANTUM;
+		#endif
 
 		/* Save context of old process and switch to new process. If there is no
 		 * old process, we save the old stack pointer into a dummy variable that
@@ -313,6 +330,8 @@ void proc_schedule(void)
  */
 void proc_exit(void)
 {
+	TRACE;
+
 #if CONFIG_KERN_MONITOR
 	monitor_remove(CurrentProcess);
 #endif
@@ -333,8 +352,8 @@ void proc_exit(void)
 #if (ARCH & ARCH_EMUL)
 #warning This is wrong
 	/* Reinsert process stack in free list */
-	ADDHEAD(&StackFreeList, (Node *)(CurrentProcess->stack
-		- (CONFIG_PROC_DEFSTACKSIZE / sizeof(cpustack_t))));
+	PROC_ATOMIC(ADDHEAD(&StackFreeList, (Node *)(CurrentProcess->stack
+		- (CONFIG_PROC_DEFSTACKSIZE / sizeof(cpustack_t)))));
 
 	/*
 	 * NOTE: At this point the first two words of what used
@@ -354,11 +373,7 @@ void proc_exit(void)
  */
 void proc_switch(void)
 {
-	cpuflags_t flags;
-
-	IRQ_SAVE_DISABLE(flags);
-	SCHED_ENQUEUE(CurrentProcess);
-	IRQ_RESTORE(flags);
+	ATOMIC(SCHED_ENQUEUE(CurrentProcess));
 
 	proc_schedule();
 }
 
diff --git a/bertos/kern/signal.c b/bertos/kern/signal.c
index c21de178..d7633a14 100644
--- a/bertos/kern/signal.c
+++ b/bertos/kern/signal.c
@@ -26,7 +26,7 @@
  * invalidate any other reasons why the executable file might be covered by
  * the GNU General Public License.
  *
- * Copyright 2004 Develer S.r.l. (http://www.develer.com/)
+ * Copyright 2004, 2008 Develer S.r.l. (http://www.develer.com/)
  * Copyright 1999, 2000, 2001 Bernie Innocenti
  *
  * -->
@@ -137,18 +137,45 @@ sigmask_t sig_wait(sigmask_t sigs)
 	sigmask_t result;
 	cpuflags_t flags;
 
+	/*
+	 * This is subtle: there's a race condition where a concurrent
+	 * process or an interrupt calls sig_signal() to set a bit in
+	 * our sig_recv just after we have checked for it, but before
+	 * we've set sig_wait to tell them we want to be awakened.
+	 *
+	 * In this case, we'd deadlock with the signal bit already
+	 * set and the process never being reinserted into the ready
+	 * list.
+	 */
 	IRQ_SAVE_DISABLE(flags);
 
 	/* Loop until we get at least one of the signals */
 	while (!(result = CurrentProcess->sig_recv & sigs))
 	{
-		/* go to sleep and proc_schedule() another process */
+		/*
+		 * Tell "them" that we want to be awakened when any of these
+		 * signals arrives.
+		 */
 		CurrentProcess->sig_wait = sigs;
-		proc_schedule();
 
-		/* When we come back here, a signal must be arrived */
+		/*
+		 * Go to sleep and proc_schedule() another process.
+		 *
+		 * We re-enable IRQs because proc_schedule() does not
+		 * guarantee to save and restore the interrupt mask.
+		 */
+		IRQ_RESTORE(flags);
+		proc_schedule();
+		IRQ_SAVE_DISABLE(flags);
+
+		/*
+		 * When we come back here, the wait mask must have been
+		 * cleared by someone through sig_signal(), and at least
+		 * one of the signals we were expecting must have been
+		 * delivered to us.
+		 */
 		ASSERT(!CurrentProcess->sig_wait);
-		ASSERT(CurrentProcess->sig_recv);
+		ASSERT(CurrentProcess->sig_recv & sigs);
 	}
 
 	/* Signals found: clear them and return */
@@ -198,6 +225,8 @@ sigmask_t sig_waitTimeout(sigmask_t sigs, ticks_t timeout)
 void sig_signal(Process *proc, sigmask_t sigs)
 {
 	cpuflags_t flags;
+
+	/* See comment in sig_wait() for why this protection is necessary */
 	IRQ_SAVE_DISABLE(flags);
 
 	/* Set the signals */
@@ -215,4 +244,3 @@ void sig_signal(Process *proc, sigmask_t sigs)
 }
 
 #endif /* CONFIG_KERN_SIGNALS */
-
-- 
2.25.1
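
For illustration, the locking pattern that sig_wait() adopts with this patch can be reduced to a small stand-alone sketch. This is not the BeRTOS code: the irq_save_disable()/irq_restore()/schedule() stubs and the two global masks below are hypothetical stand-ins for IRQ_SAVE_DISABLE(), IRQ_RESTORE(), proc_schedule() and the per-process sig_recv/sig_wait fields. The point is the shape of the wait loop: the signal check and the registration of the wait mask happen with interrupts disabled, interrupts are re-enabled only across the call into the scheduler, and the condition is re-tested after every wakeup, so a sig_signal() racing with the registration is never lost.

/*
 * Sketch of the lost-wakeup-safe wait/notify pattern, compilable on its own.
 * The "hardware" primitives below are hypothetical stubs, not the real
 * BeRTOS macros.
 */
#include <stdint.h>

typedef uint8_t sigmask_t;
typedef unsigned long cpuflags_t;

/* Hypothetical stand-ins for the real kernel primitives. */
static cpuflags_t irq_save_disable(void) { return 0; }
static void irq_restore(cpuflags_t flags) { (void)flags; }
static void schedule(void) { /* cooperatively switch to another ready process */ }
static void wake_up_waiter(void) { /* reinsert the waiting process in the ready list */ }

/* Simplified model of Process::sig_recv and Process::sig_wait. */
static volatile sigmask_t sig_recv;       /* signals already delivered */
static volatile sigmask_t sig_wait_mask;  /* signals the waiter sleeps on */

sigmask_t example_sig_wait(sigmask_t sigs)
{
	sigmask_t result;
	cpuflags_t flags;

	/* Check and register atomically: a racing signaller cannot slip in
	 * between the test of sig_recv and the store to sig_wait_mask. */
	flags = irq_save_disable();

	while (!(result = (sigmask_t)(sig_recv & sigs)))
	{
		/* Tell signallers which signals should wake us up. */
		sig_wait_mask = sigs;

		/* The scheduler does not promise to preserve the interrupt
		 * state, so re-enable IRQs around it and re-disable them
		 * before re-testing the condition. */
		irq_restore(flags);
		schedule();
		flags = irq_save_disable();
	}

	/* Consume the signals we were waiting for and return them. */
	sig_recv = (sigmask_t)(sig_recv & (sigmask_t)~sigs);
	irq_restore(flags);
	return result;
}

void example_sig_signal(sigmask_t sigs)
{
	cpuflags_t flags = irq_save_disable();

	sig_recv = (sigmask_t)(sig_recv | sigs);

	/* Wake the waiter only if it registered interest in one of these
	 * signals; clear the wait mask so the wakeup is one-shot. */
	if (sig_recv & sig_wait_mask)
	{
		sig_wait_mask = 0;
		wake_up_waiter();
	}

	irq_restore(flags);
}

The same reasoning explains why sig_signal() keeps its own IRQ protection: setting the signal bit and testing the wait mask must stay atomic with respect to the check-and-register sequence in the waiter.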