X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;f=bertos%2Fkern%2Fpreempt.c;h=19c43d3c0c01da72d468cf9752faceddf44ad8c5;hb=32d1445272120a254d77ce8d1af1f527da7a2c17;hp=48a1278ebffa7b0d6df4937a417d2c08eb82b3bf;hpb=fe0a14d1434098bfd0780d06a2a7e55f27940d27;p=bertos.git

diff --git a/bertos/kern/preempt.c b/bertos/kern/preempt.c
index 48a1278e..19c43d3c 100644
--- a/bertos/kern/preempt.c
+++ b/bertos/kern/preempt.c
@@ -27,21 +27,61 @@
  * the GNU General Public License.
  *
  * Copyright 2008 Bernie Innocenti
+ * Copyright 2009 Andrea Righi
  * -->
  *
  * \brief Simple preemptive multitasking scheduler.
  *
- * All voluntary and preemptive context switching happens on exit from
- * a common interrupt (signal) dispatcher. Preemption on quantum timeout
- * is regulated by a soft-timer. Other kinds of preemption could happen
- * if an interrupt sends a signal to a higher priority process (but this
- * is still unimplemented).
+ * Preemption is explicitly regulated at the exit of each interrupt service
+ * routine (ISR). Each task obtains a time quantum as soon as it is scheduled
+ * on the CPU and its quantum is decremented at each clock tick. The frequency
+ * of the timer determines the system tick granularity and CONFIG_KERN_QUANTUM
+ * the time sharing interval.
  *
- * In the POSIX implementaiton, context switching is done by the portable
- * SVR4 swapcontext() facility.
+ * When the quantum expires, the handler proc_needPreempt() checks whether
+ * preemption is enabled; if so, proc_schedule() is called, which may replace
+ * the currently running thread with a different one.
+ *
+ * Preemption can be disabled or enabled via the proc_forbid() and
+ * proc_permit() primitives. This is implemented using a global atomic counter:
+ * while the counter is greater than 0 the task cannot be preempted; only when
+ * the counter reaches 0 can the task be preempted again.
+ *
+ * Preemption-disabled sections may be nested. Preemption is re-enabled only
+ * when the outermost preemption-disabled section completes.
+ *
+ * Voluntary preemption still happens via proc_switch() or proc_yield(). The
+ * former assumes the current process has already been added to a private wait
+ * queue (e.g., on a semaphore or a signal), while the latter takes care of
+ * adding the process to the ready queue.
+ *
+ * The context switch is performed by CPU-dependent support routines. In case
+ * of voluntary preemption, the context switch routine must save/restore only
+ * the callee-save registers (a voluntary preemption is effectively a function
+ * call). Kernel preemption, instead, always happens inside a signal/interrupt
+ * context, so all registers must be saved. For this, the caller-save registers
+ * are saved in the entry point of each ISR. At the ISR exit point, if a
+ * context switch must happen, we switch to user context and call the same
+ * voluntary context switch routine, which also saves/restores the callee-save
+ * registers. On resume from the switch, the interrupt exit point moves back to
+ * interrupt context, restores the caller-save registers (saved at the ISR
+ * entry point) and returns from the interrupt context.
+ *
+ * \note Thread priority (if enabled by CONFIG_KERN_PRI) defines the order in
+ * the \p proc_ready_list and the capability to deschedule a running process. A
+ * low-priority thread can't preempt a high-priority thread.
+ *
+ * A high-priority process can preempt a low-priority process immediately (the
+ * latter will be descheduled and replaced at the interrupt exit point).
+ * Processes running at the same priority can be descheduled when their time
+ * quantum expires.
+ *
+ * \note Sleeping while preemption is disabled falls back to a busy-wait
+ * sleep. Voluntary preemption while preemption is disabled raises a kernel
+ * bug.
  *
- * \version $Id$
  * \author Bernie Innocenti
+ * \author Andrea Righi
  */
 
 #include "cfg/cfg_proc.h"
@@ -50,115 +90,145 @@
 #include "proc_p.h"
 #include "proc.h"
 
-#include "idle.h"
 
 #include <kern/irq.h>
 #include <kern/monitor.h>
+#include <kern/idle.h> // idle_proc
 
 #include <cpu/frame.h> // CPU_IDLE
 #include <cpu/irq.h>   // IRQ_DISABLE()...
-#include <drv/timer.h>
+#include <cfg/log.h>
 #include <cfg/module.h>
 #include <cfg/depend.h>   // CONFIG_DEPEND()
 
 // Check config dependencies
-CONFIG_DEPEND(CONFIG_KERN_PREEMPT, CONFIG_KERN && CONFIG_TIMER_EVENTS && CONFIG_KERN_IRQ);
+CONFIG_DEPEND(CONFIG_KERN_PREEMPT, CONFIG_KERN);
 
 MOD_DEFINE(preempt)
 
-/// Global preemption disabling nesting counter
-cpu_atomic_t _preempt_forbid_cnt;
+/**
+ * CPU-dependent context switching routines.
+ *
+ * Saving and restoring the context on the stack is done by a CPU-dependent
+ * support routine which usually needs to be written in assembly.
+ */
+EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
 
-static Timer preempt_timer;
+/* Global preemption nesting counter */
+cpu_atomic_t preempt_count;
 
+/*
+ * The time sharing interval: when a process is scheduled on a CPU it gets an
+ * amount of CONFIG_KERN_QUANTUM clock ticks. When these ticks expire and
+ * preemption is enabled, a new process is selected to run.
+ */
+int _proc_quantum;
 
-void proc_schedule(void)
+/**
+ * Call the scheduler and possibly replace the current running process.
+ */
+static void proc_schedule(void)
 {
-	IRQ_DISABLE;
-
-	ASSERT(proc_preemptAllowed());
-	LIST_ASSERT_VALID(&ProcReadyList);
-	CurrentProcess = (struct Process *)list_remHead(&ProcReadyList);
-	ASSERT2(CurrentProcess, "no idle proc?");
-
-	IRQ_ENABLE;
+	Process *old_process = current_process;
+
+	IRQ_ASSERT_DISABLED();
+
+	/* Poll the ready queue for the first ready process */
+	LIST_ASSERT_VALID(&proc_ready_list);
+	current_process = (Process *)list_remHead(&proc_ready_list);
+	if (UNLIKELY(!current_process))
+		current_process = idle_proc;
+	_proc_quantum = CONFIG_KERN_QUANTUM;
+	/*
+	 * Optimization: don't switch contexts when the active process has not
+	 * changed.
+	 */
+	if (LIKELY(old_process != current_process))
+	{
+		cpu_stack_t *dummy;
+
+		/*
+		 * Save the context of the old process and switch to the new
+		 * process. If there is no old process, we save the old stack
+		 * pointer into a dummy variable that we ignore. In fact, this
+		 * happens only when the old process has just exited.
+		 *
+		 * \todo Instead of physically clearing the process at exit
+		 * time, a zombie list should be created.
+		 */
+		asm_switch_context(&current_process->stack,
+				old_process ? &old_process->stack : &dummy);
+	}
 
-	TRACEMSG("launching %p:%s", CurrentProcess, proc_currentName());
+	/* This RET resumes the execution of the new process */
+	LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
 }
 
-void proc_preempt(UNUSED_ARG(void *, param))
+/**
+ * Check if we need to schedule another task.
+ */
+int proc_needPreempt(void)
 {
-	if (proc_preemptAllowed())
-	{
-		IRQ_DISABLE;
-
-		#if CONFIG_KERN_PRI
-			Process *rival = (Process *)LIST_HEAD(&ProcReadyList);
-			if (rival && rival->link.pri >= CurrentProcess->link.pri)
-			{
-		#endif
-
-		TRACEMSG("preempting %p:%s", CurrentProcess, proc_currentName());
-
-// FIXME: this still breaks havoc, probably because of some reentrancy issue
-#if 0
-		SCHED_ENQUEUE(CurrentProcess);
-		proc_schedule();
-#endif
-
-		#if CONFIG_KERN_PRI
-			}
-		#endif
-
-		IRQ_ENABLE;
-	}
+	if (UNLIKELY(current_process == NULL))
+		return 0;
+	if (!proc_preemptAllowed())
+		return 0;
+	return _proc_quantum ? prio_next() > prio_curr() :
+			prio_next() >= prio_curr();
+}
 
-	timer_setDelay(&preempt_timer, CONFIG_KERN_QUANTUM);
-	timer_add(&preempt_timer);
+/**
+ * Preempt the current task.
+ */
+void proc_preempt(void)
+{
+	IRQ_ASSERT_DISABLED();
+	ASSERT(current_process);
+
+	/* Perform the kernel preemption */
+	LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
+	/* We are inside an IRQ context, so ATOMIC is not needed here */
+	if (current_process != idle_proc)
+		SCHED_ENQUEUE(current_process);
+	proc_schedule();
 }
 
+/**
+ * Give control of the CPU to another process.
+ *
+ * \note Assumes the current process has already been added to a wait queue.
+ *
+ * \warning This should be considered an internal kernel function: even though
+ * calling it is allowed, its use from application code is strongly
+ * discouraged.
+ */
 void proc_switch(void)
 {
-	ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
-	TRACEMSG("%p:%s", CurrentProcess, proc_currentName());
-	ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
-
-	/* Sleeping with IRQs disabled or preemption forbidden is illegal */
-	IRQ_ASSERT_ENABLED();
 	ASSERT(proc_preemptAllowed());
 
-	// Will invoke proc_switch() in interrupt context
-	kill(0, SIGUSR1);
+	ATOMIC(proc_schedule());
 }
 
+/**
+ * Voluntarily release the CPU.
+ */
 void proc_yield(void)
 {
-	TRACEMSG("%p:%s", CurrentProcess, proc_currentName());
-
-	IRQ_DISABLE;
-	SCHED_ENQUEUE(CurrentProcess);
-	IRQ_ENABLE;
-
-	proc_switch();
-}
+	/*
+	 * Voluntary preemption while preemption is disabled is considered
+	 * illegal, as it is not very useful in practice.
+	 *
+	 * ASSERT if it happens.
+	 */
+	ASSERT(proc_preemptAllowed());
 
-void proc_entry(void (*user_entry)(void))
-{
-	user_entry();
-	proc_exit();
+	ATOMIC(
+		SCHED_ENQUEUE(current_process);
+		proc_schedule();
+	);
 }
 
 void preempt_init(void)
 {
-	MOD_CHECK(irq);
-	MOD_CHECK(timer);
-
-	irq_register(SIGUSR1, proc_schedule);
-
-	timer_setSoftint(&preempt_timer, proc_preempt, NULL);
-	timer_setDelay(&preempt_timer, CONFIG_KERN_QUANTUM);
-	timer_add(&preempt_timer);
-
 	idle_init();
-
 	MOD_INIT(preempt);
 }
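
The counter-based proc_forbid()/proc_permit() semantics described in the new
header comment can be modelled in a few lines. The sketch below is
illustrative only and is not part of the patch: it uses a plain int in place
of the kernel's cpu_atomic_t and a single-threaded main() in place of real
ISRs, purely to show the nesting behaviour of preemption-disabled sections.

#include <assert.h>
#include <stdbool.h>

/* Stand-in for the kernel's atomic nesting counter (a plain int is
 * enough here only because this demo is single-threaded). */
static int preempt_count;

static void proc_forbid(void)  { ++preempt_count; }
static void proc_permit(void)  { assert(preempt_count > 0); --preempt_count; }
static bool proc_preemptAllowed(void) { return preempt_count == 0; }

int main(void)
{
	proc_forbid();                  /* outer preemption-disabled section */
	proc_forbid();                  /* nested section: counter is now 2  */
	assert(!proc_preemptAllowed());
	proc_permit();                  /* inner section closed: still forbidden */
	assert(!proc_preemptAllowed());
	proc_permit();                  /* outermost closed: preemption allowed  */
	assert(proc_preemptAllowed());
	return 0;
}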
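
Similarly, the decision rule inside the new proc_needPreempt() is worth
spelling out: while the running process still has quantum left, only a
strictly higher-priority ready process may take the CPU; once the quantum is
exhausted, an equal-priority process is enough to trigger round-robin. Below
is a hypothetical standalone rendering of the same rule, with prio_next() and
prio_curr() replaced by explicit parameters for illustration:

#include <stdbool.h>
#include <stdio.h>

static int quantum;   /* remaining ticks of the running process */

/* Same comparison as the patch's proc_needPreempt(), with the
 * priorities passed in instead of read from the scheduler state. */
static bool need_preempt(int prio_next, int prio_curr)
{
	return quantum ? prio_next > prio_curr
	               : prio_next >= prio_curr;
}

int main(void)
{
	quantum = 3;
	printf("%d\n", need_preempt(0, 0)); /* 0: same priority, quantum left  */
	printf("%d\n", need_preempt(1, 0)); /* 1: higher priority preempts now */
	quantum = 0;
	printf("%d\n", need_preempt(0, 0)); /* 1: quantum expired, round-robin */
	return 0;
}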