X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;f=bertos%2Fkern%2Fpreempt.c;h=065faed88e17823eace2972fcd754927f0326f6e;hb=d9d931610bca1df6ceb9227eacc9ff2c7f89b77a;hp=dfa343d7a9b702c769a590d8b76e0a409bdec9e2;hpb=0315f64ec2ee2c5eb10634360fb8874c52dbbabf;p=bertos.git

diff --git a/bertos/kern/preempt.c b/bertos/kern/preempt.c
index dfa343d7..065faed8 100644
--- a/bertos/kern/preempt.c
+++ b/bertos/kern/preempt.c
@@ -26,161 +26,185 @@
  * invalidate any other reasons why the executable file might be covered by
  * the GNU General Public License.
  *
- * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
- * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti
+ * Copyright 2008 Bernie Innocenti
+ * Copyright 2009 Andrea Righi
  * -->
  *
- * \brief Simple realtime multitasking scheduler.
- *        Context switching is only done cooperatively.
+ * \brief Simple preemptive multitasking scheduler.
+ *
+ * Preemption is explicitly regulated at the exit of each interrupt service
+ * routine (ISR). Each task obtains a time quantum as soon as it is scheduled
+ * on the CPU, and its quantum is decremented at each clock tick. The timer
+ * frequency determines the system tick granularity and CONFIG_KERN_QUANTUM
+ * the time-sharing interval.
+ *
+ * When the quantum expires, the handler proc_needPreempt() checks whether
+ * preemption is enabled; if so, preempt_schedule() is called, possibly
+ * replacing the currently running thread with a different one.
+ *
+ * Preemption can be disabled or enabled via the proc_forbid() and
+ * proc_permit() primitives. This is implemented using a global atomic counter:
+ * while the counter is greater than 0 the task cannot be preempted; only when
+ * the counter reaches 0 can the task be preempted again.
+ *
+ * Preemption-disabled sections may be nested: preemption is re-enabled only
+ * when the outermost preemption-disabled section completes.
+ *
+ * Voluntary preemption still happens via proc_switch() or proc_yield(). The
+ * first assumes the current process has already been added to a private wait
+ * queue (e.g., on a semaphore or a signal), while the second takes care of
+ * adding the process to the ready queue.
+ *
+ * Context switching is done by CPU-dependent support routines. In case of
+ * voluntary preemption the context switch routine only needs to save/restore
+ * the callee-save registers (voluntary preemption is effectively a function
+ * call). Kernel preemption always happens inside a signal/interrupt context,
+ * so it must save all registers. For this reason, the caller-save registers
+ * are saved in the entry point of each ISR. At the ISR exit point, if a
+ * context switch must happen, we switch to user context and call the same
+ * voluntary context switch routine, which also takes care of saving/restoring
+ * the callee-save registers. On resume from the switch, the interrupt exit
+ * point moves back to interrupt context, restores the caller-save registers
+ * (saved in the ISR entry point) and returns from the interrupt context.
+ *
+ * \note Thread priority (if enabled by CONFIG_KERN_PRI) defines the order in
+ * the \p proc_ready_list and the capability to deschedule a running process. A
+ * low-priority thread can't preempt a high-priority thread.
+ *
+ * A high-priority process can preempt a low-priority process immediately (it
+ * will be descheduled and replaced at the interrupt exit point). Processes
+ * running at the same priority can be descheduled when their time quantum
+ * expires.
+ *
+ * \note Sleeping while preemption is disabled falls back to a busy-wait
+ * sleep. Voluntary preemption while preemption is disabled raises a kernel
+ * bug.
  *
- * \version $Id: proc.c 1616 2008-08-10 19:41:26Z bernie $
  * \author Bernie Innocenti
- * \author Stefano Fedrigo
+ * \author Andrea Righi
  */
 
+#include "cfg/cfg_proc.h"
+
 #include "proc_p.h"
 #include "proc.h"
 
+#include 
+#include 
 #include  // CPU_IDLE
+#include  // IRQ_DISABLE()...
+#include 
+#include 
+#include  // CONFIG_DEPEND()
 
-#include  // XXX alarm()
+// Check config dependencies
+CONFIG_DEPEND(CONFIG_KERN_PREEMPT, CONFIG_KERN);
+MOD_DEFINE(preempt)
+
+/**
+ * CPU-dependent context switching routine.
+ *
+ * Saving and restoring the context on the stack is done by a CPU-dependent
+ * support routine which usually needs to be written in assembly.
+ */
+EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
+
+/* Global preemption nesting counter */
+cpu_atomic_t preempt_count;
 
 /*
- * The time sharing scheduler forces a task switch when the current
- * process has exhausted its quantum.
+ * The time-sharing interval: when a process is scheduled on a CPU it is
+ * given CONFIG_KERN_QUANTUM clock ticks. When these ticks expire and
+ * preemption is enabled, a new process is selected to run.
  */
-uint16_t Quantum;
+int _proc_quantum;
 
 /**
- * Disable preemptive task switching.
+ * Function prototypes exported outside this module.
  *
- * The scheduler maintains a per-process nesting counter. Task switching is
- * effectively re-enabled only when the number of calls to proc_permit()
- * matches the number of calls to proc_forbid().
- *
- * Calling functions that could sleep while task switching is disabled
- * is dangerous, although supported. Preemptive task switching is
- * resumed while the process is sleeping and disabled again as soon as
- * it wakes up again.
- *
- * \sa proc_permit()
+ * Required to silence gcc "no previous prototype" warnings.
  */
-void proc_forbid(void)
-{
-	/* No need to protect against interrupts here. */
-	++CurrentProcess->forbid_cnt;
-}
+void preempt_yield(void);
+int preempt_needPreempt(void);
+void preempt_preempt(void);
+void preempt_switch(void);
+void preempt_init(void);
 
 /**
- * Re-enable preemptive task switching.
- *
- * \sa proc_forbid()
+ * Call the scheduler, possibly replacing the currently running process.
  */
-void proc_permit(void)
+static void preempt_schedule(void)
 {
-	/* No need to protect against interrupts here. */
-	--CurrentProcess->forbid_cnt;
-}
-
-static void (*irq_handlers[100])(void); // FIXME
-
-
-void proc_preempt(void)
-{
-	TRACE;
-
-	ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
-
-	IRQ_DISABLE;
-	/* Poll on the ready queue for the first ready process */
-	while (!(CurrentProcess = (struct Process *)list_remHead(&ProcReadyList)))
-	{
-		/*
-		 * Make sure we physically reenable interrupts here, no matter what
-		 * the current task status is. This is important because if we
-		 * are idle-spinning, we must allow interrupts, otherwise no
-		 * process will ever wake up.
-		 *
-		 * During idle-spinning, an interrupt can occur and it may
-		 * modify \p ProcReadyList. To ensure that compiler reload this
-		 * variable every while cycle we call CPU_MEMORY_BARRIER.
-		 * The memory barrier ensure that all variables used in this context
-		 * are reloaded.
		 */
-		IRQ_ENABLE;
-		CPU_IDLE;
-		MEMORY_BARRIER;
-		IRQ_DISABLE;
-	}
-	IRQ_ENABLE;
+	_proc_quantum = CONFIG_KERN_QUANTUM;
+	proc_schedule();
 }
 
-void proc_preempt_timer(void)
+/**
+ * Check if we need to schedule another task.
+ */
+int preempt_needPreempt(void)
 {
-	// TODO: check Quantum
-
-	alarm(1);
-	ATOMIC(SCHED_ENQUEUE(CurrentProcess));
-	proc_schedule();
+	if (UNLIKELY(current_process == NULL))
+		return 0;
+	if (!proc_preemptAllowed())
+		return 0;
+	return _proc_quantum ? prio_next() > prio_curr() :
+			prio_next() >= prio_curr();
 }
 
-void proc_schedule(void)
+/**
+ * Preempt the current task.
+ */
+void preempt_preempt(void)
 {
-	kill(0, SIGUSR1);
+	IRQ_ASSERT_DISABLED();
+	ASSERT(current_process);
+
+	/* Perform the kernel preemption */
+	LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
+	/* We are inside an IRQ context, so ATOMIC is not needed here */
+	SCHED_ENQUEUE(current_process);
+	preempt_schedule();
 }
 
-void proc_yield(void)
+/**
+ * Give control of the CPU to another process.
+ *
+ * \note Assumes the current process has already been added to a wait queue.
+ *
+ * \warning This should be considered an internal kernel function: even though
+ * calling it is allowed, usage from application code is strongly discouraged.
+ */
+void preempt_switch(void)
 {
-	ATOMIC(SCHED_ENQUEUE(CurrentProcess));
+	ASSERT(proc_preemptAllowed());
+	IRQ_ASSERT_ENABLED();
 
-	proc_schedule();
+	ATOMIC(preempt_schedule());
 }
 
-/* signal handler */
-void irq_entry(int signum)
+/**
+ * Voluntarily release the CPU.
+ */
+void preempt_yield(void)
 {
-	Process *old_process;
-
-	TRACEMSG("storing %p:%s", CurrentProcess, CurrentProcess->monitor.name);
-	CurrentProcess->leaving = false;
-	getcontext(&CurrentProcess->context);
-	/* We get here in two ways: directly, and after setcontext() below */
-
-	if (CurrentProcess->leaving)
-	{
-		TRACEMSG("leaving to %p:%s", CurrentProcess, CurrentProcess->monitor.name);
-		return;
-	}
-
-	old_process = CurrentProcess;
-
-	irq_handlers[signum]();
-
-	if (old_process != CurrentProcess)
-	{
-		TRACEMSG("launching %p:%s", CurrentProcess, CurrentProcess->monitor.name);
-		CurrentProcess->leaving = true;
-		setcontext(&CurrentProcess->context);
-		/* not reached */
-	}
-
-	TRACEMSG("keeping %p:%s", CurrentProcess, CurrentProcess->monitor.name);
+	/*
+	 * Voluntary preemption while preemption is disabled is considered
+	 * illegal, as it is not very useful in practice.
+	 *
	 * ASSERT if it happens.
+	 */
+	ASSERT(proc_preemptAllowed());
+	IRQ_ASSERT_ENABLED();
+
+	ATOMIC(
+		SCHED_ENQUEUE(current_process);
+		preempt_schedule();
+	);
}
 
 void preempt_init(void)
 {
-	struct sigaction act;
-	act.sa_handler = irq_entry;
-	sigemptyset(&act.sa_mask);
-	sigaddset(&act.sa_mask, SIGUSR1);
-	sigaddset(&act.sa_mask, SIGALRM);
-	act.sa_flags = SA_RESTART; /* | SA_SIGINFO; */
-
-	irq_handlers[SIGUSR1] = proc_preempt;
-	irq_handlers[SIGALRM] = proc_preempt_timer;
-	sigaction(SIGUSR1, &act, NULL);
-	sigaction(SIGALRM, &act, NULL);
-
-	alarm(1); // FIXME
+	MOD_INIT(preempt);
 }