X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;f=bertos%2Fkern%2Fproc.c;h=781bd5532ab3846c0b5de51f2a5c2f48283fa09f;hb=f35b6066ecdeffcc8998dd566b5246bdcf43c548;hp=a18fc40498d52e85e198ee54e5e13a1e90e2c49e;hpb=2c8af54b2053cae5a6b5ff073cfd94606b21920a;p=bertos.git

diff --git a/bertos/kern/proc.c b/bertos/kern/proc.c
index a18fc404..781bd553 100644
--- a/bertos/kern/proc.c
+++ b/bertos/kern/proc.c
@@ -26,14 +26,63 @@
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 *
- * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
- * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti
+ * \brief Simple preemptive multitasking scheduler.
+ *
+ * Preemption is explicitly regulated at the exit of each interrupt service
+ * routine (ISR). Each task obtains a time quantum as soon as it is scheduled
+ * on the CPU, and its quantum is decremented at each clock tick. The timer
+ * frequency determines the system tick granularity and CONFIG_KERN_QUANTUM
+ * the time sharing interval.
+ *
+ * When the quantum expires, the handler proc_needPreempt() checks whether
+ * preemption is enabled; if so, proc_schedule() is called and may replace
+ * the currently running thread with a different one.
+ *
+ * Preemption can be disabled and re-enabled via the proc_forbid() and
+ * proc_permit() primitives. This is implemented using a global atomic
+ * counter: while the counter is greater than 0 the task cannot be preempted;
+ * only when the counter drops back to 0 can the task be preempted again.
+ *
+ * Preemption-disabled sections may be nested; preemption is re-enabled when
+ * the outermost preemption-disabled section completes.
+ *
+ * Voluntary preemption still happens via proc_switch() or proc_yield(). The
+ * former assumes the current process has already been added to a private
+ * wait queue (e.g., on a semaphore or a signal), while the latter takes care
+ * of adding the process to the ready queue.
+ *
+ * The context switch is done by CPU-dependent support routines. In case of a
+ * voluntary preemption the context switch routine only needs to save/restore
+ * the callee-save registers (a voluntary preemption is effectively a function
+ * call). Kernel preemption always happens inside a signal/interrupt context
+ * and must save all registers. For this reason, the caller-save registers are
+ * saved in the entry point of each ISR. At the ISR exit point, if a context
+ * switch is required, we switch to user context and call the same voluntary
+ * context switch routine, which also saves/restores the callee-save
+ * registers. On resume from the switch, the interrupt exit point switches
+ * back to interrupt context, restores the caller-save registers (saved in
+ * the ISR entry point) and returns from the interrupt context.
+ *
+ * \note Thread priority (if enabled by CONFIG_KERN_PRI) defines the order in
+ * the \p proc_ready_list and the capability to deschedule a running process.
+ * A low-priority thread can't preempt a high-priority thread.
+ *
+ * A high-priority process can preempt a low-priority process immediately
+ * (the latter is descheduled and replaced at the interrupt exit point).
+ * Processes running at the same priority can be descheduled when their time
+ * quantum expires.
+ *
+ * \note Sleeping while preemption is disabled falls back to a busy-wait
+ * sleep. Voluntary preemption while preemption is disabled raises a kernel
+ * bug.
+ *
 * -->
 *
- * \brief Simple cooperative multitasking scheduler.
+ * \brief Simple cooperative and preemptive multitasking scheduler.
 *
 * \author Bernie Innocenti
 * \author Stefano Fedrigo
+ * \author Andrea Righi
 */

 #include "proc_p.h"
@@ -98,6 +147,36 @@ static List zombie_list;

 #endif /* CONFIG_KERN_HEAP */

+/*
+ * Check if the process context switch can be performed directly by the
+ * architecture-dependent asm_switch_context() or if it must be delayed
+ * because we're in the middle of an ISR.
+ *
+ * Returns true if asm_switch_context() can be executed, false otherwise.
+ *
+ * NOTE: if an architecture does not implement IRQ_RUNNING() this macro
+ * always evaluates to true.
+ */
+#define CONTEXT_SWITCH_FROM_ISR() (!IRQ_RUNNING())
+
+/*
+ * Save the context of the old process and switch to the new process.
+ */
+static void proc_context_switch(Process *next, Process *prev)
+{
+	cpu_stack_t *dummy;
+
+	if (UNLIKELY(next == prev))
+		return;
+	/*
+	 * If there is no old process, we save the old stack pointer into a
+	 * dummy variable that we ignore. In fact, this happens only when the
+	 * old process has just exited.
+	 */
+	asm_switch_context(&next->stack, prev ? &prev->stack : &dummy);
+}
+
 static void proc_initStruct(Process *proc)
 {
 	/* Avoid warning for unused argument. */
@@ -423,7 +502,7 @@ void proc_exit(void)
 /**
 * Call the scheduler and eventually replace the current running process.
 */
-void proc_schedule(void)
+static void proc_schedule(void)
 {
 	Process *old_process = current_process;

@@ -454,7 +533,116 @@
 		IRQ_DISABLE;
 	}
 	if (CONTEXT_SWITCH_FROM_ISR())
-		proc_switchTo(current_process, old_process);
+		proc_context_switch(current_process, old_process);
 	/* This RET resumes the execution on the new process */
 	LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
 }
+
+#if CONFIG_KERN_PREEMPT
+/* Global preemption nesting counter */
+cpu_atomic_t preempt_count;
+
+/*
+ * The time sharing interval: when a process is scheduled on the CPU it gets
+ * CONFIG_KERN_QUANTUM clock ticks. When these ticks expire and preemption is
+ * enabled, a new process is selected to run.
+ */
+int _proc_quantum;
+
+/**
+ * Check if we need to schedule another task.
+ */
+bool proc_needPreempt(void)
+{
+	if (UNLIKELY(current_process == NULL))
+		return false;
+	if (!proc_preemptAllowed())
+		return false;
+	if (LIST_EMPTY(&proc_ready_list))
+		return false;
+	return preempt_quantum() ? prio_next() > prio_curr() :
+			prio_next() >= prio_curr();
+}
+
+/**
+ * Preempt the current task.
+ */
+void proc_preempt(void)
+{
+	IRQ_ASSERT_DISABLED();
+	ASSERT(current_process);
+
+	/* Perform the kernel preemption */
+	LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
+	/* We are inside an IRQ context, so ATOMIC is not needed here */
+	SCHED_ENQUEUE(current_process);
+	preempt_reset_quantum();
+	proc_schedule();
+}
+#endif /* CONFIG_KERN_PREEMPT */
+
+/* Immediately switch to a particular process */
+static void proc_switchTo(Process *proc)
+{
+	Process *old_process = current_process;

+	SCHED_ENQUEUE(current_process);
+	preempt_reset_quantum();
+	current_process = proc;
+	proc_context_switch(current_process, old_process);
+}
+
+/**
+ * Give the control of the CPU to another process.
+ *
+ * \note Assumes the current process has already been added to a wait queue.
+ *
+ * \warning This should be considered an internal kernel function; even
+ * though calling it is allowed, usage from application code is strongly
+ * discouraged.
+ */
+void proc_switch(void)
+{
+	ASSERT(proc_preemptAllowed());
+	ATOMIC(
+		preempt_reset_quantum();
+		proc_schedule();
+	);
+}
+
+/**
+ * Immediately wake up a process, dispatching it to the CPU.
+ */
+void proc_wakeup(Process *proc)
+{
+	ASSERT(proc_preemptAllowed());
+	ASSERT(current_process);
+	IRQ_ASSERT_DISABLED();
+
+	if (prio_proc(proc) >= prio_curr())
+		proc_switchTo(proc);
+	else
+		SCHED_ENQUEUE_HEAD(proc);
+}
+
+/**
+ * Voluntarily release the CPU.
+ */
+void proc_yield(void)
+{
+	Process *proc;
+
+	/*
+	 * Voluntary preemption while preemption is disabled is considered
+	 * illegal, as it is not very useful in practice.
+	 *
+	 * ASSERT if it happens.
+	 */
+	ASSERT(proc_preemptAllowed());
+	IRQ_ASSERT_ENABLED();
+
+	IRQ_DISABLE;
+	proc = (struct Process *)list_remHead(&proc_ready_list);
+	if (proc)
+		proc_switchTo(proc);
+	IRQ_ENABLE;
+}
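
Side note (not part of the patch): as a usage illustration of the proc_forbid()/proc_permit() nesting counter described in the new header comment, here is a minimal sketch of a preemption-protected critical section. It assumes the BeRTOS process API from bertos/kern/proc.h; the names shared_counter and update_stats() are hypothetical.

#include <kern/proc.h>

static int shared_counter;  /* hypothetical data shared between processes */

static void update_stats(void)
{
	/*
	 * Nested preemption-disabled section: the nesting counter goes
	 * 1 -> 2 -> 1 here, so preemption stays disabled until the
	 * outermost proc_permit() in critical_update().
	 */
	proc_forbid();
	shared_counter++;
	proc_permit();
}

static void critical_update(void)
{
	proc_forbid();      /* counter 0 -> 1: preemption disabled */
	shared_counter += 2;
	update_stats();     /* nesting is allowed, as documented above */
	proc_permit();      /* counter 1 -> 0: preemption enabled again */
}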
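
Similarly, a rough sketch of where proc_needPreempt()/proc_preempt() fit into the "preemption at ISR exit" model described above. This is only an assumption about how an architecture port would wire it up: the hook name timer_tick_exit() is hypothetical, and the real entry/exit code is architecture-specific assembly that also saves/restores the caller-save registers and switches between interrupt and user context.

/*
 * Hypothetical exit hook of the timer ISR, called with interrupts still
 * disabled (proc_preempt() asserts IRQ_ASSERT_DISABLED()). The per-tick
 * quantum decrement is not shown here.
 */
static void timer_tick_exit(void)
{
	if (proc_needPreempt())
		proc_preempt();  /* enqueue the current process and reschedule */
}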