* invalidate any other reasons why the executable file might be covered by
* the GNU General Public License.
*
- * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
- * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
+ * Copyright 2008 Bernie Innocenti <bernie@codewiz.org>
+ * Copyright 2009 Andrea Righi <arighi@develer.com>
* -->
*
- * \brief Simple realtime multitasking scheduler.
- * Context switching is only done cooperatively.
+ * \brief Simple preemptive multitasking scheduler.
+ *
+ * Preemption is explicitly regulated at the exit of each interrupt service
+ * routine (ISR). Each task obtains a time quantum as soon as it is scheduled
+ * on the CPU and its quantum is decremented at each clock tick. The frequency
+ * of the timer determines the system tick granularity and CONFIG_KERN_QUANTUM
+ * the time sharing interval.
+ *
+ * When the quantum expires the handler proc_needPreempt() checks if the
+ * preemption is enabled and in this case preempt_schedule() is called, that
+ * possibly replaces the current running thread with a different one.
+ *
+ * The preemption can be disabled or enabled via proc_forbid() and
+ * proc_permit() primitives. This is implemented using a global atomic counter.
+ * When the counter is greater than 0 the task cannot be preempted; only when
+ * the counter reaches 0 the task can be preempted again.
+ *
+ * Preemption-disabled sections may be nested. The preemption will be
+ * re-enabled when the outermost preemption-disabled section completes.
+ *
+ * The voluntary preemption still happens via proc_switch() or proc_yield().
+ * The first one assumes the current process has already been added to a
+ * private wait queue (e.g., on a semaphore or a signal), while the second one
+ * takes care of adding the process into the ready queue.
+ *
+ * Context switch is done by CPU-dependent support routines. In case of a
+ * voluntary preemption the context switch routine must take care of
+ * saving/restoring only the callee-save registers (the voluntary-preemption is
+ * actually a function call). The kernel-preemption always happens inside a
+ * signal/interrupt context and it must take care of saving all registers. For
+ * this, in the entry point of each ISR the caller-save registers must be
+ * saved. In the ISR exit point, if the context switch must happen, we switch
+ * to user-context and call the same voluntary context switch routine that takes
+ * care of saving/restoring also the callee-save registers. On resume from the
+ * switch, the interrupt exit point moves back to interrupt-context, resumes
+ * the caller-save registers (saved in the ISR entry point) and returns from the
+ * interrupt-context.
+ *
+ * \note Thread priority (if enabled by CONFIG_KERN_PRI) defines the order in
+ * the \p proc_ready_list and the capability to deschedule a running process. A
+ * low-priority thread can't preempt a high-priority thread.
+ *
+ * A high-priority process can preempt a low-priority process immediately (it
+ * will be descheduled and replaced in the interrupt exit point). Processes
+ * running at the same priority can be descheduled when they expire the time
+ * quantum.
+ *
+ * \note Sleeping while preemption is disabled falls back to a busy-wait sleep.
+ * Voluntary preemption when preemption is disabled raises a kernel bug.
*
- * \version $Id: proc.c 1616 2008-08-10 19:41:26Z bernie $
* \author Bernie Innocenti <bernie@codewiz.org>
- * \author Stefano Fedrigo <aleph@develer.com>
+ * \author Andrea Righi <arighi@develer.com>
*/
+#include "cfg/cfg_proc.h"
+
+#if CONFIG_KERN_PREEMPT
+
#include "proc_p.h"
#include "proc.h"
+#include <kern/irq.h>
+#include <kern/monitor.h>
+#include <kern/idle.h> // idle_proc
#include <cpu/frame.h> // CPU_IDLE
+#include <cpu/irq.h> // IRQ_DISABLE()...
+#include <cfg/log.h>
+#include <cfg/module.h>
+#include <cfg/depend.h> // CONFIG_DEPEND()
-#include <unistd.h> // XXX alarm()
+// Check config dependencies
+CONFIG_DEPEND(CONFIG_KERN_PREEMPT, CONFIG_KERN);
-
-/*
- * The time sharing scheduler forces a task switch when the current
- * process has exhausted its quantum.
- */
-uint16_t Quantum;
+MOD_DEFINE(preempt)
/**
- * Disable preemptive task switching.
- *
- * The scheduler maintains a per-process nesting counter. Task switching is
- * effectively re-enabled only when the number of calls to proc_permit()
- * matches the number of calls to proc_forbid().
+ * CPU dependent context switching routines.
*
- * Calling functions that could sleep while task switching is disabled
- * is dangerous, although supported. Preemptive task switching is
- * resumed while the process is sleeping and disabled again as soon as
- * it wakes up again.
- *
- * \sa proc_permit()
- */
-void proc_forbid(void)
-{
- /* No need to protect against interrupts here. */
- ++CurrentProcess->forbid_cnt;
-}
-
-/**
- * Re-enable preemptive task switching.
- *
- * \sa proc_forbid()
+ * Saving and restoring the context on the stack is done by a CPU-dependent
+ * support routine which usually needs to be written in assembly.
*/
-void proc_permit(void)
-{
- /* No need to protect against interrupts here. */
- --CurrentProcess->forbid_cnt;
-}
+EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
-static void (*irq_handlers[100])(void); // FIXME
+/* Global preemption nesting counter */
+cpu_atomic_t preempt_count;
+/*
+ * The time sharing interval: when a process is scheduled on a CPU it gets an
+ * amount of CONFIG_KERN_QUANTUM clock ticks. When these ticks expire and
+ * preemption is enabled a new process is selected to run.
+ */
+int _proc_quantum;
-void proc_preempt(void)
+/**
+ * Call the scheduler and possibly replace the current running process.
+ */
+static void preempt_schedule(void)
{
- TRACE;
+ Process *old_process = current_process;
- ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
+ IRQ_ASSERT_DISABLED();
- TRACEMSG("hello1");
- IRQ_DISABLE;
/* Poll on the ready queue for the first ready process */
- while (!(CurrentProcess = (struct Process *)list_remHead(&ProcReadyList)))
+ LIST_ASSERT_VALID(&proc_ready_list);
+ current_process = (Process *)list_remHead(&proc_ready_list);
+ if (UNLIKELY(!current_process))
+ current_process = idle_proc;
+ _proc_quantum = CONFIG_KERN_QUANTUM;
+ /*
+ * Optimization: don't switch contexts when the active process has not
+ * changed.
+ */
+ if (LIKELY(old_process != current_process))
{
- TRACEMSG("hello2");
+ cpu_stack_t *dummy;
+
/*
- * Make sure we physically reenable interrupts here, no matter what
- * the current task status is. This is important because if we
- * are idle-spinning, we must allow interrupts, otherwise no
- * process will ever wake up.
+ * Save context of old process and switch to new process. If
+ * there is no old process, we save the old stack pointer into
+ * a dummy variable that we ignore. In fact, this happens only
+ * when the old process has just exited.
*
- * During idle-spinning, an interrupt can occur and it may
- * modify \p ProcReadyList. To ensure that compiler reload this
- * variable every while cycle we call CPU_MEMORY_BARRIER.
- * The memory barrier ensure that all variables used in this context
- * are reloaded.
+ * \todo Instead of physically clearing the process at exit
+ * time, a zombie list should be created.
*/
- IRQ_ENABLE;
- //FIXME: calls Qt stuff from sighandler! CPU_IDLE;
- MEMORY_BARRIER;
- IRQ_DISABLE;
- TRACEMSG("hello3");
+ asm_switch_context(&current_process->stack,
+ old_process ? &old_process->stack : &dummy);
}
- IRQ_ENABLE;
- TRACEMSG("hello4");
-}
-
-void proc_preempt_timer(void)
-{
- // TODO: check Quantum
-
- alarm(1);
- if (CurrentProcess)
- {
- TRACEMSG("preempting %p:%s", CurrentProcess, CurrentProcess->monitor.name);
- ATOMIC(SCHED_ENQUEUE(CurrentProcess));
- proc_schedule();
- }
+ /* This RET resumes the execution on the new process */
+ LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
}
-void proc_schedule(void)
+/**
+ * Check if we need to schedule another task
+ */
+int preempt_needPreempt(void)
{
- TRACE;
-
- // Will invoke proc_preempt() in interrupt context
- kill(0, SIGUSR1);
+ if (UNLIKELY(current_process == NULL))
+ return 0;
+ if (!proc_preemptAllowed())
+ return 0;
+ return _proc_quantum ? prio_next() > prio_curr() :
+ prio_next() >= prio_curr();
}
-void proc_yield(void)
+/**
+ * Preempt the current task.
+ */
+void preempt_preempt(void)
{
- ATOMIC(SCHED_ENQUEUE(CurrentProcess));
-
- proc_schedule();
+ IRQ_ASSERT_DISABLED();
+ ASSERT(current_process);
+
+ /* Perform the kernel preemption */
+ LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
+ /* We are inside a IRQ context, so ATOMIC is not needed here */
+ if (current_process != idle_proc)
+ SCHED_ENQUEUE(current_process);
+ preempt_schedule();
}
-void proc_entry(void (*user_entry)(void))
+/**
+ * Give the control of the CPU to another process.
+ *
+ * \note Assume the current process has been already added to a wait queue.
+ *
+ * \warning This should be considered an internal kernel function; although it
+ * is allowed, its use from application code is strongly discouraged.
+ */
+void preempt_switch(void)
{
- user_entry();
- proc_exit();
+ ASSERT(proc_preemptAllowed());
+ IRQ_ASSERT_ENABLED();
+
+ ATOMIC(preempt_schedule());
}
-/* signal handler */
-void irq_entry(int signum)
+/**
+ * Voluntarily release the CPU.
+ */
+void preempt_yield(void)
{
- Process *old_process;
-
-// TRACEMSG("storing %p:%s", CurrentProcess, CurrentProcess->monitor.name);
-// CurrentProcess->leaving = false;
-// getcontext(&CurrentProcess->context);
- /* We get here in two ways: directly, and after setcontext() below */
-
-// if (CurrentProcess->leaving)
-// {
-// TRACEMSG("leaving to %p:%s", CurrentProcess, CurrentProcess->monitor.name);
-// return;
-// }
-
- old_process = CurrentProcess;
-
- irq_handlers[signum]();
-
- if (old_process != CurrentProcess)
- {
- TRACEMSG("switching from %p:%s to %p:%s",
- old_process, old_process->monitor.name,
- CurrentProcess, CurrentProcess->monitor.name);
- swapcontext(&old_process->context, &CurrentProcess->context);
-// TRACEMSG("launching %p:%s", CurrentProcess, CurrentProcess->monitor.name);
-// CurrentProcess->leaving = true;
-// setcontext(&CurrentProcess->context);
- /* not reached */
- }
-
- TRACEMSG("keeping %p:%s", CurrentProcess, CurrentProcess->monitor.name);
+ /*
+ * Voluntary preemption while preemption is disabled is considered
+ * illegal, as not very useful in practice.
+ *
+ * ASSERT if it happens.
+ */
+ ASSERT(proc_preemptAllowed());
+ IRQ_ASSERT_ENABLED();
+
+ ATOMIC(
+ SCHED_ENQUEUE(current_process);
+ preempt_schedule();
+ );
}
void preempt_init(void)
{
- struct sigaction act;
- act.sa_handler = irq_entry;
- sigemptyset(&act.sa_mask);
- sigaddset(&act.sa_mask, SIGUSR1);
- sigaddset(&act.sa_mask, SIGALRM);
- act.sa_flags = SA_RESTART; /* | SA_SIGINFO; */
-
- irq_handlers[SIGUSR1] = proc_preempt;
- irq_handlers[SIGALRM] = proc_preempt_timer;
- sigaction(SIGUSR1, &act, NULL);
- sigaction(SIGALRM, &act, NULL);
-
- alarm(1); // FIXME
+ idle_init();
+ MOD_INIT(preempt);
}
+
+#endif // CONFIG_KERN_PREEMPT