* invalidate any other reasons why the executable file might be covered by
* the GNU General Public License.
*
- * Copyright 2001,2004 Develer S.r.l. (http://www.develer.com/)
- * Copyright 1999,2000,2001 Bernardo Innocenti <bernie@develer.com>
- *
+ * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
+ * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
* -->
*
- * \brief Simple realtime multitasking scheduler.
- * Context switching is only done cooperatively.
- *
- * \version $Id$
+ * \brief Simple cooperative multitasking scheduler.
*
- * \author Bernardo Innocenti <bernie@develer.com>
+ * \author Bernie Innocenti <bernie@codewiz.org>
* \author Stefano Fedrigo <aleph@develer.com>
*/
-
#include "proc_p.h"
#include "proc.h"
-#include <cfg/cfg_arch.h> /* ARCH_EMUL */
-#include <cfg/debug.h>
+#include "cfg/cfg_proc.h"
+#define LOG_LEVEL KERN_LOG_LEVEL
+#define LOG_FORMAT KERN_LOG_FORMAT
+#include <cfg/log.h>
+
+#include "cfg/cfg_monitor.h"
+#include <cfg/macros.h> // ROUND_UP2
#include <cfg/module.h>
-#include <cfg/macros.h> /* ABS() */
+#include <cfg/depend.h> // CONFIG_DEPEND()
#include <cpu/irq.h>
#include <cpu/types.h>
#include <cpu/attr.h>
+#include <cpu/frame.h>
-#include <mware/event.h>
+#if CONFIG_KERN_HEAP
+ #include <struct/heap.h>
+#endif
#include <string.h> /* memset() */
-/**
- * CPU dependent context switching routines.
- *
- * \note This function *MUST* preserve also the status of the interrupts.
- */
-EXTERN_C void asm_switch_context(cpustack_t **new_sp, cpustack_t **save_sp);
-EXTERN_C int asm_switch_version(void);
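+/*
+ * Size of the process control block (TCB) in stack words, rounded up to
+ * the size of a stack slot: the TCB is allocated inside the stack buffer.
+ */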
+#define PROC_SIZE_WORDS (ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t))
/*
- * The scheduer tracks ready and waiting processes
- * by enqueuing them in these lists. A pointer to the currently
- * running process is stored in the CurrentProcess pointer.
+ * The scheduler tracks ready processes by enqueuing them in the
+ * ready list.
*
- * NOTE: these variables are protected by DI/EI locking
+ * \note Access to the list must occur while interrupts are disabled.
*/
-REGISTER Process *CurrentProcess;
-REGISTER List ProcReadyList;
+REGISTER List proc_ready_list;
-
-#if CONFIG_KERN_PREEMPTIVE
/*
- * The time sharing scheduler forces a task switch when
- * the current process has consumed its quantum.
+ * Holds a pointer to the TCB of the currently running process.
+ *
+ * \note User applications should use proc_current() to retrieve this value.
*/
-uint16_t Quantum;
-#endif
+REGISTER Process *current_process;
+/** The main process (the one that executes main()). */
+static struct Process main_process;
-/* In Win32 we must emulate stack on the real process stack */
-#if (ARCH & ARCH_EMUL)
-extern List StackFreeList;
-#endif
+#if CONFIG_KERN_HEAP
-/** The main process (the one that executes main()). */
-struct Process MainProcess;
+/**
+ * Local heap dedicated to allocating the memory used by processes.
+ */
+static HEAP_DEFINE_BUF(heap_buf, CONFIG_KERN_HEAP_SIZE);
+static Heap proc_heap;
+/*
+ * Keep track of zombie processes (processes that are exiting and need to
+ * release some resources).
+ *
+ * \note Access to the list must occur while kernel preemption is disabled.
+ */
+static List zombie_list;
-static void proc_init_struct(Process *proc)
+#endif /* CONFIG_KERN_HEAP */
+
+static void proc_initStruct(Process *proc)
{
/* Avoid warning for unused argument. */
(void)proc;
#if CONFIG_KERN_SIGNALS
proc->sig_recv = 0;
-#endif
-
-#if CONFIG_KERN_PREEMPTIVE
- proc->forbid_cnt = 0;
+ proc->sig_wait = 0;
#endif
#if CONFIG_KERN_HEAP
proc->flags = 0;
#endif
+
+#if CONFIG_KERN_PRI
+ proc->link.pri = 0;
+#endif
}
MOD_DEFINE(proc);
void proc_init(void)
{
- LIST_INIT(&ProcReadyList);
+ LIST_INIT(&proc_ready_list);
-#if CONFIG_KERN_MONITOR
- monitor_init();
+#if CONFIG_KERN_HEAP
+ LIST_INIT(&zombie_list);
+ heap_init(&proc_heap, heap_buf, sizeof(heap_buf));
#endif
-
- /* We "promote" the current context into a real process. The only thing we have
+ /*
+ * We "promote" the current context into a real process. The only thing we have
	 * to do is create a PCB and make it current. We don't need to set up the stack
* pointer because it will be written the first time we switch to another process.
*/
- proc_init_struct(&MainProcess);
- CurrentProcess = &MainProcess;
+ proc_initStruct(&main_process);
+ current_process = &main_process;
- /* Make sure the assembly routine is up-to-date with us */
- ASSERT(asm_switch_version() == 1);
+#if CONFIG_KERN_MONITOR
+ monitor_init();
+ monitor_add(current_process, "main");
+#endif
MOD_INIT(proc);
}
+#if CONFIG_KERN_HEAP
+
+/**
+ * Free all the resources of all zombie processes previously added to the zombie
+ * list.
+ */
+static void proc_freeZombies(void)
+{
+ Process *proc;
+
+ while (1)
+ {
+ PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list));
+ if (proc == NULL)
+ return;
+
+ if (proc->flags & PF_FREESTACK)
+ {
+ PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base,
+ proc->stack_size + PROC_SIZE_WORDS * sizeof(cpu_stack_t)));
+ }
+ }
+}
+
+/**
+ * Enqueue a process in the zombie list.
+ */
+static void proc_addZombie(Process *proc)
+{
+ Node *node;
+#if CONFIG_KERN_PREEMPT
+ ASSERT(!proc_preemptAllowed());
+#endif
+
+#if CONFIG_KERN_PRI
+ node = &(proc)->link.link;
+#else
+ node = &(proc)->link;
+#endif
+ LIST_ASSERT_VALID(&zombie_list);
+ ADDTAIL(&zombie_list, node);
+}
+
+#endif /* CONFIG_KERN_HEAP */
+
/**
* Create a new process, starting at the provided entry point.
*
+ * \note The function
+ * \code
+ * proc_new(entry, data, stacksize, stack)
+ * \endcode
+ * is a more convenient way to create a process, as you don't have to specify
+ * the name.
+ *
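+ * A minimal usage sketch (all names below are illustrative, not part of
+ * the kernel API):
+ * \code
+ * static cpu_stack_t worker_stack[KERN_MINSTACKSIZE / sizeof(cpu_stack_t)];
+ *
+ * static void worker(void)
+ * {
+ *     // process body; returning terminates the process
+ * }
+ *
+ * proc_new(worker, NULL, sizeof(worker_stack), worker_stack);
+ * \endcode
+ *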
 * \return Process structure of the newly created process
* if successful, NULL otherwise.
*/
-struct Process *proc_new_with_name(UNUSED(const char *, name), void (*entry)(void), iptr_t data, size_t stacksize, cpustack_t *stack_base)
+struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
{
Process *proc;
- size_t i;
- size_t proc_size_words = ROUND2(sizeof(Process), sizeof(cpustack_t)) / sizeof(cpustack_t);
+ LOG_INFO("name=%s", name);
#if CONFIG_KERN_HEAP
bool free_stack = false;
-#endif
-#if (ARCH & ARCH_EMUL)
- /* Ignore stack provided by caller and use the large enough default instead. */
- stack_base = (cpustack_t *)LIST_HEAD(&StackFreeList);
- REMOVE(LIST_HEAD(&StackFreeList));
- stacksize = CONFIG_PROC_DEFSTACKSIZE;
-#elif CONFIG_KERN_HEAP
+ /*
+ * Free up resources of a zombie process.
+ *
+	 * We're implementing a kind of lazy garbage collector here for
+	 * efficiency reasons: we avoid burdening another kernel task (e.g.,
+	 * idle) with the job of freeing up resources, and we add no overhead
+	 * to the scheduler after a context switch (which would be *very*
+	 * bad, because the scheduler runs with IRQs disabled).
+	 *
+	 * This way we can release the memory of zombie tasks without
+	 * disabling IRQs and without introducing any significant overhead
+	 * in any other kernel task.
+ */
+ proc_freeZombies();
+
/* Did the caller provide a stack for us? */
if (!stack_base)
{
/* Did the caller specify the desired stack size? */
- if (!stacksize)
- stacksize = CONFIG_PROC_DEFSTACKSIZE + sizeof(Process);
+ if (!stack_size)
+ stack_size = KERN_MINSTACKSIZE;
	/* Allocate stack dynamically */
- if (!(stack_base = heap_alloc(stacksize)))
+ PROC_ATOMIC(stack_base =
+ (cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
+ if (stack_base == NULL)
return NULL;
free_stack = true;
}
-#else
+
+#else // CONFIG_KERN_HEAP
+
/* Stack must have been provided by the user */
- ASSERT(stack_base);
- ASSERT(stacksize);
-#endif
+ ASSERT_VALID_PTR(stack_base);
+ ASSERT(stack_size);
+
+#endif // CONFIG_KERN_HEAP
#if CONFIG_KERN_MONITOR
- /* Fill-in the stack with a special marker to help debugging */
- memset(stack_base, CONFIG_KERN_STACKFILLCODE, stacksize / sizeof(cpustack_t));
+ /*
+	 * Fill the stack with a special marker to help debugging.
+	 * On 64-bit platforms, CONFIG_KERN_STACKFILLCODE is larger
+	 * than an int, so the (int) cast is required to silence the
+	 * truncation warning.
+ */
+ memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
#endif
/* Initialize the process control block */
if (CPU_STACK_GROWS_UPWARD)
{
- proc = (Process*)stack_base;
- proc->stack = stack_base + proc_size_words;
+ proc = (Process *)stack_base;
+ proc->stack = stack_base + PROC_SIZE_WORDS;
+	// On some architectures the stack must be aligned, so align it here.
+ proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
if (CPU_SP_ON_EMPTY_SLOT)
proc->stack++;
}
else
{
- proc = (Process*)(stack_base + stacksize / sizeof(cpustack_t) - proc_size_words);
- proc->stack = (cpustack_t*)proc;
+ proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
+	// On some architectures the stack must be aligned, so align it here.
+ proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
if (CPU_SP_ON_EMPTY_SLOT)
proc->stack--;
}
+ /* Ensure stack is aligned */
+ ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);
- proc_init_struct(proc);
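+	/* The TCB lives inside the stack buffer, so shrink the usable stack size accordingly. */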
+ stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
+ proc_initStruct(proc);
proc->user_data = data;
-#if CONFIG_KERN_HEAP
+#if CONFIG_KERN_HEAP || CONFIG_KERN_MONITOR
proc->stack_base = stack_base;
proc->stack_size = stack_size;
+ #if CONFIG_KERN_HEAP
if (free_stack)
proc->flags |= PF_FREESTACK;
+ #endif
#endif
+ proc->user_entry = entry;
+ CPU_CREATE_NEW_STACK(proc->stack);
- /* Initialize process stack frame */
- CPU_PUSH_CALL_CONTEXT(proc->stack, proc_exit);
- CPU_PUSH_CALL_CONTEXT(proc->stack, entry);
-
- /* Push a clean set of CPU registers for asm_switch_context() */
- for (i = 0; i < CPU_SAVED_REGS_CNT; i++)
- CPU_PUSH_WORD(proc->stack, CPU_REG_INIT_VALUE(i));
+#if CONFIG_KERN_MONITOR
+ monitor_add(proc, name);
+#endif
/* Add to ready list */
ATOMIC(SCHED_ENQUEUE(proc));
+ return proc;
+}
+
+/**
+ * Return the name of the specified process.
+ *
+ * NULL is a legal argument and will return the name "<NULL>".
+ */
+const char *proc_name(struct Process *proc)
+{
#if CONFIG_KERN_MONITOR
- monitor_add(proc, name, stack_base, stacksize);
+ return proc ? proc->monitor.name : "<NULL>";
+#else
+ (void)proc;
+ return "---";
#endif
+}
- return proc;
+/// Return the name of the currently running process
+const char *proc_currentName(void)
+{
+ return proc_name(proc_current());
}
-/** Rename a process */
+/// Rename a process
void proc_rename(struct Process *proc, const char *name)
{
#if CONFIG_KERN_MONITOR
}
+#if CONFIG_KERN_PRI
/**
- * System scheduler: pass CPU control to the next process in
- * the ready queue.
+ * Change the scheduling priority of a process.
+ *
+ * Process priorities are signed ints, where a larger integer value means
+ * higher scheduling priority. The default priority for new processes is 0.
+ * The idle process runs with the lowest possible priority: INT_MIN.
+ *
+ * A process with a higher priority always preempts lower priority processes.
+ * Processes of equal priority share the CPU time according to a simple
+ * round-robin policy.
*
- * Saving and restoring the context on the stack is done
- * by a CPU-dependent support routine which must usually be
- * written in assembly.
+ * As a general rule to maximize responsiveness, compute-bound processes
+ * should be assigned negative priorities and tight, interactive processes
+ * should be assigned positive priorities.
+ *
+ * To avoid interfering with system background activities such as input
+ * processing, application processes should remain within the range -10
+ * to +10.
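+ *
+ * A usage sketch (worker_proc is an illustrative handle returned by
+ * proc_new()):
+ * \code
+ * proc_setPri(worker_proc, -5);    // deprioritize a compute-bound worker
+ * proc_setPri(proc_current(), 5);  // boost the current interactive process
+ * \endcode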
*/
-void proc_schedule(void)
+void proc_setPri(struct Process *proc, int pri)
{
- struct Process *old_process;
- cpuflags_t flags;
+ if (proc->link.pri == pri)
+ return;
- /* Remember old process to save its context later */
- old_process = CurrentProcess;
+ proc->link.pri = pri;
-#ifdef IRQ_RUNNING
- /* Scheduling in interrupts is a nono. */
- ASSERT(!IRQ_RUNNING());
-#endif
+ if (proc != current_process)
+ ATOMIC(sched_reenqueue(proc));
+}
+#endif // CONFIG_KERN_PRI
- /* Poll on the ready queue for the first ready process */
- IRQ_SAVE_DISABLE(flags);
- while (!(CurrentProcess = (struct Process *)list_remHead(&ProcReadyList)))
- {
- /*
- * Make sure we physically reenable interrupts here, no matter what
- * the current task status is. This is important because if we
- * are idle-spinning, we must allow interrupts, otherwise no
- * process will ever wake up.
- *
- * During idle-spinning, an interrupt can occur and it may
- * modify \p ProcReadyList. To ensure that compiler reload this
- * variable every while cycle we call CPU_MEMORY_BARRIER.
- * The memory barrier ensure that all variables used in this context
- * are reloaded.
- * \todo If there was a way to write sig_wait() so that it does not
- * disable interrupts while waiting, there would not be any
- * reason to do this.
- */
- IRQ_ENABLE;
- CPU_IDLE;
- MEMORY_BARRIER;
- IRQ_DISABLE;
- }
- IRQ_RESTORE(flags);
+INLINE void proc_run(void)
+{
+ void (*entry)(void) = current_process->user_entry;
+
+ LOG_INFO("New process starting at %p", entry);
+ entry();
+}
+/**
+ * Entry point for all the processes.
+ */
+void proc_entry(void)
+{
/*
- * Optimization: don't switch contexts when the active
- * process has not changed.
+	 * Returning from a context switch assumes interrupts are disabled, so
+ * we need to explicitly re-enable them as soon as possible.
*/
- if (CurrentProcess != old_process)
- {
- cpustack_t *dummy;
-
-#if CONFIG_KERN_PREEMPTIVE
- /* Reset quantum for this process */
- Quantum = CONFIG_KERN_QUANTUM;
-#endif
-
- /* Save context of old process and switch to new process. If there is no
- * old process, we save the old stack pointer into a dummy variable that
- * we ignore. In fact, this happens only when the old process has just
- * exited.
- * TODO: Instead of physically clearing the process at exit time, a zombie
- * list should be created.
- */
- asm_switch_context(&CurrentProcess->stack, old_process ? &old_process->stack : &dummy);
- }
-
- /* This RET resumes the execution on the new process */
+ IRQ_ENABLE;
+ /* Call the actual process's entry point */
+ proc_run();
+ proc_exit();
}
-
/**
* Terminate the current process
*/
void proc_exit(void)
{
+ LOG_INFO("%p:%s", current_process, proc_currentName());
+
#if CONFIG_KERN_MONITOR
- monitor_remove(CurrentProcess);
+ monitor_remove(current_process);
#endif
+ proc_forbid();
#if CONFIG_KERN_HEAP
/*
- * The following code is BROKEN.
- * We are freeing our own stack before entering proc_schedule()
- * BAJO: A correct fix would be to rearrange the scheduler with
- * an additional parameter which frees the old stack/process
- * after a context switch.
+	 * Mark the task as a zombie; its resources will be freed lazily by
+	 * proc_new(), the next time a process is created.
*/
- if (CurrentProcess->flags & PF_FREESTACK)
- heap_free(CurrentProcess->stack_base, CurrentProcess->stack_size);
- heap_free(CurrentProcess);
+ proc_addZombie(current_process);
#endif
+ current_process = NULL;
+ proc_permit();
-#if (ARCH & ARCH_EMUL)
-#warning This is wrong
- /* Reinsert process stack in free list */
- ADDHEAD(&StackFreeList, (Node *)(CurrentProcess->stack
- - (CONFIG_PROC_DEFSTACKSIZE / sizeof(cpustack_t))));
-
- /*
- * NOTE: At this point the first two words of what used
- * to be our stack contain a list node. From now on, we
- * rely on the compiler not reading/writing the stack.
- */
-#endif /* ARCH_EMUL */
+ proc_switch();
- CurrentProcess = NULL;
- proc_schedule();
- /* not reached */
+ /* never reached */
+ ASSERT(0);
}
-
/**
- * Co-operative context switch
+ * Call the scheduler and eventually replace the current running process.
*/
-void proc_switch(void)
-{
- cpuflags_t flags;
-
- IRQ_SAVE_DISABLE(flags);
- SCHED_ENQUEUE(CurrentProcess);
- IRQ_RESTORE(flags);
-
- proc_schedule();
-}
-
-
-/**
- * Get the pointer to the current process
- */
-struct Process *proc_current(void)
-{
- return CurrentProcess;
-}
-
-/**
- * Get the pointer to the user data of the current process
- */
-iptr_t proc_current_user_data(void)
+void proc_schedule(void)
{
- return CurrentProcess->user_data;
-}
+ Process *old_process = current_process;
+ IRQ_ASSERT_DISABLED();
-#if CONFIG_KERN_PREEMPTIVE
-
-/**
- * Disable preemptive task switching.
- *
- * The scheduler maintains a per-process nesting counter. Task switching is
- * effectively re-enabled only when the number of calls to proc_permit()
- * matches the number of calls to proc_forbid().
- *
- * Calling functions that could sleep while task switching is disabled
- * is dangerous, although supported. Preemptive task switching is
- * resumed while the process is sleeping and disabled again as soon as
- * it wakes up again.
- *
- * \sa proc_permit()
- */
-void proc_forbid(void)
-{
- /* No need to protect against interrupts here. */
- ++CurrentProcess->forbid_cnt;
-}
-
-/**
- * Re-enable preemptive task switching.
- *
- * \sa proc_forbid()
- */
-void proc_permit(void)
-{
- /* No need to protect against interrupts here. */
- --CurrentProcess->forbid_cnt;
+ /* Poll on the ready queue for the first ready process */
+ LIST_ASSERT_VALID(&proc_ready_list);
+ while (!(current_process = (struct Process *)list_remHead(&proc_ready_list)))
+ {
+ /*
+ * Make sure we physically reenable interrupts here, no matter what
+ * the current task status is. This is important because if we
+ * are idle-spinning, we must allow interrupts, otherwise no
+ * process will ever wake up.
+ *
+ * During idle-spinning, an interrupt can occur and it may
+		 * modify \p proc_ready_list. To ensure that the compiler reloads
+		 * this variable on each loop iteration, we issue a MEMORY_BARRIER,
+		 * which forces all variables used in this context to be reloaded.
+ * \todo If there was a way to write sig_wait() so that it does not
+ * disable interrupts while waiting, there would not be any
+ * reason to do this.
+ */
+ IRQ_ENABLE;
+ CPU_IDLE;
+ MEMORY_BARRIER;
+ IRQ_DISABLE;
+ }
+ proc_switchTo(current_process, old_process);
+ /* This RET resumes the execution on the new process */
+ LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
}
-
-#endif /* CONFIG_KERN_PREEMPTIVE */