* invalidate any other reasons why the executable file might be covered by
* the GNU General Public License.
*
- * Copyright 2001,2004 Develer S.r.l. (http://www.develer.com/)
- * Copyright 1999,2000,2001 Bernardo Innocenti <bernie@develer.com>
- *
+ * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
+ * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
* -->
*
- * \brief Simple realtime multitasking scheduler.
- * Context switching is only done cooperatively.
+ * \brief Simple cooperative multitasking scheduler.
*
* \version $Id$
- *
- * \author Bernardo Innocenti <bernie@develer.com>
+ * \author Bernie Innocenti <bernie@codewiz.org>
* \author Stefano Fedrigo <aleph@develer.com>
*/
-
#include "proc_p.h"
#include "proc.h"
-#include <cfg/cfg_arch.h> /* ARCH_EMUL */
-#include <cfg/debug.h>
+#include "cfg/cfg_arch.h" // ARCH_EMUL
+#include "cfg/cfg_proc.h"
+#include "cfg/cfg_monitor.h"
+#include <cfg/macros.h> // ROUND_UP2
#include <cfg/module.h>
-#include <cfg/macros.h> /* ABS() */
+#include <cfg/depend.h> // CONFIG_DEPEND()
#include <cpu/irq.h>
#include <cpu/types.h>
#include <cpu/attr.h>
+#include <cpu/frame.h>
-#include <mware/event.h>
+#if CONFIG_KERN_HEAP
+ #include <struct/heap.h>
+#endif
#include <string.h> /* memset() */
-/**
- * CPU dependent context switching routines.
+/*
+ * The scheduler tracks ready processes by enqueuing them in the
+ * ready list.
*
- * \note This function *MUST* preserve also the status of the interrupts.
+ * \note Access to the list must occur while interrupts are disabled.
*/
-EXTERN_C void asm_switch_context(cpustack_t **new_sp, cpustack_t **save_sp);
-EXTERN_C int asm_switch_version(void);
+REGISTER List ProcReadyList;
/*
- * The scheduer tracks ready and waiting processes
- * by enqueuing them in these lists. A pointer to the currently
- * running process is stored in the CurrentProcess pointer.
+ * Holds a pointer to the TCB of the currently running process.
*
- * NOTE: these variables are protected by DI/EI locking
+ * \note User applications should use proc_current() to retrieve this value.
*/
REGISTER Process *CurrentProcess;
-REGISTER List ProcReadyList;
-
-#if CONFIG_KERN_PREEMPTIVE
+#if (ARCH & ARCH_EMUL)
/*
- * The time sharing scheduler forces a task switch when
- * the current process has consumed its quantum.
+ * In some hosted environments, we must emulate the stack on the real
+ * process stack to satisfy consistency checks in system libraries and
+ * because some ABIs place trampolines on the stack.
+ *
+ * Access to this list must be protected by PROC_ATOMIC().
*/
-uint16_t Quantum;
-#endif
-
+List StackFreeList;
-/* In Win32 we must emulate stack on the real process stack */
-#if (ARCH & ARCH_EMUL)
-extern List StackFreeList;
+#define NPROC 10
+cpu_stack_t proc_stacks[NPROC][(64 * 1024) / sizeof(cpu_stack_t)];
#endif
/** The main process (the one that executes main()). */
#if CONFIG_KERN_SIGNALS
proc->sig_recv = 0;
-#endif
-
-#if CONFIG_KERN_PREEMPTIVE
- proc->forbid_cnt = 0;
+ proc->sig_wait = 0;
#endif
#if CONFIG_KERN_HEAP
proc->flags = 0;
#endif
+
+#if CONFIG_KERN_PRI
+ proc->link.pri = 0;
+#endif
+
}
MOD_DEFINE(proc);
{
LIST_INIT(&ProcReadyList);
-#if CONFIG_KERN_MONITOR
- monitor_init();
+#if ARCH & ARCH_EMUL
+ LIST_INIT(&StackFreeList);
+ for (int i = 0; i < NPROC; i++)
+ ADDTAIL(&StackFreeList, (Node *)proc_stacks[i]);
#endif
- /* We "promote" the current context into a real process. The only thing we have
+ /*
+ * We "promote" the current context into a real process. The only thing we have
* to do is create a PCB and make it current. We don't need to setup the stack
* pointer because it will be written the first time we switch to another process.
*/
proc_init_struct(&MainProcess);
CurrentProcess = &MainProcess;
- /* Make sure the assembly routine is up-to-date with us */
- ASSERT(asm_switch_version() == 1);
+#if CONFIG_KERN_MONITOR
+ monitor_init();
+ monitor_add(CurrentProcess, "main");
+#endif
+
+#if CONFIG_KERN_PREEMPT
+ preempt_init();
+#endif
+
MOD_INIT(proc);
}
-
/**
* Create a new process, starting at the provided entry point.
*
* \return Process structure of new created process
* if successful, NULL otherwise.
*/
-struct Process *proc_new_with_name(UNUSED(const char *, name), void (*entry)(void), iptr_t data, size_t stacksize, cpustack_t *stack_base)
+struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
{
Process *proc;
- size_t i;
- size_t proc_size_words = ROUND2(sizeof(Process), sizeof(cpustack_t)) / sizeof(cpustack_t);
+ const size_t PROC_SIZE_WORDS = ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t);
#if CONFIG_KERN_HEAP
bool free_stack = false;
#endif
+ TRACEMSG("name=%s", name);
#if (ARCH & ARCH_EMUL)
/* Ignore stack provided by caller and use the large enough default instead. */
- stack_base = (cpustack_t *)LIST_HEAD(&StackFreeList);
- REMOVE(LIST_HEAD(&StackFreeList));
- stacksize = CONFIG_PROC_DEFSTACKSIZE;
+ PROC_ATOMIC(stack_base = (cpu_stack_t *)list_remHead(&StackFreeList));
+ ASSERT(stack_base);
+
+ stack_size = CONFIG_KERN_MINSTACKSIZE;
#elif CONFIG_KERN_HEAP
/* Did the caller provide a stack for us? */
if (!stack_base)
{
/* Did the caller specify the desired stack size? */
- if (!stacksize)
- stacksize = CONFIG_PROC_DEFSTACKSIZE + sizeof(Process);
+ if (!stack_size)
+ stack_size = CONFIG_KERN_MINSTACKSIZE;
/* Allocate stack dinamically */
- if (!(stack_base = heap_alloc(stacksize)))
+ if (!(stack_base = heap_alloc(stack_size)))
return NULL;
free_stack = true;
}
-#else
+
+#else // !ARCH_EMUL && !CONFIG_KERN_HEAP
+
/* Stack must have been provided by the user */
- ASSERT(stack_base);
- ASSERT(stacksize);
-#endif
+ ASSERT_VALID_PTR(stack_base);
+ ASSERT(stack_size);
+
+#endif // !ARCH_EMUL && !CONFIG_KERN_HEAP
#if CONFIG_KERN_MONITOR
- /* Fill-in the stack with a special marker to help debugging */
- memset(stack_base, CONFIG_KERN_STACKFILLCODE, stacksize / sizeof(cpustack_t));
+ /*
+ * Fill-in the stack with a special marker to help debugging.
+ * On 64bit platforms, CONFIG_KERN_STACKFILLCODE is larger
+ * than an int, so the (int) cast is required to silence the
+ * warning for truncating its size.
+ */
+ memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
#endif
/* Initialize the process control block */
if (CPU_STACK_GROWS_UPWARD)
{
- proc = (Process*)stack_base;
- proc->stack = stack_base + proc_size_words;
+ proc = (Process *)stack_base;
+ proc->stack = stack_base + PROC_SIZE_WORDS;
+		// On some architectures the stack must be aligned, so align it here.
+ proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
if (CPU_SP_ON_EMPTY_SLOT)
proc->stack++;
}
else
{
- proc = (Process*)(stack_base + stacksize / sizeof(cpustack_t) - proc_size_words);
- proc->stack = (cpustack_t*)proc;
+ proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
+		// On some architectures the stack must be aligned, so align it here.
+ proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
if (CPU_SP_ON_EMPTY_SLOT)
proc->stack--;
}
+ /* Ensure stack is aligned */
+ ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);
+ stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
proc_init_struct(proc);
proc->user_data = data;
-#if CONFIG_KERN_HEAP
+#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR | (ARCH & ARCH_EMUL)
proc->stack_base = stack_base;
proc->stack_size = stack_size;
+ #if CONFIG_KERN_HEAP
if (free_stack)
proc->flags |= PF_FREESTACK;
+ #endif
#endif
- /* Initialize process stack frame */
- CPU_PUSH_CALL_CONTEXT(proc->stack, proc_exit);
- CPU_PUSH_CALL_CONTEXT(proc->stack, entry);
+ #if CONFIG_KERN_PREEMPT
- /* Push a clean set of CPU registers for asm_switch_context() */
- for (i = 0; i < CPU_SAVED_REGS_CNT; i++)
- CPU_PUSH_WORD(proc->stack, CPU_REG_INIT_VALUE(i));
+ getcontext(&proc->context);
+ proc->context.uc_stack.ss_sp = proc->stack;
+ proc->context.uc_stack.ss_size = stack_size - 1;
+ proc->context.uc_link = NULL;
+ makecontext(&proc->context, (void (*)(void))proc_entry, 1, entry);
+
+ #else // !CONFIG_KERN_PREEMPT
+
+ CPU_CREATE_NEW_STACK(proc->stack, entry, proc_exit);
+
+ #endif // CONFIG_KERN_PREEMPT
+
+ #if CONFIG_KERN_MONITOR
+ monitor_add(proc, name);
+ #endif
/* Add to ready list */
ATOMIC(SCHED_ENQUEUE(proc));
-#if CONFIG_KERN_MONITOR
- monitor_add(proc, name, stack_base, stacksize);
-#endif
-
return proc;
}
-/** Rename a process */
+/**
+ * Return the name of the specified process.
+ *
+ * NULL is a legal argument and will return the name "<NULL>".
+ */
+const char *proc_name(struct Process *proc)
+{
+ #if CONFIG_KERN_MONITOR
+ return proc ? proc->monitor.name : "<NULL>";
+ #else
+ (void)proc;
+ return "---";
+ #endif
+}
+
+/// Return the name of the currently running process
+const char *proc_currentName(void)
+{
+ return proc_name(proc_current());
+}
+
+/// Rename a process
void proc_rename(struct Process *proc, const char *name)
{
#if CONFIG_KERN_MONITOR
}
+#if CONFIG_KERN_PRI
/**
- * System scheduler: pass CPU control to the next process in
- * the ready queue.
+ * Change the scheduling priority of a process.
+ *
+ * Process priorities are signed ints, where a larger integer value means
+ * higher scheduling priority. The default priority for new processes is 0.
+ * The idle process runs with the lowest possible priority: INT_MIN.
+ *
+ * A process with a higher priority always preempts lower priority processes.
+ * Processes of equal priority share the CPU time according to a simple
+ * round-robin policy.
*
- * Saving and restoring the context on the stack is done
- * by a CPU-dependent support routine which must usually be
- * written in assembly.
+ * As a general rule to maximize responsiveness, compute-bound processes
+ * should be assigned negative priorities and tight, interactive processes
+ * should be assigned positive priorities.
+ *
+ * To avoid interfering with system background activities such as input
+ * processing, application processes should remain within the range -10
+ * and +10.
*/
-void proc_schedule(void)
+void proc_setPri(struct Process *proc, int pri)
{
- struct Process *old_process;
- cpuflags_t flags;
-
- /* Remember old process to save its context later */
- old_process = CurrentProcess;
+ if (proc->link.pri == pri)
+ return;
-#ifdef IRQ_RUNNING
- /* Scheduling in interrupts is a nono. */
- ASSERT(!IRQ_RUNNING());
-#endif
+ proc->link.pri = pri;
- /* Poll on the ready queue for the first ready process */
- IRQ_SAVE_DISABLE(flags);
- while (!(CurrentProcess = (struct Process *)list_remHead(&ProcReadyList)))
- {
- /*
- * Make sure we physically reenable interrupts here, no matter what
- * the current task status is. This is important because if we
- * are idle-spinning, we must allow interrupts, otherwise no
- * process will ever wake up.
- *
- * During idle-spinning, an interrupt can occur and it may
- * modify \p ProcReadyList. To ensure that compiler reload this
- * variable every while cycle we call CPU_MEMORY_BARRIER.
- * The memory barrier ensure that all variables used in this context
- * are reloaded.
- * \todo If there was a way to write sig_wait() so that it does not
- * disable interrupts while waiting, there would not be any
- * reason to do this.
- */
- IRQ_ENABLE;
- CPU_IDLE;
- MEMORY_BARRIER;
- IRQ_DISABLE;
- }
- IRQ_RESTORE(flags);
-
- /*
- * Optimization: don't switch contexts when the active
- * process has not changed.
- */
- if (CurrentProcess != old_process)
- {
- cpustack_t *dummy;
-
-#if CONFIG_KERN_PREEMPTIVE
- /* Reset quantum for this process */
- Quantum = CONFIG_KERN_QUANTUM;
-#endif
-
- /* Save context of old process and switch to new process. If there is no
- * old process, we save the old stack pointer into a dummy variable that
- * we ignore. In fact, this happens only when the old process has just
- * exited.
- * TODO: Instead of physically clearing the process at exit time, a zombie
- * list should be created.
- */
- asm_switch_context(&CurrentProcess->stack, old_process ? &old_process->stack : &dummy);
- }
-
- /* This RET resumes the execution on the new process */
+ if (proc != CurrentProcess)
+ {
+ proc_forbid();
+ ATOMIC(sched_reenqueue(proc));
+ proc_permit();
+ }
}
-
+#endif // CONFIG_KERN_PRI
/**
* Terminate the current process
*/
void proc_exit(void)
{
+ TRACEMSG("%p:%s", CurrentProcess, proc_currentName());
+
#if CONFIG_KERN_MONITOR
monitor_remove(CurrentProcess);
#endif
#endif
#if (ARCH & ARCH_EMUL)
-#warning This is wrong
/* Reinsert process stack in free list */
- ADDHEAD(&StackFreeList, (Node *)(CurrentProcess->stack
- - (CONFIG_PROC_DEFSTACKSIZE / sizeof(cpustack_t))));
+ PROC_ATOMIC(ADDHEAD(&StackFreeList, (Node *)CurrentProcess->stack_base));
/*
* NOTE: At this point the first two words of what used
#endif /* ARCH_EMUL */
CurrentProcess = NULL;
- proc_schedule();
+ proc_switch();
/* not reached */
}
-/**
- * Co-operative context switch
- */
-void proc_switch(void)
-{
- cpuflags_t flags;
-
- IRQ_SAVE_DISABLE(flags);
- SCHED_ENQUEUE(CurrentProcess);
- IRQ_RESTORE(flags);
-
- proc_schedule();
-}
-
-
-/**
- * Get the pointer to the current process
- */
-struct Process *proc_current(void)
-{
- return CurrentProcess;
-}
-
/**
* Get the pointer to the user data of the current process
*/
-iptr_t proc_current_user_data(void)
+iptr_t proc_currentUserData(void)
{
return CurrentProcess->user_data;
}
-
-
-#if CONFIG_KERN_PREEMPTIVE
-
-/**
- * Disable preemptive task switching.
- *
- * The scheduler maintains a per-process nesting counter. Task switching is
- * effectively re-enabled only when the number of calls to proc_permit()
- * matches the number of calls to proc_forbid().
- *
- * Calling functions that could sleep while task switching is disabled
- * is dangerous, although supported. Preemptive task switching is
- * resumed while the process is sleeping and disabled again as soon as
- * it wakes up again.
- *
- * \sa proc_permit()
- */
-void proc_forbid(void)
-{
- /* No need to protect against interrupts here. */
- ++CurrentProcess->forbid_cnt;
-}
-
-/**
- * Re-enable preemptive task switching.
- *
- * \sa proc_forbid()
- */
-void proc_permit(void)
-{
- /* No need to protect against interrupts here. */
- --CurrentProcess->forbid_cnt;
-}
-
-#endif /* CONFIG_KERN_PREEMPTIVE */