4 * This file is part of BeRTOS.
6 * BeRTOS is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * As a special exception, you may use this file as part of a free software
21 * library without restriction. Specifically, if other files instantiate
22 * templates or use macros or inline functions from this file, or you compile
23 * this file and link it with other files to produce an executable, this
24 * file does not by itself cause the resulting executable to be covered by
25 * the GNU General Public License. This exception does not however
26 * invalidate any other reasons why the executable file might be covered by
27 * the GNU General Public License.
29 * Copyright 2001,2004 Develer S.r.l. (http://www.develer.com/)
30 * Copyright 1999,2000,2001 Bernie Innocenti <bernie@codewiz.org>
34 * \brief Simple realtime multitasking scheduler.
35 * Context switching is only done cooperatively.
38 * \author Bernie Innocenti <bernie@codewiz.org>
39 * \author Stefano Fedrigo <aleph@develer.com>
45 #include "cfg/cfg_arch.h" /* ARCH_EMUL */
46 #include <cfg/debug.h>
47 #include <cfg/module.h>
49 // Log settings for cfg/log.h.
50 #define LOG_LEVEL KERN_LOG_LEVEL
51 #define LOG_FORMAT KERN_LOG_FORMAT
55 #include <cpu/types.h>
57 #include <cpu/frame.h>
59 #include <mware/event.h>
61 #include <string.h> /* memset() */
64 * CPU dependent context switching routines.
66 * Saving and restoring the context on the stack is done by a CPU-dependent
67 * support routine which usually needs to be written in assembly.
69 EXTERN_C void asm_switch_context(cpustack_t **new_sp, cpustack_t **save_sp);
72 * The scheduler tracks ready processes by enqueuing them in the
75 * \note Access to the list must occur while interrupts are disabled.
77 REGISTER List ProcReadyList;
80 * Holds a pointer to the TCB of the currently running process.
82 * \note User applications should use proc_current() to retrieve this value.
84 REGISTER Process *CurrentProcess;
86 #if CONFIG_KERN_PREEMPTIVE
88 * The time sharing scheduler forces a task switch when the current
89 * process has exhausted its quantum.
95 #if (ARCH & ARCH_EMUL)
97 * In hosted environments, we must emulate the stack on the real process stack.
99 * Access to this list must be protected by PROC_ATOMIC().
101 extern List StackFreeList;
104 /** The main process (the one that executes main()). */
105 struct Process MainProcess;
/*
 * Reset the fields of a process control block to a clean initial state.
 * Called both for newly created processes and when "promoting" the main
 * context into a process.
 *
 * NOTE(review): this extraction shows only fragments of the body (closing
 * brace and several guarded lines are missing); signal-related fields are
 * presumably cleared under CONFIG_KERN_SIGNALS — verify against the full file.
 */
108 static void proc_init_struct(Process *proc)
110 /* Avoid warning for unused argument. */
113 #if CONFIG_KERN_SIGNALS
117 #if CONFIG_KERN_PREEMPTIVE
/* Start with preemption enabled: zero nesting count for proc_forbid(). */
118 proc->forbid_cnt = 0;
/*
 * NOTE(review): fragment of the scheduler initialization routine (the
 * enclosing function signature, presumably proc_init(), is not visible in
 * this extraction).
 */
/* Start with an empty ready queue. */
130 LIST_INIT(&ProcReadyList);
133 * We "promote" the current context into a real process. The only thing we have
134 * to do is create a PCB and make it current. We don't need to setup the stack
135 * pointer because it will be written the first time we switch to another process.
137 proc_init_struct(&MainProcess);
138 CurrentProcess = &MainProcess;
140 #if CONFIG_KERN_MONITOR
/* Register the promoted main context with the kernel monitor. */
142 monitor_add(CurrentProcess, "main");
150 * Create a new process, starting at the provided entry point.
 *
 * \param name        Symbolic process name (used only by the kernel monitor).
 * \param entry       Entry point of the new process; must never return
 *                    (proc_exit() is pushed below it as a safety net).
 * \param data        Opaque user data, retrievable via proc_current_user_data().
 * \param stack_size  Size in bytes of the stack; may be ignored/overridden
 *                    depending on configuration (see below).
 * \param stack_base  Caller-supplied stack, or NULL to let the kernel
 *                    allocate one (CONFIG_KERN_HEAP builds only).
 *
152 * \return Process structure of new created process
153 * if successful, NULL otherwise.
 *
 * NOTE(review): several lines of this function (braces, heap bookkeeping,
 * return statement) are missing from this extraction.
155 struct Process *proc_new_with_name(UNUSED(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpustack_t *stack_base)
/* Size of the PCB, rounded up to a whole number of stack words. */
159 const size_t PROC_SIZE_WORDS = ROUND2(sizeof(Process), sizeof(cpustack_t)) / sizeof(cpustack_t);
161 bool free_stack = false;
163 TRACEMSG("name=%s", name);
165 #if (ARCH & ARCH_EMUL)
166 /* Ignore stack provided by caller and use the large enough default instead. */
167 PROC_ATOMIC(stack_base = (cpustack_t *)list_remHead(&StackFreeList));
169 stack_size = CONFIG_PROC_DEFSTACKSIZE;
170 #elif CONFIG_KERN_HEAP
171 /* Did the caller provide a stack for us? */
174 /* Did the caller specify the desired stack size? */
/* Default stack must also leave room for the embedded PCB. */
176 stack_size = CONFIG_PROC_DEFSTACKSIZE + sizeof(Process);
178 /* Allocate stack dynamically */
179 if (!(stack_base = heap_alloc(stack_size)))
185 /* Stack must have been provided by the user */
186 ASSERT_VALID_PTR(stack_base);
190 #if CONFIG_KERN_MONITOR
191 /* Fill-in the stack with a special marker to help debugging */
192 #warning size incorrect
/*
 * NOTE(review): as flagged by the #warning above, memset's third argument
 * is a BYTE count, so dividing stack_size by sizeof(cpustack_t) fills only
 * a fraction of the stack with the marker pattern.
 */
193 memset(stack_base, CONFIG_KERN_STACKFILLCODE, stack_size / sizeof(cpustack_t));
196 /* Initialize the process control block */
/* The PCB lives inside the stack area, at the end opposite to growth. */
197 if (CPU_STACK_GROWS_UPWARD)
199 proc = (Process*)stack_base;
200 proc->stack = stack_base + PROC_SIZE_WORDS;
201 if (CPU_SP_ON_EMPTY_SLOT)
206 proc = (Process*)(stack_base + stack_size / sizeof(cpustack_t) - PROC_SIZE_WORDS);
207 proc->stack = (cpustack_t*)proc;
208 if (CPU_SP_ON_EMPTY_SLOT)
212 proc_init_struct(proc);
213 proc->user_data = data;
215 #if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR | (ARCH & ARCH_EMUL)
/* Remember the stack extents so proc_exit()/monitor can find them. */
216 proc->stack_base = stack_base;
217 proc->stack_size = stack_size;
220 proc->flags |= PF_FREESTACK;
224 /* Initialize process stack frame */
/* proc_exit is pushed first so it becomes the return address of entry(). */
225 CPU_PUSH_CALL_FRAME(proc->stack, proc_exit);
226 CPU_PUSH_CALL_FRAME(proc->stack, entry);
228 /* Push a clean set of CPU registers for asm_switch_context() */
229 for (i = 0; i < CPU_SAVED_REGS_CNT; i++)
230 CPU_PUSH_WORD(proc->stack, CPU_REG_INIT_VALUE(i));
232 /* Add to ready list */
233 ATOMIC(SCHED_ENQUEUE(proc));
234 ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
236 #if CONFIG_KERN_MONITOR
237 monitor_add(proc, name);
/**
 * Rename a process.
 *
 * The name is only meaningful when the kernel monitor is compiled in;
 * otherwise both arguments are deliberately ignored.
 *
 * \param proc  Process to rename.
 * \param name  New symbolic name.
 */
void proc_rename(struct Process *proc, const char *name)
{
#if CONFIG_KERN_MONITOR
	monitor_rename(proc, name);
#else
	/* Silence unused-parameter warnings in monitor-less builds. */
	(void)proc; (void)name;
#endif
}
255 * System scheduler: pass CPU control to the next process in
 * the ready queue, saving the context of the one currently running.
 *
 * NOTE(review): several lines of this function (variable declarations such
 * as flags/dummy, braces, IRQ_RESTORE and the final #endif's) are missing
 * from this extraction.
258 void proc_schedule(void)
260 struct Process *old_process;
263 ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
/* Context switching must happen outside interrupt context, IRQs enabled. */
264 ASSERT_USER_CONTEXT();
265 ASSERT_IRQ_ENABLED();
267 /* Remember old process to save its context later */
268 old_process = CurrentProcess;
270 /* Poll on the ready queue for the first ready process */
271 IRQ_SAVE_DISABLE(flags);
272 while (!(CurrentProcess = (struct Process *)list_remHead(&ProcReadyList)))
275 * Make sure we physically reenable interrupts here, no matter what
276 * the current task status is. This is important because if we
277 * are idle-spinning, we must allow interrupts, otherwise no
278 * process will ever wake up.
280 * During idle-spinning, an interrupt can occur and it may
281 * modify \p ProcReadyList. To ensure that the compiler reloads this
282 * variable on every loop iteration, we call CPU_MEMORY_BARRIER.
283 * The memory barrier ensures that all variables used in this context
285 * \todo If there was a way to write sig_wait() so that it does not
286 * disable interrupts while waiting, there would not be any
297 * Optimization: don't switch contexts when the active
298 * process has not changed.
300 if (CurrentProcess != old_process)
304 #if CONFIG_KERN_MONITOR
305 LOG_INFO("Switch from %p(%s) to %p(%s)\n",
306 old_process, old_process ? old_process->monitor.name : "NONE",
307 CurrentProcess, CurrentProcess->monitor.name);
310 #if CONFIG_KERN_PREEMPTIVE
311 /* Reset quantum for this process */
312 Quantum = CONFIG_KERN_QUANTUM;
315 /* Save context of old process and switch to new process. If there is no
316 * old process, we save the old stack pointer into a dummy variable that
317 * we ignore. In fact, this happens only when the old process has just
319 * TODO: Instead of physically clearing the process at exit time, a zombie
320 * list should be created.
322 asm_switch_context(&CurrentProcess->stack, old_process ? &old_process->stack : &dummy);
325 /* This RET resumes the execution on the new process */
330 * Terminate the current process
 * and schedule the next ready one. Never returns to the caller.
 *
 * NOTE(review): the function signature line (presumably
 * `void proc_exit(void)`), braces and several config guards are missing
 * from this extraction.
336 #if CONFIG_KERN_MONITOR
337 monitor_remove(CurrentProcess);
342 * The following code is BROKEN.
343 * We are freeing our own stack before entering proc_schedule()
344 * BAJO: A correct fix would be to rearrange the scheduler with
345 * an additional parameter which frees the old stack/process
346 * after a context switch.
348 if (CurrentProcess->flags & PF_FREESTACK)
349 heap_free(CurrentProcess->stack_base, CurrentProcess->stack_size);
350 heap_free(CurrentProcess);
353 #if (ARCH & ARCH_EMUL)
354 #warning This is wrong
355 /* Reinsert process stack in free list */
356 PROC_ATOMIC(ADDHEAD(&StackFreeList, (Node *)(CurrentProcess->stack
357 - (CONFIG_PROC_DEFSTACKSIZE / sizeof(cpustack_t)))));
360 * NOTE: At this point the first two words of what used
361 * to be our stack contain a list node. From now on, we
362 * rely on the compiler not reading/writing the stack.
364 #endif /* ARCH_EMUL */
/* Not re-enqueued: proc_schedule() will not save this context again. */
366 CurrentProcess = NULL;
373 * Co-operative context switch
 * : voluntarily yield the CPU, re-enqueuing the current process at the
 * tail of the ready list before scheduling.
 *
 * NOTE(review): the trailing call into the scheduler (presumably
 * proc_schedule()) and the closing brace are missing from this extraction.
375 void proc_switch(void)
377 ATOMIC(SCHED_ENQUEUE(CurrentProcess));
384 * Get the pointer to the current process
386 struct Process *proc_current(void)
388 return CurrentProcess;
392 * Get the pointer to the user data of the current process
394 iptr_t proc_current_user_data(void)
396 return CurrentProcess->user_data;
#if CONFIG_KERN_PREEMPTIVE

/**
 * Disable preemptive task switching.
 *
 * The scheduler maintains a per-process nesting counter. Task switching is
 * effectively re-enabled only when the number of calls to proc_permit()
 * matches the number of calls to proc_forbid().
 *
 * Calling functions that could sleep while task switching is disabled
 * is dangerous, although supported. Preemptive task switching is
 * resumed while the process is sleeping and disabled again as soon as
 * it resumes execution.
 *
 * \sa proc_permit()
 */
void proc_forbid(void)
{
	/* No need to protect against interrupts here. */
	++CurrentProcess->forbid_cnt;
}

/**
 * Re-enable preemptive task switching.
 *
 * Must balance a previous call to proc_forbid().
 *
 * \sa proc_forbid()
 */
void proc_permit(void)
{
	/* No need to protect against interrupts here. */
	--CurrentProcess->forbid_cnt;
}

#endif /* CONFIG_KERN_PREEMPTIVE */