4 * This file is part of BeRTOS.
6 * Bertos is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * As a special exception, you may use this file as part of a free software
21 * library without restriction. Specifically, if other files instantiate
22 * templates or use macros or inline functions from this file, or you compile
23 * this file and link it with other files to produce an executable, this
24 * file does not by itself cause the resulting executable to be covered by
25 * the GNU General Public License. This exception does not however
26 * invalidate any other reasons why the executable file might be covered by
27 * the GNU General Public License.
29 * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
30 * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
33 * \brief Simple cooperative multitasking scheduler.
35 * \author Bernie Innocenti <bernie@codewiz.org>
36 * \author Stefano Fedrigo <aleph@develer.com>
42 #include "cfg/cfg_proc.h"
43 #define LOG_LEVEL KERN_LOG_LEVEL
44 #define LOG_FORMAT KERN_LOG_FORMAT
47 #include "cfg/cfg_monitor.h"
48 #include <cfg/macros.h> // ROUND_UP2
49 #include <cfg/module.h>
50 #include <cfg/depend.h> // CONFIG_DEPEND()
53 #include <cpu/types.h>
55 #include <cpu/frame.h>
58 #include <struct/heap.h>
61 #include <string.h> /* memset() */
/* Size of the Process control block, rounded up to a whole number of
 * cpu_stack_t words so the PCB can be carved out of the stack buffer. */
63 #define PROC_SIZE_WORDS (ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t))
66 * CPU dependent context switching routines.
68 * Saving and restoring the context on the stack is done by a CPU-dependent
69 * support routine which usually needs to be written in assembly.
71 EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
74 * The scheduler tracks ready processes by enqueuing them in the
77 * \note Access to the list must occur while interrupts are disabled.
79 REGISTER List proc_ready_list;
82 * Holds a pointer to the TCB of the currently running process.
84 * \note User applications should use proc_current() to retrieve this value.
86 REGISTER Process *current_process;
88 /** The main process (the one that executes main()). */
89 static struct Process main_process;
/* Heap-backed process state below — presumably guarded by an elided
 * #if CONFIG_KERN_HEAP (see the matching #endif at the end of this run). */
94 * Local heap dedicated to allocate the memory used by the processes.
96 static HEAP_DEFINE_BUF(heap_buf, CONFIG_KERN_HEAP_SIZE);
97 static Heap proc_heap;
100 * Keep track of zombie processes (processes that are exiting and need to
101 * release some resources).
103 * \note Access to the list must occur while kernel preemption is disabled.
105 static List zombie_list;
107 #endif /* CONFIG_KERN_HEAP */
/*
 * Initialize the fields of a freshly-allocated Process control block.
 * NOTE(review): most of the body is elided in this chunk; only the
 * signature and two fragment lines are visible here.
 */
109 static void proc_initStruct(Process *proc)
111 /* Avoid warning for unused argument. */
114 #if CONFIG_KERN_SIGNALS
/*
 * Fragment of the scheduler initialization routine (the enclosing
 * function signature is elided in this chunk — presumably proc_init()).
 * Sets up the ready and zombie lists, the process heap, and promotes
 * the boot context into the main process.
 */
132 LIST_INIT(&proc_ready_list);
135 LIST_INIT(&zombie_list);
136 heap_init(&proc_heap, heap_buf, sizeof(heap_buf));
139 * We "promote" the current context into a real process. The only thing we have
140 * to do is create a PCB and make it current. We don't need to setup the stack
141 * pointer because it will be written the first time we switch to another process.
143 proc_initStruct(&main_process);
144 current_process = &main_process;
146 #if CONFIG_KERN_MONITOR
148 monitor_add(current_process, "main");
157 * Free all the resources of all zombie processes previously added to the zombie
/* list. Invoked lazily (from process creation) rather than by the scheduler. */
160 static void proc_freeZombies(void)
/* Atomically detach the next zombie from the list (loop/braces elided in this view). */
166 PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list));
/* Only stacks the kernel allocated itself (flagged PF_FREESTACK) go back to the heap. */
170 if (proc->flags & PF_FREESTACK)
/* The PCB was carved out of the same allocation, hence the extra PROC_SIZE_WORDS. */
172 PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base,
173 proc->stack_size + PROC_SIZE_WORDS * sizeof(cpu_stack_t)));
179 * Enqueue a process in the zombie list.
181 static void proc_addZombie(Process *proc)
184 #if CONFIG_KERN_PREEMPT
/* With preemption enabled, the caller must have already forbidden preemption. */
185 ASSERT(!proc_preemptAllowed());
/* Two link-node forms — presumably selected by an elided #if (priority vs.
 * plain node); confirm against the full source. */
189 node = &(proc)->link.link;
191 node = &(proc)->link;
193 LIST_ASSERT_VALID(&zombie_list);
194 ADDTAIL(&zombie_list, node);
200 * Create a new process, starting at the provided entry point.
205 * proc_new(entry, data, stacksize, stack)
207 * is a more convenient way to create a process, as you don't have to specify
210 * \return Process structure of the newly created process
211 * if successful, NULL otherwise.
213 struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
216 LOG_INFO("name=%s", name);
218 bool free_stack = false;
221 * Free up resources of a zombie process.
223 * We're implementing a kind of lazy garbage collector here for
224 * efficiency reasons: we can avoid to introduce overhead into another
225 * kernel task dedicated to free up resources (e.g., idle) and we're
226 * not introducing any overhead into the scheduler after a context
227 * switch (that would be *very* bad, because the scheduler runs with
230 * In this way we are able to release the memory of the zombie tasks
231 * without disabling IRQs and without introducing any significant
232 * overhead in any other kernel task.
236 /* Did the caller provide a stack for us? */
239 /* Did the caller specify the desired stack size? */
241 stack_size = KERN_MINSTACKSIZE;
243 /* Allocate stack dynamically */
244 PROC_ATOMIC(stack_base =
245 (cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
246 if (stack_base == NULL)
252 #else // CONFIG_KERN_HEAP
254 /* Stack must have been provided by the user */
255 ASSERT_VALID_PTR(stack_base);
258 #endif // CONFIG_KERN_HEAP
260 #if CONFIG_KERN_MONITOR
262 * Fill-in the stack with a special marker to help debugging.
263 * On 64bit platforms, CONFIG_KERN_STACKFILLCODE is larger
264 * than an int, so the (int) cast is required to silence the
265 * warning for truncating its size.
267 memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
270 /* Initialize the process control block */
271 if (CPU_STACK_GROWS_UPWARD)
/* Upward-growing stack: the PCB sits at the base, the stack above it. */
273 proc = (Process *)stack_base;
274 proc->stack = stack_base + PROC_SIZE_WORDS;
275 // On some architectures the stack must be aligned, so we do it.
276 proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
277 if (CPU_SP_ON_EMPTY_SLOT)
/* Downward-growing stack: the PCB sits at the top, the stack below it. */
282 proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
283 // On some architectures the stack must be aligned, so we do it.
284 proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
285 if (CPU_SP_ON_EMPTY_SLOT)
288 /* Ensure stack is aligned */
289 ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);
/* The PCB consumed part of the buffer; report only the usable stack size. */
291 stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
292 proc_initStruct(proc);
293 proc->user_data = data;
295 #if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
296 proc->stack_base = stack_base;
297 proc->stack_size = stack_size;
/* Mark heap-allocated stacks so proc_freeZombies() knows to release them. */
300 proc->flags |= PF_FREESTACK;
303 proc->user_entry = entry;
304 CPU_CREATE_NEW_STACK(proc->stack);
306 #if CONFIG_KERN_MONITOR
307 monitor_add(proc, name);
310 /* Add to ready list */
311 ATOMIC(SCHED_ENQUEUE(proc));
317 * Return the name of the specified process.
319 * NULL is a legal argument and will return the name "<NULL>".
321 const char *proc_name(struct Process *proc)
323 #if CONFIG_KERN_MONITOR
/* Process names are stored only when the monitor is compiled in. */
324 return proc ? proc->monitor.name : "<NULL>";
331 /// Return the name of the currently running process
/// (thin convenience wrapper around proc_name()).
332 const char *proc_currentName(void)
334 return proc_name(proc_current());
/** Give process \a proc a new \a name (a no-op unless the monitor is enabled). */
338 void proc_rename(struct Process *proc, const char *name)
340 #if CONFIG_KERN_MONITOR
341 monitor_rename(proc, name);
/* Silence unused-parameter warnings in the non-monitor build. */
343 (void)proc; (void)name;
350 * Change the scheduling priority of a process.
352 * Process priorities are signed ints, where a larger integer value means
353 * higher scheduling priority. The default priority for new processes is 0.
354 * The idle process runs with the lowest possible priority: INT_MIN.
356 * A process with a higher priority always preempts lower priority processes.
357 * Processes of equal priority share the CPU time according to a simple
358 * round-robin policy.
360 * As a general rule to maximize responsiveness, compute-bound processes
361 * should be assigned negative priorities and tight, interactive processes
362 * should be assigned positive priorities.
364 * To avoid interfering with system background activities such as input
365 * processing, application processes should remain within the range -10
368 void proc_setPri(struct Process *proc, int pri)
/* Nothing to do if the priority is unchanged. */
370 if (proc->link.pri == pri)
373 proc->link.pri = pri;
/* The running process is not queued in the ready list (proc_schedule()
 * removed it), so only other processes need to be re-enqueued. */
375 if (proc != current_process)
376 ATOMIC(sched_reenqueue(proc));
378 #endif // CONFIG_KERN_PRI
/** Fetch the entry point stored at creation time and start the new process. */
380 INLINE void proc_run(void)
382 void (*entry)(void) = current_process->user_entry;
384 LOG_INFO("New process starting at %p", entry);
389 * Entry point for all the processes.
391 void proc_entry(void)
394 * Return from a context switch assumes interrupts are disabled, so
395 * we need to explicitly re-enable them as soon as possible.
398 /* Call the actual process's entry point */
/* NOTE(review): the IRQ-enable and the call itself are elided in this view. */
404 * Terminate the current process
/* NOTE(review): the function signature (presumably proc_exit) is elided here. */
408 LOG_INFO("%p:%s", current_process, proc_currentName());
410 #if CONFIG_KERN_MONITOR
411 monitor_remove(current_process);
417 * Set the task as zombie, its resources will be freed in proc_new() in
418 * a lazy way, when another process will be created.
420 proc_addZombie(current_process);
/* From here on there is no current process; the scheduler will pick a new one. */
422 current_process = NULL;
433 * Get the pointer to the user data of the current process
/* (the \a data argument passed at process creation time). */
435 iptr_t proc_currentUserData(void)
437 return current_process->user_data;
441 * Call the scheduler and eventually replace the current running process.
443 void proc_schedule(void)
445 Process *old_process = current_process;
/* The ready list may only be touched with interrupts off (see its \note). */
447 IRQ_ASSERT_DISABLED();
449 /* Poll on the ready queue for the first ready process */
450 LIST_ASSERT_VALID(&proc_ready_list);
451 while (!(current_process = (struct Process *)list_remHead(&proc_ready_list)))
454 * Make sure we physically reenable interrupts here, no matter what
455 * the current task status is. This is important because if we
456 * are idle-spinning, we must allow interrupts, otherwise no
457 * process will ever wake up.
459 * During idle-spinning, an interrupt can occur and it may
460 * modify \p proc_ready_list. To ensure that the compiler reloads this
461 * variable on every loop iteration we call CPU_MEMORY_BARRIER.
462 * The memory barrier ensures that all variables used in this context
464 * \todo If there was a way to write sig_wait() so that it does not
465 * disable interrupts while waiting, there would not be any
474 * Optimization: don't switch contexts when the active process has not
477 if (LIKELY(current_process != old_process)) {
481 * Save context of old process and switch to new process. If
482 * there is no old process, we save the old stack pointer into
483 * a dummy variable that we ignore. In fact, this happens only
484 * when the old process has just exited.
486 asm_switch_context(&current_process->stack,
487 old_process ? &old_process->stack : &dummy);
489 /* This RET resumes the execution on the new process */
490 LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());