4 * This file is part of BeRTOS.
6 * Bertos is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * As a special exception, you may use this file as part of a free software
21 * library without restriction. Specifically, if other files instantiate
22 * templates or use macros or inline functions from this file, or you compile
23 * this file and link it with other files to produce an executable, this
24 * file does not by itself cause the resulting executable to be covered by
25 * the GNU General Public License. This exception does not however
26 * invalidate any other reasons why the executable file might be covered by
27 * the GNU General Public License.
29 * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
30 * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
33 * \brief Simple cooperative multitasking scheduler.
35 * \author Bernie Innocenti <bernie@codewiz.org>
36 * \author Stefano Fedrigo <aleph@develer.com>
42 #include "cfg/cfg_proc.h"
43 #define LOG_LEVEL KERN_LOG_LEVEL
44 #define LOG_FORMAT KERN_LOG_FORMAT
47 #include "cfg/cfg_monitor.h"
48 #include <cfg/macros.h> // ROUND_UP2
49 #include <cfg/module.h>
50 #include <cfg/depend.h> // CONFIG_DEPEND()
53 #include <cpu/types.h>
55 #include <cpu/frame.h>
58 #include <struct/heap.h>
61 #include <string.h> /* memset() */
/*
 * PROC_SIZE_WORDS: size of the Process control block, rounded up to a whole
 * number of stack words, so a PCB can be carved out of a stack buffer while
 * keeping the remaining stack correctly aligned.
 */
63 #define PROC_SIZE_WORDS (ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t))
66 * The scheduler tracks ready processes by enqueuing them in the
69 * \note Access to the list must occur while interrupts are disabled.
71 REGISTER List proc_ready_list;
74 * Holds a pointer to the TCB of the currently running process.
76 * \note User applications should use proc_current() to retrieve this value.
78 REGISTER Process *current_process;
80 /** The main process (the one that executes main()). */
81 static struct Process main_process;
86 * Local heap dedicated to allocate the memory used by the processes.
/* NOTE(review): heap_buf/proc_heap and zombie_list are presumably guarded by
 * a #if CONFIG_KERN_HEAP that is not visible in this extraction — the closing
 * #endif below confirms the conditional; verify the opening #if in the full
 * source. */
88 static HEAP_DEFINE_BUF(heap_buf, CONFIG_KERN_HEAP_SIZE);
89 static Heap proc_heap;
92 * Keep track of zombie processes (processes that are exiting and need to
93 * release some resources).
95 * \note Access to the list must occur while kernel preemption is disabled.
97 static List zombie_list;
99 #endif /* CONFIG_KERN_HEAP */
/*
 * Initialize the fields of a Process control block to their default state.
 * NOTE(review): only the signature and two fragments of the body are visible
 * in this chunk (braces and field initializers are missing from the
 * extraction); confirm the full initialization sequence against the complete
 * source before relying on these notes.
 */
101 static void proc_initStruct(Process *proc)
103 /* Avoid warning for unused argument. */
/* Signal bookkeeping fields only exist when signal support is compiled in. */
106 #if CONFIG_KERN_SIGNALS
/*
 * NOTE(review): the lines below are the interior of the kernel-process-module
 * init routine (presumably proc_init()); its opening signature/brace are not
 * visible in this extraction. It sets up the ready and zombie lists, the
 * process heap, and promotes the already-running context to the "main"
 * process so the scheduler has a current task from the very start.
 */
124 LIST_INIT(&proc_ready_list);
/* Heap-backed process allocation state — only meaningful with CONFIG_KERN_HEAP. */
127 LIST_INIT(&zombie_list);
128 heap_init(&proc_heap, heap_buf, sizeof(heap_buf));
131 * We "promote" the current context into a real process. The only thing we have
132 * to do is create a PCB and make it current. We don't need to setup the stack
133 * pointer because it will be written the first time we switch to another process.
135 proc_initStruct(&main_process);
136 current_process = &main_process;
/* Register the promoted context with the kernel debug monitor, if enabled. */
138 #if CONFIG_KERN_MONITOR
140 monitor_add(current_process, "main");
149 * Free all the resources of all zombie processes previously added to the zombie
/*
 * NOTE(review): the draining loop structure (and its termination when
 * list_remHead() returns NULL) is not visible in this extraction — confirm
 * against the full source. Each dequeue is done under PROC_ATOMIC because
 * the zombie list is shared with proc_exit()/proc_addZombie().
 */
152 static void proc_freeZombies(void)
158 PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list));
/* Only stacks the kernel itself allocated are returned to the heap;
 * caller-provided stacks (no PF_FREESTACK) are the caller's to manage. */
162 if (proc->flags & PF_FREESTACK)
/* The freed region covers the stack plus the PCB carved out of it
 * (PROC_SIZE_WORDS words) — must match the allocation in proc_new. */
164 PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base,
165 proc->stack_size + PROC_SIZE_WORDS * sizeof(cpu_stack_t)));
171 * Enqueue a process in the zombie list.
/*
 * NOTE(review): called on the exit path; preemption must already be disabled
 * (asserted below for the preemptive kernel). The two `node = ...` lines are
 * presumably the arms of a preempt/cooperative #if/#else whose directives are
 * missing from this extraction — the link node layout differs between the
 * two schedulers; verify against the full source.
 */
173 static void proc_addZombie(Process *proc)
176 #if CONFIG_KERN_PREEMPT
177 ASSERT(!proc_preemptAllowed());
181 node = &(proc)->link.link;
183 node = &(proc)->link;
185 LIST_ASSERT_VALID(&zombie_list);
186 ADDTAIL(&zombie_list, node);
189 #endif /* CONFIG_KERN_HEAP */
192 * Create a new process, starting at the provided entry point.
/*
 * NOTE(review): several lines of this function (opening brace, error-path
 * returns, some #if/#else arms, and the final return) are missing from this
 * extraction — read these notes against the full source.
 *
 * Contract visible here:
 *  - name      used only when CONFIG_KERN_MONITOR is on (hence UNUSED_ARG).
 *  - stack_base NULL => allocate from proc_heap (CONFIG_KERN_HEAP builds);
 *               otherwise the caller owns the stack.
 *  - stack_size 0   => defaults to KERN_MINSTACKSIZE for heap allocation.
 *  - The PCB is carved out of the stack buffer itself (low end when the
 *    stack grows upward, high end otherwise), then aligned.
 */
197 * proc_new(entry, data, stacksize, stack)
199 * is a more convenient way to create a process, as you don't have to specify
202 * \return Process structure of new created process
203 * if successful, NULL otherwise.
205 struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
208 LOG_INFO("name=%s", name);
/* Tracks whether the stack came from proc_heap and must be freed on exit. */
210 bool free_stack = false;
213 * Free up resources of a zombie process.
215 * We're implementing a kind of lazy garbage collector here for
216 * efficiency reasons: we can avoid to introduce overhead into another
217 * kernel task dedicated to free up resources (e.g., idle) and we're
218 * not introducing any overhead into the scheduler after a context
219 * switch (that would be *very* bad, because the scheduler runs with
222 * In this way we are able to release the memory of the zombie tasks
223 * without disabling IRQs and without introducing any significant
224 * overhead in any other kernel task.
228 /* Did the caller provide a stack for us? */
231 /* Did the caller specify the desired stack size? */
233 stack_size = KERN_MINSTACKSIZE;
235 /* Allocate stack dynamically */
236 PROC_ATOMIC(stack_base =
237 (cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
/* Allocation failure: NULL is returned to the caller (return statement
 * not visible in this extraction — confirm). */
238 if (stack_base == NULL)
244 #else // CONFIG_KERN_HEAP
246 /* Stack must have been provided by the user */
247 ASSERT_VALID_PTR(stack_base);
250 #endif // CONFIG_KERN_HEAP
252 #if CONFIG_KERN_MONITOR
254 * Fill-in the stack with a special marker to help debugging.
255 * On 64bit platforms, CONFIG_KERN_STACKFILLCODE is larger
256 * than an int, so the (int) cast is required to silence the
257 * warning for truncating its size.
259 memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
262 /* Initialize the process control block */
263 if (CPU_STACK_GROWS_UPWARD)
/* Upward-growing stack: PCB sits at the low end, stack starts just above. */
265 proc = (Process *)stack_base;
266 proc->stack = stack_base + PROC_SIZE_WORDS;
267 // On some architecture stack should be aligned, so we do it.
268 proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
269 if (CPU_SP_ON_EMPTY_SLOT)
/* Downward-growing stack: PCB sits at the high end, stack grows below it. */
274 proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
275 // On some architecture stack should be aligned, so we do it.
276 proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
277 if (CPU_SP_ON_EMPTY_SLOT)
280 /* Ensure stack is aligned */
281 ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);
/* The PCB occupies part of the buffer: shrink the usable stack accordingly.
 * proc_freeZombies() adds this back when releasing the memory. */
283 stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
284 proc_initStruct(proc);
285 proc->user_data = data;
287 #if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
288 proc->stack_base = stack_base;
289 proc->stack_size = stack_size;
/* PF_FREESTACK marks heap-allocated stacks for later release (guard for
 * free_stack presumably lives on a line missing from this extraction). */
292 proc->flags |= PF_FREESTACK;
295 proc->user_entry = entry;
/* Forge an initial stack frame so the first context switch "returns" into
 * the new process (via proc_entry). */
296 CPU_CREATE_NEW_STACK(proc->stack);
298 #if CONFIG_KERN_MONITOR
299 monitor_add(proc, name);
302 /* Add to ready list */
303 ATOMIC(SCHED_ENQUEUE(proc));
309 * Return the name of the specified process.
311 * NULL is a legal argument and will return the name "<NULL>".
/* NOTE(review): the non-monitor fallback arm (#else/return for builds
 * without CONFIG_KERN_MONITOR) is not visible in this extraction. */
313 const char *proc_name(struct Process *proc)
315 #if CONFIG_KERN_MONITOR
316 return proc ? proc->monitor.name : "<NULL>";
323 /// Return the name of the currently running process
/* Thin convenience wrapper: proc_name() already handles a NULL current
 * process (e.g. during proc_exit) by returning "<NULL>". */
324 const char *proc_currentName(void)
326 return proc_name(proc_current());
/*
 * Rename a process, for debugging purposes. With CONFIG_KERN_MONITOR off
 * this is a no-op; the casts below just silence unused-parameter warnings.
 * NOTE(review): the #else directive between the two arms is missing from
 * this extraction.
 */
330 void proc_rename(struct Process *proc, const char *name)
332 #if CONFIG_KERN_MONITOR
333 monitor_rename(proc, name);
335 (void)proc; (void)name;
342 * Change the scheduling priority of a process.
344 * Process priorities are signed ints, whereas a larger integer value means
345 * higher scheduling priority. The default priority for new processes is 0.
346 * The idle process runs with the lowest possible priority: INT_MIN.
348 * A process with a higher priority always preempts lower priority processes.
349 * Processes of equal priority share the CPU time according to a simple
350 * round-robin policy.
352 * As a general rule to maximize responsiveness, compute-bound processes
353 * should be assigned negative priorities and tight, interactive processes
354 * should be assigned positive priorities.
356 * To avoid interfering with system background activities such as input
357 * processing, application processes should remain within the range -10
360 void proc_setPri(struct Process *proc, int pri)
/* No-op when the priority is unchanged (early return presumably follows —
 * the return statement is not visible in this extraction). */
362 if (proc->link.pri == pri)
365 proc->link.pri = pri;
/* A ready process must be re-sorted into the ready list; the current
 * process is not enqueued, so no re-queue is needed for it. */
367 if (proc != current_process)
368 ATOMIC(sched_reenqueue(proc));
370 #endif // CONFIG_KERN_PRI
/*
 * Jump to the user entry point stored in the current PCB.
 * NOTE(review): the actual call through `entry` is on a line missing from
 * this extraction; only the fetch and the log are visible here.
 */
372 INLINE void proc_run(void)
374 void (*entry)(void) = current_process->user_entry;
376 LOG_INFO("New process starting at %p", entry);
381 * Entry point for all the processes.
/*
 * NOTE(review): the body (IRQ_ENABLE, the proc_run() call, and the final
 * proc_exit()) is missing from this extraction; only the comments survive.
 * The first switch into a new process arrives here with IRQs disabled.
 */
383 void proc_entry(void)
386 * Return from a context switch assumes interrupts are disabled, so
387 * we need to explicitly re-enable them as soon as possible.
390 /* Call the actual process's entry point */
396 * Terminate the current process
/*
 * NOTE(review): the function signature (presumably proc_exit) and the final
 * reschedule are missing from this extraction. Visible behavior: remove the
 * task from the monitor, park it on the zombie list for lazy cleanup by the
 * next proc_new(), and clear current_process so the scheduler must pick a
 * new task.
 */
400 LOG_INFO("%p:%s", current_process, proc_currentName());
402 #if CONFIG_KERN_MONITOR
403 monitor_remove(current_process);
409 * Set the task as zombie, its resources will be freed in proc_new() in
410 * a lazy way, when another process will be created.
412 proc_addZombie(current_process);
414 current_process = NULL;
424 * Call the scheduler and eventually replace the current running process.
/*
 * NOTE(review): the idle-spin loop body (IRQ enable/disable, memory barrier)
 * and the guard around proc_switchTo are on lines missing from this
 * extraction. Must be entered with interrupts disabled (asserted below).
 */
426 void proc_schedule(void)
428 Process *old_process = current_process;
430 IRQ_ASSERT_DISABLED();
432 /* Poll on the ready queue for the first ready process */
433 LIST_ASSERT_VALID(&proc_ready_list);
/* Busy-wait until some process becomes ready; assignment-in-condition is
 * intentional: current_process gets the dequeued head each iteration. */
434 while (!(current_process = (struct Process *)list_remHead(&proc_ready_list)))
437 * Make sure we physically reenable interrupts here, no matter what
438 * the current task status is. This is important because if we
439 * are idle-spinning, we must allow interrupts, otherwise no
440 * process will ever wake up.
442 * During idle-spinning, an interrupt can occur and it may
443 * modify \p proc_ready_list. To ensure that compiler reload this
444 * variable every while cycle we call CPU_MEMORY_BARRIER.
445 * The memory barrier ensure that all variables used in this context
447 * \todo If there was a way to write sig_wait() so that it does not
448 * disable interrupts while waiting, there would not be any
/* Context switch: saves old_process's registers, restores the new ones. */
456 proc_switchTo(current_process, old_process);
457 /* This RET resumes the execution on the new process */
458 LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());