4 * This file is part of BeRTOS.
6 * Bertos is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * As a special exception, you may use this file as part of a free software
21 * library without restriction. Specifically, if other files instantiate
22 * templates or use macros or inline functions from this file, or you compile
23 * this file and link it with other files to produce an executable, this
24 * file does not by itself cause the resulting executable to be covered by
25 * the GNU General Public License. This exception does not however
26 * invalidate any other reasons why the executable file might be covered by
27 * the GNU General Public License.
29 * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
30 * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
33 * \brief Simple cooperative multitasking scheduler.
35 * \author Bernie Innocenti <bernie@codewiz.org>
36 * \author Stefano Fedrigo <aleph@develer.com>
42 #include "cfg/cfg_proc.h"
43 #define LOG_LEVEL KERN_LOG_LEVEL
44 #define LOG_FORMAT KERN_LOG_FORMAT
47 #include "cfg/cfg_monitor.h"
48 #include <cfg/macros.h> // ROUND_UP2
49 #include <cfg/module.h>
50 #include <cfg/depend.h> // CONFIG_DEPEND()
53 #include <cpu/types.h>
55 #include <cpu/frame.h>
58 #include <struct/heap.h>
61 #include <string.h> /* memset() */
63 #define PROC_SIZE_WORDS (ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t))
66 * The scheduer tracks ready processes by enqueuing them in the
69 * \note Access to the list must occur while interrupts are disabled.
71 REGISTER List proc_ready_list;
74 * Holds a pointer to the TCB of the currently running process.
76 * \note User applications should use proc_current() to retrieve this value.
78 REGISTER Process *current_process;
80 /** The main process (the one that executes main()). */
81 static struct Process main_process;
86 * Local heap dedicated to allocate the memory used by the processes.
88 static HEAP_DEFINE_BUF(heap_buf, CONFIG_KERN_HEAP_SIZE);
89 static Heap proc_heap;
92 * Keep track of zombie processes (processes that are exiting and need to
93 * release some resources).
95 * \note Access to the list must occur while kernel preemption is disabled.
97 static List zombie_list;
99 #endif /* CONFIG_KERN_HEAP */
/*
 * Initialize the fields of a Process control block.
 * Called on &main_process during bootstrap and on each newly created
 * process before it is enqueued (see proc_new_with_name()).
 * NOTE(review): only a fragment of this function is visible here;
 * interior lines are missing from this view.
 */
101 static void proc_initStruct(Process *proc)
103 /* Avoid warning for unused argument. */
106 #if CONFIG_KERN_SIGNALS
/*
 * Scheduler bootstrap fragment (body of proc_init): set up the ready
 * and zombie lists, initialize the process heap, then promote the
 * currently executing (boot) context into the main process.
 * NOTE(review): the function header and several interior lines are
 * missing from this view.
 */
124 LIST_INIT(&proc_ready_list);
127 LIST_INIT(&zombie_list);
128 heap_init(&proc_heap, heap_buf, sizeof(heap_buf));
131 * We "promote" the current context into a real process. The only thing we have
132 * to do is create a PCB and make it current. We don't need to setup the stack
133 * pointer because it will be written the first time we switch to another process.
135 proc_initStruct(&main_process);
136 current_process = &main_process;
138 #if CONFIG_KERN_MONITOR
140 monitor_add(current_process, "main");
143 #if CONFIG_KERN_PREEMPT
154 * Free all the resources of all zombie processes previously added to the zombie
157 static void proc_freeZombies(void)
/* Atomically pop the next zombie off the list; loop structure not visible here. */
163 PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list));
/* Only release the stack if it was heap-allocated by proc_new_with_name(). */
167 if (proc->flags & PF_FREESTACK)
/* The freed region covers the stack plus the PCB carved out of the same buffer. */
169 PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base,
170 proc->stack_size + PROC_SIZE_WORDS * sizeof(cpu_stack_t)));
176 * Enqueue a process in the zombie list.
178 static void proc_addZombie(Process *proc)
181 #if CONFIG_KERN_PREEMPT
/* Caller must have preemption disabled; the zombie list is not IRQ-protected. */
182 ASSERT(!proc_preemptAllowed());
/* The link node lives at a different offset when priority scheduling is on. */
186 node = &(proc)->link.link;
188 node = &(proc)->link;
190 LIST_ASSERT_VALID(&zombie_list);
191 ADDTAIL(&zombie_list, node);
194 #endif /* CONFIG_KERN_HEAP */
197 * Create a new process, starting at the provided entry point.
202 * proc_new(entry, data, stacksize, stack)
204 * is a more convenient way to create a process, as you don't have to specify
207 * \return Process structure of new created process
208 * if successful, NULL otherwise.
210 struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
213 LOG_INFO("name=%s", name);
215 bool free_stack = false;
218 * Free up resources of a zombie process.
220 * We're implementing a kind of lazy garbage collector here for
221 * efficiency reasons: we can avoid to introduce overhead into another
222 * kernel task dedicated to free up resources (e.g., idle) and we're
223 * not introducing any overhead into the scheduler after a context
224 * switch (that would be *very* bad, because the scheduler runs with
227 * In this way we are able to release the memory of the zombie tasks
228 * without disabling IRQs and without introducing any significant
229 * overhead in any other kernel task.
233 /* Did the caller provide a stack for us? */
236 /* Did the caller specify the desired stack size? */
238 stack_size = KERN_MINSTACKSIZE;
240 /* Allocate stack dynamically */
241 PROC_ATOMIC(stack_base =
242 (cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
243 if (stack_base == NULL)
249 #else // CONFIG_KERN_HEAP
251 /* Stack must have been provided by the user */
252 ASSERT_VALID_PTR(stack_base);
255 #endif // CONFIG_KERN_HEAP
257 #if CONFIG_KERN_MONITOR
259 * Fill-in the stack with a special marker to help debugging.
260 * On 64bit platforms, CONFIG_KERN_STACKFILLCODE is larger
261 * than an int, so the (int) cast is required to silence the
262 * warning for truncating its size.
264 memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
267 /* Initialize the process control block */
/* The PCB is carved out of the caller's stack buffer itself: at the bottom
 * when the stack grows upward, at the top otherwise. */
268 if (CPU_STACK_GROWS_UPWARD)
270 proc = (Process *)stack_base;
271 proc->stack = stack_base + PROC_SIZE_WORDS;
272 // On some architecture stack should be aligned, so we do it.
273 proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
274 if (CPU_SP_ON_EMPTY_SLOT)
279 proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
280 // On some architecture stack should be aligned, so we do it.
281 proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
282 if (CPU_SP_ON_EMPTY_SLOT)
285 /* Ensure stack is aligned */
286 ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);
/* Account for the space the PCB stole from the usable stack. */
288 stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
289 proc_initStruct(proc);
290 proc->user_data = data;
292 #if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
293 proc->stack_base = stack_base;
294 proc->stack_size = stack_size;
/* Mark heap-allocated stacks so proc_freeZombies() releases them later. */
297 proc->flags |= PF_FREESTACK;
300 proc->user_entry = entry;
301 CPU_CREATE_NEW_STACK(proc->stack);
303 #if CONFIG_KERN_MONITOR
304 monitor_add(proc, name);
307 /* Add to ready list */
308 ATOMIC(SCHED_ENQUEUE(proc));
314 * Return the name of the specified process.
316 * NULL is a legal argument and will return the name "<NULL>".
/* NOTE(review): the non-monitor branch of this function is not visible here. */
318 const char *proc_name(struct Process *proc)
320 #if CONFIG_KERN_MONITOR
321 return proc ? proc->monitor.name : "<NULL>";
328 /// Return the name of the currently running process
329 const char *proc_currentName(void)
331 return proc_name(proc_current());
/*
 * Rename a process; a no-op unless the kernel monitor is compiled in.
 */
335 void proc_rename(struct Process *proc, const char *name)
337 #if CONFIG_KERN_MONITOR
338 monitor_rename(proc, name);
/* Silence unused-parameter warnings in the monitor-disabled build. */
340 (void)proc; (void)name;
347 * Change the scheduling priority of a process.
349 * Process priorities are signed ints, where a larger integer value means
350 * higher scheduling priority. The default priority for new processes is 0.
351 * The idle process runs with the lowest possible priority: INT_MIN.
353 * A process with a higher priority always preempts lower priority processes.
354 * Processes of equal priority share the CPU time according to a simple
355 * round-robin policy.
357 * As a general rule to maximize responsiveness, compute-bound processes
358 * should be assigned negative priorities and tight, interactive processes
359 * should be assigned positive priorities.
361 * To avoid interfering with system background activities such as input
362 * processing, application processes should remain within the range -10
365 void proc_setPri(struct Process *proc, int pri)
/* Nothing to do if the priority is unchanged. */
367 if (proc->link.pri == pri)
370 proc->link.pri = pri;
/* The current process is not on the ready list, so only re-enqueue others. */
372 if (proc != current_process)
373 ATOMIC(sched_reenqueue(proc));
375 #endif // CONFIG_KERN_PRI
/*
 * Invoke the current process's user entry point.
 * NOTE(review): the actual call to entry() is on a line not visible here.
 */
377 INLINE void proc_run(void)
379 void (*entry)(void) = current_process->user_entry;
381 LOG_INFO("New process starting at %p", entry);
386 * Entry point for all the processes.
388 void proc_entry(void)
391 * Return from a context switch assumes interrupts are disabled, so
392 * we need to explicitly re-enable them as soon as possible.
395 /* Call the actual process's entry point */
401 * Terminate the current process
/* NOTE(review): the function header (presumably proc_exit) and several
 * interior lines are missing from this view. */
405 LOG_INFO("%p:%s", current_process, proc_currentName());
407 #if CONFIG_KERN_MONITOR
408 monitor_remove(current_process);
414 * Set the task as zombie, its resources will be freed in proc_new() in
415 * a lazy way, when another process will be created.
417 proc_addZombie(current_process);
/* From here on there is no current process; the scheduler must pick another. */
419 current_process = NULL;
430 * Get the pointer to the user data of the current process
432 iptr_t proc_currentUserData(void)
434 return current_process->user_data;