4 * This file is part of BeRTOS.
6 * Bertos is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * As a special exception, you may use this file as part of a free software
21 * library without restriction. Specifically, if other files instantiate
22 * templates or use macros or inline functions from this file, or you compile
23 * this file and link it with other files to produce an executable, this
24 * file does not by itself cause the resulting executable to be covered by
25 * the GNU General Public License. This exception does not however
26 * invalidate any other reasons why the executable file might be covered by
27 * the GNU General Public License.
29 * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
30 * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
33 * \brief Simple cooperative multitasking scheduler.
35 * \author Bernie Innocenti <bernie@codewiz.org>
36 * \author Stefano Fedrigo <aleph@develer.com>
42 #include "cfg/cfg_proc.h"
43 #define LOG_LEVEL KERN_LOG_LEVEL
44 #define LOG_FORMAT KERN_LOG_FORMAT
47 #include "cfg/cfg_monitor.h"
48 #include <cfg/macros.h> // ROUND_UP2
49 #include <cfg/module.h>
50 #include <cfg/depend.h> // CONFIG_DEPEND()
53 #include <cpu/types.h>
55 #include <cpu/frame.h>
58 #include <struct/heap.h>
61 #include <string.h> /* memset() */
/*
 * Size of the Process control block expressed in cpu_stack_t-sized words,
 * rounded up so a PCB carved out of a stack buffer keeps the remaining
 * stack slot-aligned.
 */
63 #define PROC_SIZE_WORDS (ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t))
66 * The scheduler tracks ready processes by enqueuing them in the
69 * \note Access to the list must occur while interrupts are disabled.
71 REGISTER List proc_ready_list;
74 * Holds a pointer to the TCB of the currently running process.
76 * \note User applications should use proc_current() to retrieve this value.
78 REGISTER Process *current_process;
80 /** The main process (the one that executes main()). */
81 static struct Process main_process;
/* NOTE(review): the declarations below are closed by the
 * "#endif / * CONFIG_KERN_HEAP * /" on original line 99; the matching #if
 * is not visible in this listing — presumably these exist only when the
 * kernel heap is enabled. TODO confirm against the full file. */
86 * Local heap dedicated to allocate the memory used by the processes.
88 static HEAP_DEFINE_BUF(heap_buf, CONFIG_KERN_HEAP_SIZE);
89 static Heap proc_heap;
92 * Keep track of zombie processes (processes that are exiting and need to
93 * release some resources).
95 * \note Access to the list must occur while kernel preemption is disabled.
97 static List zombie_list;
99 #endif /* CONFIG_KERN_HEAP */
/*
 * Initialize the fields of a freshly created (or promoted) Process structure.
 * NOTE(review): the body is only partially visible in this listing — the
 * unused-argument suppression and the CONFIG_KERN_SIGNALS branch suggest
 * per-feature field setup; confirm against the full file.
 */
101 static void proc_initStruct(Process *proc)
103 /* Avoid warning for unused argument. */
106 #if CONFIG_KERN_SIGNALS
/*
 * NOTE(review): these lines belong to the scheduler initialization routine
 * (its signature falls in a gap of this listing). It resets the ready and
 * zombie lists, initializes the process heap, and promotes the startup
 * context into the "main" process.
 */
124 LIST_INIT(&proc_ready_list);
127 LIST_INIT(&zombie_list);
128 heap_init(&proc_heap, heap_buf, sizeof(heap_buf));
131 * We "promote" the current context into a real process. The only thing we have
132 * to do is create a PCB and make it current. We don't need to setup the stack
133 * pointer because it will be written the first time we switch to another process.
135 proc_initStruct(&main_process);
136 current_process = &main_process;
138 #if CONFIG_KERN_MONITOR
/* Register the promoted context with the kernel monitor under the name "main". */
140 monitor_add(current_process, "main");
151 * Free all the resources of all zombie processes previously added to the zombie
154 static void proc_freeZombies(void)
/* Atomically dequeue the next zombie; list access needs protection (see the
 * zombie_list note above: kernel preemption must be disabled). */
160 PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list));
/* Only release the stack when the kernel allocated it (PF_FREESTACK is set
 * for heap-allocated stacks); user-provided stacks are the caller's to free. */
164 if (proc->flags & PF_FREESTACK)
/* The PCB itself lives inside the same allocation as the stack, so the
 * freed size includes the PROC_SIZE_WORDS reserved for the Process struct. */
166 PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base,
167 proc->stack_size + PROC_SIZE_WORDS * sizeof(cpu_stack_t)));
173 * Enqueue a process in the zombie list.
175 static void proc_addZombie(Process *proc)
178 #if CONFIG_KERN_PREEMPT
/* Must be called with preemption disabled: zombie_list is not IRQ/preempt safe. */
179 ASSERT(!proc_preemptAllowed());
/* NOTE(review): the two assignments below look like alternate branches of a
 * preprocessor conditional (the #if/#else lines fall in a listing gap) —
 * presumably selecting the node layout with/without priority support.
 * TODO confirm against the full file. */
183 node = &(proc)->link.link;
185 node = &(proc)->link;
187 LIST_ASSERT_VALID(&zombie_list);
188 ADDTAIL(&zombie_list, node);
191 #endif /* CONFIG_KERN_HEAP */
194 * Create a new process, starting at the provided entry point.
199 * proc_new(entry, data, stacksize, stack)
201 * is a more convenient way to create a process, as you don't have to specify
204 * \return Process structure of new created process
205 * if successful, NULL otherwise.
207 struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
210 LOG_INFO("name=%s", name);
/* Tracks whether the stack was heap-allocated here (and must be freed on
 * exit); presumably set to true on the allocation path in lines not shown
 * in this listing — TODO confirm. */
212 bool free_stack = false;
215 * Free up resources of a zombie process.
217 * We're implementing a kind of lazy garbage collector here for
218 * efficiency reasons: we can avoid to introduce overhead into another
219 * kernel task dedicated to free up resources (e.g., idle) and we're
220 * not introducing any overhead into the scheduler after a context
221 * switch (that would be *very* bad, because the scheduler runs with
224 * In this way we are able to release the memory of the zombie tasks
225 * without disabling IRQs and without introducing any significant
226 * overhead in any other kernel task.
230 /* Did the caller provide a stack for us? */
233 /* Did the caller specify the desired stack size? */
235 stack_size = KERN_MINSTACKSIZE;
237 /* Allocate stack dynamically */
238 PROC_ATOMIC(stack_base =
239 (cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
/* Allocation failure propagates to the caller as NULL (see \return above). */
240 if (stack_base == NULL)
246 #else // CONFIG_KERN_HEAP
248 /* Stack must have been provided by the user */
249 ASSERT_VALID_PTR(stack_base);
252 #endif // CONFIG_KERN_HEAP
254 #if CONFIG_KERN_MONITOR
256 * Fill-in the stack with a special marker to help debugging.
257 * On 64bit platforms, CONFIG_KERN_STACKFILLCODE is larger
258 * than an int, so the (int) cast is required to silence the
259 * warning for truncating its size.
261 memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
264 /* Initialize the process control block */
/* The PCB is carved out of the stack buffer itself: at the base when the
 * stack grows upward, at the top when it grows downward. */
265 if (CPU_STACK_GROWS_UPWARD)
267 proc = (Process *)stack_base;
268 proc->stack = stack_base + PROC_SIZE_WORDS;
269 // On some architecture stack should be aligned, so we do it.
270 proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
271 if (CPU_SP_ON_EMPTY_SLOT)
/* Downward-growing stack: PCB sits at the high end, SP starts just below it. */
276 proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
277 // On some architecture stack should be aligned, so we do it.
278 proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
279 if (CPU_SP_ON_EMPTY_SLOT)
282 /* Ensure stack is aligned */
283 ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);
/* Account for the space consumed by the embedded PCB. */
285 stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
286 proc_initStruct(proc);
287 proc->user_data = data;
289 #if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
290 proc->stack_base = stack_base;
291 proc->stack_size = stack_size;
/* Mark the stack as kernel-owned so proc_freeZombies() releases it later. */
294 proc->flags |= PF_FREESTACK;
297 proc->user_entry = entry;
/* Build the initial context frame so the first switch lands in proc_entry. */
298 CPU_CREATE_NEW_STACK(proc->stack);
300 #if CONFIG_KERN_MONITOR
301 monitor_add(proc, name);
304 /* Add to ready list */
305 ATOMIC(SCHED_ENQUEUE(proc));
311 * Return the name of the specified process.
313 * NULL is a legal argument and will return the name "<NULL>".
315 const char *proc_name(struct Process *proc)
317 #if CONFIG_KERN_MONITOR
318 return proc ? proc->monitor.name : "<NULL>";
/* NOTE(review): the #else branch (monitor disabled) falls in a gap of this
 * listing — presumably it returns a placeholder string; confirm. */
325 /// Return the name of the currently running process
/* Thin convenience wrapper: proc_name() already handles a NULL process. */
326 const char *proc_currentName(void)
328 return proc_name(proc_current());
/*
 * Rename the given process. Only effective when the kernel monitor is
 * compiled in; otherwise both arguments are deliberately ignored.
 */
332 void proc_rename(struct Process *proc, const char *name)
334 #if CONFIG_KERN_MONITOR
335 monitor_rename(proc, name);
/* Monitor disabled: silence unused-parameter warnings. */
337 (void)proc; (void)name;
344 * Change the scheduling priority of a process.
346 * Process priorities are signed ints, whereas a larger integer value means
347 * higher scheduling priority. The default priority for new processes is 0.
348 * The idle process runs with the lowest possible priority: INT_MIN.
350 * A process with a higher priority always preempts lower priority processes.
351 * Processes of equal priority share the CPU time according to a simple
352 * round-robin policy.
354 * As a general rule to maximize responsiveness, compute-bound processes
355 * should be assigned negative priorities and tight, interactive processes
356 * should be assigned positive priorities.
358 * To avoid interfering with system background activities such as input
359 * processing, application processes should remain within the range -10
362 void proc_setPri(struct Process *proc, int pri)
/* No-op when the priority is unchanged. */
364 if (proc->link.pri == pri)
367 proc->link.pri = pri;
/* The running process is not on the ready list, so only other processes
 * need to be atomically re-sorted into their new priority position. */
369 if (proc != current_process)
370 ATOMIC(sched_reenqueue(proc));
372 #endif // CONFIG_KERN_PRI
/*
 * Invoke the user-supplied entry point of the current process.
 * NOTE(review): the actual call falls in a gap of this listing; only the
 * entry-pointer fetch and the log line are visible.
 */
374 INLINE void proc_run(void)
376 void (*entry)(void) = current_process->user_entry;
378 LOG_INFO("New process starting at %p", entry);
383 * Entry point for all the processes.
385 void proc_entry(void)
388 * Return from a context switch assumes interrupts are disabled, so
389 * we need to explicitly re-enable them as soon as possible.
392 /* Call the actual process's entry point */
398 * Terminate the current process
402 LOG_INFO("%p:%s", current_process, proc_currentName());
404 #if CONFIG_KERN_MONITOR
/* Deregister from the monitor before the PCB becomes a zombie. */
405 monitor_remove(current_process);
411 * Set the task as zombie, its resources will be freed in proc_new() in
412 * a lazy way, when another process will be created.
414 proc_addZombie(current_process);
/* From here on there is no current process; the scheduler must pick another. */
416 current_process = NULL;
427 * Get the pointer to the user data of the current process
/* Simple accessor for the iptr_t stored by proc_new_with_name(). */
429 iptr_t proc_currentUserData(void)
431 return current_process->user_data;