/**
 * This file is part of BeRTOS.
 *
 * Bertos is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License. This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 *
 * \brief Simple preemptive multitasking scheduler.
 *
 * Preemption is explicitly regulated at the exit of each interrupt service
 * routine (ISR). Each task obtains a time quantum as soon as it is scheduled
 * on the CPU, and its quantum is decremented at each clock tick. The timer
 * frequency determines the system tick granularity and CONFIG_KERN_QUANTUM
 * the time-sharing interval.
 *
 * When the quantum expires, the handler proc_needPreempt() checks whether
 * preemption is enabled; if so, proc_schedule() is called, possibly replacing
 * the currently running thread with a different one.
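 *
 * As an illustration only (the real hook is architecture-specific glue, and
 * timer_isr_exit below is a hypothetical name), the exit path of the timer
 * ISR performs a check along these lines:
 *
 * \code
 * static void timer_isr_exit(void)
 * {
 *     if (proc_needPreempt())
 *         proc_preempt();   // re-enqueue the current process and reschedule
 *     // ...restore caller-save registers and return from the interrupt
 * }
 * \endcode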
 *
 * Preemption can be disabled and re-enabled via the proc_forbid() and
 * proc_permit() primitives. This is implemented using a global atomic counter:
 * while the counter is greater than 0 the task cannot be preempted; only when
 * the counter drops back to 0 can the task be preempted again.
 *
 * Preemption-disabled sections may be nested. Preemption is re-enabled when
 * the outermost preemption-disabled section completes.
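 *
 * For example, a short critical section protected against preemption by other
 * tasks (a minimal sketch; shared_counter is a hypothetical piece of shared
 * data, and note that this does not protect against ISRs):
 *
 * \code
 * proc_forbid();       // disable preemption (sections may be nested)
 * shared_counter++;    // safe from concurrent access by other processes
 * proc_permit();       // re-enable preemption when the outermost section ends
 * \endcode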
 *
 * Voluntary preemption still happens via proc_switch() or proc_yield(). The
 * former assumes the current process has already been added to a private wait
 * queue (e.g., on a semaphore or a signal), while the latter takes care of
 * adding the process to the ready queue itself.
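 *
 * For example, a CPU-bound loop can cooperate with the scheduler by yielding
 * periodically (a minimal sketch; work_pending() and do_work() are
 * hypothetical):
 *
 * \code
 * while (work_pending())
 * {
 *     do_work();
 *     proc_yield();    // go back to the ready queue and let others run
 * }
 * \endcode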
 *
 * The context switch is performed by CPU-dependent support routines. In case
 * of a voluntary preemption, the context switch routine only needs to
 * save/restore the callee-save registers (a voluntary preemption is
 * effectively a function call). Kernel preemption always happens inside a
 * signal/interrupt context, so all registers must be saved. For this reason,
 * the caller-save registers are saved in the entry point of each ISR. At the
 * ISR exit point, if a context switch must happen, we switch to user context
 * and call the same voluntary context switch routine, which also takes care
 * of saving/restoring the callee-save registers. On resume from the switch,
 * the interrupt exit point moves back to interrupt context, restores the
 * caller-save registers (saved in the ISR entry point) and returns from the
 * interrupt.
 *
 * \note Thread priority (if enabled by CONFIG_KERN_PRI) defines the order in
 * the \p proc_ready_list and the capability to deschedule a running process. A
 * low-priority thread cannot preempt a high-priority thread.
 *
 * A high-priority process can preempt a low-priority process immediately (the
 * latter is descheduled and replaced at the interrupt exit point). Processes
 * running at the same priority can be descheduled when their time quantum
 * expires.
 *
 * \note Sleeping while preemption is disabled falls back to a busy-wait sleep.
 * Voluntary preemption while preemption is disabled raises a kernel bug.
 *
 *
 * \brief Simple cooperative and preemptive multitasking scheduler.
 *
 * \author Bernie Innocenti <bernie@codewiz.org>
 * \author Stefano Fedrigo <aleph@develer.com>
 * \author Andrea Righi <arighi@develer.com>
 */
#include "cfg/cfg_proc.h"
#define LOG_LEVEL   KERN_LOG_LEVEL
#define LOG_FORMAT  KERN_LOG_FORMAT

#include "cfg/cfg_monitor.h"
#include <cfg/macros.h>    // ROUND_UP2
#include <cfg/module.h>
#include <cfg/depend.h>    // CONFIG_DEPEND()

#include <cpu/types.h>
#include <cpu/attr.h>
#include <cpu/frame.h>

#include <struct/heap.h>

#include <string.h>        /* memset() */
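
/*
 * Number of cpu_stack_t words needed to hold a Process structure, rounded up
 * to a whole number of stack words.
 */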
#define PROC_SIZE_WORDS (ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t))

/**
 * The scheduler tracks ready processes by enqueuing them in the
 * \p proc_ready_list.
 *
 * \note Access to the list must occur while interrupts are disabled.
 */
REGISTER List proc_ready_list;

/**
 * Holds a pointer to the TCB of the currently running process.
 *
 * \note User applications should use proc_current() to retrieve this value.
 */
REGISTER Process *current_process;

/** The main process (the one that executes main()). */
static struct Process main_process;

#if CONFIG_KERN_HEAP
/**
 * Local heap dedicated to allocating the memory used by the processes.
 */
static HEAP_DEFINE_BUF(heap_buf, CONFIG_KERN_HEAP_SIZE);
static Heap proc_heap;

/*
 * Keep track of zombie processes (processes that are exiting and need to
 * release some resources).
 *
 * \note Access to the list must occur while kernel preemption is disabled.
 */
static List zombie_list;
#endif /* CONFIG_KERN_HEAP */

/*
 * Check if the process context switch can be performed directly by the
 * architecture-dependent asm_switch_context() or if it must be delayed
 * because we're in the middle of an ISR.
 *
 * Return true if asm_switch_context() can be executed, false otherwise.
 *
 * NOTE: if an architecture does not implement IRQ_RUNNING() this macro
 * always evaluates to true.
 */
#define CONTEXT_SWITCH_FROM_ISR() (!IRQ_RUNNING())

/**
 * Save the context of the old process and switch to the new process.
 */
static void proc_context_switch(Process *next, Process *prev)
{
    cpu_stack_t dummy;

    if (UNLIKELY(next == prev))
        return;
    /*
     * If there is no old process, we save the old stack pointer into a
     * dummy variable that we ignore. In fact, this happens only when the
     * old process has just exited.
     */
    asm_switch_context(&next->stack, prev ? &prev->stack : &dummy);
}

static void proc_initStruct(Process *proc)
    /* Avoid warning for unused argument. */

#if CONFIG_KERN_SIGNALS


void proc_init(void)
    LIST_INIT(&proc_ready_list);

#if CONFIG_KERN_HEAP
    LIST_INIT(&zombie_list);
    heap_init(&proc_heap, heap_buf, sizeof(heap_buf));
#endif

    /*
     * We "promote" the current context into a real process. The only thing we
     * have to do is create a PCB and make it current. We don't need to set up
     * the stack pointer because it will be written the first time we switch
     * to another process.
     */
    proc_initStruct(&main_process);
    current_process = &main_process;

#if CONFIG_KERN_MONITOR
    monitor_add(current_process, "main");
#endif

/**
 * Free all the resources of all zombie processes previously added to the
 * zombie list.
 */
static void proc_freeZombies(void)
    PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list));

    if (proc->flags & PF_FREESTACK)
        PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base,
            proc->stack_size + PROC_SIZE_WORDS * sizeof(cpu_stack_t)));

/**
 * Enqueue a process in the zombie list.
 */
static void proc_addZombie(Process *proc)
#if CONFIG_KERN_PREEMPT
    ASSERT(!proc_preemptAllowed());
#endif

#if CONFIG_KERN_PRI
    node = &(proc)->link.link;
#else
    node = &(proc)->link;
#endif
    LIST_ASSERT_VALID(&zombie_list);
    ADDTAIL(&zombie_list, node);

#endif /* CONFIG_KERN_HEAP */

/**
 * Create a new process, starting at the provided entry point.
 *
 * \note The call
 * \code
 * proc_new(entry, data, stacksize, stack)
 * \endcode
 * is a more convenient way to create a process, as you don't have to specify
 * the name.
 *
 * \return Process structure of the newly created process if successful,
 *         NULL otherwise.
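 *
 * For example (a minimal sketch; the worker() body and its stack buffer are
 * hypothetical, and the stack must remain valid for the whole lifetime of the
 * process):
 * \code
 * static cpu_stack_t worker_stack[KERN_MINSTACKSIZE / sizeof(cpu_stack_t)];
 *
 * static void worker(void)
 * {
 *     for (;;)
 *     {
 *         // do some work, then let other processes run
 *         proc_yield();
 *     }
 * }
 *
 * // ...in the application startup code:
 * struct Process *p = proc_new(worker, 0, sizeof(worker_stack), worker_stack);
 * ASSERT(p);
 * \endcode
 */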
struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
    LOG_INFO("name=%s", name);

#if CONFIG_KERN_HEAP
    bool free_stack = false;

    /*
     * Free up the resources of any zombie processes.
     *
     * We're implementing a kind of lazy garbage collector here for
     * efficiency reasons: we avoid introducing overhead into another
     * kernel task dedicated to freeing up resources (e.g., idle) and we're
     * not introducing any overhead into the scheduler after a context
     * switch (that would be *very* bad, because the scheduler runs with
     * interrupts disabled).
     *
     * In this way we are able to release the memory of the zombie tasks
     * without disabling IRQs and without introducing any significant
     * overhead in any other kernel task.
     */
    proc_freeZombies();

    /* Did the caller provide a stack for us? */

        /* Did the caller specify the desired stack size? */
            stack_size = KERN_MINSTACKSIZE;

        /* Allocate the stack dynamically */
        PROC_ATOMIC(stack_base =
            (cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
        if (stack_base == NULL)
            return NULL;

#else // CONFIG_KERN_HEAP

    /* The stack must have been provided by the user */
    ASSERT_VALID_PTR(stack_base);

#endif // CONFIG_KERN_HEAP

#if CONFIG_KERN_MONITOR
    /*
     * Fill in the stack with a special marker to help debugging.
     * On 64-bit platforms, CONFIG_KERN_STACKFILLCODE is larger
     * than an int, so the (int) cast is required to silence the
     * warning about truncating its size.
     */
    memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
#endif

    /* Initialize the process control block */
    if (CPU_STACK_GROWS_UPWARD)
        proc = (Process *)stack_base;
        proc->stack = stack_base + PROC_SIZE_WORDS;
        // On some architectures the stack must be aligned, so align it here.
        proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
        if (CPU_SP_ON_EMPTY_SLOT)

        proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
        // On some architectures the stack must be aligned, so align it here.
        proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
        if (CPU_SP_ON_EMPTY_SLOT)

    /* Ensure the stack is aligned */
    ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);

    stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
    proc_initStruct(proc);
    proc->user_data = data;

#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
    proc->stack_base = stack_base;
    proc->stack_size = stack_size;

        proc->flags |= PF_FREESTACK;

    proc->user_entry = entry;
    CPU_CREATE_NEW_STACK(proc->stack);

#if CONFIG_KERN_MONITOR
    monitor_add(proc, name);
#endif

    /* Add to ready list */
    ATOMIC(SCHED_ENQUEUE(proc));

    return proc;

/**
 * Return the name of the specified process.
 *
 * NULL is a legal argument and will return the name "<NULL>".
 */
const char *proc_name(struct Process *proc)
#if CONFIG_KERN_MONITOR
    return proc ? proc->monitor.name : "<NULL>";


/// Return the name of the currently running process.
const char *proc_currentName(void)
    return proc_name(proc_current());


void proc_rename(struct Process *proc, const char *name)
#if CONFIG_KERN_MONITOR
    monitor_rename(proc, name);
#else
    (void)proc; (void)name;
#endif

#if CONFIG_KERN_PRI
/**
 * Change the scheduling priority of a process.
 *
 * Process priorities are signed ints, where a larger integer value means a
 * higher scheduling priority. The default priority for new processes is 0.
 * The idle process runs with the lowest possible priority: INT_MIN.
 *
 * A process with a higher priority always preempts lower-priority processes.
 * Processes of equal priority share the CPU time according to a simple
 * round-robin policy.
 *
 * As a general rule to maximize responsiveness, compute-bound processes
 * should be assigned negative priorities, and tight, interactive processes
 * should be assigned positive priorities.
 *
 * To avoid interfering with system background activities such as input
 * processing, application processes should remain within the range -10
 * and +10.
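 *
 * For example (a minimal sketch; ui_proc and the chosen values are
 * hypothetical):
 * \code
 * proc_setPri(ui_proc, 5);           // interactive process: raise priority
 * proc_setPri(proc_current(), -5);   // compute-bound process: lower priority
 * \endcode
 */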
void proc_setPri(struct Process *proc, int pri)
{
    if (proc->link.pri == pri)
        return;

    proc->link.pri = pri;

    if (proc != current_process)
        ATOMIC(sched_reenqueue(proc));
}
#endif // CONFIG_KERN_PRI

INLINE void proc_run(void)
    void (*entry)(void) = current_process->user_entry;

    LOG_INFO("New process starting at %p", entry);


/**
 * Entry point for all the processes.
 */
void proc_entry(void)
    /*
     * Returning from a context switch assumes interrupts are disabled, so
     * we need to explicitly re-enable them as soon as possible.
     */

    /* Call the actual process's entry point */

/**
 * Terminate the current process.
 */
void proc_exit(void)
    LOG_INFO("%p:%s", current_process, proc_currentName());

#if CONFIG_KERN_MONITOR
    monitor_remove(current_process);
#endif

    /*
     * Mark the task as a zombie; its resources will be freed lazily by
     * proc_new(), when another process is created.
     */
    proc_addZombie(current_process);

    current_process = NULL;

/**
 * Call the scheduler and possibly replace the currently running process.
 */
static void proc_schedule(void)
    Process *old_process = current_process;

    IRQ_ASSERT_DISABLED();

    /* Poll the ready queue for the first ready process */
    LIST_ASSERT_VALID(&proc_ready_list);
    while (!(current_process = (struct Process *)list_remHead(&proc_ready_list)))
        /*
         * Make sure we physically re-enable interrupts here, no matter what
         * the current task status is. This is important because if we
         * are idle-spinning, we must allow interrupts, otherwise no
         * process will ever wake up.
         *
         * During idle-spinning, an interrupt can occur and it may
         * modify \p proc_ready_list. To ensure that the compiler reloads
         * this variable on every iteration of the while loop, we call
         * CPU_MEMORY_BARRIER. The memory barrier ensures that all variables
         * used in this context are refreshed before the next iteration.
         *
         * \todo If there was a way to write sig_wait() so that it does not
         * disable interrupts while waiting, there would not be any need
         * for this.
         */

    if (CONTEXT_SWITCH_FROM_ISR())
        proc_context_switch(current_process, old_process);
    /* This RET resumes the execution of the new process */
    LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());

#if CONFIG_KERN_PREEMPT

/* Global preemption nesting counter */
cpu_atomic_t preempt_count;

/*
 * The time-sharing interval: when a process is scheduled on a CPU it gets
 * CONFIG_KERN_QUANTUM clock ticks. When these ticks expire and preemption is
 * enabled, a new process is selected to run.
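 *
 * For instance (illustrative numbers only), with a 1 kHz system tick and
 * CONFIG_KERN_QUANTUM set to 10, a CPU-bound process runs for at most about
 * 10 ms before another ready process of equal priority is given the CPU.
 */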

/**
 * Check if we need to schedule another task.
 */
bool proc_needPreempt(void)
{
    if (UNLIKELY(current_process == NULL))
        return false;
    if (!proc_preemptAllowed())
        return false;
    if (LIST_EMPTY(&proc_ready_list))
        return false;

    return preempt_quantum() ? prio_next() > prio_curr() :
            prio_next() >= prio_curr();
}

/**
 * Preempt the current task.
 */
void proc_preempt(void)
    IRQ_ASSERT_DISABLED();
    ASSERT(current_process);

    /* Perform the kernel preemption */
    LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
    /* We are inside an IRQ context, so ATOMIC is not needed here */
    SCHED_ENQUEUE(current_process);
    preempt_reset_quantum();
    proc_schedule();

#endif /* CONFIG_KERN_PREEMPT */

/* Immediately switch to a particular process */
static void proc_switchTo(Process *proc)
{
    Process *old_process = current_process;

    SCHED_ENQUEUE(current_process);
    preempt_reset_quantum();
    current_process = proc;
    proc_context_switch(current_process, old_process);
}

/**
 * Give control of the CPU to another process.
 *
 * \note Assumes the current process has already been added to a wait queue.
 *
 * \warning This should be considered an internal kernel function: even though
 * calling it from application code is allowed, doing so is strongly
 * discouraged.
 */
void proc_switch(void)
    ASSERT(proc_preemptAllowed());

    preempt_reset_quantum();

/**
 * Immediately wake up a process, dispatching it to the CPU.
 */
void proc_wakeup(Process *proc)
    ASSERT(proc_preemptAllowed());
    ASSERT(current_process);
    IRQ_ASSERT_DISABLED();

    if (prio_proc(proc) >= prio_curr())

        SCHED_ENQUEUE_HEAD(proc);

/**
 * Voluntarily release the CPU.
 */
void proc_yield(void)
    /*
     * Voluntary preemption while preemption is disabled is considered
     * illegal, as it is not very useful in practice.
     *
     * ASSERT if it happens.
     */
    ASSERT(proc_preemptAllowed());
    IRQ_ASSERT_ENABLED();

    proc = (struct Process *)list_remHead(&proc_ready_list);