/**
 * \file
 *
 * This file is part of BeRTOS.
 *
 * Bertos is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License.  This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 *
 * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
 * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
 *
 * \brief Bertos Kernel core (Process scheduler).
 *
 * \author Bernie Innocenti <bernie@codewiz.org>
 *
 * $WIZ$ module_name = "kernel"
 * $WIZ$ module_configuration = "bertos/cfg/cfg_proc.h"
 * $WIZ$ module_depends = "switch_ctx"
 */
46 #include "cfg/cfg_proc.h"
47 #include "cfg/cfg_monitor.h"
49 #include <cfg/compiler.h>
51 #if CONFIG_KERN_PREEMPT
52 #include <cfg/debug.h> // ASSERT()
55 #include <cpu/types.h> // cpu_stack_t
56 #include <cpu/frame.h> // CPU_SAVED_REGS_CNT
59 * Forward declaration. The definition of struct Process is private to the
60 * scheduler and hidden in proc_p.h.
65 struct Process *proc_new_with_name(const char *name, void (*entry)(void), iptr_t data, size_t stacksize, cpu_stack_t *stack);
67 #if !CONFIG_KERN_MONITOR
68 #define proc_new(entry,data,size,stack) proc_new_with_name(NULL,(entry),(data),(size),(stack))
70 #define proc_new(entry,data,size,stack) proc_new_with_name(#entry,(entry),(data),(size),(stack))
74 void proc_yield(void);
75 void proc_rename(struct Process *proc, const char *name);
76 const char *proc_name(struct Process *proc);
77 const char *proc_currentName(void);
78 iptr_t proc_currentUserData(void);
80 int proc_testSetup(void);
81 int proc_testRun(void);
82 int proc_testTearDown(void);
85 * Return the context structure of the currently running process.
87 * The details of the Process structure are private to the scheduler.
88 * The address returned by this function is an opaque pointer that can
89 * be passed as an argument to other process-related functions.
91 INLINE struct Process *proc_current(void)
93 extern struct Process *CurrentProcess;
94 return CurrentProcess;
98 void proc_setPri(struct Process *proc, int pri);
100 INLINE void proc_setPri(UNUSED_ARG(struct Process *,proc), UNUSED_ARG(int, pri))
106 * Disable preemptive task switching.
108 * The scheduler maintains a global nesting counter. Task switching is
109 * effectively re-enabled only when the number of calls to proc_permit()
110 * matches the number of calls to proc_forbid().
112 * \note Calling functions that could sleep while task switching is disabled
113 * is dangerous and unsupported.
115 * \note calling proc_forbid() from within an interrupt is illegal and
118 * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
119 * very efficient locking primitive in simple but performance-critical
120 * situations. In all other cases, semaphores offer a more flexible and
121 * fine-grained locking primitive.
125 INLINE void proc_forbid(void)
127 #if CONFIG_KERN_PREEMPT
128 extern cpu_atomic_t _preempt_forbid_cnt;
130 * We don't need to protect the counter against other processes.
131 * The reason why is a bit subtle.
133 * If a process gets here, preempt_forbid_cnt can be either 0,
134 * or != 0. In the latter case, preemption is already disabled
135 * and no concurrency issues can occur.
137 * In the former case, we could be preempted just after reading the
138 * value 0 from memory, and a concurrent process might, in fact,
139 * bump the value of preempt_forbid_cnt under our nose!
141 * BUT: if this ever happens, then we won't get another chance to
142 * run until the other process calls proc_permit() to re-enable
143 * preemption. At this point, the value of preempt_forbid_cnt
144 * must be back to 0, and thus what we had originally read from
145 * memory happens to be valid.
147 * No matter how hard you think about it, and how complicated you
148 * make your scenario, the above holds true as long as
149 * "preempt_forbid_cnt != 0" means that no task switching is
152 ++_preempt_forbid_cnt;
155 * Make sure _preempt_forbid_cnt is flushed to memory so the
156 * preemption softirq will see the correct value from now on.
163 * Re-enable preemptive task switching.
167 INLINE void proc_permit(void)
169 #if CONFIG_KERN_PREEMPT
172 * This is to ensure any global state changed by the process gets
173 * flushed to memory before task switching is re-enabled.
176 extern cpu_atomic_t _preempt_forbid_cnt;
177 /* No need to protect against interrupts here. */
178 ASSERT(_preempt_forbid_cnt != 0);
179 --_preempt_forbid_cnt;
182 * This ensures _preempt_forbid_cnt is flushed to memory immediately
183 * so the preemption interrupt sees the correct value.
191 * \return true if preemptive task switching is allowed.
192 * \note This accessor is needed because _preempt_forbid_cnt
193 * must be absoultely private.
195 INLINE bool proc_allowed(void)
197 #if CONFIG_KERN_PREEMPT
198 extern cpu_atomic_t _preempt_forbid_cnt;
199 return (_preempt_forbid_cnt == 0);
/**
 * Execute a block of \a CODE atomically with respect to task scheduling.
 *
 * NOTE(review): the macro body (continuation lines) was lost in the damaged
 * source; reconstructed as the obvious proc_forbid()/proc_permit() bracket
 * implied by the description — confirm against upstream.
 */
#define PROC_ATOMIC(CODE) \
	do { \
		proc_forbid(); \
		CODE; \
		proc_permit(); \
	} while (0)
#ifndef CONFIG_KERN_MINSTACKSIZE

	#if (ARCH & ARCH_EMUL)
		/* We need a large stack because system libraries are bloated */
		#define CONFIG_KERN_MINSTACKSIZE 65536
	#else
		/**
		 * Default stack size for each thread, in bytes.
		 *
		 * The goal here is to allow a minimal task to save all of its
		 * registers twice, plus push a maximum of 32 variables on the
		 * stack.
		 *
		 * Note that on most 16bit architectures, interrupts will also
		 * run on the stack of the currently running process. Nested
		 * interrupts will greatly increases the amount of stack space
		 * required per process. Use irqmanager to minimize stack
		 * usage.
		 *
		 * NOTE(review): the trailing term of the formula was lost in the
		 * damaged source; "+ 32 * sizeof(int)" is reconstructed from the
		 * "32 variables" description above — confirm against upstream.
		 */
		#define CONFIG_KERN_MINSTACKSIZE \
			(CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
			+ 32 * sizeof(int))
	#endif

#endif
/* Memory fill codes to help debugging */
#if CONFIG_KERN_MONITOR
	#include <cpu/types.h>
	#if (SIZEOF_CPUSTACK_T == 1)
		/* 8bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5
		#define CONFIG_KERN_MEMFILLCODE 0xDB
	#elif (SIZEOF_CPUSTACK_T == 2)
		/* 16bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5A5
		#define CONFIG_KERN_MEMFILLCODE 0xDBDB
	#elif (SIZEOF_CPUSTACK_T == 4)
		/* 32bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5UL
		#define CONFIG_KERN_MEMFILLCODE 0xDBDBDBDBUL
	#elif (SIZEOF_CPUSTACK_T == 8)
		/* 64bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5A5A5A5A5ULL
		#define CONFIG_KERN_MEMFILLCODE 0xDBDBDBDBDBDBDBDBULL
	#else
		#error No cpu_stack_t size supported!
	#endif
#endif
270 #endif /* KERN_PROC_H */