/**
 * \file
 * <!--
 * This file is part of BeRTOS.
 *
 * Bertos is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License.  This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 *
 * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
 * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
 * -->
 *
 * \brief BeRTOS Kernel core (Process scheduler).
 *
 * \author Bernie Innocenti <bernie@codewiz.org>
 *
 * $WIZ$ module_name = "kernel"
 * $WIZ$ module_configuration = "bertos/cfg/cfg_proc.h"
 * $WIZ$ module_depends = "switch_ctx", "coop"
 * $WIZ$ module_supports = "not atmega103"
 */

#ifndef KERN_PROC_H
#define KERN_PROC_H
#include "cfg/cfg_proc.h"
#include "cfg/cfg_monitor.h"

#include <cfg/compiler.h>

#if CONFIG_KERN_PREEMPT
	#include <cfg/debug.h> // ASSERT()
#endif

#include <cpu/types.h> // cpu_stack_t
#include <cpu/frame.h> // CPU_SAVED_REGS_CNT
/**
 * Define the stack for one process.
 *
 * This macro defines a static stack for one process and
 * checks whether the given stack size is enough to run a process.
 *
 * \param name Name of the stack array variable being defined.
 * \param size Stack size in bytes; must be at least
 *             CONFIG_KERN_MINSTACKSIZE or compilation fails.
 */
#define PROC_DEFINE_STACK(name, size) \
	STATIC_ASSERT((size) >= CONFIG_KERN_MINSTACKSIZE); \
	cpu_stack_t name[(size) / sizeof(cpu_stack_t)];
/*
 * Forward declaration. The definition of struct Process is private to the
 * scheduler and hidden in proc_p.h.
 */
struct Process;
/**
 * Initialize the process subsystem (kernel).
 *
 * It must be called before using any process related function.
 */
void proc_init(void);
82 * Create a new named process and schedules it for execution.
84 * When defining the stacksize take into account that you may want at least:
85 * \li save all the registers for each nested function call;
86 * \li have memory for the struct Process, which is positioned at the bottom
88 * \li have some memory for temporary variables inside called functions.
90 * The value given by CONFIG_KERN_MINSTACKSIZE is rather safe to use in the first place.
94 * proc_new(entry, data, stacksize, stack)
96 * is a more convenient way to create a process, as you don't have to specify
99 * \param name Name of the process (currently unused).
100 * \param entry Function that the process will execute.
101 * \param data Pointer to user data.
102 * \param stacksize Length of the stack.
103 * \param stack Pointer to the memory area to be used as a stack.
105 struct Process *proc_new_with_name(const char *name, void (*entry)(void), iptr_t data, size_t stacksize, cpu_stack_t *stack);
#if !CONFIG_KERN_MONITOR
	/** Create a new unnamed process (the monitor is disabled, names are not tracked). */
	#define proc_new(entry,data,size,stack) proc_new_with_name(NULL,(entry),(data),(size),(stack))
#else
	/** Create a new process named after its entry function (name shown by the kernel monitor). */
	#define proc_new(entry,data,size,stack) proc_new_with_name(#entry,(entry),(data),(size),(stack))
#endif
/**
 * Terminate the execution of the current process.
 */
void proc_exit(void);
/**
 * Co-operative context switch.
 *
 * The process that calls this function will release the CPU before its cpu
 * quantum expires; the scheduler will then run to select the next process
 * that will take control of the CPU.
 *
 * \note This function is available only if CONFIG_KERN is enabled.
 *
 * \sa cpu_relax(), which is the recommended method to release the cpu.
 */
void proc_yield(void);
/* Process-name accessors (presumably meaningful only when the kernel
 * monitor tracks names — confirm against proc_new_with_name()). */
void proc_rename(struct Process *proc, const char *name);
const char *proc_name(struct Process *proc);
const char *proc_currentName(void);
134 * Return a pointer to the user data of the current process.
136 * To obtain user data, just call this function inside the process. Remember to cast
137 * the returned pointer to the correct type.
138 * \return Pointer to the user data of the current process.
140 iptr_t proc_currentUserData(void);
/* Self-test hooks for the process subsystem (setup / run / teardown). */
int proc_testSetup(void);
int proc_testRun(void);
int proc_testTearDown(void);
147 * Return the context structure of the currently running process.
149 * The details of the Process structure are private to the scheduler.
150 * The address returned by this function is an opaque pointer that can
151 * be passed as an argument to other process-related functions.
153 INLINE struct Process *proc_current(void)
155 extern struct Process *CurrentProcess;
156 return CurrentProcess;
160 void proc_setPri(struct Process *proc, int pri);
162 INLINE void proc_setPri(UNUSED_ARG(struct Process *,proc), UNUSED_ARG(int, pri))
168 * Disable preemptive task switching.
170 * The scheduler maintains a global nesting counter. Task switching is
171 * effectively re-enabled only when the number of calls to proc_permit()
172 * matches the number of calls to proc_forbid().
174 * \note Calling functions that could sleep while task switching is disabled
175 * is dangerous and unsupported.
177 * \note calling proc_forbid() from within an interrupt is illegal and
180 * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
181 * very efficient locking primitive in simple but performance-critical
182 * situations. In all other cases, semaphores offer a more flexible and
183 * fine-grained locking primitive.
187 INLINE void proc_forbid(void)
189 #if CONFIG_KERN_PREEMPT
190 extern cpu_atomic_t _preempt_forbid_cnt;
192 * We don't need to protect the counter against other processes.
193 * The reason why is a bit subtle.
195 * If a process gets here, preempt_forbid_cnt can be either 0,
196 * or != 0. In the latter case, preemption is already disabled
197 * and no concurrency issues can occur.
199 * In the former case, we could be preempted just after reading the
200 * value 0 from memory, and a concurrent process might, in fact,
201 * bump the value of preempt_forbid_cnt under our nose!
203 * BUT: if this ever happens, then we won't get another chance to
204 * run until the other process calls proc_permit() to re-enable
205 * preemption. At this point, the value of preempt_forbid_cnt
206 * must be back to 0, and thus what we had originally read from
207 * memory happens to be valid.
209 * No matter how hard you think about it, and how complicated you
210 * make your scenario, the above holds true as long as
211 * "preempt_forbid_cnt != 0" means that no task switching is
214 ++_preempt_forbid_cnt;
217 * Make sure _preempt_forbid_cnt is flushed to memory so the
218 * preemption softirq will see the correct value from now on.
225 * Re-enable preemptive task switching.
229 INLINE void proc_permit(void)
231 #if CONFIG_KERN_PREEMPT
234 * This is to ensure any global state changed by the process gets
235 * flushed to memory before task switching is re-enabled.
238 extern cpu_atomic_t _preempt_forbid_cnt;
239 /* No need to protect against interrupts here. */
240 ASSERT(_preempt_forbid_cnt != 0);
241 --_preempt_forbid_cnt;
244 * This ensures _preempt_forbid_cnt is flushed to memory immediately
245 * so the preemption interrupt sees the correct value.
253 * \return true if preemptive task switching is allowed.
254 * \note This accessor is needed because _preempt_forbid_cnt
255 * must be absoultely private.
257 INLINE bool proc_allowed(void)
259 #if CONFIG_KERN_PREEMPT
260 extern cpu_atomic_t _preempt_forbid_cnt;
261 return (_preempt_forbid_cnt == 0);
/**
 * Execute a block of \a CODE atomically with respect to task scheduling.
 */
#define PROC_ATOMIC(CODE) \
	do { \
		proc_forbid(); \
		CODE; \
		proc_permit(); \
	} while (0)
#ifndef CONFIG_KERN_MINSTACKSIZE

	#if (ARCH & ARCH_EMUL)
		/* We need a large stack because system libraries are bloated. */
		#define CONFIG_KERN_MINSTACKSIZE 65536
	#else
		/**
		 * Default stack size for each thread, in bytes.
		 *
		 * The goal here is to allow a minimal task to save all of its
		 * registers twice, plus push a maximum of 32 variables on the
		 * stack.
		 *
		 * The actual size computed by the default formula is:
		 *   CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) + 32 * sizeof(int)
		 *
		 * Note that on most 16bit architectures, interrupts will also
		 * run on the stack of the currently running process.  Nested
		 * interrupts will greatly increase the amount of stack space
		 * required per process.  Use irqmanager to minimize stack
		 * usage.
		 */
		#define CONFIG_KERN_MINSTACKSIZE \
			(CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
			+ 32 * sizeof(int))
	#endif
#endif
/* Memory fill codes to help debugging */
#if CONFIG_KERN_MONITOR
	#include <cpu/types.h>
	#if (SIZEOF_CPUSTACK_T == 1)
		/* 8bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE  0xA5
		#define CONFIG_KERN_MEMFILLCODE    0xDB
	#elif (SIZEOF_CPUSTACK_T == 2)
		/* 16bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE  0xA5A5
		#define CONFIG_KERN_MEMFILLCODE    0xDBDB
	#elif (SIZEOF_CPUSTACK_T == 4)
		/* 32bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE  0xA5A5A5A5UL
		#define CONFIG_KERN_MEMFILLCODE    0xDBDBDBDBUL
	#elif (SIZEOF_CPUSTACK_T == 8)
		/* 64bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE  0xA5A5A5A5A5A5A5A5ULL
		#define CONFIG_KERN_MEMFILLCODE    0xDBDBDBDBDBDBDBDBULL
	#else
		#error No cpu_stack_t size supported!
	#endif
#endif
332 #endif /* KERN_PROC_H */