/**
 * This file is part of BeRTOS.
 *
 * BeRTOS is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License.  This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 *
 * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
 * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
 *
 * \brief BeRTOS Kernel core (Process scheduler).
 *
 * \author Bernie Innocenti <bernie@codewiz.org>
 *
 * $WIZ$ module_name = "kernel"
 * $WIZ$ module_configuration = "bertos/cfg/cfg_kern.h"
 */
45 #include "cfg/cfg_kern.h"
46 #include <cfg/compiler.h>
48 #if CONFIG_KERN_PREEMPT
49 #include <cfg/debug.h> // ASSERT()
52 #include <cpu/types.h> // cpu_stack_t
53 #include <cpu/frame.h> // CPU_SAVED_REGS_CNT
56 * Forward declaration. The definition of struct Process is private to the
57 * scheduler and hidden in proc_p.h.
62 struct Process *proc_new_with_name(const char *name, void (*entry)(void), iptr_t data, size_t stacksize, cpu_stack_t *stack);
64 #if !CONFIG_KERN_MONITOR
65 #define proc_new(entry,data,size,stack) proc_new_with_name(NULL,(entry),(data),(size),(stack))
67 #define proc_new(entry,data,size,stack) proc_new_with_name(#entry,(entry),(data),(size),(stack))
71 void proc_yield(void);
72 void proc_rename(struct Process *proc, const char *name);
73 const char *proc_name(struct Process *proc);
74 const char *proc_currentName(void);
75 iptr_t proc_currentUserData(void);
77 int proc_testSetup(void);
78 int proc_testRun(void);
79 int proc_testTearDown(void);
82 * Return the context structure of the currently running process.
84 * The details of the Process structure are private to the scheduler.
85 * The address returned by this function is an opaque pointer that can
86 * be passed as an argument to other process-related functions.
88 INLINE struct Process *proc_current(void)
90 extern struct Process *CurrentProcess;
91 return CurrentProcess;
95 void proc_setPri(struct Process *proc, int pri);
97 INLINE void proc_setPri(UNUSED_ARG(struct Process *,proc), UNUSED_ARG(int, pri))
103 * Disable preemptive task switching.
105 * The scheduler maintains a global nesting counter. Task switching is
106 * effectively re-enabled only when the number of calls to proc_permit()
107 * matches the number of calls to proc_forbid().
109 * \note Calling functions that could sleep while task switching is disabled
110 * is dangerous and unsupported.
112 * \note calling proc_forbid() from within an interrupt is illegal and
115 * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
116 * very efficient locking primitive in simple but performance-critical
117 * situations. In all other cases, semaphores offer a more flexible and
118 * fine-grained locking primitive.
122 INLINE void proc_forbid(void)
124 #if CONFIG_KERN_PREEMPT
125 extern cpu_atomic_t _preempt_forbid_cnt;
127 * We don't need to protect the counter against other processes.
128 * The reason why is a bit subtle.
130 * If a process gets here, preempt_forbid_cnt can be either 0,
131 * or != 0. In the latter case, preemption is already disabled
132 * and no concurrency issues can occur.
134 * In the former case, we could be preempted just after reading the
135 * value 0 from memory, and a concurrent process might, in fact,
136 * bump the value of preempt_forbid_cnt under our nose!
138 * BUT: if this ever happens, then we won't get another chance to
139 * run until the other process calls proc_permit() to re-enable
140 * preemption. At this point, the value of preempt_forbid_cnt
141 * must be back to 0, and thus what we had originally read from
142 * memory happens to be valid.
144 * No matter how hard you think about it, and how complicated you
145 * make your scenario, the above holds true as long as
146 * "preempt_forbid_cnt != 0" means that no task switching is
149 ++_preempt_forbid_cnt;
152 * Make sure _preempt_forbid_cnt is flushed to memory so the
153 * preemption softirq will see the correct value from now on.
160 * Re-enable preemptive task switching.
164 INLINE void proc_permit(void)
166 #if CONFIG_KERN_PREEMPT
169 * This is to ensure any global state changed by the process gets
170 * flushed to memory before task switching is re-enabled.
173 extern cpu_atomic_t _preempt_forbid_cnt;
174 /* No need to protect against interrupts here. */
175 ASSERT(_preempt_forbid_cnt != 0);
176 --_preempt_forbid_cnt;
179 * This ensures _preempt_forbid_cnt is flushed to memory immediately
180 * so the preemption interrupt sees the correct value.
188 * \return true if preemptive task switching is allowed.
189 * \note This accessor is needed because _preempt_forbid_cnt
190 * must be absoultely private.
192 INLINE bool proc_allowed(void)
194 #if CONFIG_KERN_PREEMPT
195 extern cpu_atomic_t _preempt_forbid_cnt;
196 return (_preempt_forbid_cnt == 0);
/**
 * Execute a block of \a CODE atomically with respect to task scheduling.
 */
#define PROC_ATOMIC(CODE) \
	do { \
		proc_forbid(); \
		CODE; \
		proc_permit(); \
	} while (0)
#ifndef CONFIG_KERN_MINSTACKSIZE

	#if (ARCH & ARCH_EMUL)
		/* We need a large stack because system libraries are bloated */
		#define CONFIG_KERN_MINSTACKSIZE 65536
	#else
		/**
		 * Default stack size for each thread, in bytes.
		 *
		 * The goal here is to allow a minimal task to save all of its
		 * registers twice, plus push a maximum of 32 variables on the
		 * stack.
		 *
		 * Note that on most 16bit architectures, interrupts will also
		 * run on the stack of the currently running process.  Nested
		 * interrupts will greatly increase the amount of stack space
		 * required per process.  Use irqmanager to minimize stack
		 * usage.
		 */
		#define CONFIG_KERN_MINSTACKSIZE \
			(CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
			+ 32 * sizeof(int))
	#endif
#endif
/* Memory fill codes to help debugging */
#if CONFIG_KERN_MONITOR
	#include <cpu/types.h>
	#if (SIZEOF_CPUSTACK_T == 1)
		/* 8bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5
		#define CONFIG_KERN_MEMFILLCODE   0xDB
	#elif (SIZEOF_CPUSTACK_T == 2)
		/* 16bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5A5
		#define CONFIG_KERN_MEMFILLCODE   0xDBDB
	#elif (SIZEOF_CPUSTACK_T == 4)
		/* 32bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5UL
		#define CONFIG_KERN_MEMFILLCODE   0xDBDBDBDBUL
	#elif (SIZEOF_CPUSTACK_T == 8)
		/* 64bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5A5A5A5A5ULL
		#define CONFIG_KERN_MEMFILLCODE   0xDBDBDBDBDBDBDBDBULL
	#else
		#error No cpu_stack_t size supported!
	#endif
#endif /* CONFIG_KERN_MONITOR */
267 #endif /* KERN_PROC_H */