4 * This file is part of BeRTOS.
6 * Bertos is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * As a special exception, you may use this file as part of a free software
21 * library without restriction. Specifically, if other files instantiate
22 * templates or use macros or inline functions from this file, or you compile
23 * this file and link it with other files to produce an executable, this
24 * file does not by itself cause the resulting executable to be covered by
25 * the GNU General Public License. This exception does not however
26 * invalidate any other reasons why the executable file might be covered by
27 * the GNU General Public License.
29 * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
30 * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
33 * \brief Bertos Kernel core (Process scheduler).
36 * \author Bernie Innocenti <bernie@codewiz.org>
38 * $WIZ$ module_name = "kernel"
39 * $WIZ$ module_configuration = "bertos/cfg/cfg_proc.h"
45 #include "cfg/cfg_proc.h"
46 #include "cfg/cfg_monitor.h"
48 #include <cfg/compiler.h>
50 #if CONFIG_KERN_PREEMPT
51 #include <cfg/debug.h> // ASSERT()
54 #include <cpu/types.h> // cpu_stack_t
55 #include <cpu/frame.h> // CPU_SAVED_REGS_CNT
58 * Forward declaration. The definition of struct Process is private to the
59 * scheduler and hidden in proc_p.h.
64 struct Process *proc_new_with_name(const char *name, void (*entry)(void), iptr_t data, size_t stacksize, cpu_stack_t *stack);
66 #if !CONFIG_KERN_MONITOR
67 #define proc_new(entry,data,size,stack) proc_new_with_name(NULL,(entry),(data),(size),(stack))
69 #define proc_new(entry,data,size,stack) proc_new_with_name(#entry,(entry),(data),(size),(stack))
/** Give up the CPU so that other runnable processes may be scheduled. */
void proc_yield(void);

/** Rename process \a proc to \a name. */
void proc_rename(struct Process *proc, const char *name);

/** \return the name of process \a proc (may be NULL when monitor support is off — TODO confirm). */
const char *proc_name(struct Process *proc);

/** \return the name of the currently running process. */
const char *proc_currentName(void);

/** \return the user data of the current process — presumably the \c data argument given to proc_new(); confirm against proc.c. */
iptr_t proc_currentUserData(void);

/* Self-test entry points for the kernel test suite. */
int proc_testSetup(void);
int proc_testRun(void);
int proc_testTearDown(void);
84 * Return the context structure of the currently running process.
86 * The details of the Process structure are private to the scheduler.
87 * The address returned by this function is an opaque pointer that can
88 * be passed as an argument to other process-related functions.
90 INLINE struct Process *proc_current(void)
92 extern struct Process *CurrentProcess;
93 return CurrentProcess;
/* Set the scheduling priority of \a proc. */
void proc_setPri(struct Process *proc, int pri);

/*
 * NOTE(review): the preprocessor conditional that should select between the
 * prototype above (priority scheduling compiled in) and this empty inline
 * stub, as well as the stub's body, are missing from this copy — restore
 * from upstream before building.
 */
INLINE void proc_setPri(UNUSED_ARG(struct Process *,proc), UNUSED_ARG(int, pri))
105 * Disable preemptive task switching.
107 * The scheduler maintains a global nesting counter. Task switching is
108 * effectively re-enabled only when the number of calls to proc_permit()
109 * matches the number of calls to proc_forbid().
111 * \note Calling functions that could sleep while task switching is disabled
112 * is dangerous and unsupported.
 * \note calling proc_forbid() from within an interrupt is illegal and
 * will cause undefined behavior (tail of this note truncated in this copy).
117 * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
118 * very efficient locking primitive in simple but performance-critical
119 * situations. In all other cases, semaphores offer a more flexible and
120 * fine-grained locking primitive.
/*
 * NOTE(review): this function is truncated in this copy — its braces, the
 * closing #endif, and the memory-barrier statement described by the trailing
 * comment are missing; restore from upstream before building.
 */
INLINE void proc_forbid(void)
#if CONFIG_KERN_PREEMPT
	extern cpu_atomic_t _preempt_forbid_cnt;
	/*
	 * We don't need to protect the counter against other processes.
	 * The reason why is a bit subtle.
	 *
	 * If a process gets here, preempt_forbid_cnt can be either 0,
	 * or != 0. In the latter case, preemption is already disabled
	 * and no concurrency issues can occur.
	 *
	 * In the former case, we could be preempted just after reading the
	 * value 0 from memory, and a concurrent process might, in fact,
	 * bump the value of preempt_forbid_cnt under our nose!
	 *
	 * BUT: if this ever happens, then we won't get another chance to
	 * run until the other process calls proc_permit() to re-enable
	 * preemption. At this point, the value of preempt_forbid_cnt
	 * must be back to 0, and thus what we had originally read from
	 * memory happens to be valid.
	 *
	 * No matter how hard you think about it, and how complicated you
	 * make your scenario, the above holds true as long as
	 * "preempt_forbid_cnt != 0" means that no task switching is
	 * possible.  (NOTE(review): tail of sentence truncated in this copy.)
	 */
	++_preempt_forbid_cnt;

	/*
	 * Make sure _preempt_forbid_cnt is flushed to memory so the
	 * preemption softirq will see the correct value from now on.
	 */
/*
 * Re-enable preemptive task switching.
 *
 * NOTE(review): this function is truncated in this copy — its braces, the
 * closing #endif, and the memory barriers the comments below refer to are
 * missing; restore from upstream before building.
 */
INLINE void proc_permit(void)
#if CONFIG_KERN_PREEMPT
	/*
	 * This is to ensure any global state changed by the process gets
	 * flushed to memory before task switching is re-enabled.
	 */
	extern cpu_atomic_t _preempt_forbid_cnt;
	/* No need to protect against interrupts here. */
	ASSERT(_preempt_forbid_cnt != 0);
	--_preempt_forbid_cnt;

	/*
	 * This ensures _preempt_forbid_cnt is flushed to memory immediately
	 * so the preemption interrupt sees the correct value.
	 */
190 * \return true if preemptive task switching is allowed.
191 * \note This accessor is needed because _preempt_forbid_cnt
192 * must be absoultely private.
194 INLINE bool proc_allowed(void)
196 #if CONFIG_KERN_PREEMPT
197 extern cpu_atomic_t _preempt_forbid_cnt;
198 return (_preempt_forbid_cnt == 0);
/**
 * Execute a block of \a CODE atomically with respect to task scheduling.
 */
/*
 * NOTE(review): the macro body is missing from this copy (the continuation
 * lines — presumably wrapping CODE between proc_forbid() and proc_permit() —
 * were lost); restore from upstream. Beware: the trailing backslash below
 * currently continues into the next preprocessor line.
 */
#define PROC_ATOMIC(CODE) \
#ifndef CONFIG_KERN_MINSTACKSIZE
/*
 * NOTE(review): this region is truncated in this copy — the #else and #endif
 * lines of both conditionals and the continuation of the default
 * CONFIG_KERN_MINSTACKSIZE formula (after the trailing backslash at the end)
 * are missing; restore from upstream before building.
 */
#if (ARCH & ARCH_EMUL)
/* We need a large stack because system libraries are bloated */
#define CONFIG_KERN_MINSTACKSIZE 65536
/*
 * Default stack size for each thread, in bytes.
 *
 * The goal here is to allow a minimal task to save all of its
 * registers twice, plus push a maximum of 32 variables on the
 * stack.  (NOTE(review): tail of sentence truncated in this copy.)
 *
 * The actual size computed by the default formula is:
 * (truncated in this copy)
 *
 * Note that on most 16bit architectures, interrupts will also
 * run on the stack of the currently running process. Nested
 * interrupts will greatly increase the amount of stack space
 * required per process. Use irqmanager to minimize stack
 * usage.  (NOTE(review): tail of sentence truncated in this copy.)
 */
#define CONFIG_KERN_MINSTACKSIZE \
	(CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
/* Memory fill codes to help debugging */
#if CONFIG_KERN_MONITOR
	#include <cpu/types.h>

	/* Fill patterns sized to match cpu_stack_t so stack/memory painting
	 * writes whole stack words. */
	#if (SIZEOF_CPUSTACK_T == 1)
		/* 8bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5
		#define CONFIG_KERN_MEMFILLCODE 0xDB
	#elif (SIZEOF_CPUSTACK_T == 2)
		/* 16bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5A5
		#define CONFIG_KERN_MEMFILLCODE 0xDBDB
	#elif (SIZEOF_CPUSTACK_T == 4)
		/* 32bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5UL
		#define CONFIG_KERN_MEMFILLCODE 0xDBDBDBDBUL
	#elif (SIZEOF_CPUSTACK_T == 8)
		/* 64bit cpu_stack_t */
		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5A5A5A5A5ULL
		#define CONFIG_KERN_MEMFILLCODE 0xDBDBDBDBDBDBDBDBULL
	#else
		#error No cpu_stack_t size supported!
	#endif
#endif /* CONFIG_KERN_MONITOR */
269 #endif /* KERN_PROC_H */