* Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
* -->
*
+ * \defgroup kern_proc Process (Threads) management
+ * \ingroup kern
+ * \{
+ *
* \brief BeRTOS Kernel core (Process scheduler).
*
- * \version $Id$
+ * This is the core kernel module. It allows you to create new processes
+ * (which are called \b threads in other systems) and set the priority of
+ * each process.
+ *
+ * A process needs a work area (called \b stack) to run. To create a process,
+ * you need to declare a stack area, then create the process.
+ * You may also pass NULL for the stack area, if you have enabled kernel heap:
+ * in this case the stack will be automatically allocated.
+ *
+ * Example:
+ * \code
+ * PROC_DEFINE_STACK(stack1, 200);
+ *
+ * void NORETURN proc1_run(void)
+ * {
+ * while (1)
+ * {
+ * LOG_INFO("I'm alive!\n");
+ * timer_delay(1000);
+ * }
+ * }
+ *
+ *
+ * int main()
+ * {
+ * Process *p1 = proc_new(proc1_run, NULL, stack1, sizeof(stack1));
+ * // here the process is already running
+ * proc_setPri(p1, 2);
+ * // ...
+ * }
+ * \endcode
+ *
+ * The Process struct must be regarded as an opaque data type, do not access
+ * any of its members directly.
+ *
+ * The entry point function should be declared as NORETURN, because doing so
+ * suppresses a compiler warning and enables additional optimizations.
+ *
+ * You can temporarily disable preemption calling proc_forbid(); remember
+ * to enable it again calling proc_permit().
+ *
+ * \note You should hardly need to manually release the CPU; however you
+ * can do it using the cpu_relax() function. It is illegal to release
+ * the CPU with preemption disabled.
+ *
* \author Bernie Innocenti <bernie@codewiz.org>
*
* $WIZ$ module_name = "kernel"
* $WIZ$ module_configuration = "bertos/cfg/cfg_proc.h"
- * $WIZ$ module_depends = "switch_ctx", "coop"
+ * $WIZ$ module_depends = "switch_ctx"
* $WIZ$ module_supports = "not atmega103"
*/
#include "cfg/cfg_proc.h"
#include "cfg/cfg_signal.h"
#include "cfg/cfg_monitor.h"
+#include "sem.h"
#include <struct/list.h> // Node, PriNode
#include <cfg/compiler.h>
-
-#if CONFIG_KERN_PREEMPT
- #include <cfg/debug.h> // ASSERT()
-#endif
+#include <cfg/debug.h> // ASSERT()
#include <cpu/types.h> // cpu_stack_t
#include <cpu/frame.h> // CPU_SAVED_REGS_CNT
+/* The following silences warnings on nightly tests. We need to regenerate
+ * all the projects before this can be removed.
+ */
+#ifndef CONFIG_KERN_PRI_INHERIT
+#define CONFIG_KERN_PRI_INHERIT 0
+#endif
+
/*
* WARNING: struct Process is considered private, so its definition can change any time
* without notice. DO NOT RELY on any field defined here, use only the interface
{
#if CONFIG_KERN_PRI
PriNode link; /**< Link Process into scheduler lists */
+# if CONFIG_KERN_PRI_INHERIT
+ PriNode inh_link; /**< Link Process into priority inheritance lists */
+ List inh_list; /**< Priority inheritance list for this Process */
+ Semaphore *inh_blocked_by; /**< Semaphore blocking this Process */
+ int orig_pri; /**< Process priority without considering inheritance */
+# endif
#else
Node link; /**< Link Process into scheduler lists */
#endif
iptr_t user_data; /**< Custom data passed to the process */
#if CONFIG_KERN_SIGNALS
- sigmask_t sig_wait; /**< Signals the process is waiting for */
- sigmask_t sig_recv; /**< Received signals */
+ Signal sig;
#endif
#if CONFIG_KERN_HEAP
uint16_t flags; /**< Flags */
#endif
-#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR | (ARCH & ARCH_EMUL)
+#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
cpu_stack_t *stack_base; /**< Base of process stack */
size_t stack_size; /**< Size of process stack */
#endif
-#if CONFIG_KERN_PREEMPT
- ucontext_t context;
-#endif
+ /* The actual process entry point */
+ void (*user_entry)(void);
#if CONFIG_KERN_MONITOR
struct ProcMonitor
* \param data Pointer to user data.
* \param size Length of the stack.
* \param stack Pointer to the memory area to be used as a stack.
- *
+ *
* \return Process structure of new created process
* if successful, NULL otherwise.
*/
*/
void proc_exit(void);
-/**
- * Co-operative context switch.
- *
- * The process that calls this function will release the CPU before its cpu quantum
- * expires, the scheduler will run to select the next process that will take control
- * of the processor.
- * \note This function is available only if CONFIG_KERN is enabled
- * \sa cpu_relax(), which is the recommended method to release the cpu.
+/*
+ * Public scheduling class methods.
*/
void proc_yield(void);
+#if CONFIG_KERN_PREEMPT
+bool proc_needPreempt(void);
+void proc_preempt(void);
+#else
+INLINE bool proc_needPreempt(void)
+{
+ return false;
+}
+
+INLINE void proc_preempt(void)
+{
+}
+#endif
+
void proc_rename(struct Process *proc, const char *name);
const char *proc_name(struct Process *proc);
const char *proc_currentName(void);
* the returned pointer to the correct type.
* \return Pointer to the user data of the current process.
*/
-iptr_t proc_currentUserData(void);
+INLINE iptr_t proc_currentUserData(void)
+{
+ extern struct Process *current_process;
+ return current_process->user_data;
+}
int proc_testSetup(void);
int proc_testRun(void);
*/
INLINE struct Process *proc_current(void)
{
- extern struct Process *CurrentProcess;
- return CurrentProcess;
+ extern struct Process *current_process;
+ return current_process;
}
#if CONFIG_KERN_PRI
void proc_setPri(struct Process *proc, int pri);
+
+ INLINE int proc_pri(struct Process *proc)
+ {
+ return proc->link.pri;
+ }
#else
INLINE void proc_setPri(UNUSED_ARG(struct Process *,proc), UNUSED_ARG(int, pri))
{
}
+
+ INLINE int proc_pri(UNUSED_ARG(struct Process *, proc))
+ {
+ return 0;
+ }
#endif
-/**
- * Disable preemptive task switching.
- *
- * The scheduler maintains a global nesting counter. Task switching is
- * effectively re-enabled only when the number of calls to proc_permit()
- * matches the number of calls to proc_forbid().
- *
- * \note Calling functions that could sleep while task switching is disabled
- * is dangerous and unsupported.
- *
- * \note calling proc_forbid() from within an interrupt is illegal and
- * meaningless.
- *
- * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
- * very efficient locking primitive in simple but performance-critical
- * situations. In all other cases, semaphores offer a more flexible and
- * fine-grained locking primitive.
- *
- * \sa proc_permit()
- */
-INLINE void proc_forbid(void)
-{
- #if CONFIG_KERN_PREEMPT
- extern cpu_atomic_t _preempt_forbid_cnt;
+#if CONFIG_KERN_PREEMPT
+
+ /**
+ * Disable preemptive task switching.
+ *
+ * The scheduler maintains a global nesting counter. Task switching is
+ * effectively re-enabled only when the number of calls to proc_permit()
+ * matches the number of calls to proc_forbid().
+ *
+ * \note Calling functions that could sleep while task switching is disabled
+ * is dangerous and unsupported.
+ *
+ * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
+ * very efficient locking primitive in simple but performance-critical
+ * situations. In all other cases, semaphores offer a more flexible and
+ * fine-grained locking primitive.
+ *
+ * \sa proc_permit()
+ */
+ INLINE void proc_forbid(void)
+ {
+ extern cpu_atomic_t preempt_count;
/*
* We don't need to protect the counter against other processes.
* The reason why is a bit subtle.
- * "preempt_forbid_cnt != 0" means that no task switching is
- * possible.
+ * "preempt_count != 0" means that no task switching is
+ * possible.
*/
- ++_preempt_forbid_cnt;
+ ++preempt_count;
/*
- * Make sure _preempt_forbid_cnt is flushed to memory so the
- * preemption softirq will see the correct value from now on.
+ * Make sure preempt_count is flushed to memory so the preemption
+ * softirq will see the correct value from now on.
*/
MEMORY_BARRIER;
- #endif
-}
+ }
-/**
- * Re-enable preemptive task switching.
- *
- * \sa proc_forbid()
- */
-INLINE void proc_permit(void)
-{
- #if CONFIG_KERN_PREEMPT
+ /**
+ * Re-enable preemptive task switching.
+ *
+ * \sa proc_forbid()
+ */
+ INLINE void proc_permit(void)
+ {
+ extern cpu_atomic_t preempt_count;
/*
* This is to ensure any global state changed by the process gets
* flushed to memory before task switching is re-enabled.
*/
MEMORY_BARRIER;
- extern cpu_atomic_t _preempt_forbid_cnt;
/* No need to protect against interrupts here. */
- ASSERT(_preempt_forbid_cnt != 0);
- --_preempt_forbid_cnt;
-
+ ASSERT(preempt_count > 0);
+ --preempt_count;
/*
- * This ensures _preempt_forbid_cnt is flushed to memory immediately
- * so the preemption interrupt sees the correct value.
+ * This ensures preempt_count is flushed to memory immediately so the
+ * preemption interrupt sees the correct value.
*/
MEMORY_BARRIER;
+ }
- #endif
-}
-
-/**
- * \return true if preemptive task switching is allowed.
- * \note This accessor is needed because _preempt_forbid_cnt
- * must be absoultely private.
- */
-INLINE bool proc_preemptAllowed(void)
-{
- #if CONFIG_KERN_PREEMPT
- extern cpu_atomic_t _preempt_forbid_cnt;
- return (_preempt_forbid_cnt == 0);
- #else
- return true;
- #endif
-}
+ /**
+ * \return true if preemptive task switching is allowed.
+ * \note This accessor is needed because preempt_count
+ * must be absolutely private.
+ */
+ INLINE bool proc_preemptAllowed(void)
+ {
+ extern cpu_atomic_t preempt_count;
+ return (preempt_count == 0);
+ }
+#else /* CONFIG_KERN_PREEMPT */
+ #define proc_forbid() /* NOP */
+ #define proc_permit() /* NOP */
+ #define proc_preemptAllowed() (true)
+#endif /* CONFIG_KERN_PREEMPT */
/** Deprecated, use the proc_preemptAllowed() macro. */
#define proc_allowed() proc_preemptAllowed()
/* We need a large stack because system libraries are bloated */
#define KERN_MINSTACKSIZE 65536
#else
- #define KERN_MINSTACKSIZE \
- (sizeof(Process) + CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
- + 32 * sizeof(int))
+ #if CONFIG_KERN_PREEMPT
+ /*
+ * A preemptible kernel needs a larger stack compared to the
+ * cooperative case. A task can be interrupted anytime in each
+ * node of the call graph, at any level of depth. This may
+ * result in a higher stack consumption, to call the ISR, save
+ * the current user context and to execute the kernel
+ * preemption routines implemented as ISR prologue and
+ * epilogue. All these calls are nested into the process stack.
+ *
+ * So, to reduce the risk of stack overflow/underflow problems
+ * add a x2 to the portion stack reserved to the user process.
+ */
+ #define KERN_MINSTACKSIZE \
+ (sizeof(Process) + CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
+ + 32 * sizeof(int) * 2)
+ #else
+ #define KERN_MINSTACKSIZE \
+ (sizeof(Process) + CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
+ + 32 * sizeof(int))
+ #endif /* CONFIG_KERN_PREEMPT */
+
#endif
#ifndef CONFIG_KERN_MINSTACKSIZE
* \param size Stack size in bytes. It must be at least KERN_MINSTACKSIZE.
*/
#define PROC_DEFINE_STACK(name, size) \
- STATIC_ASSERT((size) >= KERN_MINSTACKSIZE); \
- cpu_stack_t name[((size) + sizeof(cpu_stack_t) - 1) / sizeof(cpu_stack_t)];
+ cpu_stack_t name[((size) + sizeof(cpu_stack_t) - 1) / sizeof(cpu_stack_t)]; \
+ STATIC_ASSERT((size) >= KERN_MINSTACKSIZE);
/* Memory fill codes to help debugging */
#if CONFIG_KERN_MONITOR
#error No cpu_stack_t size supported!
#endif
#endif
+/** \} */ //defgroup kern_proc
#endif /* KERN_PROC_H */