X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;f=bertos%2Fkern%2Fproc.h;h=7ac632ec1fb7fd3702d1dd4e8f20babfde3160f5;hb=71743c2a5a8bf9dbf66a945fd9656baed0d16329;hp=1a35834683c13c27db0bdc23a73bd178bd510b45;hpb=a4bdf6cbff7b5fce39aef18765396cc0aab26bf0;p=bertos.git

diff --git a/bertos/kern/proc.h b/bertos/kern/proc.h
index 1a358346..7ac632ec 100644
--- a/bertos/kern/proc.h
+++ b/bertos/kern/proc.h
@@ -30,22 +30,29 @@
  * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti
  * -->
  *
- * \brief Process scheduler (public interface).
+ * \brief BeRTOS kernel core (process scheduler).
  *
  * \version $Id$
  * \author Bernie Innocenti
+ *
+ * $WIZ$ module_name = "kernel"
+ * $WIZ$ module_configuration = "bertos/cfg/cfg_proc.h"
+ * $WIZ$ module_depends = "switch_ctx"
  */
+
 #ifndef KERN_PROC_H
 #define KERN_PROC_H

-#include "cfg/cfg_kern.h"
+#include "cfg/cfg_proc.h"
+#include "cfg/cfg_monitor.h"
+
 #include <cfg/compiler.h>

 #if CONFIG_KERN_PREEMPT
 	#include <cfg/debug.h> // ASSERT()
 #endif

-#include <cpu/types.h> // cpustack_t
+#include <cpu/types.h> // cpu_stack_t
 #include <cpu/frame.h> // CPU_SAVED_REGS_CNT

 /*
@@ -54,9 +61,8 @@
  */
 struct Process;

-/* Task scheduling services */
 void proc_init(void);
-struct Process *proc_new_with_name(const char* name, void (*entry)(void), iptr_t data, size_t stacksize, cpustack_t *stack);
+struct Process *proc_new_with_name(const char *name, void (*entry)(void), iptr_t data, size_t stacksize, cpu_stack_t *stack);

 #if !CONFIG_KERN_MONITOR
 	#define proc_new(entry,data,size,stack) proc_new_with_name(NULL,(entry),(data),(size),(stack))
@@ -66,16 +72,27 @@ struct Process *proc_new_with_name(const char* name, void (*entry)(void), iptr_t

 void proc_exit(void);
 void proc_yield(void);
+void proc_rename(struct Process *proc, const char *name);
+const char *proc_name(struct Process *proc);
+const char *proc_currentName(void);
+iptr_t proc_currentUserData(void);

 int proc_testSetup(void);
 int proc_testRun(void);
 int proc_testTearDown(void);

-struct Process *proc_current(void);
-iptr_t proc_currentUserData(void);
-void proc_rename(struct Process *proc, const char *name);
-const char *proc_name(struct Process *proc);
-const char *proc_currentName(void);
+/**
+ * Return the context structure of the currently running process.
+ *
+ * The details of the Process structure are private to the scheduler.
+ * The address returned by this function is an opaque pointer that can
+ * be passed as an argument to other process-related functions.
+ */
+INLINE struct Process *proc_current(void)
+{
+	extern struct Process *CurrentProcess;
+	return CurrentProcess;
+}

 #if CONFIG_KERN_PRI
 	void proc_setPri(struct Process *proc, int pri);
@@ -95,6 +112,9 @@ const char *proc_currentName(void);
  * \note Calling functions that could sleep while task switching is disabled
  * is dangerous and unsupported.
  *
+ * \note Calling proc_forbid() from within an interrupt is illegal and
+ * meaningless.
+ *
  * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
  * very efficient locking primitive in simple but performance-critical
  * situations. In all other cases, semaphores offer a more flexible and
@@ -105,8 +125,30 @@ const char *proc_currentName(void);
 INLINE void proc_forbid(void)
 {
 #if CONFIG_KERN_PREEMPT
-	extern int _preempt_forbid_cnt;
-	// No need to protect against interrupts here.
+	extern cpu_atomic_t _preempt_forbid_cnt;
+	/*
+	 * We don't need to protect the counter against other processes.
+	 * The reason is a bit subtle.
+	 *
+	 * If a process gets here, preempt_forbid_cnt can be either 0,
+	 * or != 0.
+	 * In the latter case, preemption is already disabled
+	 * and no concurrency issues can occur.
+	 *
+	 * In the former case, we could be preempted just after reading the
+	 * value 0 from memory, and a concurrent process might, in fact,
+	 * bump the value of preempt_forbid_cnt under our nose!
+	 *
+	 * BUT: if this ever happens, then we won't get another chance to
+	 * run until the other process calls proc_permit() to re-enable
+	 * preemption. At this point, the value of preempt_forbid_cnt
+	 * must be back to 0, and thus what we had originally read from
+	 * memory happens to be valid.
+	 *
+	 * No matter how hard you think about it, and how complicated you
+	 * make your scenario, the above holds true as long as
+	 * "preempt_forbid_cnt != 0" means that no task switching is
+	 * possible.
+	 */
 	++_preempt_forbid_cnt;

 	/*
@@ -131,10 +173,10 @@ INLINE void proc_permit(void)
 	 * flushed to memory before task switching is re-enabled.
 	 */
 	MEMORY_BARRIER;
-	extern int _preempt_forbid_cnt;
+	extern cpu_atomic_t _preempt_forbid_cnt;
 	/* No need to protect against interrupts here. */
+	ASSERT(_preempt_forbid_cnt != 0);
 	--_preempt_forbid_cnt;
-	ASSERT(_preempt_forbid_cnt >= 0);

 	/*
 	 * This ensures _preempt_forbid_cnt is flushed to memory immediately
@@ -153,7 +195,7 @@ INLINE void proc_permit(void)
 INLINE bool proc_allowed(void)
 {
 #if CONFIG_KERN_PREEMPT
-	extern int _preempt_forbid_cnt;
+	extern cpu_atomic_t _preempt_forbid_cnt;
 	return (_preempt_forbid_cnt == 0);
 #else
 	return true;
@@ -196,8 +238,8 @@ INLINE bool proc_allowed(void)
 	 * usage.
 	 */
 	#define CONFIG_KERN_MINSTACKSIZE \
-		(CPU_SAVED_REGS_CNT * 2 * sizeof(cpustack_t) \
-		+ 32 * sizeof(int))
+		(CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
+		+ 48 * sizeof(int))
 #endif

 #endif
@@ -205,23 +247,23 @@ INLINE bool proc_allowed(void)
 #if CONFIG_KERN_MONITOR
 	#include <cpu/types.h>
 	#if (SIZEOF_CPUSTACK_T == 1)
-		/* 8bit cpustack_t */
+		/* 8bit cpu_stack_t */
 		#define CONFIG_KERN_STACKFILLCODE 0xA5
 		#define CONFIG_KERN_MEMFILLCODE 0xDB
 	#elif (SIZEOF_CPUSTACK_T == 2)
-		/* 16bit cpustack_t */
+		/* 16bit cpu_stack_t */
 		#define CONFIG_KERN_STACKFILLCODE 0xA5A5
 		#define CONFIG_KERN_MEMFILLCODE 0xDBDB
 	#elif (SIZEOF_CPUSTACK_T == 4)
-		/* 32bit cpustack_t */
+		/* 32bit cpu_stack_t */
 		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5UL
 		#define CONFIG_KERN_MEMFILLCODE 0xDBDBDBDBUL
 	#elif (SIZEOF_CPUSTACK_T == 8)
-		/* 64bit cpustack_t */
+		/* 64bit cpu_stack_t */
 		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5A5A5A5A5ULL
 		#define CONFIG_KERN_MEMFILLCODE 0xDBDBDBDBDBDBDBDBULL
 	#else
-		#error No cpu_stack_t size supported!
+		#error No cpu_stack_t size supported!
 	#endif
 #endif
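
For readers new to this API, here is a minimal usage sketch of the process-creation interface declared above. The worker() entry function, the stack size, and the user-data value 42 are illustrative assumptions, not anything this patch defines; platform startup (IRQs, timers) is omitted because only the proc API is being shown.

#include <kern/proc.h>

/* The stack must be a cpu_stack_t array; CONFIG_KERN_MINSTACKSIZE is in bytes. */
static cpu_stack_t worker_stack[CONFIG_KERN_MINSTACKSIZE / sizeof(cpu_stack_t)];

static void worker(void)
{
	/* Retrieve the iptr_t argument passed to proc_new() at creation time. */
	long id = (long)proc_currentUserData();
	(void)id;

	for (;;)
	{
		/* ... per-process work would go here ... */
		proc_yield();   /* Cooperatively give up the CPU. */
	}
}

int main(void)
{
	proc_init();

	/*
	 * With the monitor disabled, proc_new() expands to
	 * proc_new_with_name(NULL, ...), as the header above shows.
	 */
	proc_new(worker, (iptr_t)42, sizeof(worker_stack), worker_stack);

	for (;;)
		proc_yield();
}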
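
The proc_forbid()/proc_permit() pair documented above acts as a nesting counter, and the new ASSERT() in proc_permit() now catches unbalanced calls. The sketch below shows the intended usage pattern between cooperating processes; the shared_events variable and the helper names are made up for illustration.

#include <kern/proc.h>

static int shared_events;   /* State shared by cooperating processes. */

static void log_event(void)
{
	proc_forbid();          /* Nested call: just bumps the forbid counter. */
	/* ... update other shared bookkeeping ... */
	proc_permit();
}

static void record_event(void)
{
	/*
	 * Cheap critical section against other processes: no task switch can
	 * happen until the matching outermost proc_permit().  Interrupts stay
	 * enabled, and sleeping in here is not allowed.
	 */
	proc_forbid();
	shared_events++;
	log_event();            /* Nesting is fine thanks to the counter. */
	proc_permit();
}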
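
Finally, the CONFIG_KERN_STACKFILLCODE values defined for the monitor make it possible to estimate stack usage by checking how much of the pre-filled pattern is still intact. The following is only a sketch of that idea, under the assumption of a downward-growing stack pre-filled with the pattern; it is not the actual monitor implementation.

#include <stddef.h>
#include <kern/proc.h>

#if CONFIG_KERN_MONITOR
/*
 * Count how many words of a process stack still hold the fill pattern.
 * Untouched words remain at the low end of the array if the stack grows
 * downward, so this gives a rough high-water mark.
 */
static size_t stack_words_untouched(const cpu_stack_t *stack, size_t words)
{
	size_t n = 0;

	while (n < words && stack[n] == (cpu_stack_t)CONFIG_KERN_STACKFILLCODE)
		n++;

	return n;
}
#endif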