X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;f=bertos%2Fkern%2Fproc.h;h=4efdf9df8e073e4d0a1ff41783dcdf9b32d7c2b0;hb=1200cce6f786accd1e56bfe1982b3c58d297aee7;hp=37415f3de9d5ab7b7ae0bd047530d8d3f41523fa;hpb=c22fe24a0da896a52dbc3882390ec18a440ef56a;p=bertos.git

diff --git a/bertos/kern/proc.h b/bertos/kern/proc.h
index 37415f3d..4efdf9df 100644
--- a/bertos/kern/proc.h
+++ b/bertos/kern/proc.h
@@ -26,16 +26,14 @@
  * invalidate any other reasons why the executable file might be covered by
  * the GNU General Public License.
  *
- * Copyright 2001,2004 Develer S.r.l. (http://www.develer.com/)
- * Copyright 1999,2000,2001 Bernardo Innocenti
- *
+ * Copyright 2001, 2004 Develer S.r.l. (http://www.develer.com/)
+ * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti
+ * -->
  *
  * \brief Process scheduler (public interface).
  *
  * \version $Id$
- *
- * \author Bernardo Innocenti
+ * \author Bernie Innocenti
  */
 #ifndef KERN_PROC_H
 #define KERN_PROC_H
@@ -43,9 +41,17 @@
 #include "cfg/cfg_kern.h"
 #include 
-#include 
+#if CONFIG_KERN_PREEMPT
+	#include  // ASSERT()
+#endif
+
+#include  // cpustack_t
+#include  // CPU_SAVED_REGS_CNT
 
-/* Fwd decl */
+/*
+ * Forward declaration. The definition of struct Process is private to the
+ * scheduler and hidden in proc_p.h.
+ */
 struct Process;
 
 /* Task scheduling services */
@@ -59,20 +65,126 @@ struct Process *proc_new_with_name(const char* name, void (*entry)(void), iptr_t
 #endif
 
 void proc_exit(void);
-void proc_switch(void);
-void proc_test(void);
+void proc_yield(void);
+
+int proc_testSetup(void);
+int proc_testRun(void);
+int proc_testTearDown(void);
+
 struct Process *proc_current(void);
-iptr_t proc_current_user_data(void);
-void proc_rename(struct Process *proc, const char* name);
+iptr_t proc_currentUserData(void);
+void proc_rename(struct Process *proc, const char *name);
+const char *proc_name(struct Process *proc);
+const char *proc_currentName(void);
 
-#if CONFIG_KERN_PREEMPTIVE
-	void proc_forbid(void);
-	void proc_permit(void);
+#if CONFIG_KERN_PRI
+	void proc_setPri(struct Process *proc, int pri);
 #else
-	INLINE void proc_forbid(void) { /* nop */ }
-	INLINE void proc_permit(void) { /* nop */ }
+	INLINE void proc_setPri(UNUSED_ARG(struct Process *,proc), UNUSED_ARG(int, pri))
+	{
+	}
 #endif
 
+/**
+ * Disable preemptive task switching.
+ *
+ * The scheduler maintains a global nesting counter. Task switching is
+ * effectively re-enabled only when the number of calls to proc_permit()
+ * matches the number of calls to proc_forbid().
+ *
+ * \note Calling functions that could sleep while task switching is disabled
+ * is dangerous and unsupported.
+ *
+ * \note Calling proc_forbid() from within an interrupt is illegal and
+ * meaningless.
+ *
+ * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
+ * very efficient locking primitive in simple but performance-critical
+ * situations. In all other cases, semaphores offer a more flexible and
+ * fine-grained locking primitive.
+ *
+ * \sa proc_permit()
+ */
+INLINE void proc_forbid(void)
+{
+	#if CONFIG_KERN_PREEMPT
+		extern cpuatomic_t _preempt_forbid_cnt;
+		/*
+		 * We don't need to protect the counter against other processes.
+		 * The reason why is a bit subtle.
+		 *
+		 * If a process gets here, preempt_forbid_cnt can be either 0,
+		 * or != 0. In the latter case, preemption is already disabled
+		 * and no concurrency issues can occur.
+		 *
+		 * In the former case, we could be preempted just after reading the
+		 * value 0 from memory, and a concurrent process might, in fact,
+		 * bump the value of preempt_forbid_cnt under our nose!
+		 *
+		 * BUT: if this ever happens, then we won't get another chance to
+		 * run until the other process calls proc_permit() to re-enable
+		 * preemption. At this point, the value of preempt_forbid_cnt
+		 * must be back to 0, and thus what we had originally read from
+		 * memory happens to be valid.
+		 *
+		 * No matter how hard you think about it, and how complicated you
+		 * make your scenario, the above holds true as long as
+		 * "preempt_forbid_cnt != 0" means that no task switching is
+		 * possible.
+		 */
+		++_preempt_forbid_cnt;
+
+		/*
+		 * Make sure _preempt_forbid_cnt is flushed to memory so the
+		 * preemption softirq will see the correct value from now on.
+		 */
+		MEMORY_BARRIER;
+	#endif
+}
+
+/**
+ * Re-enable preemptive task switching.
+ *
+ * \sa proc_forbid()
+ */
+INLINE void proc_permit(void)
+{
+	#if CONFIG_KERN_PREEMPT
+
+	/*
+	 * This is to ensure any global state changed by the process gets
+	 * flushed to memory before task switching is re-enabled.
+	 */
+	MEMORY_BARRIER;
+	extern cpuatomic_t _preempt_forbid_cnt;
+	/* No need to protect against interrupts here. */
+	ASSERT(_preempt_forbid_cnt != 0);
+	--_preempt_forbid_cnt;
+
+	/*
+	 * This ensures _preempt_forbid_cnt is flushed to memory immediately
+	 * so the preemption interrupt sees the correct value.
+	 */
+	MEMORY_BARRIER;
+
+	#endif
+}
+
+/**
+ * \return true if preemptive task switching is allowed.
+ * \note This accessor is needed because _preempt_forbid_cnt
+ * must be absolutely private.
+ */
+INLINE bool proc_allowed(void)
+{
+	#if CONFIG_KERN_PREEMPT
+		extern cpuatomic_t _preempt_forbid_cnt;
+		return (_preempt_forbid_cnt == 0);
+	#else
+		return true;
+	#endif
+}
+
 /**
  * Execute a block of \a CODE atomically with respect to task scheduling.
  */
@@ -83,4 +195,59 @@ void proc_rename(struct Process *proc, const char* name);
 	proc_permit(); \
 } while(0)
 
+#ifndef CONFIG_KERN_MINSTACKSIZE
+
+	#if (ARCH & ARCH_EMUL)
+		/* We need a large stack because system libraries are bloated */
+		#define CONFIG_KERN_MINSTACKSIZE 65536
+	#else
+		/**
+		 * Default stack size for each thread, in bytes.
+		 *
+		 * The goal here is to allow a minimal task to save all of its
+		 * registers twice, plus push a maximum of 32 variables on the
+		 * stack.
+		 *
+		 * The actual size computed by the default formula is:
+		 *    AVR: 102
+		 *   i386: 156
+		 *    ARM: 164
+		 * x86_64: 184
+		 *
+		 * Note that on most 16bit architectures, interrupts will also
+		 * run on the stack of the currently running process. Nested
+		 * interrupts will greatly increase the amount of stack space
+		 * required per process. Use irqmanager to minimize stack
+		 * usage.
+		 */
+		#define CONFIG_KERN_MINSTACKSIZE \
+			(CPU_SAVED_REGS_CNT * 2 * sizeof(cpustack_t) \
+			+ 32 * sizeof(int))
+	#endif
+#endif
+
+/* Memory fill codes to help debugging */
+#if CONFIG_KERN_MONITOR
+	#include 
+	#if (SIZEOF_CPUSTACK_T == 1)
+		/* 8bit cpustack_t */
+		#define CONFIG_KERN_STACKFILLCODE 0xA5
+		#define CONFIG_KERN_MEMFILLCODE 0xDB
+	#elif (SIZEOF_CPUSTACK_T == 2)
+		/* 16bit cpustack_t */
+		#define CONFIG_KERN_STACKFILLCODE 0xA5A5
+		#define CONFIG_KERN_MEMFILLCODE 0xDBDB
+	#elif (SIZEOF_CPUSTACK_T == 4)
+		/* 32bit cpustack_t */
+		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5UL
+		#define CONFIG_KERN_MEMFILLCODE 0xDBDBDBDBUL
+	#elif (SIZEOF_CPUSTACK_T == 8)
+		/* 64bit cpustack_t */
+		#define CONFIG_KERN_STACKFILLCODE 0xA5A5A5A5A5A5A5A5ULL
+		#define CONFIG_KERN_MEMFILLCODE 0xDBDBDBDBDBDBDBDBULL
+	#else
+		#error No cpustack_t size supported!
+	#endif
+#endif
+
 #endif /* KERN_PROC_H */
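Usage sketch (not part of the patch above): the proc_forbid()/proc_permit() pair and the PROC_ATOMIC() macro documented in this header act as a lightweight critical section. The snippet below is a minimal illustration only; the shared_events counter and the producer() body are hypothetical, and the <kern/proc.h> include path is assumed from the file location.

#include <kern/proc.h>   /* assumed include path for this header */

static volatile int shared_events;   /* hypothetical state shared between processes */

static void producer(void)
{
	for (;;)
	{
		/*
		 * Critical section: proc_forbid()/proc_permit() nest, so the
		 * scheduler re-enables preemption only when the number of
		 * proc_permit() calls matches the number of proc_forbid() calls.
		 */
		proc_forbid();
		++shared_events;
		proc_permit();

		/* Same effect, using the macro defined in this header. */
		PROC_ATOMIC(++shared_events);

		/* Cooperatively hand the CPU over to another ready process. */
		proc_yield();
	}
}

As the notes in the patch warn, nothing inside such a critical section may sleep, and proc_forbid() must not be called from interrupt context.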
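Stack sizing sketch (also not part of the patch): CONFIG_KERN_MINSTACKSIZE, defined above as CPU_SAVED_REGS_CNT * 2 * sizeof(cpustack_t) + 32 * sizeof(int) bytes, is the natural lower bound when statically allocating a stack for a new process. The worker process below is hypothetical, and because the trailing parameters of proc_new_with_name() are truncated in the hunk header above, the call shown in the comment assumes a (name, entry, data, stacksize, stack) ordering.

#include <kern/proc.h>   /* assumed include path for this header */

/*
 * Reserve the documented minimum stack for one extra process, expressed
 * in cpustack_t units so the buffer is suitably aligned for the CPU.
 */
static cpustack_t worker_stack[CONFIG_KERN_MINSTACKSIZE / sizeof(cpustack_t)];

static void worker(void)
{
	/* ... process body ... */
}

/*
 * In the application startup code, something along these lines
 * (parameter order assumed, see the note above):
 *
 *     proc_new_with_name("worker", worker, 0, sizeof(worker_stack), worker_stack);
 */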