X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;f=bertos%2Fkern%2Fproc.c;h=c422e729c6912963f51bfcd89f75a7cee269e290;hb=ae8a609173e4490fd03875f96e388038053b9288;hp=1b9042a707a7d86fc55111ce0ade7a9f3af079ed;hpb=f364e0781c076e8f61c769fbaf29b968a03db2c0;p=bertos.git

diff --git a/bertos/kern/proc.c b/bertos/kern/proc.c
index 1b9042a7..c422e729 100644
--- a/bertos/kern/proc.c
+++ b/bertos/kern/proc.c
@@ -32,7 +32,6 @@
  *
  * \brief Simple cooperative multitasking scheduler.
  *
- * \version $Id$
  * \author Bernie Innocenti
  * \author Stefano Fedrigo
  */
@@ -40,8 +39,11 @@
 #include "proc_p.h"
 #include "proc.h"
 
-#include "cfg/cfg_arch.h" // ARCH_EMUL
 #include "cfg/cfg_proc.h"
+#define LOG_LEVEL KERN_LOG_LEVEL
+#define LOG_FORMAT KERN_LOG_FORMAT
+#include <cfg/log.h>
+
 #include "cfg/cfg_monitor.h"
 #include <cfg/macros.h> // ROUND_UP2
 #include <cfg/module.h>
@@ -58,40 +60,45 @@
 
 #include <string.h> /* memset() */
 
+#define PROC_SIZE_WORDS (ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t))
+
 /*
  * The scheduler tracks ready processes by enqueuing them in the
  * ready list.
  *
  * \note Access to the list must occur while interrupts are disabled.
  */
-REGISTER List ProcReadyList;
+REGISTER List proc_ready_list;
 
 /*
  * Holds a pointer to the TCB of the currently running process.
  *
  * \note User applications should use proc_current() to retrieve this value.
  */
-REGISTER Process *CurrentProcess;
+REGISTER Process *current_process;
+
+/** The main process (the one that executes main()). */
+static struct Process main_process;
+
+#if CONFIG_KERN_HEAP
+
+/**
+ * Local heap dedicated to allocating the memory used by the processes.
+ */
+static HEAP_DEFINE_BUF(heap_buf, CONFIG_KERN_HEAP_SIZE);
+static Heap proc_heap;
 
-#if (ARCH & ARCH_EMUL)
 /*
- * In some hosted environments, we must emulate the stack on the real
- * process stack to satisfy consistency checks in system libraries and
- * because some ABIs place trampolines on the stack.
+ * Keep track of zombie processes (processes that are exiting and need to
+ * release some resources).
  *
- * Access to this list must be protected by PROC_ATOMIC().
+ * \note Access to the list must occur while kernel preemption is disabled.
  */
-List StackFreeList;
+static List zombie_list;
 
-#define NPROC 10
-cpu_stack_t proc_stacks[NPROC][(64 * 1024) / sizeof(cpu_stack_t)];
-#endif
+#endif /* CONFIG_KERN_HEAP */
 
-/** The main process (the one that executes main()). */
-struct Process MainProcess;
-
-
-static void proc_init_struct(Process *proc)
+static void proc_initStruct(Process *proc)
 {
 	/* Avoid warning for unused argument. */
 	(void)proc;
@@ -108,84 +115,141 @@ static void proc_init_struct(Process *proc)
 #if CONFIG_KERN_PRI
 	proc->link.pri = 0;
 #endif
-
 }
 
 MOD_DEFINE(proc);
 
 void proc_init(void)
 {
-	LIST_INIT(&ProcReadyList);
+	LIST_INIT(&proc_ready_list);
 
-#if ARCH & ARCH_EMUL
-	LIST_INIT(&StackFreeList);
-	for (int i = 0; i < NPROC; i++)
-		ADDTAIL(&StackFreeList, (Node *)proc_stacks[i]);
+#if CONFIG_KERN_HEAP
+	LIST_INIT(&zombie_list);
+	heap_init(&proc_heap, heap_buf, sizeof(heap_buf));
 #endif
-
 	/*
 	 * We "promote" the current context into a real process. The only thing we have
 	 * to do is create a PCB and make it current. We don't need to setup the stack
 	 * pointer because it will be written the first time we switch to another process.
 	 */
-	proc_init_struct(&MainProcess);
-	CurrentProcess = &MainProcess;
+	proc_initStruct(&main_process);
+	current_process = &main_process;
 
 #if CONFIG_KERN_MONITOR
 	monitor_init();
-	monitor_add(CurrentProcess, "main");
+	monitor_add(current_process, "main");
 #endif
 
+	proc_schedInit();
+	MOD_INIT(proc);
+}
+
+
+#if CONFIG_KERN_HEAP
+
+/**
+ * Free all the resources of all zombie processes previously added to the
+ * zombie list.
+ */
+static void proc_freeZombies(void)
+{
+	Process *proc;
+
+	while (1)
+	{
+		PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list));
+		if (proc == NULL)
+			return;
+
+		if (proc->flags & PF_FREESTACK)
+		{
+			PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base,
+				proc->stack_size + PROC_SIZE_WORDS * sizeof(cpu_stack_t)));
+		}
+	}
+}
+
+/**
+ * Enqueue a process in the zombie list.
+ */
+static void proc_addZombie(Process *proc)
+{
+	Node *node;
 #if CONFIG_KERN_PREEMPT
-	preempt_init();
+	ASSERT(!proc_preemptAllowed());
 #endif
 
-	MOD_INIT(proc);
+#if CONFIG_KERN_PRI
+	node = &(proc)->link.link;
+#else
+	node = &(proc)->link;
+#endif
+	LIST_ASSERT_VALID(&zombie_list);
+	ADDTAIL(&zombie_list, node);
 }
 
+#endif /* CONFIG_KERN_HEAP */
+
 /**
  * Create a new process, starting at the provided entry point.
  *
+ *
+ * \note The function
+ * \code
+ * proc_new(entry, data, stacksize, stack)
+ * \endcode
+ * is a more convenient way to create a process, as you don't have to specify
+ * the name.
+ *
  * \return Process structure of the newly created process
  * if successful, NULL otherwise.
  */
 struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
 {
 	Process *proc;
-	const size_t PROC_SIZE_WORDS = ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t);
+	LOG_INFO("name=%s", name);
 #if CONFIG_KERN_HEAP
 	bool free_stack = false;
-#endif
-	TRACEMSG("name=%s", name);
 
-#if (ARCH & ARCH_EMUL)
-	/* Ignore stack provided by caller and use the large enough default instead. */
-	PROC_ATOMIC(stack_base = (cpu_stack_t *)list_remHead(&StackFreeList));
-	ASSERT(stack_base);
+	/*
+	 * Free up the resources of zombie processes.
+	 *
+	 * We're implementing a kind of lazy garbage collector here for
+	 * efficiency reasons: we avoid introducing overhead into another
+	 * kernel task dedicated to freeing up resources (e.g., idle), and we
+	 * don't introduce any overhead into the scheduler after a context
+	 * switch (that would be *very* bad, because the scheduler runs with
+	 * IRQs disabled).
+	 *
+	 * This way we can release the memory of zombie tasks without
+	 * disabling IRQs and without adding any significant overhead to any
+	 * other kernel task.
+	 */
+	proc_freeZombies();
 
-	stack_size = CONFIG_KERN_MINSTACKSIZE;
-#elif CONFIG_KERN_HEAP
 	/* Did the caller provide a stack for us? */
 	if (!stack_base)
 	{
 		/* Did the caller specify the desired stack size? */
 		if (!stack_size)
-			stack_size = CONFIG_KERN_MINSTACKSIZE;
+			stack_size = KERN_MINSTACKSIZE;
 
 		/* Allocate stack dynamically */
-		if (!(stack_base = heap_alloc(stack_size)))
+		PROC_ATOMIC(stack_base =
+			(cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
+		if (stack_base == NULL)
 			return NULL;
 
 		free_stack = true;
 	}
-#else // !ARCH_EMUL && !CONFIG_KERN_HEAP
+#else // CONFIG_KERN_HEAP
 	/* Stack must have been provided by the user */
 	ASSERT_VALID_PTR(stack_base);
 	ASSERT(stack_size);
-#endif // !ARCH_EMUL && !CONFIG_KERN_HEAP
+#endif // CONFIG_KERN_HEAP
 
 #if CONFIG_KERN_MONITOR
 	/*
@@ -203,7 +267,7 @@ struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)
 		proc = (Process *)stack_base;
 		proc->stack = stack_base + PROC_SIZE_WORDS;
 		// On some architectures the stack should be aligned, so we do it.
-		proc->stack = (void *)proc->stack + (sizeof(cpu_aligned_stack_t) - ((long)proc->stack % sizeof(cpu_aligned_stack_t)));
+		proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
 		if (CPU_SP_ON_EMPTY_SLOT)
 			proc->stack++;
 	}
@@ -211,16 +275,18 @@ struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)
 	{
 		proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
 		// On some architectures the stack should be aligned, so we do it.
-		proc->stack = (void *)proc - ((long)proc % sizeof(cpu_aligned_stack_t));
+		proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
 		if (CPU_SP_ON_EMPTY_SLOT)
 			proc->stack--;
 	}
+	/* Ensure stack is aligned */
+	ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);
 
-	stack_size = stack_size - PROC_SIZE_WORDS;
-	proc_init_struct(proc);
+	stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
+	proc_initStruct(proc);
 	proc->user_data = data;
 
-#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR | (ARCH & ARCH_EMUL)
+#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
 	proc->stack_base = stack_base;
 	proc->stack_size = stack_size;
 	#if CONFIG_KERN_HEAP
@@ -228,32 +294,12 @@ struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)
 		proc->flags |= PF_FREESTACK;
 	#endif
 #endif
+	proc->user_entry = entry;
+	CPU_CREATE_NEW_STACK(proc->stack);
 
-	#if CONFIG_KERN_PREEMPT
-
-		getcontext(&proc->context);
-		proc->context.uc_stack.ss_sp = proc->stack;
-		proc->context.uc_stack.ss_size = stack_size - 1;
-		proc->context.uc_link = NULL;
-		makecontext(&proc->context, (void (*)(void))proc_entry, 1, entry);
-
-	#else // !CONFIG_KERN_PREEMPT
-	{
-		size_t i;
-
-		/* Initialize process stack frame */
-		CPU_PUSH_CALL_FRAME(proc->stack, proc_exit);
-		CPU_PUSH_CALL_FRAME(proc->stack, entry);
-
-		/* Push a clean set of CPU registers for asm_switch_context() */
-		for (i = 0; i < CPU_SAVED_REGS_CNT; i++)
-			CPU_PUSH_WORD(proc->stack, CPU_REG_INIT_VALUE(i));
-	}
-	#endif // CONFIG_KERN_PREEMPT
-
-	#if CONFIG_KERN_MONITOR
-	monitor_add(proc, name);
-	#endif
+#if CONFIG_KERN_MONITOR
+	monitor_add(proc, name);
+#endif
 
 	/* Add to ready list */
 	ATOMIC(SCHED_ENQUEUE(proc));
 
@@ -268,12 +314,12 @@ struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)
  */
 const char *proc_name(struct Process *proc)
 {
-	#if CONFIG_KERN_MONITOR
-		return proc ? proc->monitor.name : "";
-	#else
-		(void)proc;
-		return "---";
-	#endif
+#if CONFIG_KERN_MONITOR
+	return proc ? proc->monitor.name : "";
+#else
+	(void)proc;
+	return "---";
+#endif
 }
 
 /// Return the name of the currently running process
@@ -315,58 +361,65 @@ void proc_rename(struct Process *proc, const char *name)
  */
 void proc_setPri(struct Process *proc, int pri)
 {
-	if (proc->link.pri == pri)
-		return;
+	if (proc->link.pri == pri)
+		return;
 
-	proc->link.pri = pri;
+	proc->link.pri = pri;
 
-	if (proc != CurrentProcess)
-	{
-		proc_forbid();
-		ATOMIC(sched_reenqueue(proc));
-		proc_permit();
-	}
+	if (proc != current_process)
+		ATOMIC(sched_reenqueue(proc));
 }
 
 #endif // CONFIG_KERN_PRI
 
+INLINE void proc_run(void)
+{
+	void (*entry)(void) = current_process->user_entry;
+
+	LOG_INFO("New process starting at %p", entry);
+	entry();
+}
+
+/**
+ * Entry point for all processes.
+ */
+void proc_entry(void)
+{
+	/*
+	 * Returning from a context switch assumes interrupts are disabled, so
+	 * we must explicitly re-enable them as soon as possible.
+	 */
+	IRQ_ENABLE;
+	/* Call the actual process's entry point */
+	proc_run();
+	proc_exit();
+}
+
 /**
  * Terminate the current process
  */
 void proc_exit(void)
 {
-	TRACEMSG("%p:%s", CurrentProcess, proc_currentName());
+	LOG_INFO("%p:%s", current_process, proc_currentName());
 
 #if CONFIG_KERN_MONITOR
-	monitor_remove(CurrentProcess);
+	monitor_remove(current_process);
#endif
+	proc_forbid();
 #if CONFIG_KERN_HEAP
 	/*
-	 * The following code is BROKEN.
-	 * We are freeing our own stack before entering proc_schedule()
-	 * BAJO: A correct fix would be to rearrange the scheduler with
-	 * an additional parameter which frees the old stack/process
-	 * after a context switch.
+	 * Mark the task as a zombie; its resources will be freed lazily in
+	 * proc_new(), the next time a process is created.
 	 */
-	if (CurrentProcess->flags & PF_FREESTACK)
-		heap_free(CurrentProcess->stack_base, CurrentProcess->stack_size);
-	heap_free(CurrentProcess);
+	proc_addZombie(current_process);
 #endif
+	current_process = NULL;
+	proc_permit();
 
-#if (ARCH & ARCH_EMUL)
-	/* Reinsert process stack in free list */
-	PROC_ATOMIC(ADDHEAD(&StackFreeList, (Node *)CurrentProcess->stack_base));
-
-	/*
-	 * NOTE: At this point the first two words of what used
-	 * to be our stack contain a list node. From now on, we
-	 * rely on the compiler not reading/writing the stack.
-	 */
-#endif /* ARCH_EMUL */
-
-	CurrentProcess = NULL;
 	proc_switch();
-	/* not reached */
+
+	/* never reached */
+	ASSERT(0);
 }
 
 
@@ -375,5 +428,5 @@ void proc_exit(void)
  */
 iptr_t proc_currentUserData(void)
 {
-	return CurrentProcess->user_data;
+	return current_process->user_data;
 }
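
For orientation only, not part of the diff above: a minimal sketch of how application code typically uses the process-creation interface this change touches. The header paths, the stack size, and the worker/spawn_worker names are illustrative assumptions; only proc_new_with_name(), proc_exit(), proc_currentUserData() and the lazy zombie cleanup are taken from the diff itself.

#include <kern/proc.h>   /* assumed application-side header for proc_new_with_name(), proc_exit() */
#include <cfg/debug.h>   /* assumed header for ASSERT() */

/* Statically allocated stack, required when CONFIG_KERN_HEAP is disabled.
 * With the heap enabled, stack_base may be NULL: proc.c then allocates the
 * stack from proc_heap and releases it lazily through the zombie list. */
static cpu_stack_t worker_stack[256];

static void worker(void)
{
	/* Retrieve the iptr_t passed as 'data' at creation time. */
	iptr_t arg = proc_currentUserData();
	(void)arg;

	/* ... do some work ... */

	/* Simply returning also works: proc_entry() calls proc_exit() once
	 * the user entry point returns. */
	proc_exit();
}

/* Call after proc_init(), e.g. from main(). */
static void spawn_worker(void)
{
	struct Process *p = proc_new_with_name("worker", worker, (iptr_t)0,
			sizeof(worker_stack), worker_stack);
	ASSERT(p != NULL);
}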