X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;ds=inline;f=bertos%2Fkern%2Fpreempt.c;h=1cb5e5a07f88d61f1989888855d3d8425eadb64c;hb=7b47f57c871da92ea81d28a18f91fae04f3072b3;hp=d3d9badf98f18f688c77f873c91f81aed0ba2818;hpb=ae8a609173e4490fd03875f96e388038053b9288;p=bertos.git

diff --git a/bertos/kern/preempt.c b/bertos/kern/preempt.c
index d3d9badf..1cb5e5a0 100644
--- a/bertos/kern/preempt.c
+++ b/bertos/kern/preempt.c
@@ -86,14 +86,11 @@
 #include "cfg/cfg_proc.h"
 
-#if CONFIG_KERN_PREEMPT
-
 #include "proc_p.h"
 #include "proc.h"
 
 #include
 #include
-#include // idle_proc
 #include // CPU_IDLE
 #include // IRQ_DISABLE()...
 #include
 
@@ -105,14 +102,6 @@
 CONFIG_DEPEND(CONFIG_KERN_PREEMPT, CONFIG_KERN);
 
 MOD_DEFINE(preempt)
 
-/**
- * CPU dependent context switching routines.
- *
- * Saving and restoring the context on the stack is done by a CPU-dependent
- * support routine which usually needs to be written in assembly.
- */
-EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
-
 /* Global preemption nesting counter */
 cpu_atomic_t preempt_count;
 
@@ -123,44 +112,25 @@ cpu_atomic_t preempt_count;
  */
 int _proc_quantum;
 
+/**
+ * Define function prototypes exported outside.
+ *
+ * Required to silent gcc "no previous prototype" warnings.
+ */
+void preempt_yield(void);
+int preempt_needPreempt(void);
+void preempt_preempt(void);
+void preempt_switch(void);
+void preempt_wakeup(Process *proc);
+void preempt_init(void);
+
 /**
  * Call the scheduler and eventually replace the current running process.
  */
 static void preempt_schedule(void)
 {
-	Process *old_process = current_process;
-
-	IRQ_ASSERT_DISABLED();
-
-	/* Poll on the ready queue for the first ready process */
-	LIST_ASSERT_VALID(&proc_ready_list);
-	current_process = (Process *)list_remHead(&proc_ready_list);
-	if (UNLIKELY(!current_process))
-		current_process = idle_proc;
 	_proc_quantum = CONFIG_KERN_QUANTUM;
-
-	/*
-	 * Optimization: don't switch contexts when the active process has not
-	 * changed.
-	 */
-	if (LIKELY(old_process != current_process))
-	{
-		cpu_stack_t *dummy;
-
-		/*
-		 * Save context of old process and switch to new process. If
-		 * there is no old process, we save the old stack pointer into
-		 * a dummy variable that we ignore. In fact, this happens only
-		 * when the old process has just exited.
-		 *
-		 * \todo Instead of physically clearing the process at exit
-		 * time, a zombie list should be created.
-		 */
-		asm_switch_context(&current_process->stack,
-				old_process ? &old_process->stack : &dummy);
-	}
-
-	/* This RET resumes the execution on the new process */
-	LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
+	proc_schedule();
 }
 
 /**
@@ -187,8 +157,7 @@ void preempt_preempt(void)
 	/* Perform the kernel preemption */
 	LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
 	/* We are inside a IRQ context, so ATOMIC is not needed here */
-	if (current_process != idle_proc)
-		SCHED_ENQUEUE(current_process);
+	SCHED_ENQUEUE(current_process);
 	preempt_schedule();
 }
 
@@ -203,11 +172,32 @@ void preempt_preempt(void)
 void preempt_switch(void)
 {
 	ASSERT(proc_preemptAllowed());
-	IRQ_ASSERT_ENABLED();
 
 	ATOMIC(preempt_schedule());
 }
 
+/**
+ * Immediately wakeup a process, dispatching it to the CPU.
+ */
+void preempt_wakeup(Process *proc)
+{
+	ASSERT(proc_preemptAllowed());
+	ASSERT(current_process);
+	IRQ_ASSERT_DISABLED();
+
+	if (prio_proc(proc) >= prio_curr())
+	{
+		Process *old_process = current_process;
+
+		SCHED_ENQUEUE(current_process);
+		_proc_quantum = CONFIG_KERN_QUANTUM;
+		current_process = proc;
+		proc_switchTo(current_process, old_process);
+	}
+	else
+		SCHED_ENQUEUE_HEAD(proc);
+}
+
 /**
  * Voluntarily release the CPU.
 */
@@ -230,8 +220,5 @@ void preempt_yield(void)
 
 void preempt_init(void)
 {
-	idle_init();
 	MOD_INIT(preempt);
 }
-
-#endif // CONFIG_KERN_PREEMPT
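
The patch above narrows preempt.c to the policy side of preemption (quantum accounting and ready-list handling) and delegates the actual context switch to proc_schedule()/proc_switchTo(). The sketch below is an illustration only, not part of the patch and not BeRTOS code: it shows one plausible way a periodic timer interrupt could drive the exported preempt_needPreempt()/preempt_preempt() pair. The hook name and the way it would be wired into the timer ISR are assumptions.

/*
 * Hypothetical timer hook (illustrative sketch, not from the patch).
 * Prototypes are repeated here to keep the example self-contained; in
 * the real kernel they come from the scheduler headers.
 */
int preempt_needPreempt(void);
void preempt_preempt(void);

void example_timer_tick(void)
{
	/*
	 * Runs in interrupt context: ask the scheduler whether the current
	 * process should be preempted and, if so, re-enqueue it on the
	 * ready list and switch to the next ready process.
	 */
	if (preempt_needPreempt())
		preempt_preempt();
}

Note also the design choice visible in the new preempt_wakeup(): the woken process is dispatched immediately only when its priority is greater than or equal to that of the current process; otherwise it is inserted at the head of the ready list with SCHED_ENQUEUE_HEAD(), so it is among the first candidates to run once the CPU is released.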