X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;f=bertos%2Fkern%2Fpreempt.c;h=f77dd85eff1c632bbd73adf1053b350140cfc1ca;hb=938512200c9fbd8e08c88dbdddbdc1f030d3b663;hp=3037c15cc47f4258fb342e896b15c3937d054cbb;hpb=9d3c2ea8a3b385401bc272b18110620e03da8ba2;p=bertos.git

diff --git a/bertos/kern/preempt.c b/bertos/kern/preempt.c
index 3037c15c..f77dd85e 100644
--- a/bertos/kern/preempt.c
+++ b/bertos/kern/preempt.c
@@ -39,7 +39,7 @@
  * the time sharing interval.
  *
  * When the quantum expires the handler proc_needPreempt() checks if the
- * preemption is enabled and in this case proc_schedule() is called, that
+ * preemption is enabled and in this case preempt_schedule() is called, which
  * possibly replaces the current running thread with a different one.
  *
  * The preemption can be disabled or enabled via proc_forbid() and
@@ -86,14 +86,11 @@
 
 #include "cfg/cfg_proc.h"
 
-#if CONFIG_KERN_PREEMPT
-
 #include "proc_p.h"
 #include "proc.h"
 
 #include
 #include
-#include  // idle_proc
 #include  // CPU_IDLE
 #include  // IRQ_DISABLE()...
 #include
@@ -105,14 +102,6 @@ CONFIG_DEPEND(CONFIG_KERN_PREEMPT, CONFIG_KERN);
 
 MOD_DEFINE(preempt)
 
-/**
- * CPU dependent context switching routines.
- *
- * Saving and restoring the context on the stack is done by a CPU-dependent
- * support routine which usually needs to be written in assembly.
- */
-EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
-
 /* Global preemption nesting counter */
 cpu_atomic_t preempt_count;
 
@@ -123,50 +112,30 @@ cpu_atomic_t preempt_count;
  */
 int _proc_quantum;
 
+/**
+ * Function prototypes exported outside this module.
+ *
+ * Required to silence gcc "no previous prototype" warnings.
+ */
+void preempt_yield(void);
+int preempt_needPreempt(void);
+void preempt_preempt(void);
+void preempt_switch(void);
+void preempt_init(void);
+
 /**
  * Call the scheduler and possibly replace the current running process.
  */
-static void proc_schedule(void)
+static void preempt_schedule(void)
 {
-	Process *old_process = current_process;
-
-	IRQ_ASSERT_DISABLED();
-
-	/* Poll on the ready queue for the first ready process */
-	LIST_ASSERT_VALID(&proc_ready_list);
-	current_process = (Process *)list_remHead(&proc_ready_list);
-	if (UNLIKELY(!current_process))
-		current_process = idle_proc;
 	_proc_quantum = CONFIG_KERN_QUANTUM;
-	/*
-	 * Optimization: don't switch contexts when the active process has not
-	 * changed.
-	 */
-	if (LIKELY(old_process != current_process))
-	{
-		cpu_stack_t *dummy;
-
-		/*
-		 * Save context of old process and switch to new process. If
-		 * there is no old process, we save the old stack pointer into
-		 * a dummy variable that we ignore. In fact, this happens only
-		 * when the old process has just exited.
-		 *
-		 * \todo Instead of physically clearing the process at exit
-		 * time, a zombie list should be created.
-		 */
-		asm_switch_context(&current_process->stack,
-				old_process ? &old_process->stack : &dummy);
-	}
-
-	/* This RET resumes the execution on the new process */
-	LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
+	proc_schedule();
 }
 
 /**
  * Check if we need to schedule another task.
  */
-int proc_needPreempt(void)
+int preempt_needPreempt(void)
 {
 	if (UNLIKELY(current_process == NULL))
 		return 0;
@@ -179,7 +148,7 @@ int proc_needPreempt(void)
 /**
  * Preempt the current task.
  */
-void proc_preempt(void)
+void preempt_preempt(void)
 {
 	IRQ_ASSERT_DISABLED();
 	ASSERT(current_process);
@@ -187,9 +156,8 @@ void preempt_preempt(void)
 	/* Perform the kernel preemption */
 	LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
 	/* We are inside an IRQ context, so ATOMIC is not needed here */
-	if (current_process != idle_proc)
-		SCHED_ENQUEUE(current_process);
-	proc_schedule();
+	SCHED_ENQUEUE(current_process);
+	preempt_schedule();
 }
 
 /**
@@ -200,18 +168,18 @@ void proc_preempt(void)
  * \warning This should be considered an internal kernel function; even though
  * calling it is allowed, usage from application code is strongly discouraged.
  */
-void proc_switch(void)
+void preempt_switch(void)
 {
 	ASSERT(proc_preemptAllowed());
 	IRQ_ASSERT_ENABLED();
-	ATOMIC(proc_schedule());
+	ATOMIC(preempt_schedule());
 }
 
 /**
  * Voluntarily release the CPU.
  */
-void proc_yield(void)
+void preempt_yield(void)
 {
 	/*
	 * Voluntary preemption while preemption is disabled is considered
@@ -224,14 +192,11 @@ void proc_yield(void)
 
 	ATOMIC(
 		SCHED_ENQUEUE(current_process);
-		proc_schedule();
+		preempt_schedule();
 	);
}
 
 void preempt_init(void)
 {
-	idle_init();
 	MOD_INIT(preempt);
 }
-
-#endif // CONFIG_KERN_PREEMPT
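
The patch above renames the preemptive scheduler's entry points to preempt_*-prefixed functions and hands the actual ready-queue handling and context switch over to the scheduler-neutral proc_schedule(), dropping the idle-process special-casing from this file. A minimal usage sketch follows, in the spirit of the module's own doc comment: a timer tick consumes the quantum via preempt_needPreempt() and triggers preempt_preempt() when the slice is exhausted, while proc_forbid()/proc_permit() bracket code that must not be preempted. The hook and counter names below are hypothetical illustrations, not BeRTOS API; the preempt_* prototypes are the ones added by this patch, and proc_forbid()/proc_permit() come from kern/proc.h.

/*
 * Illustration only, not part of the patch.  Assumes CONFIG_KERN_PREEMPT
 * is enabled; timer_tick_hook() and shared_counter are hypothetical.
 */
#include "cfg/cfg_proc.h"
#include <kern/proc.h>  /* proc_forbid(), proc_permit() */

/* Entry points introduced by the patch (defined in preempt.c). */
int preempt_needPreempt(void);
void preempt_preempt(void);

static int shared_counter;

/* Hypothetical hook run from the timer interrupt with IRQs disabled. */
void timer_tick_hook(void)
{
	/*
	 * preempt_needPreempt() accounts one tick of the running
	 * process's quantum and returns nonzero once the time slice is
	 * used up and preemption is currently allowed.
	 */
	if (preempt_needPreempt())
		preempt_preempt();  /* re-enqueue current process, reschedule */
}

/* A short critical section shielded from preemption. */
void counter_bump(void)
{
	proc_forbid();    /* bumps preempt_count: the scheduler stays out */
	shared_counter++;
	proc_permit();    /* drops preempt_count: preemption allowed again */
}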