From: arighi
Date: Fri, 16 Apr 2010 15:45:54 +0000 (+0000)
Subject: proc: change proc_yield() behaviour.
X-Git-Tag: 2.5.0~466
X-Git-Url: https://codewiz.org/gitweb?a=commitdiff_plain;h=6f94c02fb352edc96748561f757c35c61bbd6ed1;p=bertos.git

proc: change proc_yield() behaviour.

The current proc_yield() behaviour is to add the current process to the
ready list and then call the scheduler. However, if a process running at
the highest priority calls proc_yield(), it is added to the ready list
but immediately re-executed by the scheduler, starving all the other
processes.

The behaviour of proc_yield() has been changed so that it first picks
the next process from the ready list and then enqueues the current
running process into the ready list. In this way the semantics of
proc_yield() are exactly the same as timer_delay(), or any other kind of
delay that requires a context switch.

git-svn-id: https://src.develer.com/svnoss/bertos/trunk@3442 38d2e660-2303-0410-9eaa-f027e97ec537
---

diff --git a/bertos/kern/coop.c b/bertos/kern/coop.c
index 542ed678..c5cab0ed 100644
--- a/bertos/kern/coop.c
+++ b/bertos/kern/coop.c
@@ -59,6 +59,15 @@ void coop_yield(void);
 void coop_switch(void);
 void coop_wakeup(Process *proc);
 
+static void coop_switchTo(Process *proc)
+{
+	Process *old_process = current_process;
+
+	SCHED_ENQUEUE(current_process);
+	current_process = proc;
+	proc_switchTo(current_process, old_process);
+}
+
 /**
  * Give the control of the CPU to another process.
  *
@@ -82,13 +91,7 @@ void coop_wakeup(Process *proc)
 	IRQ_ASSERT_DISABLED();
 
 	if (prio_proc(proc) >= prio_curr())
-	{
-		Process *old_process = current_process;
-
-		SCHED_ENQUEUE(current_process);
-		current_process = proc;
-		proc_switchTo(current_process, old_process);
-	}
+		coop_switchTo(proc);
 	else
 		SCHED_ENQUEUE_HEAD(proc);
 }
@@ -98,6 +101,11 @@ void coop_wakeup(Process *proc)
  */
 void coop_yield(void)
 {
-	ATOMIC(SCHED_ENQUEUE(current_process));
-	coop_switch();
+	Process *proc;
+
+	IRQ_DISABLE;
+	proc = (struct Process *)list_remHead(&proc_ready_list);
+	if (proc)
+		coop_switchTo(proc);
+	IRQ_ENABLE;
 }
diff --git a/bertos/kern/preempt.c b/bertos/kern/preempt.c
index 4b5e66c3..bdb6d3c9 100644
--- a/bertos/kern/preempt.c
+++ b/bertos/kern/preempt.c
@@ -124,6 +124,16 @@ void preempt_switch(void);
 void preempt_wakeup(Process *proc);
 void preempt_init(void);
 
+static void preempt_switchTo(Process *proc)
+{
+	Process *old_process = current_process;
+
+	SCHED_ENQUEUE(current_process);
+	_proc_quantum = CONFIG_KERN_QUANTUM;
+	current_process = proc;
+	proc_switchTo(current_process, old_process);
+}
+
 /**
  * Call the scheduler and eventually replace the current running process.
  */
@@ -188,14 +198,7 @@ void preempt_wakeup(Process *proc)
 	IRQ_ASSERT_DISABLED();
 
 	if (prio_proc(proc) >= prio_curr())
-	{
-		Process *old_process = current_process;
-
-		SCHED_ENQUEUE(current_process);
-		_proc_quantum = CONFIG_KERN_QUANTUM;
-		current_process = proc;
-		proc_switchTo(current_process, old_process);
-	}
+		preempt_switchTo(proc);
 	else
 		SCHED_ENQUEUE_HEAD(proc);
 }
@@ -205,6 +208,8 @@ void preempt_wakeup(Process *proc)
  */
 void preempt_yield(void)
 {
+	Process *proc;
+
 	/*
 	 * Voluntary preemption while preemption is disabled is considered
 	 * illegal, as not very useful in practice.
@@ -214,10 +219,11 @@ void preempt_yield(void)
 	ASSERT(proc_preemptAllowed());
 	IRQ_ASSERT_ENABLED();
 
-	ATOMIC(
-		SCHED_ENQUEUE(current_process);
-		preempt_schedule();
-	);
+	IRQ_DISABLE;
+	proc = (struct Process *)list_remHead(&proc_ready_list);
+	if (proc)
+		preempt_switchTo(proc);
+	IRQ_ENABLE;
 }
 
 void preempt_init(void)
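
Below is a minimal stand-alone sketch of the ordering change, assuming a
priority-ordered ready list. It is not BertOS code: Proc, enqueue(),
rem_head(), yield_old() and yield_new() are simplified stand-ins for
Process, SCHED_ENQUEUE(), list_remHead() and the old/new proc_yield()
paths. It only shows why picking the next process before re-enqueuing the
caller lets another queued process finally run.

#include <stdio.h>
#include <stddef.h>

/* Simplified process descriptor and ready list (head = highest priority). */
typedef struct Proc
{
	const char *name;
	int prio;
	struct Proc *next;
} Proc;

static Proc *ready_list;
static Proc *current;

/* Insert by descending priority, FIFO within the same priority
 * (roughly what SCHED_ENQUEUE() does). */
static void enqueue(Proc *p)
{
	Proc **link = &ready_list;

	while (*link && (*link)->prio >= p->prio)
		link = &(*link)->next;
	p->next = *link;
	*link = p;
}

/* Remove and return the list head (roughly what list_remHead() does). */
static Proc *rem_head(void)
{
	Proc *p = ready_list;

	if (p)
		ready_list = p->next;
	return p;
}

/* Old yield ordering: enqueue the caller first, then pick the head.
 * A caller running at the highest priority is re-picked immediately. */
static void yield_old(void)
{
	enqueue(current);
	current = rem_head();
}

/* New yield ordering: pick the next ready process first, then enqueue
 * the caller, as coop_switchTo()/preempt_switchTo() do in the patch. */
static void yield_new(void)
{
	Proc *next = rem_head();

	if (next)
	{
		enqueue(current);
		current = next;
	}
}

int main(void)
{
	Proc high = { "high", 10, NULL };
	Proc low  = { "low",   1, NULL };

	/* Old ordering: the high-priority yielder keeps the CPU. */
	current = &high;
	ready_list = NULL;
	enqueue(&low);
	yield_old();
	printf("old yield runs: %s\n", current->name);  /* prints "high" */

	/* New ordering: the queued process finally gets the CPU. */
	current = &high;
	ready_list = NULL;
	enqueue(&low);
	yield_new();
	printf("new yield runs: %s\n", current->name);  /* prints "low" */

	return 0;
}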