summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
319029b)
The current proc_yield() behaviour is to add the current process to the
ready list and call the scheduler. However, if a process is running at
the highest priority and calls proc_yield(), it is added into the ready
list but immediately re-executed by the scheduler, starving all the
other processes.
The behaviour of proc_yield() has been changed so that it first picks
the next process from the ready list and then enqueues the currently
running process into the ready list.
In this way the semantics of proc_yield() are exactly the same as
timer_delay(), or any other kind of delay that requires a context
switch.
git-svn-id: https://src.develer.com/svnoss/bertos/trunk@3442 38d2e660-2303-0410-9eaa-f027e97ec537
void coop_switch(void);
void coop_wakeup(Process *proc);
void coop_switch(void);
void coop_wakeup(Process *proc);
+static void coop_switchTo(Process *proc)
+{
+ Process *old_process = current_process;
+
+ SCHED_ENQUEUE(current_process);
+ current_process = proc;
+ proc_switchTo(current_process, old_process);
+}
+
/**
* Give the control of the CPU to another process.
*
/**
* Give the control of the CPU to another process.
*
IRQ_ASSERT_DISABLED();
if (prio_proc(proc) >= prio_curr())
IRQ_ASSERT_DISABLED();
if (prio_proc(proc) >= prio_curr())
- {
- Process *old_process = current_process;
-
- SCHED_ENQUEUE(current_process);
- current_process = proc;
- proc_switchTo(current_process, old_process);
- }
else
SCHED_ENQUEUE_HEAD(proc);
}
else
SCHED_ENQUEUE_HEAD(proc);
}
*/
void coop_yield(void)
{
*/
void coop_yield(void)
{
- ATOMIC(SCHED_ENQUEUE(current_process));
- coop_switch();
+ Process *proc;
+
+ IRQ_DISABLE;
+ proc = (struct Process *)list_remHead(&proc_ready_list);
+ if (proc)
+ coop_switchTo(proc);
+ IRQ_ENABLE;
void preempt_wakeup(Process *proc);
void preempt_init(void);
void preempt_wakeup(Process *proc);
void preempt_init(void);
+static void preempt_switchTo(Process *proc)
+{
+ Process *old_process = current_process;
+
+ SCHED_ENQUEUE(current_process);
+ _proc_quantum = CONFIG_KERN_QUANTUM;
+ current_process = proc;
+ proc_switchTo(current_process, old_process);
+}
+
/**
* Call the scheduler and eventually replace the current running process.
*/
/**
* Call the scheduler and eventually replace the current running process.
*/
IRQ_ASSERT_DISABLED();
if (prio_proc(proc) >= prio_curr())
IRQ_ASSERT_DISABLED();
if (prio_proc(proc) >= prio_curr())
- {
- Process *old_process = current_process;
-
- SCHED_ENQUEUE(current_process);
- _proc_quantum = CONFIG_KERN_QUANTUM;
- current_process = proc;
- proc_switchTo(current_process, old_process);
- }
+ preempt_switchTo(proc);
else
SCHED_ENQUEUE_HEAD(proc);
}
else
SCHED_ENQUEUE_HEAD(proc);
}
*/
void preempt_yield(void)
{
*/
void preempt_yield(void)
{
/*
* Voluntary preemption while preemption is disabled is considered
* illegal, as not very useful in practice.
/*
* Voluntary preemption while preemption is disabled is considered
* illegal, as not very useful in practice.
ASSERT(proc_preemptAllowed());
IRQ_ASSERT_ENABLED();
ASSERT(proc_preemptAllowed());
IRQ_ASSERT_ENABLED();
- ATOMIC(
- SCHED_ENQUEUE(current_process);
- preempt_schedule();
- );
+ IRQ_DISABLE;
+ proc = (struct Process *)list_remHead(&proc_ready_list);
+ if (proc)
+ preempt_switchTo(proc);
+ IRQ_ENABLE;
}
void preempt_init(void)
}
void preempt_init(void)