Merge from trunk.
[bertos.git] / bertos / kern / preempt.c
index 3037c15cc47f4258fb342e896b15c3937d054cbb..1cb5e5a07f88d61f1989888855d3d8425eadb64c 100644 (file)
@@ -39,7 +39,7 @@
  * the time sharing interval.
  *
  * When the quantum expires the handler proc_needPreempt() checks if the
- * preemption is enabled and in this case proc_schedule() is called, that
+ * preemption is enabled and in this case preempt_schedule() is called, that
  * possibly replaces the current running thread with a different one.
  *
  * The preemption can be disabled or enabled via proc_forbid() and
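
For reference, the forbid/permit pair named above nests, so a critical section protected from kernel preemption can be sketched as below. This is a minimal illustration of that API, assuming the usual <kern/proc.h> header; the shared data and function name are hypothetical:

    #include <kern/proc.h>

    static int shared_counter;           /* hypothetical shared state */

    static void update_counter(void)
    {
            proc_forbid();               /* disable preemption; calls may nest */
            shared_counter++;            /* safe from other threads, not from IRQs */
            proc_permit();               /* preemption resumes when the count drops to 0 */
    }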
 
 #include "cfg/cfg_proc.h"
 
-#if CONFIG_KERN_PREEMPT
-
 #include "proc_p.h"
 #include "proc.h"
 
 #include <kern/irq.h>
 #include <kern/monitor.h>
-#include <kern/idle.h> // idle_proc
 #include <cpu/frame.h> // CPU_IDLE
 #include <cpu/irq.h>   // IRQ_DISABLE()...
 #include <cfg/log.h>
@@ -105,14 +102,6 @@ CONFIG_DEPEND(CONFIG_KERN_PREEMPT, CONFIG_KERN);
 
 MOD_DEFINE(preempt)
 
-/**
- * CPU dependent context switching routines.
- *
- * Saving and restoring the context on the stack is done by a CPU-dependent
- * support routine which usually needs to be written in assembly.
- */
-EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
-
 /* Global preemption nesting counter */
 cpu_atomic_t preempt_count;
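
This counter is what proc_forbid() and proc_permit() manipulate. Conceptually the three operations reduce to the sketch below; the real inline definitions live in the proc headers and may add compiler barriers, so this is a simplified model only:

    /* Simplified model of the nesting counter, for illustration only. */
    INLINE void proc_forbid(void)         { preempt_count++; }
    INLINE void proc_permit(void)         { preempt_count--; }
    INLINE bool proc_preemptAllowed(void) { return preempt_count == 0; }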
 
@@ -123,50 +112,31 @@ cpu_atomic_t preempt_count;
  */
 int _proc_quantum;
 
+/**
+ * Prototypes for the functions exported outside this module.
+ *
+ * Required to silence gcc's "no previous prototype" warnings.
+ */
+void preempt_yield(void);
+int preempt_needPreempt(void);
+void preempt_preempt(void);
+void preempt_switch(void);
+void preempt_wakeup(Process *proc);
+void preempt_init(void);
+
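
The prototype-before-definition pattern above targets gcc's -Wmissing-prototypes; a minimal reproduction of the warning being silenced, with hypothetical file and function names:

    /* foo.c -- build with: gcc -c -Wmissing-prototypes foo.c */
    void foo(void);      /* without this declaration gcc emits:
                          * "warning: no previous prototype for 'foo'" */
    void foo(void)
    {
    }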
 /**
  * Call the scheduler and possibly replace the current running process.
  */
-static void proc_schedule(void)
+static void preempt_schedule(void)
 {
-       Process *old_process = current_process;
-
-       IRQ_ASSERT_DISABLED();
-
-       /* Poll on the ready queue for the first ready process */
-       LIST_ASSERT_VALID(&proc_ready_list);
-       current_process = (Process *)list_remHead(&proc_ready_list);
-       if (UNLIKELY(!current_process))
-               current_process = idle_proc;
        _proc_quantum = CONFIG_KERN_QUANTUM;
-       /*
-        * Optimization: don't switch contexts when the active process has not
-        * changed.
-        */
-       if (LIKELY(old_process != current_process))
-       {
-               cpu_stack_t *dummy;
-
-               /*
-                * Save context of old process and switch to new process. If
-                * there is no old process, we save the old stack pointer into
-                * a dummy variable that we ignore. In fact, this happens only
-                * when the old process has just exited.
-                *
-                * \todo Instead of physically clearing the process at exit
-                * time, a zombie list should be created.
-                */
-               asm_switch_context(&current_process->stack,
-                               old_process ? &old_process->stack : &dummy);
-       }
-
-       /* This RET resumes the execution on the new process */
-       LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
+       proc_schedule();
 }
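
The scheduling logic removed above now lives in the generic proc_schedule(). Reconstructed from those removed lines (minus the idle fallback, which this merge drops), its job is roughly the sketch below; this is not the literal new implementation, and the ready queue is assumed non-empty:

    static void schedule_sketch(void)
    {
            Process *old_process = current_process;

            IRQ_ASSERT_DISABLED();

            /* Pick the first ready process from the run queue. */
            current_process = (Process *)list_remHead(&proc_ready_list);

            /* Switch contexts only when the active process has changed. */
            if (LIKELY(old_process != current_process))
                    proc_switchTo(current_process, old_process);
    }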
 
 /**
  * Check if we need to schedule another task.
  */
-int proc_needPreempt(void)
+int preempt_needPreempt(void)
 {
        if (UNLIKELY(current_process == NULL))
                return 0;
@@ -179,7 +149,7 @@ int proc_needPreempt(void)
 /**
  * Preempt the current task.
  */
-void proc_preempt(void)
+void preempt_preempt(void)
 {
        IRQ_ASSERT_DISABLED();
        ASSERT(current_process);
@@ -187,9 +157,8 @@ void proc_preempt(void)
        /* Perform the kernel preemption */
        LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
        /* We are inside an IRQ context, so ATOMIC is not needed here */
-       if (current_process != idle_proc)
-               SCHED_ENQUEUE(current_process);
-       proc_schedule();
+       SCHED_ENQUEUE(current_process);
+       preempt_schedule();
 }
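
These two entry points are meant to be driven by the system tick described in the header comment. The wiring can be sketched as below; the hook name is hypothetical, and in the real kernel the timer driver presumably dispatches through the generic proc_needPreempt()/proc_preempt() wrappers:

    /* Sketch: invoked from the timer interrupt on every tick. */
    static void tick_hook(void)
    {
            IRQ_ASSERT_DISABLED();        /* we run in interrupt context */

            if (preempt_needPreempt())    /* quantum expired, preemption allowed? */
                    preempt_preempt();    /* re-enqueue current process, reschedule */
    }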
 
 /**
@@ -200,18 +169,39 @@ void proc_preempt(void)
  * \warning This should be considered an internal kernel function; even though
  * calling it from application code is allowed, doing so is strongly discouraged.
  */
-void proc_switch(void)
+void preempt_switch(void)
 {
        ASSERT(proc_preemptAllowed());
-       IRQ_ASSERT_ENABLED();
 
-       ATOMIC(proc_schedule());
+       ATOMIC(preempt_schedule());
+}
+
+/**
+ * Immediately wake up a process, dispatching it to the CPU.
+ */
+void preempt_wakeup(Process *proc)
+{
+       ASSERT(proc_preemptAllowed());
+       ASSERT(current_process);
+       IRQ_ASSERT_DISABLED();
+
+       if (prio_proc(proc) >= prio_curr())
+       {
+               Process *old_process = current_process;
+
+               SCHED_ENQUEUE(current_process);
+               _proc_quantum = CONFIG_KERN_QUANTUM;
+               current_process = proc;
+               proc_switchTo(current_process, old_process);
+       }
+       else
+               SCHED_ENQUEUE_HEAD(proc);
 }
 
 /**
  * Voluntarily release the CPU.
  */
-void proc_yield(void)
+void preempt_yield(void)
 {
        /*
         * Voluntary preemption while preemption is disabled is considered
@@ -224,14 +214,11 @@ void proc_yield(void)
 
        ATOMIC(
                SCHED_ENQUEUE(current_process);
-               proc_schedule();
+               preempt_schedule();
        );
 }
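
A minimal cooperative-use sketch of the yield primitive; the polling flag is illustrative and the public entry point is assumed to remain proc_yield():

    static volatile bool data_ready;    /* hypothetical flag set by an IRQ handler */

    static void wait_for_data(void)
    {
            while (!data_ready)
                    proc_yield();       /* release the CPU to other ready processes */
    }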
 
 void preempt_init(void)
 {
-       idle_init();
        MOD_INIT(preempt);
 }
-
-#endif // CONFIG_KERN_PREEMPT