* the time sharing interval.
*
* When the quantum expires the handler proc_needPreempt() checks if the
- * preemption is enabled and in this case proc_schedule() is called, that
+ * preemption is enabled and in this case preempt_schedule() is called, that
* possibly replaces the current running thread with a different one.
*
* The preemption can be disabled or enabled via proc_forbid() and
#include "cfg/cfg_proc.h"
-#if CONFIG_KERN_PREEMPT
-
#include "proc_p.h"
#include "proc.h"
#include <kern/irq.h>
#include <kern/monitor.h>
-#include <kern/idle.h> // idle_proc
#include <cpu/frame.h> // CPU_IDLE
#include <cpu/irq.h> // IRQ_DISABLE()...
#include <cfg/log.h>
MOD_DEFINE(preempt)
-/**
- * CPU dependent context switching routines.
- *
- * Saving and restoring the context on the stack is done by a CPU-dependent
- * support routine which usually needs to be written in assembly.
- */
-EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
-
/* Global preemption nesting counter */
cpu_atomic_t preempt_count;
*/
int _proc_quantum;
+/**
+ * Define function prototypes exported outside.
+ *
+ * Required to silence gcc "no previous prototype" warnings.
+ */
+void preempt_yield(void);
+int preempt_needPreempt(void);
+void preempt_preempt(void);
+void preempt_switch(void);
+void preempt_init(void);
+
/**
* Call the scheduler and eventually replace the current running process.
*/
-static void proc_schedule(void)
+static void preempt_schedule(void)
{
- Process *old_process = current_process;
-
- IRQ_ASSERT_DISABLED();
-
- /* Poll on the ready queue for the first ready process */
- LIST_ASSERT_VALID(&proc_ready_list);
- current_process = (Process *)list_remHead(&proc_ready_list);
- if (UNLIKELY(!current_process))
- current_process = idle_proc;
_proc_quantum = CONFIG_KERN_QUANTUM;
- /*
- * Optimization: don't switch contexts when the active process has not
- * changed.
- */
- if (LIKELY(old_process != current_process))
- {
- cpu_stack_t *dummy;
-
- /*
- * Save context of old process and switch to new process. If
- * there is no old process, we save the old stack pointer into
- * a dummy variable that we ignore. In fact, this happens only
- * when the old process has just exited.
- *
- * \todo Instead of physically clearing the process at exit
- * time, a zombie list should be created.
- */
- asm_switch_context(&current_process->stack,
- old_process ? &old_process->stack : &dummy);
- }
-
- /* This RET resumes the execution on the new process */
- LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
+ proc_schedule();
}
/**
* Check if we need to schedule another task
*/
-int proc_needPreempt(void)
+int preempt_needPreempt(void)
{
if (UNLIKELY(current_process == NULL))
return 0;
/**
* Preempt the current task.
*/
-void proc_preempt(void)
+void preempt_preempt(void)
{
IRQ_ASSERT_DISABLED();
ASSERT(current_process);
/* Perform the kernel preemption */
LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
/* We are inside a IRQ context, so ATOMIC is not needed here */
- if (current_process != idle_proc)
- SCHED_ENQUEUE(current_process);
- proc_schedule();
+ SCHED_ENQUEUE(current_process);
+ preempt_schedule();
}
/**
* \warning This should be considered an internal kernel function, even if it
* is allowed, usage from application code is strongly discouraged.
*/
-void proc_switch(void)
+void preempt_switch(void)
{
ASSERT(proc_preemptAllowed());
IRQ_ASSERT_ENABLED();
- ATOMIC(proc_schedule());
+ ATOMIC(preempt_schedule());
}
/**
* Voluntarily release the CPU.
*/
-void proc_yield(void)
+void preempt_yield(void)
{
/*
* Voluntary preemption while preemption is disabled is considered
ATOMIC(
SCHED_ENQUEUE(current_process);
- proc_schedule();
+ preempt_schedule();
);
}
void preempt_init(void)
{
- idle_init();
MOD_INIT(preempt);
}
-
-#endif // CONFIG_KERN_PREEMPT