Kernel preemption is implemented using the PendSV IRQ. Inside the
SysTick handler, when a process expires its time quantum, a PendSV
request is triggered.
At the end of the SysTick handler the PendSV handler is invoked via the
architecture's tail-chaining functionality (an ISR call without the
overhead of state saving and restoration between different IRQs), and it
performs the stack switching.
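In code terms, every preemption-aware ISR ends with something like the
following (a sketch; see the DECLARE_ISR_CONTEXT_SWITCH() macro below):

	if (proc_needPreempt())
		/* Pend PendSV: the CPU tail-chains into the preemption
		 * handler as soon as this ISR returns. */
		HWREG(NVIC_INT_CTRL) = NVIC_INT_CTRL_PEND_SV;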
Voluntary context switches are implemented as a soft-interrupt call
(SVCall), so a process is always suspended / resumed from interrupt
context.
NOTE: when resuming a process context, interrupts must be disabled or
re-enabled depending on how the process was suspended. If a process was
suspended by a voluntary context switch, IRQs must be disabled on resume
(a voluntary context switch always happens with IRQs disabled). Instead,
if a process was suspended by kernel preemption, IRQs must always be
re-enabled, because the PendSV handler resumes the process context
directly.
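Concretely, the IRQ state travels with the process context: the switch
handlers push a software frame equivalent to the following on the
process stack (a sketch; the struct is only illustrative, the handlers
below use stmdb/ldmia directly):

	struct sw_frame
	{
		uint32_t basepri;    /* r3: IRQ state at suspension time */
		uint32_t r4_r11[8];  /* callee-saved registers */
		uint32_t exc_return; /* lr: 0xfffffffd = thread mode, PSP */
	};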
If CONFIG_KERN_PREEMPT is not enabled, the cooperative implementation
falls back to the default stack-switching mechanism, performed directly
in thread mode and implemented as a normal function call.
git-svn-id: https://src.develer.com/svnoss/bertos/trunk@3396 38d2e660-2303-0410-9eaa-f027e97ec537
#include "irq_lm3s.h"
#include "timer_lm3s.h"
-ISR_PROTO_CONTEXT_SWITCH(timer_handler);
-
INLINE void timer_hw_setPeriod(unsigned long period)
{
ASSERT(period < (1 << 24));
#define SIZEOF_HPTIME_T 4
/* Timer ISR prototype */
-#define DEFINE_TIMER_ISR void timer_handler(void); \
- void timer_handler(void)
+ISR_PROTO_CONTEXT_SWITCH(timer_handler);
+#define DEFINE_TIMER_ISR DECLARE_ISR_CONTEXT_SWITCH(timer_handler)
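+/*
+ * Typical usage (sketch): the driver keeps defining its ISR body as
+ * before, the preemption hook is hidden inside the macro.
+ *
+ *	DEFINE_TIMER_ISR
+ *	{
+ *		timer_hw_irq();
+ *		... tick bookkeeping ...
+ *	}
+ */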
INLINE void timer_hw_irq(void)
{
*/
#include <cfg/compiler.h>
+#include <cfg/cfg_proc.h> /* CONFIG_KERN_PREEMPT */
+#include <kern/proc_p.h>
#include <cfg/debug.h>
#include <cpu/attr.h> /* PAUSE */
+#include <cpu/irq.h> /* IRQ_DISABLE */
+#include <cpu/types.h>
#include "drv/irq_lm3s.h"
#include "drv/clock_lm3s.h"
#include "io/lm3s.h"
extern void __init2(void);
+#if CONFIG_KERN_PREEMPT
+/*
+ * Voluntary context switch handler.
+ */
+static void NAKED svcall_handler(void)
+{
+ asm volatile (
+ /* Save context */
+ "mrs r3, basepri\n\t"
+ "mrs ip, psp\n\t"
+ "stmdb ip!, {r3-r11, lr}\n\t"
+ /* Stack switch */
+ "str ip, [r1]\n\t"
+ "ldr ip, [r0]\n\t"
+ /* Restore context */
+ "ldmia ip!, {r3-r11, lr}\n\t"
+ "msr psp, ip\n\t"
+ "msr basepri, r3\n\t"
+ "bx lr" : : : "memory");
+}
+
+/*
+ * Preemptive context switch handler.
+ */
+static void NAKED pendsv_handler(void)
+{
+ register cpu_stack_t *stack asm("ip");
+
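+ /*
+  * Disable IRQs by raising basepri to IRQ_PRIO_DISABLED, then save
+  * the software context (basepri, r4-r11, EXC_RETURN) of the
+  * preempted process on its own stack (PSP).
+  */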
+ asm volatile (
+ "mrs r3, basepri\n\t"
+ "mov %0, %2\n\t"
+ "msr basepri, %0\n\t"
+ "mrs %0, psp\n\t"
+ "stmdb %0!, {r3-r11, lr}\n\t"
+ : "=r"(stack)
+ : "r"(stack), "i"(IRQ_PRIO_DISABLED)
+ : "r3", "memory");
+ proc_current()->stack = stack;
+ proc_preempt();
+ stack = proc_current()->stack;
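+ /*
+  * Restore the context of the new current process and return to
+  * thread mode; basepri is reloaded from the saved frame, so the
+  * process resumes with the IRQ state it was suspended with.
+  */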
+ asm volatile (
+ "ldmia %0!, {r3-r11, lr}\n\t"
+ "msr psp, %0\n\t"
+ "msr basepri, r3\n\t"
+ "bx lr"
+ : "=r"(stack) : "r"(stack)
+ : "memory");
+}
+#endif
+
/* Architecture's entry point */
void __init2(void)
{
+ /*
+ * The main application expects IRQs disabled.
+ */
+ IRQ_DISABLE;
+
/*
* PLL may not function properly at default LDO setting.
*
/* Initialize IRQ vector table in RAM */
sysirq_init();
+
+#if CONFIG_KERN_PREEMPT
+ /*
+ * Voluntary context switch handler.
+ *
+ * This software interrupt can always be triggered and must be
+ * dispatched as soon as possible, so we give it the highest IRQ
+ * priority (which can never be masked by basepri).
+ */
+ sysirq_setHandler(FAULT_SVCALL, svcall_handler);
+ sysirq_setPriority(FAULT_SVCALL, IRQ_PRIO_MAX);
+ /*
+ * Preemptive context switch handler.
+ *
+ * The priority of this IRQ must be the lowest priority in the system
+ * in order to run last in the interrupt service routines' chain.
+ */
+ sysirq_setHandler(FAULT_PENDSV, pendsv_handler);
+ sysirq_setPriority(FAULT_PENDSV, IRQ_PRIO_MIN);
+#endif
}
#define HWREG(x) (*((reg32_t *)(x)))
#define HWREGH(x) (*((reg16_t *)(x)))
#define HWREGB(x) (*((reg8_t *)(x)))
+
#define HWREGBITW(x, b) \
HWREG(((reg32_t)(x) & 0xF0000000) | 0x02000000 | \
(((reg32_t)(x) & 0x000FFFFF) << 5) | ((b) << 2))
#elif CPU_CM3
-
- #define CPU_PUSH_CALL_FRAME(sp, func) \
- do { \
- CPU_PUSH_WORD((sp), 0x01000000); /* xPSR */ \
- CPU_PUSH_WORD((sp), (cpu_stack_t)(func)); /* lr */ \
- } while (0);
+ #if CONFIG_KERN_PREEMPT
+ INLINE void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **old_sp)
+ {
+ register cpu_stack_t **__new_sp asm ("r0") = new_sp;
+ register cpu_stack_t **__old_sp asm ("r1") = old_sp;
+
+ asm volatile ("svc #0"
+ : : "r"(__new_sp), "r"(__old_sp) : "memory", "cc");
+ }
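+ /*
+  * NOTE: the explicit r0/r1 register bindings above must match the
+  * "ldr ip, [r0]" / "str ip, [r1]" operands used by svcall_handler()
+  * to perform the stack switch.
+  */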
+ #define asm_switch_context asm_switch_context
+
+ #define CPU_PUSH_CALL_FRAME(sp, func) \
+ do { \
+ CPU_PUSH_WORD((sp), 0x01000000); /* xPSR */ \
+ CPU_PUSH_WORD((sp), (cpu_stack_t)(func)); /* pc */ \
+ CPU_PUSH_WORD((sp), 0); /* lr */ \
+ CPU_PUSH_WORD((sp), 0); /* ip */ \
+ CPU_PUSH_WORD((sp), 0); /* r3 */ \
+ CPU_PUSH_WORD((sp), 0); /* r2 */ \
+ CPU_PUSH_WORD((sp), 0); /* r1 */ \
+ CPU_PUSH_WORD((sp), 0); /* r0 */ \
+ CPU_PUSH_WORD((sp), 0xfffffffd); /* lr_exc */ \
+ } while (0);
+
+ #define CPU_CREATE_NEW_STACK(stack) \
+ do { \
+ size_t i; \
+ /* Initialize process stack frame */ \
+ CPU_PUSH_CALL_FRAME(stack, proc_entry); \
+ /* Push a clean set of CPU registers for asm_switch_context() */ \
+ for (i = 0; i < CPU_SAVED_REGS_CNT; i++) \
+ CPU_PUSH_WORD(stack, CPU_REG_INIT_VALUE(i)); \
+ CPU_PUSH_WORD(stack, IRQ_PRIO_DISABLED); \
+ } while (0)
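+ /*
+  * Resulting initial process stack, from the top downwards (sketch):
+  *
+  *	xPSR = 0x01000000            Thumb bit set
+  *	pc   = proc_entry
+  *	lr, ip, r3-r0 = 0            hardware-restored frame
+  *	lr_exc = 0xfffffffd          thread mode, use PSP
+  *	r11-r4 = CPU_REG_INIT_VALUE  software-restored by ldmia
+  *	basepri = IRQ_PRIO_DISABLED  <- initial stack pointer
+  */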
+
+ #else /* !CONFIG_KERN_PREEMPT */
+ #define CPU_PUSH_CALL_FRAME(sp, func) \
+ do { \
+ CPU_PUSH_WORD((sp), 0x01000000); /* xPSR */ \
+ CPU_PUSH_WORD((sp), (cpu_stack_t)(func)); /* pc */ \
+ } while (0);
+ #endif /* CONFIG_KERN_PREEMPT */
#elif CPU_AVR
/*
#elif CPU_CM3
/* Cortex-M3 */
- #define IRQ_DISABLE asm volatile ("cpsid i" : : : "memory", "cc")
- #define IRQ_ENABLE asm volatile ("cpsie i" : : : "memory", "cc")
-
- #define IRQ_SAVE_DISABLE(x) \
+ /*
+ * Interrupt priorities (on the Cortex-M3 a lower number means a
+ * higher priority).
+ *
+ * NOTE: an interrupt with priority 0 can never be masked by the
+ * basepri-based IRQ disabling implemented below.
+ */
+ #define IRQ_PRIO 0x80
+ #define IRQ_PRIO_MIN 0xf0
+ #define IRQ_PRIO_MAX 0
+ /*
+ * To disable interrupts we raise the system base priority, i.e. we
+ * write to basepri a value numerically lower than the default IRQ
+ * priority. In this way, none of the "normal" interrupts can be
+ * triggered. Higher-priority interrupts can still happen (at the
+ * moment only the SVCall soft-interrupt uses a priority above the
+ * default IRQ priority).
+ *
+ * To enable interrupts we set the system base priority to 0, which
+ * disables the priority masking altogether, so any interrupt can
+ * happen.
+ */
+ #define IRQ_PRIO_DISABLED 0x40
+ #define IRQ_PRIO_ENABLED 0
+
+ #define IRQ_DISABLE \
({ \
+ register cpu_flags_t reg = IRQ_PRIO_DISABLED; \
asm volatile ( \
- "mrs %0, PRIMASK\n" \
- "cpsid i" \
- : "=r" (x) : : "memory", "cc"); \
+ "msr basepri, %0" \
+ : : "r"(reg) : "memory", "cc"); \
})
- #define IRQ_RESTORE(x) \
+ #define IRQ_ENABLE \
({ \
- if (x) \
- IRQ_DISABLE; \
- else \
- IRQ_ENABLE; \
+ register cpu_flags_t reg = IRQ_PRIO_ENABLED; \
+ asm volatile ( \
+ "msr basepri, %0" \
+ : : "r"(reg) : "memory", "cc"); \
})
#define CPU_READ_FLAGS() \
({ \
- cpu_flags_t sreg; \
+ register cpu_flags_t reg; \
+ asm volatile ( \
+ "mrs %0, basepri" \
+ : "=r"(reg) : : "memory", "cc"); \
+ reg; \
+ })
+
+ #define IRQ_SAVE_DISABLE(x) \
+ ({ \
+ x = CPU_READ_FLAGS(); \
+ IRQ_DISABLE; \
+ })
+
+ #define IRQ_RESTORE(x) \
+ ({ \
asm volatile ( \
- "mrs %0, PRIMASK\n\t" \
- : "=r" (sreg) : : "memory", "cc"); \
- sreg; \
+ "msr basepri, %0" \
+ : : "r"(x) : "memory", "cc"); \
})
- #define IRQ_ENABLED() (!CPU_READ_FLAGS())
+ #define IRQ_ENABLED() (CPU_READ_FLAGS() == IRQ_PRIO_ENABLED)
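+ /*
+  * Typical usage of the basepri-based primitives (sketch):
+  *
+  *	cpu_flags_t flags;
+  *
+  *	IRQ_SAVE_DISABLE(flags);
+  *	... critical section: "normal" IRQs are masked, SVCall
+  *	    can still be taken ...
+  *	IRQ_RESTORE(flags);
+  */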
- /* TODO: context switch is not yet supported */
- #define DECLARE_ISR_CONTEXT_SWITCH(func) void func(void)
+ INLINE bool irq_running(void)
+ {
+ register uint32_t ret;
+
+ /*
+ * Check if the current stack pointer is the main stack or
+ * process stack: we use the main stack only in Handler mode,
+ * so this means we're running inside an ISR.
+ */
+ asm volatile (
+ "mrs %0, msp\n\t"
+ "cmp sp, %0\n\t"
+ "ite ne\n\t"
+ "movne %0, #0\n\t"
+ "moveq %0, #1\n\t" : "=r"(ret) : : "cc");
+ return ret;
+ }
+ #define IRQ_RUNNING() irq_running()
+
+ #if CONFIG_KERN_PREEMPT
+
+ #define DECLARE_ISR_CONTEXT_SWITCH(func) \
+ INLINE void __isr_##func(void); \
+ void func(void) \
+ { \
+ __isr_##func(); \
+ if (!proc_needPreempt()) \
+ return; \
+ /*
+ * Set a PendSV request.
+ *
+ * The preemption handler will be called immediately
+ * after this ISR in tail-chaining mode (without the
+ * overhead of hardware state saving and restoration
+ * between interrupts).
+ */ \
+ HWREG(NVIC_INT_CTRL) = NVIC_INT_CTRL_PEND_SV; \
+ } \
+ INLINE void __isr_##func(void)
+
+ /**
+ * With task priorities enabled, each ISR is used as a
+ * preemption point: at its end we check whether a context
+ * switch is needed.
+ *
+ * Without priorities, instead, a context switch can occur
+ * only when the running task expires its time quantum. In
+ * that case, the switch can only happen in the timer ISR,
+ * which must always be declared with the
+ * DECLARE_ISR_CONTEXT_SWITCH() macro.
+ */
+ #if CONFIG_KERN_PRI
+ #define DECLARE_ISR(func) \
+ DECLARE_ISR_CONTEXT_SWITCH(func)
+ /**
+ * Interrupt service routine prototype: can be used for
+ * forward declarations.
+ */
+ #define ISR_PROTO(func) \
+ ISR_PROTO_CONTEXT_SWITCH(func)
+ #endif /* CONFIG_KERN_PRI */
+ #endif
+
+ #ifndef ISR_PROTO
+ #define ISR_PROTO(func) void func(void)
+ #endif
+ #ifndef DECLARE_ISR
+ #define DECLARE_ISR(func) void func(void)
+ #endif
+ #ifndef DECLARE_ISR_CONTEXT_SWITCH
+ #define DECLARE_ISR_CONTEXT_SWITCH(func) void func(void)
+ #endif
+ #ifndef ISR_PROTO_CONTEXT_SWITCH
+ #define ISR_PROTO_CONTEXT_SWITCH(func) void func(void)
+ #endif
- /* TODO: context switch is not yet supported */
- #define ISR_PROTO_CONTEXT_SWITCH(func) void func(void)
#elif CPU_ARM
#ifdef __IAR_SYSTEMS_ICC__
#define DECLARE_ISR_CONTEXT_SWITCH(vect) ISR(vect)
#endif
#ifndef ISR_PROTO_CONTEXT_SWITCH
- #define ISR_PROTO_CONTEXT_SWITCH(func) ISR(vect)
+ #define ISR_PROTO_CONTEXT_SWITCH(vect) ISR(vect)
#endif
#else
/// Ensure callee is not running within an interrupt
#define ASSERT_USER_CONTEXT() ASSERT(!IRQ_RUNNING())
#else
+ #define IRQ_RUNNING() false
#define ASSERT_USER_CONTEXT() do {} while(0)
#define ASSERT_IRQ_CONTEXT() do {} while(0)
#endif
return 0;
if (!proc_preemptAllowed())
return 0;
+ if (LIST_EMPTY(&proc_ready_list))
+ return 0;
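+	/*
+	 * While the time quantum is not yet expired, preempt only in
+	 * favor of a strictly higher priority process; once it expires,
+	 * also round-robin with ready processes of the same priority.
+	 */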
return _proc_quantum ? prio_next() > prio_curr() :
prio_next() >= prio_curr();
}
MEMORY_BARRIER;
IRQ_DISABLE;
}
- proc_switchTo(current_process, old_process);
+ if (CONTEXT_SWITCH_FROM_ISR())
+ proc_switchTo(current_process, old_process);
/* This RET resumes the execution on the new process */
LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
}
#include <kern/proc.h> // struct Process
+/*
+ * Check if the process context switch can be performed directly by the
+ * architecture-dependent asm_switch_context() or if it must be delayed
+ * because we're in the middle of an ISR.
+ *
+ * Return true if asm_switch_context() can be executed, false
+ * otherwise.
+ *
+ * NOTE: if an architecture does not implement IRQ_RUNNING() this function
+ * always returns true.
+ */
+#define CONTEXT_SWITCH_FROM_ISR() (!IRQ_RUNNING())
+
+#ifndef asm_switch_context
/**
* CPU dependent context switching routines.
*
* support routine which usually needs to be written in assembly.
*/
EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
+#endif
/*
* Save context of old process and switch to new process.