#define CONFIG_KERN_HEAP 0
/**
- * Preemptive process scheduling. WARNING: Experimental, still incomplete!
+ * Preemptive process scheduling.
*
* $WIZ$ type = "boolean"
- * $WIZ$ supports = "False"
+ * $WIZ$ conditional_deps = "timer", "idle"
*/
#define CONFIG_KERN_PREEMPT 0
#ifndef CFG_OS_H
#define CFG_OS_H
+#include "cfg/cfg_proc.h"
+
/*
* OS autodetection (Some systems trigger multiple OS definitions)
*/
sigismember(&sigs__, SIGALRM) ? false : true; \
})
+ #if CONFIG_KERN_PREEMPT
+ #define DECLARE_ISR_CONTEXT_SWITCH(vect) \
+ void vect(UNUSED_ARG(int, arg)); \
+ INLINE void __isr_##vect(void); \
+ void vect(UNUSED_ARG(int, arg)) \
+ { \
+ __isr_##vect(); \
+ IRQ_PREEMPT_HANDLER(); \
+ } \
+ INLINE void __isr_##vect(void)
+ /**
+ * With task priorities enabled each ISR is used as a point to
+ * check if we need to perform a context switch.
+ *
+ * Instead, without priorities a context switch can occur only
+ * when the running task expires its time quantum. In this last
+ * case, the context switch can only occur in the timer ISR,
+ * that must be always declared with the
+ * DECLARE_ISR_CONTEXT_SWITCH() macro.
+ */
+ #if CONFIG_KERN_PRI
+ #define DECLARE_ISR(vect) \
+ DECLARE_ISR_CONTEXT_SWITCH(vect)
+ #endif /* CONFIG_KERN_PRI */
+ #endif
+ #ifndef DECLARE_ISR
+ #define DECLARE_ISR(vect) \
+ void vect(UNUSED_ARG(int, arg))
+ #endif
+ #ifndef DECLARE_ISR_CONTEXT_SWITCH
+ #define DECLARE_ISR_CONTEXT_SWITCH(vect) \
+ void vect(UNUSED_ARG(int, arg))
+ #endif
+
#else
#define OS_UNIX 0
#define OS_POSIX 0
* ADC ISR.
* Simply signal the adc process that convertion is complete.
*/
- static void ISR_FUNC adc_conversion_end_irq(void)
+ static DECLARE_ISR(adc_conversion_end_irq)
{
sig_signal(adc_process, SIG_ADC_COMPLETE);
volatile bool sending;
};
-static void uart0_irq_dispatcher(void);
-static void uart1_irq_dispatcher(void);
-static void spi0_irq_handler(void);
+static ISR_PROTO(uart0_irq_dispatcher);
+static ISR_PROTO(uart1_irq_dispatcher);
+static ISR_PROTO(spi0_irq_handler);
#if CPU_ARM_SAM7X
-static void spi1_irq_handler(void);
+static ISR_PROTO(spi1_irq_handler);
#endif
/*
* Callbacks for USART0
/**
* Serial IRQ dispatcher for USART0.
*/
-static void uart0_irq_dispatcher(void) __attribute__ ((interrupt));
-static void uart0_irq_dispatcher(void)
+static DECLARE_ISR(uart0_irq_dispatcher)
{
if (US0_CSR & BV(US_RXRDY))
uart0_irq_rx();
/**
* Serial IRQ dispatcher for USART1.
*/
-static void uart1_irq_dispatcher(void) __attribute__ ((interrupt));
-static void uart1_irq_dispatcher(void)
+static DECLARE_ISR(uart1_irq_dispatcher)
{
if (US1_CSR & BV(US_RXRDY))
uart1_irq_rx();
/**
* SPI0 interrupt handler
*/
-static void spi0_irq_handler(void) __attribute__ ((interrupt));
-static void spi0_irq_handler(void)
+static DECLARE_ISR(spi0_irq_handler)
{
SER_STROBE_ON;
/**
* SPI1 interrupt handler
*/
-static void spi1_irq_handler(void) __attribute__ ((interrupt));
-static void spi1_irq_handler(void)
+static DECLARE_ISR(spi1_irq_handler)
{
SER_STROBE_ON;
}
}
-static void spi0_dma_write_irq_handler(void) __attribute__ ((interrupt));
-static void spi0_dma_write_irq_handler(void)
+static DECLARE_ISR(spi0_dma_write_irq_handler)
{
SPI_DMA_STROBE_ON();
/* Pop sent chars from FIFO */
return 0;
}
-static void spi0_dma_read_irq_handler(void) __attribute__ ((interrupt));
-static void spi0_dma_read_irq_handler(void)
+static DECLARE_ISR(spi0_dma_read_irq_handler)
{
/* do nothing */
AIC_EOICR = 0;
/*
* Forward declaration for interrupt handler
*/
-static void stepper_tc0_irq(void);
-static void stepper_tc1_irq(void);
-static void stepper_tc2_irq(void);
+static ISR_PROTO(stepper_tc0_irq);
+static ISR_PROTO(stepper_tc1_irq);
+static ISR_PROTO(stepper_tc2_irq);
///< Static array of timer counter struct for stepper.
static struct TimerCounter stepper_timers[CONFIG_TC_STEPPER_MAX_NUM] =
/*
* Interrupt handler for timer counter TCKL0
*/
-static void ISR_FUNC stepper_tc0_irq(void)
+DECLARE_ISR(stepper_tc0_irq)
{
/*
* Warning: when we read the status_reg register, we reset it.
/*
* Interrupt handler for timer counter TCKL1
*/
-static void ISR_FUNC stepper_tc1_irq(void)
+DECLARE_ISR(stepper_tc1_irq)
{
/*
* Warning: when we read the status_reg register, we reset it.
/*
* Interrupt handler for timer counter TCKL2
*/
-static void ISR_FUNC stepper_tc2_irq(void)
+DECLARE_ISR(stepper_tc2_irq)
{
/*
* This function checks for interrupt enable state of
* various sources (system timer, etc..) and calls
* the corresponding handler.
+ *
+ * \note On ARM all IRQs are handled by the sysirq_dispatcher, so we can't
+ * differentiate between context-switch and non-context-switch ISR.
*/
-static void sysirq_dispatcher(void) __attribute__ ((interrupt));
-static void sysirq_dispatcher(void)
+static DECLARE_ISR_CONTEXT_SWITCH(sysirq_dispatcher)
{
- for (unsigned i = 0; i < countof(sysirq_tab); i++)
+ unsigned int i;
+
+ for (i = 0; i < countof(sysirq_tab); i++)
{
if (sysirq_tab[i].enabled
&& sysirq_tab[i].handler)
/** HW dependent timer initialization */
#if (CONFIG_TIMER == TIMER_ON_PIT)
+ ISR_PROTO_CONTEXT_SWITCH(timer_handler);
+
void timer_hw_init(void)
{
sysirq_init();
*/
#if (CONFIG_TIMER == TIMER_ON_PIT)
- void timer_handler(void);
+ /*
+ * On ARM all IRQs are handled by the sysirq_dispatcher, so the actual
+ * timer handler can be treated like any other normal routine.
+ */
+ #define DEFINE_TIMER_ISR void timer_handler(void); \
+ void timer_handler(void)
- #define DEFINE_TIMER_ISR void timer_handler(void)
#define TIMER_TICKS_PER_SEC 1000
#define TIMER_HW_CNT (CPU_FREQ / (16 * TIMER_TICKS_PER_SEC) - 1)
#if CPU_ARM_SAM7S_LARGE || CPU_ARM_SAM7X
- /**
- * With a 18.420MHz cristal, master clock is:
- * (((18.420 * PLL_MUL_VAL + 1) / PLL_DIV_VAL) / AT91MCK_PRES) = 48.023MHz
- */
+ /*
+ * With an 18.420MHz crystal, master clock is:
+ * (((18.420 * PLL_MUL_VAL + 1) / PLL_DIV_VAL) / AT91MCK_PRES) = 48.023MHz
+ */
#define PLL_MUL_VAL 72 /**< Real multiplier value is PLL_MUL_VAL + 1! */
#define PLL_DIV_VAL 14
#define AT91MCK_PRES PMC_PRES_CLK_2
- /**
- * Register I/O adresses.
- * \{
- */
+ /*
+ * Register I/O addresses.
+ */
#define MC_BASE 0xFFFFFF00
#define MC_FMR_OFF 0x00000060
#define MC_FWS_2R3W 0x00000100
#define RSTC_KEY 0xA5000000
#define RSTC_URSTEN (1 << 0)
+ #define ARM_MODE_USR 0x10
#define ARM_MODE_FIQ 0x11
#define ARM_MODE_IRQ 0x12
#define ARM_MODE_SVC 0x13
#define ARM_MODE_ABORT 0x17
#define ARM_MODE_UNDEF 0x1B
+ #define ARM_MODE_SYS 0x1F
#else
#error No register I/O definition for selected ARM CPU
/*
* Initialize user stack pointer.
*/
- ldr r13, =__stack_end
+ /* msr CPSR_c, #ARM_MODE_SYS | 0xC0 */
+ ldr r13, =__stack_end
/*
+++ /dev/null
-/**
- * \file
- * <!--
- * This file is part of BeRTOS.
- *
- * Bertos is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * As a special exception, you may use this file as part of a free software
- * library without restriction. Specifically, if other files instantiate
- * templates or use macros or inline functions from this file, or you compile
- * this file and link it with other files to produce an executable, this
- * file does not by itself cause the resulting executable to be covered by
- * the GNU General Public License. This exception does not however
- * invalidate any other reasons why the executable file might be covered by
- * the GNU General Public License.
- *
- * Copyright 2008 Develer S.r.l. (http://www.develer.com/)
- *
- * -->
- *
- * \brief Kernel scheduler macros.
- *
- * \version $Id$
- *
- * \author Francesco Sacchi <batt@develer.com>
- * \author Stefano Fedrigo <aleph@develer.com>
- */
-
-#ifndef CPU_ARM_HW_SWITCH_H
-#define CPU_ARM_HW_SWITCH_H
-
-#include <kern/proc_p.h>
-
-/**
- * Interrupt entry point.
- * Needed because AT91 uses an Interrupt Controller with auto-vectoring.
- */
-#define SCHEDULER_IRQ_ENTRY \
- asm volatile("sub lr, lr, #4 \n\t" /* Adjust LR */ \
- "stmfd sp!, {r0} \n\t" /* Save r0 */ \
- "stmfd sp, {sp}^ \n\t" /* Save user SP */ \
- "sub sp, sp, #4 \n\t" /* Decrement irq SP, writeback is illegal */ \
- "ldmfd sp!, {r0} \n\t" /* Restore user SP immedately in r0 */ \
- "stmfd r0!, {lr} \n\t" /* Store system LR in user stack */ \
- "stmfd r0, {r1-r12,lr}^ \n\t" /* Store registers on user stack (user LR too) */ \
- "sub r0, r0, #52 \n\t" /* Decrement r0, writeback is illegal */ \
- "ldmfd sp!, {r1} \n\t" /* Restore r0 */ \
- "stmfd r0!, {r1} \n\t" /* Store r0 in user stack too */ \
- "mrs r1, spsr \n\t" /* Save SPSR... */ \
- "stmfd r0!, {r1} \n\t" /* ... in user stack */ \
- "ldr r1, =CurrentProcess \n\t" /* Load in r1 &CurrentProcess->stack */ \
- "ldr r1, [r1, %0] \n\t" \
- "str r0, [r1] \n\t" /* Store the process SP */ \
- "sub fp, sp, #4 \n\t" /* Store the process SP */ \
- : /* no output */ \
- : "n" (offsetof(Process, stack)) \
- )
-
-
-#define SCHEDULER_IRQ_EXIT \
- asm volatile("ldr lr, =CurrentProcess \n\t" /* Load &CurrentProcess->stack */ \
- "ldr lr, [lr, %0] \n\t" \
- "ldr lr, [lr] \n\t" /* Load current process SP */ \
- "ldr r0, =0xFFFFF000 \n\t" /* End of interrupt for AT91 */ \
- "str r0, [r0, #0x130] \n\t" /* */ \
- "ldmfd lr!, {r0} \n\t" /* Pop status reg */ \
- "msr spsr, r0 \n\t" /* ... */ \
- "ldmfd lr, {r0-r12,lr}^ \n\t" /* Restore user regs */ \
- "add lr, lr, #56 \n\t" /* 52 + irq link register (extracted below) */ \
- "stmfd sp!, {lr} \n\t" /* Push user stack pointer in irq stack */ \
- "ldmfd sp, {sp}^ \n\t" /* Restore user SP */ \
- "sub sp, sp, #4 \n\t" /* Align irq SP */ \
- "ldmdb lr, {pc}^ \n\t" /* And return to user space (We use ldmdb cause lr is sp+4) */ \
- : /* no output */ \
- : "n" (offsetof(Process, stack)) \
- )
-
-#endif /* CPU_ARM_HW_SWITCH_H */
*
* \author Stefano Fedrigo <aleph@develer.com>
* \author Francesco Sacchi <batt@develer.com>
+ * \author Andrea Righi <arighi@develer.com>
*/
+#include "cfg/cfg_proc.h"
+
/* void asm_switch_context(void **new_sp [r0], void **save_sp [r1]) */
.globl asm_switch_context
asm_switch_context:
- mrs r2, cpsr /* Save status. */
+ /* Save registers */
+ stmfd sp!, {r4 - r11, lr}
+ /* Save old stack pointer */
+ str sp, [r1]
+ /* Load new stack pointer */
+ ldr sp, [r0]
+ /* Load new registers */
+ ldmfd sp!, {r4 - r11, pc}
+
+#if CONFIG_KERN_PREEMPT
+
+/* ARM interrupt mode with IRQ and FIQ disabled */
+#define ARM_IRQ_MODE 0xD2
+/* ARM supervisor mode with IRQ and FIQ disabled */
+#define ARM_SVC_MODE 0xD3
+
+.globl asm_irq_switch_context
+asm_irq_switch_context:
+ /* Return if preemption is not needed */
+ bl proc_needPreempt
+ cmp r0, #0
+ ldmeqfd sp!, {r0 - r3, ip, pc}^
- stmfd sp!, {r2, r4-r11, lr} /* Save registers. */
+ /* Otherwise restore regs used by the ISR */
+ ldmfd sp!, {r0 - r3, ip, lr}
- str sp, [r1] /* Save old stack pointer. */
- ldr sp, [r0] /* Load new stack pointer */
+ /* Save current process context */
+ msr cpsr_c, #ARM_SVC_MODE
+ stmfd sp!, {r0 - r3, ip, lr}
- ldmfd sp!, {r2, r4-r11, lr} /* Load new registers. */
- msr cpsr, r2 /* restore flags reg. */
+ /* Save lr_irq and spsr_irq in process stack */
+ msr cpsr_c, #ARM_IRQ_MODE
+ mov r0, lr
+ mrs r1, spsr
+ msr cpsr_c, #ARM_SVC_MODE
+ stmfd sp!, {r0, r1}
- mov pc, lr
+ /* Perform the context switch */
+ bl proc_preempt
+ /* Restore lr_irq and spsr_irq from process stack */
+ ldmfd sp!, {r0, r1}
+ msr cpsr_c, #ARM_IRQ_MODE
+ mov lr, r0
+ msr spsr_cxsf, r1
-/* proc_entry trampoline needed because ARM does not pop return addresses
-from the stack, but uses lr instead.*/
-.globl asm_proc_entry
-asm_proc_entry:
- mov lr, pc
- /* In r11 we have the real process entry point as set up by CPU_CREATE_NEW_STACK */
- bx r11
- bl proc_exit
+ /* Restore process regs */
+ msr cpsr_c, #ARM_SVC_MODE
+ ldmfd sp!, {r0 - r3, ip, lr}
+ /* Exit from ISR */
+ msr cpsr_c, #ARM_IRQ_MODE
+ movs pc, lr
+#endif /* CONFIG_KERN_PREEMPT */
#include "detect.h"
+#include "cfg/cfg_proc.h" /* CONFIG_KERN_PREEMPT */
#include "cfg/cfg_attr.h" /* CONFIG_FAST_MEM */
#ifdef __GNUC__
#define NOP asm volatile ("nop")
+ /* This is a good thing to insert into busy-wait loops. */
+ #define PAUSE asm volatile ("rep; nop" ::: "memory")
#define BREAKPOINT asm volatile ("int3" ::)
#endif
#define FAST_FUNC /**/
#endif
- /**
- * Function attribute to declare an interrupt service routine.
- */
- #define ISR_FUNC __attribute__((interrupt))
-
/*
* Function attribute to move it into ram memory.
*/
#define FAST_RODATA /* */
#endif
+#ifndef PAUSE
+ /// Generic PAUSE implementation.
+ #define PAUSE {NOP; MEMORY_BARRIER;}
+#endif
+
#endif /* CPU_ATTR_H */
#if CONFIG_SER_HWHANDSHAKE
/// This interrupt is triggered when the CTS line goes high
-SIGNAL(SIG_CTS)
+DECLARE_ISR(SIG_CTS)
{
// Re-enable UDR empty interrupt and TX, then disable CTS interrupt
UCSR0B = BV(BIT_RXCIE0) | BV(BIT_UDRIE0) | BV(BIT_RXEN0) | BV(BIT_TXEN0);
/**
* Serial 0 TX interrupt handler
*/
-SIGNAL(USART0_UDRE_vect)
+DECLARE_ISR(USART0_UDRE_vect)
{
SER_STROBE_ON;
* otherwise we'd stop the serial port with some data
* still pending in the buffer.
*/
-SIGNAL(SIG_UART0_TRANS)
+DECLARE_ISR(SIG_UART0_TRANS)
{
SER_STROBE_ON;
/**
* Serial 1 TX interrupt handler
*/
-SIGNAL(USART1_UDRE_vect)
+DECLARE_ISR(USART1_UDRE_vect)
{
SER_STROBE_ON;
*
* \sa port 0 TX complete handler.
*/
-SIGNAL(USART1_TX_vect)
+DECLARE_ISR(USART1_TX_vect)
{
SER_STROBE_ON;
* RXCIE is cleared. Unfortunately the RXC flag is read-only
* and can't be cleared by code.
*/
-SIGNAL(USART0_RX_vect)
+DECLARE_ISR(USART0_RX_vect)
{
SER_STROBE_ON;
* is heavily loaded, because an interrupt could be retriggered
* when executing the handler prologue before RXCIE is disabled.
*
- * \see SIGNAL(USART1_RX_vect)
+ * \see DECLARE_ISR(USART1_RX_vect)
*/
-SIGNAL(USART1_RX_vect)
+DECLARE_ISR(USART1_RX_vect)
{
SER_STROBE_ON;
/**
* SPI interrupt handler
*/
-SIGNAL(SIG_SPI)
+DECLARE_ISR(SIG_SPI)
{
SER_STROBE_ON;
#define TIMER_PRESCALER 64
#define TIMER_HW_BITS 8
#if CPU_AVR_ATMEGA1281 || CPU_AVR_ATMEGA168
- #define DEFINE_TIMER_ISR SIGNAL(SIG_OUTPUT_COMPARE0A)
+ #define DEFINE_TIMER_ISR DECLARE_ISR_CONTEXT_SWITCH(TIMER0_COMPA_vect)
#else
- #define DEFINE_TIMER_ISR SIGNAL(SIG_OUTPUT_COMPARE0)
+ #define DEFINE_TIMER_ISR DECLARE_ISR_CONTEXT_SWITCH(TIMER0_COMP_vect)
#endif
#define TIMER_TICKS_PER_SEC 1000
#define TIMER_HW_CNT OCR_DIVISOR
#define TIMER_HW_BITS 8
/** This value is the maximum in overflow based timers. */
#define TIMER_HW_CNT (1 << TIMER_HW_BITS)
- #define DEFINE_TIMER_ISR SIGNAL(SIG_OVERFLOW1)
+ #define DEFINE_TIMER_ISR DECLARE_ISR_CONTEXT_SWITCH(TIMER1_OVF_vect)
#define TIMER_TICKS_PER_SEC DIV_ROUND(TIMER_HW_HPTICKS_PER_SEC, TIMER_HW_CNT)
/// Type of time expressed in ticks of the hardware high precision timer
#define TIMER_PRESCALER 64
#define TIMER_HW_BITS 8
#if CPU_AVR_ATMEGA1281 || CPU_AVR_ATMEGA168
- #define DEFINE_TIMER_ISR SIGNAL(SIG_OUTPUT_COMPARE2A)
+ #define DEFINE_TIMER_ISR DECLARE_ISR_CONTEXT_SWITCH(TIMER2_COMPA_vect)
#else
- #define DEFINE_TIMER_ISR SIGNAL(SIG_OUTPUT_COMPARE2)
+ #define DEFINE_TIMER_ISR DECLARE_ISR_CONTEXT_SWITCH(TIMER2_COMP_vect)
#endif
#define TIMER_TICKS_PER_SEC 1000
/** Value for OCR register in output-compare based timers. */
#define TIMER_HW_BITS 8
/** This value is the maximum in overflow based timers. */
#define TIMER_HW_CNT (1 << TIMER_HW_BITS)
- #define DEFINE_TIMER_ISR SIGNAL(SIG_OVERFLOW3)
+ #define DEFINE_TIMER_ISR DECLARE_ISR_CONTEXT_SWITCH(TIMER3_OVF_vect)
#define TIMER_TICKS_PER_SEC DIV_ROUND(TIMER_HW_HPTICKS_PER_SEC, TIMER_HW_CNT)
/// Type of time expressed in ticks of the hardware high precision timer
; r0 is the TEMP REG and can be used freely.
; r1 is the ZERO REG and must always contain 0.
;
-; Stack frame is 19 byte, remember to update
+; Stack frame is 18 bytes, remember to update
; CPU_SAVED_REGS_CNT if you change pushed regs.
- in r0,SREG-__SFR_OFFSET
- push r0
-; push r1 ;zero-reg
push r2
push r3
push r4
push r15
push r16
push r17
-; push r18 ;caller-save
-; push r19 ;caller-save
-; push r20 ;caller-save
-; push r21 ;caller-save
-; push r22 ;caller-save
-; push r23 ;caller-save
-; push r24 ;caller-save
-; push r25 ;caller-save
-; push r26 ;caller-save
-; push r27 ;caller-save
+
push r28
push r29
-; push r30 ;caller-save
-; push r31 ;caller-save
in r18,SPL-__SFR_OFFSET ; r18:r19 = SP
in r19,SPH-__SFR_OFFSET
; Two instructions are required to update SP
; so an IRQ can sneak in between them.
; So IRQ *MUST* be disabled and then restored.
+ in r0, SREG-__SFR_OFFSET
cli ; Disable interrupt
out SPL-__SFR_OFFSET,r18 ; SP = *new_sp
out SPH-__SFR_OFFSET,r19
out SREG-__SFR_OFFSET,r0 ; Restore previous IRQ state
-; pop r31 ;caller-save
-; pop r30 ;caller-save
pop r29
pop r28
-; pop r27 ;caller-save
-; pop r26 ;caller-save
-; pop r25 ;caller-save
-; pop r24 ;caller-save
-; pop r23 ;caller-save
-; pop r22 ;caller-save
-; pop r21 ;caller-save
-; pop r20 ;caller-save
-; pop r19 ;caller-save
-; pop r18 ;caller-save
+
pop r17
pop r16
pop r15
pop r4
pop r3
pop r2
-; pop r1 ;zero-reg
- pop r0
- out SREG-__SFR_OFFSET,r0
ret
#include <cfg/compiler.h> /* for uintXX_t */
#if CPU_X86
+ #if CPU_X86_32
- #define CPU_SAVED_REGS_CNT 7
- #define CPU_STACK_GROWS_UPWARD 0
- #define CPU_SP_ON_EMPTY_SLOT 0
+ #define CPU_SAVED_REGS_CNT 4
+ #define CPU_STACK_GROWS_UPWARD 0
+ #define CPU_SP_ON_EMPTY_SLOT 0
+
+ #elif CPU_X86_64
+
+ #define CPU_SAVED_REGS_CNT 8
+ #define CPU_STACK_GROWS_UPWARD 0
+ #define CPU_SP_ON_EMPTY_SLOT 0
+ #else
+ #error "unknown CPU"
+ #endif
#elif CPU_ARM
- #define CPU_SAVED_REGS_CNT 10
+ #define CPU_SAVED_REGS_CNT 8
#define CPU_STACK_GROWS_UPWARD 0
#define CPU_SP_ON_EMPTY_SLOT 0
-
- EXTERN_C void asm_proc_entry(void);
- /**
- * Initialization value for registers in stack frame.
- * For the CPSR register, the initial value is set to:
- * - All flags (N, Z, C, V) set to 0.
- * - IRQ and FIQ enabled.
- * - ARM state.
- * - CPU in Supervisor Mode (SVC).
- */
- #define CPU_CREATE_NEW_STACK(stack, entry, exit) \
- do { \
- /* LR (asm proc_entry trampoline) */ \
- CPU_PUSH_CALL_FRAME(stack, asm_proc_entry); \
- /* R11 (Process entry point) DO NOT CHANGE: asm_proc_entry expects \
- * to find the actual process entry point in R11 */ \
- CPU_PUSH_CALL_FRAME(stack, entry); \
- /* R10 */ \
- CPU_PUSH_WORD(stack, 0x10101010); \
- /* R9 */ \
- CPU_PUSH_WORD(stack, 0x09090909); \
- /* R8 */ \
- CPU_PUSH_WORD(stack, 0x08080808); \
- /* R7 */ \
- CPU_PUSH_WORD(stack, 0x07070707); \
- /* R6 */ \
- CPU_PUSH_WORD(stack, 0x06060606); \
- /* R5 */ \
- CPU_PUSH_WORD(stack, 0x05050505); \
- /* R4 */ \
- CPU_PUSH_WORD(stack, 0x04040404); \
- /* CPSR */ \
- CPU_PUSH_WORD(stack, 0x00000013); \
- } while (0)
-
#elif CPU_PPC
#define CPU_SAVED_REGS_CNT 1
#elif CPU_AVR
- #define CPU_SAVED_REGS_CNT 19
+ #define CPU_SAVED_REGS_CNT 18
#define CPU_STACK_GROWS_UPWARD 0
#define CPU_SP_ON_EMPTY_SLOT 1
- /**
- * Initialization value for registers in stack frame.
- * The register index is not directly corrispondent to CPU
- * register numbers. Index 0 is the SREG register: the initial
- * value is all 0 but the interrupt bit (bit 7).
- */
- #define CPU_REG_INIT_VALUE(reg) (reg == 0 ? 0x80 : 0)
-
#else
#error No CPU_... defined.
#endif
/// Default for macro not defined in the right arch section
#ifndef CPU_REG_INIT_VALUE
- #define CPU_REG_INIT_VALUE(reg) 0
+ #define CPU_REG_INIT_VALUE(reg) (reg)
#endif
/*
*/
#ifndef CPU_CREATE_NEW_STACK
- #define CPU_CREATE_NEW_STACK(stack, entry, exit) \
+ #define CPU_CREATE_NEW_STACK(stack) \
do { \
size_t i; \
/* Initialize process stack frame */ \
- CPU_PUSH_CALL_FRAME(stack, exit); \
- CPU_PUSH_CALL_FRAME(stack, entry); \
+ CPU_PUSH_CALL_FRAME(stack, proc_entry); \
/* Push a clean set of CPU registers for asm_switch_context() */ \
for (i = 0; i < CPU_SAVED_REGS_CNT; i++) \
CPU_PUSH_WORD(stack, CPU_REG_INIT_VALUE(i)); \
#include "detect.h"
#include "types.h"
+#include <kern/preempt.h>
+
#include <cfg/compiler.h> /* for uintXX_t */
+#include "cfg/cfg_proc.h" /* CONFIG_KERN_PREEMPT */
#if CPU_I196
#define IRQ_DISABLE disable_interrupt()
#define IRQ_ENABLED() ((CPU_READ_FLAGS() & 0xc0) != 0xc0)
+ #if CONFIG_KERN_PREEMPT
+ EXTERN_C void asm_irq_switch_context(void);
+
+ /**
+ * At the beginning of any ISR immediately adjust the
+ * return address and store all the caller-save
+ * registers (the ISR may change these registers that
+ * are shared with the user-context).
+ */
+ #define IRQ_ENTRY() asm volatile ( \
+ "sub lr, lr, #4\n\t" \
+ "stmfd sp!, {r0-r3, ip, lr}\n\t")
+ #define IRQ_EXIT() asm volatile ( \
+ "b asm_irq_switch_context\n\t")
+ /**
+ * Function attribute to declare an interrupt service
+ * routine.
+ *
+ * An ISR function must be declared as naked because we
+ * want to add our IRQ_ENTRY() prologue and IRQ_EXIT()
+ * epilogue code to handle the context switch and save
+ * all the registers (not only the callee-save).
+ *
+ */
+ #define ISR_FUNC __attribute__((naked))
+
+ /**
+ * The compiler cannot establish which
+ * registers actually need to be saved, because
+ * the interrupt can happen at any time, so the
+ * "normal" prologue and epilogue used for a
+ * generic function call are not suitable for
+ * the ISR.
+ *
+ * Using a naked function has the drawback that
+ * the stack is not automatically adjusted at
+ * this point, like a "normal" function call.
+ *
+ * So, an ISR can _only_ contain other function
+ * calls and they can't use the stack in any
+ * other way.
+ *
+ * NOTE: we need to explicitly disable IRQs after
+ * IRQ_ENTRY(), because the IRQ status flag is not
+ * masked by the hardware and an IRQ ack inside the ISR
+ * may cause the triggering of another IRQ before
+ * exiting from the current ISR.
+ *
+ * The respective IRQ_ENABLE is not necessary, because
+ * IRQs will be automatically re-enabled when restoring
+ * the context of the user task.
+ */
+ #define DECLARE_ISR_CONTEXT_SWITCH(func) \
+ void ISR_FUNC func(void); \
+ static void __isr_##func(void); \
+ void ISR_FUNC func(void) \
+ { \
+ IRQ_ENTRY(); \
+ IRQ_DISABLE; \
+ __isr_##func(); \
+ IRQ_EXIT(); \
+ } \
+ static void __isr_##func(void)
+ /**
+ * Interrupt service routine prototype: can be used for
+ * forward declarations.
+ */
+ #define ISR_PROTO_CONTEXT_SWITCH(func) \
+ void ISR_FUNC func(void)
+ /**
+ * With task priorities enabled each ISR is used as a point to
+ * check if we need to perform a context switch.
+ *
+ * Instead, without priorities a context switch can occur only
+ * when the running task expires its time quantum. In this last
+ * case, the context switch can only occur in the timer
+ * ISR, that must be always declared with the
+ * DECLARE_ISR_CONTEXT_SWITCH() macro.
+ */
+ #if CONFIG_KERN_PRI
+ #define DECLARE_ISR(func) \
+ DECLARE_ISR_CONTEXT_SWITCH(func)
+
+ #define ISR_PROTO(func) \
+ ISR_PROTO_CONTEXT_SWITCH(func)
+ #endif /* CONFIG_KERN_PRI */
+ #endif /* CONFIG_KERN_PREEMPT */
+
+ #ifndef DECLARE_ISR
+ #define DECLARE_ISR(func) \
+ void __attribute__((interrupt)) func(void)
+ #endif
+ #ifndef DECLARE_ISR_CONTEXT_SWITCH
+ #define DECLARE_ISR_CONTEXT_SWITCH(func) \
+ void __attribute__((interrupt)) func(void)
+ #endif
+ #ifndef ISR_PROTO
+ #define ISR_PROTO(func) \
+ void __attribute__((interrupt)) func(void)
+ #endif
+ #ifndef ISR_PROTO_CONTEXT_SWITCH
+ #define ISR_PROTO_CONTEXT_SWITCH(func) \
+ void __attribute__((interrupt)) func(void)
+ #endif
+
#endif /* !__IAR_SYSTEMS_ICC_ */
#elif CPU_PPC
); \
(bool)(sreg & 0x80); \
})
+ #if CONFIG_KERN_PREEMPT
+ #define DECLARE_ISR_CONTEXT_SWITCH(vect) \
+ INLINE void __isr_##vect(void); \
+ ISR(vect) \
+ { \
+ __isr_##vect(); \
+ IRQ_PREEMPT_HANDLER(); \
+ } \
+ INLINE void __isr_##vect(void)
+
+ /**
+ * Interrupt service routine prototype: can be used for
+ * forward declarations.
+ */
+ #define ISR_PROTO(vect) ISR(vect)
+
+ /**
+ * With task priorities enabled each ISR is used as a point to
+ * check if we need to perform a context switch.
+ *
+ * Instead, without priorities a context switch can occur only
+ * when the running task expires its time quantum. In this last
+ * case, the context switch can only occur in the timer ISR,
+ * that must be always declared with the
+ * DECLARE_ISR_CONTEXT_SWITCH() macro.
+ */
+ #if CONFIG_KERN_PRI
+ #define DECLARE_ISR(func) \
+ DECLARE_ISR_CONTEXT_SWITCH(func)
+
+ #define ISR_PROTO(func) \
+ ISR_PROTO_CONTEXT_SWITCH(func)
+ #endif /* CONFIG_KERN_PRI */
+ #endif
+
+ #ifndef ISR_PROTO
+ #define ISR_PROTO(vect) ISR(vect)
+ #endif
+ #ifndef DECLARE_ISR
+ #define DECLARE_ISR(vect) ISR(vect)
+ #endif
+ #ifndef DECLARE_ISR_CONTEXT_SWITCH
+ #define DECLARE_ISR_CONTEXT_SWITCH(vect) ISR(vect)
+ #endif
+ #ifndef ISR_PROTO
+ #define ISR_PROTO(func) ISR(func)
+ #endif
+ #ifndef ISR_PROTO_CONTEXT_SWITCH
+ #define ISR_PROTO_CONTEXT_SWITCH(func) ISR(func)
+ #endif
+
#else
#error No CPU_... defined.
#endif
-#ifndef IRQ_ENTRY
- #define IRQ_ENTRY() /* NOP */
-#endif
-
-#ifndef IRQ_EXIT
- #define IRQ_EXIT() /* NOP */
-#endif
-
#ifdef IRQ_RUNNING
/// Ensure callee is running within an interrupt
#define ASSERT_IRQ_CONTEXT() ASSERT(IRQ_RUNNING())
#define IRQ_ASSERT_DISABLED() do {} while(0)
#endif
+
+#ifndef IRQ_PREEMPT_HANDLER
+ #if CONFIG_KERN_PREEMPT
+ /**
+ * Handle preemptive context switch inside timer IRQ.
+ */
+ INLINE void IRQ_PREEMPT_HANDLER(void)
+ {
+ if (proc_needPreempt())
+ proc_preempt();
+ }
+ #else
+ #define IRQ_PREEMPT_HANDLER() /* Nothing */
+ #endif
+#endif
+
/**
* Execute \a CODE atomically with respect to interrupts.
*
INLINE void cpu_relax(void)
{
#if CONFIG_KERN
- proc_yield();
+ if (proc_preemptAllowed())
+ proc_yield();
#endif
#if CONFIG_WATCHDOG
/*\}*/
+#ifndef INT_MAX
+ #define INT_MAX ((int)((unsigned int)~0 >> 1))
+ #define INT_MIN (-INT_MAX - 1)
+#endif
+
/* Sanity checks for the above definitions */
STATIC_ASSERT(sizeof(char) == SIZEOF_CHAR);
STATIC_ASSERT(sizeof(short) == SIZEOF_SHORT);
#include <cpu/irq.h>
#include <cpu/power.h> // cpu_relax()
+#include <kern/preempt.h> // proc_decQuantum()
+
/*
* Include platform-specific binding code if we're hosted.
* Try the CPU specific one for bare-metal environments.
/**
* Wait for the specified amount of timer ticks.
+ *
+ * \note Sleeping while preemption is disabled falls back to a busy-wait sleep.
*/
void timer_delayTicks(ticks_t delay)
{
/* We shouldn't sleep with interrupts disabled */
IRQ_ASSERT_ENABLED();
-#if defined(CONFIG_KERN_SIGNALS) && CONFIG_KERN_SIGNALS
+#if CONFIG_KERN_SIGNALS
Timer t;
- ASSERT(!sig_check(SIG_SINGLE));
- timer_setSignal(&t, proc_current(), SIG_SINGLE);
- timer_setDelay(&t, delay);
- timer_add(&t);
- sig_wait(SIG_SINGLE);
-
-#else /* !CONFIG_KERN_SIGNALS */
-
- ticks_t start = timer_clock();
-
- /* Busy wait */
- while (timer_clock() - start < delay)
- cpu_relax();
-
+ if (proc_preemptAllowed())
+ {
+ ASSERT(!sig_check(SIG_SINGLE));
+ timer_setSignal(&t, proc_current(), SIG_SINGLE);
+ timer_setDelay(&t, delay);
+ timer_add(&t);
+ sig_wait(SIG_SINGLE);
+ }
+ else
#endif /* !CONFIG_KERN_SIGNALS */
+ {
+ ticks_t start = timer_clock();
+
+ /* Busy wait */
+ while (timer_clock() - start < delay)
+ cpu_relax();
+ }
}
}
#endif /* CONFIG_TIMER_UDELAY */
-
/**
* Timer interrupt handler. Find soft timers expired and
* trigger corresponding events.
/* Update the master ms counter */
++_clock;
+ /* Update the current task's quantum (if enabled). */
+ proc_decQuantum();
+
#if CONFIG_TIMER_EVENTS
timer_poll(&timers_queue);
#endif
/* void asm_switch_context(void ** new_sp [sp+4], void ** save_sp [sp+8]) */
.globl SWITCH_CONTEXT
SWITCH_CONTEXT:
- pushl %eax
- pushl %ebx
- pushl %ecx
- pushl %edx
- pushl %esi
- pushl %edi
pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %ebx
movl 0x24(%esp),%ebp /* ebp = save_sp */
movl %esp,(%ebp) /* *save_sp = esp */
movl 0x20(%esp),%ebp /* ebp = new_sp */
movl (%ebp),%esp /* esp = *new_sp */
- popl %ebp
- popl %edi
- popl %esi
- popl %edx
- popl %ecx
popl %ebx
- popl %eax
+ popl %esi
+ popl %edi
+ popl %ebp
ret
/* void asm_switch_context(void **new_sp [%rdi], void **save_sp [%rsi]) */
.globl asm_switch_context
asm_switch_context:
- pushq %rax
- pushq %rbx
- pushq %rcx
- pushq %rdx
- pushq %rsi
- pushq %rdi
pushq %rbp
+ pushq %rdi
+ pushq %rsi
+ pushq %rbx
+ pushq %r8
+ pushq %r9
+ pushq %r10
+ pushq %r11
movq %rsp,(%rsi) /* *save_sp = rsp */
movq (%rdi),%rsp /* rsp = *new_sp */
- popq %rbp
- popq %rdi
- popq %rsi
- popq %rdx
- popq %rcx
+ popq %r11
+ popq %r10
+ popq %r9
+ popq %r8
popq %rbx
- popq %rax
+ popq %rsi
+ popq %rdi
+ popq %rbp
ret
// HW dependent timer initialization
-#define DEFINE_TIMER_ISR void timer_isr(UNUSED_ARG(int, arg))
+#define DEFINE_TIMER_ISR DECLARE_ISR_CONTEXT_SWITCH(timer_isr)
/** Most Linux kernels can't do better than this (CONFIG_HZ=250). */
#define TIMER_TICKS_PER_SEC 250
// HW dependent timer initialization
-#define DEFINE_TIMER_ISR void timer_isr(void)
+#define DEFINE_TIMER_ISR DECLARE_ISR_CONTEXT_SWITCH(timer_isr)
#define TIMER_TICKS_PER_SEC 250
#define TIMER_HW_CNT (1<<31) /* We assume 32bit integers here */
{
cpu_flags_t flags;
- ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
+ ATOMIC(LIST_ASSERT_VALID(&proc_ready_list));
ASSERT_USER_CONTEXT();
IRQ_ASSERT_ENABLED();
/* Poll on the ready queue for the first ready process */
IRQ_SAVE_DISABLE(flags);
- while (!(CurrentProcess = (struct Process *)list_remHead(&ProcReadyList)))
+ while (!(current_process = (struct Process *)list_remHead(&proc_ready_list)))
{
/*
* Make sure we physically reenable interrupts here, no matter what
* process will ever wake up.
*
* During idle-spinning, an interrupt can occur and it may
- * modify \p ProcReadyList. To ensure that compiler reload this
+ * modify \p proc_ready_list. To ensure that compiler reload this
* variable every while cycle we call CPU_MEMORY_BARRIER.
* The memory barrier ensure that all variables used in this context
* are reloaded.
void proc_switch(void)
{
/* Remember old process to save its context later */
- Process * const old_process = CurrentProcess;
+ Process * const old_process = current_process;
proc_schedule();
* Optimization: don't switch contexts when the active
* process has not changed.
*/
- if (CurrentProcess != old_process)
+ if (current_process != old_process)
{
cpu_stack_t *dummy;
#if CONFIG_KERN_MONITOR
LOG_INFO("Switch from %p(%s) to %p(%s)\n",
old_process, proc_name(old_process),
- CurrentProcess, proc_currentName());
+ current_process, proc_currentName());
#endif
/* Save context of old process and switch to new process. If there is no
* TODO: Instead of physically clearing the process at exit time, a zombie
* list should be created.
*/
- asm_switch_context(&CurrentProcess->stack, old_process ? &old_process->stack : &dummy);
+ asm_switch_context(¤t_process->stack, old_process ? &old_process->stack : &dummy);
}
/* This RET resumes the execution on the new process */
}
-
/**
* Co-operative context switch
*/
void proc_yield(void)
{
- ATOMIC(SCHED_ENQUEUE(CurrentProcess));
+ ATOMIC(SCHED_ENQUEUE(current_process));
proc_switch();
}
#include "idle.h"
#include "proc.h"
+#include <cpu/power.h> // cpu_relax()
#include <cfg/module.h>
+#include <cpu/types.h> // INT_MIN
+#include <kern/proc_p.h>
-// below there's a TRACE so we need a big stack
-PROC_DEFINE_STACK(idle_stack, KERN_MINSTACKSIZE * 2);
+struct Process *idle_proc;
+
+static PROC_DEFINE_STACK(idle_stack, KERN_MINSTACKSIZE);
/**
* The idle process
{
for (;;)
{
- TRACE;
- //monitor_report();
- proc_yield(); // FIXME: CPU_IDLE
+ PAUSE;
+ proc_switch();
}
}
void idle_init(void)
{
- struct Process *idle_proc = proc_new(idle, NULL, sizeof(idle_stack), idle_stack);
- proc_setPri(idle_proc, (int)~0);
+ /*
+ * Idle will be added to the proc_ready_list, but immediately removed
+ * after the first cpu_relax() execution.
+ *
+ * XXX: it would be better to never add idle_proc to the proc_ready_list,
+ * e.g., changing the prototype of proc_new() (or introducing a
+ * proc_new_nostart()) to allow the creation of "sleeping" tasks.
+ */
+ idle_proc = proc_new(idle, NULL, sizeof(idle_stack), idle_stack);
+ proc_setPri(idle_proc, INT_MIN);
}
#ifndef KERN_IDLE_H
#define KERN_IDLE_H
+extern struct Process *idle_proc;
+
void idle_init(void);
#endif /* KERN_IDLE_H */
#include <unistd.h> // FIXME: move POSIX stuff to irq_posix.h
-
MOD_DEFINE(irq)
// FIXME
/* signal handler */
void irq_entry(int signum)
{
-#if CONFIG_KERN_PREEMPT
- Process * const old_process = CurrentProcess;
-#endif
-
irq_handlers[signum]();
-
-#if CONFIG_KERN_PREEMPT
- ASSERT2(CurrentProcess, "no idle proc?");
-
- if (old_process != CurrentProcess)
- {
- IRQ_DISABLE;
-
- TRACEMSG("switching from %p:%s to %p:%s",
- old_process, old_process ? old_process->monitor.name : "---",
- CurrentProcess, proc_currentName());
-
- if (old_process)
- swapcontext(&old_process->context, &CurrentProcess->context);
- else
- setcontext(&CurrentProcess->context);
-
- IRQ_ENABLE;
- }
- TRACEMSG("resuming %p:%s", CurrentProcess, CurrentProcess->monitor.name);
-#endif // CONFIG_KERN_PREEMPT
}
void irq_register(int irq, void (*callback)(void))
Node *node;
int i;
+ proc_forbid();
kprintf("%-9s%-9s%-9s%-9s%s\n", "TCB", "SPbase", "SPsize", "SPfree", "Name");
for (i = 0; i < 56; i++)
kputchar('-');
kputchar('\n');
- proc_forbid();
FOREACH_NODE(node, &MonitorProcs)
{
Process *p = containerof(node, Process, monitor.link);
{
kdbg_init();
- #if CONFIG_KERN_PREEMPT
- kprintf("Init Interrupt (preempt mode)..");
- irq_init();
- kprintf("Done.\n");
- #endif
-
kprintf("Init Timer..");
timer_init();
kprintf("Done.\n");
--- /dev/null
+/**
+ * \file
+ * <!--
+ * This file is part of BeRTOS.
+ *
+ * Bertos is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * As a special exception, you may use this file as part of a free software
+ * library without restriction. Specifically, if other files instantiate
+ * templates or use macros or inline functions from this file, or you compile
+ * this file and link it with other files to produce an executable, this
+ * file does not by itself cause the resulting executable to be covered by
+ * the GNU General Public License. This exception does not however
+ * invalidate any other reasons why the executable file might be covered by
+ * the GNU General Public License.
+ *
+ * Copyright 2001, 2004, 2008 Develer S.r.l. (http://www.develer.com/)
+ * Copyright 1999, 2000, 2001, 2008 Bernie Innocenti <bernie@codewiz.org>
+ * -->
+ *
+ * \brief Choose the multitasking scheduler.
+ *
+ * \author Francesco Sacchi <batt@develer.com>
+ */
+
+
+#include "cfg/cfg_proc.h"
+
+/*
+ * Choose which file to compile depending on
+ * the multitasking type.
+ */
+#if CONFIG_KERN_PREEMPT
+ #include "preempt.c"
+#else
+ #include "coop.c"
+#endif
+
* the GNU General Public License.
*
* Copyright 2008 Bernie Innocenti <bernie@codewiz.org>
+ * Copyright 2009 Andrea Righi <arighi@develer.com>
* -->
*
* \brief Simple preemptive multitasking scheduler.
*
- * All voluntary and preemptive context switching happens on exit from
- * a common interrupt (signal) dispatcher. Preemption on quantum timeout
- * is regulated by a soft-timer. Other kinds of preemption could happen
- * if an interrupt sends a signal to a higher priority process (but this
- * is still unimplemented).
+ * Preemption is explicitly regulated at the exit of each interrupt service
+ * routine (ISR). Each task obtains a time quantum as soon as it is scheduled
+ * on the CPU and its quantum is decremented at each clock tick. The frequency
+ * of the timer determines the system tick granularity and CONFIG_KERN_QUANTUM
+ * the time sharing interval.
*
- * In the POSIX implementaiton, context switching is done by the portable
- * SVR4 swapcontext() facility.
+ * When the quantum expires the handler proc_needPreempt() checks if the
+ * preemption is enabled and in this case proc_schedule() is called, that
+ * possibly replaces the current running thread with a different one.
+ *
+ * The preemption can be disabled or enabled via proc_forbid() and
+ * proc_permit() primitives. This is implemented using a global atomic counter.
+ * When the counter is greater than 0 the task cannot be preempted; only when
+ * the counter reaches 0 the task can be preempted again.
+ *
+ * Preemption-disabled sections may be nested. The preemption will be
+ * re-enabled when the outermost preemption-disabled section completes.
+ *
+ * The voluntary preemption still happens via proc_switch() or proc_yield().
+ * The first one assumes the current process has been already added to a
+ * private wait queue (e.g., on a semaphore or a signal), while the second one
+ * takes care of adding the process into the ready queue.
+ *
+ * Context switch is done by CPU-dependent support routines. In case of a
+ * voluntary preemption the context switch routine must take care of
+ * saving/restoring only the callee-save registers (the voluntary-preemption is
+ * actually a function call). The kernel-preemption always happens inside a
+ * signal/interrupt context and it must take care of saving all registers. For
+ * this, in the entry point of each ISR the caller-save registers must be
+ * saved. In the ISR exit point, if the context switch must happen, we switch
+ * to user-context and call the same voluntary context switch routine that takes
+ * care of saving/restoring also the callee-save registers. On resume from the
+ * switch, the interrupt exit point moves back to interrupt-context, resumes
+ * the caller-save registers (saved in the ISR entry point) and return from the
+ * interrupt-context.
+ *
+ * \note Thread priority (if enabled by CONFIG_KERN_PRI) defines the order in
+ * the \p proc_ready_list and the capability to deschedule a running process. A
+ * low-priority thread can't preempt a high-priority thread.
+ *
+ * A high-priority process can preempt a low-priority process immediately (it
+ * will be descheduled and replaced in the interrupt exit point). Processes
+ * running at the same priority can be descheduled when they expire the time
+ * quantum.
+ *
+ * \note Sleeping while preemption is disabled falls back to a busy-wait sleep.
+ * Voluntary preemption when preemption is disabled raises a kernel bug.
*
- * \version $Id$
* \author Bernie Innocenti <bernie@codewiz.org>
+ * \author Andrea Righi <arighi@develer.com>
*/
#include "cfg/cfg_proc.h"
#include "proc_p.h"
#include "proc.h"
-#include "idle.h"
#include <kern/irq.h>
#include <kern/monitor.h>
+#include <kern/idle.h> // idle_proc
#include <cpu/frame.h> // CPU_IDLE
#include <cpu/irq.h> // IRQ_DISABLE()...
-#include <drv/timer.h>
+#include <cfg/log.h>
#include <cfg/module.h>
#include <cfg/depend.h> // CONFIG_DEPEND()
// Check config dependencies
-CONFIG_DEPEND(CONFIG_KERN_PREEMPT, CONFIG_KERN && CONFIG_TIMER_EVENTS && CONFIG_KERN_IRQ);
+CONFIG_DEPEND(CONFIG_KERN_PREEMPT, CONFIG_KERN);
MOD_DEFINE(preempt)
-/// Global preemption disabling nesting counter
-cpu_atomic_t _preempt_forbid_cnt;
+/**
+ * CPU dependent context switching routines.
+ *
+ * Saving and restoring the context on the stack is done by a CPU-dependent
+ * support routine which usually needs to be written in assembly.
+ */
+EXTERN_C void asm_switch_context(cpu_stack_t **new_sp, cpu_stack_t **save_sp);
-static Timer preempt_timer;
+/* Global preemption nesting counter */
+cpu_atomic_t preempt_count;
+/*
+ * The time sharing interval: when a process is scheduled on a CPU it gets an
+ * amount of CONFIG_KERN_QUANTUM clock ticks. When these ticks expires and
+ * preemption is enabled a new process is selected to run.
+ */
+int _proc_quantum;
-void proc_schedule(void)
+/**
+ * Call the scheduler and eventually replace the current running process.
+ */
+static void proc_schedule(void)
{
- IRQ_DISABLE;
-
- ASSERT(proc_preemptAllowed());
- LIST_ASSERT_VALID(&ProcReadyList);
- CurrentProcess = (struct Process *)list_remHead(&ProcReadyList);
- ASSERT2(CurrentProcess, "no idle proc?");
-
- IRQ_ENABLE;
+ Process *old_process = current_process;
+
+ IRQ_ASSERT_DISABLED();
+
+ /* Poll on the ready queue for the first ready process */
+ LIST_ASSERT_VALID(&proc_ready_list);
+ current_process = (Process *)list_remHead(&proc_ready_list);
+ if (UNLIKELY(!current_process))
+ current_process = idle_proc;
+ _proc_quantum = CONFIG_KERN_QUANTUM;
+ /*
+ * Optimization: don't switch contexts when the active process has not
+ * changed.
+ */
+ if (LIKELY(old_process != current_process))
+ {
+ cpu_stack_t *dummy;
+
+ /*
+ * Save context of old process and switch to new process. If
+ * there is no old process, we save the old stack pointer into
+ * a dummy variable that we ignore. In fact, this happens only
+ * when the old process has just exited.
+ *
+ * \todo Instead of physically clearing the process at exit
+ * time, a zombie list should be created.
+ */
+ asm_switch_context(&current_process->stack,
+ old_process ? &old_process->stack : &dummy);
+ }
- TRACEMSG("launching %p:%s", CurrentProcess, proc_currentName());
+ /* This RET resumes the execution on the new process */
+ LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
}
-void proc_preempt(UNUSED_ARG(void *, param))
+/**
+ * Check if we need to schedule another task
+ */
+int proc_needPreempt(void)
{
- if (proc_preemptAllowed())
- {
- IRQ_DISABLE;
-
- #if CONFIG_KERN_PRI
- Process *rival = (Process *)LIST_HEAD(&ProcReadyList);
- if (rival && rival->link.pri >= CurrentProcess->link.pri)
- {
- #endif
-
- TRACEMSG("preempting %p:%s", CurrentProcess, proc_currentName());
-
-// FIXME: this still breaks havoc, probably because of some reentrancy issue
-#if 0
- SCHED_ENQUEUE(CurrentProcess);
- proc_schedule();
-#endif
- #if CONFIG_KERN_PRI
- }
- #endif
-
- IRQ_ENABLE;
- }
+ if (UNLIKELY(current_process == NULL))
+ return 0;
+ if (!proc_preemptAllowed())
+ return 0;
+ return _proc_quantum ? prio_next() > prio_curr() :
+ prio_next() >= prio_curr();
+}
- timer_setDelay(&preempt_timer, CONFIG_KERN_QUANTUM);
- timer_add(&preempt_timer);
+/**
+ * Preempt the current task.
+ */
+void proc_preempt(void)
+{
+ IRQ_ASSERT_DISABLED();
+ ASSERT(current_process);
+
+ /* Perform the kernel preemption */
+ LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
+ /* We are inside a IRQ context, so ATOMIC is not needed here */
+ if (current_process != idle_proc)
+ SCHED_ENQUEUE(current_process);
+ proc_schedule();
}
+/**
+ * Give the control of the CPU to another process.
+ *
+ * \note Assume the current process has been already added to a wait queue.
+ *
+ * \warning This should be considered an internal kernel function, even if it
+ * is allowed, usage from application code is strongly discouraged.
+ */
void proc_switch(void)
{
- ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
- TRACEMSG("%p:%s", CurrentProcess, proc_currentName());
- ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
-
- /* Sleeping with IRQs disabled or preemption forbidden is illegal */
- IRQ_ASSERT_ENABLED();
ASSERT(proc_preemptAllowed());
- // Will invoke proc_switch() in interrupt context
- kill(0, SIGUSR1);
+ ATOMIC(proc_schedule());
}
+/**
+ * Voluntarily release the CPU.
+ */
void proc_yield(void)
{
- TRACEMSG("%p:%s", CurrentProcess, proc_currentName());
-
- IRQ_DISABLE;
- SCHED_ENQUEUE(CurrentProcess);
- IRQ_ENABLE;
-
- proc_switch();
-}
+ /*
+ * Voluntary preemption while preemption is disabled is considered
+ * illegal, as not very useful in practice.
+ *
+ * ASSERT if it happens.
+ */
+ ASSERT(proc_preemptAllowed());
-void proc_entry(void (*user_entry)(void))
-{
- user_entry();
- proc_exit();
+ ATOMIC(
+ SCHED_ENQUEUE(current_process);
+ proc_schedule();
+ );
}
void preempt_init(void)
{
- MOD_CHECK(irq);
- MOD_CHECK(timer);
-
- irq_register(SIGUSR1, proc_schedule);
-
- timer_setSoftint(&preempt_timer, proc_preempt, NULL);
- timer_setDelay(&preempt_timer, CONFIG_KERN_QUANTUM);
- timer_add(&preempt_timer);
-
idle_init();
-
MOD_INIT(preempt);
}
--- /dev/null
+/**
+ * \file
+ * <!--
+ * This file is part of BeRTOS.
+ *
+ * Bertos is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * As a special exception, you may use this file as part of a free software
+ * library without restriction. Specifically, if other files instantiate
+ * templates or use macros or inline functions from this file, or you compile
+ * this file and link it with other files to produce an executable, this
+ * file does not by itself cause the resulting executable to be covered by
+ * the GNU General Public License. This exception does not however
+ * invalidate any other reasons why the executable file might be covered by
+ * the GNU General Public License.
+ *
+ * Copyright 2010 Develer S.r.l. (http://www.develer.com/)
+ *
+ * -->
+ *
+ * \brief Preemptive kernel function interfaces.
+ *
+ * \author Francesco Sacchi <batt@develer.com>
+ */
+
+#ifndef KERN_PREEMPT_H
+#define KERN_PREEMPT_H
+
+#include <cfg/compiler.h>
+
+#if CONFIG_KERN_PREEMPT
+ void preempt_init(void);
+ void proc_preempt(void);
+ int proc_needPreempt(void);
+
+ INLINE void proc_decQuantum(void)
+ {
+ extern int _proc_quantum;
+ if (_proc_quantum > 0)
+ _proc_quantum--;
+ }
+#else /* !CONFIG_KERN_PREEMPT */
+ #define proc_decQuantum() /* NOP */
+#endif /* CONFIG_KERN_PREEMPT */
+
+#endif /* KERN_PREEMPT_H */
#define LOG_FORMAT KERN_LOG_FORMAT
#include <cfg/log.h>
-#include "cfg/cfg_arch.h" // ARCH_EMUL
#include "cfg/cfg_monitor.h"
#include <cfg/macros.h> // ROUND_UP2
#include <cfg/module.h>
*
* \note Access to the list must occur while interrupts are disabled.
*/
-REGISTER List ProcReadyList;
+REGISTER List proc_ready_list;
/*
* Holds a pointer to the TCB of the currently running process.
*
* \note User applications should use proc_current() to retrieve this value.
*/
-REGISTER Process *CurrentProcess;
+REGISTER Process *current_process;
+
+/** The main process (the one that executes main()). */
+static struct Process main_process;
+
+#if CONFIG_KERN_HEAP
+
+/**
+ * Local heap dedicated to allocate the memory used by the processes.
+ */
+static HEAP_DEFINE_BUF(heap_buf, KERN_MINSTACKSIZE * 128);
+static Heap proc_heap;
-#if (ARCH & ARCH_EMUL)
/*
- * In some hosted environments, we must emulate the stack on the real
- * process stack to satisfy consistency checks in system libraries and
- * because some ABIs place trampolines on the stack.
+ * Keep track of zombie processes (processes that are exiting and need to
+ * release some resources).
*
- * Access to this list must be protected by PROC_ATOMIC().
+ * \note Access to the list must occur while kernel preemption is disabled.
*/
-List StackFreeList;
+static List zombie_list;
-#define NPROC 10
-cpu_stack_t proc_stacks[NPROC][(64 * 1024) / sizeof(cpu_stack_t)];
-#endif
-
-/** The main process (the one that executes main()). */
-struct Process MainProcess;
+#endif /* CONFIG_KERN_HEAP */
-
-static void proc_init_struct(Process *proc)
+static void proc_initStruct(Process *proc)
{
/* Avoid warning for unused argument. */
(void)proc;
#if CONFIG_KERN_PRI
proc->link.pri = 0;
#endif
-
}
MOD_DEFINE(proc);
void proc_init(void)
{
- LIST_INIT(&ProcReadyList);
+ LIST_INIT(&proc_ready_list);
-#if ARCH & ARCH_EMUL
- LIST_INIT(&StackFreeList);
- for (int i = 0; i < NPROC; i++)
- ADDTAIL(&StackFreeList, (Node *)proc_stacks[i]);
+#if CONFIG_KERN_HEAP
+ LIST_INIT(&zombie_list);
+ heap_init(&proc_heap, heap_buf, sizeof(heap_buf));
#endif
-
/*
* We "promote" the current context into a real process. The only thing we have
* to do is create a PCB and make it current. We don't need to setup the stack
* pointer because it will be written the first time we switch to another process.
*/
- proc_init_struct(&MainProcess);
- CurrentProcess = &MainProcess;
+ proc_initStruct(&main_process);
+ current_process = &main_process;
#if CONFIG_KERN_MONITOR
monitor_init();
- monitor_add(CurrentProcess, "main");
+ monitor_add(current_process, "main");
#endif
#if CONFIG_KERN_PREEMPT
MOD_INIT(proc);
}
+
+#if CONFIG_KERN_HEAP
+
+/**
+ * Free all the resources of all zombie processes previously added to the zombie
+ * list.
+ */
+static void proc_freeZombies(void)
+{
+ Process *proc;
+
+ while (1)
+ {
+ PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list));
+ if (proc == NULL)
+ return;
+
+ if (proc->flags & PF_FREESTACK)
+ PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base,
+ proc->stack_size));
+ }
+}
+
+/**
+ * Enqueue a process in the zombie list.
+ */
+static void proc_addZombie(Process *proc)
+{
+ Node *node;
+#if CONFIG_KERN_PREEMPT
+ ASSERT(!proc_preemptAllowed());
+#endif
+
+#if CONFIG_KERN_PRI
+ node = &(proc)->link.link;
+#else
+ node = &(proc)->link;
+#endif
+ LIST_ASSERT_VALID(&zombie_list);
+ ADDTAIL(&zombie_list, node);
+}
+
+#endif /* CONFIG_KERN_HEAP */
+
/**
* Create a new process, starting at the provided entry point.
*
{
Process *proc;
const size_t PROC_SIZE_WORDS = ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t);
+ LOG_INFO("name=%s", name);
#if CONFIG_KERN_HEAP
bool free_stack = false;
-#endif
- LOG_INFO("name=%s", name);
-#if (ARCH & ARCH_EMUL)
- /* Ignore stack provided by caller and use the large enough default instead. */
- PROC_ATOMIC(stack_base = (cpu_stack_t *)list_remHead(&StackFreeList));
- ASSERT(stack_base);
+ /*
+ * Free up resources of a zombie process.
+ *
+ * We're implementing a kind of lazy garbage collector here for
+ * efficiency reasons: we can avoid to introduce overhead into another
+ * kernel task dedicated to free up resources (e.g., idle) and we're
+ * not introducing any overhead into the scheduler after a context
+ * switch (that would be *very* bad, because the scheduler runs with
+ * IRQ disabled).
+ *
+ * In this way we are able to release the memory of the zombie tasks
+ * without disabling IRQs and without introducing any significant
+ * overhead in any other kernel task.
+ */
+ proc_freeZombies();
- stack_size = KERN_MINSTACKSIZE;
-#elif CONFIG_KERN_HEAP
/* Did the caller provide a stack for us? */
if (!stack_base)
{
stack_size = KERN_MINSTACKSIZE;
/* Allocate stack dinamically */
- if (!(stack_base = heap_alloc(stack_size)))
+ PROC_ATOMIC(stack_base =
+ (cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
+ if (stack_base == NULL)
return NULL;
free_stack = true;
}
-#else // !ARCH_EMUL && !CONFIG_KERN_HEAP
+#else // CONFIG_KERN_HEAP
/* Stack must have been provided by the user */
ASSERT_VALID_PTR(stack_base);
ASSERT(stack_size);
-#endif // !ARCH_EMUL && !CONFIG_KERN_HEAP
+#endif // CONFIG_KERN_HEAP
#if CONFIG_KERN_MONITOR
/*
ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);
stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
- proc_init_struct(proc);
+ proc_initStruct(proc);
proc->user_data = data;
-#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR | (ARCH & ARCH_EMUL)
+#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
proc->stack_base = stack_base;
proc->stack_size = stack_size;
#if CONFIG_KERN_HEAP
proc->flags |= PF_FREESTACK;
#endif
#endif
+ proc->user_entry = entry;
+ CPU_CREATE_NEW_STACK(proc->stack);
- #if CONFIG_KERN_PREEMPT
-
- getcontext(&proc->context);
- proc->context.uc_stack.ss_sp = proc->stack;
- proc->context.uc_stack.ss_size = stack_size - 1;
- proc->context.uc_link = NULL;
- makecontext(&proc->context, (void (*)(void))proc_entry, 1, entry);
-
- #else // !CONFIG_KERN_PREEMPT
-
- CPU_CREATE_NEW_STACK(proc->stack, entry, proc_exit);
-
- #endif // CONFIG_KERN_PREEMPT
-
- #if CONFIG_KERN_MONITOR
- monitor_add(proc, name);
- #endif
+#if CONFIG_KERN_MONITOR
+ monitor_add(proc, name);
+#endif
/* Add to ready list */
ATOMIC(SCHED_ENQUEUE(proc));
*/
const char *proc_name(struct Process *proc)
{
- #if CONFIG_KERN_MONITOR
- return proc ? proc->monitor.name : "<NULL>";
- #else
- (void)proc;
- return "---";
- #endif
+#if CONFIG_KERN_MONITOR
+ return proc ? proc->monitor.name : "<NULL>";
+#else
+ (void)proc;
+ return "---";
+#endif
}
/// Return the name of the currently running process
*/
void proc_setPri(struct Process *proc, int pri)
{
- if (proc->link.pri == pri)
- return;
+ if (proc->link.pri == pri)
+ return;
- proc->link.pri = pri;
+ proc->link.pri = pri;
- if (proc != CurrentProcess)
- {
- proc_forbid();
- ATOMIC(sched_reenqueue(proc));
- proc_permit();
- }
+ if (proc != current_process)
+ ATOMIC(sched_reenqueue(proc));
}
#endif // CONFIG_KERN_PRI
+INLINE void proc_run(void)
+{
+ void (*entry)(void) = current_process->user_entry;
+
+ LOG_INFO("New process starting at %p", entry);
+ entry();
+}
+
+/**
+ * Entry point for all the processes.
+ */
+void proc_entry(void)
+{
+ /*
+ * Return from a context switch assumes interrupts are disabled, so
+ * we need to explicitly re-enable them as soon as possible.
+ */
+ IRQ_ENABLE;
+ /* Call the actual process's entry point */
+ proc_run();
+ proc_exit();
+}
+
/**
* Terminate the current process
*/
void proc_exit(void)
{
- LOG_INFO("%p:%s", CurrentProcess, proc_currentName());
+ LOG_INFO("%p:%s", current_process, proc_currentName());
#if CONFIG_KERN_MONITOR
- monitor_remove(CurrentProcess);
+ monitor_remove(current_process);
#endif
+ proc_forbid();
#if CONFIG_KERN_HEAP
/*
- * The following code is BROKEN.
- * We are freeing our own stack before entering proc_schedule()
- * BAJO: A correct fix would be to rearrange the scheduler with
- * an additional parameter which frees the old stack/process
- * after a context switch.
+ * Set the task as zombie, its resources will be freed in proc_new() in
+ * a lazy way, when another process will be created.
*/
- if (CurrentProcess->flags & PF_FREESTACK)
- heap_free(CurrentProcess->stack_base, CurrentProcess->stack_size);
- heap_free(CurrentProcess);
+ proc_addZombie(current_process);
#endif
+ current_process = NULL;
+ proc_permit();
-#if (ARCH & ARCH_EMUL)
- /* Reinsert process stack in free list */
- PROC_ATOMIC(ADDHEAD(&StackFreeList, (Node *)CurrentProcess->stack_base));
-
- /*
- * NOTE: At this point the first two words of what used
- * to be our stack contain a list node. From now on, we
- * rely on the compiler not reading/writing the stack.
- */
-#endif /* ARCH_EMUL */
-
- CurrentProcess = NULL;
proc_switch();
- /* not reached */
+
+ /* never reached */
+ ASSERT(0);
}
*/
iptr_t proc_currentUserData(void)
{
- return CurrentProcess->user_data;
+ return current_process->user_data;
}
*
* $WIZ$ module_name = "kernel"
* $WIZ$ module_configuration = "bertos/cfg/cfg_proc.h"
- * $WIZ$ module_depends = "switch_ctx", "coop"
+ * $WIZ$ module_depends = "switch_ctx", "mtask"
* $WIZ$ module_supports = "not atmega103"
*/
uint16_t flags; /**< Flags */
#endif
-#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR | (ARCH & ARCH_EMUL)
+#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
cpu_stack_t *stack_base; /**< Base of process stack */
size_t stack_size; /**< Size of process stack */
#endif
-#if CONFIG_KERN_PREEMPT
- ucontext_t context;
-#endif
+ /* The actual process entry point */
+ void (*user_entry)(void);
#if CONFIG_KERN_MONITOR
struct ProcMonitor
* \param data Pointer to user data.
* \param size Length of the stack.
* \param stack Pointer to the memory area to be used as a stack.
- *
+ *
* \return Process structure of new created process
* if successful, NULL otherwise.
*/
*/
INLINE struct Process *proc_current(void)
{
- extern struct Process *CurrentProcess;
- return CurrentProcess;
+ extern struct Process *current_process;
+ return current_process;
}
#if CONFIG_KERN_PRI
}
#endif
-/**
- * Disable preemptive task switching.
- *
- * The scheduler maintains a global nesting counter. Task switching is
- * effectively re-enabled only when the number of calls to proc_permit()
- * matches the number of calls to proc_forbid().
- *
- * \note Calling functions that could sleep while task switching is disabled
- * is dangerous and unsupported.
- *
- * \note calling proc_forbid() from within an interrupt is illegal and
- * meaningless.
- *
- * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
- * very efficient locking primitive in simple but performance-critical
- * situations. In all other cases, semaphores offer a more flexible and
- * fine-grained locking primitive.
- *
- * \sa proc_permit()
- */
-INLINE void proc_forbid(void)
-{
- #if CONFIG_KERN_PREEMPT
- extern cpu_atomic_t _preempt_forbid_cnt;
+#if CONFIG_KERN_PREEMPT
+
+ /**
+ * Disable preemptive task switching.
+ *
+ * The scheduler maintains a global nesting counter. Task switching is
+ * effectively re-enabled only when the number of calls to proc_permit()
+ * matches the number of calls to proc_forbid().
+ *
+ * \note Calling functions that could sleep while task switching is disabled
+ * is dangerous and unsupported.
+ *
+ * \note proc_permit() expands inline to 1-2 asm instructions, so it's a
+ * very efficient locking primitive in simple but performance-critical
+ * situations. In all other cases, semaphores offer a more flexible and
+ * fine-grained locking primitive.
+ *
+ * \sa proc_permit()
+ */
+ INLINE void proc_forbid(void)
+ {
+ extern cpu_atomic_t preempt_count;
/*
* We don't need to protect the counter against other processes.
* The reason why is a bit subtle.
* "preempt_forbid_cnt != 0" means that no task switching is
* possible.
*/
- ++_preempt_forbid_cnt;
+ ++preempt_count;
/*
- * Make sure _preempt_forbid_cnt is flushed to memory so the
- * preemption softirq will see the correct value from now on.
+ * Make sure preempt_count is flushed to memory so the preemption
+ * softirq will see the correct value from now on.
*/
MEMORY_BARRIER;
- #endif
-}
+ }
-/**
- * Re-enable preemptive task switching.
- *
- * \sa proc_forbid()
- */
-INLINE void proc_permit(void)
-{
- #if CONFIG_KERN_PREEMPT
+ /**
+ * Re-enable preemptive task switching.
+ *
+ * \sa proc_forbid()
+ */
+ INLINE void proc_permit(void)
+ {
+ extern cpu_atomic_t preempt_count;
/*
* This is to ensure any global state changed by the process gets
* flushed to memory before task switching is re-enabled.
*/
MEMORY_BARRIER;
- extern cpu_atomic_t _preempt_forbid_cnt;
/* No need to protect against interrupts here. */
- ASSERT(_preempt_forbid_cnt != 0);
- --_preempt_forbid_cnt;
-
+ ASSERT(preempt_count > 0);
+ --preempt_count;
/*
- * This ensures _preempt_forbid_cnt is flushed to memory immediately
- * so the preemption interrupt sees the correct value.
+ * This ensures preempt_count is flushed to memory immediately so the
+ * preemption interrupt sees the correct value.
*/
MEMORY_BARRIER;
+ }
- #endif
-}
-
-/**
- * \return true if preemptive task switching is allowed.
- * \note This accessor is needed because _preempt_forbid_cnt
- * must be absoultely private.
- */
-INLINE bool proc_preemptAllowed(void)
-{
- #if CONFIG_KERN_PREEMPT
- extern cpu_atomic_t _preempt_forbid_cnt;
- return (_preempt_forbid_cnt == 0);
- #else
- return true;
- #endif
-}
+ /**
+ * \return true if preemptive task switching is allowed.
+ * \note This accessor is needed because preempt_count
+ * must be absolutely private.
+ */
+ INLINE bool proc_preemptAllowed(void)
+ {
+ extern cpu_atomic_t preempt_count;
+ return (preempt_count == 0);
+ }
+#else /* CONFIG_KERN_PREEMPT */
+ #define proc_forbid() /* NOP */
+ #define proc_permit() /* NOP */
+ #define proc_preemptAllowed() (true)
+#endif /* CONFIG_KERN_PREEMPT */
/** Deprecated, use the proc_preemptAllowed() macro. */
#define proc_allowed() proc_preemptAllowed()
/* We need a large stack because system libraries are bloated */
#define KERN_MINSTACKSIZE 65536
#else
- #define KERN_MINSTACKSIZE \
- (sizeof(Process) + CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
- + 32 * sizeof(int))
+ #if CONFIG_KERN_PREEMPT
+ /*
+ * A preemptible kernel needs a larger stack compared to the
+ * cooperative case. A task can be interrupted anytime in each
+ * node of the call graph, at any level of depth. This may
+ * result in a higher stack consumption, to call the ISR, save
+ * the current user context and to execute the kernel
+ * preemption routines implemented as ISR prologue and
+ * epilogue. All these calls are nested into the process stack.
+ *
+ * So, to reduce the risk of stack overflow/underflow problems
+ * add a x2 to the portion stack reserved to the user process.
+ */
+ #define KERN_MINSTACKSIZE \
+ (sizeof(Process) + CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
+ + 32 * sizeof(int) * 2)
+ #else
+ #define KERN_MINSTACKSIZE \
+ (sizeof(Process) + CPU_SAVED_REGS_CNT * 2 * sizeof(cpu_stack_t) \
+ + 32 * sizeof(int))
+ #endif /* CONFIG_KERN_PREEMPT */
+
#endif
#ifndef CONFIG_KERN_MINSTACKSIZE
* \param size Stack size in bytes. It must be at least KERN_MINSTACKSIZE.
*/
#define PROC_DEFINE_STACK(name, size) \
- STATIC_ASSERT((size) >= KERN_MINSTACKSIZE); \
- cpu_stack_t name[((size) + sizeof(cpu_stack_t) - 1) / sizeof(cpu_stack_t)];
+ cpu_stack_t name[((size) + sizeof(cpu_stack_t) - 1) / sizeof(cpu_stack_t)]; \
+ STATIC_ASSERT((size) >= KERN_MINSTACKSIZE)
/* Memory fill codes to help debugging */
#if CONFIG_KERN_MONITOR
#include <cpu/types.h> /* for cpu_stack_t */
#include <cpu/irq.h> // IRQ_ASSERT_DISABLED()
-#if CONFIG_KERN_PREEMPT
- #include <ucontext.h> // XXX
-#endif
-
#include <kern/proc.h> // struct Process
+#include <kern/idle.h> // idle_proc
/**
/** Track running processes. */
-extern REGISTER Process *CurrentProcess;
+extern REGISTER Process *current_process;
/**
* Track ready processes.
*
* Access to this list must be performed with interrupts disabled
*/
-extern REGISTER List ProcReadyList;
+extern REGISTER List proc_ready_list;
#if CONFIG_KERN_PRI
- #define SCHED_ENQUEUE_INTERNAL(proc) LIST_ENQUEUE(&ProcReadyList, &(proc)->link)
+ #define prio_next() (LIST_EMPTY(&proc_ready_list) ? idle_proc->link.pri : \
+ ((PriNode *)LIST_HEAD(&proc_ready_list))->pri)
+ #define prio_curr() (current_process->link.pri)
+
+ #define SCHED_ENQUEUE_INTERNAL(proc) \
+ LIST_ENQUEUE(&proc_ready_list, &(proc)->link)
+ #define SCHED_ENQUEUE_HEAD_INTERNAL(proc) \
+ LIST_ENQUEUE_HEAD(&proc_ready_list, &(proc)->link)
#else
- #define SCHED_ENQUEUE_INTERNAL(proc) ADDTAIL(&ProcReadyList, &(proc)->link)
+ #define prio_next() 0
+ #define prio_curr() 0
+
+ #define SCHED_ENQUEUE_INTERNAL(proc) ADDTAIL(&proc_ready_list, &(proc)->link)
+ #define SCHED_ENQUEUE_HEAD_INTERNAL(proc) ADDHEAD(&proc_ready_list, &(proc)->link)
#endif
/**
*/
#define SCHED_ENQUEUE(proc) do { \
IRQ_ASSERT_DISABLED(); \
- LIST_ASSERT_VALID(&ProcReadyList); \
+ LIST_ASSERT_VALID(&proc_ready_list); \
SCHED_ENQUEUE_INTERNAL(proc); \
} while (0)
+#define SCHED_ENQUEUE_HEAD(proc) do { \
+ IRQ_ASSERT_DISABLED(); \
+ LIST_ASSERT_VALID(&proc_ready_list); \
+ SCHED_ENQUEUE_HEAD_INTERNAL(proc); \
+ } while (0)
+
+
#if CONFIG_KERN_PRI
/**
* Changes the priority of an already enqueued process.
INLINE void sched_reenqueue(struct Process *proc)
{
IRQ_ASSERT_DISABLED();
- LIST_ASSERT_VALID(&ProcReadyList);
+ LIST_ASSERT_VALID(&proc_ready_list);
Node *n;
PriNode *pos = NULL;
- FOREACH_NODE(n, &ProcReadyList)
+ FOREACH_NODE(n, &proc_ready_list)
{
if (n == &proc->link.link)
{
if (pos)
{
REMOVE(&proc->link.link);
- LIST_ENQUEUE(&ProcReadyList, &proc->link);
+ LIST_ENQUEUE(&proc_ready_list, &proc->link);
}
}
#endif //CONFIG_KERN_PRI
/// Schedule another process *without* adding the current one to the ready list.
void proc_switch(void);
-
-#if CONFIG_KERN_PREEMPT
-void proc_entry(void (*user_entry)(void));
-void preempt_init(void);
-#endif
+void proc_entry(void);
#if CONFIG_KERN_MONITOR
/** Initialize the monitor */
* invalidate any other reasons why the executable file might be covered by
* the GNU General Public License.
*
- * Copyright 2008 Develer S.r.l. (http://www.develer.com/)
+ * Copyright 2009 Develer S.r.l. (http://www.develer.com/)
* -->
*
*
- * \brief Test kernel process.
+ * \brief Test kernel preemption.
*
- * \version $Id$
- * \author Daniele Basile <asterix@develer.com>
+ * This testcase spawns TASKS parallel threads that runs for TIME seconds. They
+ * continuously spin updating a global counter (one counter for each thread).
+ *
+ * At exit each thread checks if the others have had the chance to update
+ * their own counter. If not, it means the preemption didn't occur and the
+ * testcase returns an error message.
+ *
+ * Otherwise, if all the threads have been able to update their own counter it
+ * means preemption successfully occurs, since there is no active sleep inside
+ * each thread's implementation.
+ *
+ * \author Andrea Righi <arighi@develer.com>
*
* $test$: cp bertos/cfg/cfg_proc.h $cfgdir/
* $test$: echo "#undef CONFIG_KERN" >> $cfgdir/cfg_proc.h
* $test$: echo "#define CONFIG_KERN 1" >> $cfgdir/cfg_proc.h
* $test$: echo "#undef CONFIG_KERN_PRI" >> $cfgdir/cfg_proc.h
* $test$: echo "#define CONFIG_KERN_PRI 1" >> $cfgdir/cfg_proc.h
+ * $test$: echo "#undef CONFIG_KERN_PREEMPT" >> $cfgdir/cfg_proc.h
+ * $test$: echo "#define CONFIG_KERN_PREEMPT 1" >> $cfgdir/cfg_proc.h
* $test$: cp bertos/cfg/cfg_monitor.h $cfgdir/
- * $test$: echo "#undef CONFIG_KERN_MONITOR" >> $cfgdir/cfg_monitor.h
- * $test$: echo "#define CONFIG_KERN_MONITOR 1" >> $cfgdir/cfg_monitor.h
+ * $test$: sed -i "s/CONFIG_KERN_MONITOR 0/CONFIG_KERN_MONITOR 1/" $cfgdir/cfg_monitor.h
* $test$: cp bertos/cfg/cfg_signal.h $cfgdir/
* $test$: echo "#undef CONFIG_KERN_SIGNALS" >> $cfgdir/cfg_signal.h
* $test$: echo "#define CONFIG_KERN_SIGNALS 1" >> $cfgdir/cfg_signal.h
+ *
*/
+#include <stdio.h> // sprintf
+#include <string.h> // memset
+
#include <kern/proc.h>
#include <kern/irq.h>
#include <kern/monitor.h>
#include <drv/timer.h>
#include <cfg/test.h>
+#include <cfg/cfg_proc.h>
+enum
+{
+ TEST_OK = 1,
+ TEST_FAIL = 2,
+};
-// Global settings for the test.
-#define DELAY 5
-
-// Settings for the test process.
-//Process 1
-#define INC_PROC_T1 1
-#define DELAY_PROC_T1 INC_PROC_T1*DELAY
-//Process 2
-#define INC_PROC_T2 3
-#define DELAY_PROC_T2 INC_PROC_T2*DELAY
-//Process 3
-#define INC_PROC_T3 5
-#define DELAY_PROC_T3 INC_PROC_T3*DELAY
-//Process 4
-#define INC_PROC_T4 7
-#define DELAY_PROC_T4 INC_PROC_T4*DELAY
-//Process 5
-#define INC_PROC_T5 11
-#define DELAY_PROC_T5 INC_PROC_T5*DELAY
-//Process 6
-#define INC_PROC_T6 13
-#define DELAY_PROC_T6 INC_PROC_T6*DELAY
-//Process 7
-#define INC_PROC_T7 17
-#define DELAY_PROC_T7 INC_PROC_T7*DELAY
-//Process 8
-#define INC_PROC_T8 19
-#define DELAY_PROC_T8 INC_PROC_T8*DELAY
-
-//Global count for each process.
-unsigned int t1_count = 0;
-unsigned int t2_count = 0;
-unsigned int t3_count = 0;
-unsigned int t4_count = 0;
-unsigned int t5_count = 0;
-unsigned int t6_count = 0;
-unsigned int t7_count = 0;
-unsigned int t8_count = 0;
-
-/*
- * These macros generate the code needed to create the test process functions.
- */
-#define PROC_TEST(num) static void proc_test##num(void) \
-{ \
- for (int i = 0; i < INC_PROC_T##num; ++i) \
- { \
- t##num##_count++; \
- kputs("> Process[" #num "]\n"); \
- timer_delay(DELAY_PROC_T##num); \
- } \
-}
+/* Number of tasks to spawn */
+#define TASKS 8
+
+static char name[TASKS][32];
-#define PROC_TEST_STACK(num) PROC_DEFINE_STACK(proc_test##num##_stack, KERN_MINSTACKSIZE);
-#define PROC_TEST_INIT(num) proc_new(proc_test##num, NULL, sizeof(proc_test##num##_stack), proc_test##num##_stack);
+static unsigned int done[TASKS];
-// Define process
-PROC_TEST(1)
-PROC_TEST(2)
-PROC_TEST(3)
-PROC_TEST(4)
-PROC_TEST(5)
-PROC_TEST(6)
-PROC_TEST(7)
-PROC_TEST(8)
+#define WORKER_STACK_SIZE KERN_MINSTACKSIZE * 3
+
+/* Base time delay for processes using timer_delay() */
+#define DELAY 5
// Define process stacks for test.
-PROC_TEST_STACK(1)
-PROC_TEST_STACK(2)
-PROC_TEST_STACK(3)
-PROC_TEST_STACK(4)
-PROC_TEST_STACK(5)
-PROC_TEST_STACK(6)
-PROC_TEST_STACK(7)
-PROC_TEST_STACK(8)
+static cpu_stack_t worker_stack[TASKS][WORKER_STACK_SIZE / sizeof(cpu_stack_t)];
+
+static int prime_numbers[] =
+{
+ 1, 3, 5, 7, 11, 13, 17, 19,
+ 23, 29, 31, 37, 41, 43, 47, 53,
+};
+
+STATIC_ASSERT(TASKS <= countof(prime_numbers));
+
+static void worker(void)
+{
+ long pid = (long)proc_currentUserData();
+ long tot = prime_numbers[pid - 1];
+ unsigned int my_count = 0;
+ int i;
+
+ for (i = 0; i < tot; i++)
+ {
+ my_count++;
+ PROC_ATOMIC(kprintf("> %s[%ld] running\n", __func__, pid));
+ timer_delay(tot * DELAY);
+ }
+ done[pid - 1] = 1;
+ PROC_ATOMIC(kprintf("> %s[%ld] completed\n", __func__, pid));
+}
+
+static int worker_test(void)
+{
+ long i;
+
+ // Init the test processes
+ kputs("Run Proc test..\n");
+ for (i = 0; i < TASKS; i++)
+ {
+ sprintf(&name[i][0], "worker_%ld", i + 1);
+ proc_new_with_name(name[i], worker, (iptr_t)(i + 1),
+ WORKER_STACK_SIZE, &worker_stack[i][0]);
+ }
+ kputs("> Main: Processes started\n");
+ while (1)
+ {
+ for (i = 0; i < TASKS; i++)
+ {
+ if (!done[i])
+ break;
+ }
+ if (i == TASKS)
+ break;
+ monitor_report();
+ timer_delay(93);
+ }
+ kputs("> Main: process test finished..ok!\n");
+ return 0;
+}
+
+#if CONFIG_KERN_PREEMPT
+/* Time to run each preemptible thread (in seconds) */
+#define TIME 10
+
+static char preempt_name[TASKS][32];
+
+static cpu_atomic_t barrier[TASKS];
+static cpu_atomic_t main_barrier;
+
+static unsigned int preempt_counter[TASKS];
+static unsigned int preempt_done[TASKS];
+
+static cpu_stack_t preempt_worker_stack[TASKS][WORKER_STACK_SIZE / sizeof(cpu_stack_t)];
+
+static void preempt_worker(void)
+{
+ long pid = (long)proc_currentUserData();
+ unsigned int *my_count = &preempt_counter[pid - 1];
+ ticks_t start, stop;
+ int i;
+
+ barrier[pid - 1] = 1;
+ /* Synchronize on the main barrier */
+ while (!main_barrier)
+ proc_yield();
+ PROC_ATOMIC(kprintf("> %s[%ld] running\n", __func__, pid));
+ start = timer_clock();
+ stop = ms_to_ticks(TIME * 1000);
+ while (timer_clock() - start < stop)
+ {
+ IRQ_ASSERT_ENABLED();
+ (*my_count)++;
+ /* be sure to wrap to a value different from 0 */
+ if (UNLIKELY(*my_count == (unsigned int)~0))
+ *my_count = 1;
+ }
+ PROC_ATOMIC(kprintf("> %s[%ld] completed: (counter = %d)\n",
+ __func__, pid, *my_count));
+ for (i = 0; i < TASKS; i++)
+ if (!preempt_counter[i])
+ {
+ preempt_done[pid - 1] = TEST_FAIL;
+ return;
+ }
+ preempt_done[pid - 1] = TEST_OK;
+}
+
+static int preempt_worker_test(void)
+{
+ unsigned long score = 0;
+ long i;
+
+ // Init the test processes
+ kputs("Run Preemption test..\n");
+ for (i = 0; i < TASKS; i++)
+ {
+ sprintf(&preempt_name[i][0], "preempt_worker_%ld", i + 1);
+ proc_new_with_name(preempt_name[i], preempt_worker, (iptr_t)(i + 1),
+ WORKER_STACK_SIZE, &preempt_worker_stack[i][0]);
+ }
+ kputs("> Main: Processes created\n");
+ /* Synchronize on start */
+ while (1)
+ {
+ for (i = 0; i < TASKS; i++)
+ if (!barrier[i])
+ break;
+ if (i == TASKS)
+ break;
+ proc_yield();
+ }
+ /* Now all threads have been created, start them all */
+ main_barrier = 1;
+ MEMORY_BARRIER;
+ kputs("> Main: Processes started\n");
+ while (1)
+ {
+ for (i = 0; i < TASKS; i++)
+ {
+ if (!preempt_done[i])
+ break;
+ else if (preempt_done[i] == TEST_FAIL)
+ {
+ kputs("> Main: process test finished..fail!\n");
+ return -1;
+ }
+ }
+ if (i == TASKS)
+ break;
+ monitor_report();
+ timer_delay(1000);
+ }
+ for (i = 0; i < TASKS; i++)
+ score += preempt_counter[i];
+ kputs("> Main: process test finished..ok!\n");
+ kprintf("> Score: %lu\n", score);
+ return 0;
+}
+#endif /* CONFIG_KERN_PREEMPT */
+
+#if CONFIG_KERN_SIGNALS & CONFIG_KERN_PRI
+
+#define PROC_PRI_TEST_STACK(num) PROC_DEFINE_STACK(proc_test##num##_stack, KERN_MINSTACKSIZE);
// Define params to test priority
#define PROC_PRI_TEST(num) static void proc_pri_test##num(void) \
}
// Default priority is 0
-#define PROC_PRI_TEST_INIT(num, proc) \
-do { \
- struct Process *p = proc_new(proc_pri_test##num, (proc), sizeof(proc_test##num##_stack), proc_test##num##_stack); \
- proc_setPri(p, num + 1); \
+#define PROC_PRI_TEST_INIT(num, proc) \
+do { \
+ struct Process *p = proc_new(proc_pri_test##num, (proc), \
+ sizeof(proc_test##num##_stack), \
+ proc_test##num##_stack); \
+ proc_setPri(p, num + 1); \
} while (0)
-PROC_TEST_STACK(0)
+PROC_PRI_TEST_STACK(0)
+PROC_PRI_TEST_STACK(1)
+PROC_PRI_TEST_STACK(2)
+
PROC_PRI_TEST(0)
PROC_PRI_TEST(1)
PROC_PRI_TEST(2)
-
-/**
- * Process scheduling test
- */
-int proc_testRun(void)
+static int prio_worker_test(void)
{
- int ret_value = 0;
- kprintf("Run Process test..\n");
-
- //Init the process tests
- PROC_TEST_INIT(1)
- PROC_TEST_INIT(2)
- PROC_TEST_INIT(3)
- PROC_TEST_INIT(4)
- PROC_TEST_INIT(5)
- PROC_TEST_INIT(6)
- PROC_TEST_INIT(7)
- PROC_TEST_INIT(8)
- kputs("> Main: Processes created\n");
-
- for (int i = 0; i < 30; ++i)
- {
- kputs("> Main\n");
- timer_delay(93);
- monitor_report();
- }
-
- if( t1_count == INC_PROC_T1 &&
- t2_count == INC_PROC_T2 &&
- t3_count == INC_PROC_T3 &&
- t4_count == INC_PROC_T4 &&
- t5_count == INC_PROC_T5 &&
- t6_count == INC_PROC_T6 &&
- t7_count == INC_PROC_T7 &&
- t8_count == INC_PROC_T8)
- {
- kputs("> Main: process test finished..ok!\n");
- ret_value = 0;
- }
- else
- {
- kputs("> Main: process test..fail!\n");
- ret_value = -1;
- }
+ struct Process *curr = proc_current();
+ int orig_pri = curr->link.pri;
+ int ret = 0;
-#if CONFIG_KERN_SIGNALS & CONFIG_KERN_PRI
// test process priority
// main process must have the higher priority to check signals received
proc_setPri(proc_current(), 10);
- struct Process *curr = proc_current();
+ kputs("Run Priority test..\n");
// the order in which the processes are created is important!
PROC_PRI_TEST_INIT(0, curr);
PROC_PRI_TEST_INIT(1, curr);
// signals must be: USER2, 1, 0 in order
sigmask_t signals = sig_wait(SIG_USER0 | SIG_USER1 | SIG_USER2);
if (!(signals & SIG_USER2))
- goto priority_fail;
-
+ {
+ ret = -1;
+ goto out;
+ }
signals = sig_wait(SIG_USER0 | SIG_USER1 | SIG_USER2);
if (!(signals & SIG_USER1))
- goto priority_fail;
-
+ {
+ ret = -1;
+ goto out;
+ }
signals = sig_wait(SIG_USER0 | SIG_USER1 | SIG_USER2);
if (!(signals & SIG_USER0))
- goto priority_fail;
-
+ {
+ ret = -1;
+ goto out;
+ }
// All processes must have quit by now, but just in case...
signals = sig_waitTimeout(SIG_USER0 | SIG_USER1 | SIG_USER2, 200);
if (signals & (SIG_USER0 | SIG_USER1 | SIG_USER2))
- goto priority_fail;
-
+ {
+ ret = -1;
+ goto out;
+ }
if (signals & SIG_TIMEOUT)
{
kputs("Priority test successfull.\n");
}
+out:
+ proc_setPri(proc_current(), orig_pri);
+ if (ret != 0)
+ kputs("Priority test failed.\n");
+ return ret;
+}
+#endif /* CONFIG_KERN_SIGNALS & CONFIG_KERN_PRI */
- return ret_value;
-
-priority_fail:
- kputs("Priority test failed.\n");
- return -1;
-
-#endif
-
- return ret_value;
-
+/**
+ * Process scheduling test
+ */
+int proc_testRun(void)
+{
+#if CONFIG_KERN_PREEMPT
+ // Clear shared data (this is needed when this testcase is embedded in
+ // the demo application).
+ memset(preempt_counter, 0, sizeof(preempt_counter));
+ memset(preempt_done, 0, sizeof(preempt_done));
+ memset(barrier, 0, sizeof(barrier));
+ main_barrier = 0;
+#endif /* CONFIG_KERN_PREEMPT */
+ memset(done, 0, sizeof(done));
+
+ /* Start tests */
+ worker_test();
+#if CONFIG_KERN_PREEMPT
+ preempt_worker_test();
+#endif /* CONFIG_KERN_PREEMPT */
+#if CONFIG_KERN_SIGNALS & CONFIG_KERN_PRI
+ prio_worker_test();
+#endif /* CONFIG_KERN_SIGNALS & CONFIG_KERN_PRI */
+ return 0;
}
int proc_testSetup(void)
{
kdbg_init();
- #if CONFIG_KERN_PREEMPT
- kprintf("Init Interrupt (preempt mode)..");
- irq_init();
- kprintf("Done.\n");
- #endif
-
kprintf("Init Timer..");
timer_init();
kprintf("Done.\n");
proc_forbid();
sem_verify(s);
- if ((!s->owner) || (s->owner == CurrentProcess))
+ if ((!s->owner) || (s->owner == current_process))
{
- s->owner = CurrentProcess;
+ s->owner = current_process;
s->nest_count++;
result = true;
}
sem_verify(s);
/* Is the semaphore already locked by another process? */
- if (UNLIKELY(s->owner && (s->owner != CurrentProcess)))
+ if (UNLIKELY(s->owner && (s->owner != current_process)))
{
/* Append calling process to the wait queue */
- ADDTAIL(&s->wait_queue, (Node *)CurrentProcess);
+ ADDTAIL(&s->wait_queue, (Node *)current_process);
/*
* We will wake up only when the current owner calls
ASSERT(LIST_EMPTY(&s->wait_queue));
/* The semaphore was free: lock it */
- s->owner = CurrentProcess;
+ s->owner = current_process;
s->nest_count++;
proc_permit();
}
proc_forbid();
sem_verify(s);
- ASSERT(s->owner == CurrentProcess);
+ ASSERT(s->owner == current_process);
/*
* Decrement nesting count and check if the semaphore
sem_init(&sem);
kprintf("Done.\n");
- #if CONFIG_KERN_PREEMPT
- kprintf("Init Interrupt (preempt mode)..");
- irq_init();
- kprintf("Done.\n");
- #endif
-
kprintf("Init Timer..");
timer_init();
kprintf("Done.\n");
cpu_flags_t flags;
IRQ_SAVE_DISABLE(flags);
- result = CurrentProcess->sig_recv & sigs;
- CurrentProcess->sig_recv &= ~sigs;
+ result = current_process->sig_recv & sigs;
+ current_process->sig_recv &= ~sigs;
IRQ_RESTORE(flags);
return result;
sigmask_t sig_wait(sigmask_t sigs)
{
sigmask_t result;
- cpu_flags_t flags;
/* Sleeping with IRQs disabled or preemption forbidden is illegal */
IRQ_ASSERT_ENABLED();
* In this case, we'd deadlock with the signal bit already set
* and the process never being reinserted into the ready list.
*/
- // FIXME: just use IRQ_DISABLE() here
- IRQ_SAVE_DISABLE(flags);
+ IRQ_DISABLE;
/* Loop until we get at least one of the signals */
- while (!(result = CurrentProcess->sig_recv & sigs))
+ while (!(result = current_process->sig_recv & sigs))
{
/*
* Tell "them" that we want to be awaken when any of these
* signals arrives.
*/
- CurrentProcess->sig_wait = sigs;
+ current_process->sig_wait = sigs;
/*
* Go to sleep and proc_switch() to another process.
* We re-enable IRQs because proc_switch() does not
* guarantee to save and restore the interrupt mask.
*/
- IRQ_RESTORE(flags);
+ IRQ_ENABLE;
proc_switch();
- IRQ_SAVE_DISABLE(flags);
+ IRQ_DISABLE;
/*
* When we come back here, the wait mask must have been
* one of the signals we were expecting must have been
* delivered to us.
*/
- ASSERT(!CurrentProcess->sig_wait);
- ASSERT(CurrentProcess->sig_recv & sigs);
+ ASSERT(!current_process->sig_wait);
+ ASSERT(current_process->sig_recv & sigs);
}
/* Signals found: clear them and return */
- CurrentProcess->sig_recv &= ~sigs;
+ current_process->sig_recv &= ~sigs;
- IRQ_RESTORE(flags);
+ IRQ_ENABLE;
return result;
}
/* Check if process needs to be awoken */
if (proc->sig_recv & proc->sig_wait)
{
- /* Wake up process and enqueue in ready list */
+ /*
+ * Wake up process and enqueue in ready list.
+ *
+ * Move this process to the head of the ready list, so that it
+ * will be chosen at the next scheduling point.
+ */
proc->sig_wait = 0;
- SCHED_ENQUEUE(proc);
+ SCHED_ENQUEUE_HEAD(proc);
}
IRQ_RESTORE(flags);
{
kdbg_init();
- #if CONFIG_KERN_PREEMPT
- kprintf("Init Interrupt (preempt mode)..");
- irq_init();
- kprintf("Done.\n");
- #endif
-
kprintf("Init Timer..");
timer_init();
kprintf("Done.\n");
* \param size Heap size in bytes.
*/
#define HEAP_DEFINE_BUF(name, size) \
- heap_buf_t name[((size) + sizeof(heap_buf_t) - 1) / sizeof(heap_buf_t)];
+ heap_buf_t name[((size) + sizeof(heap_buf_t) - 1) / sizeof(heap_buf_t)]; \
+ STATIC_ASSERT(sizeof(name) % sizeof(heap_buf_t) == 0)
/// Initialize \a heap within the buffer pointed by \a memory which is of \a size bytes
void heap_init(struct Heap* heap, void* memory, size_t size);
/**
* Insert a priority node in a priority queue.
*
- * The new node is inserted immediately before the
- * first node with lower priority or appended to
- * the tail if no such node exists.
+ * The new node is inserted immediately before the first node with the same
+ * or lower priority, or appended to the tail if no such node exists.
+ */
+#define LIST_ENQUEUE_HEAD(list, node) \
+ do { \
+ PriNode *ln; \
+ LIST_ASSERT_NOT_CONTAINS((list),(node)); \
+ FOREACH_NODE(ln, (list)) \
+ if (ln->pri <= (node)->pri) \
+ break; \
+ INSERT_BEFORE(&(node)->link, &ln->link); \
+ } while (0)
+
+/**
+ * Insert a priority node in a priority queue.
+ *
+ * The new node is inserted immediately before the first node with lower
+ * priority or appended to the tail if no such node exists.
*/
#define LIST_ENQUEUE(list, node) \
do { \
bertos/mware/event.c \
bertos/mware/formatwr.c \
bertos/mware/hex.c \
+ bertos/mware/sprintf.c \
bertos/kern/kfile.c \
bertos/kern/proc.c \
- bertos/kern/coop.c \
+ bertos/kern/mtask.c \
+ bertos/kern/idle.c \
bertos/kern/proc_test.c \
bertos/kern/monitor.c \
bertos/kern/signal.c \
at91sam7s_PREFIX = arm-none-eabi-
at91sam7s_CPPAFLAGS = -O0 -g -gdwarf-2 -g -gen-debug
-at91sam7s_CPPFLAGS = -O0 -D'ARCH=0' -D__ARM_AT91SAM7S256__ -D'CPU_FREQ=(48023000UL)' -D'WIZ_AUTOGEN' -g3 -gdwarf-2 -fverbose-asm -Iexamples/at91sam7 -Ibertos/cpu/arm
+at91sam7s_CPPFLAGS = -O0 -D'ARCH=0' -D__ARM_AT91SAM7S256__ -D'CPU_FREQ=(48023000UL)' -D'WIZ_AUTOGEN' -g3 -gdwarf-2 -fverbose-asm -Iexamples/at91sam7 -Ibertos/cpu/arm -fomit-frame-pointer
at91sam7s_LDFLAGS = -nostartfiles -T bertos/cpu/arm/scripts/at91sam7_256_rom.ld -Wl,--no-warn-mismatch
-
at91sam7s_CPU = arm7tdmi
at91sam7s_PROGRAMMER_CPU = at91sam7
bertos/mware/event.c \
bertos/mware/formatwr.c \
bertos/mware/hex.c \
+ bertos/mware/sprintf.c \
+ bertos/struct/heap.c \
bertos/kern/kfile.c \
bertos/kern/proc.c \
- bertos/kern/coop.c \
+ bertos/kern/mtask.c \
+ bertos/kern/idle.c \
bertos/kern/proc_test.c \
bertos/kern/monitor.c \
bertos/kern/signal.c \
at91sam7x_PREFIX = arm-none-eabi-
at91sam7x_CPPAFLAGS = -O0 -g -gdwarf-2 -g -gen-debug
-at91sam7x_CPPFLAGS = -O0 -D'ARCH=0' -D__ARM_AT91SAM7X256__ -D'CPU_FREQ=(48023000UL)' -D'WIZ_AUTOGEN' -g3 -gdwarf-2 -fverbose-asm -Iexamples/at91sam7 -Ibertos/cpu/arm
+at91sam7x_CPPFLAGS = -O0 -D'ARCH=0' -D__ARM_AT91SAM7X256__ -D'CPU_FREQ=(48023000UL)' -D'WIZ_AUTOGEN' -g3 -gdwarf-2 -fverbose-asm -Iexamples/at91sam7 -Ibertos/cpu/arm -fomit-frame-pointer
at91sam7x_LDFLAGS = -nostartfiles -T bertos/cpu/arm/scripts/at91sam7_256_rom.ld -Wl,--no-warn-mismatch
at91sam7x_CPU = arm7tdmi
*
* $WIZ$ type = "boolean"
*/
-#define CONFIG_KERN_PREEMPT 0
+#define CONFIG_KERN_PREEMPT 1
/**
* Priority-based scheduling policy.
* $WIZ$ type = "boolean"
*/
-#define CONFIG_KERN_PRI 0
+#define CONFIG_KERN_PRI 1
/**
* Time sharing quantum (a prime number prevents interference effects) [ms].
#define CONFIG_KERN_HEAP 0
/**
- * Preemptive process scheduling. WARNING: Experimental, still incomplete!
+ * Preemptive process scheduling.
*
* $WIZ$ type = "boolean"
*/
-#define CONFIG_KERN_PREEMPT 0
+#define CONFIG_KERN_PREEMPT 1
/**
* Priority-based scheduling policy.
--- /dev/null
+/**
+ * \file
+ * <!--
+ * This file is part of BeRTOS.
+ *
+ * Bertos is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * As a special exception, you may use this file as part of a free software
+ * library without restriction. Specifically, if other files instantiate
+ * templates or use macros or inline functions from this file, or you compile
+ * this file and link it with other files to produce an executable, this
+ * file does not by itself cause the resulting executable to be covered by
+ * the GNU General Public License. This exception does not however
+ * invalidate any other reasons why the executable file might be covered by
+ * the GNU General Public License.
+ *
+ * Copyright 2008 Develer S.r.l. (http://www.develer.com/)
+ * All Rights Reserved.
+ * -->
+ *
+ * \brief Configuration file for timer module.
+ *
+ * \version $Id$
+ *
+ * \author Daniele Basile <asterix@develer.com>
+ */
+
+#ifndef CFG_TIMER_H
+#define CFG_TIMER_H
+
+/**
+ * Hardware timer selection for drv/timer.c.
+ * $WIZ$ type = "enum"
+ * $WIZ$ value_list = "timer_select"
+ */
+#define CONFIG_TIMER TIMER_DEFAULT
+
+/**
+ * Debug timer interrupt using a strobe pin.
+ * $WIZ$ type = "boolean"
+ */
+#define CONFIG_TIMER_STROBE 0
+
+/**
+ * Enable asynchronous timers.
+ * $WIZ$ type = "boolean"
+ */
+#define CONFIG_TIMER_EVENTS 1
+
+/**
+ * Support hi-res timer_usleep().
+ * $WIZ$ type = "boolean"
+ */
+#define CONFIG_TIMER_UDELAY 1
+
+#endif /* CFG_TIMER_H */
{
emul_init(&argc, argv);
- #if CONFIG_KERN_PREEMPT
- irq_init();
- #endif
timer_init();
buz_init();
kbd_init();
bertos/mware/observer.c \
bertos/mware/resource.c \
bertos/mware/sprintf.c \
+ bertos/struct/heap.c \
bertos/kern/idle.c \
+ bertos/kern/mtask.c \
bertos/kern/irq.c \
- bertos/kern/coop.c \
bertos/kern/proc.c \
bertos/kern/proc_test.c \
bertos/kern/sem.c \
bertos/drv/kdebug.c
bertos/drv/timer.c
bertos/fs/battfs.c
- bertos/kern/coop.c
bertos/kern/idle.c
bertos/kern/kfile.c
bertos/kern/monitor.c
bertos/kern/proc.c
bertos/kern/signal.c
bertos/kern/sem.c
+ bertos/kern/mtask.c
bertos/mware/event.c
bertos/mware/formatwr.c
bertos/mware/hex.c