#include <kern/irq.h>
#include <kern/monitor.h>
#include <cpu/frame.h> // CPU_IDLE
+#include <cpu/irq.h> // IRQ_DISABLE()...
#include <drv/timer.h>
#include <cfg/module.h>
+int preempt_forbid_cnt;
Timer preempt_timer;
-/**
- * Disable preemptive task switching.
- *
- * The scheduler maintains a per-process nesting counter. Task switching is
- * effectively re-enabled only when the number of calls to proc_permit()
- * matches the number of calls to proc_forbid().
- *
- * Calling functions that could sleep while task switching is disabled
- * is dangerous, although supported. Preemptive task switching is
- * resumed while the process is sleeping and disabled again as soon as
- * it wakes up again.
- *
- * \sa proc_permit()
- */
-void proc_forbid(void)
-{
- /* No need to protect against interrupts here. */
- ++CurrentProcess->forbid_cnt;
-}
-
-/**
- * Re-enable preemptive task switching.
- *
- * \sa proc_forbid()
- */
-void proc_permit(void)
-{
- /* No need to protect against interrupts here. */
- --CurrentProcess->forbid_cnt;
-}
-
void proc_preempt(void)
{
void proc_preempt_timer(UNUSED_ARG(void *, param))
{
+ /* Abort if task preemption is disabled */
+ if (preempt_forbid_cnt)
+ return;
+
IRQ_DISABLE;
- if (!CurrentProcess->forbid_cnt)
+ if (CurrentProcess)
{
TRACEMSG("preempting %p:%s", CurrentProcess, CurrentProcess->monitor.name);
- LIST_ASSERT_VALID(&ProcReadyList);
SCHED_ENQUEUE(CurrentProcess);
proc_preempt();
}
void proc_schedule(void)
{
- TRACE;
+ TRACEMSG("%p:%s", CurrentProcess, proc_currentName());
+ ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
+
+ /* Sleeping with IRQs disabled or preemption forbidden is illegal */
+ ASSERT_IRQ_ENABLED();
+ ASSERT(preempt_forbid_cnt == 0);
// Will invoke proc_preempt() in interrupt context
kill(0, SIGUSR1);
void proc_yield(void)
{
- TRACE;
+ TRACEMSG("%p:%s", CurrentProcess, proc_currentName());
- ASSERT_IRQ_ENABLED();
IRQ_DISABLE;
SCHED_ENQUEUE(CurrentProcess);
- LIST_ASSERT_VALID(&ProcReadyList);
- proc_schedule();
IRQ_ENABLE;
+
+ proc_schedule();
}
void proc_entry(void (*user_entry)(void))
static cpustack_t idle_stack[CONFIG_PROC_DEFSTACKSIZE / sizeof(cpustack_t)];
-/*
+// FIXME: move this to kern/idle.c
+/**
* The idle process
*
- * This process never dies and never sleeps. It's also quite apathic
+ * This process never dies and never sleeps. It's also quite lazy, apathetic
* and a bit antisocial.
*
* Having an idle process costs some stack space, but simplifies the
for (;;)
{
TRACE;
- monitor_report();
+ //monitor_report();
proc_yield(); // FIXME: CPU_IDLE
}
}
proc->sig_recv = 0;
#endif
-#if CONFIG_KERN_PREEMPT
- proc->forbid_cnt = 0;
-#endif
-
#if CONFIG_KERN_HEAP
proc->flags = 0;
#endif
/* Add to ready list */
ATOMIC(SCHED_ENQUEUE(proc));
- ATOMIC(LIST_ASSERT_VALID(&ProcReadyList));
#if CONFIG_KERN_MONITOR
monitor_add(proc, name);
return proc;
}
-/** Rename a process */
+/**
+ * Return the name of the specified process.
+ *
+ * Passing NULL is legal and returns the name "<NULL>".
+ */
+const char *proc_name(struct Process *proc)
+{
+ #if CONFIG_KERN_MONITOR
+ return proc ? proc->monitor.name : "<NULL>";
+ #else
+ (void)proc;
+ return "---";
+ #endif
+}
+
+/// Return the name of the currently running process
+const char *proc_currentName(void)
+{
+ return proc_name(proc_current());
+}
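+
+/*
+ * Usage sketch (illustrative only; assumes the usual kprintf() debug helper
+ * from <cfg/debug.h> is available): proc_name(NULL) is safe and yields
+ * "<NULL>", so both calls work even when no process is current.
+ *
+ *    kprintf("running: %s\n", proc_currentName());
+ *    kprintf("proc:    %s\n", proc_name(NULL));
+ */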
+
+/// Rename a process
void proc_rename(struct Process *proc, const char *name)
{
#if CONFIG_KERN_MONITOR
#endif
}
-
/**
* Terminate the current process
*/
void proc_exit(void)
{
- TRACEMSG("%p:%s", CurrentProcess, CurrentProcess->monitor.name);
+ TRACEMSG("%p:%s", CurrentProcess, proc_currentName());
#if CONFIG_KERN_MONITOR
monitor_remove(CurrentProcess);
/**
* Get the pointer to the user data of the current process
*/
-iptr_t proc_current_user_data(void)
+iptr_t proc_currentUserData(void)
{
return CurrentProcess->user_data;
}
#include "cfg/cfg_kern.h"
#include <cfg/compiler.h>
-#include <cpu/irq.h>
+#if CONFIG_KERN_PREEMPT
+ #include <cfg/debug.h> // ASSERT()
+#endif
-/* Fwd decl */
+#include <cpu/types.h> // cpustack_t
+
+/*
+ * Forward declaration. The definition of struct Process is private to the
+ * scheduler and hidden in proc_p.h.
+ */
struct Process;
/* Task scheduling services */
int proc_testTearDown(void);
struct Process *proc_current(void);
-iptr_t proc_current_user_data(void);
-void proc_rename(struct Process *proc, const char* name);
+iptr_t proc_currentUserData(void);
+void proc_rename(struct Process *proc, const char *name);
+const char *proc_name(struct Process *proc);
+const char *proc_currentName(void);
+
+/**
+ * Disable preemptive task switching.
+ *
+ * The scheduler maintains a global nesting counter. Task switching is
+ * effectively re-enabled only when the number of calls to proc_permit()
+ * matches the number of calls to proc_forbid().
+ *
+ * \note Calling functions that could sleep while task switching is disabled
+ * is dangerous and unsupported.
+ *
+ * \note proc_forbid() and proc_permit() expand inline to 1-2 asm instructions
+ * each, so they form a very efficient locking primitive in simple but
+ * performance-critical situations. In all other cases, semaphores offer a
+ * more flexible and fine-grained locking primitive.
+ *
+ * \sa proc_permit()
+ */
+INLINE void proc_forbid(void)
+{
+ #if CONFIG_KERN_PREEMPT
+ // No need to protect against interrupts here.
+ extern int preempt_forbid_cnt;
+ ++preempt_forbid_cnt;
+
+ /*
+ * Make sure preempt_forbid_cnt is flushed to memory so the
+ * preemption softirq will see the correct value from now on.
+ */
+ MEMORY_BARRIER;
+ #endif
+}
+
+/**
+ * Re-enable preemptive task switching.
+ *
+ * \sa proc_forbid()
+ */
+INLINE void proc_permit(void)
+{
+ #if CONFIG_KERN_PREEMPT
+
+ /*
+ * This is to ensure any global state changed by the process gets
+ * flushed to memory before task switching is re-enabled.
+ */
+ MEMORY_BARRIER;
+
+ /* No need to protect against interrupts here. */
+ extern int preempt_forbid_cnt;
+ --preempt_forbid_cnt;
+ ASSERT(preempt_forbid_cnt >= 0);
+
+ /*
+ * This ensures preempt_forbid_cnt is flushed to memory immediately
+ * so the preemption interrupt sees the correct value.
+ */
+ MEMORY_BARRIER;
+
+ #endif
+}
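+
+/*
+ * Usage sketch (illustrative; event_count and count_event() are hypothetical
+ * names, not part of this interface): preemption is disabled around the
+ * shared-data access, and calls may nest thanks to the global counter.
+ *
+ *    static int event_count;
+ *
+ *    static void count_event(void)
+ *    {
+ *        proc_forbid();
+ *        ++event_count;  // no task switch can preempt us here
+ *        proc_permit();
+ *    }
+ */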
-#if CONFIG_KERN_PREEMPT
- void proc_forbid(void);
- void proc_permit(void);
-#else
- INLINE void proc_forbid(void) { /* nop */ }
- INLINE void proc_permit(void) { /* nop */ }
-#endif
/**
* Execute a block of \a CODE atomically with respect to task scheduling.
sigmask_t sig_recv; /**< Received signals */
#endif
-#if CONFIG_KERN_PREEMPTIVE
- int forbid_cnt; /**< Nesting count for proc_forbid()/proc_permit(). */
- bool leaving; /**< XXX: maybe global? */
- ucontext_t context;
-#endif
-
#if CONFIG_KERN_HEAP
uint16_t flags; /**< Flags */
#endif
size_t stack_size; /**< Size of process stack */
#endif
+#if CONFIG_KERN_PREEMPTIVE
+ ucontext_t context;
+#endif
+
#if CONFIG_KERN_MONITOR
struct ProcMonitor
{
* \note This macro is *NOT* protected against the scheduler. Access to
* this list must be performed with interrupts disabled.
*/
-#define SCHED_ENQUEUE(proc) ADDTAIL(&ProcReadyList, &(proc)->link)
+#define SCHED_ENQUEUE(proc) do { \
+ LIST_ASSERT_VALID(&ProcReadyList); \
+ ADDTAIL(&ProcReadyList, &(proc)->link); \
+ } while (0)
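+
+/*
+ * Usage sketch: callers must keep interrupts masked around the macro, either
+ * through ATOMIC() or with an explicit IRQ_DISABLE/IRQ_ENABLE pair, as the
+ * scheduler code itself does:
+ *
+ *    ATOMIC(SCHED_ENQUEUE(proc));
+ *
+ *    IRQ_DISABLE;
+ *    SCHED_ENQUEUE(CurrentProcess);
+ *    IRQ_ENABLE;
+ */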
/** Schedule to another process *without* adding the current to the ready list. */
void proc_schedule(void);