X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;f=bertos%2Fkern%2Fproc.c;h=0bc046f803533999d3f52c46b3ff87b9e6803886;hb=4ac3fd00c7407310d00e3b09fc96ac2293de674e;hp=7d0de4721c3db41f409475abf62cce70f6488dac;hpb=ef33330c3e830d29142c4a933c1a77732a20f025;p=bertos.git

diff --git a/bertos/kern/proc.c b/bertos/kern/proc.c
index 7d0de472..0bc046f8 100644
--- a/bertos/kern/proc.c
+++ b/bertos/kern/proc.c
@@ -26,205 +26,387 @@
  * invalidate any other reasons why the executable file might be covered by
  * the GNU General Public License.
  *
- * Copyright 2001,2004 Develer S.r.l. (http://www.develer.com/)
- * Copyright 1999,2000,2001 Bernardo Innocenti
+ * \brief Simple preemptive multitasking scheduler.
- *
- * -->
+ * Preemption is explicitly regulated at the exit of each interrupt service
+ * routine (ISR). Each task obtains a time quantum as soon as it is scheduled
+ * on the CPU and its quantum is decremented at each clock tick. The frequency
+ * of the timer determines the system tick granularity and CONFIG_KERN_QUANTUM
+ * the time sharing interval.
+ *
+ * When the quantum expires the handler proc_needPreempt() checks if the
+ * preemption is enabled and in this case proc_schedule() is called, that
+ * possibly replaces the current running thread with a different one.
+ *
+ * The preemption can be disabled or enabled via proc_forbid() and
+ * proc_permit() primitives. This is implemented using a global atomic counter.
+ * When the counter is greater than 0 the task cannot be preempted; only when
+ * the counter reaches 0 the task can be preempted again.
+ *
+ * Preemption-disabled sections may be nested. The preemption will be
+ * re-enabled when the outermost preemption-disabled section completes.
+ *
+ * The voluntary preemption still happens via proc_switch() or proc_yield().
+ * The first one assumes the current process has been already added to a
+ * private wait queue (e.g., on a semaphore or a signal), while the second one
+ * takes care of adding the process into the ready queue.
+ *
+ * Context switch is done by CPU-dependent support routines. In case of a
+ * voluntary preemption the context switch routine must take care of
+ * saving/restoring only the callee-save registers (the voluntary-preemption is
+ * actually a function call). The kernel-preemption always happens inside a
+ * signal/interrupt context and it must take care of saving all registers. For
+ * this, in the entry point of each ISR the caller-save registers must be
+ * saved. In the ISR exit point, if the context switch must happen, we switch
+ * to user-context and call the same voluntary context switch routine that take
+ * care of saving/restoring also the callee-save registers. On resume from the
+ * switch, the interrupt exit point moves back to interrupt-context, resumes
+ * the caller-save registers (saved in the ISR entry point) and return from the
+ * interrupt-context.
  *
- * \brief Simple realtime multitasking scheduler.
- *        Context switching is only done cooperatively.
  *
- * \version $Id$
+ * \note Thread priority (if enabled by CONFIG_KERN_PRI) defines the order in
+ * the \p proc_ready_list and the capability to deschedule a running process. A
+ * low-priority thread can't preempt a high-priority thread.
+ *
+ * A high-priority process can preempt a low-priority process immediately (it
+ * will be descheduled and replaced in the interrupt exit point). Processes
+ * running at the same priority can be descheduled when they expire the time
+ * quantum.
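+ *
+ * A minimal usage sketch of a nested preemption-disabled section (purely
+ * illustrative; do_work() and do_more_work() are placeholder functions):
+ *
+ * \code
+ * proc_forbid();    // preemption off (counter 0 -> 1)
+ * do_work();
+ * proc_forbid();    // nested section (counter 1 -> 2)
+ * do_more_work();
+ * proc_permit();    // still non-preemptible (counter 2 -> 1)
+ * proc_permit();    // preemption on again (counter 1 -> 0)
+ * \endcode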
* - * \author Bernardo Innocenti + * \note Sleeping while preemption is disabled fallbacks to a busy-wait sleep. + * Voluntary preemption when preemption is disabled raises a kernel bug. + * + * --> + * + * \brief Simple cooperative and preemptive multitasking scheduler. + * + * \author Bernie Innocenti * \author Stefano Fedrigo + * \author Andrea Righi */ - #include "proc_p.h" #include "proc.h" -#include "cfg/cfg_arch.h" /* ARCH_EMUL */ -#include +#include "cfg/cfg_proc.h" +#define LOG_LEVEL KERN_LOG_LEVEL +#define LOG_FORMAT KERN_LOG_FORMAT +#include + +#include "cfg/cfg_monitor.h" +#include // ROUND_UP2 #include -#include /* ABS() */ +#include // CONFIG_DEPEND() #include #include #include +#include -#include +#if CONFIG_KERN_HEAP + #include +#endif #include /* memset() */ -/** - * CPU dependent context switching routines. +#define PROC_SIZE_WORDS (ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t)) + +/* + * The scheduer tracks ready processes by enqueuing them in the + * ready list. * - * \note This function *MUST* preserve also the status of the interrupts. + * \note Access to the list must occur while interrupts are disabled. */ -EXTERN_C void asm_switch_context(cpustack_t **new_sp, cpustack_t **save_sp); -EXTERN_C int asm_switch_version(void); +REGISTER List proc_ready_list; /* - * The scheduer tracks ready and waiting processes - * by enqueuing them in these lists. A pointer to the currently - * running process is stored in the CurrentProcess pointer. + * Holds a pointer to the TCB of the currently running process. * - * NOTE: these variables are protected by DI/EI locking + * \note User applications should use proc_current() to retrieve this value. */ -REGISTER Process *CurrentProcess; -REGISTER List ProcReadyList; +REGISTER Process *current_process; + +/** The main process (the one that executes main()). */ +static struct Process main_process; + +#if CONFIG_KERN_HEAP +/** + * Local heap dedicated to allocate the memory used by the processes. + */ +static HEAP_DEFINE_BUF(heap_buf, CONFIG_KERN_HEAP_SIZE); +static Heap proc_heap; -#if CONFIG_KERN_PREEMPTIVE /* - * The time sharing scheduler forces a task switch when - * the current process has consumed its quantum. + * Keep track of zombie processes (processes that are exiting and need to + * release some resources). + * + * \note Access to the list must occur while kernel preemption is disabled. */ -uint16_t Quantum; -#endif +static List zombie_list; +#endif /* CONFIG_KERN_HEAP */ -/* In Win32 we must emulate stack on the real process stack */ -#if (ARCH & ARCH_EMUL) -extern List StackFreeList; -#endif +/* + * Check if the process context switch can be performed directly by the + * architecture-dependent asm_switch_context() or if it must be delayed + * because we're in the middle of an ISR. + * + * Return true if asm_switch_context() can be executed, false + * otherwise. + * + * NOTE: if an architecture does not implement IRQ_RUNNING() this function + * always returns true. + */ +#define CONTEXT_SWITCH_FROM_ISR() (!IRQ_RUNNING()) -/** The main process (the one that executes main()). */ -struct Process MainProcess; +/* + * Save context of old process and switch to new process. + */ +static void proc_context_switch(Process *next, Process *prev) +{ + cpu_stack_t *dummy; + if (UNLIKELY(next == prev)) + return; + /* + * If there is no old process, we save the old stack pointer into a + * dummy variable that we ignore. In fact, this happens only when the + * old process has just exited. 
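+	 *
+	 * (asm_switch_context() only writes the outgoing stack pointer into
+	 * that location and the value is never used afterwards, so a local
+	 * dummy is safe.)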
+ */ + asm_switch_context(&next->stack, prev ? &prev->stack : &dummy); +} -static void proc_init_struct(Process *proc) +static void proc_initStruct(Process *proc) { /* Avoid warning for unused argument. */ (void)proc; #if CONFIG_KERN_SIGNALS proc->sig_recv = 0; -#endif - -#if CONFIG_KERN_PREEMPTIVE - proc->forbid_cnt = 0; + proc->sig_wait = 0; #endif #if CONFIG_KERN_HEAP proc->flags = 0; #endif + +#if CONFIG_KERN_PRI + proc->link.pri = 0; +#endif } MOD_DEFINE(proc); void proc_init(void) { - LIST_INIT(&ProcReadyList); + LIST_INIT(&proc_ready_list); -#if CONFIG_KERN_MONITOR - monitor_init(); +#if CONFIG_KERN_HEAP + LIST_INIT(&zombie_list); + heap_init(&proc_heap, heap_buf, sizeof(heap_buf)); #endif - - /* We "promote" the current context into a real process. The only thing we have + /* + * We "promote" the current context into a real process. The only thing we have * to do is create a PCB and make it current. We don't need to setup the stack * pointer because it will be written the first time we switch to another process. */ - proc_init_struct(&MainProcess); - CurrentProcess = &MainProcess; + proc_initStruct(&main_process); + current_process = &main_process; - /* Make sure the assembly routine is up-to-date with us */ - ASSERT(asm_switch_version() == 1); +#if CONFIG_KERN_MONITOR + monitor_init(); + monitor_add(current_process, "main"); +#endif MOD_INIT(proc); } +#if CONFIG_KERN_HEAP + +/** + * Free all the resources of all zombie processes previously added to the zombie + * list. + */ +static void proc_freeZombies(void) +{ + Process *proc; + + while (1) + { + PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list)); + if (proc == NULL) + return; + + if (proc->flags & PF_FREESTACK) + { + PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base, + proc->stack_size + PROC_SIZE_WORDS * sizeof(cpu_stack_t))); + } + } +} + +/** + * Enqueue a process in the zombie list. + */ +static void proc_addZombie(Process *proc) +{ + Node *node; +#if CONFIG_KERN_PREEMPT + ASSERT(!proc_preemptAllowed()); +#endif + +#if CONFIG_KERN_PRI + node = &(proc)->link.link; +#else + node = &(proc)->link; +#endif + LIST_ASSERT_VALID(&zombie_list); + ADDTAIL(&zombie_list, node); +} + +#endif /* CONFIG_KERN_HEAP */ + /** * Create a new process, starting at the provided entry point. * + * + * \note The function + * \code + * proc_new(entry, data, stacksize, stack) + * \endcode + * is a more convenient way to create a process, as you don't have to specify + * the name. + * * \return Process structure of new created process * if successful, NULL otherwise. */ -struct Process *proc_new_with_name(UNUSED(const char *, name), void (*entry)(void), iptr_t data, size_t stacksize, cpustack_t *stack_base) +struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base) { Process *proc; - size_t i; - size_t proc_size_words = ROUND2(sizeof(Process), sizeof(cpustack_t)) / sizeof(cpustack_t); + LOG_INFO("name=%s", name); #if CONFIG_KERN_HEAP bool free_stack = false; -#endif -#if (ARCH & ARCH_EMUL) - /* Ignore stack provided by caller and use the large enough default instead. */ - stack_base = (cpustack_t *)list_remHead(&StackFreeList); + /* + * Free up resources of a zombie process. 
+ * + * We're implementing a kind of lazy garbage collector here for + * efficiency reasons: we can avoid to introduce overhead into another + * kernel task dedicated to free up resources (e.g., idle) and we're + * not introducing any overhead into the scheduler after a context + * switch (that would be *very* bad, because the scheduler runs with + * IRQ disabled). + * + * In this way we are able to release the memory of the zombie tasks + * without disabling IRQs and without introducing any significant + * overhead in any other kernel task. + */ + proc_freeZombies(); - stacksize = CONFIG_PROC_DEFSTACKSIZE; -#elif CONFIG_KERN_HEAP /* Did the caller provide a stack for us? */ if (!stack_base) { /* Did the caller specify the desired stack size? */ - if (!stacksize) - stacksize = CONFIG_PROC_DEFSTACKSIZE + sizeof(Process); + if (!stack_size) + stack_size = KERN_MINSTACKSIZE; /* Allocate stack dinamically */ - if (!(stack_base = heap_alloc(stacksize))) + PROC_ATOMIC(stack_base = + (cpu_stack_t *)heap_allocmem(&proc_heap, stack_size)); + if (stack_base == NULL) return NULL; free_stack = true; } -#else + +#else // CONFIG_KERN_HEAP + /* Stack must have been provided by the user */ - ASSERT(stack_base); - ASSERT(stacksize); -#endif + ASSERT2(IS_VALID_PTR(stack_base), "Invalid stack pointer. Did you forget to \ + enable CONFIG_KERN_HEAP?"); + ASSERT2(stack_size, "Stack size cannot be 0."); + +#endif // CONFIG_KERN_HEAP #if CONFIG_KERN_MONITOR - /* Fill-in the stack with a special marker to help debugging */ - memset(stack_base, CONFIG_KERN_STACKFILLCODE, stacksize / sizeof(cpustack_t)); + /* + * Fill-in the stack with a special marker to help debugging. + * On 64bit platforms, CONFIG_KERN_STACKFILLCODE is larger + * than an int, so the (int) cast is required to silence the + * warning for truncating its size. + */ + memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size); #endif /* Initialize the process control block */ if (CPU_STACK_GROWS_UPWARD) { - proc = (Process*)stack_base; - proc->stack = stack_base + proc_size_words; + proc = (Process *)stack_base; + proc->stack = stack_base + PROC_SIZE_WORDS; + // On some architecture stack should be aligned, so we do it. + proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t)))); if (CPU_SP_ON_EMPTY_SLOT) proc->stack++; } else { - proc = (Process*)(stack_base + stacksize / sizeof(cpustack_t) - proc_size_words); - proc->stack = (cpustack_t*)proc; + proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS); + // On some architecture stack should be aligned, so we do it. 
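+		// Round the initial stack pointer down to a sizeof(cpu_aligned_stack_t) boundary.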
+ proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t))); if (CPU_SP_ON_EMPTY_SLOT) proc->stack--; } + /* Ensure stack is aligned */ + ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0); - proc_init_struct(proc); + stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t); + proc_initStruct(proc); proc->user_data = data; -#if CONFIG_KERN_HEAP +#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR proc->stack_base = stack_base; proc->stack_size = stack_size; + #if CONFIG_KERN_HEAP if (free_stack) proc->flags |= PF_FREESTACK; + #endif #endif + proc->user_entry = entry; + CPU_CREATE_NEW_STACK(proc->stack); - /* Initialize process stack frame */ - CPU_PUSH_CALL_CONTEXT(proc->stack, proc_exit); - CPU_PUSH_CALL_CONTEXT(proc->stack, entry); - - /* Push a clean set of CPU registers for asm_switch_context() */ - for (i = 0; i < CPU_SAVED_REGS_CNT; i++) - CPU_PUSH_WORD(proc->stack, CPU_REG_INIT_VALUE(i)); +#if CONFIG_KERN_MONITOR + monitor_add(proc, name); +#endif /* Add to ready list */ ATOMIC(SCHED_ENQUEUE(proc)); + return proc; +} + +/** + * Return the name of the specified process. + * + * NULL is a legal argument and will return the name "". + */ +const char *proc_name(struct Process *proc) +{ #if CONFIG_KERN_MONITOR - monitor_add(proc, name, stack_base, stacksize); + return proc ? proc->monitor.name : ""; +#else + (void)proc; + return "---"; #endif +} - return proc; +/// Return the name of the currently running process +const char *proc_currentName(void) +{ + return proc_name(proc_current()); } -/** Rename a process */ +/// Rename a process void proc_rename(struct Process *proc, const char *name) { #if CONFIG_KERN_MONITOR @@ -235,30 +417,101 @@ void proc_rename(struct Process *proc, const char *name) } +#if CONFIG_KERN_PRI /** - * System scheduler: pass CPU control to the next process in - * the ready queue. + * Change the scheduling priority of a process. + * + * Process piorities are signed ints, whereas a larger integer value means + * higher scheduling priority. The default priority for new processes is 0. + * The idle process runs with the lowest possible priority: INT_MIN. + * + * A process with a higher priority always preempts lower priority processes. + * Processes of equal priority share the CPU time according to a simple + * round-robin policy. * - * Saving and restoring the context on the stack is done - * by a CPU-dependent support routine which must usually be - * written in assembly. + * As a general rule to maximize responsiveness, compute-bound processes + * should be assigned negative priorities and tight, interactive processes + * should be assigned positive priorities. + * + * To avoid interfering with system background activities such as input + * processing, application processes should remain within the range -10 + * and +10. + */ +void proc_setPri(struct Process *proc, int pri) +{ + if (proc->link.pri == pri) + return; + + proc->link.pri = pri; + + if (proc != current_process) + ATOMIC(sched_reenqueue(proc)); +} +#endif // CONFIG_KERN_PRI + +INLINE void proc_run(void) +{ + void (*entry)(void) = current_process->user_entry; + + LOG_INFO("New process starting at %p", entry); + entry(); +} + +/** + * Entry point for all the processes. + */ +void proc_entry(void) +{ + /* + * Return from a context switch assumes interrupts are disabled, so + * we need to explicitly re-enable them as soon as possible. 
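+	 *
+	 * When the user entry function returns, proc_exit() below terminates
+	 * the process.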
+ */ + IRQ_ENABLE; + /* Call the actual process's entry point */ + proc_run(); + proc_exit(); +} + +/** + * Terminate the current process */ -void proc_schedule(void) +void proc_exit(void) { - struct Process *old_process; - cpuflags_t flags; + LOG_INFO("%p:%s", current_process, proc_currentName()); - /* Remember old process to save its context later */ - old_process = CurrentProcess; +#if CONFIG_KERN_MONITOR + monitor_remove(current_process); +#endif -#ifdef IRQ_RUNNING - /* Scheduling in interrupts is a nono. */ - ASSERT(!IRQ_RUNNING()); + proc_forbid(); +#if CONFIG_KERN_HEAP + /* + * Set the task as zombie, its resources will be freed in proc_new() in + * a lazy way, when another process will be created. + */ + proc_addZombie(current_process); #endif + current_process = NULL; + proc_permit(); + + proc_switch(); + + /* never reached */ + ASSERT(0); +} + +/** + * Call the scheduler and eventually replace the current running process. + */ +static void proc_schedule(void) +{ + Process *old_process = current_process; + + IRQ_ASSERT_DISABLED(); /* Poll on the ready queue for the first ready process */ - IRQ_SAVE_DISABLE(flags); - while (!(CurrentProcess = (struct Process *)list_remHead(&ProcReadyList))) + LIST_ASSERT_VALID(&proc_ready_list); + while (!(current_process = (struct Process *)list_remHead(&proc_ready_list))) { /* * Make sure we physically reenable interrupts here, no matter what @@ -267,7 +520,7 @@ void proc_schedule(void) * process will ever wake up. * * During idle-spinning, an interrupt can occur and it may - * modify \p ProcReadyList. To ensure that compiler reload this + * modify \p proc_ready_list. To ensure that compiler reload this * variable every while cycle we call CPU_MEMORY_BARRIER. * The memory barrier ensure that all variables used in this context * are reloaded. @@ -280,139 +533,117 @@ void proc_schedule(void) MEMORY_BARRIER; IRQ_DISABLE; } - IRQ_RESTORE(flags); - - /* - * Optimization: don't switch contexts when the active - * process has not changed. - */ - if (CurrentProcess != old_process) - { - cpustack_t *dummy; - -#if CONFIG_KERN_PREEMPTIVE - /* Reset quantum for this process */ - Quantum = CONFIG_KERN_QUANTUM; -#endif - - /* Save context of old process and switch to new process. If there is no - * old process, we save the old stack pointer into a dummy variable that - * we ignore. In fact, this happens only when the old process has just - * exited. - * TODO: Instead of physically clearing the process at exit time, a zombie - * list should be created. - */ - asm_switch_context(&CurrentProcess->stack, old_process ? &old_process->stack : &dummy); - } - + if (CONTEXT_SWITCH_FROM_ISR()) + proc_context_switch(current_process, old_process); /* This RET resumes the execution on the new process */ + LOG_INFO("resuming %p:%s\n", current_process, proc_currentName()); } +#if CONFIG_KERN_PREEMPT +/* Global preemption nesting counter */ +cpu_atomic_t preempt_count; + +/* + * The time sharing interval: when a process is scheduled on a CPU it gets an + * amount of CONFIG_KERN_QUANTUM clock ticks. When these ticks expires and + * preemption is enabled a new process is selected to run. + */ +int _proc_quantum; /** - * Terminate the current process + * Check if we need to schedule another task */ -void proc_exit(void) +bool proc_needPreempt(void) { -#if CONFIG_KERN_MONITOR - monitor_remove(CurrentProcess); -#endif - -#if CONFIG_KERN_HEAP - /* - * The following code is BROKEN. 
- * We are freeing our own stack before entering proc_schedule() - * BAJO: A correct fix would be to rearrange the scheduler with - * an additional parameter which frees the old stack/process - * after a context switch. - */ - if (CurrentProcess->flags & PF_FREESTACK) - heap_free(CurrentProcess->stack_base, CurrentProcess->stack_size); - heap_free(CurrentProcess); -#endif - -#if (ARCH & ARCH_EMUL) -#warning This is wrong - /* Reinsert process stack in free list */ - ADDHEAD(&StackFreeList, (Node *)(CurrentProcess->stack - - (CONFIG_PROC_DEFSTACKSIZE / sizeof(cpustack_t)))); - - /* - * NOTE: At this point the first two words of what used - * to be our stack contain a list node. From now on, we - * rely on the compiler not reading/writing the stack. - */ -#endif /* ARCH_EMUL */ - - CurrentProcess = NULL; - proc_schedule(); - /* not reached */ + if (UNLIKELY(current_process == NULL)) + return false; + if (!proc_preemptAllowed()) + return false; + if (LIST_EMPTY(&proc_ready_list)) + return false; + return preempt_quantum() ? prio_next() > prio_curr() : + prio_next() >= prio_curr(); } - /** - * Co-operative context switch + * Preempt the current task. */ -void proc_switch(void) +void proc_preempt(void) { - cpuflags_t flags; - - IRQ_SAVE_DISABLE(flags); - SCHED_ENQUEUE(CurrentProcess); - IRQ_RESTORE(flags); - + IRQ_ASSERT_DISABLED(); + ASSERT(current_process); + + /* Perform the kernel preemption */ + LOG_INFO("preempting %p:%s\n", current_process, proc_currentName()); + /* We are inside a IRQ context, so ATOMIC is not needed here */ + SCHED_ENQUEUE(current_process); + preempt_reset_quantum(); proc_schedule(); } +#endif /* CONFIG_KERN_PREEMPT */ - -/** - * Get the pointer to the current process - */ -struct Process *proc_current(void) +/* Immediately switch to a particular process */ +static void proc_switchTo(Process *proc) { - return CurrentProcess; + Process *old_process = current_process; + + SCHED_ENQUEUE(current_process); + preempt_reset_quantum(); + current_process = proc; + proc_context_switch(current_process, old_process); } /** - * Get the pointer to the user data of the current process + * Give the control of the CPU to another process. + * + * \note Assume the current process has been already added to a wait queue. + * + * \warning This should be considered an internal kernel function, even if it + * is allowed, usage from application code is strongly discouraged. */ -iptr_t proc_current_user_data(void) +void proc_switch(void) { - return CurrentProcess->user_data; + ASSERT(proc_preemptAllowed()); + ATOMIC( + preempt_reset_quantum(); + proc_schedule(); + ); } - -#if CONFIG_KERN_PREEMPTIVE - /** - * Disable preemptive task switching. - * - * The scheduler maintains a per-process nesting counter. Task switching is - * effectively re-enabled only when the number of calls to proc_permit() - * matches the number of calls to proc_forbid(). - * - * Calling functions that could sleep while task switching is disabled - * is dangerous, although supported. Preemptive task switching is - * resumed while the process is sleeping and disabled again as soon as - * it wakes up again. - * - * \sa proc_permit() + * Immediately wakeup a process, dispatching it to the CPU. */ -void proc_forbid(void) +void proc_wakeup(Process *proc) { - /* No need to protect against interrupts here. 
  */
-	++CurrentProcess->forbid_cnt;
+	ASSERT(proc_preemptAllowed());
+	ASSERT(current_process);
+	IRQ_ASSERT_DISABLED();
+
+	if (prio_proc(proc) >= prio_curr())
+		proc_switchTo(proc);
+	else
+		SCHED_ENQUEUE_HEAD(proc);
 }
 
 /**
- * Re-enable preemptive task switching.
- *
- * \sa proc_forbid()
+ * Voluntarily release the CPU.
  */
-void proc_permit(void)
+void proc_yield(void)
 {
-	/* No need to protect against interrupts here. */
-	--CurrentProcess->forbid_cnt;
-}
+	Process *proc;
 
-#endif /* CONFIG_KERN_PREEMPTIVE */
+	/*
+	 * Voluntary preemption while preemption is disabled is considered
+	 * illegal, as not very useful in practice.
+	 *
+	 * ASSERT if it happens.
+	 */
+	ASSERT(proc_preemptAllowed());
+	IRQ_ASSERT_ENABLED();
+
+	IRQ_DISABLE;
+	proc = (struct Process *)list_remHead(&proc_ready_list);
+	if (proc)
+		proc_switchTo(proc);
+	IRQ_ENABLE;
+}
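
A minimal usage sketch of the scheduler API documented above. Assumptions: proc_new() is the convenience wrapper mentioned in the proc_new_with_name() comment, the <kern/proc.h> include path is the usual application-facing header, and KERN_MINSTACKSIZE, cpu_stack_t and ASSERT() are available to application code; worker() and the priority value are arbitrary examples.

#include <kern/proc.h>

/* Statically allocated stack for the worker process. */
static cpu_stack_t worker_stack[KERN_MINSTACKSIZE / sizeof(cpu_stack_t)];

static void worker(void)
{
	for (;;)
	{
		/* ... do some work ... */

		/* With the cooperative scheduler the CPU must be released explicitly. */
		proc_yield();
	}
}

int main(void)
{
	struct Process *worker_proc;

	proc_init();

	/* Create the worker on the static stack and let it share the CPU. */
	worker_proc = proc_new(worker, NULL, sizeof(worker_stack), worker_stack);
	ASSERT(worker_proc);

	/* Requires CONFIG_KERN_PRI: run the worker slightly above the default priority 0. */
	proc_setPri(worker_proc, 1);

	for (;;)
		proc_yield();
}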