+ extern struct Process *current_process;
+ return current_process;
+}
+
+/**
+ * Set the scheduling priority of \a proc to \a pri.
+ *
+ * When priority scheduling is disabled (CONFIG_KERN_PRI == 0) this
+ * compiles to an empty inline stub, so callers may invoke it
+ * unconditionally without \#ifdef guards.
+ */
+#if CONFIG_KERN_PRI
+ void proc_setPri(struct Process *proc, int pri);
+#else
+ INLINE void proc_setPri(UNUSED_ARG(struct Process *,proc), UNUSED_ARG(int, pri))
+ {
+ }
+#endif
+
+#if CONFIG_KERN_PREEMPT
+
+ /**
+ * Disable preemptive task switching.
+ *
+ * The scheduler maintains a global nesting counter. Task switching is
+ * effectively re-enabled only when the number of calls to proc_permit()
+ * matches the number of calls to proc_forbid().
+ *
+ * \note Calling functions that could sleep while task switching is disabled
+ * is dangerous and unsupported.
+ *
+ * \note proc_forbid() expands inline to 1-2 asm instructions, so it's a
+ * very efficient locking primitive in simple but performance-critical
+ * situations. In all other cases, semaphores offer a more flexible and
+ * fine-grained locking primitive.
+ *
+ * \sa proc_permit()
+ */
+ INLINE void proc_forbid(void)
+ {
+ extern cpu_atomic_t preempt_count;
+ /*
+ * We don't need to protect the counter against other processes.
+ * The reason why is a bit subtle.
+ *
+ * If a process gets here, preempt_count can be either 0,
+ * or != 0. In the latter case, preemption is already disabled
+ * and no concurrency issues can occur.
+ *
+ * In the former case, we could be preempted just after reading the
+ * value 0 from memory, and a concurrent process might, in fact,
+ * bump the value of preempt_count under our nose!
+ *
+ * BUT: if this ever happens, then we won't get another chance to
+ * run until the other process calls proc_permit() to re-enable
+ * preemption. At this point, the value of preempt_count
+ * must be back to 0, and thus what we had originally read from
+ * memory happens to be valid.
+ *
+ * No matter how hard you think about it, and how complicated you
+ * make your scenario, the above holds true as long as
+ * "preempt_count != 0" means that no task switching is
+ * possible.
+ */
+ ++preempt_count;