1 #include "cfg/cfg_lwip.h"
3 #define LOG_LEVEL 3 //INFO
4 #define LOG_FORMAT 0 //TERSE
10 #include <cpu/types.h>
12 #include <arch/sys_arch.h>
15 #include <kern/signal.h>
17 #include <kern/proc.h>
18 #include <kern/proc_p.h>
20 #include <struct/heap.h>
22 #include <mware/event.h>
24 /****************************************************************************/
27 * Generic mutex (binary semaphore) implementation
29 * TODO: move this to a different place (i.e., bertos/kern/sem.c).
31 INLINE void mutex_verify(struct Mutex *s)
35 LIST_ASSERT_VALID(&s->wait_queue);
36 ASSERT((s->count == MUTEX_LOCKED) || (s->count == MUTEX_UNLOCKED));
39 bool mutex_attempt(struct Mutex *s)
41 return cpu_atomic_xchg(&s->count, MUTEX_LOCKED) == MUTEX_UNLOCKED;
/*
 * Slow path of mutex_obtain(), taken when the mutex is already locked:
 * enqueue the running process on the mutex wait queue so that
 * mutex_release() can wake it up later.  NOINLINE keeps the
 * mutex_obtain() fast path small.
 */
static NOINLINE void mutex_slowpath_obtain(struct Mutex *s)
/* Append ourselves to the list of processes waiting for this mutex.
 * NOTE(review): this must run atomically w.r.t. the scheduler and be
 * followed by a reschedule; those lines are not visible in this
 * excerpt — confirm against the full source. */
ADDTAIL(&s->wait_queue, (Node *)current_process)
53 void mutex_obtain(struct Mutex *s)
55 if (UNLIKELY(cpu_atomic_xchg(&s->count, MUTEX_LOCKED)) !=
57 mutex_slowpath_obtain(s);
/*
 * Release the mutex: hand it directly to the oldest queued waiter, if
 * any, and wake that process up.
 */
void mutex_release(struct Mutex *s)
/* Pop the first waiter (FIFO order) off the wait queue.
 * NOTE(review): when the queue is empty the counter must be reset to
 * MUTEX_UNLOCKED; that path is not visible in this excerpt — confirm
 * against the full source. */
proc = (Process *)list_remHead(&s->wait_queue);
/* Ownership passes straight to the dequeued process: waking it without
 * touching s->count means the mutex never appears free in between. */
ATOMIC(proc_wakeup(proc));
74 void mutex_init(struct Mutex *s)
76 LIST_INIT(&s->wait_queue);
80 /****************************************************************************/
/*
 * Pool-allocated semaphore wrapper: a free-list node plus the embedded
 * Mutex handed out to lwIP as a sys_sem_t.
 */
typedef struct SemNode
/* Fixed pool size: semaphores are never heap-allocated in this port. */
#define MAX_SEM_CNT 16
/* Static backing storage; unused entries live on the free_sem list. */
static struct SemNode sem_pool[MAX_SEM_CNT];
94 * Creates and returns a new semaphore.
96 * \param count Specifies the initial state of the semaphore.
97 * \return The semaphore or SYS_SEM_NULL on error.
sys_sem_t sys_sem_new(u8_t count)
/* Pool allocation must be atomic: multiple lwIP threads can race here. */
PROC_ATOMIC(sem = (SemNode *)list_remHead(&free_sem));
/* Pool exhausted: report failure to the caller (SYS_SEM_NULL). */
LOG_ERR("Out of semaphores!\n");
mutex_init(&sem->sem);
// must obtain semaphore depending on the parameter
// NOTE: count == 1 means that the semaphore is unlocked
/* count == 0: start the semaphore in the locked (taken) state. */
mutex_obtain(&sem->sem);
/* lwIP's sys_sem_t is simply a pointer to the embedded Mutex. */
return (sys_sem_t)&sem->sem;
119 * Frees a semaphore created by sys_sem_new.
* \param semaphore Semaphore to be freed
123 void sys_sem_free(sys_sem_t semaphore)
125 SemNode *sem = containerof(semaphore, SemNode, sem);
126 PROC_ATOMIC(ADDHEAD(&free_sem, &sem->node));
130 * Signals (or releases) a semaphore.
132 void sys_sem_signal(sys_sem_t sem)
138 * Blocks the thread while waiting for the semaphore to be signaled.
140 * The timeout parameter specifies how many milliseconds the function should block
141 * before returning; if the function times out, it should return SYS_ARCH_TIMEOUT.
142 * If timeout=0, then the function should block indefinitely.
143 * If the function acquires the semaphore, it should return how many milliseconds
144 * expired while waiting for the semaphore.
145 * The function may return 0 if the semaphore was immediately available.
u32_t sys_arch_sem_wait(sys_sem_t sem, u32_t timeout)
ticks_t end, start = timer_clock();
/* timeout == 0: block indefinitely, then report how long we waited. */
return ticks_to_ms(timer_clock() - start);
/* Poll until the deadline passes or the mutex is grabbed. */
} while ((end - start < ms_to_ticks(timeout) && !mutex_attempt(sem)));
/* NOTE(review): if the loop exits with end - start exactly equal to
 * ms_to_ticks(timeout) and the attempt failed, this reports elapsed
 * time (success) instead of SYS_ARCH_TIMEOUT — the comparison should
 * likely be >=, or acquisition should be tracked explicitly. */
return (end - start > ms_to_ticks(timeout)) ?
SYS_ARCH_TIMEOUT : (u32_t)ticks_to_ms(end - start);
/*
 * Mailbox (message port) wrapper: free-list node plus the underlying
 * BeRTOS MsgPort handed out to lwIP as a sys_mbox_t.
 */
typedef struct IpPort
/* Fixed pool sizes: no dynamic allocation in this porting layer. */
#define MAX_PORT_CNT 16
static struct IpPort port_pool[MAX_PORT_CNT];
/* Ports not currently handed out to lwIP. */
static List free_port;
/* Message envelopes carried through the mailboxes, also pool-backed. */
#define MAX_MSG_CNT 32
static struct IpMsg msg_pool[MAX_MSG_CNT];
/* Envelopes not currently queued in any mailbox. */
static List free_msg;
// TODO: allocate memory for 'size' messages
/* Create a new mailbox; the 'size' hint is ignored (fixed pools). */
sys_mbox_t sys_mbox_new(UNUSED_ARG(int, size))
/* Atomically grab a port from the static pool. */
PROC_ATOMIC(port = (IpPort *)list_remHead(&free_port));
/* Pool exhausted: report failure to the caller. */
LOG_ERR("Out of message ports!\n");
return SYS_MBOX_NULL;
/* Bind the port to a generic event so fetchers can block on arrival. */
msg_initPort(&port->port, event_createGeneric());
/* No process is registered as waiting on this mailbox yet. */
port->port.event.Ev.Sig.sig_proc = NULL;
/* lwIP's sys_mbox_t is a pointer to the embedded MsgPort. */
return (sys_mbox_t)(&port->port);
206 void sys_mbox_free(sys_mbox_t mbox)
208 IpPort *port = containerof(mbox, IpPort, port);
209 PROC_ATOMIC(ADDHEAD(&free_port, &port->node));
212 void sys_mbox_post(sys_mbox_t mbox, void *data)
214 sys_mbox_trypost(mbox, data);
218 * Try to post the "msg" to the mailbox. Returns ERR_MEM if this one
219 * is full, else, ERR_OK if the "msg" is posted.
/* Try to post 'data' to the mailbox: returns ERR_MEM when the message
 * pool is empty, ERR_OK once the message is queued. */
err_t sys_mbox_trypost(sys_mbox_t mbox, void *data)
/* Grab a free message envelope atomically. */
PROC_ATOMIC(msg = (IpMsg *)list_remHead(&free_msg));
LOG_ERR("out of messages!\n");
/* Append the envelope to the mailbox queue.
 * NOTE(review): the matching msg_lockPort() call is not visible in
 * this excerpt — confirm the queue is manipulated under the port lock. */
ADDTAIL(&mbox->queue, &msg->msg.link);
msg_unlockPort(mbox);
/* Wake the fetcher only if one registered itself on the event. */
if (mbox->event.Ev.Sig.sig_proc)
	event_do(&mbox->event);
u32_t sys_arch_mbox_fetch(sys_mbox_t mbox, void **data, u32_t timeout)
/* Blocks the thread until a message arrives in the mailbox, but does
   not block the thread longer than "timeout" milliseconds (similar to
   the sys_arch_sem_wait() function).  If "timeout" is 0, the thread is
   blocked until a message arrives.  The "data" argument is a result
   parameter set by the function (i.e., by doing "*data = ptr").  The
   lwIP contract allows it to be NULL to indicate the message should be
   dropped — NOTE(review): the assignment below is unconditional, so a
   NULL "data" would crash; confirm against the full source.

   The return values are the same as for sys_arch_sem_wait(): number of
   milliseconds spent waiting, or SYS_ARCH_TIMEOUT on timeout.

   Note that a function with a similar name, sys_mbox_fetch(), is
   implemented by lwIP itself on top of this primitive. */
ticks_t start = timer_clock();
/* Register ourselves as the process to signal on message arrival. */
mbox->event.Ev.Sig.sig_proc = proc_current();
/* timeout == 0: wait with no deadline. */
event_wait(&mbox->event);
/* Bounded wait: false return means the deadline expired. */
if (!event_waitTimeout(&mbox->event,
		ms_to_ticks(timeout)))
/* Timed out with no message: unregister and bail out. */
mbox->event.Ev.Sig.sig_proc = NULL;
return SYS_ARCH_TIMEOUT;
/* Message arrived: unregister before touching the queue. */
mbox->event.Ev.Sig.sig_proc = NULL;
/* Unwrap the payload and recycle the envelope into the free pool. */
*data = containerof(msg, IpMsg, msg)->data;
PROC_ATOMIC(ADDHEAD(&free_msg, &msg->link));
/* Report how long we actually waited. */
return ticks_to_ms(timer_clock() - start);
u32_t sys_arch_mbox_tryfetch(sys_mbox_t mbox, void **data)
/* This is similar to sys_arch_mbox_fetch, however if a message is not
   present in the mailbox, it immediately returns with the code
   SYS_MBOX_EMPTY.  On success 0 is returned.

   To allow for efficient implementations, this can be defined as a
   function-like macro in sys_arch.h instead of a normal function.  For
   example, a naive implementation could be:
     #define sys_arch_mbox_tryfetch(mbox,msg) sys_arch_mbox_fetch(mbox,msg,1)
   although this would introduce unnecessary delays. */
/* Nothing queued: report empty without blocking. */
return SYS_MBOX_EMPTY;
/* Hand the payload to the caller and recycle the envelope. */
*data = containerof(msg, IpMsg, msg)->data;
PROC_ATOMIC(ADDHEAD(&free_msg, &msg->link));
/*
 * Per-thread bookkeeping for threads spawned via sys_thread_new():
 * entry point, argument, owning process and a private lwIP timeout
 * list (looked up by sys_arch_timeouts()).
 */
typedef struct ThreadNode
/* Real thread entry point, invoked by thread_trampoline(). */
void (*entry)(void *);
/* Per-thread lwIP timeout list. */
struct sys_timeouts timeout;
/* Fixed-size thread pool: threads are never destroyed in this port. */
#define MAX_THREAD_CNT 8
static ThreadNode thread_pool[MAX_THREAD_CNT];
static List free_thread;
static List used_thread;
static struct sys_timeouts lwip_system_timeouts; // Default timeouts list for lwIP
337 struct sys_timeouts *sys_arch_timeouts(void)
339 ThreadNode *thread_node;
340 struct Process *curr_pid = proc_current();
342 FOREACH_NODE(thread_node, &used_thread)
344 if (thread_node->pid == curr_pid)
345 return &(thread_node->timeout);
348 return &lwip_system_timeouts;
351 static void thread_trampoline(void)
353 ThreadNode *thread_node = (ThreadNode *)proc_currentUserData();
355 thread_node->entry(thread_node->arg);
#if !CONFIG_KERN_HEAP
/*
 * NOTE: threads are never destroyed, consequently these stacks are never
 * deallocated. So, the stack allocator can be implemented as a simple index
 * that is atomically incremented at each allocation.
 */
static cpu_stack_t thread_stack[MAX_THREAD_CNT]
			[DEFAULT_THREAD_STACKSIZE / sizeof(cpu_stack_t)]
			ALIGNED(sizeof(cpu_stack_t));
/* Index of the next unused slot in thread_stack[]. */
static int last_stack;
/*
 * Spawn a new lwIP thread running thread(arg) with the given stack size
 * and priority.  The ThreadNode comes from a fixed pool and is passed
 * to the new process as user data for thread_trampoline().
 */
sys_thread_t sys_thread_new(const char *name, void (* thread)(void *arg),
void *arg, int stacksize, int prio)
ThreadNode *thread_node;
cpu_stack_t *stackbase;
/* NOTE(review): this pop appears unprotected, unlike the PROC_ATOMIC
 * pool pops elsewhere in this file — the locking may live on lines not
 * shown in this excerpt; confirm against the full source. */
thread_node = (ThreadNode *)list_remHead(&free_thread);
if (UNLIKELY(!thread_node))
LOG_ERR("Out of threads!\n");
/* Track the node so sys_arch_timeouts() can find it by pid. */
ADDHEAD(&used_thread, &thread_node->node);
thread_node->entry = thread;
thread_node->arg = arg;
#if !CONFIG_KERN_HEAP
/* Static stack allocator: slots are handed out once, never reclaimed. */
ASSERT(stacksize <= DEFAULT_THREAD_STACKSIZE);
PROC_ATOMIC(stackbase = thread_stack[last_stack++]);
/* The ThreadNode doubles as the process user data (see trampoline). */
thread_node->pid = proc_new_with_name(name, thread_trampoline,
(void *)thread_node, stacksize, stackbase);
if (thread_node->pid == NULL)
proc_setPri(thread_node->pid, prio);
/* Avoid warnings when priorities are disabled */
return thread_node->pid;
/* Seed every free list with its statically allocated pool entries. */
LIST_INIT(&free_sem);
LIST_INIT(&free_port);
LIST_INIT(&free_msg);
LIST_INIT(&free_thread);
LIST_INIT(&used_thread);
for (int i = 0; i < MAX_SEM_CNT; ++i)
	ADDHEAD(&free_sem, &sem_pool[i].node);
for (int i = 0; i < MAX_PORT_CNT; ++i)
	ADDHEAD(&free_port, &port_pool[i].node);
for (int i = 0; i < MAX_MSG_CNT; ++i)
	ADDHEAD(&free_msg, &msg_pool[i].msg.link);
for (int i = 0; i < MAX_THREAD_CNT; ++i)
	ADDHEAD(&free_thread, &thread_pool[i].node);