1 #include <cfg/cfg_lwip.h>
10 #include <cpu/types.h>
12 #include <arch/sys_arch.h>
15 #include <kern/signal.h>
17 #include <kern/proc.h>
18 #include <kern/proc_p.h>
20 #include <struct/heap.h>
22 #include <mware/event.h>
24 /****************************************************************************/
27 * Generic mutex (binary semaphore) implementation
29 * TODO: move this to a different place (i.e., bertos/kern/sem.c).
31 INLINE void mutex_verify(struct Mutex *s)
35 LIST_ASSERT_VALID(&s->wait_queue);
36 ASSERT((s->count == MUTEX_LOCKED) || (s->count == MUTEX_UNLOCKED));
39 bool mutex_attempt(struct Mutex *s)
41 return cpu_atomic_xchg(&s->count, MUTEX_LOCKED) == MUTEX_UNLOCKED;
/*
 * Slow path of mutex_obtain(): called when the mutex is already locked.
 *
 * Enqueues the current process on the mutex's wait queue so that
 * mutex_release() can wake it later.
 *
 * NOTE(review): only part of the body is visible in this chunk — the
 * enqueue below is presumably wrapped in a PROC_ATOMIC() critical section
 * and followed by a reschedule (proc_switch()); confirm against the full
 * source before editing.
 */
static NOINLINE void mutex_slowpath_obtain(struct Mutex *s)
	ADDTAIL(&s->wait_queue, (Node *)current_process)
53 void mutex_obtain(struct Mutex *s)
55 if (UNLIKELY(cpu_atomic_xchg(&s->count, MUTEX_LOCKED)) !=
57 mutex_slowpath_obtain(s);
/*
 * Release the mutex, waking the first waiting process, if any.
 *
 * NOTE(review): only part of the body is visible in this chunk — the
 * dequeue below is presumably inside a PROC_ATOMIC() section together with
 * a reset of s->count to MUTEX_UNLOCKED when no waiter exists, and the
 * wakeup is presumably guarded by `if (proc)`; confirm against the full
 * source before editing.
 */
void mutex_release(struct Mutex *s)
	proc = (Process *)list_remHead(&s->wait_queue);
	ATOMIC(proc_wakeup(proc));
74 void mutex_init(struct Mutex *s)
76 LIST_INIT(&s->wait_queue);
80 /****************************************************************************/
/*
 * Wrapper linking a Mutex into the static free-list pool below.
 * NOTE(review): the member list is not fully visible in this chunk; it must
 * contain at least a Node `node` and a Mutex `sem` (see the containerof()
 * uses in sys_sem_free()).
 */
typedef struct SemNode
/* Size of the static semaphore pool handed out by sys_sem_new(). */
#define MAX_SEM_CNT 16
static struct SemNode sem_pool[MAX_SEM_CNT];
94 * Creates and returns a new semaphore.
96 * \param count Specifies the initial state of the semaphore.
97 * \return The semaphore or SYS_SEM_NULL on error.
/*
 * Create a new semaphore from the static pool.
 *
 * \param count Initial state: 1 means unlocked, 0 means locked.
 * \return The new semaphore, or SYS_SEM_NULL when the pool is exhausted.
 *
 * NOTE(review): parts of the body are not visible in this chunk — the NULL
 * check after the pool fetch (returning SYS_SEM_NULL after the log line)
 * and the guard (presumably `if (!count)`) before mutex_obtain(); confirm
 * against the full source before editing.
 */
sys_sem_t sys_sem_new(u8_t count)
	PROC_ATOMIC(sem = (SemNode *)list_remHead(&free_sem));
	/* Pool exhausted: report it (and presumably bail out). */
	LOG_ERR("Out of semaphores!\n");
	mutex_init(&sem->sem);
	// must obtain semaphore depending on the parameter
	// NOTE: count == 1 means that the semaphore is unlocked
	mutex_obtain(&sem->sem);
	return (sys_sem_t)&sem->sem;
119 * Frees a semaphore created by sys_sem_new.
121 * \param semaphore Mutex to be freed
123 void sys_sem_free(sys_sem_t semaphore)
125 SemNode *sem = containerof(semaphore, SemNode, sem);
126 PROC_ATOMIC(ADDHEAD(&free_sem, &sem->node));
130 * Signals (or releases) a semaphore.
132 void sys_sem_signal(sys_sem_t sem)
138 * Blocks the thread while waiting for the semaphore to be signaled.
140 * The timeout parameter specifies how many milliseconds the function should block
141 * before returning; if the function times out, it should return SYS_ARCH_TIMEOUT.
142 * If timeout=0, then the function should block indefinitely.
143 * If the function acquires the semaphore, it should return how many milliseconds
144 * expired while waiting for the semaphore.
145 * The function may return 0 if the semaphore was immediately available.
/*
 * Wait on a semaphore for at most \a timeout milliseconds (0 = forever).
 *
 * \return Milliseconds spent waiting, or SYS_ARCH_TIMEOUT on timeout.
 *
 * NOTE(review): only part of the body is visible in this chunk — the
 * timeout == 0 "block forever" branch (which the early return below
 * belongs to) and the head of the do/while polling loop (which must
 * refresh `end = timer_clock()` each iteration) are missing; confirm
 * against the full source before editing.
 */
u32_t sys_arch_sem_wait(sys_sem_t sem, u32_t timeout)
	ticks_t end, start = timer_clock();
	return ticks_to_ms(timer_clock() - start);
	/* Poll until the mutex is acquired or the timeout window elapses. */
	} while ((end - start < ms_to_ticks(timeout) && !mutex_attempt(sem)));
	/* NOTE(review): the loop exits when end - start equals the timeout,
	 * but the test below uses `>`, so the exact-boundary case is reported
	 * as success — verify this is intended. */
	return (end - start > ms_to_ticks(timeout)) ?
	SYS_ARCH_TIMEOUT : (u32_t)ticks_to_ms(end - start);
/*
 * Wrapper linking a message port into the static free-list pool.
 * NOTE(review): the member list is not fully visible in this chunk; it must
 * contain at least a Node `node` and the port object `port` (see the
 * containerof() uses in sys_mbox_free()).
 */
typedef struct IpPort
/* Size of the static mailbox (message port) pool. */
#define MAX_PORT_CNT 16
static struct IpPort port_pool[MAX_PORT_CNT];
static List free_port;
/* Size of the static message pool shared by all mailboxes. */
#define MAX_MSG_CNT 32
static struct IpMsg msg_pool[MAX_MSG_CNT];
static List free_msg;
188 // TODO: allocate memory for 'size' messages
/*
 * Create a new mailbox from the static port pool.
 *
 * \param size Requested capacity, currently ignored (see the TODO above:
 *             allocate memory for 'size' messages).
 * \return The new mailbox, or SYS_MBOX_NULL when the pool is exhausted.
 *
 * NOTE(review): the NULL check after the pool fetch (guarding the error
 * path below) is not visible in this chunk — confirm against the full
 * source before editing.
 */
sys_mbox_t sys_mbox_new(UNUSED_ARG(int, size))
	PROC_ATOMIC(port = (IpPort *)list_remHead(&free_port));
	LOG_ERR("Out of message ports!\n");
	return SYS_MBOX_NULL;
	/* Each port gets a fresh generic event to signal message arrival. */
	msg_initPort(&port->port, event_createGeneric());
	return (sys_mbox_t)(&port->port);
204 void sys_mbox_free(sys_mbox_t mbox)
206 IpPort *port = containerof(mbox, IpPort, port);
207 PROC_ATOMIC(ADDHEAD(&free_port, &port->node));
210 void sys_mbox_post(sys_mbox_t mbox, void *data)
212 if (UNLIKELY(sys_mbox_trypost(mbox, data) == ERR_MEM))
213 LOG_ERR("out of messages!\n");
217 * Try to post the "msg" to the mailbox. Returns ERR_MEM if this one
218 * is full, else, ERR_OK if the "msg" is posted.
/*
 * Try to post "data" to the mailbox without blocking.
 *
 * \return ERR_MEM when the shared message pool is exhausted, ERR_OK when
 *         the message was posted.
 *
 * NOTE(review): the NULL check on the pool fetch, the assignment of
 * \a data into the message, and the return statements are not visible in
 * this chunk — confirm against the full source before editing.
 */
err_t sys_mbox_trypost(sys_mbox_t mbox, void *data)
	PROC_ATOMIC(msg = (IpMsg *)list_remHead(&free_msg));
	msg_put(mbox, &msg->msg);
/*
 * Fetch a message from the mailbox, waiting at most \a timeout ms
 * (0 = wait forever).
 *
 * \return Milliseconds spent waiting, or SYS_ARCH_TIMEOUT on timeout.
 *
 * NOTE(review): several lines are not visible in this chunk — the
 * msg_get() loop head, the `if` branch that the `else if` below pairs
 * with, the NULL-message handling, and the closing of the descriptive
 * comment — confirm against the full source before editing.
 */
u32_t sys_arch_mbox_fetch(sys_mbox_t mbox, void **data, u32_t timeout)
	/* Blocks the thread until a message arrives in the mailbox, but does
	not block the thread longer than "timeout" milliseconds (similar to
	the sys_arch_sem_wait() function). If "timeout" is 0, the thread should
	be blocked until a message arrives. The "msg" argument is a result
	parameter that is set by the function (i.e., by doing "*msg =
	ptr"). The "msg" parameter maybe NULL to indicate that the message
	The return values are the same as for the sys_arch_sem_wait() function:
	Number of milliseconds spent waiting or SYS_ARCH_TIMEOUT if there was a
	Note that a function with a similar name, sys_mbox_fetch(), is
	ticks_t start = timer_clock();
	/* Block forever on the port event when no timeout was requested. */
	event_wait(&mbox->event);
	else if (!event_waitTimeout(&mbox->event,
	ms_to_ticks(timeout)))
	return SYS_ARCH_TIMEOUT;
	/* Hand the payload to the caller and recycle the message node. */
	*data = containerof(msg, IpMsg, msg)->data;
	PROC_ATOMIC(ADDHEAD(&free_msg, &msg->link));
	return ticks_to_ms(timer_clock() - start);
/*
 * Non-blocking fetch: return SYS_MBOX_EMPTY when no message is queued,
 * 0 on success (with *data set to the message payload).
 *
 * NOTE(review): the actual message fetch (presumably msg_get()), its NULL
 * check guarding the early return below, and the final `return 0;` are not
 * visible in this chunk — confirm against the full source before editing.
 */
u32_t sys_arch_mbox_tryfetch(sys_mbox_t mbox, void **data)
	/* This is similar to sys_arch_mbox_fetch, however if a message is not
	present in the mailbox, it immediately returns with the code
	SYS_MBOX_EMPTY. On success 0 is returned.
	To allow for efficient implementations, this can be defined as a
	function-like macro in sys_arch.h instead of a normal function. For
	example, a naive implementation could be:
	#define sys_arch_mbox_tryfetch(mbox,msg) \
	sys_arch_mbox_fetch(mbox,msg,1)
	although this would introduce unnecessary delays.
	return SYS_MBOX_EMPTY;
	/* Hand the payload to the caller and recycle the message node. */
	*data = containerof(msg, IpMsg, msg)->data;
	PROC_ATOMIC(ADDHEAD(&free_msg, &msg->link));
/*
 * Per-thread bookkeeping for threads spawned via sys_thread_new().
 * NOTE(review): the member list is only partially visible in this chunk;
 * it must also contain at least a Node `node`, a process handle `pid` and
 * the entry argument `arg` (see their uses below).
 */
typedef struct ThreadNode
	void (*entry)(void *);
	struct sys_timeouts timeout;
/* Maximum number of lwIP threads that can be created. */
#define MAX_THREAD_CNT 8
static ThreadNode thread_pool[MAX_THREAD_CNT];
static List free_thread;
static List used_thread;
static struct sys_timeouts lwip_system_timeouts; // Default timeouts list for lwIP
318 struct sys_timeouts *sys_arch_timeouts(void)
320 ThreadNode *thread_node;
321 struct Process *curr_pid = proc_current();
323 FOREACH_NODE(thread_node, &used_thread)
325 if (thread_node->pid == curr_pid)
326 return &(thread_node->timeout);
329 return &lwip_system_timeouts;
332 static void thread_trampoline(void)
334 ThreadNode *thread_node = (ThreadNode *)proc_currentUserData();
336 thread_node->entry(thread_node->arg);
#if !CONFIG_KERN_HEAP
/*
 * NOTE: threads are never destroyed, consequently these stacks are never
 * deallocated. So, the stack allocator can be implemented as a simple index
 * that is atomically incremented at each allocation.
 */
static cpu_stack_t thread_stack[MAX_THREAD_CNT]
	[DEFAULT_THREAD_STACKSIZE / sizeof(cpu_stack_t)]
	ALIGNED(sizeof(cpu_stack_t));
/* Bump-allocator cursor into thread_stack[]; bounded by the free_thread
 * pool (both have MAX_THREAD_CNT entries). */
static int last_stack;
/*
 * Create a new lwIP thread.
 *
 * \param name      Thread name (forwarded to the kernel).
 * \param thread    Entry function, invoked via thread_trampoline().
 * \param arg       Argument passed to \a thread.
 * \param stacksize Requested stack size in bytes.
 * \param prio      Scheduling priority for the new process.
 * \return The new process handle (or presumably NULL on failure).
 *
 * NOTE(review): several lines are not visible in this chunk — the error
 * return after the free_thread check, the CONFIG_KERN_HEAP #else branch
 * that allocates the stack dynamically, and the cleanup path (returning
 * the node to free_thread) when proc_new_with_name() fails — confirm
 * against the full source before editing.
 */
sys_thread_t sys_thread_new(char *name, void (* thread)(void *arg),
	void *arg, int stacksize, int prio)
	ThreadNode *thread_node;
	cpu_stack_t *stackbase;
	thread_node = (ThreadNode *)list_remHead(&free_thread);
	if (UNLIKELY(!thread_node))
	/* Track the node so sys_arch_timeouts() can find it by pid. */
	ADDHEAD(&used_thread, &thread_node->node);
	thread_node->entry = thread;
	thread_node->arg = arg;
#if !CONFIG_KERN_HEAP
	ASSERT(stacksize <= DEFAULT_THREAD_STACKSIZE);
	/* Bump-allocate a static stack; never freed (threads never die). */
	PROC_ATOMIC(stackbase = &thread_stack[last_stack++]);
	thread_node->pid = proc_new_with_name(name, thread_trampoline,
	(void *)thread_node, stacksize, stackbase);
	if (thread_node->pid == NULL)
	proc_setPri(thread_node->pid, prio);
	return thread_node->pid;
/*
 * NOTE(review): the statements below are the body of the port's
 * initialization routine (its signature — presumably sys_init() — is not
 * visible in this chunk). They seed every static free list with the
 * elements of its corresponding pool; must run before any sys_*_new()
 * call.
 */
	LIST_INIT(&free_sem);
	LIST_INIT(&free_port);
	LIST_INIT(&free_msg);
	LIST_INIT(&free_thread);
	LIST_INIT(&used_thread);
	/* Semaphore pool. */
	for (int i = 0; i < MAX_SEM_CNT; ++i)
		ADDHEAD(&free_sem, &sem_pool[i].node);
	/* Mailbox (message port) pool. */
	for (int i = 0; i < MAX_PORT_CNT; ++i)
		ADDHEAD(&free_port, &port_pool[i].node);
	/* Message pool: linked through the message's own link node. */
	for (int i = 0; i < MAX_MSG_CNT; ++i)
		ADDHEAD(&free_msg, &msg_pool[i].msg.link);
	/* Thread bookkeeping pool. */
	for (int i = 0; i < MAX_THREAD_CNT; ++i)
		ADDHEAD(&free_thread, &thread_pool[i].node);