/*
 * Imported from gitweb: commit bda8b49729a60cf2c2807dc74edadc1187705fbb
 * [bertos.git] / bertos / cpu / irq.h
 */
1 /**
2  * \file
3  * <!--
4  * This file is part of BeRTOS.
5  *
6  * Bertos is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  * As a special exception, you may use this file as part of a free software
21  * library without restriction.  Specifically, if other files instantiate
22  * templates or use macros or inline functions from this file, or you compile
23  * this file and link it with other files to produce an executable, this
24  * file does not by itself cause the resulting executable to be covered by
25  * the GNU General Public License.  This exception does not however
26  * invalidate any other reasons why the executable file might be covered by
27  * the GNU General Public License.
28  *
29  * Copyright 2004, 2005, 2006, 2007 Develer S.r.l. (http://www.develer.com/)
30  * Copyright 2004 Giovanni Bajo
31  *
32  * -->
33  *
34  * \brief CPU-specific IRQ definitions.
35  *
36  * \author Giovanni Bajo <rasky@develer.com>
37  * \author Bernie Innocenti <bernie@codewiz.org>
38  * \author Stefano Fedrigo <aleph@develer.com>
39  * \author Francesco Sacchi <batt@develer.com>
40  */
41 #ifndef CPU_IRQ_H
42 #define CPU_IRQ_H
43
44 #include "detect.h"
45 #include "types.h"
46
47 #include <kern/proc.h> /* proc_needPreempt() / proc_preempt() */
48
49 #include <cfg/compiler.h> /* for uintXX_t */
50 #include "cfg/cfg_proc.h" /* CONFIG_KERN_PREEMPT */
51
52 #if CPU_I196
        /* Intel 80196: rely on the toolchain's interrupt intrinsics. */
53         #define IRQ_DISABLE             disable_interrupt()
54         #define IRQ_ENABLE              enable_interrupt()
55 #elif CPU_X86
56
57         /* Get IRQ_* definitions from the hosting environment. */
58         #include <cfg/os.h>
        /* NOTE(review): embedded x86 IRQ control is unimplemented — these
         * FIXME stubs will not compile if actually used. */
59         #if OS_EMBEDDED
60                 #define IRQ_DISABLE             FIXME
61                 #define IRQ_ENABLE              FIXME
62                 #define IRQ_SAVE_DISABLE(x)     FIXME
63                 #define IRQ_RESTORE(x)          FIXME
64         #endif /* OS_EMBEDDED */
65
66 #elif CPU_CM3
67         /* Cortex-M3 */
68
69         /*
70          * Interrupt priority.
71          *
72          * NOTE: 0 means that an interrupt is not affected by the global IRQ
73          * priority settings.
74          */
75         #define IRQ_PRIO                0x80
76         #define IRQ_PRIO_MIN            0xf0
77         #define IRQ_PRIO_MAX            0
78         /*
79          * To disable interrupts we just raise the system base priority to a
80          * number lower than the default IRQ priority. In this way, all the
81          * "normal" interrupt can't be triggered. High-priority interrupt can
82          * still happen (at the moment only the soft-interrupt svcall uses a
83          * priority greater than the default IRQ priority).
84          *
85          * To enable interrupts we set the system base priority to 0, that
86          * means IRQ priority mechanism is disabled, and any interrupt can
87          * happen.
88          */
89         #define IRQ_PRIO_DISABLED       0x40
90         #define IRQ_PRIO_ENABLED        0
91
        /* Mask all "normal" IRQs by raising basepri to IRQ_PRIO_DISABLED. */
92         #define IRQ_DISABLE                                             \
93         ({                                                              \
94                 register cpu_flags_t reg = IRQ_PRIO_DISABLED;           \
95                 asm volatile (                                          \
96                         "msr basepri, %0"                               \
97                         : : "r"(reg) : "memory", "cc");                 \
98         })
99
        /* Re-enable IRQs by clearing basepri (priority masking disabled). */
100         #define IRQ_ENABLE                                              \
101         ({                                                              \
102                 register cpu_flags_t reg = IRQ_PRIO_ENABLED;            \
103                 asm volatile (                                          \
104                         "msr basepri, %0"                               \
105                         : : "r"(reg) : "memory", "cc");                 \
106         })
107
        /* Read the current basepri value (the saved IRQ-mask state). */
108         #define CPU_READ_FLAGS()                                        \
109         ({                                                              \
110                 register cpu_flags_t reg;                               \
111                 asm volatile (                                          \
112                         "mrs %0, basepri"                               \
113                          : "=r"(reg) : : "memory", "cc");               \
114                 reg;                                                    \
115         })
116
        /* Save the current IRQ-mask state into x, then disable IRQs. */
117         #define IRQ_SAVE_DISABLE(x)                                     \
118         ({                                                              \
119                 x = CPU_READ_FLAGS();                                   \
120                 IRQ_DISABLE;                                            \
121         })
122
        /* Restore a mask state previously saved by IRQ_SAVE_DISABLE(). */
123         #define IRQ_RESTORE(x)                                          \
124         ({                                                              \
125                 asm volatile (                                          \
126                         "msr basepri, %0"                               \
127                         : : "r"(x) : "memory", "cc");                   \
128         })
129
        /* True when basepri == IRQ_PRIO_ENABLED, i.e. no masking is active. */
130         #define IRQ_ENABLED() (CPU_READ_FLAGS() == IRQ_PRIO_ENABLED)
        /**
         * Return true when executing in Handler (ISR) context.
         *
         * On Cortex-M3 the main stack (MSP) is used only in Handler mode,
         * so sp == msp implies we are inside an interrupt handler: the asm
         * sets ret to 1 in that case, 0 otherwise.
         */
132         INLINE bool irq_running(void)
133         {
134                 register uint32_t ret;
135
136                 /*
137                  * Check if the current stack pointer is the main stack or
138                  * process stack: we use the main stack only in Handler mode,
139                  * so this means we're running inside an ISR.
140                  */
141                 asm volatile (
142                         "mrs %0, msp\n\t"
143                         "cmp sp, %0\n\t"
144                         "ite ne\n\t"
145                         "movne %0, #0\n\t"
146                         "moveq %0, #1\n\t" : "=r"(ret) : : "cc");
147                 return ret;
148         }
149         #define IRQ_RUNNING() irq_running()
150
151         #if CONFIG_KERN_PREEMPT
152
153                 #define DECLARE_ISR_CONTEXT_SWITCH(func)                \
154                 void func(void);                                        \
155                 INLINE void __isr_##func(void);                         \
156                 void func(void)                                         \
157                 {                                                       \
158                         __isr_##func();                                 \
159                         if (!proc_needPreempt())                        \
160                                 return;                                 \
161                         /*
162                          * Set a PendSV request.
163                          *
164                          * The preemption handler will be called immediately
165                          * after this ISR in tail-chaining mode (without the
166                          * overhead of hardware state saving and restoration
167                          * between interrupts).
168                          */                                             \
169                         HWREG(NVIC_INT_CTRL) = NVIC_INT_CTRL_PEND_SV;   \
170                 }                                                       \
171                 INLINE void __isr_##func(void)
172
173                 /**
174                  * With task priorities enabled each ISR is used a point to
175                  * check if we need to perform a context switch.
176                  *
177                  * Instead, without priorities a context switch can occur only
178                  * when the running task expires its time quantum. In this last
179                  * case, the context switch can only occur in the timer ISR,
180                  * that must be always declared with the
181                  * DECLARE_ISR_CONTEXT_SWITCH() macro.
182                  */
183                 #if CONFIG_KERN_PRI
184                         #define DECLARE_ISR(func) \
185                                 DECLARE_ISR_CONTEXT_SWITCH(func)
186                         /**
187                          * Interrupt service routine prototype: can be used for
188                          * forward declarations.
189                          */
190                         #define ISR_PROTO(func) \
191                                 ISR_PROTO_CONTEXT_SWITCH(func)
192                 #endif /* !CONFIG_KERN_PRI */
193         #endif
194
195         #ifndef ISR_PROTO
196                 #define ISR_PROTO(func) void func(void)
197         #endif
198         #ifndef DECLARE_ISR
199                 #define DECLARE_ISR(func) void func(void)
200         #endif
201         #ifndef DECLARE_ISR_CONTEXT_SWITCH
202                 #define DECLARE_ISR_CONTEXT_SWITCH(func) void func(void)
203         #endif
204         #ifndef ISR_PROTO_CONTEXT_SWITCH
205                 #define ISR_PROTO_CONTEXT_SWITCH(func) void func(void)
206         #endif
207
208 #elif CPU_ARM
209
210         #ifdef __IAR_SYSTEMS_ICC__
211
212                 #include <inarm.h>
213
214                 #if __CPU_MODE__ == 1 /* Thumb */
215                         /* Use stubs */
216                         extern cpu_flags_t get_CPSR(void);
217                         extern void set_CPSR(cpu_flags_t flags);
218                 #else
219                         #define get_CPSR __get_CPSR
220                         #define set_CPSR __set_CPSR
221                 #endif
222
223                 #define IRQ_DISABLE __disable_interrupt()
224                 #define IRQ_ENABLE  __enable_interrupt()
225
226                 #define IRQ_SAVE_DISABLE(x) \
227                 do { \
228                         (x) = get_CPSR(); \
229                         __disable_interrupt(); \
230                 } while (0)
231
232                 #define IRQ_RESTORE(x) \
233                 do { \
234                         set_CPSR(x); \
235                 } while (0)
236
237                 #define IRQ_ENABLED() \
238                         ((bool)(get_CPSR() & 0xb0))
239
240         #else /* !__IAR_SYSTEMS_ICC__ */
241
242                 #define IRQ_DISABLE                                     \
243                 do {                                                    \
244                         cpu_flags_t sreg;                               \
245                         asm volatile (                                  \
246                                 "mrs %0, cpsr\n\t"                      \
247                                 "orr %0, %0, #0xc0\n\t"                 \
248                                 "msr cpsr_c, %0\n\t"                    \
249                                 : "=r" (sreg) : : "memory", "cc");      \
250                 } while (0)
251
252                 #define IRQ_ENABLE                                      \
253                 do {                                                    \
254                         cpu_flags_t sreg;                               \
255                         asm volatile (                                  \
256                                 "mrs %0, cpsr\n\t"                      \
257                                 "bic %0, %0, #0xc0\n\t"                 \
258                                 "msr cpsr_c, %0\n\t"                    \
259                                 : "=r" (sreg) : : "memory", "cc");      \
260                 } while (0)
261
262                 #define IRQ_SAVE_DISABLE(x)                             \
263                 do {                                                    \
264                         register cpu_flags_t sreg;                      \
265                         asm volatile (                                  \
266                                 "mrs %0, cpsr\n\t"                      \
267                                 "orr %1, %0, #0xc0\n\t"                 \
268                                 "msr cpsr_c, %1\n\t"                    \
269                                 : "=r" (x), "=r" (sreg)                 \
270                                 : : "memory", "cc");                    \
271                 } while (0)
272
273                 #define IRQ_RESTORE(x)                                  \
274                 do {                                                    \
275                         asm volatile (                                  \
276                                 "msr cpsr_c, %0\n\t"                    \
277                                 : : "r" (x) : "memory", "cc");          \
278                 } while (0)
279
280                 #define CPU_READ_FLAGS()                                \
281                 ({                                                      \
282                         cpu_flags_t sreg;                               \
283                         asm volatile (                                  \
284                                 "mrs %0, cpsr\n\t"                      \
285                                 : "=r" (sreg) : : "memory", "cc");      \
286                         sreg;                                           \
287                 })
288
289                 #define IRQ_ENABLED() ((CPU_READ_FLAGS() & 0xc0) != 0xc0)
290
291                 #if CONFIG_KERN_PREEMPT
292                         EXTERN_C void asm_irq_switch_context(void);
293
294                         /**
295                          * At the beginning of any ISR immediately ajust the
296                          * return address and store all the caller-save
297                          * registers (the ISR may change these registers that
298                          * are shared with the user-context).
299                          */
300                         #define IRQ_ENTRY() asm volatile ( \
301                                                 "sub    lr, lr, #4\n\t" \
302                                                 "stmfd  sp!, {r0-r3, ip, lr}\n\t")
303                         #define IRQ_EXIT()  asm volatile ( \
304                                                 "b      asm_irq_switch_context\n\t")
305                         /**
306                          * Function attribute to declare an interrupt service
307                          * routine.
308                          *
309                          * An ISR function must be declared as naked because we
310                          * want to add our IRQ_ENTRY() prologue and IRQ_EXIT()
311                          * epilogue code to handle the context switch and save
312                          * all the registers (not only the callee-save).
313                          *
314                          */
315                         #define ISR_FUNC __attribute__((naked))
316
317                         /**
318                          * The compiler cannot establish which
319                          * registers actually need to be saved, because
320                          * the interrupt can happen at any time, so the
321                          * "normal" prologue and epilogue used for a
322                          * generic function call are not suitable for
323                          * the ISR.
324                          *
325                          * Using a naked function has the drawback that
326                          * the stack is not automatically adjusted at
327                          * this point, like a "normal" function call.
328                          *
329                          * So, an ISR can _only_ contain other function
330                          * calls and they can't use the stack in any
331                          * other way.
332                          *
333                          * NOTE: we need to explicitly disable IRQs after
334                          * IRQ_ENTRY(), because the IRQ status flag is not
335                          * masked by the hardware and an IRQ ack inside the ISR
336                          * may cause the triggering of another IRQ before
337                          * exiting from the current ISR.
338                          *
339                          * The respective IRQ_ENABLE is not necessary, because
340                          * IRQs will be automatically re-enabled when restoring
341                          * the context of the user task.
342                          */
343                         #define DECLARE_ISR_CONTEXT_SWITCH(func)                \
344                                 void ISR_FUNC func(void);                       \
345                                 static NOINLINE void __isr_##func(void);        \
346                                 void ISR_FUNC func(void)                        \
347                                 {                                               \
348                                         IRQ_ENTRY();                            \
349                                         IRQ_DISABLE;                            \
350                                         __isr_##func();                         \
351                                         IRQ_EXIT();                             \
352                                 }                                               \
353                                 static void __isr_##func(void)
354                         /**
355                          * Interrupt service routine prototype: can be used for
356                          * forward declarations.
357                          */
358                         #define ISR_PROTO_CONTEXT_SWITCH(func)  \
359                                 void ISR_FUNC func(void)
360                         /**
361                          * With task priorities enabled each ISR is used a point to
362                          * check if we need to perform a context switch.
363                          *
364                          * Instead, without priorities a context switch can occur only
365                          * when the running task expires its time quantum. In this last
366                          * case, the context switch can only occur in the timer
367                          * ISR, that must be always declared with the
368                          * DECLARE_ISR_CONTEXT_SWITCH() macro.
369                          */
370                         #if CONFIG_KERN_PRI
371                                 #define DECLARE_ISR(func) \
372                                         DECLARE_ISR_CONTEXT_SWITCH(func)
373
374                                 #define ISR_PROTO(func) \
375                                         ISR_PROTO_CONTEXT_SWITCH(func)
376                         #endif /* !CONFIG_KERN_PRI */
377                 #endif /* CONFIG_KERN_PREEMPT */
378
379                 #ifndef ISR_FUNC
380                         #define ISR_FUNC  __attribute__((interrupt))
381                 #endif
382                 #ifndef DECLARE_ISR
383                         #define DECLARE_ISR(func) \
384                                 void ISR_FUNC func(void);                               \
385                                 /*                                                      \
386                                  * FIXME: avoid the inlining of this function.          \
387                                  *                                                      \
388                                  * This is terribly inefficient, but it's a             \
389                                  * reliable workaround to avoid gcc blowing             \
390                                  * away the stack (see the bug below):                  \
391                                  *                                                      \
392                                  * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=41999    \
393                                  */                                                     \
394                                 static NOINLINE void __isr_##func(void);                \
395                                 void ISR_FUNC func(void)                                \
396                                 {                                                       \
397                                         __isr_##func();                                 \
398                                 }                                                       \
399                                 static void __isr_##func(void)
400                 #endif
401                 #ifndef DECLARE_ISR_CONTEXT_SWITCH
402                         #define DECLARE_ISR_CONTEXT_SWITCH(func) DECLARE_ISR(func)
403                 #endif
404                 #ifndef ISR_PROTO
405                         #define ISR_PROTO(func) void ISR_FUNC func(void)
406                 #endif
407                 #ifndef ISR_PROTO_CONTEXT_SWITCH
408                         #define ISR_PROTO_CONTEXT_SWITCH(func) ISR_PROTO(func)
409                 #endif
410
411         #endif /* !__IAR_SYSTEMS_ICC_ */
412
413 #elif CPU_PPC
414
415         /* Get IRQ_* definitions from the hosting environment. */
416         #include <cfg/os.h>
        /* NOTE(review): embedded PPC IRQ control is unimplemented — these
         * FIXME stubs will not compile if actually used. */
417         #if OS_EMBEDDED
418                 #define IRQ_DISABLE         FIXME
419                 #define IRQ_ENABLE          FIXME
420                 #define IRQ_SAVE_DISABLE(x) FIXME
421                 #define IRQ_RESTORE(x)      FIXME
422                 #define IRQ_ENABLED()       FIXME
423         #endif /* OS_EMBEDDED */
424
424
425 #elif CPU_DSP56K
426
427         #define IRQ_DISABLE             do { asm(bfset #0x0200,SR); asm(nop); } while (0)
428         #define IRQ_ENABLE              do { asm(bfclr #0x0200,SR); asm(nop); } while (0)
429
430         #define IRQ_SAVE_DISABLE(x)  \
431                 do { (void)x; asm(move SR,x); asm(bfset #0x0200,SR); } while (0)
432         #define IRQ_RESTORE(x)  \
433                 do { (void)x; asm(move x,SR); } while (0)
434
        /**
         * Return true when executing inside an interrupt handler.
         *
         * NOTE(review): relies on user_sp being non-NULL only while an ISR
         * is active (presumably set by the ISR entry code when it saves the
         * user stack pointer) — confirm against the DSP56K port sources.
         */
435         static inline bool irq_running(void)
436         {
437                 extern void *user_sp;
438                 return !!user_sp;
439         }
440         #define IRQ_RUNNING() irq_running()
441
        /**
         * Return true when IRQs are globally enabled.
         *
         * The compiler-specific asm writes the Status Register into x
         * (despite looking like a read of an uninitialized variable); the
         * 0x0200 bit is the same interrupt-mask bit set by IRQ_DISABLE
         * above — clear means interrupts are enabled.
         */
442         static inline bool irq_enabled(void)
443         {
444                 uint16_t x;
445                 asm(move SR,x);
446                 return !(x & 0x0200);
447         }
448         #define IRQ_ENABLED() irq_enabled()
449
450 #elif CPU_AVR
451
        /* Global interrupt disable/enable via the AVR cli/sei instructions. */
452         #define IRQ_DISABLE   asm volatile ("cli" ::)
453         #define IRQ_ENABLE    asm volatile ("sei" ::)
454
        /* Save SREG into x, then disable interrupts atomically. */
455         #define IRQ_SAVE_DISABLE(x) \
456         do { \
457                 __asm__ __volatile__( \
458                         "in %0,__SREG__\n\t" \
459                         "cli" \
460                         : "=r" (x) : /* no inputs */ : "cc" \
461                 ); \
462         } while (0)
463
        /* Restore a SREG value previously saved by IRQ_SAVE_DISABLE(). */
464         #define IRQ_RESTORE(x) \
465         do { \
466                 __asm__ __volatile__( \
467                         "out __SREG__,%0" : /* no outputs */ : "r" (x) : "cc" \
468                 ); \
469         } while (0)
470
        /* True when the SREG global interrupt flag (I bit, 0x80) is set. */
471         #define IRQ_ENABLED() \
472         ({ \
473                 uint8_t sreg; \
474                 __asm__ __volatile__( \
475                         "in %0,__SREG__\n\t" \
476                         : "=r" (sreg)  /* no inputs & no clobbers */ \
477                 ); \
478                 (bool)(sreg & 0x80); \
479         })
480         #if CONFIG_KERN_PREEMPT
481                 #define DECLARE_ISR_CONTEXT_SWITCH(vect)                \
482                         INLINE void __isr_##vect(void);                 \
483                         ISR(vect)                                       \
484                         {                                               \
485                                 __isr_##vect();                         \
486                                 IRQ_PREEMPT_HANDLER();                  \
487                         }                                               \
488                         INLINE void __isr_##vect(void)
489
490                 /**
491                  * With task priorities enabled each ISR is used a point to
492                  * check if we need to perform a context switch.
493                  *
494                  * Instead, without priorities a context switch can occur only
495                  * when the running task expires its time quantum. In this last
496                  * case, the context switch can only occur in the timer ISR,
497                  * that must be always declared with the
498                  * DECLARE_ISR_CONTEXT_SWITCH() macro.
499                  */
500                 #if CONFIG_KERN_PRI
501                         #define DECLARE_ISR(func) \
502                                 DECLARE_ISR_CONTEXT_SWITCH(func)
503                         /**
504                          * Interrupt service routine prototype: can be used for
505                          * forward declarations.
506                          */
507                         #define ISR_PROTO(func) \
508                                 ISR_PROTO_CONTEXT_SWITCH(func)
509                 #endif /* !CONFIG_KERN_PRI */
510         #endif
511
512         #ifndef ISR_PROTO
513                 #define ISR_PROTO(vect) ISR(vect)
514         #endif
515         #ifndef DECLARE_ISR
516                 #define DECLARE_ISR(vect) ISR(vect)
517         #endif
518         #ifndef DECLARE_ISR_CONTEXT_SWITCH
519                 #define DECLARE_ISR_CONTEXT_SWITCH(vect) ISR(vect)
520         #endif
521         #ifndef ISR_PROTO_CONTEXT_SWITCH
522                 #define ISR_PROTO_CONTEXT_SWITCH(vect) ISR(vect)
523         #endif
524
525 #else
526         #error No CPU_... defined.
527 #endif
528
529 #ifdef IRQ_RUNNING
530         /// Ensure callee is running within an interrupt
531         #define ASSERT_IRQ_CONTEXT()  ASSERT(IRQ_RUNNING())
532
533         /// Ensure callee is not running within an interrupt
534         #define ASSERT_USER_CONTEXT() ASSERT(!IRQ_RUNNING())
535 #else
536         #define IRQ_RUNNING()   false
537         #define ASSERT_USER_CONTEXT()  do {} while(0)
538         #define ASSERT_IRQ_CONTEXT()   do {} while(0)
539 #endif
540
541 #ifdef IRQ_ENABLED
542         /// Ensure interrupts are enabled
543         #define IRQ_ASSERT_ENABLED()  ASSERT(IRQ_ENABLED())
544
545         /// Ensure interrupts are not enabled
546         #define IRQ_ASSERT_DISABLED() ASSERT(!IRQ_ENABLED())
547 #else
548         #define IRQ_ASSERT_ENABLED() do {} while(0)
549         #define IRQ_ASSERT_DISABLED() do {} while(0)
550 #endif
551
552
553 #ifndef IRQ_PREEMPT_HANDLER
554         #if CONFIG_KERN_PREEMPT
555                 /**
556                  * Handle preemptive context switch inside timer IRQ.
557                  */
558                 INLINE void IRQ_PREEMPT_HANDLER(void)
559                 {
                        /* Switch context only when the scheduler reports
                         * that the running process must be preempted. */
560                         if (proc_needPreempt())
561                                 proc_preempt();
562                 }
563         #else
564                 #define IRQ_PREEMPT_HANDLER() /* Nothing */
565         #endif
566 #endif
567
568 /**
569  * Execute \a CODE atomically with respect to interrupts.
570  *
 * WARNING: \a CODE must not transfer control out of the block
 * (return/goto/longjmp), otherwise IRQ_RESTORE() is skipped and
 * interrupts remain disabled.
 *
571  * \see IRQ_SAVE_DISABLE IRQ_RESTORE
572  */
573 #define ATOMIC(CODE) \
574         do { \
575                 cpu_flags_t __flags; \
576                 IRQ_SAVE_DISABLE(__flags); \
577                 CODE; \
578                 IRQ_RESTORE(__flags); \
579         } while (0)
580
581 #endif /* CPU_IRQ_H */