/**
 * \file
 * <!--
 * This file is part of BeRTOS.
 *
 * Bertos is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License.  This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 *
 * Copyright 2004, 2005, 2006, 2007 Develer S.r.l. (http://www.develer.com/)
 * Copyright 2004 Giovanni Bajo
 *
 * -->
 *
 * \brief CPU-specific IRQ definitions.
 *
 * \author Giovanni Bajo <rasky@develer.com>
 * \author Bernie Innocenti <bernie@codewiz.org>
 * \author Stefano Fedrigo <aleph@develer.com>
 * \author Francesco Sacchi <batt@develer.com>
 */
#ifndef CPU_IRQ_H
#define CPU_IRQ_H

#include "detect.h"
#include "types.h"

#include <kern/proc.h> /* proc_needPreempt() / proc_preempt() */

#include <cfg/compiler.h> /* for uintXX_t */
#include "cfg/cfg_proc.h" /* CONFIG_KERN_PREEMPT */

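/*
 * Overview: each CPU branch below provides IRQ_DISABLE and IRQ_ENABLE plus,
 * where supported, IRQ_SAVE_DISABLE()/IRQ_RESTORE(), IRQ_ENABLED(),
 * IRQ_RUNNING() and the ISR declaration macros (DECLARE_ISR(),
 * DECLARE_ISR_CONTEXT_SWITCH(), ISR_PROTO()).  The generic helpers at the
 * end of the file (ASSERT_IRQ_CONTEXT(), IRQ_ASSERT_ENABLED(),
 * IRQ_PREEMPT_HANDLER() and ATOMIC()) are built on top of these.
 */
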
#if CPU_I196
	#define IRQ_DISABLE             disable_interrupt()
	#define IRQ_ENABLE              enable_interrupt()
#elif CPU_X86

	/* Get IRQ_* definitions from the hosting environment. */
	#include <cfg/os.h>
	#if OS_EMBEDDED
		#define IRQ_DISABLE             FIXME
		#define IRQ_ENABLE              FIXME
		#define IRQ_SAVE_DISABLE(x)     FIXME
		#define IRQ_RESTORE(x)          FIXME
	#endif /* OS_EMBEDDED */

#elif CPU_CM3
	/* Cortex-M3 */

	/*
	 * Interrupt priority.
	 *
	 * NOTE: 0 means that an interrupt is not affected by the global IRQ
	 * priority settings.
	 */
	#define IRQ_PRIO                0x80
	#define IRQ_PRIO_MIN            0xf0
	#define IRQ_PRIO_MAX            0
	/*
	 * To disable interrupts we raise the system base priority (BASEPRI)
	 * to a value numerically lower than the default IRQ priority (on the
	 * Cortex-M3 lower values mean higher priority), so that none of the
	 * "normal" interrupts can be triggered.  Higher-priority interrupts
	 * can still happen (at the moment only the svcall soft-interrupt uses
	 * a priority higher than the default IRQ priority).
	 *
	 * To enable interrupts we set the system base priority to 0, which
	 * means the BASEPRI masking is disabled and any interrupt can happen.
	 */
	#define IRQ_PRIO_DISABLED       0x40
	#define IRQ_PRIO_ENABLED        0

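	/*
	 * Illustrative sketch (not part of the original header): with BASEPRI
	 * raised to IRQ_PRIO_DISABLED, any source configured at the default
	 * IRQ_PRIO (or lower priority) is held off, so short critical sections
	 * can be built with IRQ_SAVE_DISABLE()/IRQ_RESTORE().  The
	 * "event_count" variable below is hypothetical.
	 *
	 * \code
	 * static volatile uint32_t event_count;
	 *
	 * void count_event(void)
	 * {
	 *	cpu_flags_t flags;
	 *
	 *	IRQ_SAVE_DISABLE(flags);
	 *	event_count++;		// protected from "normal" priority IRQs
	 *	IRQ_RESTORE(flags);
	 * }
	 * \endcode
	 */
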
	#ifdef __IAR_SYSTEMS_ICC__
		#include <intrinsics.h> /* for __get_BASEPRI() / __set_BASEPRI() */
		INLINE cpu_flags_t CPU_READ_FLAGS(void)
		{
			return __get_BASEPRI();
		}

		INLINE void CPU_WRITE_FLAGS(cpu_flags_t flags)
		{
			__set_BASEPRI(flags);
		}

		extern uint32_t CPU_READ_IPSR(void);
		extern bool irq_running(void);

		#define IRQ_DISABLE     CPU_WRITE_FLAGS(IRQ_PRIO_DISABLED)

		#define IRQ_ENABLE      CPU_WRITE_FLAGS(IRQ_PRIO_ENABLED)

		#define IRQ_SAVE_DISABLE(x)			\
		do {						\
			x = CPU_READ_FLAGS();			\
			IRQ_DISABLE;				\
		} while (0)

		#define IRQ_RESTORE(x)				\
		do {						\
			CPU_WRITE_FLAGS(x);			\
		} while (0)
	#else /* !__IAR_SYSTEMS_ICC__ */
		#define IRQ_DISABLE						\
		({								\
			register cpu_flags_t reg = IRQ_PRIO_DISABLED;		\
			asm volatile (						\
				"msr basepri, %0"				\
				: : "r"(reg) : "memory", "cc");			\
		})

		#define IRQ_ENABLE						\
		({								\
			register cpu_flags_t reg = IRQ_PRIO_ENABLED;		\
			asm volatile (						\
				"msr basepri, %0"				\
				: : "r"(reg) : "memory", "cc");			\
		})

		#define CPU_READ_FLAGS()					\
		({								\
			register cpu_flags_t reg;				\
			asm volatile (						\
				"mrs %0, basepri"				\
				: "=r"(reg) : : "memory", "cc");		\
			reg;							\
		})

		#define IRQ_SAVE_DISABLE(x)					\
		({								\
			x = CPU_READ_FLAGS();					\
			IRQ_DISABLE;						\
		})

		#define IRQ_RESTORE(x)						\
		({								\
			asm volatile (						\
				"msr basepri, %0"				\
				: : "r"(x) : "memory", "cc");			\
		})

		INLINE bool irq_running(void)
		{
			register uint32_t ret;

			/*
			 * Check if the current stack pointer is the main stack or
			 * process stack: we use the main stack only in Handler mode,
			 * so this means we're running inside an ISR.
			 */
			asm volatile (
				"mrs %0, msp\n\t"
				"cmp sp, %0\n\t"
				"ite ne\n\t"
				"movne %0, #0\n\t"
				"moveq %0, #1\n\t" : "=r"(ret) : : "cc");
			return ret;
		}
	#endif /* __IAR_SYSTEMS_ICC__ */

	#define IRQ_ENABLED() (CPU_READ_FLAGS() == IRQ_PRIO_ENABLED)

	#define IRQ_RUNNING() irq_running()

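	/*
	 * Illustrative sketch (not part of the original header): IRQ_RUNNING()
	 * lets shared code choose a non-blocking path when it is invoked from
	 * an ISR.  The "queue_push()" and "uart_putchar_blocking()" helpers
	 * below are hypothetical.
	 *
	 * \code
	 * void log_putchar(char c)
	 * {
	 *	if (IRQ_RUNNING())
	 *		queue_push(c);             // never block inside an ISR
	 *	else
	 *		uart_putchar_blocking(c);
	 * }
	 * \endcode
	 */
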
	#if (CONFIG_KERN && CONFIG_KERN_PREEMPT)

		#define DECLARE_ISR_CONTEXT_SWITCH(func)		\
		void func(void);					\
		INLINE void __isr_##func(void);				\
		void func(void)						\
		{							\
			__isr_##func();					\
			if (!proc_needPreempt())			\
				return;					\
			/*
			 * Set a PendSV request.
			 *
			 * The preemption handler will be called immediately
			 * after this ISR in tail-chaining mode (without the
			 * overhead of hardware state saving and restoration
			 * between interrupts).
			 */						\
			HWREG(NVIC_INT_CTRL) = NVIC_INT_CTRL_PEND_SV;	\
		}							\
		INLINE void __isr_##func(void)

		/**
		 * With task priorities enabled each ISR is used as a point to
		 * check if we need to perform a context switch.
		 *
		 * Without priorities, instead, a context switch can occur only
		 * when the running task expires its time quantum. In this last
		 * case, the context switch can only occur in the timer ISR,
		 * which must always be declared with the
		 * DECLARE_ISR_CONTEXT_SWITCH() macro.
		 */
		#if CONFIG_KERN_PRI
			#define DECLARE_ISR(func) \
				DECLARE_ISR_CONTEXT_SWITCH(func)
			/**
			 * Interrupt service routine prototype: can be used for
			 * forward declarations.
			 */
			#define ISR_PROTO(func) \
				ISR_PROTO_CONTEXT_SWITCH(func)
		#endif /* CONFIG_KERN_PRI */
	#endif

	#ifndef ISR_PROTO
		#define ISR_PROTO(func) void func(void)
	#endif
	#ifndef DECLARE_ISR
		#define DECLARE_ISR(func) void func(void)
	#endif
	#ifndef DECLARE_ISR_CONTEXT_SWITCH
		#define DECLARE_ISR_CONTEXT_SWITCH(func) void func(void)
	#endif
	#ifndef ISR_PROTO_CONTEXT_SWITCH
		#define ISR_PROTO_CONTEXT_SWITCH(func) void func(void)
	#endif

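	/*
	 * Usage sketch (illustrative only; the handler name and its body are
	 * hypothetical): an ISR can be forward-declared with ISR_PROTO() and
	 * is defined with DECLARE_ISR(), which expands to the function header,
	 * so the body follows the macro directly.
	 *
	 * \code
	 * ISR_PROTO(uart0_irq_handler);
	 *
	 * DECLARE_ISR(uart0_irq_handler)
	 * {
	 *	// Acknowledge the device and do the minimum amount of work here.
	 * }
	 * \endcode
	 */
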
#elif CPU_ARM

	#ifdef __IAR_SYSTEMS_ICC__

		#include <inarm.h>

		#if __CPU_MODE__ == 1 /* Thumb */
			/* Use stubs */
			extern cpu_flags_t get_CPSR(void);
			extern void set_CPSR(cpu_flags_t flags);
		#else
			#define get_CPSR __get_CPSR
			#define set_CPSR __set_CPSR
		#endif
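		/*
		 * Note: Thumb-1 code has no MRS/MSR encodings to access the
		 * CPSR directly, so in Thumb mode get_CPSR()/set_CPSR() are
		 * provided as external ARM-mode stubs.
		 */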

		#define IRQ_DISABLE __disable_interrupt()
		#define IRQ_ENABLE  __enable_interrupt()

		#define IRQ_SAVE_DISABLE(x) \
		do { \
			(x) = get_CPSR(); \
			__disable_interrupt(); \
		} while (0)

		#define IRQ_RESTORE(x) \
		do { \
			set_CPSR(x); \
		} while (0)

		/* Enabled unless both the I and F bits are set (same check as the GCC variant below). */
		#define IRQ_ENABLED() \
			((bool)((get_CPSR() & 0xc0) != 0xc0))

	#else /* !__IAR_SYSTEMS_ICC__ */

		#define IRQ_DISABLE					\
		do {							\
			cpu_flags_t sreg;				\
			asm volatile (					\
				"mrs %0, cpsr\n\t"			\
				"orr %0, %0, #0xc0\n\t"			\
				"msr cpsr_c, %0\n\t"			\
				: "=r" (sreg) : : "memory", "cc");	\
		} while (0)

		#define IRQ_ENABLE					\
		do {							\
			cpu_flags_t sreg;				\
			asm volatile (					\
				"mrs %0, cpsr\n\t"			\
				"bic %0, %0, #0xc0\n\t"			\
				"msr cpsr_c, %0\n\t"			\
				: "=r" (sreg) : : "memory", "cc");	\
		} while (0)

		#define IRQ_SAVE_DISABLE(x)				\
		do {							\
			register cpu_flags_t sreg;			\
			asm volatile (					\
				"mrs %0, cpsr\n\t"			\
				"orr %1, %0, #0xc0\n\t"			\
				"msr cpsr_c, %1\n\t"			\
				: "=r" (x), "=r" (sreg)			\
				: : "memory", "cc");			\
		} while (0)

		#define IRQ_RESTORE(x)					\
		do {							\
			asm volatile (					\
				"msr cpsr_c, %0\n\t"			\
				: : "r" (x) : "memory", "cc");		\
		} while (0)

		#define CPU_READ_FLAGS()				\
		({							\
			cpu_flags_t sreg;				\
			asm volatile (					\
				"mrs %0, cpsr\n\t"			\
				: "=r" (sreg) : : "memory", "cc");	\
			sreg;						\
		})

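		/*
		 * 0xc0 covers the CPSR I bit (0x80, IRQ disable) and F bit
		 * (0x40, FIQ disable): interrupts count as enabled unless both
		 * sources are masked.
		 */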
		#define IRQ_ENABLED() ((CPU_READ_FLAGS() & 0xc0) != 0xc0)

		#if (CONFIG_KERN && CONFIG_KERN_PREEMPT)
			EXTERN_C void asm_irq_switch_context(void);

			/**
			 * At the beginning of any ISR immediately adjust the
			 * return address and store all the caller-save
			 * registers (the ISR may clobber these registers,
			 * which are shared with the user context).
			 */
			#define IRQ_ENTRY() asm volatile ( \
						"sub    lr, lr, #4\n\t" \
						"stmfd  sp!, {r0-r3, ip, lr}\n\t")
			#define IRQ_EXIT()  asm volatile ( \
						"b      asm_irq_switch_context\n\t")
			/**
			 * Function attribute to declare an interrupt service
			 * routine.
			 *
			 * An ISR function must be declared as naked because we
			 * want to add our IRQ_ENTRY() prologue and IRQ_EXIT()
			 * epilogue code to handle the context switch and save
			 * all the registers (not only the callee-saved ones).
			 */
			#define ISR_FUNC __attribute__((naked))

			/**
			 * The compiler cannot establish which
			 * registers actually need to be saved, because
			 * the interrupt can happen at any time, so the
			 * "normal" prologue and epilogue used for a
			 * generic function call are not suitable for
			 * the ISR.
			 *
			 * Using a naked function has the drawback that
			 * the stack is not automatically adjusted at
			 * this point, as it would be for a "normal"
			 * function call.
			 *
			 * So, an ISR can _only_ contain other function
			 * calls and they can't use the stack in any
			 * other way.
			 *
			 * NOTE: we need to explicitly disable IRQs after
			 * IRQ_ENTRY(), because the IRQ status flag is not
			 * masked by the hardware and an IRQ ack inside the ISR
			 * may cause the triggering of another IRQ before
			 * exiting from the current ISR.
			 *
			 * The respective IRQ_ENABLE is not necessary, because
			 * IRQs will be automatically re-enabled when restoring
			 * the context of the user task.
			 */
			#define DECLARE_ISR_CONTEXT_SWITCH(func)		\
				void ISR_FUNC func(void);			\
				static NOINLINE void __isr_##func(void);	\
				void ISR_FUNC func(void)			\
				{						\
					IRQ_ENTRY();				\
					IRQ_DISABLE;				\
					__isr_##func();				\
					IRQ_EXIT();				\
				}						\
				static NOINLINE void __isr_##func(void)
			/**
			 * Interrupt service routine prototype: can be used for
			 * forward declarations.
			 */
			#define ISR_PROTO_CONTEXT_SWITCH(func)	\
				void ISR_FUNC func(void)
			/**
			 * With task priorities enabled each ISR is used as a
			 * point to check if we need to perform a context switch.
			 *
			 * Without priorities, instead, a context switch can
			 * occur only when the running task expires its time
			 * quantum. In this last case, the context switch can
			 * only occur in the timer ISR, which must always be
			 * declared with the DECLARE_ISR_CONTEXT_SWITCH() macro.
			 */
			#if CONFIG_KERN_PRI
				#define DECLARE_ISR(func) \
					DECLARE_ISR_CONTEXT_SWITCH(func)

				#define ISR_PROTO(func) \
					ISR_PROTO_CONTEXT_SWITCH(func)
			#endif /* CONFIG_KERN_PRI */
		#endif /* CONFIG_KERN_PREEMPT */

		#ifndef ISR_FUNC
			#define ISR_FUNC __attribute__((naked))
		#endif
		#ifndef DECLARE_ISR
			#define DECLARE_ISR(func) \
				void ISR_FUNC func(void);				\
				/*							\
				 * FIXME: avoid the inlining of this function.		\
				 *							\
				 * This is terribly inefficient, but it's a		\
				 * reliable workaround to avoid gcc blowing		\
				 * away the stack (see the bug below):			\
				 *							\
				 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=41999	\
				 */							\
				static NOINLINE void __isr_##func(void);		\
				void ISR_FUNC func(void)				\
				{							\
					asm volatile (					\
						"sub    lr, lr, #4\n\t"			\
						"stmfd  sp!, {r0-r3, ip, lr}\n\t");	\
					__isr_##func();					\
					asm volatile (					\
						"ldmfd sp!, {r0-r3, ip, pc}^\n\t");	\
				}							\
				static NOINLINE void __isr_##func(void)
		#endif
		#ifndef DECLARE_ISR_CONTEXT_SWITCH
			#define DECLARE_ISR_CONTEXT_SWITCH(func) DECLARE_ISR(func)
		#endif
		#ifndef ISR_PROTO
			#define ISR_PROTO(func) void ISR_FUNC func(void)
		#endif
		#ifndef ISR_PROTO_CONTEXT_SWITCH
			#define ISR_PROTO_CONTEXT_SWITCH(func) ISR_PROTO(func)
		#endif

	#endif /* __IAR_SYSTEMS_ICC__ */

#elif CPU_PPC

	/* Get IRQ_* definitions from the hosting environment. */
	#include <cfg/os.h>
	#if OS_EMBEDDED
		#define IRQ_DISABLE         FIXME
		#define IRQ_ENABLE          FIXME
		#define IRQ_SAVE_DISABLE(x) FIXME
		#define IRQ_RESTORE(x)      FIXME
		#define IRQ_ENABLED()       FIXME
	#endif /* OS_EMBEDDED */

#elif CPU_DSP56K

	#define IRQ_DISABLE             do { asm(bfset #0x0200,SR); asm(nop); } while (0)
	#define IRQ_ENABLE              do { asm(bfclr #0x0200,SR); asm(nop); } while (0)

	#define IRQ_SAVE_DISABLE(x)  \
		do { (void)x; asm(move SR,x); asm(bfset #0x0200,SR); } while (0)
	#define IRQ_RESTORE(x)  \
		do { (void)x; asm(move x,SR); } while (0)

	static inline bool irq_running(void)
	{
		extern void *user_sp;
		return !!user_sp;
	}
	#define IRQ_RUNNING() irq_running()

	static inline bool irq_enabled(void)
	{
		uint16_t x;
		asm(move SR,x);
		return !(x & 0x0200);
	}
	#define IRQ_ENABLED() irq_enabled()

#elif CPU_AVR

	#define IRQ_DISABLE   asm volatile ("cli" ::)
	#define IRQ_ENABLE    asm volatile ("sei" ::)

	#define IRQ_SAVE_DISABLE(x) \
	do { \
		__asm__ __volatile__( \
			"in %0,__SREG__\n\t" \
			"cli" \
			: "=r" (x) : /* no inputs */ : "cc" \
		); \
	} while (0)

	#define IRQ_RESTORE(x) \
	do { \
		__asm__ __volatile__( \
			"out __SREG__,%0" : /* no outputs */ : "r" (x) : "cc" \
		); \
	} while (0)

	#define IRQ_ENABLED() \
	({ \
		uint8_t sreg; \
		__asm__ __volatile__( \
			"in %0,__SREG__\n\t" \
			: "=r" (sreg)  /* no inputs & no clobbers */ \
		); \
		(bool)(sreg & 0x80); \
	})
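
	/*
	 * Note: 0x80 is SREG bit 7, the AVR global interrupt enable (I) flag,
	 * which "sei" sets and "cli" clears.
	 */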
	#if (CONFIG_KERN && CONFIG_KERN_PREEMPT)
		#define DECLARE_ISR_CONTEXT_SWITCH(vect)	\
			INLINE void __isr_##vect(void);		\
			ISR(vect)				\
			{					\
				__isr_##vect();			\
				IRQ_PREEMPT_HANDLER();		\
			}					\
			INLINE void __isr_##vect(void)

		/**
		 * With task priorities enabled each ISR is used as a point to
		 * check if we need to perform a context switch.
		 *
		 * Without priorities, instead, a context switch can occur only
		 * when the running task expires its time quantum. In this last
		 * case, the context switch can only occur in the timer ISR,
		 * which must always be declared with the
		 * DECLARE_ISR_CONTEXT_SWITCH() macro.
		 */
		#if CONFIG_KERN_PRI
			#define DECLARE_ISR(func) \
				DECLARE_ISR_CONTEXT_SWITCH(func)
			/**
			 * Interrupt service routine prototype: can be used for
			 * forward declarations.
			 */
			#define ISR_PROTO(func) \
				ISR_PROTO_CONTEXT_SWITCH(func)
		#endif /* CONFIG_KERN_PRI */
	#endif

	#ifndef ISR_PROTO
		#define ISR_PROTO(vect) ISR(vect)
	#endif
	#ifndef DECLARE_ISR
		#define DECLARE_ISR(vect) ISR(vect)
	#endif
	#ifndef DECLARE_ISR_CONTEXT_SWITCH
		#define DECLARE_ISR_CONTEXT_SWITCH(vect) ISR(vect)
	#endif
	#ifndef ISR_PROTO_CONTEXT_SWITCH
		#define ISR_PROTO_CONTEXT_SWITCH(vect) ISR(vect)
	#endif

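	/*
	 * Usage sketch (illustrative only): on AVR the ISR is named after the
	 * interrupt vector of the chosen MCU; the vector name below is
	 * hypothetical and device-dependent.
	 *
	 * \code
	 * DECLARE_ISR(TIMER0_OVF_vect)
	 * {
	 *	// Reload the timer and update the kernel tick here.
	 * }
	 * \endcode
	 */
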
#elif CPU_MSP430

	/* Get the compiler defined macros */
	#include <signal.h>
	#define IRQ_DISABLE         dint()
	#define IRQ_ENABLE          eint()

#else
	#error No CPU_... defined.
#endif

#ifdef IRQ_RUNNING
	/// Ensure the caller is running within an interrupt
	#define ASSERT_IRQ_CONTEXT()  ASSERT(IRQ_RUNNING())

	/// Ensure the caller is not running within an interrupt
	#define ASSERT_USER_CONTEXT() ASSERT(!IRQ_RUNNING())
#else
	#define IRQ_RUNNING()   false
	#define ASSERT_USER_CONTEXT()  do {} while (0)
	#define ASSERT_IRQ_CONTEXT()   do {} while (0)
#endif

#ifdef IRQ_ENABLED
	/// Ensure interrupts are enabled
	#define IRQ_ASSERT_ENABLED()  ASSERT(IRQ_ENABLED())

	/// Ensure interrupts are disabled
	#define IRQ_ASSERT_DISABLED() ASSERT(!IRQ_ENABLED())
#else
	#define IRQ_ASSERT_ENABLED() do {} while (0)
	#define IRQ_ASSERT_DISABLED() do {} while (0)
#endif
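
/*
 * Illustrative sketch (not part of the original header): the assertion
 * macros document and enforce the expected context at the top of a
 * function.  "buffered_write()" and its contract are hypothetical.
 *
 * \code
 * void buffered_write(const char *buf, size_t len)
 * {
 *	ASSERT_USER_CONTEXT();  // may block: never call it from an ISR
 *	IRQ_ASSERT_ENABLED();   // relies on interrupts being enabled
 *
 *	// ...
 * }
 * \endcode
 */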


#ifndef IRQ_PREEMPT_HANDLER
	#if (CONFIG_KERN && CONFIG_KERN_PREEMPT)
		/**
		 * Handle preemptive context switch inside timer IRQ.
		 */
		INLINE void IRQ_PREEMPT_HANDLER(void)
		{
			if (proc_needPreempt())
				proc_preempt();
		}
	#else
		#define IRQ_PREEMPT_HANDLER() /* Nothing */
	#endif
#endif

/**
 * Execute \a CODE atomically with respect to interrupts.
 *
 * \see IRQ_SAVE_DISABLE IRQ_RESTORE
 */
#define ATOMIC(CODE) \
	do { \
		cpu_flags_t __flags; \
		IRQ_SAVE_DISABLE(__flags); \
		CODE; \
		IRQ_RESTORE(__flags); \
	} while (0)
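
/*
 * Usage sketch (illustrative; "sample_count" is a hypothetical variable
 * shared with an ISR): a read-modify-write sequence can be wrapped in
 * ATOMIC() instead of open-coding IRQ_SAVE_DISABLE()/IRQ_RESTORE().
 *
 * \code
 * static volatile int sample_count;
 *
 * void sample_done(void)
 * {
 *	ATOMIC(sample_count++);
 * }
 * \endcode
 */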

#endif /* CPU_IRQ_H */