Add IRQ_RUNNING() implementation for ARM7.
[bertos.git] / bertos / cpu / irq.h
1 /**
2  * \file
3  * <!--
4  * This file is part of BeRTOS.
5  *
6  * Bertos is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  * As a special exception, you may use this file as part of a free software
21  * library without restriction.  Specifically, if other files instantiate
22  * templates or use macros or inline functions from this file, or you compile
23  * this file and link it with other files to produce an executable, this
24  * file does not by itself cause the resulting executable to be covered by
25  * the GNU General Public License.  This exception does not however
26  * invalidate any other reasons why the executable file might be covered by
27  * the GNU General Public License.
28  *
29  * Copyright 2004, 2005, 2006, 2007 Develer S.r.l. (http://www.develer.com/)
30  * Copyright 2004 Giovanni Bajo
31  *
32  * -->
33  *
34  * \brief CPU-specific IRQ definitions.
35  *
36  * \author Giovanni Bajo <rasky@develer.com>
37  * \author Bernie Innocenti <bernie@codewiz.org>
38  * \author Stefano Fedrigo <aleph@develer.com>
39  * \author Francesco Sacchi <batt@develer.com>
40  */
41 #ifndef CPU_IRQ_H
42 #define CPU_IRQ_H
43
44 #include "detect.h"
45 #include "types.h"
46
47 #include <kern/proc.h> /* proc_needPreempt() / proc_preempt() */
48
49 #include <cfg/compiler.h> /* for uintXX_t */
50 #include "cfg/cfg_proc.h" /* CONFIG_KERN_PREEMPT */
51
52 #if CPU_I196
53         #define IRQ_DISABLE             disable_interrupt()
54         #define IRQ_ENABLE              enable_interrupt()
55 #elif CPU_X86
56
57         /* Get IRQ_* definitions from the hosting environment. */
58         #include <cfg/os.h>
59         #if OS_EMBEDDED
60                 #define IRQ_DISABLE             FIXME
61                 #define IRQ_ENABLE              FIXME
62                 #define IRQ_SAVE_DISABLE(x)     FIXME
63                 #define IRQ_RESTORE(x)          FIXME
64         #endif /* OS_EMBEDDED */
65
66 #elif CPU_CM3
67         /* Cortex-M3 */
68
69         /*
70          * Interrupt priority.
71          *
72          * NOTE: 0 means that an interrupt is not affected by the global IRQ
73          * priority settings.
74          */
75         #define IRQ_PRIO                    0x80
76         #define IRQ_PRIO_MIN            0xf0
77         #define IRQ_PRIO_MAX            0
78         /*
79          * To disable interrupts we just raise the system base priority to a
80          * number lower than the default IRQ priority. In this way, all the
81          * "normal" interrupt can't be triggered. High-priority interrupt can
82          * still happen (at the moment only the soft-interrupt svcall uses a
83          * priority greater than the default IRQ priority).
84          *
85          * To enable interrupts we set the system base priority to 0, that
86          * means IRQ priority mechanism is disabled, and any interrupt can
87          * happen.
88          */
89         #define IRQ_PRIO_DISABLED       0x40
90         #define IRQ_PRIO_ENABLED        0
91
92         #ifdef __IAR_SYSTEMS_ICC__
                /* Read the current IRQ mask state: on Cortex-M3 with IAR this
                 * is the BASEPRI register driven by IRQ_DISABLE/IRQ_ENABLE. */
93                 INLINE cpu_flags_t CPU_READ_FLAGS(void)
94                 {
95                         return __get_BASEPRI();
96                 }
97
                /* Write the IRQ mask state (BASEPRI) previously obtained with
                 * CPU_READ_FLAGS(), restoring the interrupt masking level. */
98                 INLINE void CPU_WRITE_FLAGS(cpu_flags_t flags)
99                 {
100                         __set_BASEPRI(flags);
101                 }
102
103                 extern uint32_t CPU_READ_IPSR(void);
104                 extern bool irq_running(void);
105
106                 #define IRQ_DISABLE     CPU_WRITE_FLAGS(IRQ_PRIO_DISABLED)
107
108                 #define IRQ_ENABLE      CPU_WRITE_FLAGS(IRQ_PRIO_ENABLED)
109
110                 #define IRQ_SAVE_DISABLE(x)                                     \
111                 do {                                                            \
112                         x = CPU_READ_FLAGS();                                   \
113                         IRQ_DISABLE;                                            \
114                 } while (0)
115
116                 #define IRQ_RESTORE(x)                                          \
117                 do {                                                            \
118                         CPU_WRITE_FLAGS(x);                                     \
119                 } while (0)
120         #else /* !__IAR_SYSTEMS_ICC__ */
                /*
                 * Mask all "normal" IRQs by raising the base priority to
                 * IRQ_PRIO_DISABLED; exceptions with a higher priority (e.g.
                 * the svcall soft-interrupt) can still fire.
                 */
121                 #define IRQ_DISABLE                                             \
122                 ({                                                              \
123                         register cpu_flags_t reg = IRQ_PRIO_DISABLED;           \
124                         asm volatile (                                          \
125                                 "msr basepri, %0"                               \
126                                 : : "r"(reg) : "memory", "cc");                 \
127                 })
128
                /* Re-enable IRQs by turning off the BASEPRI masking mechanism. */
129                 #define IRQ_ENABLE                                              \
130                 ({                                                              \
131                         register cpu_flags_t reg = IRQ_PRIO_ENABLED;            \
132                         asm volatile (                                          \
133                                 "msr basepri, %0"                               \
134                                 : : "r"(reg) : "memory", "cc");                 \
135                 })
136
                /* Read the current IRQ mask state (the BASEPRI register). */
137                 #define CPU_READ_FLAGS()                                        \
138                 ({                                                              \
139                         register cpu_flags_t reg;                               \
140                         asm volatile (                                          \
141                                 "mrs %0, basepri"                               \
142                                  : "=r"(reg) : : "memory", "cc");               \
143                         reg;                                                    \
144                 })
145
                /* Save the current IRQ mask state in x, then disable IRQs. */
146                 #define IRQ_SAVE_DISABLE(x)                                     \
147                 ({                                                              \
148                         x = CPU_READ_FLAGS();                                   \
149                         IRQ_DISABLE;                                            \
150                 })
151
                /* Restore an IRQ state previously saved with IRQ_SAVE_DISABLE(). */
152                 #define IRQ_RESTORE(x)                                          \
153                 ({                                                              \
154                         asm volatile (                                          \
155                                 "msr basepri, %0"                               \
156                                 : : "r"(x) : "memory", "cc");                   \
157                 })
158
                /* Return true when executing in interrupt (Handler) context:
                 * ret is set to 1 when SP == MSP, 0 otherwise. */
159                 INLINE bool irq_running(void)
160                 {
161                         register uint32_t ret;
162
163                         /*
164                          * Check if the current stack pointer is the main stack or
165                          * process stack: we use the main stack only in Handler mode,
166                          * so this means we're running inside an ISR.
167                          */
168                         asm volatile (
169                                 "mrs %0, msp\n\t"
170                                 "cmp sp, %0\n\t"
171                                 "ite ne\n\t"
172                                 "movne %0, #0\n\t"
173                                 "moveq %0, #1\n\t" : "=r"(ret) : : "cc");
174                         return ret;
175                 }
176         #endif /* __IAR_SYSTEMS_ICC__ */
177
178         #define IRQ_ENABLED() (CPU_READ_FLAGS() == IRQ_PRIO_ENABLED)
179
180         #define IRQ_RUNNING() irq_running()
181
182         #if (CONFIG_KERN && CONFIG_KERN_PREEMPT)
183
184                 #define DECLARE_ISR_CONTEXT_SWITCH(func)                \
185                 void func(void);                                        \
186                 INLINE void __isr_##func(void);                         \
187                 void func(void)                                         \
188                 {                                                       \
189                         __isr_##func();                                 \
190                         if (!proc_needPreempt())                        \
191                                 return;                                 \
192                         /*
193                          * Set a PendSV request.
194                          *
195                          * The preemption handler will be called immediately
196                          * after this ISR in tail-chaining mode (without the
197                          * overhead of hardware state saving and restoration
198                          * between interrupts).
199                          */                                             \
200                         HWREG(NVIC_INT_CTRL) = NVIC_INT_CTRL_PEND_SV;   \
201                 }                                                       \
202                 INLINE void __isr_##func(void)
203
204                 /**
205                  * With task priorities enabled each ISR is used a point to
206                  * check if we need to perform a context switch.
207                  *
208                  * Instead, without priorities a context switch can occur only
209                  * when the running task expires its time quantum. In this last
210                  * case, the context switch can only occur in the timer ISR,
211                  * that must be always declared with the
212                  * DECLARE_ISR_CONTEXT_SWITCH() macro.
213                  */
214                 #if CONFIG_KERN_PRI
215                         #define DECLARE_ISR(func) \
216                                 DECLARE_ISR_CONTEXT_SWITCH(func)
217                         /**
218                          * Interrupt service routine prototype: can be used for
219                          * forward declarations.
220                          */
221                         #define ISR_PROTO(func) \
222                                 ISR_PROTO_CONTEXT_SWITCH(func)
223                 #endif /* !CONFIG_KERN_PRI */
224         #endif
225
226         #ifndef ISR_PROTO
227                 #define ISR_PROTO(func) void func(void)
228         #endif
229         #ifndef DECLARE_ISR
230                 #define DECLARE_ISR(func) void func(void)
231         #endif
232         #ifndef DECLARE_ISR_CONTEXT_SWITCH
233                 #define DECLARE_ISR_CONTEXT_SWITCH(func) void func(void)
234         #endif
235         #ifndef ISR_PROTO_CONTEXT_SWITCH
236                 #define ISR_PROTO_CONTEXT_SWITCH(func) void func(void)
237         #endif
238
239 #elif CPU_ARM
240
241         #ifdef __IAR_SYSTEMS_ICC__
242
243                 #include <inarm.h>
244
245                 #if __CPU_MODE__ == 1 /* Thumb */
246                         /* Use stubs */
247                         extern cpu_flags_t get_CPSR(void);
248                         extern void set_CPSR(cpu_flags_t flags);
249                 #else
250                         #define get_CPSR __get_CPSR
251                         #define set_CPSR __set_CPSR
252                 #endif
253
254                 #define IRQ_DISABLE __disable_interrupt()
255                 #define IRQ_ENABLE  __enable_interrupt()
256
257                 #define IRQ_SAVE_DISABLE(x) \
258                 do { \
259                         (x) = get_CPSR(); \
260                         __disable_interrupt(); \
261                 } while (0)
262
263                 #define IRQ_RESTORE(x) \
264                 do { \
265                         set_CPSR(x); \
266                 } while (0)
267
268                 #define IRQ_ENABLED() \
269                         ((bool)(get_CPSR() & 0xb0))
270
271         #else /* !__IAR_SYSTEMS_ICC__ */
272
273                 #define IRQ_DISABLE                                     \
274                 do {                                                    \
275                         cpu_flags_t sreg;                               \
276                         asm volatile (                                  \
277                                 "mrs %0, cpsr\n\t"                      \
278                                 "orr %0, %0, #0xc0\n\t"                 \
279                                 "msr cpsr_c, %0\n\t"                    \
280                                 : "=r" (sreg) : : "memory", "cc");      \
281                 } while (0)
282
283                 #define IRQ_ENABLE                                      \
284                 do {                                                    \
285                         cpu_flags_t sreg;                               \
286                         asm volatile (                                  \
287                                 "mrs %0, cpsr\n\t"                      \
288                                 "bic %0, %0, #0xc0\n\t"                 \
289                                 "msr cpsr_c, %0\n\t"                    \
290                                 : "=r" (sreg) : : "memory", "cc");      \
291                 } while (0)
292
293                 #define IRQ_SAVE_DISABLE(x)                             \
294                 do {                                                    \
295                         register cpu_flags_t sreg;                      \
296                         asm volatile (                                  \
297                                 "mrs %0, cpsr\n\t"                      \
298                                 "orr %1, %0, #0xc0\n\t"                 \
299                                 "msr cpsr_c, %1\n\t"                    \
300                                 : "=r" (x), "=r" (sreg)                 \
301                                 : : "memory", "cc");                    \
302                 } while (0)
303
304                 #define IRQ_RESTORE(x)                                  \
305                 do {                                                    \
306                         asm volatile (                                  \
307                                 "msr cpsr_c, %0\n\t"                    \
308                                 : : "r" (x) : "memory", "cc");          \
309                 } while (0)
310
                /* Read the whole CPSR (current program status register). */
311                 #define CPU_READ_FLAGS()                                \
312                 ({                                                      \
313                         cpu_flags_t sreg;                               \
314                         asm volatile (                                  \
315                                 "mrs %0, cpsr\n\t"                      \
316                                 : "=r" (sreg) : : "memory", "cc");      \
317                         sreg;                                           \
318                 })
319
                /* IRQs are enabled unless both the I (0x80) and F (0x40)
                 * CPSR mask bits are set. */
320                 #define IRQ_ENABLED() ((CPU_READ_FLAGS() & 0xc0) != 0xc0)
321
                /*
                 * The low nibble of the CPSR mode field is 0x2 only in IRQ
                 * mode (0x12), so this is true while servicing an IRQ (but
                 * not an FIQ, whose mode is 0x11).
                 */
322                 #define IRQ_RUNNING() ((CPU_READ_FLAGS() & 0x0F) == 0x02)
323
324                 #if (CONFIG_KERN && CONFIG_KERN_PREEMPT)
325                         EXTERN_C void asm_irq_switch_context(void);
326
327                         /**
328                          * At the beginning of any ISR immediately ajust the
329                          * return address and store all the caller-save
330                          * registers (the ISR may change these registers that
331                          * are shared with the user-context).
332                          */
333                         #define IRQ_ENTRY() asm volatile ( \
334                                                 "sub    lr, lr, #4\n\t" \
335                                                 "stmfd  sp!, {r0-r3, ip, lr}\n\t")
336                         #define IRQ_EXIT()  asm volatile ( \
337                                                 "b      asm_irq_switch_context\n\t")
338                         /**
339                          * Function attribute to declare an interrupt service
340                          * routine.
341                          *
342                          * An ISR function must be declared as naked because we
343                          * want to add our IRQ_ENTRY() prologue and IRQ_EXIT()
344                          * epilogue code to handle the context switch and save
345                          * all the registers (not only the callee-save).
346                          *
347                          */
348                         #define ISR_FUNC __attribute__((naked))
349
350                         /**
351                          * The compiler cannot establish which
352                          * registers actually need to be saved, because
353                          * the interrupt can happen at any time, so the
354                          * "normal" prologue and epilogue used for a
355                          * generic function call are not suitable for
356                          * the ISR.
357                          *
358                          * Using a naked function has the drawback that
359                          * the stack is not automatically adjusted at
360                          * this point, like a "normal" function call.
361                          *
362                          * So, an ISR can _only_ contain other function
363                          * calls and they can't use the stack in any
364                          * other way.
365                          *
366                          * NOTE: we need to explicitly disable IRQs after
367                          * IRQ_ENTRY(), because the IRQ status flag is not
368                          * masked by the hardware and an IRQ ack inside the ISR
369                          * may cause the triggering of another IRQ before
370                          * exiting from the current ISR.
371                          *
372                          * The respective IRQ_ENABLE is not necessary, because
373                          * IRQs will be automatically re-enabled when restoring
374                          * the context of the user task.
375                          */
376                         #define DECLARE_ISR_CONTEXT_SWITCH(func)                \
377                                 void ISR_FUNC func(void);                       \
378                                 static NOINLINE void __isr_##func(void);        \
379                                 void ISR_FUNC func(void)                        \
380                                 {                                               \
381                                         IRQ_ENTRY();                            \
382                                         IRQ_DISABLE;                            \
383                                         __isr_##func();                         \
384                                         IRQ_EXIT();                             \
385                                 }                                               \
386                                 static NOINLINE void __isr_##func(void)
387                         /**
388                          * Interrupt service routine prototype: can be used for
389                          * forward declarations.
390                          */
391                         #define ISR_PROTO_CONTEXT_SWITCH(func)  \
392                                 void ISR_FUNC func(void)
393                         /**
394                          * With task priorities enabled each ISR is used as a point to
395                          * check if we need to perform a context switch.
396                          *
397                          * Instead, without priorities a context switch can occur only
398                          * when the running task expires its time quantum. In this last
399                          * case, the context switch can only occur in the timer
400                          * ISR, that must be always declared with the
401                          * DECLARE_ISR_CONTEXT_SWITCH() macro.
402                          */
403                         #if CONFIG_KERN_PRI
404                                 #define DECLARE_ISR(func) \
405                                         DECLARE_ISR_CONTEXT_SWITCH(func)
406
407                                 #define ISR_PROTO(func) \
408                                         ISR_PROTO_CONTEXT_SWITCH(func)
409                         #endif /* !CONFIG_KERN_PRI */
410                 #endif /* CONFIG_KERN_PREEMPT */
411
412                 #ifndef ISR_FUNC
413                         #define ISR_FUNC __attribute__((naked))
414                 #endif
415                 #ifndef DECLARE_ISR
416                         #define DECLARE_ISR(func) \
417                                 void ISR_FUNC func(void);                               \
418                                 /*                                                      \
419                                  * FIXME: avoid the inlining of this function.          \
420                                  *                                                      \
421                                  * This is terribly inefficient, but it's a             \
422                                  * reliable workaround to avoid gcc blowing             \
423                                  * away the stack (see the bug below):                  \
424                                  *                                                      \
425                                  * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=41999    \
426                                  */                                                     \
427                                 static NOINLINE void __isr_##func(void);                \
428                                 void ISR_FUNC func(void)                                \
429                                 {                                                       \
430                                         asm volatile (                                  \
431                                                 "sub    lr, lr, #4\n\t"                 \
432                                                 "stmfd  sp!, {r0-r3, ip, lr}\n\t");     \
433                                         __isr_##func();                                 \
434                                         asm volatile (                                  \
435                                                 "ldmfd sp!, {r0-r3, ip, pc}^\n\t");     \
436                                 }                                                       \
437                                 static NOINLINE void __isr_##func(void)
438                 #endif
439                 #ifndef DECLARE_ISR_CONTEXT_SWITCH
440                         #define DECLARE_ISR_CONTEXT_SWITCH(func) DECLARE_ISR(func)
441                 #endif
442                 #ifndef ISR_PROTO
443                         #define ISR_PROTO(func) void ISR_FUNC func(void)
444                 #endif
445                 #ifndef ISR_PROTO_CONTEXT_SWITCH
446                         #define ISR_PROTO_CONTEXT_SWITCH(func) ISR_PROTO(func)
447                 #endif
448
449         #endif /* !__IAR_SYSTEMS_ICC__ */
450
451 #elif CPU_PPC
452
453         /* Get IRQ_* definitions from the hosting environment. */
454         #include <cfg/os.h>
455         #if OS_EMBEDDED
456                 #define IRQ_DISABLE         FIXME
457                 #define IRQ_ENABLE          FIXME
458                 #define IRQ_SAVE_DISABLE(x) FIXME
459                 #define IRQ_RESTORE(x)      FIXME
460                 #define IRQ_ENABLED()       FIXME
461         #endif /* OS_EMBEDDED */
462
463 #elif CPU_DSP56K
464
465         #define IRQ_DISABLE             do { asm(bfset #0x0200,SR); asm(nop); } while (0)
466         #define IRQ_ENABLE              do { asm(bfclr #0x0200,SR); asm(nop); } while (0)
467
468         #define IRQ_SAVE_DISABLE(x)  \
469                 do { (void)x; asm(move SR,x); asm(bfset #0x0200,SR); } while (0)
470         #define IRQ_RESTORE(x)  \
471                 do { (void)x; asm(move x,SR); } while (0)
472
473         static inline bool irq_running(void)
474         {
475                 extern void *user_sp;
476                 return !!user_sp;
477         }
478         #define IRQ_RUNNING() irq_running()
479
        /*
         * True if IRQs are currently enabled.
         *
         * Bit 0x0200 of the status register is the same bit that
         * IRQ_DISABLE above sets (and IRQ_ENABLE clears), so interrupts are
         * enabled when it reads back as zero.
         */
480         static inline bool irq_enabled(void)
481         {
482                 uint16_t x;
483                 asm(move SR,x);
484                 return !(x & 0x0200);
485         }
486         #define IRQ_ENABLED() irq_enabled()
487
488 #elif CPU_AVR
489
490         #define IRQ_DISABLE   asm volatile ("cli" ::)
491         #define IRQ_ENABLE    asm volatile ("sei" ::)
492
493         #define IRQ_SAVE_DISABLE(x) \
494         do { \
495                 __asm__ __volatile__( \
496                         "in %0,__SREG__\n\t" \
497                         "cli" \
498                         : "=r" (x) : /* no inputs */ : "cc" \
499                 ); \
500         } while (0)
501
502         #define IRQ_RESTORE(x) \
503         do { \
504                 __asm__ __volatile__( \
505                         "out __SREG__,%0" : /* no outputs */ : "r" (x) : "cc" \
506                 ); \
507         } while (0)
508
509         #define IRQ_ENABLED() \
510         ({ \
511                 uint8_t sreg; \
512                 __asm__ __volatile__( \
513                         "in %0,__SREG__\n\t" \
514                         : "=r" (sreg)  /* no inputs & no clobbers */ \
515                 ); \
516                 (bool)(sreg & 0x80); \
517         })
518         #if (CONFIG_KERN && CONFIG_KERN_PREEMPT)
519                 #define DECLARE_ISR_CONTEXT_SWITCH(vect)                \
520                         INLINE void __isr_##vect(void);                 \
521                         ISR(vect)                                       \
522                         {                                               \
523                                 __isr_##vect();                         \
524                                 IRQ_PREEMPT_HANDLER();                  \
525                         }                                               \
526                         INLINE void __isr_##vect(void)
527
528                 /**
529                  * With task priorities enabled each ISR is used as a point to
530                  * check if we need to perform a context switch.
531                  *
532                  * Instead, without priorities a context switch can occur only
533                  * when the running task expires its time quantum. In this last
534                  * case, the context switch can only occur in the timer ISR,
535                  * that must be always declared with the
536                  * DECLARE_ISR_CONTEXT_SWITCH() macro.
537                  */
538                 #if CONFIG_KERN_PRI
539                         #define DECLARE_ISR(func) \
540                                 DECLARE_ISR_CONTEXT_SWITCH(func)
541                         /**
542                          * Interrupt service routine prototype: can be used for
543                          * forward declarations.
544                          */
545                         #define ISR_PROTO(func) \
546                                 ISR_PROTO_CONTEXT_SWITCH(func)
547                 #endif /* !CONFIG_KERN_PRI */
548         #endif
549
550         #ifndef ISR_PROTO
551                 #define ISR_PROTO(vect) ISR(vect)
552         #endif
553         #ifndef DECLARE_ISR
554                 #define DECLARE_ISR(vect) ISR(vect)
555         #endif
556         #ifndef DECLARE_ISR_CONTEXT_SWITCH
557                 #define DECLARE_ISR_CONTEXT_SWITCH(vect) ISR(vect)
558         #endif
559         #ifndef ISR_PROTO_CONTEXT_SWITCH
560                 #define ISR_PROTO_CONTEXT_SWITCH(vect) ISR(vect)
561         #endif
562
563 #elif CPU_MSP430
564
565         /* Get the compiler defined macros */
566         #include <signal.h>
567         #define IRQ_DISABLE         dint()
568         #define IRQ_ENABLE          eint()
569
570 #else
571         #error No CPU_... defined.
572 #endif
573
574 #ifdef IRQ_RUNNING
575         /// Ensure callee is running within an interrupt
576         #define ASSERT_IRQ_CONTEXT()  ASSERT(IRQ_RUNNING())
577
578         /// Ensure callee is not running within an interrupt
579         #define ASSERT_USER_CONTEXT() ASSERT(!IRQ_RUNNING())
580 #else
581         #define IRQ_RUNNING()   false
582         #define ASSERT_USER_CONTEXT()  do {} while(0)
583         #define ASSERT_IRQ_CONTEXT()   do {} while(0)
584 #endif
585
586 #ifdef IRQ_ENABLED
587         /// Ensure interrupts are enabled
588         #define IRQ_ASSERT_ENABLED()  ASSERT(IRQ_ENABLED())
589
590         /// Ensure interrupts are not enabled
591         #define IRQ_ASSERT_DISABLED() ASSERT(!IRQ_ENABLED())
592 #else
593         #define IRQ_ASSERT_ENABLED() do {} while(0)
594         #define IRQ_ASSERT_DISABLED() do {} while(0)
595 #endif
596
597
598 #ifndef IRQ_PREEMPT_HANDLER
599         #if (CONFIG_KERN && CONFIG_KERN_PREEMPT)
600                 /**
601                  * Handle preemptive context switch inside timer IRQ.
602                  */
603                 INLINE void IRQ_PREEMPT_HANDLER(void)
604                 {
                        /* Switch context only when the scheduler reports that
                         * the running process must be preempted. */
605                         if (proc_needPreempt())
606                                 proc_preempt();
607                 }
608         #else
609                 #define IRQ_PREEMPT_HANDLER() /* Nothing */
610         #endif
611 #endif
612
613 /**
614  * Execute \a CODE atomically with respect to interrupts.
615  *
616  * \see IRQ_SAVE_DISABLE IRQ_RESTORE
617  */
/*
 * NOTE: CODE must not return, goto or break out of the block, or
 * IRQ_RESTORE() is skipped and interrupts remain disabled.
 */
618 #define ATOMIC(CODE) \
619         do { \
620                 cpu_flags_t __flags; \
621                 IRQ_SAVE_DISABLE(__flags); \
622                 CODE; \
623                 IRQ_RESTORE(__flags); \
624         } while (0)
625
626 #endif /* CPU_IRQ_H */