ARM7TDMI: simplify IRQ_SAVE_DISABLE().
[bertos.git] / bertos / cpu / irq.h
1 /**
2  * \file
3  * <!--
4  * This file is part of BeRTOS.
5  *
6  * Bertos is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  * As a special exception, you may use this file as part of a free software
21  * library without restriction.  Specifically, if other files instantiate
22  * templates or use macros or inline functions from this file, or you compile
23  * this file and link it with other files to produce an executable, this
24  * file does not by itself cause the resulting executable to be covered by
25  * the GNU General Public License.  This exception does not however
26  * invalidate any other reasons why the executable file might be covered by
27  * the GNU General Public License.
28  *
29  * Copyright 2004, 2005, 2006, 2007 Develer S.r.l. (http://www.develer.com/)
30  * Copyright 2004 Giovanni Bajo
31  *
32  * -->
33  *
34  * \brief CPU-specific IRQ definitions.
35  *
36  * \author Giovanni Bajo <rasky@develer.com>
37  * \author Bernie Innocenti <bernie@codewiz.org>
38  * \author Stefano Fedrigo <aleph@develer.com>
39  * \author Francesco Sacchi <batt@develer.com>
40  */
41 #ifndef CPU_IRQ_H
42 #define CPU_IRQ_H
43
44 #include "detect.h"
45 #include "types.h"
46
47 #include <kern/proc.h> /* proc_needPreempt() / proc_preempt() */
48
49 #include <cfg/compiler.h> /* for uintXX_t */
50 #include "cfg/cfg_proc.h" /* CONFIG_KERN_PREEMPT */
51
52 #if CPU_I196
53         #define IRQ_DISABLE             disable_interrupt()
54         #define IRQ_ENABLE              enable_interrupt()
55 #elif CPU_X86
56
57         /* Get IRQ_* definitions from the hosting environment. */
58         #include <cfg/os.h>
59         #if OS_EMBEDDED
60                 #define IRQ_DISABLE             FIXME
61                 #define IRQ_ENABLE              FIXME
62                 #define IRQ_SAVE_DISABLE(x)     FIXME
63                 #define IRQ_RESTORE(x)          FIXME
64         #endif /* OS_EMBEDDED */
65
66 #elif CPU_CM3
67         /* Cortex-M3 */
68
69         /*
70          * Interrupt priority.
71          *
72          * NOTE: 0 means that an interrupt is not affected by the global IRQ
73          * priority settings.
74          */
75         #define IRQ_PRIO                0x80
76         #define IRQ_PRIO_MIN            0xf0
77         #define IRQ_PRIO_MAX            0
78         /*
79          * To disable interrupts we just raise the system base priority to a
80          * number lower than the default IRQ priority. In this way, all the
81          * "normal" interrupt can't be triggered. High-priority interrupt can
82          * still happen (at the moment only the soft-interrupt svcall uses a
83          * priority greater than the default IRQ priority).
84          *
85          * To enable interrupts we set the system base priority to 0, that
86          * means IRQ priority mechanism is disabled, and any interrupt can
87          * happen.
88          */
89         #define IRQ_PRIO_DISABLED       0x40
90         #define IRQ_PRIO_ENABLED        0
91
92         #define IRQ_DISABLE                                             \
93         ({                                                              \
94                 register cpu_flags_t reg = IRQ_PRIO_DISABLED;           \
95                 asm volatile (                                          \
96                         "msr basepri, %0"                               \
97                         : : "r"(reg) : "memory", "cc");                 \
98         })
99
100         #define IRQ_ENABLE                                              \
101         ({                                                              \
102                 register cpu_flags_t reg = IRQ_PRIO_ENABLED;            \
103                 asm volatile (                                          \
104                         "msr basepri, %0"                               \
105                         : : "r"(reg) : "memory", "cc");                 \
106         })
107
108         #define CPU_READ_FLAGS()                                        \
109         ({                                                              \
110                 register cpu_flags_t reg;                               \
111                 asm volatile (                                          \
112                         "mrs %0, basepri"                               \
113                          : "=r"(reg) : : "memory", "cc");               \
114                 reg;                                                    \
115         })
116
117         #define IRQ_SAVE_DISABLE(x)                                     \
118         ({                                                              \
119                 x = CPU_READ_FLAGS();                                   \
120                 IRQ_DISABLE;                                            \
121         })
122
123         #define IRQ_RESTORE(x)                                          \
124         ({                                                              \
125                 asm volatile (                                          \
126                         "msr basepri, %0"                               \
127                         : : "r"(x) : "memory", "cc");                   \
128         })
129
130         #define IRQ_ENABLED() (CPU_READ_FLAGS() == IRQ_PRIO_ENABLED)
131
	/**
	 * Tell whether we are currently executing in interrupt (Handler) mode.
	 *
	 * \return true when running inside an ISR, false in task context.
	 */
	INLINE bool irq_running(void)
	{
		register uint32_t ret;

		/*
		 * Check if the current stack pointer is the main stack or
		 * process stack: we use the main stack only in Handler mode,
		 * so this means we're running inside an ISR.
		 */
		asm volatile (
			"mrs %0, msp\n\t"
			"cmp sp, %0\n\t"
			"ite ne\n\t"          /* if-then-else on the comparison result */
			"movne %0, #0\n\t"    /* sp != msp -> thread mode -> 0 */
			"moveq %0, #1\n\t" : "=r"(ret) : : "cc");
		return ret;
	}
149         #define IRQ_RUNNING() irq_running()
150
151         #if CONFIG_KERN_PREEMPT
152
153                 #define DECLARE_ISR_CONTEXT_SWITCH(func)                \
154                 void func(void);                                        \
155                 INLINE void __isr_##func(void);                         \
156                 void func(void)                                         \
157                 {                                                       \
158                         __isr_##func();                                 \
159                         if (!proc_needPreempt())                        \
160                                 return;                                 \
161                         /*
162                          * Set a PendSV request.
163                          *
164                          * The preemption handler will be called immediately
165                          * after this ISR in tail-chaining mode (without the
166                          * overhead of hardware state saving and restoration
167                          * between interrupts).
168                          */                                             \
169                         HWREG(NVIC_INT_CTRL) = NVIC_INT_CTRL_PEND_SV;   \
170                 }                                                       \
171                 INLINE void __isr_##func(void)
172
173                 /**
		 * With task priorities enabled, each ISR is used as a point to
175                  * check if we need to perform a context switch.
176                  *
177                  * Instead, without priorities a context switch can occur only
178                  * when the running task expires its time quantum. In this last
179                  * case, the context switch can only occur in the timer ISR,
180                  * that must be always declared with the
181                  * DECLARE_ISR_CONTEXT_SWITCH() macro.
182                  */
183                 #if CONFIG_KERN_PRI
184                         #define DECLARE_ISR(func) \
185                                 DECLARE_ISR_CONTEXT_SWITCH(func)
186                         /**
187                          * Interrupt service routine prototype: can be used for
188                          * forward declarations.
189                          */
190                         #define ISR_PROTO(func) \
191                                 ISR_PROTO_CONTEXT_SWITCH(func)
192                 #endif /* !CONFIG_KERN_PRI */
193         #endif
194
195         #ifndef ISR_PROTO
196                 #define ISR_PROTO(func) void func(void)
197         #endif
198         #ifndef DECLARE_ISR
199                 #define DECLARE_ISR(func) void func(void)
200         #endif
201         #ifndef DECLARE_ISR_CONTEXT_SWITCH
202                 #define DECLARE_ISR_CONTEXT_SWITCH(func) void func(void)
203         #endif
204         #ifndef ISR_PROTO_CONTEXT_SWITCH
205                 #define ISR_PROTO_CONTEXT_SWITCH(func) void func(void)
206         #endif
207
208 #elif CPU_ARM
209
210         #ifdef __IAR_SYSTEMS_ICC__
211
212                 #include <inarm.h>
213
214                 #if __CPU_MODE__ == 1 /* Thumb */
215                         /* Use stubs */
216                         extern cpu_flags_t get_CPSR(void);
217                         extern void set_CPSR(cpu_flags_t flags);
218                 #else
219                         #define get_CPSR __get_CPSR
220                         #define set_CPSR __set_CPSR
221                 #endif
222
223                 #define IRQ_DISABLE __disable_interrupt()
224                 #define IRQ_ENABLE  __enable_interrupt()
225
226                 #define IRQ_SAVE_DISABLE(x) \
227                 do { \
228                         (x) = get_CPSR(); \
229                         __disable_interrupt(); \
230                 } while (0)
231
232                 #define IRQ_RESTORE(x) \
233                 do { \
234                         set_CPSR(x); \
235                 } while (0)
236
237                 #define IRQ_ENABLED() \
238                         ((bool)(get_CPSR() & 0xb0))
239
240         #else /* !__IAR_SYSTEMS_ICC__ */
241
242                 #define IRQ_DISABLE                                     \
243                 do {                                                    \
244                         cpu_flags_t sreg;                               \
245                         asm volatile (                                  \
246                                 "mrs %0, cpsr\n\t"                      \
247                                 "orr %0, %0, #0xc0\n\t"                 \
248                                 "msr cpsr_c, %0\n\t"                    \
249                                 : "=r" (sreg) : : "memory", "cc");      \
250                 } while (0)
251
252                 #define IRQ_ENABLE                                      \
253                 do {                                                    \
254                         cpu_flags_t sreg;                               \
255                         asm volatile (                                  \
256                                 "mrs %0, cpsr\n\t"                      \
257                                 "bic %0, %0, #0xc0\n\t"                 \
258                                 "msr cpsr_c, %0\n\t"                    \
259                                 : "=r" (sreg) : : "memory", "cc");      \
260                 } while (0)
261
262                 #define IRQ_SAVE_DISABLE(x)                             \
263                 do {                                                    \
264                         register cpu_flags_t sreg;                      \
265                         asm volatile (                                  \
266                                 "mrs %0, cpsr\n\t"                      \
267                                 "orr %1, %0, #0xc0\n\t"                 \
268                                 "msr cpsr_c, %1\n\t"                    \
269                                 : "=r" (x), "=r" (sreg)                 \
270                                 : : "memory", "cc");                    \
271                 } while (0)
272
273                 #define IRQ_RESTORE(x)                                  \
274                 do {                                                    \
275                         asm volatile (                                  \
276                                 "msr cpsr_c, %0\n\t"                    \
277                                 : : "r" (x) : "memory", "cc");          \
278                 } while (0)
279
280                 #define CPU_READ_FLAGS()                                \
281                 ({                                                      \
282                         cpu_flags_t sreg;                               \
283                         asm volatile (                                  \
284                                 "mrs %0, cpsr\n\t"                      \
285                                 : "=r" (sreg) : : "memory", "cc");      \
286                         sreg;                                           \
287                 })
288
289                 #define IRQ_ENABLED() (!(CPU_READ_FLAGS() & 0x80))
290
291                 #if CONFIG_KERN_PREEMPT
292                         EXTERN_C void asm_irq_switch_context(void);
293
294                         /**
295                          * At the beginning of any ISR immediately ajust the
296                          * return address and store all the caller-save
297                          * registers (the ISR may change these registers that
298                          * are shared with the user-context).
299                          */
300                         #define IRQ_ENTRY() asm volatile ( \
301                                                 "sub    lr, lr, #4\n\t" \
302                                                 "stmfd  sp!, {r0-r3, ip, lr}\n\t")
303                         #define IRQ_EXIT()  asm volatile ( \
304                                                 "b      asm_irq_switch_context\n\t")
305                         /**
306                          * Function attribute to declare an interrupt service
307                          * routine.
308                          *
309                          * An ISR function must be declared as naked because we
310                          * want to add our IRQ_ENTRY() prologue and IRQ_EXIT()
311                          * epilogue code to handle the context switch and save
312                          * all the registers (not only the callee-save).
313                          *
314                          */
315                         #define ISR_FUNC __attribute__((naked))
316
317                         /**
318                          * The compiler cannot establish which
319                          * registers actually need to be saved, because
320                          * the interrupt can happen at any time, so the
321                          * "normal" prologue and epilogue used for a
322                          * generic function call are not suitable for
323                          * the ISR.
324                          *
325                          * Using a naked function has the drawback that
326                          * the stack is not automatically adjusted at
327                          * this point, like a "normal" function call.
328                          *
329                          * So, an ISR can _only_ contain other function
330                          * calls and they can't use the stack in any
331                          * other way.
332                          *
333                          * NOTE: we need to explicitly disable IRQs after
334                          * IRQ_ENTRY(), because the IRQ status flag is not
335                          * masked by the hardware and an IRQ ack inside the ISR
336                          * may cause the triggering of another IRQ before
337                          * exiting from the current ISR.
338                          *
339                          * The respective IRQ_ENABLE is not necessary, because
340                          * IRQs will be automatically re-enabled when restoring
341                          * the context of the user task.
342                          */
343                         #define DECLARE_ISR_CONTEXT_SWITCH(func)        \
344                                 void ISR_FUNC func(void);               \
345                                 static void __isr_##func(void);         \
346                                 void ISR_FUNC func(void)                \
347                                 {                                       \
348                                         IRQ_ENTRY();                    \
349                                         IRQ_DISABLE;                    \
350                                         __isr_##func();                 \
351                                         IRQ_EXIT();                     \
352                                 }                                       \
353                                 static void __isr_##func(void)
354                         /**
355                          * Interrupt service routine prototype: can be used for
356                          * forward declarations.
357                          */
358                         #define ISR_PROTO_CONTEXT_SWITCH(func)  \
359                                 void ISR_FUNC func(void)
360                         /**
			 * With task priorities enabled, each ISR is used as a point to
362                          * check if we need to perform a context switch.
363                          *
364                          * Instead, without priorities a context switch can occur only
365                          * when the running task expires its time quantum. In this last
366                          * case, the context switch can only occur in the timer
367                          * ISR, that must be always declared with the
368                          * DECLARE_ISR_CONTEXT_SWITCH() macro.
369                          */
370                         #if CONFIG_KERN_PRI
371                                 #define DECLARE_ISR(func) \
372                                         DECLARE_ISR_CONTEXT_SWITCH(func)
373
374                                 #define ISR_PROTO(func) \
375                                         ISR_PROTO_CONTEXT_SWITCH(func)
376                         #endif /* !CONFIG_KERN_PRI */
377                 #endif /* CONFIG_KERN_PREEMPT */
378
379                 #ifndef DECLARE_ISR
380                         #define DECLARE_ISR(func) \
381                                 void __attribute__((interrupt)) func(void)
382                 #endif
383                 #ifndef DECLARE_ISR_CONTEXT_SWITCH
384                         #define DECLARE_ISR_CONTEXT_SWITCH(func) \
385                                 void __attribute__((interrupt)) func(void)
386                 #endif
387                 #ifndef ISR_PROTO
388                         #define ISR_PROTO(func) \
389                                 void __attribute__((interrupt)) func(void)
390                 #endif
391                 #ifndef ISR_PROTO_CONTEXT_SWITCH
392                         #define ISR_PROTO_CONTEXT_SWITCH(func)  \
393                                 void __attribute__((interrupt)) func(void)
394                 #endif
395
	#endif /* !__IAR_SYSTEMS_ICC__ */
397
398 #elif CPU_PPC
399
400         /* Get IRQ_* definitions from the hosting environment. */
401         #include <cfg/os.h>
402         #if OS_EMBEDDED
403                 #define IRQ_DISABLE         FIXME
404                 #define IRQ_ENABLE          FIXME
405                 #define IRQ_SAVE_DISABLE(x) FIXME
406                 #define IRQ_RESTORE(x)      FIXME
407                 #define IRQ_ENABLED()       FIXME
408         #endif /* OS_EMBEDDED */
409
410 #elif CPU_DSP56K
411
412         #define IRQ_DISABLE             do { asm(bfset #0x0200,SR); asm(nop); } while (0)
413         #define IRQ_ENABLE              do { asm(bfclr #0x0200,SR); asm(nop); } while (0)
414
415         #define IRQ_SAVE_DISABLE(x)  \
416                 do { (void)x; asm(move SR,x); asm(bfset #0x0200,SR); } while (0)
417         #define IRQ_RESTORE(x)  \
418                 do { (void)x; asm(move x,SR); } while (0)
419
420         static inline bool irq_running(void)
421         {
422                 extern void *user_sp;
423                 return !!user_sp;
424         }
425         #define IRQ_RUNNING() irq_running()
426
	/**
	 * Tell whether interrupts are currently enabled.
	 *
	 * Reads the status register: bit 0x0200 is the interrupt mask set
	 * by IRQ_DISABLE above, so IRQs are enabled iff it is clear.
	 */
	static inline bool irq_enabled(void)
	{
		uint16_t x;
		/* DSP56K compiler-specific inline asm syntax. */
		asm(move SR,x);
		return !(x & 0x0200);
	}
433         #define IRQ_ENABLED() irq_enabled()
434
435 #elif CPU_AVR
436
437         #define IRQ_DISABLE   asm volatile ("cli" ::)
438         #define IRQ_ENABLE    asm volatile ("sei" ::)
439
440         #define IRQ_SAVE_DISABLE(x) \
441         do { \
442                 __asm__ __volatile__( \
443                         "in %0,__SREG__\n\t" \
444                         "cli" \
445                         : "=r" (x) : /* no inputs */ : "cc" \
446                 ); \
447         } while (0)
448
449         #define IRQ_RESTORE(x) \
450         do { \
451                 __asm__ __volatile__( \
452                         "out __SREG__,%0" : /* no outputs */ : "r" (x) : "cc" \
453                 ); \
454         } while (0)
455
456         #define IRQ_ENABLED() \
457         ({ \
458                 uint8_t sreg; \
459                 __asm__ __volatile__( \
460                         "in %0,__SREG__\n\t" \
461                         : "=r" (sreg)  /* no inputs & no clobbers */ \
462                 ); \
463                 (bool)(sreg & 0x80); \
464         })
465         #if CONFIG_KERN_PREEMPT
466                 #define DECLARE_ISR_CONTEXT_SWITCH(vect)                \
467                         INLINE void __isr_##vect(void);                 \
468                         ISR(vect)                                       \
469                         {                                               \
470                                 __isr_##vect();                         \
471                                 IRQ_PREEMPT_HANDLER();                  \
472                         }                                               \
473                         INLINE void __isr_##vect(void)
474
475                 /**
		 * With task priorities enabled, each ISR is used as a point to
477                  * check if we need to perform a context switch.
478                  *
479                  * Instead, without priorities a context switch can occur only
480                  * when the running task expires its time quantum. In this last
481                  * case, the context switch can only occur in the timer ISR,
482                  * that must be always declared with the
483                  * DECLARE_ISR_CONTEXT_SWITCH() macro.
484                  */
485                 #if CONFIG_KERN_PRI
486                         #define DECLARE_ISR(func) \
487                                 DECLARE_ISR_CONTEXT_SWITCH(func)
488                         /**
489                          * Interrupt service routine prototype: can be used for
490                          * forward declarations.
491                          */
492                         #define ISR_PROTO(func) \
493                                 ISR_PROTO_CONTEXT_SWITCH(func)
494                 #endif /* !CONFIG_KERN_PRI */
495         #endif
496
497         #ifndef ISR_PROTO
498                 #define ISR_PROTO(vect) ISR(vect)
499         #endif
500         #ifndef DECLARE_ISR
501                 #define DECLARE_ISR(vect) ISR(vect)
502         #endif
503         #ifndef DECLARE_ISR_CONTEXT_SWITCH
504                 #define DECLARE_ISR_CONTEXT_SWITCH(vect) ISR(vect)
505         #endif
506         #ifndef ISR_PROTO_CONTEXT_SWITCH
507                 #define ISR_PROTO_CONTEXT_SWITCH(vect) ISR(vect)
508         #endif
509
510 #else
511         #error No CPU_... defined.
512 #endif
513
514 #ifdef IRQ_RUNNING
515         /// Ensure callee is running within an interrupt
516         #define ASSERT_IRQ_CONTEXT()  ASSERT(IRQ_RUNNING())
517
518         /// Ensure callee is not running within an interrupt
519         #define ASSERT_USER_CONTEXT() ASSERT(!IRQ_RUNNING())
520 #else
521         #define IRQ_RUNNING()   false
522         #define ASSERT_USER_CONTEXT()  do {} while(0)
523         #define ASSERT_IRQ_CONTEXT()   do {} while(0)
524 #endif
525
526 #ifdef IRQ_ENABLED
527         /// Ensure interrupts are enabled
528         #define IRQ_ASSERT_ENABLED()  ASSERT(IRQ_ENABLED())
529
530         /// Ensure interrupts are not enabled
531         #define IRQ_ASSERT_DISABLED() ASSERT(!IRQ_ENABLED())
532 #else
533         #define IRQ_ASSERT_ENABLED() do {} while(0)
534         #define IRQ_ASSERT_DISABLED() do {} while(0)
535 #endif
536
537
538 #ifndef IRQ_PREEMPT_HANDLER
539         #if CONFIG_KERN_PREEMPT
540                 /**
541                  * Handle preemptive context switch inside timer IRQ.
542                  */
543                 INLINE void IRQ_PREEMPT_HANDLER(void)
544                 {
545                         if (proc_needPreempt())
546                                 proc_preempt();
547                 }
548         #else
549                 #define IRQ_PREEMPT_HANDLER() /* Nothing */
550         #endif
551 #endif
552
553 /**
554  * Execute \a CODE atomically with respect to interrupts.
555  *
556  * \see IRQ_SAVE_DISABLE IRQ_RESTORE
557  */
558 #define ATOMIC(CODE) \
559         do { \
560                 cpu_flags_t __flags; \
561                 IRQ_SAVE_DISABLE(__flags); \
562                 CODE; \
563                 IRQ_RESTORE(__flags); \
564         } while (0)
565
566 #endif /* CPU_IRQ_H */