/**
 * \file
 * <!--
 * Copyright 2004, 2005 Develer S.r.l. (http://www.develer.com/)
 * Copyright 2004 Giovanni Bajo
 * This file is part of DevLib - See README.devlib for information.
 * -->
 *
 * \brief CPU-specific definitions
 *
 * \author Giovanni Bajo <rasky@develer.com>
 * \author Bernardo Innocenti <bernie@develer.com>
 * \author Stefano Fedrigo <aleph@develer.com>
 */
#ifndef DEVLIB_CPU_H
#define DEVLIB_CPU_H

#include <cfg/compiler.h> /* for uintXX_t */
#include <cfg/arch_config.h>  /* ARCH_EMUL */


/**
 * \name Macros for determining CPU endianness.
 * \{
 */
#define CPU_BIG_ENDIAN    0x1234
#define CPU_LITTLE_ENDIAN 0x3412 /* Look twice, pal. This is not a bug. */
/*\}*/
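
/*
 * Usage sketch (illustrative addition, not from the original sources):
 * code that depends on byte order can compare CPU_BYTE_ORDER, defined by
 * the CPU-specific sections below, against these constants.
 *
 *      #if CPU_BYTE_ORDER == CPU_LITTLE_ENDIAN
 *              // little-endian specific code here
 *      #endif
 */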

/** Macro to include cpu-specific versions of the headers. */
#define CPU_HEADER(module)          PP_STRINGIZE(PP_CAT3(module, _, CPU_ID).h)

/** Macro to include cpu-specific versions of implementation files. */
#define CPU_CSOURCE(module)         PP_STRINGIZE(PP_CAT3(module, _, CPU_ID).c)

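/*
 * Usage sketch (illustrative addition, not from the original sources): a
 * module can pull in its CPU-specific counterpart without hardcoding the
 * CPU name.  The module name "ser" is only an example; with CPU_ID
 * defined as avr the line below would expand to #include "ser_avr.h".
 *
 *      #include CPU_HEADER(ser)
 */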

#if CPU_I196

        #define NOP                     nop_instruction()
        #define IRQ_DISABLE             disable_interrupt()
        #define IRQ_ENABLE              enable_interrupt()

        typedef uint16_t cpuflags_t; // FIXME
        typedef unsigned int cpustack_t;

        #define CPU_REG_BITS            16
        #define CPU_REGS_CNT            16
        #define CPU_STACK_GROWS_UPWARD  0
        #define CPU_SP_ON_EMPTY_SLOT    0
        #define CPU_BYTE_ORDER          CPU_LITTLE_ENDIAN
        #define CPU_HARVARD             0

#elif CPU_X86

        #define NOP                     asm volatile ("nop")

        /* Get IRQ_* definitions from the hosting environment. */
        #include <cfg/os.h>
        #if OS_EMBEDDED
                #define IRQ_DISABLE             FIXME
                #define IRQ_ENABLE              FIXME
                #define IRQ_SAVE_DISABLE(x)     FIXME
                #define IRQ_RESTORE(x)          FIXME
                typedef uint32_t cpuflags_t; // FIXME
        #endif /* OS_EMBEDDED */


        #define CPU_REGS_CNT            7
        #define CPU_SAVED_REGS_CNT      7
        #define CPU_STACK_GROWS_UPWARD  0
        #define CPU_SP_ON_EMPTY_SLOT    0
        #define CPU_BYTE_ORDER          CPU_LITTLE_ENDIAN
        #define CPU_HARVARD             0

        #if CPU_X86_64
                typedef uint64_t cpustack_t;
                #define CPU_REG_BITS    64

                #ifdef __WIN64__
                        /* WIN64 is an IL32-P64 weirdo. */
                        #define SIZEOF_LONG  4
                #endif
        #else
                typedef uint32_t cpustack_t;
                #define CPU_REG_BITS    32
        #endif

#elif CPU_ARM

        typedef uint32_t cpuflags_t;
        typedef uint32_t cpustack_t;

        /* Register counts include SREG too */
        #define CPU_REG_BITS           32
        #define CPU_REGS_CNT           16
        #define CPU_SAVED_REGS_CNT     FIXME
        #define CPU_STACK_GROWS_UPWARD 0  //FIXME
        #define CPU_SP_ON_EMPTY_SLOT   0  //FIXME
        #define CPU_BYTE_ORDER         (__BIG_ENDIAN__ ? CPU_BIG_ENDIAN : CPU_LITTLE_ENDIAN)
        #define CPU_HARVARD            0

        #ifdef __IAR_SYSTEMS_ICC__

                #include <inarm.h>

                #if __CPU_MODE__ == 1 /* Thumb */
                        /* Use stubs */
                        extern cpuflags_t get_CPSR(void);
                        extern void set_CPSR(cpuflags_t flags);
                #else
                        #define get_CPSR __get_CPSR
                        #define set_CPSR __set_CPSR
                #endif

                #define NOP         __no_operation()
                #define IRQ_DISABLE __disable_interrupt()
                #define IRQ_ENABLE  __enable_interrupt()

                #define IRQ_SAVE_DISABLE(x) \
                do { \
                        (x) = get_CPSR(); \
                        __disable_interrupt(); \
                } while (0)

                #define IRQ_RESTORE(x) \
                do { \
                        set_CPSR(x); \
                } while (0)

                #define IRQ_GETSTATE() \
                        ((bool)(get_CPSR() & 0xb0))

                #define BREAKPOINT  /* asm("bkpt 0") DOES NOT WORK */

        #else /* !__IAR_SYSTEMS_ICC__ */

                #warning "IRQ_ macros need testing!"

                #define NOP         asm volatile ("mov r0,r0" ::)

                #define IRQ_DISABLE \
                do { \
                        asm volatile ( \
                                "mrs r0, cpsr\n\t" \
                                "orr r0, r0, #0xb0\n\t" \
                                "msr cpsr, r0" \
                                : /* no outputs */ \
                                : /* no inputs */ \
                                : "r0" \
                        ); \
                } while (0)

                #define IRQ_ENABLE \
                do { \
                        asm volatile ( \
                                "mrs r0, cpsr\n\t" \
                                "bic r0, r0, #0xb0\n\t" \
                                "msr cpsr, r0" \
                                : /* no outputs */ \
                                : /* no inputs */ \
                                : "r0" \
                        ); \
                } while (0)

                #define IRQ_SAVE_DISABLE(x) \
                do { \
                        asm volatile ( \
                                "mrs r0, cpsr\n\t" \
                                "mov %0, r0\n\t" \
                                "orr r0, r0, #0xb0\n\t" \
                                "msr cpsr, r0" \
                                : "=r" (x) \
                                : /* no inputs */ \
                                : "r0" \
                        ); \
                } while (0)

                #define IRQ_RESTORE(x) \
                do { \
                        asm volatile ( \
                                "mov r0, %0\n\t" \
                                "msr cpsr, r0" \
                                : /* no outputs */ \
                                : "r" (x) \
                                : "r0" \
                        ); \
                } while (0)

                #define IRQ_GETSTATE() \
                ({ \
                        uint32_t sreg; \
                        asm volatile ( \
                                "mrs r0, cpsr\n\t" \
                                "mov %0, r0" \
                                : "=r" (sreg) \
                                : /* no inputs */ \
                                : "r0" \
                        ); \
                        (bool)(sreg & 0xb0); \
                })

        #endif /* !__IAR_SYSTEMS_ICC__ */

#elif CPU_PPC
        #define NOP                 asm volatile ("nop" ::)

        #define IRQ_DISABLE         FIXME
        #define IRQ_ENABLE          FIXME
        #define IRQ_SAVE_DISABLE(x) FIXME
        #define IRQ_RESTORE(x)      FIXME
        #define IRQ_GETSTATE()      FIXME

        typedef uint32_t cpuflags_t; // FIXME
        typedef uint32_t cpustack_t; // FIXME

        /* Register counts include SREG too */
        #define CPU_REG_BITS           (CPU_PPC32 ? 32 : 64)
        #define CPU_REGS_CNT           FIXME
        #define CPU_SAVED_REGS_CNT     FIXME
        #define CPU_STACK_GROWS_UPWARD 0  //FIXME
        #define CPU_SP_ON_EMPTY_SLOT   0  //FIXME
        #define CPU_BYTE_ORDER         (__BIG_ENDIAN__ ? CPU_BIG_ENDIAN : CPU_LITTLE_ENDIAN)
        #define CPU_HARVARD            0

#elif CPU_DSP56K

        #define NOP                     asm(nop)
        #define BREAKPOINT              asm(debug)
        #define IRQ_DISABLE             do { asm(bfset #0x0200,SR); asm(nop); } while (0)
        #define IRQ_ENABLE              do { asm(bfclr #0x0200,SR); asm(nop); } while (0)

        #define IRQ_SAVE_DISABLE(x)  \
                do { (void)x; asm(move SR,x); asm(bfset #0x0200,SR); } while (0)
        #define IRQ_RESTORE(x)  \
                do { (void)x; asm(move x,SR); } while (0)

        static inline bool irq_running(void)
        {
                extern void *user_sp;
                return !!user_sp;
        }
        #define IRQ_RUNNING() irq_running()

        static inline bool irq_getstate(void)
        {
                uint16_t x;
                asm(move SR,x);
                return !(x & 0x0200);
        }
        #define IRQ_GETSTATE() irq_getstate()

        typedef uint16_t cpuflags_t;
        typedef unsigned int cpustack_t;

        #define CPU_REG_BITS            16
        #define CPU_REGS_CNT            FIXME
        #define CPU_SAVED_REGS_CNT      8
        #define CPU_STACK_GROWS_UPWARD  1
        #define CPU_SP_ON_EMPTY_SLOT    0
        #define CPU_BYTE_ORDER          CPU_BIG_ENDIAN
        #define CPU_HARVARD             1

        /* Memory is word-addressed on the DSP56K */
        #define CPU_BITS_PER_CHAR  16
        #define SIZEOF_SHORT        1
        #define SIZEOF_INT          1
        #define SIZEOF_LONG         2
        #define SIZEOF_PTR          1

#elif CPU_AVR

        #define NOP           asm volatile ("nop" ::)
        #define IRQ_DISABLE   asm volatile ("cli" ::)
        #define IRQ_ENABLE    asm volatile ("sei" ::)

        #define IRQ_SAVE_DISABLE(x) \
        do { \
                __asm__ __volatile__( \
                        "in %0,__SREG__\n\t" \
                        "cli" \
                        : "=r" (x) : /* no inputs */ : "cc" \
                ); \
        } while (0)

        #define IRQ_RESTORE(x) \
        do { \
                __asm__ __volatile__( \
                        "out __SREG__,%0" : /* no outputs */ : "r" (x) : "cc" \
                ); \
        } while (0)

        #define IRQ_GETSTATE() \
        ({ \
                uint8_t sreg; \
                __asm__ __volatile__( \
                        "in %0,__SREG__\n\t" \
                        : "=r" (sreg)  /* no inputs & no clobbers */ \
                ); \
                (bool)(sreg & 0x80); \
        })

        typedef uint8_t cpuflags_t;
        typedef uint8_t cpustack_t;

        /* Register counts include SREG too */
        #define CPU_REG_BITS            8
        #define CPU_REGS_CNT           33
        #define CPU_SAVED_REGS_CNT     19
        #define CPU_STACK_GROWS_UPWARD  0
        #define CPU_SP_ON_EMPTY_SLOT    1
        #define CPU_BYTE_ORDER          CPU_LITTLE_ENDIAN
        #define CPU_HARVARD             1

        /**
         * Initialization value for registers in the stack frame.
         * The register index does not correspond directly to the CPU
         * register numbers: index 0 is the SREG register, whose initial
         * value is all zeros except for the interrupt enable bit (bit 7).
         */
        #define CPU_REG_INIT_VALUE(reg) (reg == 0 ? 0x80 : 0)
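
        /*
         * Illustrative sketch (an addition, not from the original sources):
         * context-setup code could use CPU_REG_INIT_VALUE together with
         * CPU_PUSH_WORD to prefill a fresh stack frame, given a cpustack_t
         * *sp already positioned at the top of the new frame.  The loop and
         * the register indexing below are hypothetical.
         *
         *      for (int reg = 0; reg < CPU_SAVED_REGS_CNT; reg++)
         *              CPU_PUSH_WORD(sp, CPU_REG_INIT_VALUE(reg));
         */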

#else
        #error No CPU_... defined.
#endif

/**
 * Execute \a CODE atomically with respect to interrupts.
 *
 * \see IRQ_SAVE_DISABLE IRQ_RESTORE
 */
#define ATOMIC(CODE) \
        do { \
                cpuflags_t __flags; \
                IRQ_SAVE_DISABLE(__flags); \
                CODE; \
                IRQ_RESTORE(__flags); \
        } while (0)
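
/*
 * Usage sketch (illustrative addition, not from the original sources):
 * protect a read-modify-write of a variable shared with an interrupt
 * handler.  The variable name is hypothetical.
 *
 *      extern volatile uint16_t shared_ticks;
 *      ATOMIC(shared_ticks += 2);
 *
 * The macro expands to an IRQ_SAVE_DISABLE/IRQ_RESTORE pair around the
 * statement, so the previous interrupt state is restored on exit.
 */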


/// Default for CPU_REG_INIT_VALUE when the CPU-specific section above does not define it.
#ifndef CPU_REG_INIT_VALUE
        #define CPU_REG_INIT_VALUE(reg)     0
#endif


#ifndef CPU_STACK_GROWS_UPWARD
        #error CPU_STACK_GROWS_UPWARD should have been defined to either 0 or 1
#endif

#ifndef CPU_SP_ON_EMPTY_SLOT
        #error CPU_SP_ON_EMPTY_SLOT should have been defined to either 0 or 1
#endif

/*
 * Support stack handling peculiarities of a few CPUs.
 *
 * Most processors let their stack grow downward and
 * keep SP pointing at the last pushed value.
 */
#if !CPU_STACK_GROWS_UPWARD
        #if !CPU_SP_ON_EMPTY_SLOT
                /* Most microprocessors (x86, m68k...) */
                #define CPU_PUSH_WORD(sp, data) \
                        do { *--(sp) = (data); } while (0)
                #define CPU_POP_WORD(sp) \
                        (*(sp)++)
        #else
                /* AVR insanity */
                #define CPU_PUSH_WORD(sp, data) \
                        do { *(sp)-- = (data); } while (0)
                #define CPU_POP_WORD(sp) \
                        (*++(sp))
        #endif

#else /* CPU_STACK_GROWS_UPWARD */

        #if !CPU_SP_ON_EMPTY_SLOT
                /* DSP56K and other weirdos */
                #define CPU_PUSH_WORD(sp, data) \
                        do { *++(sp) = (cpustack_t)(data); } while (0)
                #define CPU_POP_WORD(sp) \
                        (*(sp)--)
        #else
                #error I bet you cannot find a CPU like this
        #endif
#endif
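
/*
 * Illustrative sketch (an addition, not from the original sources): on a
 * CPU with a downward-growing stack and SP pointing at the last pushed
 * word (the common x86/m68k case above), the macros behave like this.
 * The array and pointer names are hypothetical.
 *
 *      cpustack_t frame[64];
 *      cpustack_t *sp = frame + 64;        // empty descending stack
 *      CPU_PUSH_WORD(sp, 0xAA);            // sp now points at the stored word
 *      cpustack_t top = CPU_POP_WORD(sp);  // top == 0xAA, sp back to frame + 64
 */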


#if CPU_DSP56K
        /*
         * The DSP56K pushes both PC and SR to the stack in the JSR instruction,
         * but RTS discards SR while returning (it does not restore it). So we
         * push a dummy SR value to fake the same context.
         */
        #define CPU_PUSH_CALL_CONTEXT(sp, func) \
                do { \
                        CPU_PUSH_WORD((sp), (func)); \
                        CPU_PUSH_WORD((sp), 0x100); \
                } while (0)

#elif CPU_AVR
        /*
         * On the AVR, addresses are pushed onto the stack as little-endian, while
         * memory accesses are big-endian (actually, it's an 8-bit CPU, so there is
         * no natural endianness).
         */
        #define CPU_PUSH_CALL_CONTEXT(sp, func) \
                do { \
                        uint16_t funcaddr = (uint16_t)(func); \
                        CPU_PUSH_WORD((sp), funcaddr); \
                        CPU_PUSH_WORD((sp), funcaddr>>8); \
                } while (0)

#else
        #define CPU_PUSH_CALL_CONTEXT(sp, func) \
                CPU_PUSH_WORD((sp), (cpustack_t)(func))
#endif
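
/*
 * Usage sketch (illustrative addition, not from the original sources): a
 * scheduler creating a new process stack would push the entry point with
 * CPU_PUSH_CALL_CONTEXT before handing the stack pointer to the context
 * switch code.  The names below are hypothetical and a downward-growing
 * stack is assumed.
 *
 *      static cpustack_t proc_stack[128];
 *      cpustack_t *sp = proc_stack + 128;
 *      CPU_PUSH_CALL_CONTEXT(sp, proc_entry);
 */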


/**
 * \name Default type sizes.
 *
 * These defaults are reasonable for most 16/32-bit machines.
 * Some of these macros may be overridden by CPU-specific code above.
 *
 * ANSI C requires that the following relations hold:
 * \code
 *   sizeof(char) <= sizeof(short) <= sizeof(int) <= sizeof(long)
 *   sizeof(float) <= sizeof(double)
 *   CPU_BITS_PER_CHAR  >= 8
 *   CPU_BITS_PER_SHORT >= 8
 *   CPU_BITS_PER_INT   >= 16
 *   CPU_BITS_PER_LONG  >= 32
 * \endcode
 * \{
 */
#ifndef SIZEOF_CHAR
#define SIZEOF_CHAR  1
#endif

#ifndef SIZEOF_SHORT
#define SIZEOF_SHORT  2
#endif

#ifndef SIZEOF_INT
#if CPU_REG_BITS < 32
        #define SIZEOF_INT  2
#else
        #define SIZEOF_INT  4
#endif
#endif /* !SIZEOF_INT */

#ifndef SIZEOF_LONG
#if CPU_REG_BITS > 32
        #define SIZEOF_LONG  8
#else
        #define SIZEOF_LONG  4
#endif
#endif

#ifndef SIZEOF_PTR
#if CPU_REG_BITS < 32
        #define SIZEOF_PTR   2
#elif CPU_REG_BITS == 32
        #define SIZEOF_PTR   4
#else /* CPU_REG_BITS > 32 */
        #define SIZEOF_PTR   8
#endif
#endif

#ifndef CPU_BITS_PER_CHAR
#define CPU_BITS_PER_CHAR   (SIZEOF_CHAR * 8)
#endif

#ifndef CPU_BITS_PER_SHORT
#define CPU_BITS_PER_SHORT  (SIZEOF_SHORT * CPU_BITS_PER_CHAR)
#endif

#ifndef CPU_BITS_PER_INT
#define CPU_BITS_PER_INT    (SIZEOF_INT * CPU_BITS_PER_CHAR)
#endif

#ifndef CPU_BITS_PER_LONG
#define CPU_BITS_PER_LONG   (SIZEOF_LONG * CPU_BITS_PER_CHAR)
#endif

#ifndef CPU_BITS_PER_PTR
#define CPU_BITS_PER_PTR    (SIZEOF_PTR * CPU_BITS_PER_CHAR)
#endif

#ifndef BREAKPOINT
#define BREAKPOINT /* nop */
#endif

/*\}*/

/* Sanity checks for the above definitions */
STATIC_ASSERT(sizeof(char) == SIZEOF_CHAR);
STATIC_ASSERT(sizeof(short) == SIZEOF_SHORT);
STATIC_ASSERT(sizeof(long) == SIZEOF_LONG);
STATIC_ASSERT(sizeof(int) == SIZEOF_INT);
STATIC_ASSERT(sizeof(void *) == SIZEOF_PTR);
STATIC_ASSERT(sizeof(int8_t) * CPU_BITS_PER_CHAR == 8);
STATIC_ASSERT(sizeof(uint8_t) * CPU_BITS_PER_CHAR == 8);
STATIC_ASSERT(sizeof(int16_t) * CPU_BITS_PER_CHAR == 16);
STATIC_ASSERT(sizeof(uint16_t) * CPU_BITS_PER_CHAR == 16);
STATIC_ASSERT(sizeof(int32_t) * CPU_BITS_PER_CHAR == 32);
STATIC_ASSERT(sizeof(uint32_t) * CPU_BITS_PER_CHAR == 32);
#ifdef __HAS_INT64_T__
STATIC_ASSERT(sizeof(int64_t) * CPU_BITS_PER_CHAR == 64);
STATIC_ASSERT(sizeof(uint64_t) * CPU_BITS_PER_CHAR == 64);
#endif

/**
 * \def CPU_IDLE
 *
 * \brief Invoked by the scheduler to stop the CPU when idle.
 *
 * This hook can be redefined to put the CPU in low-power mode, or to
 * profile system load with an external strobe, or to save CPU cycles
 * in hosted environments such as emulators.
 */
#ifndef CPU_IDLE
        #if defined(ARCH_EMUL) && (ARCH & ARCH_EMUL)
                /* This emulator hook should yield the CPU to the host. */
                EXTERN_C_BEGIN
                void emul_idle(void);
                EXTERN_C_END
                #define CPU_IDLE emul_idle()
        #else /* !ARCH_EMUL */
                #define CPU_IDLE do { /* nothing */ } while (0)
        #endif /* !ARCH_EMUL */
#endif /* !CPU_IDLE */
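
/*
 * Usage sketch (illustrative addition, not from the original sources): the
 * idle loop of a scheduler might spin on CPU_IDLE while no process is
 * ready to run.  The predicate name is hypothetical.
 *
 *      while (!proc_ready())
 *              CPU_IDLE;
 */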

#endif /* DEVLIB_CPU_H */