/*!
 * Copyright 2004 Develer S.r.l. (http://www.develer.com/)
 * Copyright 1999,2000,2001 Bernardo Innocenti <bernie@develer.com>
 * This file is part of DevLib - See devlib/README for information.
 *
 * \brief Heap subsystem (public interface).
 *
 * \author Bernardo Innocenti <bernie@develer.com>
 */

/*
 * Revision 1.2  2004/08/04 15:54:18  rasky
 * Merge from SC: first really working version
 *
 * Revision 1.1  2004/07/31 16:33:58  rasky
 * Moved the heap from kern/ to mware/
 *
 * Revision 1.2  2004/06/03 11:27:09  bernie
 * Add dual-license information.
 *
 * Revision 1.1  2004/05/23 17:27:00  bernie
 * Import kern/ subdirectory.
 */
#include "heap.h"
#include <string.h>     // memset()
#include <drv/kdebug.h> // ASSERT()
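
/*
 * Illustrative usage sketch (not from the original sources): a heap is bound
 * to a caller-supplied memory region with heap_init(), then raw blocks are
 * carved out with heap_allocmem() and given back with heap_freemem().  The
 * struct Heap type is assumed to be declared in heap.h.
 *
 *	static uint8_t heap_buf[4096];
 *	static struct Heap h;
 *
 *	heap_init(&h, heap_buf, sizeof(heap_buf));
 *	void *p = heap_allocmem(&h, 128);
 *	if (p)
 *	{
 *		// ... use the 128-byte block ...
 *		heap_freemem(&h, p, 128);  // caller must pass the original size
 *	}
 */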
/* NOTE: struct size must be a 2's power! */
typedef struct _MemChunk
{
	struct _MemChunk *next;
	size_t size;
} MemChunk;

STATIC_ASSERT(IS_POW2(sizeof(MemChunk)));
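
/*
 * The power-of-two constraint comes from the size rounding below: every
 * request is rounded up to a multiple of sizeof(MemChunk) with ROUND2(),
 * which is assumed (as in DevLib's macro headers) to work through a simple
 * bitmask and therefore only supports power-of-two granularities.  For
 * example, with an 8-byte MemChunk:
 *
 *	ROUND2(1, 8)  ==  8
 *	ROUND2(8, 8)  ==  8
 *	ROUND2(13, 8) == 16
 */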
#define FREE_FILL_CODE     0xDEAD
#define ALLOC_FILL_CODE    0xBEEF
void heap_init(struct Heap* h, void* memory, size_t size)
{
#ifdef _DEBUG
	memset(memory, FREE_FILL_CODE, size);
#endif

	/* Initialize heap with a single big chunk */
	h->FreeList = (MemChunk *)memory;
	h->FreeList->next = NULL;
	h->FreeList->size = size;
}
void *heap_allocmem(struct Heap* h, size_t size)
{
	MemChunk *chunk, *prev;

	/* Round size up to the allocation granularity */
	size = ROUND2(size, sizeof(MemChunk));

	/* Handle allocations of 0 bytes */
	if (!size)
		size = sizeof(MemChunk);

	/* Walk on the free list looking for any chunk big enough to
	 * fit the requested block size.
	 */
	for (prev = (MemChunk *)&h->FreeList, chunk = h->FreeList;
		chunk;
		prev = chunk, chunk = chunk->next)
	{
		if (chunk->size >= size)
		{
			if (chunk->size == size)
			{
				/* Just remove this chunk from the free list */
				prev->next = chunk->next;
#ifdef _DEBUG
				memset(chunk, ALLOC_FILL_CODE, size);
#endif
				return (void *)chunk;
			}
			else
			{
				/* Allocate from the END of an existing chunk */
				chunk->size -= size;
#ifdef _DEBUG
				memset((uint8_t *)chunk + chunk->size, ALLOC_FILL_CODE, size);
#endif
				return (void *)((uint8_t *)chunk + chunk->size);
			}
		}
	}

	return NULL; /* fail */
}
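
/*
 * Allocation sketch (illustrative only): with an 8-byte MemChunk, asking for
 * 20 bytes reserves 24, taken from the tail of the first free chunk that is
 * large enough, so the chunk header never has to move.
 *
 *	void *a = heap_allocmem(&h, 20);  // rounds up to 24 bytes
 *	void *b = heap_allocmem(&h, 0);   // zero-byte requests still consume sizeof(MemChunk)
 */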
void heap_freemem(struct Heap* h, void *mem, size_t size)
{
	MemChunk *prev;

#ifdef _DEBUG
	memset(mem, FREE_FILL_CODE, size);
#endif

	/* Round size up to the allocation granularity */
	size = ROUND2(size, sizeof(MemChunk));

	/* Handle allocations of 0 bytes */
	if (!size)
		size = sizeof(MemChunk);

	/* Special case: first chunk in the free list */
	ASSERT((uint8_t*)mem != (uint8_t*)h->FreeList);
	if (((uint8_t *)mem) < ((uint8_t *)h->FreeList))
	{
		/* Insert memory block before the current free list head */
		prev = (MemChunk *)mem;
		prev->next = h->FreeList;
		prev->size = size;
		h->FreeList = prev;
	}
	else /* Normal case: not the first chunk in the free list */
	{
		/*
		 * Walk on the free list. Stop at the insertion point (when mem
		 * is between prev and prev->next)
		 */
		prev = h->FreeList;
		while (prev->next < (MemChunk *)mem && prev->next)
			prev = prev->next;

		/* Make sure mem is not *within* prev */
		ASSERT((uint8_t*)mem >= (uint8_t*)prev + prev->size);

		/* Should it be merged with previous block? */
		if (((uint8_t *)prev) + prev->size == ((uint8_t *)mem))
		{
			/* Yes: just extend the previous chunk */
			prev->size += size;
		}
		else /* not merged with previous chunk */
		{
			MemChunk *curr = (MemChunk*)mem;

			/* insert it after the previous node
			 * and move the 'prev' pointer forward
			 * for the following operations
			 */
			curr->next = prev->next;
			curr->size = size;
			prev->next = curr;

			/* Adjust for the following test */
			prev = curr;
		}
	}
	/* Also merge with next chunk? */
	if (((uint8_t *)prev) + prev->size == ((uint8_t *)prev->next))
	{
		prev->size += prev->next->size;
		prev->next = prev->next->next;

		/* There should be only one merge opportunity, because we always merge on free */
		ASSERT((uint8_t*)prev + prev->size != (uint8_t*)prev->next);
	}
}
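
/*
 * Coalescing sketch (illustrative only): freeing two adjacent blocks, in any
 * order, leaves a single free chunk, because heap_freemem() merges the block
 * being freed with its free neighbours.
 *
 *	void *a = heap_allocmem(&h, 32);
 *	void *b = heap_allocmem(&h, 32);
 *	heap_freemem(&h, a, 32);
 *	heap_freemem(&h, b, 32);  // rejoins both neighbouring free chunks
 */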
#if CONFIG_HEAP_MALLOC
void *heap_malloc(struct Heap* h, size_t size)
{
	size_t *mem;

	size += sizeof(size_t);
	if ((mem = (size_t*)heap_allocmem(h, size)))
		*mem++ = size;

	return mem;
}
void *heap_calloc(struct Heap* h, size_t size)
{
	void *mem;

	if ((mem = heap_malloc(h, size)))
		memset(mem, 0, size);

	return mem;
}
void heap_free(struct Heap* h, void *mem_)
{
	size_t* mem = (size_t*)mem_;
	--mem;
	heap_freemem(h, mem, *mem);
}
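
/*
 * heap_malloc()/heap_free() hide the size bookkeeping from the caller by
 * reserving one extra size_t in front of the returned block.  Layout sketch
 * (illustrative only):
 *
 *	size_t *raw = heap_allocmem(h, 16 + sizeof(size_t));
 *	raw[0] = 16 + sizeof(size_t);  // total size, as stored by heap_malloc()
 *	void *user = &raw[1];          // pointer handed back to the caller
 *	// heap_free() steps back one size_t to recover the size for heap_freemem()
 */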
#endif /* CONFIG_HEAP_MALLOC */