Diffstat (limited to 'src/kernel/heap.c')
-rw-r--r--  src/kernel/heap.c  35
1 file changed, 26 insertions, 9 deletions
diff --git a/src/kernel/heap.c b/src/kernel/heap.c
index 123f7e6..b125d6e 100644
--- a/src/kernel/heap.c
+++ b/src/kernel/heap.c
@@ -20,8 +20,10 @@
#include "heap.h"
+// Alignment, in bytes, applied to every memory allocation
#define HEAP_ALIGN 4
+// Linked list of freed memory blocks
static alloc_t *free_blocks;
static void *heap_end;
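alloc_t itself is defined in heap.h and is not part of this diff; judging from the ->size and ->next fields used below, it presumably looks something like this sketch:

// Sketch only; the real definition lives in heap.h.
typedef struct alloc {
    uint32_t size;        // usable size tracked for this block
    struct alloc *next;   // next entry in free_blocks; set to 0 when the block is handed out
} alloc_t;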
@@ -34,8 +36,10 @@ void heap_init(void *buf)
uint32_t heap_free(void)
{
uint32_t total = 0;
+ // Sum the sizes of all freed blocks
for (alloc_t *node = free_blocks; node != 0; node = node->next)
total += node->size;
+ // Add remaining free memory to that total
return total + (0x20018000 - (uint32_t)heap_end);
}
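The hard-coded 0x20018000 is not named in this diff, but from the way it is used it reads as the address where the heap region ends. A rough worked example of the accounting, with made-up numbers:

// Suppose heap_end == (void *)0x20010000 and free_blocks holds two
// blocks of 64 and 128 bytes:
//   sum over free_blocks        = 64 + 128                = 192
//   untouched memory at the end = 0x20018000 - 0x20010000 = 0x8000 (32768)
//   heap_free() returns           192 + 32768             = 32960 bytes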
@@ -44,26 +48,29 @@ void *malloc(uint32_t size)
if (size == 0)
return 0;
+ // Round the request up to an aligned size, with room for the header
size = (size + sizeof(alloc_t) + HEAP_ALIGN) & ~(HEAP_ALIGN - 1);
+ // Search the freed blocks to see if any memory can be reused.
alloc_t *node = free_blocks;
alloc_t *prev = 0;
while (node != 0) {
if (node->size >= size) {
- // get out of the free chain
+ // If we can use this chunk, remove it from the free_blocks chain
if (prev != 0)
prev->next = node->next;
else
free_blocks = node->next;
node->next = 0;
- // split alloc if too big
- if (node->size > size + 64) {
+ // If this chunk is much larger than needed, split off the extra space
+ if (node->size > size + 64) { // TODO why 64..? presumably the minimum leftover worth splitting off
alloc_t *leftover = (alloc_t *)((uint32_t)node
+ sizeof(alloc_t) + size);
leftover->size = node->size - size - sizeof(alloc_t);
leftover->next = 0;
free((uint8_t *)leftover + sizeof(alloc_t));
+
node->size = size;
return (void *)((uint8_t *)node + sizeof(alloc_t));
}
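A worked example of the rounding and the split above, assuming sizeof(alloc_t) is 8 bytes (the real layout lives in heap.h):

// malloc(10) with HEAP_ALIGN == 4:
//   size = (10 + 8 + 4) & ~3 = 22 & ~3 = 20
//   (the formula adds HEAP_ALIGN rather than HEAP_ALIGN - 1, so an
//    already-aligned total is still bumped up by one alignment step)
//
// Reusing a freed block whose node->size is 200:
//   200 > 20 + 64, so the block is split:
//     leftover starts at (uint8_t *)node + 8 + 20
//     leftover->size = 200 - 20 - 8 = 172, and it is handed back via free()
//   node->size becomes 20, and node + 8 is returned to the caller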
@@ -75,6 +82,7 @@ void *malloc(uint32_t size)
node = node->next;
}
+ // No reusable chunk; take memory from the end of the heap
node = (alloc_t *)heap_end;
node->size = size;
node->next = 0;
@@ -86,9 +94,10 @@ void *malloc(uint32_t size)
void *calloc(uint32_t count, uint32_t size)
{
+ // Simply malloc and zero
uint8_t *buf = malloc(count * size);
for (uint32_t i = 0; i < count * size; i++)
- buf[i] = 0;
+ buf[i] = 0; // TODO safe?
return buf;
}
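The "TODO safe?" above points at two hazards: count * size can overflow 32 bits, and malloc() can return 0, which the loop would then write through. A possible hardening, sketched here with the hypothetical name calloc_checked:

void *calloc_checked(uint32_t count, uint32_t size)
{
    // Refuse requests where count * size would wrap around 32 bits
    if (size != 0 && count > 0xFFFFFFFFu / size)
        return 0;

    uint32_t total = count * size;
    uint8_t *buf = malloc(total);
    if (buf == 0)
        return 0;

    for (uint32_t i = 0; i < total; i++)
        buf[i] = 0;
    return buf;
}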
@@ -97,24 +106,30 @@ void free(void *buf)
if (buf == 0)
return;
+ // Get the alloc_t structure of this chunk
alloc_t *alloc = (alloc_t *)((uint8_t *)buf - sizeof(alloc_t));
if (alloc->next != 0)
return;
- // check for adjacent free'd blocks
+ // Search through the free_blocks list to see if this free chunk can merge
+ // into an adjacent free chunk.
int merged = 0;
for (alloc_t *prev = 0, *node = free_blocks; node != 0; prev = node, node = node->next) {
+ // If our block sits immediately after this node in memory
if ((uint32_t)node + sizeof(alloc_t) + node->size == (uint32_t)alloc) {
- // block before
+ // Merge by adding our block (and its header) to this node
merged |= 1;
node->size += sizeof(alloc_t) + alloc->size;
break;
- //alloc = node;
- } else if ((uint32_t)buf + alloc->size == (uint32_t)node) {
- // block after
+ }
+ // Or, if this node sits immediately after our block
+ else if ((uint32_t)buf + alloc->size == (uint32_t)node) {
+ // Merge the current node into ours
merged |= 1;
alloc->size += sizeof(alloc_t) + node->size;
alloc->next = node->next;
+
+ // Take the current node's place in the free_blocks chain
if (prev != 0)
prev->next = alloc;
else
@@ -123,9 +138,11 @@ void free(void *buf)
}
}
+ // If we couldn't merge, push the block onto the front of the chain
if (merged == 0) {
alloc->next = free_blocks;
free_blocks = alloc;
}
}
+
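Putting it together, a minimal usage sketch of this allocator; heap_buf and its size are assumptions, and heap.h is assumed to declare heap_init(), malloc(), calloc(), and free():

#include <stdint.h>
#include "heap.h"

// Hypothetical heap region; the real one apparently ends at 0x20018000.
static uint8_t heap_buf[4096];

void heap_demo(void)
{
    heap_init(heap_buf);                      // hand the allocator its region

    char *line = malloc(64);                  // taken from the end of the heap
    uint32_t *tbl = calloc(8, sizeof(*tbl));  // zeroed allocation

    free(line);                               // lands on free_blocks
    free(tbl);                                // may coalesce with the block freed above

    // A later malloc() of 64 bytes or less can now reuse the freed space.
}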