summaryrefslogtreecommitdiff
path: root/kernel/kmalloc.c
diff options
context:
space:
mode:
authorMiguel <m.i@gmx.at>2018-09-15 17:53:27 +0200
committerMiguel <m.i@gmx.at>2018-09-15 17:53:27 +0200
commitcd50c8d1047832bbb0798b368fde0428ef749422 (patch)
treefcacf85f58fefeffa482630f31ef208a8bc9d03a /kernel/kmalloc.c
parent0b010d22dbf845ad030e2e7320f9c5935b2604a4 (diff)
improved in-kernel alloc/dealloc. added colorless logging and struggling with mouse and kb
Diffstat (limited to 'kernel/kmalloc.c')
-rw-r--r--kernel/kmalloc.c117
1 files changed, 87 insertions, 30 deletions
diff --git a/kernel/kmalloc.c b/kernel/kmalloc.c
index eb35a44..356aab0 100644
--- a/kernel/kmalloc.c
+++ b/kernel/kmalloc.c
@@ -1,61 +1,118 @@
#include "kernel.h"
#include "kmalloc.h"
#include "spinlock.h"
#include "log.h"

// number of allocatable blocks; parenthesized so the macro expands
// safely inside larger expressions (e.g. a*BLOCKS, x%BLOCKS)
#define BLOCKS (KMALLOC_MEM_SIZE/KMALLOC_BLOCK_SIZE)

// this is in .bss so we can assume it was zeroed!
static uint8_t data[KMALLOC_MEM_SIZE] __attribute__((aligned (4096))); // bytes
// per-block allocation map: 0 = free, otherwise the count-down pattern
// written by mark_used() (entry at an allocation's start holds its length)
static uint8_t map[BLOCKS];
static uint32_t data_addr; // address of data[0], cached by kmallocinit()
static uint32_t next;
static uint32_t first;
static uint8_t init=0; // set once kmallocinit() has run
+static uint32_t next_free(uint32_t start)
+{
+ for(int i=start;i<BLOCKS;i++)
+ {
+ if(!map[i])return i;
+ return next_free(i+map[i]);
+ }
+ return BLOCKS; // out of mem
+}
+
+static uint32_t next_used(uint32_t start,uint32_t max)
+{
+ for(int i=start;i<BLOCKS;i++)
+ {
+ if(map[i])return i;
+ //means i is free
+ if(i-start>=max)return i;
+ }
+ return BLOCKS; // all free
+}
+
+static uint32_t free_cont(uint32_t blocks)
+{
+ uint32_t pos=0;
+
+ while(1)
+ {
+ pos=next_free(pos);
+// klog("next_free:%d",pos);
+ if(pos==BLOCKS)return BLOCKS; // out of mem
+ uint32_t end=next_used(pos,blocks);
+// klog("next_used:%d",end);
+ if(end-pos>=blocks)return pos;
+// klog("here we have only %d blocks but we need at least %d",end-pos+1,blocks);
+ pos=end;
+ }
+}
+
+static void mark_used(uint32_t start,uint32_t blocks)
+{
+ uint32_t b=blocks;
+ for(int i=start;i<start+blocks;i++)
+ {
+ if(map[i]!=0)kpanic("memory map corrupted?");
+ map[i]=b;
+ b--;
+ }
+}
+
+static void mark_free(uint32_t start,uint32_t blocks)
+{
+ if(map[start]!=blocks)
+ {
+ kpanic("mark_free(%d,%d),mem map corrupted at %d (value=%d)?",start,blocks,start,map[start]);
+ }
+
+ if(start!=0&&(map[start-1]!=1&&map[start-1]!=0))
+ {
+ kpanic("mem map corrupted one before %d (value=%d)?",start,map[start-1]);
+ }
+
+ map[start]=0;
+ if(blocks>1)mark_free(start+1,blocks-1);
+}
+
// will be initialized on first call to kballoc() //
static void kmallocinit()
{
	// explicit cast: assigning a pointer to a uint32_t without one is a
	// constraint violation in C (fine on this 32-bit target with a cast)
	next=(uint32_t)&data[0];
	data_addr=next;
	first=next;
	// data[] carries __attribute__((aligned(4096))), so this should never fire
	if(next%4096)kpanic("kmalloc data not aligned properly.");
	// blocks are 4096 bytes (4KB) each - the old message claimed "4096KB"
	klog("In-Kernel Block Memory Allocation Initialized at: 0x%08X (free: %d x 4KB BLOCKS)",next,BLOCKS);
	init=1;
}
// kernel block memory allocation //
uint32_t kballoc(uint32_t size)
{
- size*=4096;
+ if(size>255)kpanic("max supported size 255 blocks");
spinlock_spin(SPINLOCK_ALLOC);
+ if(!init)kmallocinit();
- if(!init)kmallocinit();
-
- uint32_t old=next;
- next+=size;
-
+ uint32_t blk=free_cont(size);
+ if(blk==BLOCKS)kpanic("out of mem");
+ mark_used(blk,size);
spinlock_release(SPINLOCK_ALLOC);
- if(next>=first+KMALLOC_MEM_SIZE)
- {
- kpanic("kballoc ran out of memory! maybe increase KMALLOC_MEM_SIZE in kmalloc.c?");
- }
-
-// klog("(%d) : 0x%08X (~%dKB left)",size,old,(KMALLOC_MEM_SIZE-next+first)/1024);
- return old;
+ return data_addr+blk*4096;
}
-//TODO: allow freeing memory!!
-uint32_t kbfree(uint32_t pos)
+void kbfree(uint32_t pos)
{
- kpanic("kbfree NOT IMPLEMENTED YET");
+ uint32_t blk=(pos-data_addr)/4096;
+ spinlock_spin(SPINLOCK_ALLOC);
+ klog("freeing %d blocks ad 0x%08X",map[blk],pos);
+ mark_free(blk,map[blk]);
+ spinlock_release(SPINLOCK_ALLOC);
}
-