author    Michal Idziorek <m.i@gmx.at>  2015-05-23 00:40:15 +0200
committer Michal Idziorek <m.i@gmx.at>  2015-05-23 00:40:15 +0200
commit    566134df26612d34f2a377659c55fb0f1ae9dfa4 (patch)
tree      ac92db9a901065cb85399b100d913dce7f769a5b /kernel/ringbuffer.c
parent    573a28a2ea7534a1e85d2e0a3aa3d06b1218e08c (diff)
reverted interrupt disabling to the conservative approach; started the fs layer
Diffstat (limited to 'kernel/ringbuffer.c')
-rw-r--r--  kernel/ringbuffer.c  15
1 file changed, 8 insertions, 7 deletions
diff --git a/kernel/ringbuffer.c b/kernel/ringbuffer.c
index a749b16..cba7eb4 100644
--- a/kernel/ringbuffer.c
+++ b/kernel/ringbuffer.c
@@ -1,10 +1,14 @@
#define FOOLOS_MODULE_NAME "ringbuffer"
#include "lib/logger/log.h"
-// TODO: why do we disable interrupts? (Eg. kb input)
-
#include "ringbuffer.h"
+
+
+// TODO: interrupts are disabled here because a kb interrupt can occur at any time
+// and the kernel would then access the ringbuffer while we are still using it!
+// Do we need a spinlock in general? Either way, do not use a global one!
+
static int sl=9;
ringbuffer ringbuffer_init(uint32_t size)
@@ -38,15 +42,14 @@ volatile bool ringbuffer_put(ringbuffer* f,uint8_t c)
lock_release(sl);
x86_int_enable();
-
return true;
}
volatile bool ringbuffer_has(ringbuffer* f)
{
+ x86_int_disable();
bool res=true;
- x86_int_disable();
lock_spin(sl);
if(f->front==f->back)
@@ -54,15 +57,14 @@ volatile bool ringbuffer_has(ringbuffer* f)
lock_release(sl);
x86_int_enable();
-
return res;
}
volatile uint8_t ringbuffer_get(ringbuffer* f) // non blocking . please check first
{
+ x86_int_disable();
char c;
- x86_int_disable();
lock_spin(sl);
if(f->front==f->back)
@@ -80,6 +82,5 @@ volatile uint8_t ringbuffer_get(ringbuffer* f) // non blocking . please check fi
lock_release(sl);
x86_int_enable();
-
return c;
}
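
For context, below is a minimal, self-contained sketch of the pattern this diff settles on: every ringbuffer operation runs inside a critical section that disables interrupts and takes a spinlock, so a keyboard interrupt firing mid-operation cannot see the buffer in a half-updated state. The struct fields data/size, the full-buffer check in put, the enter_critical()/leave_critical() helpers, and main() are assumptions added for illustration; in the kernel they correspond to x86_int_disable()/lock_spin(sl) on entry and lock_release(sl)/x86_int_enable() on exit, as shown in the diff.

/*
 * Sketch of the guarded ring buffer pattern from this commit.
 * Field names data/size and the critical-section helpers are assumptions;
 * the kernel uses x86_int_disable()/x86_int_enable() and lock_spin()/lock_release().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    uint8_t  *data;
    uint32_t  size;
    uint32_t  front;   /* read index  */
    uint32_t  back;    /* write index */
} ringbuffer;

/* Stand-ins for "disable interrupts, then take the spinlock" and the reverse. */
static void enter_critical(void) { /* x86_int_disable(); lock_spin(sl);     */ }
static void leave_critical(void) { /* lock_release(sl);  x86_int_enable();  */ }

static ringbuffer ringbuffer_init(uint32_t size)
{
    ringbuffer f = { malloc(size), size, 0, 0 };
    return f;
}

/* Returns false when the buffer is full (one slot is kept free). */
static bool ringbuffer_put(ringbuffer *f, uint8_t c)
{
    enter_critical();
    bool ok = ((f->back + 1) % f->size) != f->front;
    if (ok) {
        f->data[f->back] = c;
        f->back = (f->back + 1) % f->size;
    }
    leave_critical();
    return ok;
}

static bool ringbuffer_has(ringbuffer *f)
{
    enter_critical();
    bool res = (f->front != f->back);
    leave_critical();
    return res;
}

/* Non-blocking: call ringbuffer_has() first, as the original comment advises. */
static uint8_t ringbuffer_get(ringbuffer *f)
{
    enter_critical();
    uint8_t c = 0;
    if (f->front != f->back) {
        c = f->data[f->front];
        f->front = (f->front + 1) % f->size;
    }
    leave_critical();
    return c;
}

int main(void)
{
    ringbuffer rb = ringbuffer_init(8);
    ringbuffer_put(&rb, 'x');
    while (ringbuffer_has(&rb))
        printf("%c\n", ringbuffer_get(&rb));
    free(rb.data);
    return 0;
}

The ordering the diff moves toward, x86_int_disable() as the first statement and x86_int_enable() only after lock_release(), is the usual conservative choice on a uniprocessor kernel: if an interrupt handler could run while the interrupted code still holds the lock, it would spin on that lock with no way for the holder to ever release it.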