author    Miguel <m.i@gmx.at>  2018-09-16 11:39:36 +0200
committer Miguel <m.i@gmx.at>  2018-09-16 11:39:36 +0200
commit    740ae2e69995df37c44fe61f57642ee642982ca2 (patch)
tree      b84ec79e9243fd0fe103fab1c1acc1e91e60f53d /kernel/scheduler.c
parent    e0449c5adc89eec9f378cb40a56762bf314a80ea (diff)
cleanup and start improving mounts and file access (pipes, sysfiles, ext2)
Diffstat (limited to 'kernel/scheduler.c')
-rw-r--r--  kernel/scheduler.c | 60
1 file changed, 43 insertions(+), 17 deletions(-)
diff --git a/kernel/scheduler.c b/kernel/scheduler.c
index b5d1b34..798d119 100644
--- a/kernel/scheduler.c
+++ b/kernel/scheduler.c
@@ -13,11 +13,16 @@
#include "vmem.h"
#include "spinlock.h"
+#include "ringbuffer.h"
+#include "keyboard.h"
#include "syscalls.h"
#include "fs/ext2.h"
#define NO_TASK 0xffffffff
+//TODO: ugly!
+extern ringbuffer kb_in;
+
static volatile uint32_t pid=1000;
static uint32_t nextPID()
@@ -105,6 +110,19 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
task_pusha(task_list[cpu][2].esp);
}
+static uint32_t scheduler_schedule(uint32_t idx)
+{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ if(task_list[cpu][idx].active && !task_list[cpu][idx].syscall)
+ {
+ current_task[cpu]=idx;
+ install_tss(cpu,task_list[cpu][idx].esp0);
+ x86_set_page_directory(task_list[cpu][idx].vmem);
+ return task_list[cpu][idx].esp;
+ }
+ return 0;
+}
+
//
// REMEMBER WE ARE INSIDE AN INTERRUPT HERE - DON'T WASTE TIME!
//
@@ -115,39 +133,36 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
//
// we need to return a NEW stack pointer where popa will get the registers the new task requires
//
-volatile uint32_t scheduler_run(uint32_t oldesp)
+
+volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t preference)
{
uint32_t cpu=smp_get(SMP_APIC_ID);
uint32_t init=smp_get(SMP_SCHEDULER_INIT);
+
if(init){
scheduler_init(cpu,x86_get_page_directory());
smp_set(SMP_SCHEDULER_INIT,0);
}
+
else task_list[cpu][current_task[cpu]].esp=oldesp;
+ uint32_t esp;
+ esp=scheduler_schedule(preference); // try preference
+ if(esp)return esp;
+
for(int i=0;i<MAX_TASKS;i++)
{
int idx=(current_task[cpu]+1+i)%MAX_TASKS; // schedule round robin style
if(idx==2)continue;// skip sleeper here
- if(task_list[cpu][idx].active && !task_list[cpu][idx].syscall) // find active non-blocked task
- {
- //TODO: do NOT do this! deadlock imminent!
- //if(cpu==0)klog("schedule %d->%d on cpu %d",current_task[cpu],idx,cpu );
- current_task[cpu]=idx;
- install_tss(cpu,task_list[cpu][idx].esp0);
- x86_set_page_directory(task_list[cpu][idx].vmem);
- return task_list[cpu][idx].esp;
- }
+ esp=scheduler_schedule(idx); // try the next slot, round-robin
+ if(esp)return esp;
+
}
// force the sleeper task
- current_task[cpu]=2;
- install_tss(cpu,task_list[cpu][2].esp0);
- x86_set_page_directory(task_list[cpu][2].vmem);
- return task_list[cpu][2].esp;
+ return scheduler_schedule(2);
-// kpanic("nothing to schedule!");
}
@@ -232,8 +247,7 @@ uint32_t scheduler_wake_worker(uint32_t oldesp)
{
uint32_t cpu=smp_get(SMP_APIC_ID);
task_list[cpu][0].syscall=false; // wake (syscall misused)
- scheduler_wake_all();
- return scheduler_run(oldesp);
+ return scheduler_run(oldesp,0);
}
void scheduler_wake_all()
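
For illustration, a minimal sketch of the interrupt-stub contract the scheduler comments describe: pusha saves the interrupted task's registers, the scheduler returns the esp of the next runnable task, and popa/iret resume it on that stack. The stub label and the use of GCC top-level inline asm are assumptions; the real entry code is not part of this commit.

__asm__(
    ".globl wake_worker_stub\n"        /* hypothetical stub name                   */
    "wake_worker_stub:\n"
    "  pusha\n"                        /* save the interrupted task's registers    */
    "  movl %esp, %eax\n"              /* oldesp = top of the saved register frame */
    "  pushl %eax\n"                   /* pass oldesp as the only argument         */
    "  call scheduler_wake_worker\n"   /* returns the next task's esp in eax       */
    "  movl %eax, %esp\n"              /* switch to the returned stack             */
    "  popa\n"                         /* restore the next task's registers        */
    "  iret\n"
);
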
@@ -259,6 +273,18 @@ void task_syscall_worker()
while(1)
{
+ bool wake=false;
+
+ // TODO: move to user program!
+ x86_cli(); // temporarily disable mouse/keyboard/timer interrupts.
+ while(ringbuffer_has(&kb_in)){
+ wake=true;
+ keyboard_handle(ringbuffer_get(&kb_in));
+ }
+ x86_sti();
+
+ if(wake)scheduler_wake_all();
+
for(int i=0;i<MAX_TASKS;i++)
{
if(task_list[cpu][i].active && task_list[cpu][i].try && task_list[cpu][i].syscall)
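
The drain loop in task_syscall_worker assumes something fills kb_in from the keyboard interrupt. A minimal sketch of that producer side, not part of this commit: the handler name, ringbuffer_put() and x86_inb() are assumptions modelled on the ringbuffer/keyboard/x86 helpers included above.

#include <stdint.h>
#include "ringbuffer.h"
#include "keyboard.h"

extern ringbuffer kb_in;   /* the same buffer task_syscall_worker drains */

/* Hypothetical keyboard IRQ handler: it only enqueues the raw scancode and
 * returns, so the actual key handling happens in task_syscall_worker, which
 * drains kb_in with local interrupts masked. */
void keyboard_irq_handler(void)
{
    uint8_t scancode = x86_inb(0x60);   /* assumed port-read helper (PS/2 data port) */
    ringbuffer_put(&kb_in, scancode);   /* assumed counterpart to ringbuffer_get()   */
}
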