Diffstat (limited to 'kernel/scheduler.c')
 kernel/scheduler.c | 182 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 132 insertions(+), 50 deletions(-)
diff --git a/kernel/scheduler.c b/kernel/scheduler.c
index 541eb15..3b50259 100644
--- a/kernel/scheduler.c
+++ b/kernel/scheduler.c
@@ -19,13 +19,13 @@
#define NO_TASK 0xffffffff
-static volatile uint32_t pid=1;
+static volatile uint32_t pid=1000;
-uint32_t nextPID()
+static uint32_t nextPID()
{
spinlock_spin(SPINLOCK_PID);
- uint32_t ret=pid;
pid++;
+ uint32_t ret=pid;
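+ // ret is read after the increment, so the first pid handed out is 1001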
spinlock_release(SPINLOCK_PID);
return ret;
}
@@ -37,7 +37,7 @@ static volatile uint32_t current_task[SMP_MAX_PROC];
static volatile struct task_list_struct
{
volatile bool active; // is this slot used (Y/N)
- volatile uint32_t pid; // process id (TODO)
+ volatile uint32_t pid; // process id
volatile uint32_t parent; // parent process id
volatile uint32_t esp; // stack pointer of the task
@@ -46,11 +46,14 @@ static volatile struct task_list_struct
volatile uint32_t brk; // memory brk pos
- volatile bool wait; // waiting for syscall to be processed.
+ volatile bool try; // retry pending: the syscall has not been processed yet
+ volatile bool syscall; // syscall in progress
volatile uint32_t eax;
volatile uint32_t ebx;
volatile uint32_t ecx;
volatile uint32_t edx;
+
+ volatile bool thread; // is this a lightweight thread (shares vmem with its parent)?
}task_list[SMP_MAX_PROC][MAX_TASKS];
@@ -70,7 +73,8 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
task_list[cpu][0].parent=0;
task_list[cpu][0].pid=nextPID();
task_list[cpu][0].active=true;
- task_list[cpu][0].wait=false;
+ task_list[cpu][0].syscall=false;
+ task_list[cpu][0].thread=false;
task_list[cpu][0].vmem=dir;
task_list[cpu][0].esp = VMEM_CPU_STACK_TOP-0x200;
task_list[cpu][0].esp0 = 0; // esp0 not needed by kernel space tasks
@@ -81,7 +85,8 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
task_list[cpu][1].parent=0;
task_list[cpu][1].pid=nextPID();
task_list[cpu][1].active=true;
- task_list[cpu][1].wait=false;
+ task_list[cpu][1].thread=false;
+ task_list[cpu][1].syscall=false;
task_list[cpu][1].vmem=dir;
task_list[cpu][1].esp = kballoc(4)+4*4096-0x200; // 4 pages stack
task_list[cpu][1].esp0 = kballoc(4)+4*4096; // top of a fresh 4-page kernel stack
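+ // slot 0 becomes this cpu's syscall worker and slot 1 boots the init binary (see scheduler_func)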
@@ -114,7 +119,7 @@ volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t force_pid)
{
int idx=(current_task[cpu]+1+i)%MAX_TASKS; // schedule round robin style
- if(task_list[cpu][idx].active && !task_list[cpu][idx].wait) // find active non-blocked task
+ if(task_list[cpu][idx].active && !task_list[cpu][idx].syscall) // find active non-blocked task
{
//TODO: do NOT do this! deadlock imminent!
//if(cpu==0)klog("schedule %d->%d on cpu %d",current_task[cpu],idx,cpu );
@@ -136,13 +141,13 @@ void scheduler_func()
uint32_t cpu=smp_get(SMP_APIC_ID);
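+ // compare slot indices, not pids: pids are now globally unique and start above 1000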
- if(task_get_current_pid()==0)
+ if(current_task[cpu]==0)
while(1)
{
task_syscall_worker();
}
- if(task_get_current_pid()==1)
+ if(current_task[cpu]==1)
while(1)
{
@@ -150,38 +155,36 @@ void scheduler_func()
{
uint32_t alloc;
uint32_t entry_global=load_elf(BIN_INIT,&alloc);
- task_set_brk(alloc);
+ task_set_brk(task_get_current_pid(),alloc);
asm_usermode(entry_global);
while(1);
}
}
}
-volatile int task_reset(uint32_t pid, uint32_t entry, uint32_t stack)
+volatile int add_task(uint32_t parent_pid,uint32_t vmem, bool thread)
{
+ uint32_t parent=task_runs(parent_pid); // slot index of the running parent on this cpu
uint32_t cpu=smp_get(SMP_APIC_ID);
- uint32_t *stk=task_list[cpu][pid].esp;
- stk[14]=entry;
- stk[17]=stack;
- return 1;
-}
-volatile int add_task(uint32_t parent,uint32_t vmem)
-{
- uint32_t cpu=smp_get(SMP_APIC_ID);
for(int i=0;i<MAX_TASKS;i++)
{
- if(task_list[cpu][i].active!=true)
+ if(task_list[cpu][i].active!=true) // find a free slot.
{
task_list[cpu][i].pid=nextPID();
+ task_list[cpu][i].parent=parent_pid;
+ task_list[cpu][i].thread=thread;
- task_list[cpu][i].parent=task_list[cpu][parent].pid;
task_list[cpu][i].vmem=vmem;
- task_list[cpu][i].esp = kballoc(4)+2*4096; // center
+ task_list[cpu][i].esp = kballoc(4)+2*4096; // middle of the 4-page block
task_list[cpu][i].esp0 = kballoc(4)+4*4096;
- task_list[cpu][i].wait=false;
- task_list[cpu][i].brk=task_list[cpu][current_task[cpu]].brk;
+
+ task_list[cpu][i].active=true; //TODO: LOCK! (likewise at the other similar sites)
+ task_list[cpu][i].syscall=false;
+ task_list[cpu][i].try=false;
+
+ task_list[cpu][i].brk=task_list[cpu][parent].brk;
uint32_t *source=(uint32_t *)task_list[cpu][parent].esp;
uint32_t *dst=(uint32_t *)task_list[cpu][i].esp;
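+ // the parent's saved frame is copied below so the child resumes from the same point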
@@ -197,13 +200,22 @@ volatile int add_task(uint32_t parent,uint32_t vmem)
stack[12]=0x1;
stack[13]=0; // the new child returns pid=0 to its caller
- task_list[cpu][i].active=true; //TODO: LOCK! (also other similar)
- return i;
+ return task_list[cpu][i].pid;
}
}
kpanic("out of task slots!");
}
+void task_wake_all()
+{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ // simple approach: any completed syscall may unblock any other pending one. TODO: do better! TODO: cover all cpus!
+ for(int i=0;i<MAX_TASKS;i++)
+ {
+ task_list[cpu][i].try=true;
+ }
+}
+
/**
* kernel space worker thread
@@ -211,40 +223,49 @@ volatile int add_task(uint32_t parent,uint32_t vmem)
* we can get interrupted by an interrupt ANYTIME!
*
*/
-
void task_syscall_worker()
{
+ // TODO: cross-check all cpus!
uint32_t cpu=smp_get(SMP_APIC_ID);
+
while(1)
{
- bool nowork=true;
for(int i=0;i<MAX_TASKS;i++)
{
- if(task_list[cpu][i].wait)
+ if(task_list[cpu][i].active && task_list[cpu][i].try && task_list[cpu][i].syscall) // active slot with a pending, unprocessed syscall
{
+
uint32_t syscall=task_list[cpu][i].eax;
- klog("task %d waiting on syscall %d/%s. processing...",i,syscall,syscall_get_name(syscall));
+ klog("task pid=%d waiting on syscall %d/%s on cpu %d slot %d.",task_list[cpu][i].pid,syscall,syscall_get_name(syscall),cpu,i);
+
task_list[cpu][0].vmem=task_list[cpu][i].vmem; // switch syscall worker to pagedir of calling userprog
x86_set_page_directory(task_list[cpu][0].vmem);
- nowork=false;
-
- uint32_t ret = syscall_generic(task_list[cpu][i].eax,
+ uint32_t ok = syscall_generic_test(task_list[cpu][i].eax,
task_list[cpu][i].edx,
task_list[cpu][i].ecx,
task_list[cpu][i].ebx,
- i);
+ task_list[cpu][i].pid);
- if(task_list[cpu][i].eax==SYSCALL_WAIT)
+ if(!ok)
{
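+ // not ready yet (e.g. wait() with no exited child): park until task_wake_all() retries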
+ task_list[cpu][i].try=false;
continue;
}
+ uint32_t ret = syscall_generic(task_list[cpu][i].eax,
+ task_list[cpu][i].edx,
+ task_list[cpu][i].ecx,
+ task_list[cpu][i].ebx,
+ task_list[cpu][i].pid);
+
+ task_wake_all();
+
uint32_t *stack=task_list[cpu][i].esp;
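+ // patch the saved frame: slot 13 holds eax, so the task sees ret as its syscall result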
stack[12]=0x1;
stack[13]=ret;
- task_list[cpu][i].wait=false;
+ task_list[cpu][i].syscall=false;
}
}
@@ -254,48 +275,109 @@ void task_syscall_worker()
}
}
-
volatile uint32_t task_syscall(uint32_t eax,uint32_t ebx, uint32_t ecx, uint32_t edx)
{
uint32_t cpu=smp_get(SMP_APIC_ID);
- task_list[cpu][current_task[cpu]].wait=true;
+ task_list[cpu][current_task[cpu]].syscall=true;
+ task_list[cpu][current_task[cpu]].try=true;
task_list[cpu][current_task[cpu]].eax=eax;
task_list[cpu][current_task[cpu]].ebx=ebx;
task_list[cpu][current_task[cpu]].ecx=ecx;
task_list[cpu][current_task[cpu]].edx=edx;
- task_list[cpu][0].wait=false;
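+ // the caller is now skipped by scheduler_run() until the worker clears .syscall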
return 1;
}
volatile uint32_t task_fork(uint32_t pid)
{
+ uint32_t idx=task_runs(pid);
uint32_t cpu=smp_get(SMP_APIC_ID);
- int ret=add_task(pid,vmem_new_space_dir(task_list[cpu][pid].vmem,false));
- klog("[%d] forked -> [%d] (free blocks remaining: %d )", pid, ret,0);
+ int ret=add_task(pid,vmem_new_space_dir(task_list[cpu][idx].vmem,false),false);
+ klog("[%d] forked -> [%d]", pid, ret);
return ret;
}
+
volatile uint32_t task_clone(uint32_t pid)
{
+ uint32_t idx=task_runs(pid);
uint32_t cpu=smp_get(SMP_APIC_ID);
- int ret=add_task(pid,vmem_new_space_dir(task_list[cpu][pid].vmem,true));
- klog("[%d] cloned -> [%d] (free blocks remaining: %d )", pid, ret,0);
+ int ret=add_task(pid,vmem_new_space_dir(task_list[cpu][idx].vmem,true),true);
+ klog("[%d] cloned -> [%d]", pid, ret);
return ret;
}
-volatile int task_get_current_pid()
+volatile uint32_t task_get_brk(uint32_t pid)
{
uint32_t cpu=smp_get(SMP_APIC_ID);
- return current_task[cpu];
+ uint32_t idx=task_idx(pid);
+ return task_list[cpu][idx].brk;
}
-volatile uint32_t task_get_brk()
+volatile void task_set_brk(uint32_t pid, uint32_t brk)
{
uint32_t cpu=smp_get(SMP_APIC_ID);
- return task_list[cpu][current_task[cpu]].brk;
+ uint32_t idx=task_idx(pid);
+ task_list[cpu][idx].brk=brk;
}
-volatile void task_set_brk(uint32_t brk)
+volatile uint32_t task_get_current_pid()
{
uint32_t cpu=smp_get(SMP_APIC_ID);
- task_list[cpu][current_task[cpu]].brk=brk;
+ return task_list[cpu][current_task[cpu]].pid;
+}
+
+volatile uint32_t task_get_parent(uint32_t pid)
+{
+ uint32_t idx=task_idx(pid);
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ return task_list[cpu][idx].parent;
+}
+
+volatile int task_reset(uint32_t pid, uint32_t entry, uint32_t stack,uint32_t brk)
+{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ uint32_t idx=task_idx(pid);
+ uint32_t *stk=task_list[cpu][idx].esp;
+ task_list[cpu][idx].brk=brk;
+
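+ // assumed saved-frame layout: stk[14] is the stored eip (entry) and stk[17] the user stack pointer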
+ stk[14]=entry;
+ stk[17]=stack;
+ return 1;
+}
+
+uint32_t task_idx(uint32_t pid)
+{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+
+ for(int i=0;i<MAX_TASKS;i++)
+ {
+ if(task_list[cpu][i].pid==pid) return i;
+ }
+
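+ // not found: fall back to slot 0 (the syscall worker) rather than report an error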
+ return 0;
+}
+
+uint32_t task_runs(uint32_t pid)
+{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+
+ for(int i=0;i<MAX_TASKS;i++)
+ {
+ if(task_list[cpu][i].active && task_list[cpu][i].pid==pid) return i;
+ }
+
+ return 0;
+}
+
+void task_exit(uint32_t pid)
+{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ uint32_t idx=task_runs(pid);
+
+ for(int i=0;i<MAX_TASKS;i++)
+ {
+ if(task_list[cpu][i].active && task_list[cpu][i].pid==pid)
+ task_list[cpu][i].active=false;
+ }
+
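+ // caution: idx falls back to 0 when the pid is not running, and light threads share
+ // their parent's vmem; freeing the directory unconditionally may hit the wrong one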
+ vmem_free_space_dir(task_list[cpu][idx].vmem,false);
}