summaryrefslogtreecommitdiff
path: root/kernel/scheduler.c
diff options
context:
space:
mode:
authorMiguel <m.i@gmx.at>2018-09-12 02:10:11 +0200
committerMiguel <m.i@gmx.at>2018-09-12 02:10:11 +0200
commita3ee159ebfd2d088432e386e8809840784f697e7 (patch)
tree9ecfc506bb65ec982aae47f161afa1777359d3f7 /kernel/scheduler.c
parentc9351caacd49c8442cc586f9e53a2dcc49a004aa (diff)
working on smp scheduler
Diffstat (limited to 'kernel/scheduler.c')
-rw-r--r--kernel/scheduler.c361
1 files changed, 192 insertions, 169 deletions
diff --git a/kernel/scheduler.c b/kernel/scheduler.c
index cac5ba7..d0b4b68 100644
--- a/kernel/scheduler.c
+++ b/kernel/scheduler.c
@@ -2,6 +2,7 @@
#include "kernel.h"
#include "gdt.h"
+#include "smp.h"
#include "mem.h"
#include "fs/elf.h"
#include "asm_x86.h"
@@ -16,9 +17,10 @@
#define NO_TASK 0xffffffff
-static volatile uint32_t current_task=NO_TASK;
+// we hold this stuff per cpu
+static volatile uint32_t current_task[SMP_MAX_PROC];
-// TODO: per cpu!
+// we hold this stuff per cpu
static volatile struct task_list_struct
{
volatile bool active; // is this slot used (Y/N)
@@ -37,11 +39,142 @@ static volatile struct task_list_struct
volatile uint32_t ecx;
volatile uint32_t edx;
-}task_list[MAX_TASKS];
+}task_list[SMP_MAX_PROC][MAX_TASKS];
+
+// init tasks //
+// Build this cpu's task table from scratch: slot 0 is the current task
+// running on the per-cpu boot stack, slot 1 gets a freshly allocated
+// 4-page kernel stack. dir is the page directory both tasks start with.
+volatile void scheduler_init(uint32_t cpu, void *dir)
+{
+	for(int i=0;i<MAX_TASKS;i++)
+	{
+		task_list[cpu][i].active=false;
+	}
+
+	current_task[cpu]=0;
+
+	// need to make space on the esp stacks for pushing vals via task_pusha
+
+	// this is our main kernel task at slot 0 (per cpu)
+	task_list[cpu][0].parent=0;
+	task_list[cpu][0].active=true;
+	task_list[cpu][0].wait=false;
+	task_list[cpu][0].vmem=dir;
+	task_list[cpu][0].esp = VMEM_CPU_STACK_TOP-0x200;
+	task_list[cpu][0].esp0 = 0; // esp0 not needed by kernel space tasks
+
+	// this is our second kernel task at slot 1 (per cpu)
+	task_list[cpu][1].parent=0;
+	task_list[cpu][1].active=true;
+	task_list[cpu][1].wait=false;
+	task_list[cpu][1].vmem=dir;
+	task_list[cpu][1].esp = kballoc(4)+4*4096-0x200; // 4 pages stack
+	task_list[cpu][1].esp0 =kballoc(4)+4*4096; // ring-0 stack (unlike slot 0, this one gets one)
+
+	// seed one initial register frame per task so the scheduler's first
+	// popa has something to restore (slot 0 was pushed twice before - fixed)
+	task_pusha(task_list[cpu][0].esp);
+	task_pusha(task_list[cpu][1].esp);
+}
+
+//
+// REMEMBER WE ARE INSIDE AN INTERRUPT HERE - DON'T WASTE TIME!
+//
+// oldesp - is the address of the stack pointer when pit_interrupt_handler was entered.
+// registers have been pushed with pusha to this old stack.
+//
+// force_pid - NOTE(review): currently ignored. The old my_scheduler nominally
+// honored it (though its unsigned force_pid>-1 check was dead code) - confirm
+// whether forcing a pid is still required by any caller.
+//
+// stack pointer was moved to the 16kb stack we have from multiboot.s
+//
+// we need to return a NEW stack pointer where popa will get the registers the new task requires
+//
+volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t force_pid)
+{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ uint32_t init=smp_get(SMP_SCHEDULER_INIT);
+ if(init){
+  // first tick on this cpu: build the per-cpu task table lazily;
+  // oldesp is deliberately discarded - the boot context is never resumed
+  scheduler_init(cpu,x86_get_page_directory());
+  smp_set(SMP_SCHEDULER_INIT,0);
+ }
+ else task_list[cpu][current_task[cpu]].esp=oldesp; // remember where the interrupted task's pusha frame lives
+
+ for(int i=0;i<MAX_TASKS;i++)
+ {
+  int idx=(current_task[cpu]+1+i)%MAX_TASKS; // schedule round robin style
+
+  if(task_list[cpu][idx].active && !task_list[cpu][idx].wait) // find active non-blocked task
+  {
+   current_task[cpu]=idx;
+
+   install_tss(cpu,task_list[cpu][idx].esp0); // ring-0 stack used on the next user->kernel transition
+   x86_set_page_directory(task_list[cpu][idx].vmem);
+   return task_list[cpu][idx].esp;
+  }
+ }
+
+ kpanic("nothing to schedule!");
+}
+
+
+// Entry point of every kernel task created by scheduler_init: dispatch on
+// our own pid. pid 0 services syscalls forever; pid 1 (on cpu 0 only)
+// loads /bin/init and drops to usermode. Never returns.
+void scheduler_func()
+{
+ // we need enable here again (since the pushed eflags have it disabled)? TODO: why??
+ x86_sti();
+
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+
+ // pid 0: kernel worker that polls for pending syscalls forever
+ if(task_get_current_pid()==0)
+ {
+  while(1)
+  {
+   task_syscall_worker();
+  }
+ }
+
+ // pid 1: on the boot cpu, become /bin/init; other cpus spin for now
+ if(task_get_current_pid()==1)
+ {
+  while(1)
+  {
+   if(cpu==0)
+   {
+    uint32_t alloc;
+    uint32_t entry_global=load_elf(BIN_INIT,&alloc);
+    task_set_brk(alloc);
+    klog("program break: 0x%08x",alloc); // was mislabelled "breakpoint" - this is the brk
+    asm_usermode(entry_global);
+    while(1); // asm_usermode should not return
+   }
+// else syscall_write(1, "x",1); // stdout
+  }
+ }
+}
+ //asm("hlt");
+ /*
+
+ // if we are pid 0, replace ourselves with /bin/init and enter usermode
+ if(task_get_current_pid()==0)
+ {
+ uint32_t alloc;
+ uint32_t entry_global=load_elf(BIN_INIT,&alloc);
+ task_set_brk(alloc);
+ asm_usermode(entry_global);
+ }
+
+ // kernel worker thread: SLEEPER
+ if(task_get_current_pid()==1)
+ {
+ while(1)
+ {
+ __asm__("hlt");
+ }
+ }
+
+ // kernel worker thread: SYSCALL CHECKER
+ if(task_get_current_pid()==2)
+ {
+ task_syscall_worker();
+ }
+ */
volatile int task_reset(uint32_t pid, uint32_t entry, uint32_t stack)
{
- uint32_t *stk=task_list[pid].esp;
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ uint32_t *stk=task_list[cpu][pid].esp;
stk[14]=entry;
stk[17]=stack;
return 1;
@@ -49,24 +182,25 @@ volatile int task_reset(uint32_t pid, uint32_t entry, uint32_t stack)
volatile int add_task(uint32_t parent,uint32_t vmem)
{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
for(int i=0;i<MAX_TASKS;i++)
{
- if(task_list[i].active!=true)
+ if(task_list[cpu][i].active!=true)
{
- task_list[i].parent=parent;
+ task_list[cpu][i].parent=parent;
// TODO: do this without paging please!
- task_list[i].vmem=vmem;
- task_list[i].esp = kballoc(4)+2*4096; // center
+ task_list[cpu][i].vmem=vmem;
+ task_list[cpu][i].esp = kballoc(4)+2*4096; // center
// TODO: use own page here and copy it instead of this!
- task_list[i].esp0 = kballoc(4)+4*4096;
+ task_list[cpu][i].esp0 = kballoc(4)+4*4096;
- task_list[i].wait=false;
- task_list[i].brk=task_list[current_task].brk;
+ task_list[cpu][i].wait=false;
+ task_list[cpu][i].brk=task_list[cpu][current_task[cpu]].brk;
- uint32_t *source=(uint32_t *)task_list[parent].esp;
- uint32_t *dst=(uint32_t *)task_list[i].esp;
+ uint32_t *source=(uint32_t *)task_list[cpu][parent].esp;
+ uint32_t *dst=(uint32_t *)task_list[cpu][i].esp;
for(int x=0;x<100;x++) //TODO: better copy this page too instead of stack
{
@@ -75,11 +209,11 @@ volatile int add_task(uint32_t parent,uint32_t vmem)
source++;
}
- uint32_t *stack=task_list[i].esp;
+ uint32_t *stack=task_list[cpu][i].esp;
stack[12]=0x1;
stack[13]=0; // this task returns pid=0 to the caller
- task_list[i].active=true; //TODO: LOCK! (also other similar)
+ task_list[cpu][i].active=true; //TODO: LOCK! (also other similar)
return i;
}
}
@@ -97,6 +231,7 @@ volatile int add_task(uint32_t parent,uint32_t vmem)
void task_syscall_worker()
{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
while(1)
{
//klog("checking if any pending syscalls.");
@@ -104,40 +239,40 @@ void task_syscall_worker()
bool nowork=true;
for(int i=0;i<MAX_TASKS;i++)
{
- if(task_list[i].wait)
+ if(task_list[cpu][i].wait)
{
- // klog("task %d waiting on syscall %d. processing...",i,task_list[i].eax);
- task_list[2].vmem=task_list[i].vmem; // switch syscall worker to pagedir of calling userprog
- x86_set_page_directory(task_list[2].vmem);
+ // klog("task %d waiting on syscall %d. processing...",i,task_list[cpu][i].eax);
+ task_list[cpu][2].vmem=task_list[cpu][i].vmem; // switch syscall worker to pagedir of calling userprog
+ x86_set_page_directory(task_list[cpu][2].vmem);
- if(task_list[i].eax==SYSCALL_WAIT)
+ if(task_list[cpu][i].eax==SYSCALL_WAIT)
{
continue;
}
- if(task_list[i].eax==SYSCALL_READ)
+ if(task_list[cpu][i].eax==SYSCALL_READ)
{
uint32_t ok= chk_syscall_read(
- task_list[i].edx,
- task_list[i].ecx,
- task_list[i].ebx
+ task_list[cpu][i].edx,
+ task_list[cpu][i].ecx,
+ task_list[cpu][i].ebx
);
if(!ok)continue;
}
nowork=false;
- uint32_t ret= syscall_generic(task_list[i].eax,
- task_list[i].edx,
- task_list[i].ecx,
- task_list[i].ebx,
+ uint32_t ret= syscall_generic(task_list[cpu][i].eax,
+ task_list[cpu][i].edx,
+ task_list[cpu][i].ecx,
+ task_list[cpu][i].ebx,
i);
- uint32_t *stack=task_list[i].esp;
+ uint32_t *stack=task_list[cpu][i].esp;
stack[12]=0x1;
stack[13]=ret;
- task_list[i].wait=false;
+ task_list[cpu][i].wait=false;
}
}
@@ -147,67 +282,17 @@ void task_syscall_worker()
}
}
-//
-// REMEMBER WE ARE INSIDE AN INTERRUPT HERE - DON'T WASTE TIME!
-//
-// oldesp - is the adress of the stack pointer when pit_interrupt_handler was entered.
-// registers have been pushed with pusha to this old stack.
-//
-// stack pointer was moved to the 16kb stack we have from multiboot.s
-//
-// we need to return a NEW stack pointer where popa will get the registers the new task requires
-//
-volatile uint32_t my_scheduler(uint32_t oldesp,uint32_t force_pid)
-{
- uint32_t *apic_id=0x8000000; //TODO: test cpu private pages
- //klog("scheduler 0x%x",*apic_id); // TODO: do not log we are inisde an interrupt!!
-
- //
- static bool first=true;
- if(current_task==NO_TASK)return oldesp;
- if(!first) task_list[current_task].esp=oldesp;
- first=false;
- //
-
- if(force_pid>-1)
- {
- int pid=force_pid;
- current_task=pid;
- install_tss(0,task_list[pid].esp0);
-
- x86_set_page_directory(task_list[pid].vmem);
- return task_list[pid].esp;
- }
-
- for(int i=0;i<MAX_TASKS;i++)
- {
- int pid=(current_task+1+i)%MAX_TASKS; // schedule round robin style
-
- if(task_list[pid].active && !task_list[pid].wait) // find active non-blocked task
- {
- //if(current_task!=pid)klog("switch from %d to %d", current_task, pid);
-
- current_task=pid;
- install_tss(0,task_list[pid].esp0);
-
- x86_set_page_directory(task_list[pid].vmem);
- return task_list[pid].esp;
- }
-
- }
-
- kpanic("nothing to schedule!");
-}
volatile uint32_t task_syscall(uint32_t eax,uint32_t ebx, uint32_t ecx, uint32_t edx)
{
- task_list[current_task].wait=true;
- task_list[current_task].eax=eax;
- task_list[current_task].ebx=ebx;
- task_list[current_task].ecx=ecx;
- task_list[current_task].edx=edx;
-
- task_list[2].wait=false;
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ task_list[cpu][current_task[cpu]].wait=true;
+ task_list[cpu][current_task[cpu]].eax=eax;
+ task_list[cpu][current_task[cpu]].ebx=ebx;
+ task_list[cpu][current_task[cpu]].ecx=ecx;
+ task_list[cpu][current_task[cpu]].edx=edx;
+
+ task_list[cpu][2].wait=false;
return 1;
}
@@ -215,118 +300,56 @@ volatile uint32_t task_syscall(uint32_t eax,uint32_t ebx, uint32_t ecx, uint32_t
//TODO: notify waiting parent when child finished;
volatile uint32_t task_exit(uint32_t pid)
{
- task_list[pid].active=false;
- int parent_pid=task_list[pid].parent;
- if(task_list[parent_pid].wait&&task_list[parent_pid].eax==SYSCALL_WAIT)
- task_list[parent_pid].wait=false;
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ task_list[cpu][pid].active=false;
+ int parent_pid=task_list[cpu][pid].parent;
+ if(task_list[cpu][parent_pid].wait&&task_list[cpu][parent_pid].eax==SYSCALL_WAIT)
+ task_list[cpu][parent_pid].wait=false;
klog("[%d] exit", pid);
- vmem_free_dir(task_list[pid].vmem);
+ vmem_free_dir(task_list[cpu][pid].vmem);
return 1;
}
volatile uint32_t task_wait(uint32_t pid)
{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
klog("[%d] wait", pid);
- task_list[pid].wait=true;
- task_list[pid].eax=SYSCALL_WAIT;
+ task_list[cpu][pid].wait=true;
+ task_list[cpu][pid].eax=SYSCALL_WAIT;
return 1;
}
volatile uint32_t task_fork(uint32_t pid)
{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
//TODO: what will happen if we get rescheduled!?!?!
- int ret=add_task(pid,vmem_new_space_dir(task_list[pid].vmem,false));
+ int ret=add_task(pid,vmem_new_space_dir(task_list[cpu][pid].vmem,false));
klog("[%d] forked -> [%d] (free blocks remaining: %d )", pid, ret,0);
return ret;
}
volatile uint32_t task_clone(uint32_t pid)
{
+ uint32_t cpu=smp_get(SMP_APIC_ID);
//TODO: what will happen if we get rescheduled!?!?!
- int ret=add_task(pid,vmem_new_space_dir(task_list[pid].vmem,true));
+ int ret=add_task(pid,vmem_new_space_dir(task_list[cpu][pid].vmem,true));
klog("[%d] cloned -> [%d] (free blocks remaining: %d )", pid, ret,0);
return ret;
}
-// init task (root of all other tasks / processes) //
-volatile void scheduler_init(void *dir)
-{
- for(int i=0;i<MAX_TASKS;i++)
- {
- task_list[i].active=false;
- }
-
- current_task=0;
-
- // this is our main user task on slot 0
- task_list[0].parent=0;
- task_list[0].active=true;
- task_list[0].wait=false;
- task_list[0].vmem=dir;
- task_list[0].esp = kballoc(4)+3*4096;
- task_list[0].esp0 = kballoc(4)+4*4096;
-
-// task_list[1].parent=0;
-// task_list[1].active=true;
-// task_list[1].waiting=false;
-// task_list[1].syscall=false;
-// task_list[1].vmem=dir;
-// task_list[1].esp = kballoc(4)+3*4096;
-// task_list[1].esp0 = 0; // not needed by kernel space tasks
-
- task_list[2].parent=0;
- task_list[2].active=true;
- task_list[2].wait=false;
- task_list[2].vmem=dir;
- task_list[2].esp = kballoc(4)+3*4096;
- task_list[2].esp0 = 0; // not needed by kernel space tasks
-
- task_pusha(task_list[2].esp);
-// task_pusha(task_list[1].esp);
- task_pusha(task_list[0].esp);
-}
-
volatile int task_get_current_pid()
{
- return current_task;
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ return current_task[cpu];
}
volatile uint32_t task_get_brk()
{
- return task_list[current_task].brk;
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ return task_list[cpu][current_task[cpu]].brk;
}
volatile void task_set_brk(uint32_t brk)
{
- task_list[current_task].brk=brk;
-}
-
-void userfunc()
-{
-
- // we need enable here again (since the pushed eflags have it disabled)!
- x86_sti();
-
- // if we are pid 0, replace ourselves with /bin/init and enter usermode
- if(task_get_current_pid()==0)
- {
- uint32_t alloc;
- uint32_t entry_global=load_elf(BIN_INIT,&alloc);
- task_set_brk(alloc);
- asm_usermode(entry_global);
- }
-
- // kernel worker thread: SLEEPER
- if(task_get_current_pid()==1)
- {
- while(1)
- {
- __asm__("hlt");
- }
- }
-
- // kernel worker thread: SYSCALL CHECKER
- if(task_get_current_pid()==2)
- {
- task_syscall_worker();
- }
+ uint32_t cpu=smp_get(SMP_APIC_ID);
+ task_list[cpu][current_task[cpu]].brk=brk;
}