| author | Miguel <m.i@gmx.at> | 2018-09-15 23:46:04 +0200 |
|---|---|---|
| committer | Miguel <m.i@gmx.at> | 2018-09-15 23:46:04 +0200 |
| commit | e0449c5adc89eec9f378cb40a56762bf314a80ea (patch) | |
| tree | 75478a642ae45647455734453794b8c1af86f0a1 /kernel/scheduler.c | |
| parent | cd50c8d1047832bbb0798b368fde0428ef749422 (diff) | |
scheduler / sleep process
Diffstat (limited to 'kernel/scheduler.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/scheduler.c | 75 |

1 file changed, 61 insertions, 14 deletions
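For orientation while reading the diff: the commit reserves per-CPU task slot 2 as a "sleeper" (idle) task, skips it during the normal round-robin scan, and falls back to it when nothing else is runnable, replacing the old kpanic("nothing to schedule!"). The sketch below reconstructs the task-table entry purely from the fields this diff touches; the actual definition in the repository may differ in names, types and extra members, and MAX_CPUS/MAX_TASKS are placeholder values.

```c
#include <stdint.h>
#include <stdbool.h>

#define MAX_CPUS  8    /* placeholder values; the real limits are defined elsewhere in the tree */
#define MAX_TASKS 16

/* Hypothetical per-task entry, inferred only from the fields the diff reads and writes. */
typedef struct task
{
    uint32_t parent;    // PID of the parent task
    uint32_t pid;       // assigned via nextPID()
    bool     active;    // slot is in use / runnable
    bool     syscall;   // blocked in a syscall (also misused as a sleep flag for slot 0)
    bool     thread;    // shares the parent's address space
    void    *vmem;      // page directory to switch to
    uint32_t esp;       // saved stack pointer, restored by the scheduler
    uint32_t esp0;      // ring-0 stack top installed into the TSS
    uint32_t ebx;       // example of a saved syscall argument (see task_syscall_worker)
} task_t;

/* per-CPU task table: slot 0 = kernel task, slot 1 = first user task, slot 2 = sleeper */
task_t   task_list[MAX_CPUS][MAX_TASKS];
uint32_t current_task[MAX_CPUS];
```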
diff --git a/kernel/scheduler.c b/kernel/scheduler.c
index ad6b370..b5d1b34 100644
--- a/kernel/scheduler.c
+++ b/kernel/scheduler.c
@@ -73,25 +73,36 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
     task_list[cpu][0].pid=nextPID();
     task_list[cpu][0].active=true;
     task_list[cpu][0].syscall=false;
-    task_list[cpu][0].thread=false;
+    task_list[cpu][1].thread=false;
     task_list[cpu][0].vmem=dir;
     task_list[cpu][0].esp = VMEM_CPU_STACK_TOP-0x200;
     task_list[cpu][0].esp0 = 0; // esp0 not needed by kernel space tasks
-    task_pusha(task_list[cpu][0].esp);
-
-    // this is our main kernel task at slot 0 (per cpu)
+    // this will go to userspace
     task_list[cpu][1].parent=0;
     task_list[cpu][1].pid=nextPID();
     task_list[cpu][1].active=true;
-    task_list[cpu][0].thread=false;
+    task_list[cpu][1].thread=false;
     task_list[cpu][1].syscall=false;
     task_list[cpu][1].vmem=dir;
     task_list[cpu][1].esp = kballoc(4)+4*4096-0x200; // 4 pages stack
     task_list[cpu][1].esp0 =kballoc(4)+4*4096; // esp0 not needed by kernel space tasks
+
+    // sleeper
+    task_list[cpu][2].parent=0;
+    task_list[cpu][2].pid=nextPID();
+    task_list[cpu][2].active=true;
+    task_list[cpu][2].thread=false;
+    task_list[cpu][2].syscall=false;
+    task_list[cpu][2].vmem=dir;
+    task_list[cpu][2].esp = kballoc(4)+4*4096-0x200; // 4 pages stack
+    task_list[cpu][2].esp0 =kballoc(4)+4*4096; // esp0 not needed by kernel space tasks
+
+    // stacks
     task_pusha(task_list[cpu][0].esp);
     task_pusha(task_list[cpu][1].esp);
+    task_pusha(task_list[cpu][2].esp);
 }
 
 //
@@ -104,7 +115,7 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
 //
 // we need to return a NEW stack pointer where popa will get the registers the new task requires
 //
-volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t force_pid)
+volatile uint32_t scheduler_run(uint32_t oldesp)
 {
     uint32_t cpu=smp_get(SMP_APIC_ID);
     uint32_t init=smp_get(SMP_SCHEDULER_INIT);
@@ -117,6 +128,7 @@ volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t force_pid)
     for(int i=0;i<MAX_TASKS;i++)
     {
         int idx=(current_task[cpu]+1+i)%MAX_TASKS; // schedule round robin style
+        if(idx==2)continue;// skip sleeper here
         if(task_list[cpu][idx].active && !task_list[cpu][idx].syscall) // find active non-blocked task
         {
@@ -129,7 +141,13 @@ volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t force_pid)
         }
     }
 
-    kpanic("nothing to schedule!");
+    // force the sleeper task
+    current_task[cpu]=2;
+    install_tss(cpu,task_list[cpu][2].esp0);
+    x86_set_page_directory(task_list[cpu][2].vmem);
+    return task_list[cpu][2].esp;
+
+//  kpanic("nothing to schedule!");
 }
@@ -146,8 +164,13 @@ void scheduler_func()
         task_syscall_worker();
     }
 
-    if(current_task[cpu]==1)
+    if(current_task[cpu]==2)
+    while(1)
+    {
+        while(1)asm("hlt"); // sleeper task
+    }
+    if(current_task[cpu]==1)
     while(1)
     {
         if(cpu==0)
@@ -205,7 +228,15 @@ volatile int add_task(uint32_t parent_pid,uint32_t vmem, bool thread)
     kpanic("out of task slots!");
 }
 
-void task_wake_all()
+uint32_t scheduler_wake_worker(uint32_t oldesp)
+{
+    uint32_t cpu=smp_get(SMP_APIC_ID);
+    task_list[cpu][0].syscall=false; // wake (syscall misused)
+    scheduler_wake_all();
+    return scheduler_run(oldesp);
+}
+
+void scheduler_wake_all()
 {
     uint32_t cpu=smp_get(SMP_APIC_ID); // simple approach, any syscall might unblock any other syscall
     // TODO: better! TODO: all cpus!
@@ -215,7 +246,6 @@ void task_wake_all()
         }
     }
-
 /**
  * kernel space worker thread
  *
@@ -258,7 +288,7 @@ void task_syscall_worker()
                 task_list[cpu][i].ebx,
                 task_list[cpu][i].pid);
 
-            task_wake_all();
+            scheduler_wake_all();
 
             uint32_t *stack=task_list[cpu][i].esp;
             stack[12]=0x1;
@@ -268,9 +298,26 @@ void task_syscall_worker()
             }
         }
 
-        //task_list[cpu][0].wait=true;
-        //if (nowork)__asm__("hlt");
-        __asm__("int $0x81"); // wake scheduler!
+        bool nowork=true;
+
+        x86_cli(); // disable all incoming new work (mouse,kb,timer). software syscalls can not appear since we are here ;)
+        for(int i=0;i<MAX_TASKS;i++) /// ... and check if aaany work is pending.
+        {
+            if(task_list[cpu][i].active,task_list[cpu][i].try&&task_list[cpu][i].syscall)
+            {
+                nowork=false;
+                break;
+            }
+        }
+        if(nowork)
+        {
+            task_list[cpu][0].syscall=true; // sleep (syscall misused)
+        }
+        x86_sti();
+        if(nowork)
+        {
+            __asm__("int $0x81"); // wake scheduler!
+        }
     }
 }
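Stripped of project specifics, the scheduling change reads roughly as the sketch below: the round-robin scan never selects slot 2, and once every other slot is inactive or blocked in a syscall the sleeper becomes the forced choice instead of hitting kpanic(). install_tss and x86_set_page_directory are the functions named in the diff (signatures assumed); task_t, MAX_TASKS and current_task continue the struct sketch above, the cpu parameter is illustrative scaffolding (the real scheduler_run() obtains it via smp_get(SMP_APIC_ID)), and saving oldesp back into the task entry is an assumption not shown in this hunk.

```c
#define SLEEPER_SLOT 2                       /* mirrors the hard-coded slot 2 in the commit */

/* repository functions used by the diff; signatures are assumptions */
void install_tss(uint32_t cpu, uint32_t esp0);
void x86_set_page_directory(void *dir);

extern task_t   task_list[][MAX_TASKS];      /* per-CPU task table (see sketch above) */
extern uint32_t current_task[];              /* per-CPU index of the running task */

/* Round-robin pick with an idle fallback, modelled on scheduler_run();
 * a paraphrase of the diff, not the verbatim repository code. */
uint32_t scheduler_run_sketch(uint32_t cpu, uint32_t oldesp)
{
    /* assumption: the preempted task's stack pointer is saved back into its slot */
    task_list[cpu][current_task[cpu]].esp = oldesp;

    for (int i = 0; i < MAX_TASKS; i++)
    {
        int idx = (current_task[cpu] + 1 + i) % MAX_TASKS;   // round robin
        if (idx == SLEEPER_SLOT)
            continue;                                        // never pick the sleeper here
        if (task_list[cpu][idx].active && !task_list[cpu][idx].syscall)
        {
            current_task[cpu] = idx;
            install_tss(cpu, task_list[cpu][idx].esp0);
            x86_set_page_directory(task_list[cpu][idx].vmem);
            return task_list[cpu][idx].esp;                  // popa/iret resumes this task
        }
    }

    /* nothing runnable: run the sleeper instead of kpanic("nothing to schedule!") */
    current_task[cpu] = SLEEPER_SLOT;
    install_tss(cpu, task_list[cpu][SLEEPER_SLOT].esp0);
    x86_set_page_directory(task_list[cpu][SLEEPER_SLOT].vmem);
    return task_list[cpu][SLEEPER_SLOT].esp;
}
```

When the sleeper is selected, scheduler_func() in the commit simply spins on asm("hlt"), so the CPU idles until the next interrupt drops it back into the scheduler.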

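The other half of the commit changes the per-CPU syscall worker so it only goes to sleep when no work is pending, and it makes that decision with interrupts disabled so no new request can slip in between the check and the sleep. Below is a condensed sketch of that pattern, continuing the declarations above; x86_cli(), x86_sti() and the int $0x81 software interrupt are the names used in the diff, the helper function itself is hypothetical, and the diff's pending-work condition also consults a per-task try field that the sketch leaves out.

```c
void x86_cli(void);   /* repository helpers; assumed to disable/enable interrupts */
void x86_sti(void);

/* Sketch of the "check for work, then sleep" step this commit appends to
 * task_syscall_worker(); a paraphrase, not the verbatim repository code. */
void syscall_worker_idle_sketch(uint32_t cpu)
{
    bool nowork = true;

    x86_cli();                                   // no new work (timer/kb/mouse) can arrive now
    for (int i = 0; i < MAX_TASKS; i++)
    {
        if (task_list[cpu][i].active && task_list[cpu][i].syscall)
        {
            nowork = false;                      // some task still waits on a syscall
            break;
        }
    }
    if (nowork)
        task_list[cpu][0].syscall = true;        // worker marks itself sleeping (syscall flag misused)
    x86_sti();                                   // re-enable interrupts before yielding

    if (nowork)
        __asm__("int $0x81");                    // trap into the scheduler and give up the CPU
}
```

scheduler_wake_worker() is the matching wake-up path in the diff: it clears the slot-0 syscall flag, calls scheduler_wake_all(), and re-enters scheduler_run(), so the worker resumes as soon as an interrupt reports new work.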