summaryrefslogtreecommitdiff
path: root/kernel/scheduler.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/scheduler.c')
-rw-r--r--kernel/scheduler.c151
1 files changed, 77 insertions, 74 deletions
diff --git a/kernel/scheduler.c b/kernel/scheduler.c
index a4e3743..9fdff7d 100644
--- a/kernel/scheduler.c
+++ b/kernel/scheduler.c
@@ -74,10 +74,10 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
}
current_task[cpu]=0;
- last_task[cpu]=0;
+// last_task[cpu]=0;
// need to make space on the esp stacks for pushing vals vias task_pusha
-
+
// this is our main kernel task at slot 0 (per cpu)
task_list[cpu][0].parent=0;
task_list[cpu][0].pid=nextPID();
@@ -85,10 +85,10 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
task_list[cpu][0].syscall=false;
task_list[cpu][0].thread=false;
task_list[cpu][0].vmem=dir;
- task_list[cpu][0].esp = VMEM_CPU_STACK_TOP-0x200-8;
- task_list[cpu][0].esp0 = 0; // esp0 not needed by kernel space tasks
- strcpy(task_list[cpu][0].name,"kernel_worker");
- fd_init_std_streams(task_list[cpu][0].pid,0);
+ task_list[cpu][0].esp = kballoc(4)+4*4096-0x200-8; // 4 pages stack & prealign
+ task_list[cpu][0].esp0 = 0; // esp0 not required by kernel space tasks
+ strcpy(task_list[cpu][0].name,"kernel worker");
+ //fd_init_std_streams(task_list[cpu][0].pid,0);
// this will go to userspace
task_list[cpu][1].parent=0;
@@ -97,11 +97,11 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
task_list[cpu][1].thread=false;
task_list[cpu][1].syscall=false;
task_list[cpu][1].vmem=dir;
- task_list[cpu][1].esp = kballoc(4)+4*4096-0x200-8; // 4 pages stack & prealign
- task_list[cpu][1].esp0 =kballoc(4)+4*4096; // esp0 not needed by kernel space tasks
- strcpy(task_list[cpu][1].name,"init");
- fd_init_std_streams(task_list[cpu][1].pid,0);
-
+ task_list[cpu][1].esp = kballoc(4)+4*4096-0x200-8; // 4 pages stack & prealign
+ task_list[cpu][1].esp = VMEM_USER_STACK_TOP-0x200-8;
+ task_list[cpu][1].esp0 = kballoc(4)+4*4096; // esp0 needed by user space tasks
+ strcpy(task_list[cpu][1].name,"userspace init");
+ //fd_init_std_streams(task_list[cpu][1].pid,0);
// sleeper
task_list[cpu][2].parent=0;
@@ -111,9 +111,9 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
task_list[cpu][2].syscall=false;
task_list[cpu][2].vmem=dir;
task_list[cpu][2].esp = kballoc(4)+4*4096-0x200-8; // 4 pages stack & prealign
- task_list[cpu][2].esp0 =kballoc(4)+4*4096; // esp0 not needed by kernel space tasks
+ task_list[cpu][2].esp0 =0; // esp0 not needed by kernel space tasks
strcpy(task_list[cpu][2].name,"idle process");
- fd_init_std_streams(task_list[cpu][2].pid,0);
+ //fd_init_std_streams(task_list[cpu][2].pid,0);
// stacks
task_pusha(task_list[cpu][0].esp);
@@ -128,8 +128,8 @@ static uint32_t scheduler_schedule(uint32_t idx)
if(task_list[cpu][idx].active && !task_list[cpu][idx].syscall)
{
- if(current_task[cpu]!=0)last_task[cpu]=current_task[cpu];
- if(current_task[cpu]==idx)return task_list[cpu][idx].esp;
+ //if(current_task[cpu]!=0)last_task[cpu]=current_task[cpu];
+ //if(current_task[cpu]==idx)return task_list[cpu][idx].esp;
current_task[cpu]=idx;
@@ -162,49 +162,26 @@ volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t preference)
uint32_t cpu=smp_get(SMP_APIC_ID);
uint32_t init=smp_get(SMP_SCHEDULER_INIT);
- if(init){
+ if(init)
+ {
scheduler_init(cpu,x86_get_page_directory());
smp_set(SMP_SCHEDULER_INIT,0);
+ klog("Scheduler initialized for cpu %d",cpu);
+ return task_list[cpu][current_task[cpu]].esp;
}
-
else task_list[cpu][current_task[cpu]].esp=oldesp;
- uint32_t esp;
-
- if(preference!=-1)
- {
- esp=scheduler_schedule(preference); // try preference
- if(esp)return esp;
-
- if(current_task[cpu]==0)// we have interrupted a task with ring1 work
- {
- esp=scheduler_schedule(last_task[cpu]); // try preference
- if(esp)return esp;
- }
- }
- else
- {
- //klog("preempt %d", last_task[cpu]);
- }
-
for(int i=0;i<MAX_TASKS;i++)
{
- int idx=(last_task[cpu]+1+i)%MAX_TASKS; // schedule round robin style
-// if(preference==-1&&idx==0)continue;
-
- if(idx==preference||idx==2)continue;// skip sleeper and preferred tasks here.
-
- esp=scheduler_schedule(idx);
-
- if(esp){
- //klog("%d",idx);
- return esp;
- }
+ int idx=(current_task[cpu]+1+i)%MAX_TASKS; // schedule round robin style
+// if(idx==2)continue;// skip sleeper here
+ uint32_t esp=scheduler_schedule(idx);
+ if(esp)return esp;
}
+ kpanic("nothing left to schedule");
- // force the sleeper task...
+ // force the sleeper task here ...
return scheduler_schedule(2);
-
}
@@ -213,35 +190,60 @@ void scheduler_func()
// we need enable here again (since the pushed eflags have it disabled)? TODO: why they disabled it!???
x86_sti();
+
uint32_t cpu=smp_get(SMP_APIC_ID);
+    fixme("this will deadlock on context switch during log if never switched back before finish");
+
if(current_task[cpu]==0)
- while(1)
{
- task_syscall_worker();
+ while(1)
+ {
+ uint64_t t0=x86_rdtscp();
+ asm("hlt"); // sleeper task
+ uint64_t t1=x86_rdtscp();
+ klog("task 0 / slept cycles: l:%d h:%d",(t1-t0));
+ }
+
+// task_syscall_worker();
+ //task_list[cpu][0].syscall=true; // sleep
+ //__asm__("int $0x81"); // wake scheduler! with IPI
+
}
- if(current_task[cpu]==2)
- while(1)
+ if(current_task[cpu]==1)
{
- uint64_t t0=x86_rdtscp();
- asm("hlt"); // sleeper task
- uint64_t t1=x86_rdtscp();
- klog("slept: l:%d h:%d",(t1-t0));
+ if(cpu==0)
+ {
+ uint32_t alloc;
+ uint32_t entry_global=load_elf(BIN_INIT,&alloc);
+ task_set_brk(task_get_current_pid(),alloc);
+ asm_usermode(entry_global);
+ kpanic("init died on cpu %d",cpu);
+ }
+
+ while(1)
+ {
+ uint64_t t0=x86_rdtscp();
+ asm("hlt"); // sleeper task
+ uint64_t t1=x86_rdtscp();
+ klog("task 1 / slept cycles: l:%d h:%d",(t1-t0));
+ }
}
- if(current_task[cpu]==1)
- while(1)
+ if(current_task[cpu]==2)
{
- if(cpu==0)
- {
- uint32_t alloc;
- uint32_t entry_global=load_elf(BIN_INIT,&alloc);
- task_set_brk(task_get_current_pid(),alloc);
- asm_usermode(entry_global);
- while(1);
- }
+ while(1)
+ {
+ uint64_t t0=x86_rdtscp();
+ asm("hlt"); // sleeper task
+ uint64_t t1=x86_rdtscp();
+ klog("task 2 (sleeper) / slept cycles: l:%d h:%d",(t1-t0));
+ }
}
+
+    kpanic("unknown task");
+
}
volatile int add_task(uint32_t parent_pid,uint32_t vmem, bool thread, char *name)
@@ -324,9 +326,10 @@ void scheduler_wake_all()
*/
void task_syscall_worker()
{
- static uint32_t c=0;
- /// TODO: cross check all cpus!
uint32_t cpu=smp_get(SMP_APIC_ID);
+ task_list[cpu][0].syscall=true; // sleep (syscall misused)
+ return;
+ /// TODO: cross check all cpus!
while(1)
{
@@ -335,14 +338,14 @@ void task_syscall_worker()
//TODO: would be enough only to lock during ringbuffer access!?
- x86_cli(); // disable temporarily mouse/kb/timer interrupts.
- wake|=keyboard_worker();
- wake_mouse|=mouse_worker();
- x86_sti();
+ //x86_cli(); // disable temporarily mouse/kb/timer interrupts.
+ //wake|=keyboard_worker();
+ //wake_mouse|=mouse_worker();
+ //x86_sti();
- if(wake_mouse)compositor_swap_buffers();
+ //if(wake_mouse)compositor_swap_buffers();
- if(wake)scheduler_wake_all();
+ //if(wake)scheduler_wake_all();
//if(cpu==0)compositor_swap_buffers();