Diffstat (limited to 'kernel/scheduler.c')
-rw-r--r--  kernel/scheduler.c  62
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/kernel/scheduler.c b/kernel/scheduler.c
index 5a65b7e..8cadcb8 100644
--- a/kernel/scheduler.c
+++ b/kernel/scheduler.c
@@ -133,7 +133,7 @@ static uint32_t scheduler_schedule(uint32_t idx)
current_task[cpu]=idx;
- //klog("idx %d",idx);
+// klog("cpu %d rescheduled to %d",cpu,idx);
//klog("name: %s",task_list[cpu][idx].name);
//klog("cpu %d / idx %d / pid %d / name: %5s",cpu,idx,task_list[cpu][idx].pid,task_list[cpu][idx].name);
@@ -171,10 +171,16 @@ volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t preference)
}
else task_list[cpu][current_task[cpu]].esp=oldesp;
+ if(preference>0) // try preference first if any..
+ {
+ uint32_t esp=scheduler_schedule(preference);
+ if(esp)return esp;
+ }
+
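+ // if the preferred task could not be scheduled, fall back to round robin below (which skips it again)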
for(int i=0;i<MAX_TASKS;i++)
{
int idx=(current_task[cpu]+1+i)%MAX_TASKS; // schedule round robin style
- if(idx==2)continue;// skip sleeper here
+ if(idx==2||idx==preference)continue;// skip sleeper and preference
uint32_t esp=scheduler_schedule(idx);
if(esp)return esp;
}
@@ -193,33 +199,7 @@ void scheduler_func()
fixme("this will dadlock on context switch during log if never switched back before finish");
- if(current_task[cpu]==0)
- {
- /*
- while(1)
- {
- uint64_t t0=x86_rdtscp();
- asm("hlt"); // sleeper task
- uint64_t t1=x86_rdtscp();
- klog("task 0 / slept cycles: l:%d h:%d",(t1-t0));
- }
- */
-
- while(1)
- {
- if(cpu==0)
- {
- task_syscall_worker();
- }
- else
- {
- asm("hlt"); // sleeper task
- }
- }
- //task_list[cpu][0].syscall=true; // sleep
- //__asm__("int $0x81"); // wake scheduler! with IPI
-
- }
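+ // task 0 is now just the syscall worker; its per-cpu idle/worker loop lives in task_syscall_worker() below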
+ if(current_task[cpu]==0)task_syscall_worker();
if(current_task[cpu]==1)
{
@@ -249,7 +229,7 @@ void scheduler_func()
uint64_t t0=x86_rdtscp();
asm("hlt"); // sleeper task
uint64_t t1=x86_rdtscp();
- // klog("task 2 (sleeper) / slept cycles: l:%d h:%d",(t1-t0));
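+ // the 64-bit tsc delta is (presumably) consumed as two 32-bit words by the l:/h: %d pair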
+ klog("sleeper on cpu %d / slept cycles: l:%d h:%d",cpu,(t1-t0));
}
}
@@ -330,6 +310,21 @@ void scheduler_wake_all()
void task_syscall_worker()
{
uint32_t cpu=smp_get(SMP_APIC_ID);
+
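+ // worker loop: cpu 0 drains keyboard/mouse and swaps the compositor buffers, then every cpu forces a reschedule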
+ while(1)
+ {
+ if(cpu==0) // this stuff is processed by cpu 0
+ {
+ keyboard_worker();
+ mouse_worker();
+ compositor_swap_buffers();
+ }
+
+ __asm__("int $0x81"); // we are ready! force reschedule
+ }
+
+ /*
+ uint32_t cpu=smp_get(SMP_APIC_ID);
//task_list[cpu][0].syscall=true; // sleep (syscall misused)
//return;
/// TODO: cross check all cpus!
@@ -344,10 +339,13 @@ void task_syscall_worker()
//x86_cli(); // disable temporarily mouse/kb/timer interrupts.
wake|=keyboard_worker();
wake_mouse|=mouse_worker();
+ compositor_swap_buffers();
+ __asm__("int $0x81"); // wake scheduler! with IPI
+
+
//x86_sti();
if(wake_mouse)compositor_wake();
- compositor_swap_buffers();
if(wake)scheduler_wake_all();
//if(cpu==0)compositor_swap_buffers();
@@ -419,6 +417,7 @@ void task_syscall_worker()
__asm__("int $0x81"); // wake scheduler! with IPI
}
}
+*/
}
// !!! REMEMBER THIS IS INSIDE AN INTERRUPT !!!
@@ -518,6 +517,7 @@ volatile int task_reset(uint32_t pid, uint32_t entry, uint32_t stack,uint32_t br
stk[14]=entry;
stk[17]=stack;
+ __asm__("int $0x81"); // now we also reschedule (at least for execve sake...)
return 1;
}