Diffstat (limited to 'kernel/scheduler.c')
-rw-r--r--  kernel/scheduler.c  46
1 file changed, 18 insertions, 28 deletions
diff --git a/kernel/scheduler.c b/kernel/scheduler.c
index 6c7df28..985b3bf 100644
--- a/kernel/scheduler.c
+++ b/kernel/scheduler.c
@@ -102,10 +102,9 @@ volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t force_pid)
if(task_list[cpu][idx].active && !task_list[cpu][idx].wait) // find active non-blocked task
{
- //TODO: do NOT do this!
- //klog("schedule %d->%d in cpu %d",current_task[cpu],idx,cpu );
+ //TODO: do NOT do this! deadlock imminent!
+ //if(cpu==0)klog("schedule %d->%d on cpu %d",current_task[cpu],idx,cpu );
current_task[cpu]=idx;
-
install_tss(cpu,task_list[cpu][idx].esp0);
x86_set_page_directory(task_list[cpu][idx].vmem);
return task_list[cpu][idx].esp;
@@ -138,12 +137,9 @@ void scheduler_func()
uint32_t alloc;
uint32_t entry_global=load_elf(BIN_INIT,&alloc);
task_set_brk(alloc);
- klog("breakpoint: 0x%08x",alloc);
asm_usermode(entry_global);
- while(1);
+ while(1);
}
-// else syscall_write(1, "x",1); // stdout
-
}
}
@@ -174,7 +170,7 @@ volatile int add_task(uint32_t parent,uint32_t vmem)
uint32_t *source=(uint32_t *)task_list[cpu][parent].esp;
uint32_t *dst=(uint32_t *)task_list[cpu][i].esp;
- for(int x=0;x<100;x++) //TODO: better copy this page too instead of stack
+ for(int x=0;x<100;x++) //TODO: maybe better copy this page too instead of stack
{
*dst=*source;
dst++;
@@ -189,7 +185,6 @@ volatile int add_task(uint32_t parent,uint32_t vmem)
return i;
}
}
-
kpanic("out of task slots!");
}
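
The add_task() hunk above still copies a fixed 100 dwords from the parent's stack to the child's, and the updated TODO suggests copying the containing page instead. One reading of that TODO, as a hedged sketch (PAGE_SIZE and the page-alignment assumption are mine, not taken from this tree):

    #include <stdint.h>

    #define PAGE_SIZE 4096u

    /* Sketch only: copy the whole 4 KiB page holding each stack instead of a
     * fixed 100 dwords. Assumes both stacks sit at the same offset within a
     * page-aligned region, which this diff does not state. */
    static void copy_stack_page(uint32_t parent_esp, uint32_t child_esp)
    {
        uint32_t *src = (uint32_t *)(parent_esp & ~(PAGE_SIZE - 1)); /* page holding the parent stack */
        uint32_t *dst = (uint32_t *)(child_esp  & ~(PAGE_SIZE - 1)); /* page holding the child stack  */
        for (uint32_t x = 0; x < PAGE_SIZE / sizeof(uint32_t); x++)  /* 1024 dwords = one full page   */
            dst[x] = src[x];
    }
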
@@ -206,8 +201,6 @@ void task_syscall_worker()
uint32_t cpu=smp_get(SMP_APIC_ID);
while(1)
{
- //klog("checking if any pending syscalls.");
-
bool nowork=true;
for(int i=0;i<MAX_TASKS;i++)
{
@@ -215,8 +208,8 @@ void task_syscall_worker()
{
uint32_t syscall=task_list[cpu][i].eax;
klog("task %d waiting on syscall %d/%s. processing...",i,syscall,syscall_get_name(syscall));
- task_list[cpu][2].vmem=task_list[cpu][i].vmem; // switch syscall worker to pagedir of calling userprog
- x86_set_page_directory(task_list[cpu][2].vmem);
+ task_list[cpu][0].vmem=task_list[cpu][i].vmem; // switch syscall worker to pagedir of calling userprog
+ x86_set_page_directory(task_list[cpu][0].vmem);
if(task_list[cpu][i].eax==SYSCALL_WAIT)
{
@@ -225,12 +218,12 @@ void task_syscall_worker()
if(task_list[cpu][i].eax==SYSCALL_READ)
{
- uint32_t ok= chk_syscall_read(
- task_list[cpu][i].edx,
- task_list[cpu][i].ecx,
- task_list[cpu][i].ebx
- );
- if(!ok)continue;
+ uint32_t ok= chk_syscall_read(
+ task_list[cpu][i].edx,
+ task_list[cpu][i].ecx,
+ task_list[cpu][i].ebx
+ );
+ if(!ok)continue;
}
nowork=false;
@@ -249,9 +242,9 @@ void task_syscall_worker()
}
}
- //task_list[2].wait=true;
- if (nowork)__asm__("hlt");
- else __asm__("int $0x81"); // wake scheduler!
+ //task_list[cpu][0].wait=true;
+ //if (nowork)__asm__("hlt");
+ __asm__("int $0x81"); // wake scheduler!
}
}
@@ -264,8 +257,7 @@ volatile uint32_t task_syscall(uint32_t eax,uint32_t ebx, uint32_t ecx, uint32_t
task_list[cpu][current_task[cpu]].ebx=ebx;
task_list[cpu][current_task[cpu]].ecx=ecx;
task_list[cpu][current_task[cpu]].edx=edx;
-
- task_list[cpu][2].wait=false;
+ task_list[cpu][0].wait=false;
return 1;
}
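
The task_syscall() hunk stores the caller's eax/ebx/ecx/edx in its task_list slot and wakes the worker, which now sits in slot 0 rather than slot 2. For context, a minimal sketch of the userspace side implied by that register convention (the int $0x80 vector and the stub name sys_call3 are assumptions, not confirmed by this diff):

    #include <stdint.h>

    /* Hypothetical userspace stub; the register order mirrors
     * task_syscall(eax, ebx, ecx, edx) above. */
    static inline uint32_t sys_call3(uint32_t nr, uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t ret;
        __asm__ volatile("int $0x80"
                         : "=a"(ret)                       /* kernel's return value in eax */
                         : "a"(nr), "b"(a), "c"(b), "d"(c) /* syscall number + 3 args      */
                         : "memory");
        return ret;
    }
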
@@ -294,16 +286,14 @@ volatile uint32_t task_wait(uint32_t pid)
volatile uint32_t task_fork(uint32_t pid)
{
- uint32_t cpu=smp_get(SMP_APIC_ID);
-//TODO: what will happen if we get rescheduled!?!?!
+ uint32_t cpu=smp_get(SMP_APIC_ID);
int ret=add_task(pid,vmem_new_space_dir(task_list[cpu][pid].vmem,false));
klog("[%d] forked -> [%d] (free blocks remaining: %d )", pid, ret,0);
return ret;
}
volatile uint32_t task_clone(uint32_t pid)
{
- uint32_t cpu=smp_get(SMP_APIC_ID);
-//TODO: what will happen if we get rescheduled!?!?!
+ uint32_t cpu=smp_get(SMP_APIC_ID);
int ret=add_task(pid,vmem_new_space_dir(task_list[cpu][pid].vmem,true));
klog("[%d] cloned -> [%d] (free blocks remaining: %d )", pid, ret,0);
return ret;