summaryrefslogtreecommitdiff
path: root/kernel/scheduler.c
blob: 11b6db454ae948ce8846cbb06e240a5f926c4902 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
#include "scheduler.h"
#include "kernel.h"
#include "gdt.h"
#include "log.h"

#include "smp.h"
#include "mem.h"
#include "fs/elf.h"
#include "asm_x86.h"
#include "asm_task.h"
#include "asm_usermode.h"
#include "kmalloc.h"

#include "vmem.h"
#include "syscalls.h"
#include "fs/fs.h"
#include "fs/ext2.h"

#define NO_TASK 0xffffffff

// we hold this stuff per cpu
// slot index (into task_list[cpu]) of the task currently running on each cpu
static volatile uint32_t current_task[SMP_MAX_PROC];

// we hold this stuff per cpu
// per-cpu task table: task_list[cpu][slot] describes one task on that cpu
static volatile struct task_list_struct
{
    volatile bool active;      // is this slot used (Y/N)
    volatile uint32_t pid;     // process id (TODO)

    volatile uint32_t parent;  // parent process id (slot index of the parent task)
    volatile uint32_t esp;     // stack pointer of the task (saved by scheduler_run when descheduled)
    volatile uint32_t esp0;    // tss.esp0 - kernel stack top used on ring3->ring0 transitions
    volatile struct pdirectory *vmem; // page directory of this task's address space

    volatile uint32_t brk;     // memory brk pos (current heap end)

    volatile bool    wait; // waiting for syscall to be processed.
    volatile uint32_t eax; // saved syscall number (set by task_syscall)
    volatile uint32_t ebx; // saved syscall argument
    volatile uint32_t ecx; // saved syscall argument
    volatile uint32_t edx; // saved syscall argument
    
}task_list[SMP_MAX_PROC][MAX_TASKS];

// init tasks //
//
// Set up the per-cpu task table. Slot 0 is the kernel task we are
// currently running on (it reuses the per-cpu stack we already have),
// slot 1 is a second kernel task with a freshly allocated stack.
// Both run scheduler_func() (see there for what each pid does).
//
// cpu - index of the cpu whose table we initialize
// dir - page directory currently active on this cpu
//
volatile void scheduler_init(uint32_t cpu, void *dir)
{
    for(int i=0;i<MAX_TASKS;i++)
    {
	task_list[cpu][i].active=false;
    }

    current_task[cpu]=0;

    // esp is parked 0x200 below the stack top so task_pusha has room
    // to write the initial register frame.

    // this is our main kernel task at slot 0 (per cpu)
    task_list[cpu][0].parent=0;
    task_list[cpu][0].active=true;
    task_list[cpu][0].wait=false;
    task_list[cpu][0].vmem=dir;
    task_list[cpu][0].esp = VMEM_CPU_STACK_TOP-0x200;
    task_list[cpu][0].esp0 = 0; // esp0 not needed by kernel space tasks

    // this is our second kernel task at slot 1 (per cpu)
    task_list[cpu][1].parent=0;
    task_list[cpu][1].active=true;
    task_list[cpu][1].wait=false;
    task_list[cpu][1].vmem=dir;
    task_list[cpu][1].esp = kballoc(4)+4*4096-0x200; // 4 pages stack
    task_list[cpu][1].esp0 =kballoc(4)+4*4096; // separate 4 page kernel stack for ring transitions

    // seed each stack with an initial register frame so the first
    // context switch has something to popa.
    // fix: the original pushed slot 0's frame twice (same esp both
    // times) - the second call only rewrote the same bytes; dropped.
    task_pusha(task_list[cpu][0].esp);
    task_pusha(task_list[cpu][1].esp);
}

//
// REMEMBER WE ARE INSIDE AN INTERRUPT HERE - DON'T WASTE TIME!
//
// oldesp - is the address of the stack pointer when pit_interrupt_handler was entered.
// registers have been pushed with pusha to this old stack.
//
// stack pointer was moved to the 16kb stack we have from multiboot.s
//
// we need to return a NEW stack pointer where popa will get the registers the new task requires
//
// force_pid - NOTE(review): not referenced in this body; verify what
// callers expect it to do.
//
volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t force_pid)
{
    uint32_t cpu=smp_get(SMP_APIC_ID);
    uint32_t init=smp_get(SMP_SCHEDULER_INIT);
    if(init){
	// first tick on this cpu: build the task table before scheduling
	scheduler_init(cpu,x86_get_page_directory());
        smp_set(SMP_SCHEDULER_INIT,0);
    }
    else task_list[cpu][current_task[cpu]].esp=oldesp; // remember where the preempted task's registers live

    for(int i=0;i<MAX_TASKS;i++)
    {
	int idx=(current_task[cpu]+1+i)%MAX_TASKS; // schedule round robin style

	if(task_list[cpu][idx].active && !task_list[cpu][idx].wait) // find active non-blocked task
	{
	    current_task[cpu]=idx;
	    
	    install_tss(cpu,task_list[cpu][idx].esp0); // kernel stack for ring3->ring0 transitions
	    x86_set_page_directory(task_list[cpu][idx].vmem); // switch address space
	    return task_list[cpu][idx].esp;
	}
    }

    kpanic("nothing to schedule!");
}


// entry point for the kernel tasks created in scheduler_init.
// pid 0 becomes the syscall worker loop; pid 1 (on cpu 0 only)
// loads the userspace init binary and drops to ring 3.
void scheduler_func()
{
    // we need enable here again (since the pushed eflags have it disabled)? TODO: why they disabled it!???
    x86_sti(); 

    uint32_t cpu=smp_get(SMP_APIC_ID);

    // pid 0: service pending syscalls forever
    if(task_get_current_pid()==0)
    while(1)
    {
        task_syscall_worker();
    }

    // pid 1: bootstrap userspace
    if(task_get_current_pid()==1)

    while(1)
    {
	if(cpu==0)
	{
	    uint32_t alloc;
	    // load the init ELF image; 'alloc' receives the first free
	    // address after the image, which becomes the initial brk
	    uint32_t entry_global=load_elf(BIN_INIT,&alloc);
	    task_set_brk(alloc);
	    klog("breakpoint: 0x%08x",alloc);
	    asm_usermode(entry_global); 
        while(1); // asm_usermode should never return
	}
//	else syscall_write(1, "x",1); // stdout
	    
    }
}

// Re-point a task's saved register frame at a new entry point and stack
// (used e.g. when replacing a task's image).
//
// pid   - task slot on the current cpu
// entry - new instruction pointer to resume at
// stack - new stack pointer for the task
// returns 1.
//
// NOTE(review): 14 and 17 are dword offsets into the frame that the
// interrupt path / task_pusha laid out on the task's stack - presumably
// the saved EIP and user ESP slots; verify against asm_task layout.
volatile int task_reset(uint32_t pid, uint32_t entry, uint32_t stack)
{
    uint32_t cpu=smp_get(SMP_APIC_ID);
    // fix: esp is stored as uint32_t - cast explicitly instead of
    // relying on an implicit int-to-pointer conversion
    uint32_t *stk=(uint32_t *)task_list[cpu][pid].esp;
    stk[14]=entry;
    stk[17]=stack;
    return 1;
}

// allocate a free task slot on the current cpu and set it up as a child
// of 'parent' running in address space 'vmem'. the parent's saved
// register frame is copied so the child resumes at the same point,
// with its return value patched to 0 (fork semantics).
// returns the child's slot index; panics if the table is full.
volatile int add_task(uint32_t parent,uint32_t vmem)
{
    uint32_t cpu=smp_get(SMP_APIC_ID);
    for(int i=0;i<MAX_TASKS;i++)
    {
	if(task_list[cpu][i].active!=true)
	{
	    task_list[cpu][i].parent=parent;
	    task_list[cpu][i].vmem=vmem; 
	    task_list[cpu][i].esp = kballoc(4)+2*4096; // center

            task_list[cpu][i].esp0 = kballoc(4)+4*4096;
	    task_list[cpu][i].wait=false;
	    task_list[cpu][i].brk=task_list[cpu][current_task[cpu]].brk;

	    // copy the parent's saved register frame (and a bit beyond)
	    // onto the child's new stack
            uint32_t *source=(uint32_t *)task_list[cpu][parent].esp;
	    uint32_t *dst=(uint32_t *)task_list[cpu][i].esp;

	    for(int x=0;x<100;x++) //TODO:  better copy this page too instead of stack
	    {
		*dst=*source;
		dst++;
		source++;
	    }

	    uint32_t *stack=task_list[cpu][i].esp;
	    stack[12]=0x1; // NOTE(review): same slot written by the syscall worker - meaning unverified, check asm_task
	    stack[13]=0; // this task returns pid=0 to the caller
	    // slot 13 appears to be the saved EAX (return value) position

	    task_list[cpu][i].active=true; //TODO: LOCK! (also other similar)
	    return i;
	}
    }

    kpanic("out of task slots!");
}


/**
 * kernel space worker thread
 *
 * we can get interrupted by an interrupt ANYTIME!
 *
 * scans this cpu's task table for tasks parked in 'wait' state
 * (see task_syscall), executes their recorded syscall on their behalf,
 * patches the result into the task's saved register frame and
 * unblocks the task.
 */

void task_syscall_worker()
{
    uint32_t cpu=smp_get(SMP_APIC_ID);
    while(1)
    {
	//klog("checking if any pending syscalls.");

	bool nowork=true;
	for(int i=0;i<MAX_TASKS;i++)
	{
	    if(task_list[cpu][i].wait)
	    {
		uint32_t syscall=task_list[cpu][i].eax;
                klog("task %d waiting on syscall %d/%s. processing...",i,syscall,syscall_get_name(syscall));
		task_list[cpu][2].vmem=task_list[cpu][i].vmem; // switch syscall worker to pagedir of calling userprog
		x86_set_page_directory(task_list[cpu][2].vmem);

		// SYSCALL_WAIT is resolved by task_exit, not here
		if(task_list[cpu][i].eax==SYSCALL_WAIT)
		{
		    continue;
		}

		// reads block until input is available - skip until ready
		if(task_list[cpu][i].eax==SYSCALL_READ)
		{
		uint32_t ok= chk_syscall_read(
			    task_list[cpu][i].edx,
			    task_list[cpu][i].ecx,
			    task_list[cpu][i].ebx
			    );
		if(!ok)continue;
		}

		nowork=false;

		uint32_t ret= syscall_generic(task_list[cpu][i].eax,
			    task_list[cpu][i].edx,
			    task_list[cpu][i].ecx,
			    task_list[cpu][i].ebx,
			    i);

		// patch the result into the task's saved register frame;
		// slot 13 appears to be saved EAX (matches add_task)
		uint32_t *stack=task_list[cpu][i].esp;
		stack[12]=0x1; 
		stack[13]=ret; 
	    
		task_list[cpu][i].wait=false;
	    }
	}

	//task_list[2].wait=true;
	// NOTE(review): tasks skipped above (WAIT / not-ready READ) leave
	// nowork==true, so we hlt here - presumably the next timer/irq
	// wakes us to re-check; confirm that is the intended path.
	if (nowork)__asm__("hlt");
	else __asm__("int $0x81"); // wake scheduler!
    }
}


// Park the calling task and record its syscall registers so the kernel
// syscall worker (slot 2) can process the request on its behalf.
// The task stays blocked (wait==true) until the worker clears it.
volatile uint32_t task_syscall(uint32_t eax,uint32_t ebx, uint32_t ecx, uint32_t edx)
{
    uint32_t cpu=smp_get(SMP_APIC_ID);
    uint32_t slot=current_task[cpu];

    // block the caller, then stash the syscall number and arguments
    task_list[cpu][slot].wait=true;
    task_list[cpu][slot].eax=eax;
    task_list[cpu][slot].ebx=ebx;
    task_list[cpu][slot].ecx=ecx;
    task_list[cpu][slot].edx=edx;

    // make sure the syscall worker task is runnable
    task_list[cpu][2].wait=false;
    return 1;
}

//TODO: free vmem too!
//TODO: notify waiting parent when child finished;
// Mark task 'pid' as finished and release its parent if the parent is
// currently blocked in SYSCALL_WAIT (see task_wait / task_exit pairing).
volatile uint32_t task_exit(uint32_t pid)
{ 
    uint32_t cpu=smp_get(SMP_APIC_ID);

    task_list[cpu][pid].active=false;

    // wake the parent if it is sleeping in wait()
    int ppid=task_list[cpu][pid].parent;
    bool parent_blocked_in_wait =
	task_list[cpu][ppid].wait && task_list[cpu][ppid].eax==SYSCALL_WAIT;
    if(parent_blocked_in_wait)
	task_list[cpu][ppid].wait=false;

    klog("[%d] exit", pid);
//    vmem_free_dir(task_list[cpu][pid].vmem);
    return 1;
}

// Block task 'pid' until one of its children exits. The slot's eax is
// tagged with SYSCALL_WAIT so task_exit can recognize and release it.
volatile uint32_t task_wait(uint32_t pid)
{ 
    uint32_t cpu=smp_get(SMP_APIC_ID);

    klog("[%d] wait", pid);

    task_list[cpu][pid].wait=true;
    task_list[cpu][pid].eax=SYSCALL_WAIT;
    return 1;
}

// fork: create a child of 'pid' with its own copy of the parent's
// address space (vmem_new_space_dir with share=false).
// returns the child's slot index.
volatile uint32_t task_fork(uint32_t pid)
{ 
    uint32_t cpu=smp_get(SMP_APIC_ID);
    //TODO: what will happen if we get rescheduled!?!?!
    int child=add_task(pid,vmem_new_space_dir(task_list[cpu][pid].vmem,false));
    klog("[%d] forked -> [%d] (free blocks remaining: %d )", pid, child,0);
    return child;
}
// clone: create a child of 'pid' that shares the parent's address
// space (vmem_new_space_dir with share=true), thread-style.
// returns the child's slot index.
volatile uint32_t task_clone(uint32_t pid)
{ 
    uint32_t cpu=smp_get(SMP_APIC_ID);
    //TODO: what will happen if we get rescheduled!?!?!
    int child=add_task(pid,vmem_new_space_dir(task_list[cpu][pid].vmem,true));
    klog("[%d] cloned -> [%d] (free blocks remaining: %d )", pid, child,0);
    return child;
}

// return the slot index (pid) of the task running on this cpu.
volatile int task_get_current_pid()
{
    return current_task[smp_get(SMP_APIC_ID)];
}

// return the current task's heap end (brk).
volatile uint32_t task_get_brk()
{
    uint32_t id=smp_get(SMP_APIC_ID);
    return task_list[id][current_task[id]].brk;
}

// set the current task's heap end (brk) to 'brk'.
volatile void task_set_brk(uint32_t brk)
{
    uint32_t id=smp_get(SMP_APIC_ID);
    task_list[id][current_task[id]].brk=brk;
}