| author | Miguel <m.i@gmx.at> | 2018-10-18 06:23:26 +0200 |
|---|---|---|
| committer | Miguel <m.i@gmx.at> | 2018-10-18 06:23:26 +0200 |
| commit | e2a6ca9d03a3c0743384f0955609650f2cdce9bb | (patch) |
| tree | 09da82ba6fbb51c9492d05bd94a3d7b044492061 | /kernel |
| parent | 4d1a149531bc5d672cdf4a5a3e010662f9611d61 | (diff) |
struggling with new syscalls
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/interrupts.c | 105 |
| -rw-r--r-- | kernel/kernel.c | 2 |
| -rw-r--r-- | kernel/kernel.h | 7 |
| -rw-r--r-- | kernel/log.c | 4 |
| -rw-r--r-- | kernel/ringbuffer.c | 37 |
| -rw-r--r-- | kernel/ringbuffer.h | 62 |
| -rw-r--r-- | kernel/scheduler.c | 62 |
| -rw-r--r-- | kernel/syscalls.c | 23 |
8 files changed, 206 insertions, 96 deletions
diff --git a/kernel/interrupts.c b/kernel/interrupts.c
index b7cdd74..8bc4f47 100644
--- a/kernel/interrupts.c
+++ b/kernel/interrupts.c
@@ -13,6 +13,7 @@
 #include "apic.h"
 #include "ringbuffer.h"
 #include "compositor.h"
+#include "syscalls.h"
 
 /** The size of our interrupts table */
 #define INT_MAX 256 // 0-255
@@ -51,8 +52,37 @@ static void int_install_ir(int irq, uint16_t flags, uint16_t sel, void *addr)
 }
 
 /**
- * Register an interrupt handler for given irq number.
+ * Register an interrupt handler for a given irq number.
+ *
+ * Some general notes on interrupts
+ * (TODO: consider moving this somewehre else)
+ *
+ * 1) In case of a syscall (coming from userspace) the handler should:
+ *    a) For a blocking call, set required params and call the scheduler!
+ *    b) Otherweise call x86_sti() by itself as soon as possiblle to
+ *       reenable interrupts.
+ *
+ * 2) In case of APIC Timer interrupt call the scheduler.
+ *
+ * 3) Keyboard, Mouse, E1000, PIT
+ *    just push your stuff somewhere for later processing as fast as
+ *    you can and reschedule to a kernel worker OR just iret.
+ *
+ * 4) IPI
+ *    we use them to force rescheduling only now
+ *    treated in the same way as a APIC Timer interrupt...
+ *    TODO: NOT CALL THIS IPI!! since we send it only on same cpu now :P
+ *
+ * NOTE: apic_eoi() should be called in all cases except the syscalls,
+ *       to signal the "end of interrupt" to the APIC.
+ *
+ * TODO: REMEMBER THAT WE WILL HAVE TO CHECK ALL OUR SYSCALL ARE REENTRANT!
+ *       ALSO IN MULTICPU SETTINGS! (check the userspace wrappers as well!)
+ *       EACH prog requires a sufficient kernel-stack as well! (check this!)
+ *       WE CAN GUARD with spinlocks / disabling interrupts when really needed....
+ *
  */
+
 void interrupt_register(uint32_t irq, uint32_t func_addr)
 {
     if(irq<128||irq>160)kpanic("irq number out of range!");
@@ -72,36 +102,75 @@ uint32_t interrupt_handler(uint32_t esp, uint32_t irq)
     {
         uint32_t (*f)(uint32_t esp)=handlers[irq];
         esp=f(esp);
-        apic_eoi();
+        if(irq!=INTERRUPT_SYSCALL)apic_eoi();
         return esp;
     }
 
-    if(irq==INTERRUPT_APIC_TIMER)
+    if(irq==INTERRUPT_APIC_TIMER || irq==INTERRUPT_IPI)
     {
-        if(cpu==0)
-        {
-            compositor_wake2();
-            scheduler_wake_all();
-            scheduler_wake_worker(esp);
+        if(cpu==0) // thi
+        {
+            // limit compositor to APIC_TIMER freq.(60hz?)
+            if(irq==INTERRUPT_APIC_TIMER)compositor_wake2();
         }
-    }
-    if(irq==INTERRUPT_APIC_TIMER || irq==INTERRUPT_IPI)
-    {
-        esp=scheduler_run(esp,-1);
+        esp=scheduler_run(esp,-1); // just schedule to next task
         apic_eoi();
         return esp;
     }
 
     if(irq==INTERRUPT_SYSCALL)
     {
-        apic_eoi();
+        int pid=task_get_current_pid();
+
         uint32_t *stack;
-        stack=esp;
-        task_syscall(stack[11],stack[8],stack[10],stack[9]); //eax,ebx,ecx,edx
-        scheduler_wake_worker(esp);
-        esp=scheduler_run(esp,-1);
-        return esp;
+        stack=esp;
+
+        // extract variables from stack
+        // (NOTE we pass them in some fucked up order,
+        // they were pushed via pusha!
+        uint32_t eax=stack[11];
+        uint32_t ebx=stack[8];
+        uint32_t ecx=stack[10];
+        uint32_t edx=stack[9];
+
+        // only chance to do somehting before we reenable interrupts!
+        syscall_generic_prep(eax,edx,ecx,ebx,pid);
+
+        // now we don't give a shit about gettting interrupted, yeah!
+        // this is guaranteed to cause heavy troubles... since we are
+        // not reentrant and do not guard anythin..
+
+        // keep brute force rescheduling till' we get through....
+        // TODO: implement some waiting queue and wake only if there
+        // is any chance this will not fail again and again and again..
+        // TODO: get rid of this big KERNEL LOCK
+        // it will also not work with SMP
+        while(true)
+        {
+            // x86_cli();
+            int ok=syscall_generic_test(eax,edx,ecx,ebx,pid);
+            // x86_sti();
+            if(ok)break;
+            else __asm__("int $0x81");
+        }
+
+        // uff, once we got through we can do the syscall and get out
+        // of this place...
+        // x86_cli();
+        uint32_t ret=syscall_generic(eax,edx,ecx,ebx,pid);
+        // x86_sti();
+
+        stack[12]=0x1; // indicate with 0x1 we WANT to set a return
+                       // value in ebx (0x0 will skip it)
+        stack[13]=ret; // and THIS is our return value!
+
+        #ifdef LOG_SYSCALLS
+        klog("syscall ret=0x%08X",ret);
+        #endif
+
+        //__asm__("int $0x81");
+        return esp; // return to asm interrupt handler... and iret.
     }
 
     kpanic("unhandled interrupt %d",irq);
diff --git a/kernel/kernel.c b/kernel/kernel.c
index 31f169b..a630f94 100644
--- a/kernel/kernel.c
+++ b/kernel/kernel.c
@@ -175,7 +175,7 @@ void kernel_main(uint32_t eax,uint32_t ebx)
     klog("Unix Time = %u seconds",unixtime);
 
     klog("Symmetric Multi Processing (SMP) start ... ");
-    for(int i=1;i<cfg_acpi.processors;i++)apic_sipi(i,0x7);
+//  for(int i=1;i<cfg_acpi.processors;i++)apic_sipi(i,0x7);
 
     smp_bsp(); // start base processor
 }
diff --git a/kernel/kernel.h b/kernel/kernel.h
index 9e8d5e5..5685f7d 100644
--- a/kernel/kernel.h
+++ b/kernel/kernel.h
@@ -29,14 +29,17 @@ REFERENCES
 //#define FOOLOS_UNIT_TESTING // Run Unit Tests
 //#define FOOLOS_LOG_OFF // Turn off logging (disables serial port alltogether)
 //#define FOOLOS_COLORLESS // Turn off colors in log
-//#define LOG_SYSCALLS
+//#define LOG_SYSCALLS // this will lock !!!
 
 #define HIDE_FIXME
 
+//#define RINGBUFFER_WARN
+
 #define FOOLOS_APIC_FREQ 60 // how many apic ticks per second
+//#define FOOLOS_APIC_FREQ 1 // how many apic ticks per second SLOW-MO
 
 #define MAX_MOUNTS 10
-#define BIN_INIT "/bin/init"
+#define BIN_INIT "/bin/xterm"
 
 #define VESA_FONT_PATH "/doc/fonts/binfont.bin"
 
 #define FIFO_MAX_RINGBUFFERS 20
diff --git a/kernel/log.c b/kernel/log.c
index 179f9a8..0e9adf1 100644
--- a/kernel/log.c
+++ b/kernel/log.c
@@ -51,7 +51,7 @@ void log(char *module_name, int prio, char *format_string, ...)
     tfp_sprintf(buf_log,"\033[36;40m%s\033[33;40mCPU %02d:\033[31;40m%s:\033[37;40m %s\n",buf_time,cpu,module_name,buf_info);
     // tfp_sprintf(buf_log,"%sCPU %02d:%s: %s\n",buf_time,cpu,module_name,buf_info);
 
-    spinlock_spin(SPINLOCK_LOG);
+//  spinlock_spin(SPINLOCK_LOG);
     log_string(buf_log);
-    spinlock_release(SPINLOCK_LOG);
+//  spinlock_release(SPINLOCK_LOG);
 }
diff --git a/kernel/ringbuffer.c b/kernel/ringbuffer.c
index f11e02b..e3f5ce8 100644
--- a/kernel/ringbuffer.c
+++ b/kernel/ringbuffer.c
@@ -1,15 +1,14 @@
 #include "ringbuffer.h"
 #include "kmalloc.h"
 #include "log.h"
-#include "asm_x86.h"
 
 ringbuffer ringbuffer_init(uint32_t size)
 {
     ringbuffer f;
     f.data=kballoc(size);
     f.size=size*4096;
-    f.front=0;
-    f.back=0;
+    f.head=0;
+    f.tail=0;
     return f;
 }
 
@@ -20,41 +19,51 @@ void ringbuffer_free(ringbuffer *f)
 
 bool ringbuffer_full(ringbuffer* f)
 {
-    return((f->back+1)%f->size==f->front);
+    return((f->tail+1)%f->size==f->head);
+}
+
+bool ringbuffer_not_full(ringbuffer* f)
+{
+    return((f->tail+1)%f->size!=f->head);
 }
 
 bool ringbuffer_empty(ringbuffer* f)
 {
-    return(f->front==f->back);
+    return(f->head==f->tail);
 }
 
 bool ringbuffer_has(ringbuffer* f)
 {
-    return !ringbuffer_empty(f);
+    return(f->head!=f->tail);
 }
 
 bool ringbuffer_put(ringbuffer* f,uint8_t c)
 {
     if(ringbuffer_full(f))
     {
+#ifdef RINGBUFFER_WARN
        klog("ringbuffer is full!");
+#endif
        return false;
     }
 
-    f->data[f->back]=c;
-    f->back++;
-    f->back%=f->size;
-
+    f->data[f->tail]=c;
+    f->tail=(f->tail+1)%f->size;
     return true;
 }
 
 uint8_t ringbuffer_get(ringbuffer* f)
 {
-    if(ringbuffer_empty(f))return 0; // indistinguishable from value 0 :( // TODO
+    if(ringbuffer_empty(f))
+    {
+#ifdef RINGBUFFER_WARN
+        klog("ringbuffer is empty!");
+#endif
+        return 0; // indistinguishable from byte value 0x00.
+    }
 
-    uint8_t c = f->data[f->front];
-    f->front++;
-    f->front%=f->size;
+    uint8_t c = f->data[f->head];
+    f->head=(f->head+1)%f->size;
     return c;
 }
diff --git a/kernel/ringbuffer.h b/kernel/ringbuffer.h
index f68f766..9610150 100644
--- a/kernel/ringbuffer.h
+++ b/kernel/ringbuffer.h
@@ -8,16 +8,37 @@
  *
  * Requires
  * --------
+ *
  * Requires kballoc/kbfree - block allocation
  *
- * Thread
- * ------
- * This is __not__ threadsafe. It is your job to lock accesses to
- * reads/writes.
+ * Thread Safety
+ * -------------
+ *
+ * **NOTE** : If you have more than one producer and one consumer
+ * you will have to do some locking!
+ *
+ * ringbuffer_init() and ringbuffer_free() can be logically called only
+ * once anyway (for each ringbuffer).
+ *
+ * The structure is believed to be thread-safe as long as you have
+ * ONLY _ONE_ single producer AND _ONE_ single consumer PER ringbuffer.
+ *
+ * With one Consumer and one Producer:
+ *
+ * The Consumer can use:
+ * - ringbuffer_has()   - will not give false positives.
+ * - ringbuffer_empty() - will not give false negatives.
+ * - ringbuffer_get()   - acceseses only the head-pointer.
+ *
+ * The Producer can use:
+ * - ringbuffer_not_full() - will not give false positives
+ * - ringbuffer_full()     - will not give false negatives.
+ * - ringbuffer_put()      - accesses only the tail-pointer.
  *
  * Todo
  * ----
- * provide soemthing to read large blocks faster?
+ * provide soemthing to read large blocks faster.
+ *
  */
 
 #ifndef RINGBUFFER_H
@@ -27,12 +48,12 @@
 
 #include <stdbool.h>
 
 /** Ringbuffer sturcutre */
-typedef struct ringbuffer_struct
+typedef struct ringbuffer_struct
 {
-    uint32_t size;
-    uint32_t front;
-    uint32_t back;
-    uint8_t *data;
+    uint32_t size;  // size in bytes
+    uint32_t head;  // current head idx
+    uint32_t tail;  // current tail idx
+    uint8_t *data;  // the data itself
 }ringbuffer;
 
 /** Create a new fifo/ringbuffer of given size (in blocks) */
 ringbuffer ringbuffer_init(uint32_t blocks);
@@ -41,19 +62,22 @@ ringbuffer ringbuffer_init(uint32_t blocks);
 
 /** Deallocate buffer */
 void ringbuffer_free(ringbuffer *f);
 
-/** Put one _char_ into buffer. Returns true on success (i.e. buffer not full) */
-bool ringbuffer_put(ringbuffer*,uint8_t);
+/** Put one _byte_ into the buffer. */
+bool ringbuffer_put(ringbuffer*,uint8_t);
 
-/** Get a single _char_ from the buffer,
- *  Return value __0__ might indicate that the buffer is empty.
- *  check with _ringbuffer_has()_ to be sure.
- */
+/** Get a single _byte_ from the buffer. */
 uint8_t ringbuffer_get(ringbuffer*);
 
-/** Check if buffer is not empty */
-bool ringbuffer_has(ringbuffer*);
+/** Check if the buffer is not empty */
+bool ringbuffer_has(ringbuffer*);
 
-/** Check if buffer is full */
+/** Check if the buffer is empty */
+bool ringbuffer_empty(ringbuffer*);
+
+/** Check if the buffer is full */
 bool ringbuffer_full(ringbuffer* f);
 
+/** Check if the buffer is not full */
+bool ringbuffer_not_full(ringbuffer* f);
+
 #endif
diff --git a/kernel/scheduler.c b/kernel/scheduler.c
index 5a65b7e..8cadcb8 100644
--- a/kernel/scheduler.c
+++ b/kernel/scheduler.c
@@ -133,7 +133,7 @@ static uint32_t scheduler_schedule(uint32_t idx)
 
     current_task[cpu]=idx;
 
-    //klog("idx %d",idx);
+//  klog("cpu %d rescheduled to %d",cpu,idx);
     //klog("name: %s",task_list[cpu][idx].name);
     //klog("cpu %d / idx %d / pid %d / name: %5s",cpu,idx,task_list[cpu][idx].pid,task_list[cpu][idx].name);
 
@@ -171,10 +171,16 @@ volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t preference)
     }
     else task_list[cpu][current_task[cpu]].esp=oldesp;
 
+    if(preference>0) // try preference first if any..
+    {
+        uint32_t esp=scheduler_schedule(preference);
+        if(esp)return esp;
+    }
+
     for(int i=0;i<MAX_TASKS;i++)
     {
         int idx=(current_task[cpu]+1+i)%MAX_TASKS; // schedule round robin style
-        if(idx==2)continue;// skip sleeper here
+        if(idx==2||idx==preference)continue;// skip sleeper and preference
        uint32_t esp=scheduler_schedule(idx);
        if(esp)return esp;
     }
@@ -193,33 +199,7 @@ void scheduler_func()
 
     fixme("this will dadlock on context switch during log if never switched back before finish");
 
-    if(current_task[cpu]==0)
-    {
-        /*
-        while(1)
-        {
-            uint64_t t0=x86_rdtscp();
-            asm("hlt"); // sleeper task
-            uint64_t t1=x86_rdtscp();
-            klog("task 0 / slept cycles: l:%d h:%d",(t1-t0));
-        }
-        */
-
-        while(1)
-        {
-            if(cpu==0)
-            {
-                task_syscall_worker();
-            }
-            else
-            {
-                asm("hlt"); // sleeper task
-            }
-        }
-        //task_list[cpu][0].syscall=true; // sleep
-        //__asm__("int $0x81"); // wake scheduler! with IPI
-
-    }
+    if(current_task[cpu]==0)task_syscall_worker();
 
     if(current_task[cpu]==1)
     {
@@ -249,7 +229,7 @@ void scheduler_func()
             uint64_t t0=x86_rdtscp();
             asm("hlt"); // sleeper task
             uint64_t t1=x86_rdtscp();
-            // klog("task 2 (sleeper) / slept cycles: l:%d h:%d",(t1-t0));
+            klog("sleeper on cpu %d / slept cycles: l:%d h:%d",cpu,(t1-t0));
         }
     }
 
@@ -330,6 +310,21 @@ void scheduler_wake_all()
 void task_syscall_worker()
 {
     uint32_t cpu=smp_get(SMP_APIC_ID);
+
+    while(1)
+    {
+        if(cpu==0) // this stuff is processed by cpu 0
+        {
+            keyboard_worker();
+            mouse_worker();
+            compositor_swap_buffers();
+        }
+
+        __asm__("int $0x81"); // we are ready! force reschedule
+    }
+
+    /*
+    uint32_t cpu=smp_get(SMP_APIC_ID);
     //task_list[cpu][0].syscall=true; // sleep (syscall misused)
     //return; /// TODO: cross check all cpus!
@@ -344,10 +339,13 @@ void task_syscall_worker()
         //x86_cli(); // disable temporarily mouse/kb/timer interrupts.
         wake|=keyboard_worker();
         wake_mouse|=mouse_worker();
+        compositor_swap_buffers();
+        __asm__("int $0x81"); // wake scheduler! with IPI
+
+        //x86_sti();
 
         if(wake_mouse)compositor_wake();
-        compositor_swap_buffers();
 
         if(wake)scheduler_wake_all();
         //if(cpu==0)compositor_swap_buffers();
 
@@ -419,6 +417,7 @@ void task_syscall_worker()
             __asm__("int $0x81"); // wake scheduler! with IPI
         }
     }
+*/
 }
 
 // !!! REMEMBER THIS IS INSIDE AN INTERRUPT !!!
@@ -518,6 +517,7 @@ volatile int task_reset(uint32_t pid, uint32_t entry, uint32_t stack,uint32_t br
     stk[14]=entry;
     stk[17]=stack;
 
+    __asm__("int $0x81"); // now we also reschedule (at least for execve sake...)
     return 1;
 }
diff --git a/kernel/syscalls.c b/kernel/syscalls.c
index aa27ed8..23c88a7 100644
--- a/kernel/syscalls.c
+++ b/kernel/syscalls.c
@@ -391,6 +391,8 @@ int copy_args(char **in, char **out)
 // int execve(const char *filename, char *const argv[], char *const envp[]);
 int syscall_execve(const char *name, char *const argv[], char *const env[], int pid)
 {
+    uint32_t alloc;
+    uint32_t entry_global=load_elf(name,&alloc);
     fixme("not overwrite yourself?");
 
     int arg_count=0;
@@ -404,9 +406,6 @@ int syscall_execve(const char *name, char *const argv[], char *const env[], int
     if(env!=NULL)copy_args(env,env1);
     else env1=NULL;
 
-    uint32_t alloc;
-    uint32_t entry_global=load_elf(name,&alloc);
-
     if(!entry_global){
         set_errno(ENOENT);
         return -1;
@@ -416,8 +415,8 @@ int syscall_execve(const char *name, char *const argv[], char *const env[], int
     *--stack=argv1;
     *--stack=arg_count;
     *--stack=env1;
-    task_reset(pid,entry_global,stack,alloc);
     task_set_name(pid,name);
+    task_reset(pid,entry_global,stack,alloc);
 
     return 0;
 }
@@ -618,14 +617,17 @@ uint32_t syscall_gui_win(uint32_t p1, uint32_t p2, uint32_t p3, uint32_t pid)
     uint32_t fdn=nextfd(pid);
     fds[pid][fdn]=fd_from_ringbuffer();
     tty[pid]=fdn;
-    invl[pid]=ringbuffer_init(4);
+    invl[pid]=ringbuffer_init(1);
     task_add_win(pid,&invl[pid]);
 
     return 1;
 }
 
-/** Generics */
+/** Generics . prep before we reenable interrupts*/
 uint32_t syscall_generic_prep(uint32_t nr,uint32_t p1, uint32_t p2, uint32_t p3, uint32_t pid)
 {
+#ifdef LOG_SYSCALLS
+    klog("prep syscall [%s] for pid:%d",syscall_get_name(nr),pid);
+#endif
     struct timeval *tv=p2;
 
     switch(nr){
@@ -647,9 +649,12 @@ uint32_t syscall_generic_prep(uint32_t nr,uint32_t p1, uint32_t p2, uint32_t p3,
     return 1;
 }
 
-/** Generics */
-uint32_t syscall_generic_test(uint32_t nr,uint32_t p1, uint32_t p2, uint32_t p3, uint32_t pid)
+/** Generics. test if we can continue.. reschedule otherwise */
+volatile uint32_t syscall_generic_test(uint32_t nr,uint32_t p1, uint32_t p2, uint32_t p3, uint32_t pid)
 {
+#ifdef LOG_SYSCALLS
+    klog("testing syscall [%s] for pid:%d",syscall_get_name(nr),pid);
+#endif
     switch(nr){
 
     case SYSCALL_WAIT  : return !task_runs(p1);
@@ -664,7 +669,7 @@ uint32_t syscall_generic_test(uint32_t nr,uint32_t p1, uint32_t p2, uint32_t p3,
     return 1;//other syscalls never block for now.
 }
 
-/** Generics */
+/** Generics. finally do the work! */
 uint32_t syscall_generic(uint32_t nr,uint32_t p1, uint32_t p2, uint32_t p3, uint32_t pid)
 {
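The syscall branch above reads the userspace registers out of fixed slots of the frame the assembly stub saved via pusha (stack[8]=ebx, stack[9]=edx, stack[10]=ecx, stack[11]=eax) and reports the result back through stack[12]/stack[13]. The slot numbers in the sketch below are taken straight from this diff; the enum and helper names are hypothetical, and nothing further about the stub's frame layout is implied.

```c
/* Hypothetical, for readability only -- not part of this commit. */
enum syscall_frame_slot {
    FRAME_EBX      =  8,  /* p3: third syscall argument              */
    FRAME_EDX      =  9,  /* p1: first syscall argument              */
    FRAME_ECX      = 10,  /* p2: second syscall argument             */
    FRAME_EAX      = 11,  /* nr: the syscall number                  */
    FRAME_RET_FLAG = 12,  /* 0x1 = asm stub should patch ebx on iret */
    FRAME_RET_VAL  = 13,  /* the value the stub writes into ebx      */
};

static inline void frame_set_return(uint32_t *stack, uint32_t ret)
{
    stack[FRAME_RET_FLAG] = 0x1;  /* "we WANT to set a return value in ebx" */
    stack[FRAME_RET_VAL]  = ret;
}
```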
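Ignoring the logging and the commented-out cli/sti pairs, the new syscall path is a three-phase prep / test / execute sequence: a blocking call such as SYSCALL_WAIT keeps failing the test (task_runs(p1) still true) and forces a reschedule on every failed attempt. The sketch below only restates that control flow as one hypothetical helper, assuming the kernel's own declarations of syscall_generic_prep/_test/_generic and the diff's convention that `int $0x81` reaches the scheduler.

```c
/* Condensed restatement of the INTERRUPT_SYSCALL branch -- a reading aid,
 * not a second implementation. The helper name is made up; the calls and
 * the stack slots are the ones used in the diff. */
static uint32_t handle_syscall_frame(uint32_t *stack, int pid)
{
    uint32_t eax=stack[11], ebx=stack[8], ecx=stack[10], edx=stack[9];

    /* 1) prep: runs while interrupts are still off ("only chance" per the diff) */
    syscall_generic_prep(eax, edx, ecx, ebx, pid);

    /* 2) test: poll until the call can complete; each failure yields the CPU */
    while (!syscall_generic_test(eax, edx, ecx, ebx, pid))
        __asm__("int $0x81");

    /* 3) execute, then hand the result back through the saved frame */
    uint32_t ret = syscall_generic(eax, edx, ecx, ebx, pid);
    stack[12] = 0x1;   /* stub should patch ebx on the way out */
    stack[13] = ret;
    return ret;
}
```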
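The TODO about checking the userspace wrappers ties into the register convention visible here: the syscall number travels in eax, the three parameters in edx/ecx/ebx, and the assembly stub patches the result into ebx before iret when stack[12] is set to 0x1. A wrapper built on that convention might look like the sketch below; the vector is a placeholder because the value of INTERRUPT_SYSCALL is not shown in this diff, and the function itself is an assumption about the wrappers, not a copy of them.

```c
#include <stdint.h>

/* Placeholder: substitute whatever vector INTERRUPT_SYSCALL is wired to. */
#define SYSCALL_VECTOR "$0x80"

/* Hypothetical userspace wrapper following the convention above. */
static inline uint32_t sys_call3(uint32_t nr, uint32_t p1, uint32_t p2, uint32_t p3)
{
    uint32_t ret = p3;                     /* ebx carries p3 in, result out */
    __asm__ volatile("int " SYSCALL_VECTOR
                     : "+b"(ret)
                     : "a"(nr), "d"(p1), "c"(p2)
                     : "memory");
    return ret;
}
```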
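The reworked ringbuffer is documented as safe with exactly one producer and one consumer because ringbuffer_put() only advances the tail while ringbuffer_get() only advances the head. A minimal usage sketch against the API in ringbuffer.h; the buffer and function names are illustrative only and are not added by this commit.

```c
#include "ringbuffer.h"

static ringbuffer scancode_buf;            /* illustrative name */

void scancode_buf_setup(void)
{
    scancode_buf = ringbuffer_init(1);     /* 1 block = 4096 bytes */
}

/* Producer side (e.g. an interrupt handler): touches only the tail. */
void scancode_produce(uint8_t c)
{
    if (ringbuffer_not_full(&scancode_buf))   /* no false positives for the single producer */
        ringbuffer_put(&scancode_buf, c);
    /* otherwise drop the byte -- put() would refuse it anyway */
}

/* Consumer side (e.g. drained from task_syscall_worker): touches only the head. */
void scancode_consume(void)
{
    while (ringbuffer_has(&scancode_buf))     /* no false positives for the single consumer */
    {
        uint8_t c = ringbuffer_get(&scancode_buf);
        (void)c;                               /* ... hand the byte on ... */
    }
}
```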
