From 4ddca59e2c07a98988ffb07571d2b35c4c90f5ac Mon Sep 17 00:00:00 2001
From: Miguel
Date: Fri, 28 Sep 2018 01:52:07 +0200
Subject: reactivate threads. allow user space access to framebuffer

---
 kernel/kernel.h    |  1 +
 kernel/scheduler.c | 18 ++++++++++++------
 kernel/syscalls.c  | 11 ++++++++++-
 kernel/vmem.c      | 55 +++++++++++++++++++++++++++++++++---------------------
 4 files changed, 57 insertions(+), 28 deletions(-)

(limited to 'kernel')

diff --git a/kernel/kernel.h b/kernel/kernel.h
index a296b18..5546071 100644
--- a/kernel/kernel.h
+++ b/kernel/kernel.h
@@ -80,6 +80,7 @@ REFERENCES
 #define VMEM_USER_NEWLIB 0xF5000000 // 1 page / newlib reentrancy struct. 1 per thread
 #define VMEM_FRAMEBUFFER 0xF6000000 // 8192 pages (32megs) / identity mapped
+#define VMEM_FRAMEBUFFER_PAGES (1024*8)
 #define VMEM_EXT2_RAMIMAGE 0xF8000000 // 8192 pages (32megs) / identity mapped
 
 #endif
diff --git a/kernel/scheduler.c b/kernel/scheduler.c
index 20b69ed..0c24f94 100644
--- a/kernel/scheduler.c
+++ b/kernel/scheduler.c
@@ -35,6 +35,7 @@ static uint32_t nextPID()
 
 // we hold this stuff per cpu
 static volatile uint32_t current_task[SMP_MAX_PROC];
+static volatile uint32_t last_task[SMP_MAX_PROC];	// last non ring1 task
 
 // we hold this stuff per cpu
 static volatile struct task_list_struct
@@ -69,6 +70,7 @@ volatile void scheduler_init(uint32_t cpu, void *dir)
 	}
 
 	current_task[cpu]=0;
+	last_task[cpu]=0;
 
 	// need to make space on the esp stacks for pushing vals vias task_pusha
@@ -117,6 +119,8 @@ static uint32_t scheduler_schedule(uint32_t idx)
 	uint32_t cpu=smp_get(SMP_APIC_ID);
 	if(task_list[cpu][idx].active && !task_list[cpu][idx].syscall)
 	{
+		if(current_task[cpu]!=0)last_task[cpu]=current_task[cpu];
+
 		current_task[cpu]=idx;
 		install_tss(cpu,task_list[cpu][idx].esp0);
 		x86_set_page_directory(task_list[cpu][idx].vmem);
@@ -150,19 +154,21 @@ volatile uint32_t scheduler_run(uint32_t oldesp,uint32_t preference)
 	uint32_t esp;
 
 	esp=scheduler_schedule(preference);	// try preference
+	if(esp)return esp;
 
 	for(int i=0;i<[...]

[... the remaining kernel/scheduler.c hunks and the whole kernel/syscalls.c diff are not recoverable from the source ...]

diff --git a/kernel/vmem.c b/kernel/vmem.c
[...]
@@ [...] @@ void vmem_free_space_dir(pdirectory *dir,bool stack_only)
+			if(virt>=VMEM_FRAMEBUFFER&&virt<[...]
 			uint32_t src_pd=src_table->m_entries[j];
 			uint32_t src_phys=pd_entry_get_frame(&src_pd);
@@ -340,6 +344,7 @@ void vmem_free_space_dir(pdirectory *dir,bool stack_only)
 			{
 				mem_free_block(src_phys);
 			}
+			virt+=4096;
 		}
 	}
@@ -360,13 +365,7 @@ pdirectory* vmem_new_space_dir(pdirectory *copy_dir,bool stack_only)
 		if(!pt_entry_is_user(src_pt))dir->m_entries[i]=src_pt;
 	}
 
-	// threads share this //
-	if(stack_only)
-	{
-		// TODO
-		kpanic("not impl!");
-	}
-	else
+	if(!stack_only)
 	{
 		vmem_add_alloc(dir,VMEM_USER_PROG,VMEM_USER_PROG_PAGES,true);
 		vmem_add_alloc(dir,VMEM_USER_ENV,1,true);
@@ -392,6 +391,8 @@ pdirectory* vmem_new_space_dir(pdirectory *copy_dir,bool stack_only)
 		for(int j=0;j<1024;j++)
 		{
+			if(virt>=VMEM_FRAMEBUFFER&&virt<[...]
+
 			uint32_t src_pd=src_table->m_entries[j];
 			uint32_t dst_pd=dst_table->m_entries[j];
@@ -400,24 +401,36 @@ pdirectory* vmem_new_space_dir(pdirectory *copy_dir,bool stack_only)
 			if(src_pd)
 			{
-				//klog("copy virt: %x / phys: %x -> %x",virt,src_phys,dst_phys);
-
-				vmem_clear_one(mydir,VMEM_COPY_PAGE);
-				vmem_clear_one(mydir,VMEM_COPY_PAGE+4096);
-
-				vmem_add_remap(mydir,src_phys,VMEM_COPY_PAGE,1,false);
-				vmem_add_remap(mydir,dst_phys,VMEM_COPY_PAGE+4096,1,false);
-
-				x86_invlpg(VMEM_COPY_PAGE);	// refresh TLB
-				x86_invlpg(VMEM_COPY_PAGE+4096);	// refresh TLB
-
-				memcpy(VMEM_COPY_PAGE+4096,VMEM_COPY_PAGE,4096);
-			}
+				if(stack_only&&(virt==VMEM_USER_ENV||(virt>=VMEM_USER_PROG&&virt<[...]
[...]
+					//klog("copy virt: %x / phys: %x -> %x",virt,src_phys,dst_phys);
+
+					vmem_clear_one(mydir,VMEM_COPY_PAGE);
+					vmem_clear_one(mydir,VMEM_COPY_PAGE+4096);
+
+					vmem_add_remap(mydir,src_phys,VMEM_COPY_PAGE,1,false);
+					vmem_add_remap(mydir,dst_phys,VMEM_COPY_PAGE+4096,1,false);
+
+					x86_invlpg(VMEM_COPY_PAGE);	// refresh TLB
+					x86_invlpg(VMEM_COPY_PAGE+4096);	// refresh TLB
+
+					memcpy(VMEM_COPY_PAGE+4096,VMEM_COPY_PAGE,4096);
+				}
+			}
 			virt+=4096;
 		}
 		}
 		else virt+=4096*1024;
 	}
+
+	vmem_add_remap(dir,fb_addr,VMEM_FRAMEBUFFER,VMEM_FRAMEBUFFER_PAGES,true);	//32megs should be enough for 4k (think about pitch)
+
 	return dir;
 }
--
cgit v1.2.3
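The new call in vmem_new_space_dir maps a 32 MiB window at VMEM_FRAMEBUFFER into every address space (VMEM_FRAMEBUFFER_PAGES = 8192 pages), and the inline comment wonders whether that is enough for a 4k mode once pitch is taken into account. Below is a minimal standalone sanity check, not part of the patch, assuming a 3840x2160 mode at 4 bytes per pixel with the pitch rounded up to a 256-byte boundary; both the mode and the padding rule are assumptions, the real values depend on the bootloader or GPU mode set.

#include <stdio.h>

#define PAGE_SIZE              4096u
#define VMEM_FRAMEBUFFER_PAGES (1024u * 8u)   /* same value the patch adds to kernel/kernel.h */

int main(void)
{
    /* assumed 4k mode: 3840x2160 at 4 bytes per pixel, pitch padded to a 256-byte boundary */
    unsigned width  = 3840, height = 2160, bytes_pp = 4;
    unsigned pitch  = (width * bytes_pp + 255u) & ~255u;   /* 15360 bytes per row */
    unsigned needed = pitch * height;                      /* 33,177,600 bytes */
    unsigned window = VMEM_FRAMEBUFFER_PAGES * PAGE_SIZE;  /* 33,554,432 bytes (32 MiB) */

    printf("framebuffer: %u bytes, mapped window: %u bytes -> %s\n",
           needed, window, needed <= window ? "fits" : "does not fit");
    return 0;
}

With a 15360-byte pitch the framebuffer needs 33,177,600 bytes against the 33,554,432-byte window, so it fits with a little headroom; a driver that padded each row out to 16384 bytes would need 35,389,440 bytes and would overflow the window, which is presumably what the "think about pitch" note is warning about.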