path: root/kernel/vmem.c
author     Miguel <m.i@gmx.at>  2018-09-12 15:23:38 +0200
committer  Miguel <m.i@gmx.at>  2018-09-12 15:23:38 +0200
commit     c72944b5646863ffaaaff94dc03b939a08566203 (patch)
tree       3ce3d74fc27b297cd6958d0d8ba588ccbbe16bfb /kernel/vmem.c
parent     800b1a878ec34a8ff30ed093e20561182fa6ae36 (diff)
struggling with vmem
Diffstat (limited to 'kernel/vmem.c')
-rw-r--r--  kernel/vmem.c  56
1 file changed, 52 insertions, 4 deletions
diff --git a/kernel/vmem.c b/kernel/vmem.c
index 3752725..10c2401 100644
--- a/kernel/vmem.c
+++ b/kernel/vmem.c
@@ -1,6 +1,7 @@
#include <stdlib.h>
#include "kernel.h"
+#include "log.h"
#include "vmem.h"
#include "asm_x86.h"
@@ -200,10 +201,20 @@ static pdirectory* vmem_clean_dir()
return dir;
}
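+// drop the mapping of a single virtual page: if a page table exists for
+// 'virt' in 'dir', zero that page's table entry (the table itself is kept)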
+static void vmem_clear_one(pdirectory* dir,uint32_t virt)
+{
+ pd_entry* entry = &dir->m_entries [PAGE_DIRECTORY_INDEX (virt) ];
+ ptable* table;
+ if(*entry!=0){
+ table= pd_entry_get_frame (entry);
+ table->m_entries [PAGE_TABLE_INDEX (virt) ] = 0;
+ }
+}
+
// addresses need to be page aligned. (or will be forced down)
static void vmem_add_generic(pdirectory* dir,uint32_t phys,uint32_t virt,uint32_t pages, bool alloc, bool user)
{
- fixme("make sure the pages are marked as used in the physical mem manager, really?");
+ //fixme("make sure the pages are marked as used in the physical mem manager, really?");
//force align
phys/=4096;
@@ -257,6 +268,7 @@ static void vmem_add_generic(pdirectory* dir,uint32_t phys,uint32_t virt,uint32_
pages--;
}
}
+
static void vmem_add_alloc(pdirectory* dir,uint32_t addr,uint32_t pages,bool user)
{
vmem_add_generic(dir,addr,addr,pages, true,user);
@@ -266,6 +278,7 @@ static void vmem_add_remap(pdirectory* dir,uint32_t phys,uint32_t virt,uint32_t
{
vmem_add_generic(dir,phys,virt,pages, false,user);
}
+
static void vmem_add_identity(pdirectory* dir,uint32_t addr,uint32_t pages, bool user)
{
vmem_add_generic(dir,addr,addr,pages, false,user);
@@ -290,10 +303,10 @@ pdirectory* vmem_kernel_dir()
vmem_add_remap(dir,mod_start,VMEM_EXT2_RAMIMAGE,1024*8,false);//32megs for ramimage: TODO: check if enough?
vmem_add_alloc(dir,VMEM_CPU_PRIVATE,4,false);
- vmem_add_alloc(dir,VMEM_CPU_STACK_TOP-4096*4,4,false);
+ vmem_add_alloc(dir,VMEM_CPU_STACK_BOT,4,false);
vmem_add_alloc(dir,VMEM_USER_PROG,1024*2,true);
- vmem_add_alloc(dir,VMEM_USER_STACK_TOP-4096*4,4,true);
+ vmem_add_alloc(dir,VMEM_USER_STACK_TOP-4096*10,10,true);
return dir;
}
@@ -301,7 +314,7 @@ pdirectory* vmem_kernel_dir()
pdirectory* vmem_new_space_dir(pdirectory *copy_dir,bool stack_only)
{
pdirectory* dir = vmem_clean_dir();
- //copy non-user pages.
+ //link non-user pages.
for(int i=0;i<1024;i++)
{
uint32_t src_pt=copy_dir->m_entries [i];
@@ -311,6 +324,41 @@ pdirectory* vmem_new_space_dir(pdirectory *copy_dir,bool stack_only)
vmem_add_alloc(dir,VMEM_USER_PROG,1024*2,true);
vmem_add_alloc(dir,VMEM_USER_STACK_TOP-4096*4,4,true);
+ x86_cli(); // please dear timer, do not schedule us away
+ pdirectory* orig=x86_get_page_directory();
+ x86_set_page_directory(dir);
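+ // with 'dir' active, writes through 'virt' go into the new address space
+ // and VMEM_COPY_PAGE can serve as a temporary window onto the old frames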
+
+ //copy user pages.
+ //TODO: honour the stack_only flag (copy only the user stack)
+ uint32_t virt=0;
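+ // 'virt' tracks the address the current entry maps: each directory entry
+ // covers 4 MiB (1024 pages), each table entry a single 4 KiB page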
+ for(int i=0;i<1024;i++)
+ {
+ uint32_t src_pt=copy_dir->m_entries [i];
+ if(pt_entry_is_user(src_pt))
+ {
+ ptable *table=pt_entry_get_frame(&src_pt);
+ for(int j=0;j<1024;j++)
+ {
+ uint32_t src_pd=table->m_entries[j];
+ uint32_t phys=pd_entry_get_frame(&src_pd);
+ if(src_pd)
+ {
+ // klog("copy %x to %x",phys,virt);
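+ // map the old frame at the VMEM_COPY_PAGE window of the new space, flush
+ // its TLB entry, then copy it into the page already allocated at 'virt'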
+ vmem_clear_one(dir,VMEM_COPY_PAGE);
+ vmem_add_remap(dir,phys,VMEM_COPY_PAGE,1,false);
+ x86_invlpg(VMEM_COPY_PAGE); // refresh TLB
+ memcpy((void*)virt,(const void*)VMEM_COPY_PAGE,4096);
+ }
+ virt+=4096;
+
+ }
+
+ }
+ else virt+=4096*1024; // non-user directory entry: skip the 4 MiB it covers
+ }
+ x86_set_page_directory(orig); // switch back to the caller's address space
+ x86_sti();
+
return dir;
}