memory #7

Merged
xamidev merged 7 commits from memory into main 2026-01-04 09:28:00 +01:00
2 changed files with 7 additions and 60 deletions
Showing only changes of commit 923758a4ea


@@ -43,63 +43,10 @@ static uint64_t* alloc_page_table()
 __attribute__((aligned(4096)))
 uint64_t *kernel_pml4;
-// Will map a page ONLY according to the kernel_pml4 root table.
-// For kernel initialization/mapping only
-// Deprecated, will be removed
-/* void paging_kmap_page(uint64_t virt, uint64_t phys, uint64_t flags)
-{
-	virt = PAGE_ALIGN_DOWN(virt);
-	phys = PAGE_ALIGN_DOWN(phys);
-	// Translate the virt address into page table indexes
-	uint64_t pml4_i = PML4_INDEX(virt);
-	uint64_t pdpt_i = PDPT_INDEX(virt);
-	uint64_t pd_i = PD_INDEX(virt);
-	uint64_t pt_i = PT_INDEX(virt);
-	uint64_t *pdpt, *pd, *pt;
-	// PML4
-	// If the entry at index is not present, allocate enough space for it
-	// then populate the entry with correct addr + flags
-	if (!(kernel_pml4[pml4_i] & PTE_PRESENT))
-	{
-		pdpt = alloc_page_table();
-		kernel_pml4[pml4_i] = VIRT_TO_PHYS(pdpt) | PTE_PRESENT | PTE_WRITABLE;
-	}
-	else {
-		pdpt = (uint64_t *)PHYS_TO_VIRT(kernel_pml4[pml4_i] & ~0xFFFULL);
-	}
-	// PDPT: same here
-	if (!(pdpt[pdpt_i] & PTE_PRESENT))
-	{
-		pd = alloc_page_table();
-		pdpt[pdpt_i] = VIRT_TO_PHYS(pd) | PTE_PRESENT | PTE_WRITABLE;
-	}
-	else {
-		pd = (uint64_t *)PHYS_TO_VIRT(pdpt[pdpt_i] & ~0xFFFULL);
-	}
-	// PD: and here
-	if (!(pd[pd_i] & PTE_PRESENT))
-	{
-		pt = alloc_page_table();
-		pd[pd_i] = VIRT_TO_PHYS(pt) | PTE_PRESENT | PTE_WRITABLE;
-	}
-	else {
-		pt = (uint64_t *)PHYS_TO_VIRT(pd[pd_i] & ~0xFFFULL);
-	}
-	// PT: finally, populate the page table entry
-	pt[pt_i] = phys | flags | PTE_PRESENT;
-	// Flush TLB (apply changes)
-	invlpg((void *)virt);
-} */
-// Same as above, only this one takes any root table (not only kernel)
-// Duplicate code but don't worry about it, I'll refactor one day
+// Map a page, taking virt and phys address. This will go through the paging structures
+// beginning at the given root table, translate the virtual address in indexes in
+// page table/directories, and then mapping the correct page table entry with the
+// given physical address + flags
 void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags)
 {
 	virt = PAGE_ALIGN_DOWN(virt);
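
This hunk drops the deprecated, commented-out paging_kmap_page (which only walked kernel_pml4) and keeps paging_map_page, which per the removed comment does the same walk from any root table: the virtual address is split into one index per level (PML4, PDPT, PD, PT), missing intermediate tables are allocated on demand, the final page table entry is written with the physical address plus flags, and invlpg invalidates the stale TLB entry. The removed code also admits the per-level blocks are duplicated ("I'll refactor one day"). The sketch below shows one way that walk could be factored into a helper; it is an illustration only, not the code in this commit, and it reuses only the macros and helpers visible in the removed function (PTE_PRESENT, PTE_WRITABLE, alloc_page_table, VIRT_TO_PHYS, PHYS_TO_VIRT, the *_INDEX macros, PAGE_ALIGN_DOWN, invlpg).

	// Sketch only (not part of this PR): the four "allocate-or-descend"
	// blocks from the removed paging_kmap_page, factored into one helper.
	static uint64_t* paging_get_or_alloc(uint64_t *table, uint64_t index)
	{
		if (!(table[index] & PTE_PRESENT))
		{
			// Entry missing: allocate the next-level table and point the entry at it
			uint64_t *next = alloc_page_table();
			table[index] = VIRT_TO_PHYS(next) | PTE_PRESENT | PTE_WRITABLE;
			return next;
		}
		// Entry present: mask off the low flag bits and follow the pointer
		return (uint64_t *)PHYS_TO_VIRT(table[index] & ~0xFFFULL);
	}

	void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags)
	{
		virt = PAGE_ALIGN_DOWN(virt);
		phys = PAGE_ALIGN_DOWN(phys);

		// Walk PML4 -> PDPT -> PD -> PT, creating levels as needed
		uint64_t *pdpt = paging_get_or_alloc(root_table, PML4_INDEX(virt));
		uint64_t *pd = paging_get_or_alloc(pdpt, PDPT_INDEX(virt));
		uint64_t *pt = paging_get_or_alloc(pd, PD_INDEX(virt));

		// Write the final entry and invalidate the stale TLB entry for virt
		pt[PT_INDEX(virt)] = phys | flags | PTE_PRESENT;
		invlpg((void *)virt);
	}

With the helper, the function body shrinks to the three-level walk plus the final PTE write, which is exactly what the new header comment describes.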


@@ -55,9 +55,9 @@ void* vmm_alloc(size_t length, size_t flags)
 	// the next page so its coherent with the PMM
 	size_t len = ALIGN_UP(length, PAGE_SIZE);
-	// Some linked list shenanigans will be here
-	// but for now we'd need some kheap to kmalloc the linked list items
-	// else we can't do it
+	// Need to implement this (as linked list)
+	// but for now kernel heap is sufficient
+	// The VMM will prob be more useful when we have userspace
 }
 void vmm_init()
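
In vmm_alloc, ALIGN_UP rounds the requested length up to a whole number of pages so the allocation stays consistent with the page-granular PMM (for example, a 5000-byte request becomes 8192 bytes, i.e. two 4 KiB pages). The rewritten comments defer the actual bookkeeping until a kernel heap exists to allocate linked-list nodes from. Below is a minimal sketch of the kind of tracking those comments hint at; vmm_region, vmm_track and kmalloc are assumed names invented here for illustration, not code from this repository.

	// Sketch only: linked-list bookkeeping for VMM allocations,
	// usable once a kernel heap (kmalloc) is available.
	typedef struct vmm_region {
		uint64_t base;             // virtual base address of the allocation
		size_t length;             // page-aligned length in bytes
		size_t flags;              // mapping flags requested by the caller
		struct vmm_region *next;   // next live allocation
	} vmm_region;

	static vmm_region *vmm_regions; // head of the allocation list

	// Record an allocation so a later vmm_free(base) can find its length,
	// unmap the pages and return the frames to the PMM.
	static void vmm_track(uint64_t base, size_t len, size_t flags)
	{
		vmm_region *r = kmalloc(sizeof(*r)); // needs the kernel heap
		r->base = base;
		r->length = len;
		r->flags = flags;
		r->next = vmm_regions;
		vmm_regions = r;
	}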