From 923758a4ea160e09f9d3e43dd80d44ba919a9723 Mon Sep 17 00:00:00 2001
From: xamidev
Date: Sun, 4 Jan 2026 09:24:25 +0100
Subject: [PATCH] Remove useless code/comments

---
 src/mem/paging/paging.c | 61 +++--------------------------------------
 src/mem/paging/vmm.c    |  6 ++--
 2 files changed, 7 insertions(+), 60 deletions(-)

diff --git a/src/mem/paging/paging.c b/src/mem/paging/paging.c
index 4e50d2e..ce07aa1 100644
--- a/src/mem/paging/paging.c
+++ b/src/mem/paging/paging.c
@@ -43,63 +43,10 @@ static uint64_t* alloc_page_table()
 
 __attribute__((aligned(4096))) uint64_t *kernel_pml4;
 
-// Will map a page ONLY according to the kernel_pml4 root table.
-// For kernel initialization/mapping only
-// Deprecated, will be removed
-/* void paging_kmap_page(uint64_t virt, uint64_t phys, uint64_t flags)
-{
-	virt = PAGE_ALIGN_DOWN(virt);
-	phys = PAGE_ALIGN_DOWN(phys);
-
-	// Translate the virt address into page table indexes
-	uint64_t pml4_i = PML4_INDEX(virt);
-	uint64_t pdpt_i = PDPT_INDEX(virt);
-	uint64_t pd_i = PD_INDEX(virt);
-	uint64_t pt_i = PT_INDEX(virt);
-
-	uint64_t *pdpt, *pd, *pt;
-
-	// PML4
-	// If the entry at index is not present, allocate enough space for it
-	// then populate the entry with correct addr + flags
-	if (!(kernel_pml4[pml4_i] & PTE_PRESENT))
-	{
-		pdpt = alloc_page_table();
-		kernel_pml4[pml4_i] = VIRT_TO_PHYS(pdpt) | PTE_PRESENT | PTE_WRITABLE;
-	}
-	else {
-		pdpt = (uint64_t *)PHYS_TO_VIRT(kernel_pml4[pml4_i] & ~0xFFFULL);
-	}
-
-	// PDPT: same here
-	if (!(pdpt[pdpt_i] & PTE_PRESENT))
-	{
-		pd = alloc_page_table();
-		pdpt[pdpt_i] = VIRT_TO_PHYS(pd) | PTE_PRESENT | PTE_WRITABLE;
-	}
-	else {
-		pd = (uint64_t *)PHYS_TO_VIRT(pdpt[pdpt_i] & ~0xFFFULL);
-	}
-
-	// PD: and here
-	if (!(pd[pd_i] & PTE_PRESENT))
-	{
-		pt = alloc_page_table();
-		pd[pd_i] = VIRT_TO_PHYS(pt) | PTE_PRESENT | PTE_WRITABLE;
-	}
-	else {
-		pt = (uint64_t *)PHYS_TO_VIRT(pd[pd_i] & ~0xFFFULL);
-	}
-
-	// PT: finally, populate the page table entry
-	pt[pt_i] = phys | flags | PTE_PRESENT;
-
-	// Flush TLB (apply changes)
-	invlpg((void *)virt);
-} */
-
-// Same as above, only this one takes any root table (not only kernel)
-// Duplicate code but don't worry about it, I'll refactor one day
+// Map a page given a virtual and a physical address. Walks the paging structures
+// starting at the given root table, translates the virtual address into the
+// page directory/table indexes, then fills the matching page table entry with
+// the given physical address + flags.
 void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags)
 {
 	virt = PAGE_ALIGN_DOWN(virt);
diff --git a/src/mem/paging/vmm.c b/src/mem/paging/vmm.c
index 630a479..ec507b6 100644
--- a/src/mem/paging/vmm.c
+++ b/src/mem/paging/vmm.c
@@ -55,9 +55,9 @@ void* vmm_alloc(size_t length, size_t flags)
 	// the next page so its coherent with the PMM
 	size_t len = ALIGN_UP(length, PAGE_SIZE);
 
-	// Some linked list shenanigans will be here
-	// but for now we'd need some kheap to kmalloc the linked list items
-	// else we can't do it
+	// Need to implement this (as a linked list)
+	// but for now the kernel heap is sufficient
+	// The VMM will probably be more useful once we have userspace
 }
 
 void vmm_init()
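
For reference, the comment added above paging_map_page describes the standard x86-64
4-level walk (PML4 -> PDPT -> PD -> PT). Below is a minimal, self-contained sketch of
the index translation step, assuming the usual 9-bits-per-level split for 4 KiB pages;
the repository's actual PML4_INDEX/PDPT_INDEX/PD_INDEX/PT_INDEX macros may be defined
differently, and the example address is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Assumed definitions, modeled on standard x86-64 4 KiB paging */
#define PML4_INDEX(v) (((v) >> 39) & 0x1FFULL) /* bits 47..39 */
#define PDPT_INDEX(v) (((v) >> 30) & 0x1FFULL) /* bits 38..30 */
#define PD_INDEX(v)   (((v) >> 21) & 0x1FFULL) /* bits 29..21 */
#define PT_INDEX(v)   (((v) >> 12) & 0x1FFULL) /* bits 20..12 */

int main(void)
{
	/* Hypothetical higher-half kernel address, for illustration only */
	uint64_t virt = 0xFFFFFFFF80201000ULL;

	printf("pml4=%llu pdpt=%llu pd=%llu pt=%llu\n",
	       (unsigned long long)PML4_INDEX(virt),
	       (unsigned long long)PDPT_INDEX(virt),
	       (unsigned long long)PD_INDEX(virt),
	       (unsigned long long)PT_INDEX(virt));
	return 0;
}

Each index selects one of the 512 entries in its table; as the removed paging_kmap_page
code showed, the walk allocates the next-level table on demand when an entry is not
present and finally writes phys | flags into the page table entry.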