Small kernel heap for VMM internals, kmalloc/kfree

2026-01-03 13:48:10 +01:00
parent c065df6ff3
commit e18b73c8a0
11 changed files with 322 additions and 30 deletions


@@ -39,10 +39,14 @@ static uint64_t* alloc_page_table()
return virt;
}
// Kernel paging root table; it will be loaded into CR3
__attribute__((aligned(4096)))
static uint64_t *kernel_pml4;
uint64_t *kernel_pml4;
void map_page(uint64_t virt, uint64_t phys, uint64_t flags)
// Will map a page ONLY according to the kernel_pml4 root table.
// For kernel initialization/mapping only
// Deprecated, will be removed
/* void paging_kmap_page(uint64_t virt, uint64_t phys, uint64_t flags)
{
virt = PAGE_ALIGN_DOWN(virt);
phys = PAGE_ALIGN_DOWN(phys);
@@ -90,37 +94,87 @@ void map_page(uint64_t virt, uint64_t phys, uint64_t flags)
// PT: finally, populate the page table entry
pt[pt_i] = phys | flags | PTE_PRESENT;
// Flush TLB (apply changes)
invlpg((void *)virt);
} */
// Same as above, only this one takes any root table (not only kernel)
// Duplicate code but don't worry about it, I'll refactor one day
void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags)
{
virt = PAGE_ALIGN_DOWN(virt);
phys = PAGE_ALIGN_DOWN(phys);
// Translate the virt address into page table indexes
uint64_t pml4_i = PML4_INDEX(virt);
uint64_t pdpt_i = PDPT_INDEX(virt);
uint64_t pd_i = PD_INDEX(virt);
uint64_t pt_i = PT_INDEX(virt);
uint64_t *pdpt, *pd, *pt;
// PML4
// If the entry at this index is not present, allocate a fresh page table,
// then point the entry at its physical address with the right flags
if (!(root_table[pml4_i] & PTE_PRESENT))
{
pdpt = alloc_page_table();
root_table[pml4_i] = VIRT_TO_PHYS(pdpt) | PTE_PRESENT | PTE_WRITABLE;
}
else {
pdpt = (uint64_t *)PHYS_TO_VIRT(root_table[pml4_i] & ~0xFFFULL);
}
// PDPT: same here
if (!(pdpt[pdpt_i] & PTE_PRESENT))
{
pd = alloc_page_table();
pdpt[pdpt_i] = VIRT_TO_PHYS(pd) | PTE_PRESENT | PTE_WRITABLE;
}
else {
pd = (uint64_t *)PHYS_TO_VIRT(pdpt[pdpt_i] & ~0xFFFULL);
}
// PD: and here
if (!(pd[pd_i] & PTE_PRESENT))
{
pt = alloc_page_table();
pd[pd_i] = VIRT_TO_PHYS(pt) | PTE_PRESENT | PTE_WRITABLE;
}
else {
pt = (uint64_t *)PHYS_TO_VIRT(pd[pd_i] & ~0xFFFULL);
}
// PT: finally, populate the page table entry
pt[pt_i] = phys | flags | PTE_PRESENT;
// Flush TLB (apply changes)
invlpg((void *)virt);
}
uint64_t kernel_phys_base;
uint64_t kernel_virt_base;
void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb)
{
// We should map the kernel, GDT, IDT, stack, framebuffer.
// Optionally we could map ACPI tables (we can find them in the Limine memmap)
uint64_t kernel_phys_base = kaddr->physical_base;
uint64_t kernel_virt_base = kaddr->virtual_base;
kernel_phys_base = kaddr->physical_base;
kernel_virt_base = kaddr->virtual_base;
DEBUG("Kernel lives at virt=0x%p phys=0x%p", kernel_virt_base, kernel_phys_base);
kernel_pml4 = alloc_page_table();
// for debug
uint64_t page_count = 0;
// First 16 MB identity-mapped (phys = virt)
// This is because there might be some leftover stuff in the lower phys addresses
// from boot/bios/acpi/...
for (uint64_t i=0; i<0x1000000; i += PAGE_SIZE)
{
map_page(i, i, PTE_WRITABLE);
page_count++;
}
DEBUG("Mapped %u pages for the identity-mapping of the first 16 MB", page_count); page_count = 0;
// HHDM map first 1 GB using given offset
for (uint64_t i=0; i<0x40000000; i += PAGE_SIZE)
{
map_page(i+hhdm_off, i, PTE_WRITABLE);
//paging_kmap_page(i+hhdm_off, i, PTE_WRITABLE);
paging_map_page(kernel_pml4, i+hhdm_off, i, PTE_WRITABLE);
page_count++;
}
DEBUG("Mapped %u pages for first 1GB (HHDM)", page_count); page_count = 0;
@@ -131,7 +185,8 @@ void paging_init(struct limine_kernel_address_response* kaddr, struct limine_fra
// For now who gives a shit, let's RWX all kernel
for (uint64_t i = 0; i < KERNEL_SIZE; i += PAGE_SIZE)
{
map_page(kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
//paging_kmap_page(kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
paging_map_page(kernel_pml4, kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
page_count++;
}
DEBUG("Mapped %u pages for kernel", page_count); page_count = 0;
@@ -145,7 +200,8 @@ void paging_init(struct limine_kernel_address_response* kaddr, struct limine_fra
// Map the framebuffer (with cache-disable & write-through)
for (uint64_t i=0; i<fb_pages; i++)
{
map_page(fb_virt+i*PAGE_SIZE, fb_phys+i*PAGE_SIZE, PTE_WRITABLE | PTE_PCD | PTE_PWT);
//paging_kmap_page(fb_virt+i*PAGE_SIZE, fb_phys+i*PAGE_SIZE, PTE_WRITABLE | PTE_PCD | PTE_PWT);
paging_map_page(kernel_pml4, fb_virt+i*PAGE_SIZE, fb_phys+i*PAGE_SIZE, PTE_WRITABLE | PTE_PCD | PTE_PWT);
page_count++;
}
DEBUG("Mapped %u pages for framebuffer", page_count);


@@ -8,6 +8,7 @@
#include <limine.h>
void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb);
void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags);
extern uint64_t hhdm_off;


@@ -25,7 +25,6 @@ We will look for the biggest usable physical memory region
and use this for the bitmap. The reserved memory will be ignored.
*/
struct usable_memory* usable_mem;
struct limine_memmap_entry* biggest_entry;
static void pmm_find_biggest_usable_region(struct limine_memmap_response* memmap, struct limine_hhdm_response* hhdm)
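The "biggest usable region" bitmap described above is a classic first-fit frame allocator. A hypothetical sketch of the idea (pmm_alloc_sketch is an illustrative name; the real pmm_alloc in this repo may differ):

// One bit per 4 KiB frame of the chosen region: 1 = used, 0 = free.
static uint8_t *bitmap;       // backing storage inside the biggest region
static uint64_t frame_count;  // frames tracked by the bitmap
static uint64_t region_base;  // physical base of the region

uintptr_t pmm_alloc_sketch(void)
{
    for (uint64_t i = 0; i < frame_count; i++)
    {
        if (!(bitmap[i / 8] & (1 << (i % 8))))
        {
            bitmap[i / 8] |= (1 << (i % 8)); // mark frame used
            return region_base + i * PAGE_SIZE;
        }
    }
    return 0; // out of physical memory
}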


@@ -7,12 +7,4 @@ void pmm_init(struct limine_memmap_response* memmap, struct limine_hhdm_response
void pmm_free(uintptr_t addr);
uintptr_t pmm_alloc();
// Might be upgraded to a freelist later.
// For now, we can take the biggest usable region and we will be fine.
struct usable_memory
{
uint64_t base; // physical
uint64_t length;
};
#endif
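The freelist upgrade the header comment hints at would store the list inside the free frames themselves, so no separate bitmap is needed. A hypothetical sketch (illustrative names, and PHYS_TO_VIRT is assumed to be the HHDM translation used elsewhere in this commit):

// Each free frame's first 8 bytes hold the physical address of the next
// free frame; 0 terminates the list.
static uintptr_t freelist_head;

uintptr_t pmm_alloc_freelist(void)
{
    if (!freelist_head)
        return 0; // out of physical memory
    uintptr_t frame = freelist_head;
    freelist_head = *(uintptr_t *)PHYS_TO_VIRT(frame);
    return frame;
}

void pmm_free_freelist(uintptr_t frame)
{
    *(uintptr_t *)PHYS_TO_VIRT(frame) = freelist_head;
    freelist_head = frame;
}

Alloc and free both become O(1), at the cost of losing the bitmap's easy double-free detection.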

src/mem/paging/vmm.c (new file, 66 lines)

@@ -0,0 +1,66 @@
/*
The VMM (virtual memory manager) has two roles:
- mapping pages
- unmapping pages
in a specified virtual address space, whereas the PMM
allocates/frees 4 KiB frames ("physical pages").
*/
#include "vmm.h"
#include "paging.h"
#include <stddef.h>
#include "pmm.h"
#include <kernel.h>
void* vmm_pt_root = 0;
// Linked list head for virtual memory objects
struct vm_object* vm_objs = NULL;
uint64_t convert_x86_vm_flags(size_t flags)
{
uint64_t value = 0;
if (flags & VM_FLAG_WRITE)
{
value |= PTE_WRITABLE;
}
if (flags & VM_FLAG_USER)
{
value |= PTE_USER;
}
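// NX is opt-out on x86: a page is executable unless the no-execute bit
// (PTE_NOEXEC, which needs EFER.NXE enabled) is set, hence the inverted test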
if ((flags & VM_FLAG_EXEC) == 0)
{
value |= PTE_NOEXEC;
}
return value;
}
extern uint64_t *kernel_pml4;
void vmm_setup_pt_root()
{
// Allocate a physical frame, point the VMM root at the kernel PML4,
// then map the root's virtual address to the new frame
uintptr_t phys = pmm_alloc();
vmm_pt_root = (void*)kernel_pml4;
paging_map_page(kernel_pml4, (uint64_t)vmm_pt_root, phys, convert_x86_vm_flags(VM_FLAG_WRITE | VM_FLAG_EXEC));
DEBUG("VMM setup: vmm_pt_root=0x%p (phys=0x%p)", vmm_pt_root, phys);
}
void* vmm_alloc(size_t length, size_t flags)
{
// We try to allocate at least `length` bytes, rounded UP to the next
// page boundary so it's consistent with the PMM's 4 KiB granularity
size_t len = ALIGN_UP(length, PAGE_SIZE);
// The vm_object linked-list bookkeeping will go here, but it needs the
// kernel heap (kmalloc) for the list nodes first
(void)len;
return NULL; // stub until the allocation path lands
}
void vmm_init()
{
vmm_setup_pt_root();
}
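With the kernel heap this commit introduces, vmm_alloc can eventually do the deferred "linked list shenanigans". A hypothetical sketch of where it is headed, assuming the usual kmalloc(size) signature and a naive bump cursor for choosing virtual addresses (a real version would search vm_objs for gaps):

// Hypothetical sketch, not part of this file. next_virt's base is an
// arbitrary assumed address for a kernel VMM area.
static uintptr_t next_virt = 0xFFFFA00000000000ULL;

void *vmm_alloc_sketch(size_t length, size_t flags)
{
    size_t len = ALIGN_UP(length, PAGE_SIZE);
    struct vm_object *obj = kmalloc(sizeof(*obj));
    if (!obj)
        return NULL;
    obj->base = next_virt;
    obj->length = len;
    obj->flags = flags;
    obj->next = vm_objs;  // push onto the list head
    vm_objs = obj;
    // Back every page of the range with a fresh physical frame
    for (size_t off = 0; off < len; off += PAGE_SIZE)
        paging_map_page(kernel_pml4, obj->base + off, pmm_alloc(),
                        convert_x86_vm_flags(flags));
    next_virt += len;
    return (void *)obj->base;
}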

src/mem/paging/vmm.h (new file, 29 lines)

@@ -0,0 +1,29 @@
#ifndef VMM_H
#define VMM_H
#include <stdint.h>
#include <stddef.h>
/*
This will be our linked list of virtual memory objects.
Flags here aren't x86 page-table flags; they are
platform-agnostic, kernel-defined flags.
*/
struct vm_object
{
uintptr_t base;
size_t length;
size_t flags;
struct vm_object* next;
};
// Flags bitfield
#define VM_FLAG_NONE 0
#define VM_FLAG_WRITE (1 << 0)
#define VM_FLAG_EXEC (1 << 1)
#define VM_FLAG_USER (1 << 2)
void vmm_init();
#endif
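A quick usage sketch of this flag API (hypothetical call site, since vmm_alloc is still a stub in this commit):

// Writable, non-executable, kernel-only mapping of at least 8 KiB;
// OR in VM_FLAG_EXEC / VM_FLAG_USER as needed.
void *buf = vmm_alloc(8192, VM_FLAG_WRITE);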