Alloc extra pages for raw binary

2026-05-10 19:36:01 +02:00
parent 01911bdd32
commit 1142699c48
2 changed files with 7 additions and 1 deletion
+2
@@ -44,6 +44,8 @@
#define USER_STACK_TOP 0x80000000
#define USER_STACK_PAGES 16 // 16*4096 = 64kb
#define USER_CODE_START 0x400000 // like linux
+#define USER_RAW_EXTRA_PAGES 8192 // Extra writable pages after raw image for .bss/heap
+// TODO: throw this away and make an ELF loader instead bruh
/* paging */
#define PAGING_MAX_PHYS 0x200000000
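
A quick size check on the new constant: 8192 extra pages at 4096 bytes each reserve 32 MiB of writable address space directly after the raw image, which is where a flat binary's .bss and a simple heap would land. A minimal sketch of the resulting user region, assuming only the constants above (the helper name is illustrative, not from the source):

#include <stdint.h>

#define PAGE_SIZE            4096ULL
#define USER_CODE_START      0x400000ULL
#define USER_RAW_EXTRA_PAGES 8192ULL

/* For a raw image of code_size bytes, the mapped user region spans
 * [USER_CODE_START, USER_CODE_START + aligned(code_size) + 32 MiB),
 * where aligned() rounds up to the next page boundary, as in the
 * vmm_alloc_user_code hunk below. */
static inline uint64_t user_region_end(uint64_t code_size)
{
    uint64_t aligned = (code_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    return USER_CODE_START + aligned + USER_RAW_EXTRA_PAGES * PAGE_SIZE; /* +32 MiB */
}
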
+5 -1
@@ -18,6 +18,7 @@ compared to the PMM which allocs/frees 4kb frames ("physical pages").
#include <mem/paging.h>
#include <stddef.h>
#include <mem/pmm.h>
+#include <mem/utils.h>
#include <kernel.h>
extern uint64_t *kernel_pml4;
@@ -116,6 +117,8 @@ void* vmm_map(uint64_t* pml4, uint64_t virt, uint64_t flags)
panic(NULL, "VMM/PMM out of memory!");
}
+memset(PHYS_TO_VIRT(phys), 0, PAGE_SIZE);
paging_map_page(pml4, virt, phys, flags | PTE_PRESENT);
return (void*)virt;
}
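
The memset() added here (presumably what the new <mem/utils.h> include provides) zero-fills each freshly allocated frame through its kernel-side mapping before it is mapped into the target address space, so the extra pages behave like zero-initialized .bss/heap memory and no stale frame contents leak to user mode. A minimal sketch of what PHYS_TO_VIRT might look like, assuming a higher-half direct map at a fixed offset; the actual macro and offset are not shown in this diff:

#include <stdint.h>

/* Hypothetical: many hobby kernels map all physical memory at a fixed
 * virtual offset (a higher-half direct map), so a physical address can
 * be dereferenced by adding that offset. Offset value is illustrative. */
#define HHDM_OFFSET 0xFFFF800000000000ULL
#define PHYS_TO_VIRT(phys) ((void *)((uint64_t)(phys) + HHDM_OFFSET))
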
@@ -255,8 +258,9 @@ uintptr_t vmm_alloc_user_code(uint64_t* pml4, void* code_addr, uint64_t code_siz
// Round code_size up to next page boundary
uint64_t code_size_aligned = (code_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+uint64_t mapped_size = code_size_aligned + ((uint64_t)USER_RAW_EXTRA_PAGES * PAGE_SIZE);
-for (uint64_t i=code_start; i<code_start+code_size_aligned; i+=PAGE_SIZE) {
+for (uint64_t i=code_start; i<code_start+mapped_size; i+=PAGE_SIZE) {
vmm_map(pml4, i, PTE_PRESENT | PTE_WRITABLE | PTE_USER);
}
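
Taken together, a caller loading a flat binary would get the image plus 32 MiB of zeroed scratch space in one call. The sketch below is a hypothetical loader step: the copy-to-USER_CODE_START and return-value behavior are inferred from the function's name and the loop above, since the full body is not shown in this hunk, and task_pml4/blob are placeholder names:

#include <stdint.h>

/* Declared in the VMM; only the mapping loop appears in this commit. */
extern uintptr_t vmm_alloc_user_code(uint64_t *pml4, void *code_addr, uint64_t code_size);

uintptr_t load_raw_task(uint64_t *task_pml4, void *blob, uint64_t blob_size)
{
    uintptr_t entry = vmm_alloc_user_code(task_pml4, blob, blob_size);
    /* Presumably the image now sits at USER_CODE_START, and the following
     * USER_RAW_EXTRA_PAGES pages are zeroed, writable .bss/heap space. */
    return entry;
}

Note that every page, code included, is mapped PTE_WRITABLE here; that matches the TODO above admitting this is a stopgap until an ELF loader can set per-segment permissions.
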