Small kernel heap for VMM internals, kmalloc/kfree

commit e18b73c8a0 (parent c065df6ff3)
2026-01-03 13:48:10 +01:00
11 changed files with 322 additions and 30 deletions

Makefile

@@ -1,4 +1,4 @@
-SOURCES = src/mem/paging/paging.c src/mem/paging/pmm.c src/string/string.c src/io/kbd/ps2.c src/io/serial/serial.c src/io/term/printf.c src/io/term/term.c src/idt/idt.c src/mem/gdt/gdt.c src/mem/misc/utils.c src/time/timer.c src/kmain.c
+SOURCES = src/mem/heap/kheap.c src/mem/paging/vmm.c src/mem/paging/paging.c src/mem/paging/pmm.c src/string/string.c src/io/kbd/ps2.c src/io/serial/serial.c src/io/term/printf.c src/io/term/term.c src/idt/idt.c src/mem/gdt/gdt.c src/mem/misc/utils.c src/time/timer.c src/kmain.c
 build:
 	rm -f *.o
@@ -30,7 +30,7 @@ build-iso: limine/limine build
 	./limine/limine bios-install pepper.iso
 debug:
-	qemu-system-x86_64 -drive file=pepper.iso -s -S -d int -no-reboot &
+	qemu-system-x86_64 -drive file=pepper.iso -s -S -d int -no-reboot -no-shutdown &
 	gdb pepperk --command=debug.gdb
 run: build-iso
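Note: the `-s -S` flags make QEMU listen for gdb on TCP port 1234 and pause the CPU before the first instruction, and `-no-shutdown` now keeps the VM around after a triple fault so its state can still be inspected. `debug.gdb` is not part of this diff; a minimal script for this setup would be something along these lines (hypothetical contents):

    target remote localhost:1234
    break kmain
    continue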

kernel.h

@@ -16,4 +16,8 @@ enum ErrorCodes
 #define DEBUG(log, ...) fctprintf((void*)&skputc, 0, "debug: [%s]: " log "\r\n", __FILE__, ##__VA_ARGS__)
 // printf("debug: [%s]: " log "\n", __FILE__, ##__VA_ARGS__);
+void hcf();
+#define assert(check) do { if(!(check)) hcf(); } while(0)
 #endif
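Note: unlike the libc macro, this `assert` is always compiled in (there is no `NDEBUG` gate) and halts the machine through `hcf()`. A hypothetical use, not in this commit:

    // Halt the kernel if an invariant is broken,
    // e.g. that the heap starts page-aligned
    assert((kheap_start & 0xFFF) == 0);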

src/kmain.c

@@ -12,6 +12,8 @@
 #include "io/kbd/ps2.h"
 #include "mem/paging/pmm.h"
 #include "mem/paging/paging.h"
+#include "mem/paging/vmm.h"
+#include "mem/heap/kheap.h"
 // Limine version used
 __attribute__((used, section(".limine_requests")))
@@ -53,8 +55,8 @@ static volatile LIMINE_REQUESTS_END_MARKER;
 struct limine_framebuffer* framebuffer;
-// Panic
-static void hcf()
+// Panic (should dump registers etc. in the future)
+void hcf()
 {
     for (;;)
     {
@@ -92,8 +94,19 @@ void kmain()
     SET_INTERRUPTS;
     pmm_init(memmap_request.response, hhdm_request.response);
+    // Remap kernel, HHDM and framebuffer
     paging_init(kerneladdr_request.response, framebuffer);
+    kheap_init();
+    void* ptr = kmalloc(10); DEBUG("(KMALLOC TEST) Allocated 10 bytes at 0x%p", ptr);
+    void* ptr2 = kmalloc(200); DEBUG("(KMALLOC TEST) Allocated 200 bytes at 0x%p", ptr2);
+    kfree(ptr);
+    void* ptr3 = kmalloc(5); DEBUG("(KMALLOC TEST) Allocated 5 bytes at 0x%p", ptr3);
+    vmm_init();
     keyboard_init(FR);
     term_init();
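Note: on x86_64 the heap header below is 24 bytes, so the freed 10-byte block cannot be split for the 5-byte request (10 > 5 + 24 is false) and first-fit should hand back the same address. A check one could append to the test (hypothetical, not in this commit):

    // The freed 10-byte block is reused as-is for the 5-byte request
    assert(ptr3 == ptr);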

src/mem/heap/kheap.c (new file, 106 lines)

@@ -0,0 +1,106 @@
#include "kheap.h"
#include "mem/paging/paging.h"
#include "mem/paging/pmm.h"
#include <stddef.h>
#include <kernel.h>
extern uint64_t kernel_phys_base;
extern uint64_t kernel_virt_base;
uintptr_t kheap_start;
static struct heap_block_t* head = NULL;
static uintptr_t end;
// Kernel root table (level 4)
extern uint64_t *kernel_pml4;
static void kheap_map_page()
{
uintptr_t phys = pmm_alloc();
paging_map_page(kernel_pml4, end, phys, PTE_PRESENT | PTE_WRITABLE | PTE_NOEXEC);
end += PAGE_SIZE;
DEBUG("Mapped first kheap page");
}
void kheap_init()
{
kheap_start = ALIGN_UP(kernel_virt_base + KERNEL_SIZE, PAGE_SIZE);
end = kheap_start;
// At least 1 page must be mapped for it to work
kheap_map_page();
// Give linked list head its properties
head = (struct heap_block_t*)kheap_start;
head->size = PAGE_SIZE - sizeof(struct heap_block_t);
head->free = true;
head->next = NULL;
DEBUG("kheap initialized, head=0x%p, size=%u", head, head->size);
}
void* kmalloc(size_t size)
{
// No size, no memory allocated!
if (!size) return NULL;
struct heap_block_t* curr = head;
while (curr)
{
// Is block free and big enough for us?
if (curr->free && curr->size >= size)
{
// We split the block if it is big enough
if (curr->size > size + sizeof(struct heap_block_t))
{
struct heap_block_t* new_block = (struct heap_block_t*)((uintptr_t)curr + sizeof(struct heap_block_t) + size);
// We have to subtract the size of our block struct
new_block->size = curr->size - size - sizeof(struct heap_block_t);
new_block->free = true;
// Then we chain up the block in the list
new_block->next = curr->next;
curr->next = new_block;
curr->size = size;
}
// Found a good block, we return it
curr->free = false;
return (void*)((uintptr_t)curr + sizeof(struct heap_block_t));
}
// Continue browsing the list if nothing good was found yet
curr = curr->next;
}
// If we're hear it means we didn't have enough memory
// for the block allocation. So we will allocate more..
uintptr_t old_end = end;
kheap_map_page();
struct heap_block_t* block = (struct heap_block_t*)old_end;
block->size = PAGE_SIZE - sizeof(struct heap_block_t);
block->free = false;
block->next = NULL;
// Put the block at the end of the list
curr = head;
while (curr->next)
{
curr = curr->next;
}
curr->next = block;
return (void*)((uintptr_t)block + sizeof(struct heap_block_t));
}
void kfree(void* ptr)
{
// Nothing to free
if (!ptr) return;
// Set it free!
struct heap_block_t* block = (struct heap_block_t*)((uintptr_t)ptr - sizeof(struct heap_block_t));
block->free = true;
}
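Note: `kfree` only flips the `free` bit, so neighbouring free blocks are never merged and the list fragments over time. A sketch of the forward coalescing this allocator could grow into (hypothetical, not in this commit):

    // Merge a block with its successor when both are free and contiguous
    // in virtual memory; call after setting block->free in kfree.
    static void kheap_coalesce_forward(struct heap_block_t* block)
    {
        struct heap_block_t* next = block->next;
        if (next && next->free &&
            (uintptr_t)block + sizeof(struct heap_block_t) + block->size == (uintptr_t)next)
        {
            block->size += sizeof(struct heap_block_t) + next->size;
            block->next = next->next;
        }
    }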

src/mem/heap/kheap.h (new file, 26 lines)

@@ -0,0 +1,26 @@
#ifndef KHEAP_H
#define KHEAP_H

// We need some kind of simple kernel heap to make our linked list
// for the VMM, as we need "malloc" and "free" for that data structure.
// When the kernel heap is ready, we can alloc our VM object linked list
// and then continue working on the VMM.

// 16MB should be enough for some linked lists
#define KHEAP_SIZE (16*1024*1024)

#include <stdbool.h>
#include <stddef.h>

struct heap_block_t
{
    size_t size;
    bool free;
    struct heap_block_t* next;
};

void kheap_init();
void* kmalloc(size_t size);
void kfree(void* ptr);

#endif
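Note: on the x86_64 SysV ABI this header costs 24 bytes per allocation: 8 (`size`) + 1 (`free`) + 7 padding + 8 (`next`). A compile-time check of that assumption (hypothetical, not in this commit):

    _Static_assert(sizeof(struct heap_block_t) == 24, "unexpected heap header size");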

src/mem/paging/paging.c

@@ -39,10 +39,14 @@ static uint64_t* alloc_page_table()
     return virt;
 }
+// Kernel paging root table, that will be placed in cr3
 __attribute__((aligned(4096)))
-static uint64_t *kernel_pml4;
-void map_page(uint64_t virt, uint64_t phys, uint64_t flags)
+uint64_t *kernel_pml4;
+// Will map a page ONLY according to the kernel_pml4 root table.
+// For kernel initialization/mapping only
+// Deprecated, will be removed
+/* void paging_kmap_page(uint64_t virt, uint64_t phys, uint64_t flags)
 {
     virt = PAGE_ALIGN_DOWN(virt);
     phys = PAGE_ALIGN_DOWN(phys);
@@ -92,35 +96,85 @@ void map_page(uint64_t virt, uint64_t phys, uint64_t flags)
     // Flush TLB (apply changes)
     invlpg((void *)virt);
-}
+} */
+
+// Same as above, only this one takes any root table (not only kernel)
+// Duplicate code but don't worry about it, I'll refactor one day
+void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags)
+{
+    virt = PAGE_ALIGN_DOWN(virt);
+    phys = PAGE_ALIGN_DOWN(phys);
+    // Translate the virt address into page table indexes
+    uint64_t pml4_i = PML4_INDEX(virt);
+    uint64_t pdpt_i = PDPT_INDEX(virt);
+    uint64_t pd_i = PD_INDEX(virt);
+    uint64_t pt_i = PT_INDEX(virt);
+    uint64_t *pdpt, *pd, *pt;
+    // PML4
+    // If the entry at index is not present, allocate enough space for it
+    // then populate the entry with correct addr + flags
+    if (!(root_table[pml4_i] & PTE_PRESENT))
+    {
+        pdpt = alloc_page_table();
+        root_table[pml4_i] = VIRT_TO_PHYS(pdpt) | PTE_PRESENT | PTE_WRITABLE;
+    }
+    else {
+        pdpt = (uint64_t *)PHYS_TO_VIRT(root_table[pml4_i] & ~0xFFFULL);
+    }
+    // PDPT: same here
+    if (!(pdpt[pdpt_i] & PTE_PRESENT))
+    {
+        pd = alloc_page_table();
+        pdpt[pdpt_i] = VIRT_TO_PHYS(pd) | PTE_PRESENT | PTE_WRITABLE;
+    }
+    else {
+        pd = (uint64_t *)PHYS_TO_VIRT(pdpt[pdpt_i] & ~0xFFFULL);
+    }
+    // PD: and here
+    if (!(pd[pd_i] & PTE_PRESENT))
+    {
+        pt = alloc_page_table();
+        pd[pd_i] = VIRT_TO_PHYS(pt) | PTE_PRESENT | PTE_WRITABLE;
+    }
+    else {
+        pt = (uint64_t *)PHYS_TO_VIRT(pd[pd_i] & ~0xFFFULL);
+    }
+    // PT: finally, populate the page table entry
+    pt[pt_i] = phys | flags | PTE_PRESENT;
+    // Flush TLB (apply changes)
+    invlpg((void *)virt);
+}
+
+uint64_t kernel_phys_base;
+uint64_t kernel_virt_base;
+
 void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb)
 {
     // We should map the kernel, GDT, IDT, stack, framebuffer.
     // Optionally we could map ACPI tables (we can find them in the Limine memmap)
-    uint64_t kernel_phys_base = kaddr->physical_base;
-    uint64_t kernel_virt_base = kaddr->virtual_base;
+    kernel_phys_base = kaddr->physical_base;
+    kernel_virt_base = kaddr->virtual_base;
+    DEBUG("Kernel lives at virt=0x%p phys=0x%p", kernel_virt_base, kernel_phys_base);
     kernel_pml4 = alloc_page_table();
     // for debug
     uint64_t page_count = 0;
-    // First 16 MB identity-mapped (phys = virt)
-    // This is because there might be some leftover stuff in the lower phys addresses
-    // from boot/bios/acpi/...
-    for (uint64_t i=0; i<0x1000000; i += PAGE_SIZE)
-    {
-        map_page(i, i, PTE_WRITABLE);
-        page_count++;
-    }
-    DEBUG("Mapped %u pages for the identity-mapping of the first 16 MB", page_count); page_count = 0;
     // HHDM map first 1 GB using given offset
     for (uint64_t i=0; i<0x40000000; i += PAGE_SIZE)
     {
-        map_page(i+hhdm_off, i, PTE_WRITABLE);
+        //paging_kmap_page(i+hhdm_off, i, PTE_WRITABLE);
+        paging_map_page(kernel_pml4, i+hhdm_off, i, PTE_WRITABLE);
         page_count++;
     }
     DEBUG("Mapped %u pages for first 1GB (HHDM)", page_count); page_count = 0;
@@ -131,7 +185,8 @@ void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb)
     // For now who gives a shit, let's RWX all kernel
     for (uint64_t i = 0; i < KERNEL_SIZE; i += PAGE_SIZE)
     {
-        map_page(kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
+        //paging_kmap_page(kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
+        paging_map_page(kernel_pml4, kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
         page_count++;
     }
     DEBUG("Mapped %u pages for kernel", page_count); page_count = 0;
@@ -145,7 +200,8 @@ void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb)
     // Map the framebuffer (with cache-disable & write-through)
     for (uint64_t i=0; i<fb_pages; i++)
     {
-        map_page(fb_virt+i*PAGE_SIZE, fb_phys+i*PAGE_SIZE, PTE_WRITABLE | PTE_PCD | PTE_PWT);
+        //paging_kmap_page(fb_virt+i*PAGE_SIZE, fb_phys+i*PAGE_SIZE, PTE_WRITABLE | PTE_PCD | PTE_PWT);
+        paging_map_page(kernel_pml4, fb_virt+i*PAGE_SIZE, fb_phys+i*PAGE_SIZE, PTE_WRITABLE | PTE_PCD | PTE_PWT);
         page_count++;
     }
     DEBUG("Mapped %u pages for framebuffer", page_count);

src/mem/paging/paging.h

@@ -8,6 +8,7 @@
 #include <limine.h>
 void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb);
+void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags);
 extern uint64_t hhdm_off;

src/mem/paging/pmm.c

@@ -25,7 +25,6 @@ We will look for the biggest usable physical memory region
 and use this for the bitmap. The reserved memory will be ignored.
 */
-struct usable_memory* usable_mem;
 struct limine_memmap_entry* biggest_entry;
 static void pmm_find_biggest_usable_region(struct limine_memmap_response* memmap, struct limine_hhdm_response* hhdm)
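Note: for a bitmap over the single biggest usable region, the bookkeeping reduces to a frame/bit correspondence like the following sketch (helper names hypothetical, the real ones are not in this diff):

    // Bit i covers the 4 KiB frame at biggest_entry->base + i * PAGE_SIZE
    static inline size_t pmm_frame_to_bit(uintptr_t phys)
    {
        return (phys - biggest_entry->base) / PAGE_SIZE;
    }
    static inline uintptr_t pmm_bit_to_frame(size_t bit)
    {
        return biggest_entry->base + bit * PAGE_SIZE;
    }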

src/mem/paging/pmm.h

@@ -7,12 +7,4 @@ void pmm_init(struct limine_memmap_response* memmap, struct limine_hhdm_response* hhdm);
 void pmm_free(uintptr_t addr);
 uintptr_t pmm_alloc();
-// Might be upgraded to a freelist later.
-// For now, we can take the biggest usable region and we will be fine.
-struct usable_memory
-{
-    uint64_t base; // physical
-    uint64_t length;
-};
 #endif
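Note: the intended round trip through this API, assuming `pmm_alloc()` returns a 4 KiB physical frame address and 0 on exhaustion (the failure convention is not shown in this diff):

    uintptr_t frame = pmm_alloc(); // grab one physical frame
    assert(frame != 0);            // hypothetical failure check
    pmm_free(frame);               // hand it back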

src/mem/paging/vmm.c (new file, 66 lines)

@@ -0,0 +1,66 @@
/*
The VMM (virtual memory manager) will have two roles:
- mapping pages
- unmapping pages
in a specified virtual space,
compared to the PMM which allocs/frees 4 KiB frames ("physical pages").
*/
#include "vmm.h"
#include "paging.h"
#include <stddef.h>
#include "pmm.h"
#include <kernel.h>

void* vmm_pt_root = 0;
// Linked list head for virtual memory objects
struct vm_object* vm_objs = NULL;

uint64_t convert_x86_vm_flags(size_t flags)
{
    uint64_t value = 0;
    if (flags & VM_FLAG_WRITE)
    {
        value |= PTE_WRITABLE;
    }
    if (flags & VM_FLAG_USER)
    {
        value |= PTE_USER;
    }
    if ((flags & VM_FLAG_EXEC) == 0)
    {
        value |= PTE_NOEXEC;
    }
    return value;
}

extern uint64_t *kernel_pml4;

void vmm_setup_pt_root()
{
    // Reuse the kernel root table for now: it is already allocated and
    // mapped, so there is no need to alloc a fresh frame and remap the
    // root table's virtual address (remapping it would point the table
    // at an uninitialized frame and corrupt later mappings).
    vmm_pt_root = (void*)kernel_pml4;
    DEBUG("VMM setup: vmm_pt_root=0x%p", vmm_pt_root);
}

void* vmm_alloc(size_t length, size_t flags)
{
    // We will try to allocate at least length bytes, which have to be rounded UP to
    // the next page so it's coherent with the PMM
    size_t len = ALIGN_UP(length, PAGE_SIZE);
    // Some linked list shenanigans will be here,
    // but for now we'd need some kheap to kmalloc the linked list items,
    // else we can't do it
    (void)len; (void)flags;
    return NULL;
}

void vmm_init()
{
    vmm_setup_pt_root();
}
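Note: a worked example of the flag translation above: a writable, user-visible mapping without `VM_FLAG_EXEC` picks up the NX bit automatically:

    uint64_t pte = convert_x86_vm_flags(VM_FLAG_WRITE | VM_FLAG_USER);
    // pte == PTE_WRITABLE | PTE_USER | PTE_NOEXEC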

src/mem/paging/vmm.h (new file, 29 lines)

@@ -0,0 +1,29 @@
#ifndef VMM_H
#define VMM_H
#include <stdint.h>
#include <stddef.h>
/*
This will be our linked list of virtual memory objects.
Flags here aren't x86 flags, they are platform-agnostic
kernel-defined flags.
*/
struct vm_object
{
uintptr_t base;
size_t length;
size_t flags;
struct vm_object* next;
};
// Flags bitfield
#define VM_FLAG_NONE 0
#define VM_FLAG_WRITE (1 << 0)
#define VM_FLAG_EXEC (1 << 1)
#define VM_FLAG_USER (1 << 2)
void vmm_init();
#endif
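Note: with kmalloc available, the bookkeeping `vmm_alloc` still lacks boils down to prepending a `vm_object` to the list. A sketch of where that is headed (hypothetical, not in this commit; `base`, `len` and `flags` are whatever the VMM decides for the mapping):

    struct vm_object* obj = kmalloc(sizeof(struct vm_object));
    assert(obj != NULL);
    obj->base = base;    // chosen virtual address (placement policy TBD)
    obj->length = len;   // page-aligned length
    obj->flags = flags;  // VM_FLAG_* bits
    obj->next = vm_objs; // prepend to the global list
    vm_objs = obj;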