/*
 * @author xamidev <xamidev@riseup.net>
 * @brief Kernel heap
 * @license GPL-3.0-only
 */
|
|
|
|
#include "kheap.h"
|
|
#include "mem/paging/paging.h"
|
|
#include "mem/paging/pmm.h"
|
|
#include <stddef.h>
|
|
#include <kernel.h>
|
|
#include "sched/process.h"
|
|
#include "config.h"
|
|
|
|
extern uint64_t kernel_phys_base;
|
|
extern uint64_t kernel_virt_base;
|
|
|
|
uintptr_t kheap_start;
|
|
|
|
static struct heap_block_t* head = NULL;
|
|
static uintptr_t end;
|
|
|
|
// Kernel root table (level 4)
|
|
extern uint64_t *kernel_pml4;
|
|
|
|
static void kheap_grow(size_t size)
|
|
{
|
|
size_t pages = ALIGN_UP(size + sizeof(struct heap_block_t), PAGE_SIZE) / PAGE_SIZE;
|
|
|
|
if (pages == 0) pages = 1;
|
|
|
|
for (size_t i = 0; i < pages; i++)
|
|
{
|
|
kheap_map_page();
|
|
}
|
|
}
|
|
|
|
void kheap_map_page()
|
|
{
|
|
uintptr_t phys = pmm_alloc();
|
|
paging_map_page(kernel_pml4, end, phys, PTE_PRESENT | PTE_WRITABLE | PTE_NOEXEC);
|
|
end += PAGE_SIZE;
|
|
//DEBUG("Mapped first kheap page");
|
|
}
|
|
|
|
void kheap_init()
|
|
{
|
|
kheap_start = ALIGN_UP(kernel_virt_base + KERNEL_SIZE, PAGE_SIZE);
|
|
end = kheap_start;
|
|
|
|
// At least 1 page must be mapped for it to work
|
|
kheap_map_page();
|
|
|
|
// Give linked list head its properties
|
|
head = (struct heap_block_t*)kheap_start;
|
|
head->size = PAGE_SIZE - sizeof(struct heap_block_t);
|
|
head->free = true;
|
|
head->next = NULL;
|
|
DEBUG("kheap initialized, head=0x%p, size=%u", head, head->size);
|
|
}
|
|
|
|
void* kmalloc(size_t size)
|
|
{
|
|
// No size, no memory allocated!
|
|
if (!size) return NULL;
|
|
size = ALIGN(size);
|
|
|
|
struct heap_block_t* curr = head;
|
|
|
|
while (curr)
|
|
{
|
|
// Is block free and big enough for us?
|
|
if (curr->free && curr->size >= size)
|
|
{
|
|
// We split the block if it is big enough
|
|
if (curr->size >= size + BLOCK_MIN_SIZE)
|
|
{
|
|
//struct heap_block_t* new_block = (struct heap_block_t*)((uintptr_t)curr + sizeof(struct heap_block_t) + size);
|
|
struct heap_block_t* split = (struct heap_block_t*)((uintptr_t)curr + sizeof(*curr) + size);
|
|
|
|
split->size = curr->size - size - sizeof(*curr);
|
|
split->free = true;
|
|
split->next = curr->next;
|
|
|
|
curr->next = split;
|
|
curr->size = size;
|
|
}
|
|
|
|
// Found a good block, we return it
|
|
curr->free = false;
|
|
return (void*)((uintptr_t)curr + sizeof(struct heap_block_t));
|
|
}
|
|
// Continue browsing the list if nothing good was found yet
|
|
curr = curr->next;
|
|
}
|
|
|
|
// If we're here it means we didn't have enough memory
|
|
// for the block allocation. So we will allocate more..
|
|
uintptr_t old_end = end;
|
|
kheap_grow(size + sizeof(struct heap_block_t));
|
|
|
|
struct heap_block_t* block = (struct heap_block_t*)old_end;
|
|
block->size = ALIGN_UP(end - old_end - sizeof(struct heap_block_t), 16);
|
|
block->free = true;
|
|
block->next = NULL;
|
|
|
|
// Put the block at the end of the list
|
|
curr = head;
|
|
while (curr->next)
|
|
{
|
|
curr = curr->next;
|
|
}
|
|
curr->next = block;
|
|
|
|
return kmalloc(size);
|
|
}
|
|
|
|
void kfree(void* ptr)
|
|
{
|
|
// Nothing to free
|
|
if (!ptr) return;
|
|
|
|
// Set it free!
|
|
struct heap_block_t* block = (struct heap_block_t*)((uintptr_t)ptr - sizeof(struct heap_block_t));
|
|
block->free = true;
|
|
|
|
// merge adjacent free blocks (coalescing)
|
|
struct heap_block_t* curr = head;
|
|
while (curr && curr->next)
|
|
{
|
|
if (curr->free && curr->next->free)
|
|
{
|
|
curr->size += sizeof(*curr) + curr->next->size;
|
|
curr->next = curr->next->next;
|
|
continue;
|
|
}
|
|
curr = curr->next;
|
|
}
|
|
}
|
|
|
|
// Should alloc enough for a stack (at least 64kb) to be used for a process.
|
|
// Should return a pointer to top of the stack (as stack grows DOWNWARDS)
|
|
void* kalloc_stack()
|
|
{
|
|
uint8_t* ptr = kmalloc(PROCESS_STACK_SIZE); // As it's out of kmalloc, stack is already mapped into kernel space
|
|
return ptr ? ptr+PROCESS_STACK_SIZE : NULL;
|
|
} |