diff --git a/Makefile b/Makefile
index 6c70425..a94d962 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-SOURCES = src/mem/paging/paging.c src/mem/paging/pmm.c src/string/string.c src/io/kbd/ps2.c src/io/serial/serial.c src/io/term/printf.c src/io/term/term.c src/idt/idt.c src/mem/gdt/gdt.c src/mem/misc/utils.c src/time/timer.c src/kmain.c
+SOURCES = src/mem/heap/kheap.c src/mem/paging/vmm.c src/mem/paging/paging.c src/mem/paging/pmm.c src/string/string.c src/io/kbd/ps2.c src/io/serial/serial.c src/io/term/printf.c src/io/term/term.c src/idt/idt.c src/mem/gdt/gdt.c src/mem/misc/utils.c src/time/timer.c src/kmain.c
 
 build:
 	rm -f *.o
@@ -30,7 +30,7 @@ build-iso: limine/limine build
 	./limine/limine bios-install pepper.iso
 
 debug:
-	qemu-system-x86_64 -drive file=pepper.iso -s -S -d int -no-reboot &
+	qemu-system-x86_64 -drive file=pepper.iso -s -S -d int -no-reboot -no-shutdown &
 	gdb pepperk --command=debug.gdb
 
 run: build-iso
diff --git a/src/kernel.h b/src/kernel.h
index 0daeeef..bbbd46e 100644
--- a/src/kernel.h
+++ b/src/kernel.h
@@ -16,4 +16,8 @@ enum ErrorCodes
 #define DEBUG(log, ...) fctprintf((void*)&skputc, 0, "debug: [%s]: " log "\r\n", __FILE__, ##__VA_ARGS__)
 // printf("debug: [%s]: " log "\n", __FILE__, ##__VA_ARGS__);
 
+
+void hcf();
+#define assert(check) do { if(!(check)) hcf(); } while(0)
+
 #endif
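A quick note on the new assert macro: the do { ... } while(0) wrapper is what makes the expansion behave as a single statement, so the macro composes safely with if/else. A minimal illustration (the blk pointer and its size field are hypothetical, not from this repo):

    // With the wrapper, this parses as intended. With a bare { } block,
    // the ';' after the macro would end the if and orphan the else.
    if (blk != NULL)
        assert(blk->size > 0);
    else
        hcf();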
diff --git a/src/kmain.c b/src/kmain.c
index 64b1ced..13e50ae 100644
--- a/src/kmain.c
+++ b/src/kmain.c
@@ -12,6 +12,8 @@
 #include "io/kbd/ps2.h"
 #include "mem/paging/pmm.h"
 #include "mem/paging/paging.h"
+#include "mem/paging/vmm.h"
+#include "mem/heap/kheap.h"
 
 // Limine version used
 __attribute__((used, section(".limine_requests")))
@@ -53,8 +55,8 @@
 static volatile LIMINE_REQUESTS_END_MARKER;
 
 struct limine_framebuffer* framebuffer;
 
-// Panic
-static void hcf()
+// Panic (should dump registers etc. in the future)
+void hcf()
 {
     for (;;)
     {
@@ -92,7 +94,18 @@ void kmain()
     SET_INTERRUPTS;
 
     pmm_init(memmap_request.response, hhdm_request.response);
+
+    // Remap kernel, HHDM and framebuffer
     paging_init(kerneladdr_request.response, framebuffer);
+
+    kheap_init();
+
+    void* ptr = kmalloc(10); DEBUG("(KMALLOC TEST) Allocated 10 bytes at 0x%p", ptr);
+    void* ptr2 = kmalloc(200); DEBUG("(KMALLOC TEST) Allocated 200 bytes at 0x%p", ptr2);
+    kfree(ptr);
+    void* ptr3 = kmalloc(5); DEBUG("(KMALLOC TEST) Allocated 5 bytes at 0x%p", ptr3);
+
+    vmm_init();
 
     keyboard_init(FR);
diff --git a/src/mem/heap/kheap.c b/src/mem/heap/kheap.c
new file mode 100644
index 0000000..aff1f48
--- /dev/null
+++ b/src/mem/heap/kheap.c
@@ -0,0 +1,110 @@
+#include "kheap.h"
+#include "mem/paging/paging.h"
+#include "mem/paging/pmm.h"
+#include <stdint.h>
+#include <kernel.h>
+
+extern uint64_t kernel_phys_base;
+extern uint64_t kernel_virt_base;
+
+uintptr_t kheap_start;
+
+static struct heap_block_t* head = NULL;
+static uintptr_t end;
+
+// Kernel root table (level 4)
+extern uint64_t *kernel_pml4;
+
+static void kheap_map_page()
+{
+    uintptr_t phys = pmm_alloc();
+    paging_map_page(kernel_pml4, end, phys, PTE_PRESENT | PTE_WRITABLE | PTE_NOEXEC);
+    end += PAGE_SIZE;
+    DEBUG("Mapped new kheap page");
+}
+
+void kheap_init()
+{
+    kheap_start = ALIGN_UP(kernel_virt_base + KERNEL_SIZE, PAGE_SIZE);
+    end = kheap_start;
+
+    // At least 1 page must be mapped for it to work
+    kheap_map_page();
+
+    // Give the linked list head its properties
+    head = (struct heap_block_t*)kheap_start;
+    head->size = PAGE_SIZE - sizeof(struct heap_block_t);
+    head->free = true;
+    head->next = NULL;
+    DEBUG("kheap initialized, head=0x%p, size=%u", head, head->size);
+}
+
+void* kmalloc(size_t size)
+{
+    // No size, no memory allocated!
+    if (!size) return NULL;
+
+    struct heap_block_t* curr = head;
+
+    while (curr)
+    {
+        // Is the block free and big enough for us?
+        if (curr->free && curr->size >= size)
+        {
+            // We split the block if it is big enough
+            if (curr->size > size + sizeof(struct heap_block_t))
+            {
+                struct heap_block_t* new_block = (struct heap_block_t*)((uintptr_t)curr + sizeof(struct heap_block_t) + size);
+                // We have to subtract the size of our block struct
+                new_block->size = curr->size - size - sizeof(struct heap_block_t);
+                new_block->free = true;
+
+                // Then we chain up the block in the list
+                new_block->next = curr->next;
+                curr->next = new_block;
+
+                curr->size = size;
+            }
+
+            // Found a good block, we return it
+            curr->free = false;
+            return (void*)((uintptr_t)curr + sizeof(struct heap_block_t));
+        }
+        // Continue browsing the list if nothing good was found yet
+        curr = curr->next;
+    }
+
+    // If we're here it means we didn't have enough memory
+    // for the block allocation. So we will allocate more.
+    // This fallback only grows the heap by a single page, so the
+    // request has to fit in one; halt loudly instead of handing
+    // back a block that is too small.
+    assert(size <= PAGE_SIZE - sizeof(struct heap_block_t));
+    uintptr_t old_end = end;
+    kheap_map_page();
+
+    struct heap_block_t* block = (struct heap_block_t*)old_end;
+    block->size = PAGE_SIZE - sizeof(struct heap_block_t);
+    block->free = false;
+    block->next = NULL;
+
+    // Put the block at the end of the list
+    curr = head;
+    while (curr->next)
+    {
+        curr = curr->next;
+    }
+    curr->next = block;
+
+    return (void*)((uintptr_t)block + sizeof(struct heap_block_t));
+}
+
+void kfree(void* ptr)
+{
+    // Nothing to free
+    if (!ptr) return;
+
+    // Set it free!
+    struct heap_block_t* block = (struct heap_block_t*)((uintptr_t)ptr - sizeof(struct heap_block_t));
+    block->free = true;
+}
\ No newline at end of file
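One thing kfree deliberately leaves out: merging. A freed block keeps its size forever, so repeated kmalloc/kfree cycles fragment the heap into blocks that can never grow back. A possible follow-up, sketched here under the assumption that the list stays address-ordered (the splitting path and the page-append path both preserve that), is to merge a freed block with any free neighbours sitting directly after it:

    // Hypothetical helper, not part of this patch: absorb adjacent free
    // blocks that sit immediately after 'block' in memory.
    static void kheap_coalesce(struct heap_block_t* block)
    {
        while (block->next && block->next->free &&
               (uintptr_t)block + sizeof(struct heap_block_t) + block->size
                   == (uintptr_t)block->next)
        {
            // The neighbour's header becomes payload space
            block->size += sizeof(struct heap_block_t) + block->next->size;
            block->next = block->next->next;
        }
    }

kfree would call this on the freed block; merging backwards would additionally need a prev pointer or a scan from head.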
diff --git a/src/mem/heap/kheap.h b/src/mem/heap/kheap.h
new file mode 100644
index 0000000..f8bb8b5
--- /dev/null
+++ b/src/mem/heap/kheap.h
@@ -0,0 +1,26 @@
+#ifndef KHEAP_H
+#define KHEAP_H
+
+// We need some kind of simple kernel heap to make our linked list
+// for the VMM, as we need "malloc" and "free" for that data structure.
+// When the kernel heap is ready, we can alloc our VM object linked list
+// and then continue working on the VMM.
+
+// 16MB should be enough for some linked lists
+#define KHEAP_SIZE (16*1024*1024)
+
+#include <stddef.h>
+#include <stdbool.h>
+
+struct heap_block_t
+{
+    size_t size;
+    bool free;
+    struct heap_block_t* next;
+};
+
+void kheap_init();
+void* kmalloc(size_t size);
+void kfree(void* ptr);
+
+#endif
\ No newline at end of file
diff --git a/src/mem/paging/paging.c b/src/mem/paging/paging.c
index 779c2a1..4e50d2e 100644
--- a/src/mem/paging/paging.c
+++ b/src/mem/paging/paging.c
@@ -39,10 +39,14 @@ static uint64_t* alloc_page_table()
     return virt;
 }
 
+// Kernel paging root table, that will be placed in cr3
 __attribute__((aligned(4096)))
-static uint64_t *kernel_pml4;
+uint64_t *kernel_pml4;
 
-void map_page(uint64_t virt, uint64_t phys, uint64_t flags)
+// Will map a page ONLY according to the kernel_pml4 root table.
+// For kernel initialization/mapping only.
+// Deprecated, will be removed.
+/* void paging_kmap_page(uint64_t virt, uint64_t phys, uint64_t flags)
 {
     virt = PAGE_ALIGN_DOWN(virt);
     phys = PAGE_ALIGN_DOWN(phys);
@@ -90,37 +94,87 @@
     // PT: finally, populate the page table entry
     pt[pt_i] = phys | flags | PTE_PRESENT;
 
+    // Flush TLB (apply changes)
+    invlpg((void *)virt);
+} */
+
+// Same as above, only this one takes any root table (not only kernel).
+// Duplicate code but don't worry about it, I'll refactor one day
+void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags)
+{
+    virt = PAGE_ALIGN_DOWN(virt);
+    phys = PAGE_ALIGN_DOWN(phys);
+
+    // Translate the virt address into page table indexes
+    uint64_t pml4_i = PML4_INDEX(virt);
+    uint64_t pdpt_i = PDPT_INDEX(virt);
+    uint64_t pd_i = PD_INDEX(virt);
+    uint64_t pt_i = PT_INDEX(virt);
+
+    uint64_t *pdpt, *pd, *pt;
+
+    // PML4
+    // If the entry at index is not present, allocate enough space for it,
+    // then populate the entry with the correct addr + flags
+    if (!(root_table[pml4_i] & PTE_PRESENT))
+    {
+        pdpt = alloc_page_table();
+        root_table[pml4_i] = VIRT_TO_PHYS(pdpt) | PTE_PRESENT | PTE_WRITABLE;
+    }
+    else {
+        pdpt = (uint64_t *)PHYS_TO_VIRT(root_table[pml4_i] & ~0xFFFULL);
+    }
+
+    // PDPT: same here
+    if (!(pdpt[pdpt_i] & PTE_PRESENT))
+    {
+        pd = alloc_page_table();
+        pdpt[pdpt_i] = VIRT_TO_PHYS(pd) | PTE_PRESENT | PTE_WRITABLE;
+    }
+    else {
+        pd = (uint64_t *)PHYS_TO_VIRT(pdpt[pdpt_i] & ~0xFFFULL);
+    }
+
+    // PD: and here
+    if (!(pd[pd_i] & PTE_PRESENT))
+    {
+        pt = alloc_page_table();
+        pd[pd_i] = VIRT_TO_PHYS(pt) | PTE_PRESENT | PTE_WRITABLE;
+    }
+    else {
+        pt = (uint64_t *)PHYS_TO_VIRT(pd[pd_i] & ~0xFFFULL);
+    }
+
+    // PT: finally, populate the page table entry
+    pt[pt_i] = phys | flags | PTE_PRESENT;
+
+    // Flush TLB (apply changes)
+    invlpg((void *)virt);
+}
+
+uint64_t kernel_phys_base;
+uint64_t kernel_virt_base;
+
 void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb)
 {
     // We should map the kernel, GDT, IDT, stack, framebuffer.
     // Optionally we could map ACPI tables (we can find them in the Limine memmap)
-    uint64_t kernel_phys_base = kaddr->physical_base;
-    uint64_t kernel_virt_base = kaddr->virtual_base;
+    kernel_phys_base = kaddr->physical_base;
+    kernel_virt_base = kaddr->virtual_base;
+
+    DEBUG("Kernel lives at virt=0x%p phys=0x%p", kernel_virt_base, kernel_phys_base);
 
     kernel_pml4 = alloc_page_table();
 
     // for debug
     uint64_t page_count = 0;
 
-    // First 16 MB identity-mapped (phys = virt)
-    // This is because there might be some leftover stuff in the lower phys addresses
-    // from boot/bios/acpi/...
-    for (uint64_t i=0; i<0x1000000; i += PAGE_SIZE)
-    {
-        map_page(i, i, PTE_WRITABLE);
-        page_count++;
-    }
-    DEBUG("Mapped %u pages for the identity-mapping of the first 16 MB", page_count); page_count = 0;
-
-    // HHDM map first 1 GB using given offset
     for (uint64_t i=0; i<0x40000000; i += PAGE_SIZE)
     {
-        map_page(i+hhdm_off, i, PTE_WRITABLE);
+        //paging_kmap_page(i+hhdm_off, i, PTE_WRITABLE);
+        paging_map_page(kernel_pml4, i+hhdm_off, i, PTE_WRITABLE);
         page_count++;
     }
     DEBUG("Mapped %u pages for first 1GB (HHDM)", page_count); page_count = 0;
@@ -131,7 +185,8 @@ void paging_init(struct limine_kernel_address_response* kaddr, struct limine_fra
     // For now who gives a shit, let's RWX all kernel
     for (uint64_t i = 0; i < KERNEL_SIZE; i += PAGE_SIZE)
     {
-        map_page(kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
+        //paging_kmap_page(kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
+        paging_map_page(kernel_pml4, kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
         page_count++;
     }
     DEBUG("Mapped %u pages for kernel", page_count); page_count = 0;
@@ -145,7 +200,8 @@ void paging_init(struct limine_kernel_address_response* kaddr, struct limine_fra
     // Map the framebuffer (with cache-disable & write-through)
     for (uint64_t i=0; i<fb_size; i += PAGE_SIZE)
    {
-        map_page(fb_virt+i, fb_phys+i, fb_flags);
+        //paging_kmap_page(fb_virt+i, fb_phys+i, fb_flags);
+        paging_map_page(kernel_pml4, fb_virt+i, fb_phys+i, fb_flags);
         page_count++;
     }
     DEBUG("Mapped %u pages for framebuffer", page_count);
diff --git a/src/mem/paging/paging.h b/src/mem/paging/paging.h
--- a/src/mem/paging/paging.h
+++ b/src/mem/paging/paging.h
@@ ... @@
 #include <limine.h>
 
 void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb);
+void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags);
 
 extern uint64_t hhdm_off;
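The PML4_INDEX/PDPT_INDEX/PD_INDEX/PT_INDEX macros that paging_map_page relies on aren't shown in this patch; on x86_64 with 4-level paging they conventionally slice the virtual address into four 9-bit indexes above the 12-bit page offset, along these lines:

    // Conventional x86_64 definitions (assumed, not taken from this repo)
    #define PT_INDEX(v)   (((v) >> 12) & 0x1FF)
    #define PD_INDEX(v)   (((v) >> 21) & 0x1FF)
    #define PDPT_INDEX(v) (((v) >> 30) & 0x1FF)
    #define PML4_INDEX(v) (((v) >> 39) & 0x1FF)

For example, a higher-half address like 0xFFFF800000000000 decomposes to PML4 index 256 with the other three indexes and the page offset all 0, which is why the HHDM mappings land in the upper half of the root table.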
diff --git a/src/mem/paging/pmm.c b/src/mem/paging/pmm.c
index df98430..2128f88 100644
--- a/src/mem/paging/pmm.c
+++ b/src/mem/paging/pmm.c
@@ -25,7 +25,6 @@
 We will look for the biggest usable physical memory region
 and use this for the bitmap. The reserved memory will be ignored.
 */
-struct usable_memory* usable_mem;
 struct limine_memmap_entry* biggest_entry;
 
 static void pmm_find_biggest_usable_region(struct limine_memmap_response* memmap, struct limine_hhdm_response* hhdm)
diff --git a/src/mem/paging/pmm.h b/src/mem/paging/pmm.h
index 30202de..084c334 100644
--- a/src/mem/paging/pmm.h
+++ b/src/mem/paging/pmm.h
@@ -7,12 +7,4 @@ void pmm_init(struct limine_memmap_response* memmap, struct limine_hhdm_response
 void pmm_free(uintptr_t addr);
 uintptr_t pmm_alloc();
 
-// Might be upgraded to a freelist later.
-// For now, we can take the biggest usable region and we will be fine.
-struct usable_memory
-{
-    uint64_t base; // physical
-    uint64_t length;
-};
-
 #endif
\ No newline at end of file
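For context, the pmm.c comment above describes the allocator design: a bitmap covering the biggest usable region, one bit per 4 KB frame. A hedged sketch of what first-fit allocation over such a bitmap typically looks like (the names bitmap, frame_count and region_base are illustrative, not the actual pmm.c internals):

    static uint8_t* bitmap;       // one bit per frame, set = in use
    static uint64_t frame_count;
    static uintptr_t region_base; // physical base of the region

    uintptr_t sketch_pmm_alloc(void)
    {
        for (uint64_t i = 0; i < frame_count; i++)
        {
            if (!(bitmap[i / 8] & (1 << (i % 8))))
            {
                bitmap[i / 8] |= (1 << (i % 8)); // mark frame used
                return region_base + i * 4096;   // physical address
            }
        }
        return 0; // out of physical memory
    }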
diff --git a/src/mem/paging/vmm.c b/src/mem/paging/vmm.c
new file mode 100644
index 0000000..630a479
--- /dev/null
+++ b/src/mem/paging/vmm.c
@@ -0,0 +1,68 @@
+/*
+The VMM (virtual memory manager) will have two roles:
+- mapping pages
+- unmapping pages
+in a specified virtual space,
+
+compared to the PMM, which allocs/frees 4 KB frames ("physical pages").
+*/
+
+#include "vmm.h"
+#include "paging.h"
+#include <stdint.h>
+#include "pmm.h"
+#include <kernel.h>
+
+void* vmm_pt_root = 0;
+
+// Linked list head for virtual memory objects
+struct vm_object* vm_objs = NULL;
+
+
+uint64_t convert_x86_vm_flags(size_t flags)
+{
+    uint64_t value = 0;
+    if (flags & VM_FLAG_WRITE)
+    {
+        value |= PTE_WRITABLE;
+    }
+    if (flags & VM_FLAG_USER)
+    {
+        value |= PTE_USER;
+    }
+    if ((flags & VM_FLAG_EXEC) == 0)
+    {
+        value |= PTE_NOEXEC;
+    }
+    return value;
+}
+
+extern uint64_t *kernel_pml4;
+
+void vmm_setup_pt_root()
+{
+    // We alloc a physical page (frame) for the root table. The HHDM
+    // already covers that frame, so we can reach it through the
+    // higher-half window without remapping anything.
+    uintptr_t phys = pmm_alloc();
+    vmm_pt_root = (void*)PHYS_TO_VIRT(phys);
+    DEBUG("VMM setup: vmm_pt_root=0x%p (phys=0x%p)", vmm_pt_root, phys);
+}
+
+void* vmm_alloc(size_t length, size_t flags)
+{
+    // We will try to allocate at least length bytes, which have to be
+    // rounded UP to the next page so it's coherent with the PMM
+    size_t len = ALIGN_UP(length, PAGE_SIZE);
+    (void)len; (void)flags;
+
+    // Some linked list shenanigans will be here:
+    // now that the kheap can kmalloc the list items,
+    // the vm_object list comes next.
+    return NULL;
+}
+
+void vmm_init()
+{
+    vmm_setup_pt_root();
+}
\ No newline at end of file
diff --git a/src/mem/paging/vmm.h b/src/mem/paging/vmm.h
new file mode 100644
index 0000000..3f862be
--- /dev/null
+++ b/src/mem/paging/vmm.h
@@ -0,0 +1,29 @@
+#ifndef VMM_H
+#define VMM_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+/*
+This will be our linked list of virtual memory objects.
+Flags here aren't x86 flags, they are platform-agnostic,
+kernel-defined flags.
+*/
+
+struct vm_object
+{
+    uintptr_t base;
+    size_t length;
+    size_t flags;
+    struct vm_object* next;
+};
+
+// Flags bitfield
+#define VM_FLAG_NONE 0
+#define VM_FLAG_WRITE (1 << 0)
+#define VM_FLAG_EXEC (1 << 1)
+#define VM_FLAG_USER (1 << 2)
+
+void vmm_init();
+
+#endif
\ No newline at end of file
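With the kheap in place, the "linked list shenanigans" vmm_alloc is waiting for could plausibly go like this: pick a base above every tracked range, back each page with a PMM frame, and record the range in a kmalloc'd vm_object. A sketch under stated assumptions — VMM_ALLOC_BASE and the choice to map into kernel_pml4 are made up here, not part of the patch:

    // Assumed starting point for kernel VM allocations (hypothetical)
    #define VMM_ALLOC_BASE 0xFFFFA00000000000ULL

    void* sketch_vmm_alloc(size_t length, size_t flags)
    {
        size_t len = ALIGN_UP(length, PAGE_SIZE);

        // Place the range above every existing vm_object
        uintptr_t base = VMM_ALLOC_BASE;
        for (struct vm_object* o = vm_objs; o; o = o->next)
        {
            uintptr_t end = ALIGN_UP(o->base + o->length, PAGE_SIZE);
            if (end > base) base = end;
        }

        // Back each page with a fresh frame; paging_map_page ORs in
        // PTE_PRESENT itself, so the converted flags are enough
        for (size_t off = 0; off < len; off += PAGE_SIZE)
            paging_map_page(kernel_pml4, base + off, pmm_alloc(),
                            convert_x86_vm_flags(flags));

        // Track the range in a kmalloc'd node at the list head
        struct vm_object* obj = kmalloc(sizeof(struct vm_object));
        if (!obj) return NULL;
        obj->base = base;
        obj->length = len;
        obj->flags = flags;
        obj->next = vm_objs;
        vm_objs = obj;
        return (void*)base;
    }

Freeing would walk the list, unmap and pmm_free each page of the matching vm_object, then kfree the node — which is exactly the unmapping role the file header comment promises.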