Merge pull request 'memory' (#7) from memory into main
Reviewed-on: #7
This commit was merged in pull request #7.
6
Makefile
@@ -1,8 +1,8 @@
SOURCES = src/io/kbd/ps2.c src/io/serial/serial.c src/io/term/printf.c src/io/term/term.c src/idt/idt.c src/mem/gdt/gdt.c src/mem/misc/utils.c src/time/timer.c src/kmain.c
SOURCES = src/mem/heap/kheap.c src/mem/paging/vmm.c src/mem/paging/paging.c src/mem/paging/pmm.c src/string/string.c src/io/kbd/ps2.c src/io/serial/serial.c src/io/term/printf.c src/io/term/term.c src/idt/idt.c src/mem/gdt/gdt.c src/mem/misc/utils.c src/time/timer.c src/kmain.c

build:
	rm -f *.o
	x86_64-elf-gcc -g -c -I src $(SOURCES) -Wall -Wextra -std=gnu99 -nostdlib -ffreestanding -fno-stack-protector -fno-stack-check -fno-PIC -ffunction-sections -fdata-sections -mcmodel=kernel
	x86_64-elf-gcc -g -c -Isrc $(SOURCES) -Wall -Wextra -std=gnu99 -nostdlib -ffreestanding -fno-stack-protector -fno-stack-check -fno-PIC -ffunction-sections -fdata-sections -mcmodel=kernel
	objcopy -O elf64-x86-64 -B i386 -I binary zap-light16.psf zap-light16.o
	nasm -f elf64 src/idt/idt.S -o idt_stub.o
	x86_64-elf-ld -o pepperk -T linker.ld *.o

@@ -30,7 +30,7 @@ build-iso: limine/limine build
	./limine/limine bios-install pepper.iso

debug:
	qemu-system-x86_64 -drive file=pepper.iso -s -S -d int -no-reboot &
	qemu-system-x86_64 -drive file=pepper.iso -s -S -d int -no-reboot -no-shutdown &
	gdb pepperk --command=debug.gdb

run: build-iso
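Side note on the -I src → -Isrc change above: together with the include edits in the C files below, it relies on how GCC resolves quoted includes. A small illustration (paths taken from this diff, nothing new added to the build):

/* With -Isrc on the compile line, a quoted include is searched relative to
 * the including file first and then under src/, so src/idt/idt.c can write
 *
 *     #include "io/serial/serial.h"    // resolves to src/io/serial/serial.h
 *
 * instead of the old relative form
 *
 *     #include "../io/serial/serial.h"
 *
 * which is why the #include lines change in the files below.
 */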
@@ -1,8 +1,8 @@
#include "idt.h"
#include <stdint.h>
#include <stddef.h>
#include "../io/serial/serial.h"
#include "../io/kbd/ps2.h"
#include "io/serial/serial.h"
#include "io/kbd/ps2.h"
#include <kernel.h>

struct interrupt_descriptor idt[256];
@@ -1,9 +1,9 @@
// PS/2 Keyboard support

#include "../serial/serial.h"
#include "io/serial/serial.h"
#include "ps2.h"
#include <stdint.h>
#include "../term/term.h"
#include "io/term/term.h"
#include <kernel.h>

// The key status bitfield will be used to see if ALT, CONTROL, or SHIFT is pressed
@@ -1,4 +1,9 @@
// Terminal output
/*
    There are a couple of bugs here and there, but for now I don't care too much
    because this shitty implementation will be replaced one day by Flanterm
    (once memory management is okay: paging & kernel malloc)
*/

#include <limine.h>
#include <stddef.h>
10
src/kernel.h
@@ -13,9 +13,11 @@ enum ErrorCodes
#include "io/serial/serial.h"
#include "io/term/printf.h"

// Still lacks print formatting...
#define DEBUG(log, ...) \
    printf("debug: [%s]: " log "\n", __FILE__, ##__VA_ARGS__); \
    fctprintf((void*)&skputc, 0, "debug: [%s]: %s\n", __FILE__, log)
#define DEBUG(log, ...) fctprintf((void*)&skputc, 0, "debug: [%s]: " log "\r\n", __FILE__, ##__VA_ARGS__)

// printf("debug: [%s]: " log "\n", __FILE__, ##__VA_ARGS__);

void hcf();
#define assert(check) do { if(!(check)) hcf(); } while(0)

#endif
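For reference, a minimal usage sketch of the reworked DEBUG macro; the call site and values are invented for illustration, while skputc and fctprintf are the ones already referenced in the macro above:

#include <kernel.h>

static void debug_example(void)
{
    void* frame = (void*)0x1000;   /* made-up value, for illustration only */
    DEBUG("mapped frame at 0x%p (%u bytes)", frame, 4096u);
    /* roughly expands to:
     * fctprintf((void*)&skputc, 0,
     *           "debug: [%s]: mapped frame at 0x%p (%u bytes)\r\n",
     *           __FILE__, frame, 4096u);
     */
}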
60
src/kmain.c
@@ -10,6 +10,10 @@
#include "kernel.h"
#include "time/timer.h"
#include "io/kbd/ps2.h"
#include "mem/paging/pmm.h"
#include "mem/paging/paging.h"
#include "mem/paging/vmm.h"
#include "mem/heap/kheap.h"

// Limine version used
__attribute__((used, section(".limine_requests")))
@@ -22,6 +26,27 @@ static volatile struct limine_framebuffer_request framebuffer_request = {
    .revision = 0
};

// Memory map request
__attribute__((used, section(".limine_requests")))
static volatile struct limine_memmap_request memmap_request = {
    .id = LIMINE_MEMMAP_REQUEST,
    .revision = 0
};

// Higher Half Direct Map
__attribute__((used, section(".limine_requests")))
static volatile struct limine_hhdm_request hhdm_request = {
    .id = LIMINE_HHDM_REQUEST,
    .revision = 0
};

// Executable Address/Kernel Address (find base phys/virt address of kernel)
__attribute__((used, section(".limine_requests")))
static volatile struct limine_kernel_address_request kerneladdr_request = {
    .id = LIMINE_KERNEL_ADDRESS_REQUEST,
    .revision = 0
};

__attribute__((used, section(".limine_requests_start")))
static volatile LIMINE_REQUESTS_START_MARKER;

@@ -30,8 +55,8 @@ static volatile LIMINE_REQUESTS_END_MARKER;

struct limine_framebuffer* framebuffer;

// Panic
static void hcf()
// Panic (should dump registers etc. in the future)
void hcf()
{
    for (;;)
    {
@@ -45,23 +70,50 @@ void kmain()
    if (!LIMINE_BASE_REVISION_SUPPORTED) hcf();
    if (framebuffer_request.response == NULL || framebuffer_request.response->framebuffer_count < 1) hcf();

    // We should probably grab all the boot info in a boot context struct
    // that would be a bit cleaner than this mess

    // Get the first framebuffer from the response
    framebuffer = framebuffer_request.response->framebuffers[0];

    term_init();
    serial_init();

    if (memmap_request.response == NULL) hcf();
    memmap_display(memmap_request.response);

    if (hhdm_request.response == NULL) hcf();
    hhdm_display(hhdm_request.response);

    if (kerneladdr_request.response == NULL) hcf();
    DEBUG("kernel: phys_base=0x%p virt_base=0x%p", kerneladdr_request.response->physical_base, kerneladdr_request.response->virtual_base);

    CLEAR_INTERRUPTS;
    gdt_init();
    idt_init();
    timer_init();
    SET_INTERRUPTS;

    pmm_init(memmap_request.response, hhdm_request.response);

    // Remap kernel, HHDM and framebuffer
    paging_init(kerneladdr_request.response, framebuffer);

    kheap_init();

    void* ptr = kmalloc(10); DEBUG("(KMALLOC TEST) Allocated 10 bytes at 0x%p", ptr);
    void* ptr2 = kmalloc(200); DEBUG("(KMALLOC TEST) Allocated 200 bytes at 0x%p", ptr2);
    kfree(ptr);
    void* ptr3 = kmalloc(5); DEBUG("(KMALLOC TEST) Allocated 5 bytes at 0x%p", ptr3);

    vmm_init();

    keyboard_init(FR);

    term_init();
    // Draw something
    printf("%s, %s!\n", "Hello", "world");
    // Yoohoooooo!
    DEBUG("kernel initialized successfully! hanging... wow=%d", 42);
    //DEBUG("kernel initialized successfully! hanging... wow=%d", 42);
    printf("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed non justo a magna bibendum auctor viverra rutrum diam. In hac habitasse platea dictumst. Vestibulum suscipit ipsum eget tortor maximus lobortis. Donec vel ipsum id lacus fringilla bibendum id eget risus. Fusce vestibulum diam sit amet nunc ultricies, nec rutrum nibh congue. Donec fringilla a dui sit amet ullamcorper. Donec pharetra quis tortor id congue. Aliquam erat volutpat. Duis suscipit nulla vel ligula iaculis, in gravida mauris pellentesque. Vestibulum nunc nisl, posuere eu eros et, dictum molestie dolor. Donec posuere laoreet hendrerit. Suspendisse potenti. Proin fringilla vehicula malesuada. Quisque a dui est. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur nec aliquam lacus, at lacinia enim. ");
    hcf();
}
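The comment in kmain() about grabbing the boot info in a boot context struct could look something like the sketch below. It is a hypothetical illustration only, not part of this commit: the names boot_context, boot_ctx and boot_context_init are made up, while the request variables are the ones declared above.

// Hypothetical sketch: collect the Limine responses used in kmain() into one
// struct so later subsystems take a single pointer instead of four globals.
struct boot_context
{
    struct limine_framebuffer*              fb;
    struct limine_memmap_response*          memmap;
    struct limine_hhdm_response*            hhdm;
    struct limine_kernel_address_response*  kaddr;
};

static struct boot_context boot_ctx;

static void boot_context_init(void)
{
    boot_ctx.fb     = framebuffer_request.response->framebuffers[0];
    boot_ctx.memmap = memmap_request.response;
    boot_ctx.hhdm   = hhdm_request.response;
    boot_ctx.kaddr  = kerneladdr_request.response;
}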
@@ -1,6 +1,6 @@
#include "gdt.h"
#include <stdint.h>
#include "../../io/serial/serial.h"
#include "io/serial/serial.h"
#include <kernel.h>

// Descriptors are 8-byte wide (64bits)
106
src/mem/heap/kheap.c
Normal file
@@ -0,0 +1,106 @@
#include "kheap.h"
#include "mem/paging/paging.h"
#include "mem/paging/pmm.h"
#include <stddef.h>
#include <kernel.h>

extern uint64_t kernel_phys_base;
extern uint64_t kernel_virt_base;

uintptr_t kheap_start;

static struct heap_block_t* head = NULL;
static uintptr_t end;

// Kernel root table (level 4)
extern uint64_t *kernel_pml4;

static void kheap_map_page()
{
    uintptr_t phys = pmm_alloc();
    paging_map_page(kernel_pml4, end, phys, PTE_PRESENT | PTE_WRITABLE | PTE_NOEXEC);
    end += PAGE_SIZE;
    DEBUG("Mapped first kheap page");
}

void kheap_init()
{
    kheap_start = ALIGN_UP(kernel_virt_base + KERNEL_SIZE, PAGE_SIZE);
    end = kheap_start;

    // At least 1 page must be mapped for it to work
    kheap_map_page();

    // Give linked list head its properties
    head = (struct heap_block_t*)kheap_start;
    head->size = PAGE_SIZE - sizeof(struct heap_block_t);
    head->free = true;
    head->next = NULL;
    DEBUG("kheap initialized, head=0x%p, size=%u", head, head->size);
}

void* kmalloc(size_t size)
{
    // No size, no memory allocated!
    if (!size) return NULL;

    struct heap_block_t* curr = head;

    while (curr)
    {
        // Is block free and big enough for us?
        if (curr->free && curr->size >= size)
        {
            // We split the block if it is big enough
            if (curr->size > size + sizeof(struct heap_block_t))
            {
                struct heap_block_t* new_block = (struct heap_block_t*)((uintptr_t)curr + sizeof(struct heap_block_t) + size);
                // We have to subtract the size of our block struct
                new_block->size = curr->size - size - sizeof(struct heap_block_t);
                new_block->free = true;

                // Then we chain up the block in the list
                new_block->next = curr->next;
                curr->next = new_block;

                curr->size = size;
            }

            // Found a good block, we return it
            curr->free = false;
            return (void*)((uintptr_t)curr + sizeof(struct heap_block_t));
        }
        // Continue browsing the list if nothing good was found yet
        curr = curr->next;
    }

    // If we're here it means we didn't have enough memory
    // for the block allocation. So we will allocate more.
    uintptr_t old_end = end;
    kheap_map_page();

    struct heap_block_t* block = (struct heap_block_t*)old_end;
    block->size = PAGE_SIZE - sizeof(struct heap_block_t);
    block->free = false;
    block->next = NULL;

    // Put the block at the end of the list
    curr = head;
    while (curr->next)
    {
        curr = curr->next;
    }
    curr->next = block;

    return (void*)((uintptr_t)block + sizeof(struct heap_block_t));
}

void kfree(void* ptr)
{
    // Nothing to free
    if (!ptr) return;

    // Set it free!
    struct heap_block_t* block = (struct heap_block_t*)((uintptr_t)ptr - sizeof(struct heap_block_t));
    block->free = true;
}
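A worked example of the split path in kmalloc() above, assuming sizeof(struct heap_block_t) is 24 bytes on x86_64 (8-byte size_t, padded bool, 8-byte next pointer); the exact header size is an assumption, the arithmetic follows the code:

/* After kheap_init():  head->size = 4096 - 24 = 4072, head->free = true
 *
 * kmalloc(10):
 *   curr->size (4072) > 10 + 24, so the block is split:
 *     new_block        = (uintptr_t)head + 24 + 10
 *     new_block->size  = 4072 - 10 - 24 = 4038
 *     head->size       = 10, head->free = false
 *   returned pointer   = (uintptr_t)head + 24
 *
 * kfree(ptr) later walks back those same 24 bytes to find the header again.
 */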
26
src/mem/heap/kheap.h
Normal file
@@ -0,0 +1,26 @@
#ifndef KHEAP_H
#define KHEAP_H

// We need some kind of simple kernel heap to make our linked list
// for the VMM, as we need "malloc" and "free" for that data structure.
// When the kernel heap is ready, we can alloc our VM object linked list
// and then continue working on the VMM.

// 16MB should be enough for some linked lists
#define KHEAP_SIZE (16*1024*1024)

#include <stdbool.h>
#include <stddef.h>

struct heap_block_t
{
    size_t size;
    bool free;
    struct heap_block_t* next;
};

void kheap_init();
void* kmalloc(size_t size);
void kfree(void* ptr);

#endif
@@ -1,5 +1,8 @@
#include <stddef.h>
#include <stdint.h>
#include <limine.h>
#include "kernel.h"
#include "string/string.h"

// We won't be linked to the standard library, but we still need the basic mem* functions
// so everything goes all right with the compiler
@@ -68,3 +71,52 @@ int memcmp(const void* s1, const void* s2, size_t n)

    return 0;
}

// Display the memmap so we see how the memory is laid out at handoff
void memmap_display(struct limine_memmap_response* response)
{
    DEBUG("Got memory map from Limine: revision %u, %u entries", response->revision, response->entry_count);

    for (size_t i=0; i<response->entry_count; i++)
    {
        struct limine_memmap_entry* entry = response->entries[i];
        char type[32] = {0};
        switch(entry->type)
        {
            case LIMINE_MEMMAP_USABLE:
                strcpy(type, "USABLE");
                break;
            case LIMINE_MEMMAP_RESERVED:
                strcpy(type, "RESERVED");
                break;
            case LIMINE_MEMMAP_ACPI_RECLAIMABLE:
                strcpy(type, "ACPI_RECLAIMABLE");
                break;
            case LIMINE_MEMMAP_ACPI_NVS:
                strcpy(type, "ACPI_NVS");
                break;
            case LIMINE_MEMMAP_BAD_MEMORY:
                strcpy(type, "BAD_MEMORY");
                break;
            case LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE:
                strcpy(type, "BOOTLOADER_RECLAIMABLE");
                break;
            case LIMINE_MEMMAP_KERNEL_AND_MODULES:
                strcpy(type, "KERNEL_AND_MODULES");
                break;
            case LIMINE_MEMMAP_FRAMEBUFFER:
                strcpy(type, "FRAMEBUFFER");
                break;
            default:
                strcpy(type, "UNKNOWN");
                break;
        }
        DEBUG("entry %02u: [0x%016x | %016u bytes] - %s", i, entry->base, entry->length, type);
    }
}

// Display the HHDM
void hhdm_display(struct limine_hhdm_response* hhdm)
{
    DEBUG("Got HHDM revision=%u offset=0x%p", hhdm->revision, hhdm->offset);
}
@@ -8,4 +8,7 @@ void* memset(void* s, int c, size_t n);
void* memmove(void *dest, const void* src, size_t n);
int memcmp(const void* s1, const void* s2, size_t n);

void memmap_display(struct limine_memmap_response* response);
void hhdm_display(struct limine_hhdm_response* hhdm);

#endif
159
src/mem/paging/paging.c
Normal file
@@ -0,0 +1,159 @@
#include "paging.h"
#include "pmm.h"
#include <kernel.h>
#include <stddef.h>
#include <limine.h>

/*
    Paging on x86 uses four different page table levels:
    the cr3 register contains the phys address of the PML4 (root directory).

    Each directory/table is made of 512 entries, each one a uint64_t.
    Each of these entries has special bits (PRESENT/WRITEABLE/USER/etc.)
    that dictate its attributes. These bits also carry down to the child tables.

    If we use 1GB huge pages:   PML4 -> PDPT -> 1gb pages
           2MB huge pages:      PML4 -> PDPT -> PD -> 2mb pages
           4KB (regular size):  PML4 -> PDPT -> PD -> PT -> 4kb pages
*/

static inline void load_cr3(uint64_t value) {
    asm volatile ("mov %0, %%cr3" :: "r"(value) : "memory");
}

// To flush TLB
static inline void invlpg(void *addr)
{
    asm volatile("invlpg (%0)" :: "r"(addr) : "memory");
}

// Allocates a 512-entry 64bit page table/directory/whatever (zeroed)
static uint64_t* alloc_page_table()
{
    uint64_t* virt = (uint64_t*)PHYS_TO_VIRT(pmm_alloc());

    for (size_t i=0; i<512; i++)
    {
        virt[i] = 0;
    }
    return virt;
}

// Kernel paging root table, that will be placed in cr3
__attribute__((aligned(4096)))
uint64_t *kernel_pml4;

// Map a page, taking a virt and a phys address. This walks the paging structures
// beginning at the given root table, translates the virtual address into indexes
// into the page tables/directories, and then populates the correct page table entry
// with the given physical address + flags
void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags)
{
    virt = PAGE_ALIGN_DOWN(virt);
    phys = PAGE_ALIGN_DOWN(phys);

    // Translate the virt address into page table indexes
    uint64_t pml4_i = PML4_INDEX(virt);
    uint64_t pdpt_i = PDPT_INDEX(virt);
    uint64_t pd_i = PD_INDEX(virt);
    uint64_t pt_i = PT_INDEX(virt);

    uint64_t *pdpt, *pd, *pt;

    // PML4
    // If the entry at index is not present, allocate enough space for it
    // then populate the entry with the correct addr + flags
    if (!(root_table[pml4_i] & PTE_PRESENT))
    {
        pdpt = alloc_page_table();
        root_table[pml4_i] = VIRT_TO_PHYS(pdpt) | PTE_PRESENT | PTE_WRITABLE;
    }
    else {
        pdpt = (uint64_t *)PHYS_TO_VIRT(root_table[pml4_i] & ~0xFFFULL);
    }

    // PDPT: same here
    if (!(pdpt[pdpt_i] & PTE_PRESENT))
    {
        pd = alloc_page_table();
        pdpt[pdpt_i] = VIRT_TO_PHYS(pd) | PTE_PRESENT | PTE_WRITABLE;
    }
    else {
        pd = (uint64_t *)PHYS_TO_VIRT(pdpt[pdpt_i] & ~0xFFFULL);
    }

    // PD: and here
    if (!(pd[pd_i] & PTE_PRESENT))
    {
        pt = alloc_page_table();
        pd[pd_i] = VIRT_TO_PHYS(pt) | PTE_PRESENT | PTE_WRITABLE;
    }
    else {
        pt = (uint64_t *)PHYS_TO_VIRT(pd[pd_i] & ~0xFFFULL);
    }

    // PT: finally, populate the page table entry
    pt[pt_i] = phys | flags | PTE_PRESENT;

    // Flush TLB (apply changes)
    invlpg((void *)virt);
}

uint64_t kernel_phys_base;
uint64_t kernel_virt_base;

void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb)
{
    // We should map the kernel, GDT, IDT, stack, framebuffer.
    // Optionally we could map ACPI tables (we can find them in the Limine memmap)

    kernel_phys_base = kaddr->physical_base;
    kernel_virt_base = kaddr->virtual_base;

    DEBUG("Kernel lives at virt=0x%p phys=0x%p", kernel_virt_base, kernel_phys_base);

    kernel_pml4 = alloc_page_table();

    // for debug
    uint64_t page_count = 0;

    // HHDM: map the first 1 GB using the given offset
    for (uint64_t i=0; i<0x40000000; i += PAGE_SIZE)
    {
        //paging_kmap_page(i+hhdm_off, i, PTE_WRITABLE);
        paging_map_page(kernel_pml4, i+hhdm_off, i, PTE_WRITABLE);
        page_count++;
    }
    DEBUG("Mapped %u pages for first 1GB (HHDM)", page_count); page_count = 0;

    // Map the kernel (according to virt/phys_base given by Limine)
    // SOME DAY when we want a safer kernel we should map .text as Read/Exec,
    // .rodata as Read and .data as Read/Write
    // For now who gives a shit, let's RWX the whole kernel
    for (uint64_t i = 0; i < KERNEL_SIZE; i += PAGE_SIZE)
    {
        //paging_kmap_page(kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
        paging_map_page(kernel_pml4, kernel_virt_base+i, kernel_phys_base+i, PTE_WRITABLE);
        page_count++;
    }
    DEBUG("Mapped %u pages for kernel", page_count); page_count = 0;

    // Get the framebuffer phys/virt address, and size
    uint64_t fb_virt = (uint64_t)fb->address;
    uint64_t fb_phys = VIRT_TO_PHYS(fb_virt);
    uint64_t fb_size = fb->pitch * fb->height;
    uint64_t fb_pages = (fb_size + PAGE_SIZE-1)/PAGE_SIZE;

    // Map the framebuffer (with cache-disable & write-through)
    for (uint64_t i=0; i<fb_pages; i++)
    {
        //paging_kmap_page(fb_virt+i*PAGE_SIZE, fb_phys+i*PAGE_SIZE, PTE_WRITABLE | PTE_PCD | PTE_PWT);
        paging_map_page(kernel_pml4, fb_virt+i*PAGE_SIZE, fb_phys+i*PAGE_SIZE, PTE_WRITABLE | PTE_PCD | PTE_PWT);
        page_count++;
    }
    DEBUG("Mapped %u pages for framebuffer", page_count);

    // Finally, we load the physical address of our PML4 (root table) into cr3
    load_cr3(VIRT_TO_PHYS(kernel_pml4));
    DEBUG("cr3 loaded, we're still alive");
}
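To make the four-level walk in paging_map_page() concrete, here is how the index macros decompose KERNEL_BASE from paging.h; this is pure arithmetic on one constant, nothing read from real hardware:

#include <stdint.h>

static void index_example(void)
{
    uint64_t virt = 0xFFFFFFFF80000000ULL;     /* KERNEL_BASE */

    uint64_t pml4_i = (virt >> 39) & 0x1FF;    /* 511 */
    uint64_t pdpt_i = (virt >> 30) & 0x1FF;    /* 510 */
    uint64_t pd_i   = (virt >> 21) & 0x1FF;    /* 0   */
    uint64_t pt_i   = (virt >> 12) & 0x1FF;    /* 0   */

    (void)pml4_i; (void)pdpt_i; (void)pd_i; (void)pt_i;
}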
44
src/mem/paging/paging.h
Normal file
@@ -0,0 +1,44 @@
#ifndef PAGING_H
#define PAGING_H

#define PAGE_SIZE 4096
#define BITS_PER_ROW 64

#include <stdint.h>
#include <limine.h>

void paging_init(struct limine_kernel_address_response* kaddr, struct limine_framebuffer* fb);
void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags);

extern uint64_t hhdm_off;

#define PHYS_TO_VIRT(x) ((void*)((uintptr_t)(x) + hhdm_off))
#define VIRT_TO_PHYS(x) ((uintptr_t)(x) - hhdm_off)

// Stole it
#define ALIGN_UP(x, align) (((x) + ((align) - 1)) & ~((align) - 1))
#define ALIGN_DOWN(x, align) ((x) & ~((align) - 1))
#define PAGE_ALIGN_DOWN(x) ((x) & ~0xFFFULL)

#define PML4_INDEX(x) (((x) >> 39) & 0x1FF)
#define PDPT_INDEX(x) (((x) >> 30) & 0x1FF)
#define PD_INDEX(x) (((x) >> 21) & 0x1FF)
#define PT_INDEX(x) (((x) >> 12) & 0x1FF)

// Page entry special bits
// Bits set on a parent (directory, table) fall back to their children
#define PTE_PRESENT (1ULL << 0)
#define PTE_WRITABLE (1ULL << 1)
#define PTE_USER (1ULL << 2)
#define PTE_PWT (1ULL << 3)
#define PTE_PCD (1ULL << 4)
#define PTE_HUGE (1ULL << 7)
#define PTE_NOEXEC (1ULL << 63)

// Specified in linker.ld
#define KERNEL_BASE 0xFFFFFFFF80000000ULL

// 2 MB should be enough (as of now, the whole kernel ELF is around 75kb)
#define KERNEL_SIZE 0x200000

#endif
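A small sketch of the alignment macros above; the bit trick only works when align is a power of two (which holds for PAGE_SIZE = 4096). The values below are purely illustrative:

#include <stdint.h>

/* copied from paging.h above */
#define ALIGN_UP(x, align)   (((x) + ((align) - 1)) & ~((align) - 1))
#define ALIGN_DOWN(x, align) ((x) & ~((align) - 1))

static void align_example(void)
{
    uint64_t a = ALIGN_UP(0x1234, 4096);   /* 0x2000 */
    uint64_t b = ALIGN_DOWN(0x1234, 4096); /* 0x1000 */
    uint64_t c = ALIGN_UP(0x2000, 4096);   /* 0x2000: already aligned, unchanged */
    (void)a; (void)b; (void)c;
}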
103
src/mem/paging/pmm.c
Normal file
@@ -0,0 +1,103 @@
// OMG here we are. I'm cooked.

/*
    pmm - Physical Memory Manager
    will manage 4kb pages physically
    it will probably need to get some info from Limine,
    to see which pages are used by kernel/bootloader/mmio/fb etc.
*/

#include "paging.h"
#include <limine.h>
#include <stddef.h>
#include <stdint.h>
#include <kernel.h>
#include "mem/misc/utils.h"
#include "pmm.h"

/*
    First we'll have to discover the physical memory layout,
    and for that we can use a Limine request.
*/

/*
    We will look for the biggest usable physical memory region
    and use this for the bitmap. The reserved memory will be ignored.
*/

struct limine_memmap_entry* biggest_entry;

static void pmm_find_biggest_usable_region(struct limine_memmap_response* memmap, struct limine_hhdm_response* hhdm)
{
    // Max length of a usable memory region
    uint64_t length_max = 0;
    uint64_t offset = hhdm->offset;

    DEBUG("Usable Memory:");
    for (size_t i=0; i<memmap->entry_count; i++)
    {
        struct limine_memmap_entry* entry = memmap->entries[i];

        if (entry->type == LIMINE_MEMMAP_USABLE)
        {
            DEBUG("0x%p-0x%p mapped at 0x%p-0x%p", entry->base, entry->base+entry->length,
                  entry->base+offset, entry->base+entry->length+offset);
            if (entry->length > length_max)
            {
                length_max = entry->length;
                biggest_entry = entry;
            }
        }
    }

    DEBUG("Biggest usable memory region:");
    DEBUG("0x%p-0x%p mapped at 0x%p-0x%p", biggest_entry->base, biggest_entry->base + biggest_entry->length,
          biggest_entry->base+offset, biggest_entry->base+biggest_entry->length+offset);
}

// Offset from Higher Half Direct Map
uint64_t hhdm_off;

static uintptr_t g_freelist = 0;

uintptr_t pmm_alloc()
{
    if (!g_freelist) return 0;
    uintptr_t addr = g_freelist;
    g_freelist = *(uintptr_t*) PHYS_TO_VIRT(g_freelist);
    return addr;
}

void pmm_free(uintptr_t addr)
{
    *(uintptr_t*) PHYS_TO_VIRT(addr) = g_freelist;
    g_freelist = addr;
}

static void pmm_init_freelist()
{
    // We simply call pmm_free() on each page that is marked USABLE
    // in our big memory region.
    uint64_t base = ALIGN_UP(biggest_entry->base, PAGE_SIZE);
    uint64_t end = ALIGN_DOWN(biggest_entry->base + biggest_entry->length, PAGE_SIZE);

    uint64_t page_count=0;
    for (uint64_t addr = base; addr < end; addr += PAGE_SIZE)
    {
        pmm_free(addr);
        //DEBUG("page %u lives at phys 0x%p (virt 0x%p)", page_count, addr, PHYS_TO_VIRT(addr));
        page_count++;
    }
    DEBUG("%u frames in freelist, available for use (%u bytes)", page_count, page_count*PAGE_SIZE);
}

void pmm_init(struct limine_memmap_response* memmap, struct limine_hhdm_response* hhdm)
{
    hhdm_off = hhdm->offset;
    pmm_find_biggest_usable_region(memmap, hhdm);
    //pmm_allocate_bitmap(hhdm); too complicated for my small brain

    // Now we have the biggest USABLE region,
    // so to populate the free list we just iterate through it
    pmm_init_freelist();
}
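pmm_alloc() and pmm_free() above implement an intrusive freelist: each free 4 KiB frame stores the physical address of the next free frame in its own first bytes, written through the HHDM window, so no separate metadata array or bitmap is needed. A short trace with invented addresses:

/*   pmm_free(0x1000);   // frame 0x1000 stores the old head (0), head = 0x1000
 *   pmm_free(0x2000);   // frame 0x2000 stores 0x1000,           head = 0x2000
 *   pmm_alloc();        // returns 0x2000, head becomes *PHYS_TO_VIRT(0x2000) = 0x1000
 */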
10
src/mem/paging/pmm.h
Normal file
@@ -0,0 +1,10 @@
#ifndef PAGING_PMM_H
#define PAGING_PMM_H

#include <limine.h>

void pmm_init(struct limine_memmap_response* memmap, struct limine_hhdm_response* hhdm);
void pmm_free(uintptr_t addr);
uintptr_t pmm_alloc();

#endif
66
src/mem/paging/vmm.c
Normal file
@@ -0,0 +1,66 @@
/*
    The VMM (virtual memory manager) will have two roles:
    - mapping pages
    - unmapping pages
    in a specified virtual space,

    compared to the PMM which allocs/frees 4kb frames ("physical pages").
*/

#include "vmm.h"
#include "paging.h"
#include <stddef.h>
#include "pmm.h"
#include <kernel.h>

void* vmm_pt_root = 0;

// Linked list head for virtual memory objects
struct vm_object* vm_objs = NULL;

uint64_t convert_x86_vm_flags(size_t flags)
{
    uint64_t value = 0;
    if (flags & VM_FLAG_WRITE)
    {
        value |= PTE_WRITABLE;
    }
    if (flags & VM_FLAG_USER)
    {
        value |= PTE_USER;
    }
    if ((flags & VM_FLAG_EXEC) == 0)
    {
        value |= PTE_NOEXEC;
    }
    return value;
}

extern uint64_t *kernel_pml4;

void vmm_setup_pt_root()
{
    // We alloc a physical page (frame) for the pointer, then map it
    // to virt (pointer)
    uintptr_t phys = pmm_alloc();
    vmm_pt_root = (void*)kernel_pml4;
    paging_map_page(kernel_pml4, (uint64_t)vmm_pt_root, phys, convert_x86_vm_flags(VM_FLAG_WRITE | VM_FLAG_EXEC));
    DEBUG("VMM setup: vmm_pt_root=0x%p (phys=0x%p)", vmm_pt_root, phys);
}

void* vmm_alloc(size_t length, size_t flags)
{
    // We will try to allocate at least length bytes, which have to be rounded UP to
    // the next page so it's coherent with the PMM
    size_t len = ALIGN_UP(length, PAGE_SIZE);

    // Need to implement this (as a linked list),
    // but for now the kernel heap is sufficient.
    // The VMM will prob be more useful when we have userspace
}

void vmm_init()
{
    vmm_setup_pt_root();
}
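How convert_x86_vm_flags() translates the platform-agnostic flags into x86 PTE bits, read directly off the function above; note that it is the absence of VM_FLAG_EXEC that sets PTE_NOEXEC:

/*   convert_x86_vm_flags(VM_FLAG_WRITE | VM_FLAG_EXEC)
 *       -> PTE_WRITABLE                       (no PTE_NOEXEC: exec was requested)
 *   convert_x86_vm_flags(VM_FLAG_WRITE)
 *       -> PTE_WRITABLE | PTE_NOEXEC          (plain data mapping)
 *   convert_x86_vm_flags(VM_FLAG_USER)
 *       -> PTE_USER | PTE_NOEXEC
 *
 * PTE_PRESENT is OR'd in later by paging_map_page() itself.
 */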
29
src/mem/paging/vmm.h
Normal file
@@ -0,0 +1,29 @@
#ifndef VMM_H
#define VMM_H

#include <stdint.h>
#include <stddef.h>

/*
    This will be our linked list of virtual memory objects.
    Flags here aren't x86 flags, they are platform-agnostic
    kernel-defined flags.
*/

struct vm_object
{
    uintptr_t base;
    size_t length;
    size_t flags;
    struct vm_object* next;
};

// Flags bitfield
#define VM_FLAG_NONE 0
#define VM_FLAG_WRITE (1 << 0)
#define VM_FLAG_EXEC (1 << 1)
#define VM_FLAG_USER (1 << 2)

void vmm_init();

#endif
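vmm_alloc() above is still a stub, and kheap.h notes that the heap exists precisely so the VM-object list can be allocated. A purely hypothetical sketch, not part of this commit, of how a mapping could be recorded once that work happens; vm_track is a made-up helper name, the types and vm_objs are the ones declared in this PR:

#include "mem/paging/vmm.h"
#include "mem/heap/kheap.h"

extern struct vm_object* vm_objs;   /* list head defined in vmm.c */

static void vm_track(uintptr_t base, size_t length, size_t flags)
{
    struct vm_object* obj = kmalloc(sizeof(*obj));
    if (!obj) return;               /* out of heap: silently skip for now */

    obj->base   = base;
    obj->length = length;
    obj->flags  = flags;

    /* push onto the singly linked list */
    obj->next = vm_objs;
    vm_objs   = obj;
}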
6
src/string/string.c
Normal file
@@ -0,0 +1,6 @@
char* strcpy(char *dest, const char *src)
{
    char *temp = dest;
    while((*dest++ = *src++));
    return temp;
}
6
src/string/string.h
Normal file
@@ -0,0 +1,6 @@
#ifndef STRING_H
#define STRING_H

char *strcpy(char *dest, const char *src);

#endif
@@ -1,5 +1,5 @@
#include <stdint.h>
#include "../io/serial/serial.h"
#include "io/serial/serial.h"
#include <kernel.h>

/*