syscall #17
+1 -2
@@ -27,8 +27,7 @@
/* process */
#define PROCESS_NAME_MAX 64
#define PROCESS_STACK_SIZE 0x10000 // 64kb
#define PROCESS_BASE 0x400000
#define PROCESS_STACK_BASE 0x1000000
#define PROCESS_STACK_TOP 0x80000000

/* sched */
// 1 tick = 1 ms => quantum = 10ms
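
A small illustrative sketch of how these constants fit together (not part of this diff; schedule(), sched_on_tick() and SCHED_QUANTUM_TICKS are assumed names, not repo code):

    #include <stdint.h>

    void schedule(void);                  /* assumed scheduler entry point */

    #define SCHED_QUANTUM_TICKS 10        /* 1 tick = 1 ms  =>  10 ms quantum */

    static uint64_t quantum_ticks;

    void sched_on_tick(void)              /* hypothetical 1 ms timer hook */
    {
        if (++quantum_ticks >= SCHED_QUANTUM_TICKS) {
            quantum_ticks = 0;
            schedule();                   /* preempt after a full quantum */
        }
    }

The stack grows down from PROCESS_STACK_TOP, so the lowest stack address a process can touch would be PROCESS_STACK_TOP - PROCESS_STACK_SIZE (64 KiB below the top).
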
@@ -16,9 +16,11 @@

void paging_init(struct boot_context boot_ctx);
void paging_map_page(uint64_t* root_table, uint64_t virt, uint64_t phys, uint64_t flags);
uint64_t* alloc_page_table();

// To swap root page tables
void load_cr3(uint64_t value);
void invlpg(void *addr);

extern uint64_t hhdm_off;
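
A hedged usage sketch of these declarations (illustrative only; kernel_pml4, pmm_alloc() and the PTE_* flag names are taken from elsewhere in this changeset, and example_map_one_page() is hypothetical):

    void example_map_one_page(void)
    {
        extern uint64_t* kernel_pml4;

        /* Back the virtual page at 0x400000 with a fresh frame and make
         * sure no stale translation for it survives in the TLB. */
        paging_map_page(kernel_pml4, 0x400000, pmm_alloc(), PTE_PRESENT | PTE_WRITABLE);
        invlpg((void*)0x400000);
    }

load_cr3() would only be handed a complete root table (see vmm_create_address_space() later in this diff); loading a PML4 without kernel mappings would fault on the very next instruction fetch.
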
+12 -17
@@ -9,26 +9,21 @@

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

/*
    This will be our linked list of virtual memory objects.
    Flags here aren't x86 flags, they are platform-agnostic
    kernel-defined flags.
*/

struct vm_object {
    uintptr_t base;
    size_t length;
    size_t flags;
    struct vm_object* next;
};

struct vmm_context {
    uint64_t* pml4;
};

// Flags bitfield
#define VM_FLAG_NONE 0
#define VM_FLAG_WRITE (1 << 0)
#define VM_FLAG_EXEC (1 << 1)
#define VM_FLAG_USER (1 << 2)

void vmm_init(void);
void* vmm_alloc_region(uint64_t* pml4, size_t pages, uint64_t flags);
bool vmm_is_mapped(uint64_t* pml4, uint64_t virt);
void vmm_unmap(uint64_t* pml4, uint64_t virt);
void* vmm_map(uint64_t* pml4, uint64_t virt, uint64_t flags);
uint64_t* vmm_create_address_space();
uint64_t vmm_virt_to_phys(uint64_t* pml4, uint64_t virt);

#define VMM_USER_SPACE_START 0x0000000000001000
#define VMM_USER_SPACE_END 0x00007FFFFFFFF000

#endif
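
A hedged usage sketch of this API (not part of the diff). As implemented later in vmm.c, vmm_alloc_region() forwards @flags straight into the page-table entries, so the example uses the x86 PTE_* values rather than the VM_FLAG_* bitfield; example_user_buffer() is a hypothetical caller:

    void example_user_buffer(void)
    {
        uint64_t* pml4 = vmm_create_address_space();
        void* buf = vmm_alloc_region(pml4, 4, PTE_WRITABLE | PTE_USER);

        if (vmm_is_mapped(pml4, (uint64_t)buf)) {
            uint64_t phys = vmm_virt_to_phys(pml4, (uint64_t)buf);
            DEBUG("buf=%p phys=0x%p", buf, (void*)phys);
        }

        vmm_unmap(pml4, (uint64_t)buf);   /* note: unmaps/frees the first page only */
    }
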
@@ -32,6 +32,8 @@ global vector_19_handler
global vector_20_handler
global vector_21_handler

global vector_128_handler

interrupt_stub:
    ; We'll push all general-purpose registers to the stack,
    ; so they're intact and don't bother the code that was
@@ -313,3 +315,10 @@ vector_33_handler:
    push qword 0
    push qword 33
    jmp interrupt_stub

; Syscall Interrupt (0x80)
align 16
vector_128_handler:
    push qword 0
    push qword 128
    jmp interrupt_stub
+7 -1
@@ -21,6 +21,8 @@ struct idtr idt_reg;
// Address to our first interrupt handler
extern char vector_0_handler[];

extern char vector_128_handler[];

// Timer ticks
extern volatile uint64_t ticks;

@@ -75,7 +77,7 @@ void idt_init()
        idt_set_entry(i, vector_0_handler + (i*16), 0);
    }

    idt_set_entry(0x80, syscall_handler, 0);
    idt_set_entry(0x80, vector_128_handler, 0);

    idt_load(&idt);
    DEBUG("IDT initialized");
@@ -274,6 +276,10 @@ struct cpu_status_t* interrupt_dispatch(struct cpu_status_t* context)
            outb(0x20, 0x20);
            break;

        case 128: // Syscall Interrupt (0x80)
            syscall_handler(context);
            break;

        default:
            DEBUG("Unexpected Interrupt");
            break;
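
The handler itself is outside this hunk. A minimal sketch of what interrupt_dispatch now calls, assuming cpu_status_t exposes the saved general-purpose registers (the rdi field name is an assumption, as is the dispatch-on-rdi convention implied by the shell test below):

    /* Hypothetical sketch, not the repo's actual syscall_handler. */
    void syscall_handler(struct cpu_status_t* context)
    {
        uint64_t num = context->rdi;      /* the shell test loads 0 into %rdi */

        switch (num) {
        case 0:
            DEBUG("syscall 0 invoked");
            break;
        default:
            DEBUG("unknown syscall %d", (int)num);
            break;
        }
    }
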
@@ -42,6 +42,7 @@ void pedicel_main(void* arg)
        }

        if (strncmp(input_buf, "syscall", 7) == 0) {
            __asm__ volatile("mov $0x00, %rdi");
            __asm__ volatile("int $0x80");
            continue;
        }
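
One caveat with this test: the two basic asm statements are independent, so the compiler is free to reuse %rdi between them. A single extended-asm statement with an input constraint pins the value (sketch only, not part of this diff):

    uint64_t syscall_no = 0;
    __asm__ volatile("int $0x80"
                     :                   /* no outputs */
                     : "D"(syscall_no)   /* "D" places the value in %rdi */
                     : "memory");
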
@@ -124,5 +124,6 @@ void kmain()

    printf(PEPPEROS_SPLASH);
    init.all = true;

    idle();
}
+2 -2
@@ -43,7 +43,7 @@ void load_cr3(uint64_t value) {
 * This function is used to flush at least the TLB entry (or entries)
 * for the page that contains the <addr> address.
 */
static inline void invlpg(void *addr)
void invlpg(void *addr)
{
    asm volatile("invlpg (%0)" :: "r"(addr) : "memory");
}
@@ -59,7 +59,7 @@ static inline void invlpg(void *addr)
 * Return:
 * <virt> - Pointer to allocated page table
 */
static uint64_t* alloc_page_table()
uint64_t* alloc_page_table()
{
    uint64_t* virt = (uint64_t*)PHYS_TO_VIRT(pmm_alloc());
+198 -47
@@ -19,60 +19,211 @@ compared to the PMM which allocs/frees 4kb frames ("physical pages").
#include <mem/pmm.h>
#include <kernel.h>

void* vmm_pt_root = 0;

// Linked list head for virtual memory objects
struct vm_object* vm_objs = NULL;

/*
 * Will have to be rewritten and expanded,
 * to prepare for userspace.
 * The platform-agnostic flags will be removed
 * because as long as the kernel is x86 only,
 * we don't need over complication.
 * Plus I don't plan to port to other architectures
 */

uint64_t convert_x86_vm_flags(size_t flags)
{
    uint64_t value = 0;
    if (flags & VM_FLAG_WRITE)
    {
        value |= PTE_WRITABLE;
    }
    if (flags & VM_FLAG_USER)
    {
        value |= PTE_USER;
    }
    if ((flags & VM_FLAG_EXEC) == 0)
    {
        value |= PTE_NOEXEC;
    }
    return value;
}

extern uint64_t *kernel_pml4;

void vmm_setup_pt_root()
/*
 * vmm_switch_to - Switch to a different VMM context
 * @ctx: VMM context to switch to
 *
 * This function makes the CPU switch to another
 * virtual memory context, by using the PML4 address
 * specified in the VMM context pointed to by @ctx.
 */
void vmm_switch_to(struct vmm_context* ctx)
{
    // We alloc a physical page (frame) for the pointer, then map it
    // to virt (pointer)
    uintptr_t phys = pmm_alloc();
    vmm_pt_root = (void*)kernel_pml4;
    paging_map_page(kernel_pml4, (uint64_t)vmm_pt_root, phys, convert_x86_vm_flags(VM_FLAG_WRITE | VM_FLAG_EXEC));
    DEBUG("VMM setup: vmm_pt_root=0x%p (phys=0x%p)", vmm_pt_root, phys);
    if (!ctx || !ctx->pml4) {
        panic(NULL, "Attempted to switch to bad PML4!");
    }
    uint64_t pml4 = VIRT_TO_PHYS(ctx->pml4);
    asm volatile ("mov %0, %%cr3" :: "r"(pml4) : "memory");
}

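Two things worth noting here, with a short sketch (not part of this diff). First, convert_x86_vm_flags() sets PTE_NOEXEC whenever VM_FLAG_EXEC is absent, so mappings are non-executable by default. Second, the new vmm_context is just a PML4 wrapper, so switching is a one-liner for callers:

    /* Hypothetical caller: wrap a fresh address space and switch to it. */
    struct vmm_context ctx = { .pml4 = vmm_create_address_space() };
    vmm_switch_to(&ctx);              /* loads CR3 with VIRT_TO_PHYS(ctx.pml4) */
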
/* void* vmm_alloc(size_t length, size_t flags)
/*
 * vmm_virt_to_phys - Translate from virtual to physical address
 * @pml4: virtual address of the Page Map Level 4 (root page table)
 * @virt: virtual address to translate
 *
 * This function goes through page table structures, beginning at
 * the root page table which lives at @pml4, and translates @virt
 * to a physical address, if it's found in the tables.
 *
 * Return:
 * <phys> - physical address
 * %-1 - address is not present in page tables pointed to by @pml4
 */
uint64_t vmm_virt_to_phys(uint64_t* pml4, uint64_t virt)
{
    // We will try to allocate at least length bytes, which have to be rounded UP to
    // the next page so its coherent with the PMM
    size_t len = ALIGN_UP(length, PAGE_SIZE);
    uint64_t pml4_i = PML4_INDEX(virt);
    uint64_t pdpt_i = PDPT_INDEX(virt);
    uint64_t pd_i = PD_INDEX(virt);
    uint64_t pt_i = PT_INDEX(virt);

    // Need to implement this (as linked list)
    // but for now kernel heap is sufficient
    // The VMM will prob be more useful when we have userspace
} */
    if (!(pml4[pml4_i] & PTE_PRESENT)) return -1;
    uint64_t* pdpt = (uint64_t*)PHYS_TO_VIRT(pml4[pml4_i] & PTE_ADDR_MASK);

    if (!(pdpt[pdpt_i] & PTE_PRESENT)) return -1;
    uint64_t* pd = (uint64_t*)PHYS_TO_VIRT(pdpt[pdpt_i] & PTE_ADDR_MASK);

    if (!(pd[pd_i] & PTE_PRESENT)) return -1;
    uint64_t* pt = (uint64_t*)PHYS_TO_VIRT(pd[pd_i] & PTE_ADDR_MASK);

    if (!(pt[pt_i] & PTE_PRESENT)) return -1;

    uint64_t phys = (pt[pt_i] & PTE_ADDR_MASK) + (virt & 0xFFF);
    return phys;
}

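A worked example of the walk above, assuming the usual x86-64 9-bit index macros: for virt = 0x400000 (PROCESS_BASE), PML4_INDEX and PDPT_INDEX are 0, PD_INDEX is 2, PT_INDEX is 0, and the page offset (virt & 0xFFF) is 0. A caller checks the sentinel like this (sketch only):

    uint64_t phys = vmm_virt_to_phys(kernel_pml4, 0x400000);
    if (phys == (uint64_t)-1) {
        DEBUG("0x400000 is not mapped");
    }
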
/*
 * vmm_create_address_space - Create a new address space
 *
 * This function allocates a PML4, and then copies the kernel
 * page tables into it.
 *
 * Return:
 * <pml4> - address of the new PML4
 * NULL - on error (couldn't allocate a page table)
 */
uint64_t* vmm_create_address_space()
{
    uint64_t* pml4 = alloc_page_table();
    if (!pml4) return NULL;

    for (size_t i=256; i<512; i++) {
        pml4[i] = kernel_pml4[i];
    }

    return pml4;
}

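Entries 256-511 of the PML4 cover the higher half of the address space, which is where the kernel lives (the hhdm_off usage elsewhere in this changeset suggests a higher-half mapping). Every address space created this way therefore shares the kernel mappings, while the user half (entries 0-255) starts empty. A hedged usage sketch:

    uint64_t* user_pml4 = vmm_create_address_space();
    if (user_pml4) {
        /* The kernel half is shared with kernel_pml4; only the user half is empty. */
        vmm_map(user_pml4, VMM_USER_SPACE_START, PTE_WRITABLE | PTE_USER);
    }
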
/*
 * vmm_map - Map & allocate a page
 * @pml4: Page Map Level 4 (root table)
 * @virt: Virtual address to map
 * @flags: Flags to apply on page
 *
 * This function allocates a page frame with the PMM,
 * and maps this page to the provided @virt address,
 * with the corresponding @flags.
 *
 * Return:
 * <virt> - virtual address
 */
void* vmm_map(uint64_t* pml4, uint64_t virt, uint64_t flags)
{
    uint64_t phys = pmm_alloc();
    if (!phys) {
        panic(NULL, "VMM/PMM out of memory!");
    }

    paging_map_page(pml4, virt, phys, flags | PTE_PRESENT);
    return (void*)virt;
}

/*
 * vmm_unmap - Unmap & free a page
 * @pml4: Page Map Level 4 (root table)
 * @virt: Virtual address to unmap
 *
 * This function frees a page frame with the PMM,
 * and unmaps the virtual page at @virt.
 */
void vmm_unmap(uint64_t* pml4, uint64_t virt)
{
    uint64_t pml4_i = PML4_INDEX(virt);
    uint64_t pdpt_i = PDPT_INDEX(virt);
    uint64_t pd_i = PD_INDEX(virt);
    uint64_t pt_i = PT_INDEX(virt);

    if (!(pml4[pml4_i] & PTE_PRESENT)) return;
    uint64_t* pdpt = (uint64_t*)PHYS_TO_VIRT(pml4[pml4_i] & PTE_ADDR_MASK);

    if (!(pdpt[pdpt_i] & PTE_PRESENT)) return;
    uint64_t* pd = (uint64_t*)PHYS_TO_VIRT(pdpt[pdpt_i] & PTE_ADDR_MASK);

    if (!(pd[pd_i] & PTE_PRESENT)) return;
    uint64_t* pt = (uint64_t*)PHYS_TO_VIRT(pd[pd_i] & PTE_ADDR_MASK);

    if (!(pt[pt_i] & PTE_PRESENT)) return;

    uint64_t phys = pt[pt_i] & PTE_ADDR_MASK;
    pmm_free(phys);

    pt[pt_i] = 0;

    invlpg((void*)virt);
}

/*
 * vmm_is_mapped - Check if an address is mapped
 * @pml4: Page Map Level 4 (root table)
 * @virt: Virtual address to check
 *
 * This function checks if the @virt address is
 * mapped in the tables pointed to by @pml4.
 *
 * Return:
 * true - @virt is mapped in tables of @pml4
 * false - @virt is not mapped there
 */
bool vmm_is_mapped(uint64_t* pml4, uint64_t virt)
{
    uint64_t pml4_i = PML4_INDEX(virt);
    uint64_t pdpt_i = PDPT_INDEX(virt);
    uint64_t pd_i = PD_INDEX(virt);
    uint64_t pt_i = PT_INDEX(virt);

    if (!(pml4[pml4_i] & PTE_PRESENT)) return false;
    uint64_t* pdpt = (uint64_t*)PHYS_TO_VIRT(pml4[pml4_i] & PTE_ADDR_MASK);

    if (!(pdpt[pdpt_i] & PTE_PRESENT)) return false;
    uint64_t* pd = (uint64_t*)PHYS_TO_VIRT(pdpt[pdpt_i] & PTE_ADDR_MASK);

    if (!(pd[pd_i] & PTE_PRESENT)) return false;
    uint64_t* pt = (uint64_t*)PHYS_TO_VIRT(pd[pd_i] & PTE_ADDR_MASK);

    return (pt[pt_i] & PTE_PRESENT);
}

/*
 * vmm_alloc_region - Map and allocate a memory range
 * @pml4: Page Map Level 4 (root table)
 * @pages: Amount of pages to allocate/map
 * @flags: Flags to put on mapped pages
 *
 * This function looks for enough space in page tables
 * to map @pages pages, then maps them into the provided
 * @pml4 with the provided @flags and allocates them.
 *
 * Return:
 * <start_virt> - the starting virtual address for the mapped range
 */
void* vmm_alloc_region(uint64_t* pml4, size_t pages, uint64_t flags)
{
    uint64_t found_pages = 0;
    uint64_t start_virt = VMM_USER_SPACE_START;

    for (uint64_t curr = VMM_USER_SPACE_START; curr < VMM_USER_SPACE_END; curr += PAGE_SIZE) {
        if (!vmm_is_mapped(pml4, curr)) {
            if (found_pages == 0) start_virt = curr;
            found_pages++;
        } else {
            found_pages = 0;
        }

        if (found_pages == pages) {
            for (size_t i = 0; i < pages; i++) {
                uint64_t addr_to_map = start_virt + (i * PAGE_SIZE);
                if (!vmm_map(pml4, addr_to_map, flags)) {
                    panic(NULL, "VMM out of memory!");
                }
            }
            return (void*)start_virt;
        }
    }

    panic(NULL, "VMM out of memory!");
    return NULL;
}

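A hedged example tying this back to the process constants at the top of the diff: carving out a 64 KiB stack for a new process, assuming PAGE_SIZE is 0x1000 and user_pml4 came from vmm_create_address_space():

    size_t stack_pages = PROCESS_STACK_SIZE / PAGE_SIZE;   /* 0x10000 / 0x1000 = 16 pages */
    void* stack = vmm_alloc_region(user_pml4, stack_pages, PTE_WRITABLE | PTE_USER);
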
void vmm_init()
{