/*
 * @author xamidev <xamidev@riseup.net>
 * @brief Virtual memory manager
 * @license GPL-3.0-only
 */

/*
The VMM (virtual memory manager) will have two roles:
- mapping pages
- unmapping pages
in a specified virtual space

compared to the PMM which allocs/frees 4kb frames ("physical pages").
*/

#include <mem/vmm.h>
#include <mem/paging.h>
#include <stddef.h>
#include <mem/pmm.h>
#include <kernel.h>

extern uint64_t *kernel_pml4;

/*
|
|
* vmm_switch_to - Switch to a different VMM context
|
|
* @ctx: VMM context to switch to
|
|
*
|
|
* This function makes the CPU switch to another
|
|
* virtual memory context, by using the PML4 address
|
|
* specified in the VMM context pointed to by @ctx.
|
|
*/
|
|
void vmm_switch_to(struct vmm_context* ctx)
|
|
{
|
|
if (!ctx || !ctx->pml4) {
|
|
panic(NULL, "Attempted to switch to bad PML4!");
|
|
}
|
|
uint64_t pml4 = VIRT_TO_PHYS(ctx->pml4);
|
|
asm volatile ("mov %0, %%cr3" :: "r"(pml4) : "memory");
|
|
}
|
|
|
|
/*
|
|
* vmm_virt_to_phys - Translate from virtual to physical address
|
|
* @pml4: virtual address of the Page Map Level 4 (root page table)
|
|
* @virt: virtual address to translate
|
|
*
|
|
* This function goes through page table structures, beginning at
|
|
* the root page table which lives at @pml4, and translates @virt
|
|
* to a physical address, if it's found in the tables.
|
|
*
|
|
* Return:
|
|
* <phys> - physical address
|
|
* %-1 - address is not present in page tables pointed to by @pml4
|
|
*/
|
|
uint64_t vmm_virt_to_phys(uint64_t* pml4, uint64_t virt)
|
|
{
|
|
uint64_t pml4_i = PML4_INDEX(virt);
|
|
uint64_t pdpt_i = PDPT_INDEX(virt);
|
|
uint64_t pd_i = PD_INDEX(virt);
|
|
uint64_t pt_i = PT_INDEX(virt);
|
|
|
|
if (!(pml4[pml4_i] & PTE_PRESENT)) return -1;
|
|
uint64_t* pdpt = (uint64_t*)PHYS_TO_VIRT(pml4[pml4_i] & PTE_ADDR_MASK);
|
|
|
|
if (!(pdpt[pdpt_i] & PTE_PRESENT)) return -1;
|
|
uint64_t* pd = (uint64_t*)PHYS_TO_VIRT(pdpt[pdpt_i] & PTE_ADDR_MASK);
|
|
|
|
if (!(pd[pd_i] & PTE_PRESENT)) return -1;
|
|
uint64_t* pt = (uint64_t*)PHYS_TO_VIRT(pd[pd_i] & PTE_ADDR_MASK);
|
|
|
|
if (!(pt[pt_i] & PTE_PRESENT)) return -1;
|
|
|
|
uint64_t phys = (pt[pt_i] & PTE_ADDR_MASK) + (virt & 0xFFF);
|
|
return phys;
|
|
}
|
|
|
|
/*
|
|
* vmm_create_address_space - Create a new address space
|
|
*
|
|
* This function allocates a PML4, and then copies the kernel
|
|
* page tables into it.
|
|
*
|
|
* Return:
|
|
* <pml4> - address of the new PML4
|
|
* NULL - on error (couldn't allocate a page table)
|
|
*/
|
|
uint64_t* vmm_create_address_space()
|
|
{
|
|
uint64_t* pml4 = alloc_page_table();
|
|
if (!pml4) return NULL;
|
|
|
|
for (size_t i=256; i<512; i++) {
|
|
pml4[i] = kernel_pml4[i];
|
|
}
|
|
|
|
return pml4;
|
|
}
|
|
|
|
/*
|
|
* vmm_map - Map & allocate a page
|
|
* @pml4: Page Map Level 4 (root table)
|
|
* @virt: Virtual address to map
|
|
* @flags: Flags to apply on page
|
|
*
|
|
* This function allocates a page frame with the PMM,
|
|
* and maps this page to the provided @virt address,
|
|
* with the corresponding @flags.
|
|
*
|
|
* Return:
|
|
* <virt> - virtual address
|
|
*/
|
|
void* vmm_map(uint64_t* pml4, uint64_t virt, uint64_t flags)
|
|
{
|
|
uint64_t phys = pmm_alloc();
|
|
if (!phys) {
|
|
panic(NULL, "VMM/PMM out of memory!");
|
|
}
|
|
|
|
paging_map_page(pml4, virt, phys, flags | PTE_PRESENT);
|
|
return (void*)virt;
|
|
}
|
|
|
|
/*
|
|
* vmm_unmap - Unmap & free a page
|
|
* @pml4: Page Map Level 4 (root table)
|
|
* @virt: Virtual address to unmap
|
|
*
|
|
* This function frees a page frame with the PMM,
|
|
* and unmaps the virtual page at @virt.
|
|
*/
|
|
void vmm_unmap(uint64_t* pml4, uint64_t virt)
|
|
{
|
|
uint64_t pml4_i = PML4_INDEX(virt);
|
|
uint64_t pdpt_i = PDPT_INDEX(virt);
|
|
uint64_t pd_i = PD_INDEX(virt);
|
|
uint64_t pt_i = PT_INDEX(virt);
|
|
|
|
if (!(pml4[pml4_i] & PTE_PRESENT)) return;
|
|
uint64_t* pdpt = (uint64_t*)PHYS_TO_VIRT(pml4[pml4_i] & PTE_ADDR_MASK);
|
|
|
|
if (!(pdpt[pdpt_i] & PTE_PRESENT)) return;
|
|
uint64_t* pd = (uint64_t*)PHYS_TO_VIRT(pdpt[pdpt_i] & PTE_ADDR_MASK);
|
|
|
|
if (!(pd[pd_i] & PTE_PRESENT)) return;
|
|
uint64_t* pt = (uint64_t*)PHYS_TO_VIRT(pd[pd_i] & PTE_ADDR_MASK);
|
|
|
|
if (!(pt[pt_i] & PTE_PRESENT)) return;
|
|
|
|
uint64_t phys = pt[pt_i] & PTE_ADDR_MASK;
|
|
pmm_free(phys);
|
|
|
|
pt[pt_i] = 0;
|
|
|
|
invlpg((void*)virt);
|
|
}
|
|
|
|
/*
|
|
* vmm_is_mapped - Check if an address is mapped
|
|
* @pml4: Page Map Level 4 (root table)
|
|
* @virt: Virtual address to check
|
|
*
|
|
* This function checks if the @virt address is
|
|
* mapped in the tables pointed to by @pml4.
|
|
*
|
|
* Return:
|
|
* true - @virt is mapped in tables of @pml4
|
|
* false - @virt is not mapped there
|
|
*/
|
|
bool vmm_is_mapped(uint64_t* pml4, uint64_t virt)
|
|
{
|
|
uint64_t pml4_i = PML4_INDEX(virt);
|
|
uint64_t pdpt_i = PDPT_INDEX(virt);
|
|
uint64_t pd_i = PD_INDEX(virt);
|
|
uint64_t pt_i = PT_INDEX(virt);
|
|
|
|
if (!(pml4[pml4_i] & PTE_PRESENT)) return false;
|
|
uint64_t* pdpt = (uint64_t*)PHYS_TO_VIRT(pml4[pml4_i] & PTE_ADDR_MASK);
|
|
|
|
if (!(pdpt[pdpt_i] & PTE_PRESENT)) return false;
|
|
uint64_t* pd = (uint64_t*)PHYS_TO_VIRT(pdpt[pdpt_i] & PTE_ADDR_MASK);
|
|
|
|
if (!(pd[pd_i] & PTE_PRESENT)) return false;
|
|
uint64_t* pt = (uint64_t*)PHYS_TO_VIRT(pd[pd_i] & PTE_ADDR_MASK);
|
|
|
|
return (pt[pt_i] & PTE_PRESENT);
|
|
}
|
|
|
|
/*
|
|
* vmm_alloc_range - Map and allocate a memory range
|
|
* @pml4: Page Map Level 4 (root table)
|
|
* @pages: Amount of pages to allocate/map
|
|
* @flags: Flags to put on mapped pages
|
|
*
|
|
* This function looks for enough space in page tables
|
|
* to map @pages pages, then maps them into the provided
|
|
* @pml4 with the provided @flags and allocates them.
|
|
*
|
|
* Return:
|
|
* <start_virt> - the starting virtual address for the mapped range
|
|
*/
|
|
void* vmm_alloc_region(uint64_t* pml4, size_t pages, uint64_t flags)
|
|
{
|
|
uint64_t found_pages = 0;
|
|
uint64_t start_virt = VMM_USER_SPACE_START;
|
|
|
|
for (uint64_t curr = VMM_USER_SPACE_START; curr < VMM_USER_SPACE_END; curr += PAGE_SIZE) {
|
|
if (!vmm_is_mapped(pml4, curr)) {
|
|
if (found_pages == 0) start_virt = curr;
|
|
found_pages++;
|
|
} else {
|
|
found_pages = 0;
|
|
}
|
|
|
|
if (found_pages == pages) {
|
|
for (size_t i = 0; i < pages; i++) {
|
|
uint64_t addr_to_map = start_virt + (i * PAGE_SIZE);
|
|
if (!vmm_map(pml4, addr_to_map, flags)) {
|
|
panic(NULL, "VMM out of memory!");
|
|
}
|
|
}
|
|
return (void*)start_virt;
|
|
}
|
|
}
|
|
|
|
panic(NULL, "VMM out of memory!");
|
|
return NULL;
|
|
}
|
|
|
|
/*
 * vmm_init - Initialize the virtual memory manager
 *
 * Currently a no-op; kept as an initialization hook so callers
 * have a stable entry point once VMM setup work is needed.
 */
void vmm_init(void)
{
}