sp1/amd64: mmu: Add page mapping implementation

Signed-off-by: Ian Moffett <ian@mirocom.org>
This commit is contained in:
2026-04-21 21:43:38 -04:00
parent c9239f6921
commit 1bff60c27a
2 changed files with 205 additions and 0 deletions
+178
View File
@@ -9,10 +9,15 @@
* consent from Mirocom Laboratories.
*/
#include <sys/param.h>
#include <sys/cdefs.h>
#include <sys/status.h>
#include <mu/mmu.h>
#include <mm/vm.h>
#include <mm/physmem.h>
#include <machine/tlb.h>
#include <stdbool.h>
#include <string.h>
/*
* Page-Table Entry (PTE) flags
@@ -31,6 +36,154 @@
#define PTE_GLOBAL BIT(8) /* Global; sticky */
#define PTE_NX BIT(63) /* Execute-disable */
/* 57-bit linear addresses */
#define CR4_LA57 BIT(12)
/*
 * Represents valid pagemap levels
 *
 * PML1 is the final (leaf) level whose entries map 4 KiB pages;
 * the top level is PML4, or PML5 when 5-level paging (LA57) is
 * enabled — see mmu_get_level().
 */
typedef enum {
    PAGELVL_PML1,   /* leaf page table */
    PAGELVL_PML2,   /* page directory */
    PAGELVL_PML3,   /* page directory pointer table */
    PAGELVL_PML4,   /* top level under 4-level paging */
    PAGELVL_PML5    /* top level under 5-level paging (LA57) */
} pagelvl_t;
/*
 * Obtain page table flags from protection flags
 *
 * @prot: Protection flags (PROT_*) to extract from
 *
 * The result is always marked present; the page is left
 * non-writable unless PROT_WRITE is set and execute-disabled
 * unless PROT_EXEC is set.
 */
static size_t
mmu_prot_to_pte(int prot)
{
    size_t flags;

    /* Start present + no-execute, then relax per request */
    flags = PTE_P | PTE_NX;
    if (ISSET(prot, PROT_EXEC)) {
        flags &= ~PTE_NX;
    }
    if (ISSET(prot, PROT_WRITE)) {
        flags |= PTE_RW;
    }

    return flags;
}
/*
* Verify if a pagesize is valid
*
* @ps: Pagesize to verify
*/
static bool
mmu_ps_valid(pagelvl_t ps)
{
switch (ps) {
case PAGESIZE_4K:
return true;
}
return false;
}
/*
 * Obtain the top-level in use for the current machine
 * configuration.
 *
 * Reads CR4 and reports PML5 when 5-level paging (LA57) is
 * enabled, PML4 otherwise.
 */
static inline pagelvl_t
mmu_get_level(void)
{
    uint64_t cr4_val;

    /* Read CR4 to check the LA57 (57-bit linear address) bit */
    __asmv(
        "mov %%cr4, %0"
        : "=r" (cr4_val)
        :
        : "memory"
    );

    if (ISSET(cr4_val, CR4_LA57)) {
        return PAGELVL_PML5;
    }
    return PAGELVL_PML4;
}
/*
 * This function extracts those cute 9 bit segments that
 * function as indices into pagemap levels.
 *
 * @vma: Virtual memory address used as key
 * @lvl: Level to extract
 *
 * Returns (size_t)-1 for an unknown level.
 */
static inline size_t
mmu_extract_index(uintptr_t vma, pagelvl_t lvl)
{
    unsigned int shift;

    /* Each level's 9-bit index starts 9 bits above the previous */
    switch (lvl) {
    case PAGELVL_PML1:
        shift = 12;
        break;
    case PAGELVL_PML2:
        shift = 21;
        break;
    case PAGELVL_PML3:
        shift = 30;
        break;
    case PAGELVL_PML4:
        shift = 39;
        break;
    case PAGELVL_PML5:
        shift = 48;
        break;
    default:
        return (size_t)-1;
    }

    return (vma >> shift) & 0x1FF;
}
/*
 * Walk the paging structures for a virtual address and return
 * a virtual pointer to the page table at the requested level,
 * optionally allocating missing intermediate tables on the way
 * down.
 *
 * @vfr: Virtual fuck region (address space) whose tables to walk
 * @vma: Virtual memory address used as the walk key
 * @lvl: Pagemap level to stop at (e.g. PAGELVL_PML1 yields the
 *       final 4 KiB page table)
 * @alloc: If true, allocate new tables for non-present entries
 *
 * Returns NULL on bad parameters, on a non-present entry while
 * @alloc is false, or if physical page allocation fails.
 */
static uint64_t *
mmu_extract_level(struct mmu_vfr *vfr, uintptr_t vma, pagelvl_t lvl, bool alloc)
{
    uintptr_t *pmap, pma;
    void *tmp_p;
    size_t index;
    pagelvl_t cur_level;

    if (vfr == NULL || lvl > PAGELVL_PML5) {
        return NULL;
    }

    /* Start at the top-level table referenced by this VFR's CR3 */
    cur_level = mmu_get_level();
    pmap = pma_to_vma((vfr->cr3 & PTE_ADDR_MASK));
    while (cur_level > lvl) {
        index = mmu_extract_index(vma, cur_level);

        /* Is this entry present? If so, descend one level */
        if (ISSET(pmap[index], PTE_P)) {
            pmap = pma_to_vma((pmap[index] & PTE_ADDR_MASK));
            --cur_level;
            continue;
        }

        if (!alloc) {
            return NULL;
        }

        /* Allocate one physical page for the missing table */
        pma = mm_physmem_alloc(1);
        if (pma == 0) {
            return NULL;
        }

        /*
         * Zero the new table so every entry starts non-present,
         * then link it in. NOTE(review): intermediate entries
         * are made writable + user-accessible; the leaf PTE
         * flags still gate the final access.
         */
        tmp_p = pma_to_vma(pma);
        memset(tmp_p, 0, 4096);
        pmap[index] = pma | (PTE_P | PTE_RW | PTE_US);
        pmap = tmp_p;
        --cur_level;
    }

    return pmap;
}
void
mu_mmu_readvfr(struct mmu_vfr *res)
{
@@ -89,3 +242,28 @@ mu_mmu_forkvfr(struct mmu_vfr *vfr, struct mmu_vfr *res)
res->cr3 = dest;
return STATUS_SUCCESS;
}
/*
 * Create a virtual to physical mapping within a specific
 * virtual fuck region.
 *
 * @vfr: Virtual fuck region to map within
 * @vma: Virtual memory address to map
 * @pma: Physical memory address to map to
 * @prot: Protection flags (PROT_*)
 * @ps: Pagesize to map
 *
 * Returns STATUS_SUCCESS on success, STATUS_INVALID_PARAM for
 * a NULL VFR or unsupported pagesize, or STATUS_NO_MEMORY if
 * the page-table walk could not be completed.
 */
status_t
mu_mmu_map(struct mmu_vfr *vfr, uintptr_t vma, uintptr_t pma,
    int prot, pagesize_t ps)
{
    uintptr_t *pagetab;
    size_t slot, pte_bits;

    if (vfr == NULL || !mmu_ps_valid(ps)) {
        return STATUS_INVALID_PARAM;
    }

    /* Grab the leaf page table, allocating levels as needed */
    pagetab = mmu_extract_level(vfr, vma, PAGELVL_PML1, true);
    if (pagetab == NULL) {
        return STATUS_NO_MEMORY;
    }

    /* Install the leaf PTE and invalidate any stale translation */
    slot = mmu_extract_index(vma, PAGELVL_PML1);
    pte_bits = mmu_prot_to_pte(prot);
    pagetab[slot] = pma | pte_bits;
    md_tlb_flush(vma);
    return STATUS_SUCCESS;
}
+27
View File
@@ -13,6 +13,17 @@
#define _MU_MMU_H_ 1
#include <sys/status.h>
#include <sys/mman.h>
/*
 * Represents valid page sizes that can be used when
 * creating mappings
 */
typedef enum {
    PAGESIZE_4K,    /* 4 KiB base page */
    PAGESIZE_2M,    /* 2 MiB large page */
    PAGESIZE_1G     /* 1 GiB large page */
} pagesize_t;
/*
* Each running SP1 process is to have a virtual fuck region
@@ -35,6 +46,22 @@ void mu_mmu_readvfr(struct mmu_vfr *res);
*/
void mu_mmu_writevfr(struct mmu_vfr *vfr);
/*
* Create a virtual to physical mapping within a
* specific virtual fuck region
*
* @vfr: Virtual fuck region to map within
* @vma: Virtual memory address to map
* @pma: Physical memory address to map to
* @prot: Protection flags
* @ps: Pagesize to map
*/
status_t mu_mmu_map(
struct mmu_vfr *vfr, uintptr_t vma,
uintptr_t pma, int prot,
pagesize_t ps
);
/*
* Fork a VFR and clear out the lower half
*