Diffstat (limited to 'src')
-rw-r--r--  src/lib/paging.zig   80
-rw-r--r--  src/lib/process.zig  10
2 files changed, 45 insertions(+), 45 deletions(-)
diff --git a/src/lib/paging.zig b/src/lib/paging.zig
index 129fc84..87a5c0c 100644
--- a/src/lib/paging.zig
+++ b/src/lib/paging.zig
@@ -5,6 +5,8 @@
// This is an implementation of Sv39 paging, meaning that the virtual addresses
// are 39 bits wide. Sv32 and Sv48 are currently not implemented.
+const std = @import("std");
+
const hwinfo = @import("hwinfo.zig");
// Defined by linker script.
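
For context on the Sv39 scheme described above: a 39-bit virtual address splits into a 12-bit page offset plus three 9-bit virtual page numbers, one per table level. A minimal sketch of that decomposition (the function and names are illustrative, not part of this commit):

```zig
const std = @import("std");

// Illustrative Sv39 split: offset (12 bits), then VPN[0..2] (9 bits each).
fn vpn(vaddr: usize) [3]usize {
    return .{
        (vaddr >> 12) & 0x1ff, // VPN[0]
        (vaddr >> 21) & 0x1ff, // VPN[1]
        (vaddr >> 30) & 0x1ff, // VPN[2]
    };
}

test "sv39 split" {
    const parts = vpn(0x8020_1234);
    try std.testing.expectEqual(@as(usize, 1), parts[0]);
    try std.testing.expectEqual(@as(usize, 1), parts[1]);
    try std.testing.expectEqual(@as(usize, 2), parts[2]);
}
```
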
@@ -28,15 +30,10 @@ inline fn heapSize() usize {
}
pub const page_size: usize = 0x1000; // 4096 bytes
+pub const log2_page_size: u8 = @intCast(std.math.log2(page_size));
pub var next_mmio_vaddr: usize = 0xff000000;
-// Aligns an address with an offset to the next page.
-// Doesn't change addresses that are already aligned.
-fn pageAlign(addr: usize) usize {
- return (addr + (page_size - 1)) & ~(page_size - 1);
-}
-
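
The removed pageAlign and the std.mem.alignForwardLog2 call that replaces it later in this commit compute the same value; a quick equivalence check, assuming only what this hunk shows:

```zig
const std = @import("std");

test "alignForwardLog2 matches the removed pageAlign" {
    const page_size: usize = 0x1000;
    const log2_page_size: u8 = @intCast(std.math.log2(page_size)); // 12
    for ([_]usize{ 0, 1, 0xfff, 0x1000, 0x1001 }) |addr| {
        const old = (addr + (page_size - 1)) & ~(page_size - 1);
        try std.testing.expectEqual(old, std.mem.alignForwardLog2(addr, log2_page_size));
    }
}
```
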
pub const AllocError = error{
ZeroSize,
OutOfMemory,
@@ -138,8 +135,8 @@ fn physicalPageNumbers(paddr: usize) [3]usize {
}
// Returns the page numbers of an address as a single integer.
-fn pageNumber(addr: usize) usize {
- return addr >> 12;
+fn pageNumber(addr: usize) u44 {
+ return @intCast(addr >> 12);
}
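
The u44 return type matches the spec: Sv39 physical addresses are 56 bits wide, and dropping the 12-bit page offset leaves a 44-bit physical page number. A small check of that arithmetic, as illustration only:

```zig
const std = @import("std");

test "a maximal Sv39 physical address yields a 44-bit PPN" {
    const max_paddr: usize = (1 << 56) - 1;
    const ppn: u44 = @intCast(max_paddr >> 12);
    try std.testing.expectEqual(@as(u44, std.math.maxInt(u44)), ppn);
}
```
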
pub const EntryFlags = packed struct(u8) {
@@ -311,7 +308,7 @@ pub const Table = struct {
v.* = .{
.flags = EntryFlags.branch,
.rsw = 0,
- .mapping = @intCast(pageNumber(@intFromPtr(page))), // Remove the offset, a mapping is just the PPN.
+ .mapping = pageNumber(@intFromPtr(page.ptr)), // Remove the offset, a mapping is just the PPN.
.reserved = 0,
};
}
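
For reference, the entry written here packs into 64 bits; the sketch below shows the Sv39 layout the field names suggest, with widths taken from the RISC-V privileged spec (the commit's actual entry type is not visible in this hunk):

```zig
// Illustrative Sv39 page table entry (64 bits total).
const Entry = packed struct(u64) {
    flags: u8, // V, R, W, X, U, G, A, D.
    rsw: u2, // Reserved for software use.
    mapping: u44, // Physical page number (PPN).
    reserved: u10, // Must be zero.
};

test "entry packs into one machine word" {
    try @import("std").testing.expect(@bitSizeOf(Entry) == 64);
}
```
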
@@ -373,11 +370,11 @@ pub const Table = struct {
// Create a mask starting directly below / after PN[i].
// Since all levels can have leaves, i is not guaranteed to be zero.
- const offsetMask = (@as(usize, 1) << @intCast(12 + 9 * i)) - 1;
- const offset = vaddr & offsetMask;
- const ppnJoined = v.mappingAddr() & ~offsetMask;
+ const offset_mask = (@as(usize, 1) << @intCast(12 + 9 * i)) - 1;
+ const offset = vaddr & offset_mask;
+ const ppn_joined = v.mappingAddr() & ~offset_mask;
- return ppnJoined | offset;
+ return ppn_joined | offset;
}
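
A worked instance of the mask above for a leaf found at level 1 (a 2 MiB megapage), purely as illustration:

```zig
const std = @import("std");

test "level-1 leaf keeps a 21-bit offset" {
    const i: usize = 1; // Leaf at level 1 -> 2 MiB pages.
    const offset_mask = (@as(usize, 1) << @intCast(12 + 9 * i)) - 1;
    try std.testing.expectEqual(@as(usize, 0x1f_ffff), offset_mask);
}
```
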
// Get the entries of the page table of the current level.
@@ -399,15 +396,15 @@ pub const Table = struct {
// from accessing machine-reserved memory by accident.
pub fn identityMapRange(root: *Table, start: usize, end: usize, flags: EntryFlags) !void {
// Mask out the offset within the starting page.
- const startPage = start & ~(page_size - 1);
+ const start_page = start & ~(page_size - 1);
// Mask out the offset within the ending page, but ensure the returned page address
// is always the last required page for the mapping (end is exclusive,
// so subtracting 1 ends up in the previous page on boundaries,
// eliminating one useless mapping). The resulting value is inclusive.
- const endPage = (end - 1) & ~(page_size - 1);
+ const end_page = (end - 1) & ~(page_size - 1);
- var page = startPage;
- while (page <= endPage) : (page += page_size) {
+ var page = start_page;
+ while (page <= end_page) : (page += page_size) {
try root.map(page, page, flags, 0);
}
}
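
A concrete instance of the boundary arithmetic: with 4 KiB pages, start = 0x80001234 and an exclusive end = 0x80003000 map exactly two pages, and the subtraction keeps a page-aligned end from costing a third:

```zig
const std = @import("std");

test "identityMapRange page bounds" {
    const page_size: usize = 0x1000;
    const start: usize = 0x8000_1234;
    const end: usize = 0x8000_3000; // Exclusive.
    const start_page = start & ~(page_size - 1);
    const end_page = (end - 1) & ~(page_size - 1);
    try std.testing.expectEqual(@as(usize, 0x8000_1000), start_page);
    try std.testing.expectEqual(@as(usize, 0x8000_2000), end_page);
}
```
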
@@ -458,20 +455,21 @@ pub fn init() void {
}
// Allocate memory pages. Passing n <= 0 results in an error.
-pub fn alloc(n: usize) !*void {
+pub fn alloc(n: usize) ![]align(page_size) u8 {
if (n <= 0) return AllocError.ZeroSize;
const num_pages = heapSize() / page_size;
// Start allocating beyond page descriptors.
- const alloc_start = pageAlign(@intFromPtr(heap_start) + num_pages * @sizeOf(Page));
+ const pages = @intFromPtr(heap_start) + num_pages * @sizeOf(Page);
+ const alloc_start = std.mem.alignForwardLog2(pages, log2_page_size);
- const pages: [*]Page = @ptrCast(heap_start);
+ const descriptors: [*]Page = @ptrCast(heap_start);
// Iterate over potential starting points.
// The subtraction of n prevents unnecessary iterations for starting points
// that don't leave enough space for the whole allocation.
for (0..num_pages - n) |i| {
- if (!@bitCast(pages[i].flags.active)) {
+ if (!@bitCast(descriptors[i].flags.active)) {
// Free starting page found.
var insufficient = false;
@@ -479,7 +477,7 @@ pub fn alloc(n: usize) !*void {
// Check if there is enough contiguous free space for the whole allocation.
// If not, move on to the next potential starting point.
for (i..n + i) |j| {
- if (@bitCast(pages[j].flags.active)) {
+ if (@bitCast(descriptors[j].flags.active)) {
insufficient = true;
break;
}
@@ -488,12 +486,14 @@ pub fn alloc(n: usize) !*void {
if (!insufficient) {
// Mark all allocated pages as taken.
for (i..n + i - 1) |j| {
- try pages[j].take(false);
+ try descriptors[j].take(false);
}
- try pages[n + i - 1].take(true);
+ try descriptors[n + i - 1].take(true);
// Construct a pointer to the first page using its descriptor number.
- return @ptrFromInt(alloc_start + i * page_size);
+ const first = alloc_start + i * page_size;
+ const allocation: [*]align(page_size) u8 = @ptrFromInt(first);
+ return allocation[0 .. n * page_size];
}
}
}
@@ -501,20 +501,23 @@ pub fn alloc(n: usize) !*void {
return AllocError.OutOfMemory;
}
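
Returning a slice means the allocation's length travels with the pointer, so callers can hand the same value straight back to free. A hypothetical call site under the new signature:

```zig
// Hypothetical call site; alloc and free are the functions from this file.
fn example() !void {
    const pages = try alloc(2); // []align(page_size) u8, len == 2 * page_size.
    defer free(pages);
    @memset(pages, 0); // The slice knows its own length.
}
```
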
-// Free (contiguous) memory page(s). Provides limited protection against double-frees.
-pub fn free(ptr: *void) !void {
+// Free (contiguous) memory page(s).
+pub fn free(memory: anytype) void {
+ const Slice = @typeInfo(@TypeOf(memory)).Pointer;
+ const bytes = std.mem.asBytes(memory);
+ const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
+ if (bytes_len == 0) return;
+
const num_pages = heapSize() / page_size;
// Start allocating beyond page descriptors.
- const alloc_start = pageAlign(@intFromPtr(heap_start) + num_pages * @sizeOf(Page));
+ const pages = @intFromPtr(heap_start) + num_pages * @sizeOf(Page);
+ const alloc_start = std.mem.alignForwardLog2(pages, log2_page_size);
// Recover the address of the page descriptor from the address of its contents
// by restoring the descriptor number and using it to index the descriptor table
// at the start of the heap.
- const addr = @intFromPtr(heap_start) + (@intFromPtr(ptr) - alloc_start) / page_size;
-
- // Ensure basic address sanity.
- // Does not check descriptor table bounds.
- if (addr < @intFromPtr(heap_start) or addr >= @intFromPtr(heap_start) + heapSize()) return AllocError.OutOfRange;
+ const descriptor_offset = (@intFromPtr(bytes.ptr) - alloc_start) / page_size;
+ const addr = @intFromPtr(heap_start) + descriptor_offset;
var page: [*]Page = @ptrFromInt(addr);
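
The descriptor lookup above inverts alloc's address construction. Sketching the round trip with made-up numbers (the addresses are illustrative, and indexing the descriptor table by this offset assumes @sizeOf(Page) == 1):

```zig
const std = @import("std");

test "descriptor index round trip" {
    const page_size: usize = 0x1000;
    const alloc_start: usize = 0x8100_2000; // Page-aligned end of the descriptor table.
    const i: usize = 5; // Descriptor index chosen by alloc.

    // alloc: descriptor index -> content address.
    const content = alloc_start + i * page_size;
    // free: content address -> descriptor index.
    const descriptor_offset = (content - alloc_start) / page_size;
    try std.testing.expectEqual(i, descriptor_offset);
}
```
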
@@ -524,26 +527,23 @@ pub fn free(ptr: *void) !void {
page[0].flags = Page.Flags.clear;
}
- // Free page encountered, but it isn't marked as the last. Potential double-free.
- if (!@bitCast(page[0].flags.last)) return AllocError.DoubleFree;
-
// Mark the last page as free.
page[0].flags = Page.Flags.clear;
}
// Allocate memory pages and overwrite their contents with zeroes for added security.
// Passing n <= 0 results in an error.
-pub fn zeroedAlloc(n: usize) !*void {
+pub fn zeroedAlloc(n: usize) ![]align(page_size) u8 {
const ret = try alloc(n);
// Write zeroes in 64-bit batches to reduce the number of store instructions.
// The remaining bytes don't need to be accounted for
// because page_size (4096) is divisible by 8.
- const size = (n * page_size) / 8;
- const ptr: [*]volatile u64 = @alignCast(@ptrCast(ret));
+ const len = (n * page_size) / 8;
+ const ptr: []volatile u64 = @as([*]volatile u64, @ptrCast(ret))[0..len];
- for (0..size) |i| {
+ for (0..len) |i| {
ptr[i] = 0;
}
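
The batching works out exactly because 4096 is divisible by 8: each page is 512 u64 stores with no byte remainder to handle. A trivial check of that claim:

```zig
const std = @import("std");

test "u64 batches cover whole pages" {
    const page_size: usize = 4096;
    try std.testing.expectEqual(@as(usize, 0), page_size % 8);
    try std.testing.expectEqual(@as(usize, 512), page_size / 8);
}
```
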
diff --git a/src/lib/process.zig b/src/lib/process.zig
index cb96e7e..e07c6a4 100644
--- a/src/lib/process.zig
+++ b/src/lib/process.zig
@@ -31,7 +31,7 @@ pub const State = enum(u8) {
pub const Info = extern struct {
id: u16,
trap_frame: trap.Frame,
- stack: [*]u8,
+ stack: *[num_stack_pages * paging.page_size]u8,
pc: usize,
page_table: *paging.Table,
state: State,
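
Typing the stack as a pointer to a fixed-size array bakes the stack's byte length into the type, so no separate length field is needed. A hypothetical helper built on that (stacks grow downward on RISC-V, so the initial stack pointer is the top):

```zig
// Hypothetical helper, not part of this commit: the array length is part
// of the pointer type, so the stack top can be derived from the field alone.
fn stackTop(info: *const Info) usize {
    return @intFromPtr(info.stack) + info.stack.len;
}
```
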
@@ -49,15 +49,15 @@ pub const Info = extern struct {
fn new(entry: usize) !Info {
const stack = try paging.alloc(num_stack_pages);
- errdefer paging.free(stack) catch {};
+ errdefer paging.free(stack);
const procmem: *paging.Table = @alignCast(@ptrCast(try paging.zeroedAlloc(1)));
- errdefer paging.free(@ptrCast(procmem)) catch {};
+ errdefer paging.free(procmem);
var proc = Info{
.id = next_pid,
.trap_frame = std.mem.zeroInit(trap.Frame, .{}),
- .stack = @alignCast(@ptrCast(stack)),
+ .stack = @ptrCast(stack),
.pc = entry,
.page_table = procmem,
.state = .waiting,
@@ -150,7 +150,7 @@ pub fn switchTo(proc: *Info) noreturn {
pub fn demo(allocator: std.mem.Allocator) !noreturn {
const entry: [*]u8 = @alignCast(@ptrCast(try paging.zeroedAlloc(1)));
- defer paging.free(@ptrCast(entry)) catch {};
+ defer paging.free(entry);
entry[0] = 0x73;
entry[1] = 0x00;
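
The bytes stored here begin a little-endian RISC-V instruction word; assuming the elided lines write the remaining zero bytes, the word is 0x00000073, i.e. ecall, so the demo code immediately traps back into the kernel. An equivalent single store, as a sketch:

```zig
// Equivalent to the four byte stores, assuming the elided bytes are zero.
// (Older Zig versions spell this std.mem.writeIntLittle.)
std.mem.writeInt(u32, entry[0..4], 0x0000_0073, .little); // ecall
```
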