aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHimbeer <himbeer@disroot.org>2024-04-16 15:10:04 +0200
committerHimbeer <himbeer@disroot.org>2024-04-16 15:10:04 +0200
commitc5331b4d3d8f02a4b8eb0166ae4ecce3c4d08942 (patch)
tree1d58c1dd1eedac528b60c8a05502ca246c013635
parentf7a77113e2229974886beebcbf3b891bcf118ab7 (diff)
initial paging helpers
-rw-r--r--linker.ld25
-rw-r--r--src/paging.zig192
2 files changed, 213 insertions, 4 deletions
diff --git a/linker.ld b/linker.ld
index 323eb69..46d03e0 100644
--- a/linker.ld
+++ b/linker.ld
@@ -1,5 +1,9 @@
OUTPUT_ARCH("riscv")
+MEMORY {
+ ram (wxa) : ORIGIN = 0x80200000, LENGTH = 128M
+}
+
ENTRY(start)
PHDRS {
@@ -13,11 +17,24 @@ SECTIONS {
.text (0x80200000) : {
*(.text.start)
*(.text .text.*)
- } : lo_rx
+ } > ram AT > ram : lo_rx
+
+ .rodata : { *(.rodata .rodata.*) } > ram AT > ram : lo_r
+ .data : { *(.data .data.* ) } > ram AT > ram : lo_rw
+ .bss : {
+ *(.bss .bss.*)
- .rodata : { *(.rodata .rodata.*) } : lo_r
- .data : { *(.data .data.* ) } : lo_rw
- .bss : { *(.bss .bss.* ) } : lo_rw
+ PROVIDE(_bss_end = .);
+ } > ram AT > ram : lo_rw
/DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) }
+
+ PROVIDE(_memory_start = ORIGIN(ram));
+ PROVIDE(_memory_end = ORIGIN(ram) + LENGTH(ram));
+
+ PROVIDE(_stack_start = _bss_end);
+ PROVIDE(_stack_end = _stack_start + 0x80000);
+
+ PROVIDE(_heap_start = _stack_end);
+ PROVIDE(_heap_size = _memory_end - _heap_start);
}
diff --git a/src/paging.zig b/src/paging.zig
new file mode 100644
index 0000000..fd55e6f
--- /dev/null
+++ b/src/paging.zig
@@ -0,0 +1,192 @@
// Linker-provided symbols delimiting the kernel heap (see linker.ld:
// _heap_start = _stack_end, _heap_size = _memory_end - _heap_start).
const heap_start = @extern(*void, .{ .name = "_heap_start" });
const heap_size = @extern(*void, .{ .name = "_heap_size" });

// Total number of page frames the heap spans.
// NOTE(review): @intFromPtr of a linker symbol is not comptime-evaluable in
// current Zig; these may need to become functions or runtime-initialized
// variables — confirm against the targeted compiler version.
const num_pages = @intFromPtr(heap_size) / page_size;

// Usable pages begin after the per-page metadata (one `Page` byte per frame,
// stored at the start of the heap), rounded up to the next page boundary.
// Using `num_pages * page_size` here would place alloc_start at the END of
// the heap, leaving no room for any allocation.
const alloc_start: *void = @ptrFromInt(
    (@intFromPtr(heap_start) + num_pages * @sizeOf(Page) + page_size - 1) & ~@as(usize, page_size - 1),
);

pub const page_size = 0x1000;
+
/// Errors reported by the physical page allocator.
pub const AllocError = error{
    /// A zero-page allocation was requested.
    ZeroSize,
    /// No contiguous run of free pages is large enough.
    OutOfMemory,
    /// The pointer handed to `free` lies outside the managed heap.
    OutOfRange,
    /// The pages were already free.
    DoubleFree,
};
+
/// Errors reported by page-table operations.
pub const TableError = error{
    /// `map` was given a flag set without any of R/W/X — not a leaf mapping.
    NotALeaf,
};
+
/// Bit flags stored in each page's metadata byte.
/// Declared as plain integer constants rather than an `enum(u8)`: enum members
/// cannot take part in bitwise `&`/`|` with a `u8` without `@intFromEnum`.
pub const PageFlags = struct {
    /// No flags set: the page is free.
    pub const empty: u8 = 0;
    /// The page is allocated.
    pub const active: u8 = 1 << 0;
    /// The page is the final one of its allocation run.
    pub const last: u8 = 1 << 1;
};

/// Per-page allocator metadata. One `Page` byte per frame lives at the
/// start of the heap, ahead of `alloc_start`.
pub const Page = struct {
    flags: u8,

    /// Returns true when the page is not currently allocated.
    pub fn isFree(self: Page) bool {
        return self.flags & PageFlags.active == 0;
    }

    /// Returns true when the page terminates an allocation run.
    pub fn isLast(self: Page) bool {
        return self.flags & PageFlags.last != 0;
    }

    /// Marks the page as free.
    pub fn clear(self: *Page) void {
        self.flags = PageFlags.empty;
    }
};
+
/// Sv39 page-table entry flag bits (RISC-V Privileged Architecture).
/// Plain `i64` constants rather than an `enum(i64)` so they can be OR-ed and
/// AND-ed directly against `Entry.bits`.
pub const EntryFlags = struct {
    pub const valid: i64 = 1 << 0;
    pub const read: i64 = 1 << 1;
    pub const write: i64 = 1 << 2;
    pub const exec: i64 = 1 << 3;
    pub const user: i64 = 1 << 4;
    pub const global: i64 = 1 << 5;
    pub const accessed: i64 = 1 << 6;
    pub const dirty: i64 = 1 << 7;
};

/// A single Sv39 page-table entry.
pub const Entry = packed struct {
    bits: i64,

    /// Returns true when the entry is present (V bit set).
    pub fn isValid(self: Entry) bool {
        return self.bits & EntryFlags.valid != 0;
    }

    /// Returns true when the entry is a leaf mapping (any of R/W/X set)
    /// rather than a pointer to a next-level table.
    pub fn isLeaf(self: Entry) bool {
        return self.bits & (EntryFlags.read | EntryFlags.write | EntryFlags.exec) != 0;
    }
};
+
/// An Sv39 page table: 512 eight-byte entries, one page in size.
pub const Table = struct {
    entries: [512]Entry,

    /// Maps the virtual page containing `vaddr` to the physical page
    /// containing `paddr` in the table rooted at `root`.
    /// `flags` must describe a leaf mapping (at least one of R/W/X set).
    /// `level` selects the page size: 0 = 4 KiB, 1 = 2 MiB, 2 = 1 GiB.
    /// Missing intermediate tables are allocated (zeroed) with `zalloc`.
    pub fn map(root: *Table, vaddr: usize, paddr: usize, flags: i64, level: usize) !void {
        // Refuse flag sets without R/W/X: was `.{ .bits = bits }` on an
        // undefined `bits`; the caller's `flags` are what must be checked.
        if (!(Entry{ .bits = flags }).isLeaf()) return TableError.NotALeaf;

        // Sv39 virtual page numbers (9 bits each) and physical page numbers.
        const vpn = [_]usize{ (vaddr >> 12) & 0x1ff, (vaddr >> 21) & 0x1ff, (vaddr >> 30) & 0x1ff };
        const ppn = [_]usize{ (paddr >> 12) & 0x1ff, (paddr >> 21) & 0x1ff, (paddr >> 30) & 0x3ff_ffff };

        var v = &root.entries[vpn[2]];

        // Descend from level 2 down to `level`, creating tables on the way.
        // (The previous `1 - iInv` loop descended to the wrong level when
        // `level == 1`.)
        var i: usize = 2;
        while (i > level) {
            i -= 1;

            if (!v.isValid()) {
                const page = try zalloc(1);
                // Non-leaf entry: PPN of the new table plus the valid bit.
                v.bits = @as(i64, @intCast(@intFromPtr(page) >> 2)) | EntryFlags.valid;
            }

            // Follow the entry to the next-level table; `[*]Entry` indexing
            // scales by @sizeOf(Entry), which the raw integer add did not.
            const next: [*]Entry = @ptrFromInt(@as(usize, @intCast((v.bits & ~@as(i64, 0x3ff)) << 2)));
            v = &next[vpn[i]];
        }

        // Leaf entry: PPN fields, caller flags, valid bit.
        v.bits = (@as(i64, @intCast(ppn[2])) << 28) |
            (@as(i64, @intCast(ppn[1])) << 19) |
            (@as(i64, @intCast(ppn[0])) << 10) |
            flags | EntryFlags.valid;
    }

    /// Frees every page-table page referenced below `root`.
    /// Pages targeted by leaf mappings are not freed, nor is `root` itself.
    pub fn unmap(root: *Table) void {
        for (&root.entries) |*level2Entry| {
            if (level2Entry.isValid() and !level2Entry.isLeaf()) {
                const level1Table: *Table = @ptrFromInt(@as(usize, @intCast((level2Entry.bits & ~@as(i64, 0x3ff)) << 2)));

                for (&level1Table.entries) |*level1Entry| {
                    // Fixed: the leaf test used `level2Entry` here.
                    if (level1Entry.isValid() and !level1Entry.isLeaf()) {
                        const level0: *void = @ptrFromInt(@as(usize, @intCast((level1Entry.bits & ~@as(i64, 0x3ff)) << 2)));
                        // Best-effort teardown: deallocation must not abort it.
                        free(level0) catch {};
                    }
                }

                free(@as(*void, @ptrCast(level1Table))) catch {};
            }
        }
    }

    /// Translates `vaddr` through the table rooted at `root`.
    /// Returns the physical address, or null if the address is unmapped.
    pub fn virt2phys(root: *const Table, vaddr: usize) ?usize {
        const vpn = [_]usize{ (vaddr >> 12) & 0x1ff, (vaddr >> 21) & 0x1ff, (vaddr >> 30) & 0x1ff };

        var v = &root.entries[vpn[2]];

        // Walk levels 2, 1, 0 until a leaf or an invalid entry is found.
        // (Was `for (level..3)` with `level` undefined in this scope.)
        var i: usize = 3;
        while (i > 0) {
            i -= 1;

            if (!v.isValid()) {
                break;
            } else if (v.isLeaf()) {
                // A leaf at level i maps a (4 KiB << (9 * i)) region; keep the
                // offset bits of vaddr below that granularity.
                const offsetMask = (@as(usize, 1) << @intCast(12 + 9 * i)) - 1;
                const vaddrPageOffset = vaddr & offsetMask;
                const pagePAddr = (@as(usize, @intCast(v.bits)) << 2) & ~offsetMask;

                return pagePAddr | vaddrPageOffset;
            }

            // A non-leaf entry at level 0 is malformed; stop instead of
            // indexing vpn[i - 1] with i == 0 (usize underflow).
            if (i == 0) break;

            const next: [*]const Entry = @ptrFromInt(@as(usize, @intCast((v.bits & ~@as(i64, 0x3ff)) << 2)));
            v = &next[vpn[i - 1]];
        }

        return null;
    }
};
+
/// Allocates `n` contiguous pages using a first-fit scan of the page
/// metadata. Returns a pointer to the first page.
/// Errors: `ZeroSize` when n == 0, `OutOfMemory` when no run fits.
/// The returned memory is NOT zeroed; see `zalloc` for that.
pub fn alloc(n: usize) !*void {
    // usize is never negative; the old `n <= 0` test only caught zero anyway.
    if (n == 0) return AllocError.ZeroSize;

    const pages: [*]Page = @ptrCast(heap_start);

    // First-fit scan. `i + n <= num_pages` also admits a run ending exactly
    // at the last page, which the old `0..num_pages - n` bound excluded.
    var i: usize = 0;
    while (i + n <= num_pages) : (i += 1) {
        if (!pages[i].isFree()) continue;

        var insufficient = false;
        for (i..i + n) |j| {
            if (!pages[j].isFree()) {
                insufficient = true;
                break;
            }
        }
        if (insufficient) continue;

        // Claim the run — the original never set any flags, so every call
        // returned the same region and `free` could not find the run's end.
        for (i..i + n) |j| {
            pages[j].flags = PageFlags.active;
        }
        pages[i + n - 1].flags = PageFlags.active | PageFlags.last;

        return @ptrFromInt(@intFromPtr(alloc_start) + i * page_size);
    }

    return AllocError.OutOfMemory;
}
+
/// Frees the allocation run beginning at `ptr` (as returned by `alloc`).
/// Errors: `OutOfRange` when `ptr` maps outside the page metadata,
/// `DoubleFree` when the run was already free.
pub fn free(ptr: *void) !void {
    // Page index of `ptr`; its one-byte descriptor lives at heap_start + index.
    const index = (@intFromPtr(ptr) - @intFromPtr(alloc_start)) / page_size;
    const addr = @intFromPtr(heap_start) + index;

    // The descriptor must lie within the metadata region (num_pages bytes),
    // not merely anywhere inside the heap as the old check allowed.
    if (addr < @intFromPtr(heap_start) or addr >= @intFromPtr(heap_start) + num_pages) return AllocError.OutOfRange;

    // Must be `var` (it advances) and built with @ptrFromInt, not @ptrCast.
    var page: [*]Page = @ptrFromInt(addr);

    // Clear every page of the run up to (not including) the `last` marker.
    while (!page[0].isFree() and !page[0].isLast()) {
        page[0].clear();
        page += 1;
    }

    // Reaching a free page before a `last` marker means the run was already
    // freed (or `ptr` never came from `alloc`).
    if (!page[0].isLast()) return AllocError.DoubleFree;

    page[0].clear();
}
+
/// Allocates `n` contiguous pages and zero-fills them.
/// Propagates `alloc`'s errors.
pub fn zalloc(n: usize) !*void {
    const ret = try alloc(n);

    // Write zeroes in batches of 64 bits to reduce the number of store
    // instructions. The remainder doesn't need handling because `page_size`
    // (4096) is divisible by 8.
    const size = (n * page_size) / 8;

    // `size` is runtime-known, so a `[size]u64` array type is impossible;
    // use a many-item pointer. Pages are page-aligned, hence the @alignCast.
    const ptr: [*]volatile u64 = @alignCast(@ptrCast(ret));

    for (0..size) |i| {
        ptr[i] = 0;
    }

    return ret;
}