author    Himbeer <himbeer@disroot.org>  2024-05-23 13:48:01 +0200
committer Himbeer <himbeer@disroot.org>  2024-05-23 13:48:01 +0200
commit    3274a700daff545437f919041cbdce6938eede06 (patch)
tree      60a4ec5ebb1406af20733027a2bb4a5d54e54908 /src/lib
parent    0f61d3bed969fecb35e438bfac2fe34f588834c6 (diff)
Drop FDT support in favor of custom HWI format
Fixes numerous parsing bugs and increases efficiency. The kernel now runs successfully on the Lichee Pi 4A.
Diffstat (limited to 'src/lib')
-rw-r--r--  src/lib/Console.zig             39
-rw-r--r--  src/lib/cfg/platform/lpi4a.hwi  bin 0 -> 64 bytes
-rw-r--r--  src/lib/cfg/platform/lpi4a.txt  2
-rw-r--r--  src/lib/hwinfo.zig              96
-rw-r--r--  src/lib/instructions.zig        66
-rw-r--r--  src/lib/interrupts.zig          332
-rw-r--r--  src/lib/mem.zig                 196
-rw-r--r--  src/lib/paging.zig              557
-rw-r--r--  src/lib/pci.zig                 761
-rw-r--r--  src/lib/plic.zig                101
-rw-r--r--  src/lib/process.zig             173
-rw-r--r--  src/lib/sbi.zig                 123
-rw-r--r--  src/lib/sbi/debug_console.zig   35
-rw-r--r--  src/lib/sbi/legacy.zig          41
-rw-r--r--  src/lib/sbi/sys_reset.zig       37
-rw-r--r--  src/lib/sbi/time.zig            39
-rw-r--r--  src/lib/syscall.zig             15
-rw-r--r--  src/lib/trap.zig                11
18 files changed, 2624 insertions, 0 deletions
diff --git a/src/lib/Console.zig b/src/lib/Console.zig
new file mode 100644
index 0000000..1e2920d
--- /dev/null
+++ b/src/lib/Console.zig
@@ -0,0 +1,39 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const debug_console = @import("sbi/debug_console.zig");
+const legacy = @import("sbi/legacy.zig");
+
+provider: Provider,
+
+const Self = @This();
+
+pub const Provider = union(enum) {
+ sbi_debug: debug_console.Writer,
+ sbi_legacy: legacy.Writer,
+};
+
+pub fn autoChoose() ?Self {
+ if (debug_console.writer()) |sbi_con| {
+ return .{
+ .provider = .{ .sbi_debug = sbi_con },
+ };
+ } else |_| {}
+ if (legacy.writer()) |sbi_legacy_con| {
+ return .{
+ .provider = .{ .sbi_legacy = sbi_legacy_con },
+ };
+ } else |_| {}
+
+ return null;
+}
+
+pub fn writer(console: *const Self) std.io.AnyWriter {
+ switch (console.provider) {
+ .sbi_debug => return console.provider.sbi_debug.any(),
+ .sbi_legacy => return console.provider.sbi_legacy.any(),
+ }
+}
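
For illustration, a minimal usage sketch of this console abstraction (the call site is hypothetical, not part of this commit):

    const Console = @import("Console.zig");

    fn greet() void {
        // autoChoose returns null if neither SBI console extension is available.
        const console = Console.autoChoose() orelse return;
        const w = console.writer();
        w.print("hello from the kernel\r\n", .{}) catch {};
    }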
diff --git a/src/lib/cfg/platform/lpi4a.hwi b/src/lib/cfg/platform/lpi4a.hwi
new file mode 100644
index 0000000..cf57fc0
--- /dev/null
+++ b/src/lib/cfg/platform/lpi4a.hwi
Binary files differ
diff --git a/src/lib/cfg/platform/lpi4a.txt b/src/lib/cfg/platform/lpi4a.txt
new file mode 100644
index 0000000..6213698
--- /dev/null
+++ b/src/lib/cfg/platform/lpi4a.txt
@@ -0,0 +1,2 @@
+cpus 0 0 0x2dc6c0
+plic 0xffd8000000 0x4000000
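
Each line of this text source is one device record in the format consumed by hwinfo.Dev.parse below: a device kind, a register base address, a register length, and an optional value, all parsed with std.fmt.parseUnsigned (so any base with a 0x/0o/0b prefix works). The first line, for example, decodes to kind = .cpus, reg = { .addr = 0, .len = 0 }, value = 0x2dc6c0 (3000000, presumably the timebase frequency in Hz).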
diff --git a/src/lib/hwinfo.zig b/src/lib/hwinfo.zig
new file mode 100644
index 0000000..a2d6a5e
--- /dev/null
+++ b/src/lib/hwinfo.zig
@@ -0,0 +1,96 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const config = @import("config");
+const std = @import("std");
+
+const hw_info = @embedFile("cfg/platform/" ++ config.platform ++ ".hwi");
+//const devices: *[hw_info.len / @sizeOf(Dev)]Dev = @ptrCast(hw_info);
+const devices = std.io.FixedBufferStream([]const u8){ .buffer = hw_info, .pos = 0 };
+
+pub const ParseError = error{
+ MissingKind,
+ MissingRegAddr,
+ MissingRegLen,
+
+ UnknownDevKind,
+};
+
+pub const DevKind = enum(u32) {
+ cpus,
+ plic,
+ pcie,
+ pci,
+
+ pub fn parse(buf: []const u8) !DevKind {
+ if (std.mem.eql(u8, buf, "cpus")) {
+ return .cpus;
+ } else if (std.mem.eql(u8, buf, "plic")) {
+ return .plic;
+ } else if (std.mem.eql(u8, buf, "pcie")) {
+ return .pcie;
+ } else if (std.mem.eql(u8, buf, "pci")) {
+ return .pci;
+ }
+
+ return ParseError.UnknownDevKind;
+ }
+};
+
+pub const Dev = extern struct {
+ kind: DevKind,
+ reg: Reg,
+ value: u64,
+
+ pub fn parse(buf: []const u8) !Dev {
+ var columns = std.mem.tokenizeScalar(u8, buf, ' ');
+ const kind_buf = columns.next() orelse return ParseError.MissingKind;
+ const reg_addr_buf = columns.next() orelse return ParseError.MissingRegAddr;
+ const reg_len_buf = columns.next() orelse return ParseError.MissingRegLen;
+ const value_buf = columns.next() orelse "0";
+
+ return .{
+ .kind = try DevKind.parse(kind_buf),
+ .reg = .{
+ .addr = try std.fmt.parseUnsigned(u64, reg_addr_buf, 0),
+ .len = try std.fmt.parseUnsigned(u64, reg_len_buf, 0),
+ },
+ .value = try std.fmt.parseUnsigned(u64, value_buf, 0),
+ };
+ }
+};
+
+pub const Reg = extern struct {
+ addr: u64,
+ len: u64,
+
+ pub fn slice(self: Reg, comptime T: type) []volatile T {
+ const ptr: [*]volatile T = @ptrFromInt(self.addr);
+ return ptr[0 .. self.len / @sizeOf(T)];
+ }
+};
+
+pub const ByKind = struct {
+ kind: DevKind,
+ fbs: std.io.FixedBufferStream([]const u8),
+
+ pub fn next(it: *ByKind) !?Dev {
+ const reader = it.fbs.reader();
+ while (reader.readStruct(Dev)) |device| {
+ if (device.kind == it.kind) {
+ return device;
+ }
+ } else |_| {}
+
+ return null;
+ }
+
+ pub fn reset(it: *ByKind) !void {
+ try it.fbs.seekTo(0);
+ }
+};
+
+pub fn byKind(kind: DevKind) ByKind {
+ return .{ .kind = kind, .fbs = devices };
+}
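
A sketch of how a driver might look up its device with this API (hypothetical caller, not part of this commit):

    const hwinfo = @import("hwinfo.zig");

    fn plicReg() !?hwinfo.Reg {
        // Scans the embedded .hwi blob for the first record of kind .plic.
        var it = hwinfo.byKind(.plic);
        if (try it.next()) |dev| return dev.reg;
        return null;
    }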
diff --git a/src/lib/instructions.zig b/src/lib/instructions.zig
new file mode 100644
index 0000000..2ae53ec
--- /dev/null
+++ b/src/lib/instructions.zig
@@ -0,0 +1,66 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const paging = @import("paging.zig");
+
+pub const SbiRet = struct {
+ err: isize,
+ val: isize,
+};
+
+// # Arguments
+//
+// * ext_id: Extension ID
+// * fn_id: Function ID (within extension)
+// * a0: Argument 0
+// * a1: Argument 1
+// * a2: Argument 2
+pub fn ecall(ext_id: usize, fn_id: usize, a0: usize, a1: usize, a2: usize) SbiRet {
+ // SBI returns the error code in a0 and the value in a1. Both are full
+ // 64-bit registers, so bind them as isize outputs; a 32-bit store (sw)
+ // would truncate negative error codes.
+ var err: isize = undefined;
+ var val: isize = undefined;
+
+ asm volatile ("ecall"
+ : [err] "={a0}" (err),
+ [val] "={a1}" (val),
+ : [eid] "{a7}" (ext_id),
+ [fid] "{a6}" (fn_id),
+ [a0] "{a0}" (a0),
+ [a1] "{a1}" (a1),
+ [a2] "{a2}" (a2),
+ : "memory"
+ );
+
+ return .{ .err = err, .val = val };
+}
+
+pub fn stackPointer() usize {
+ return asm volatile (""
+ : [value] "={sp}" (-> usize),
+ );
+}
+
+pub const setSatp = setCsrFn(paging.Satp, "satp").?;
+pub const setSscratch = setCsrFn(usize, "sscratch").?;
+pub const setSepc = setCsrFn(usize, "sepc").?;
+
+pub fn setCsrFn(comptime T: type, csr: []const u8) ?fn (T) callconv(.Inline) void {
+ if (csr.len > 8) return null;
+
+ return struct {
+ inline fn setCsr(value: T) void {
+ const bits: usize = @bitCast(value);
+
+ comptime var buf = [_]u8{0} ** 23;
+
+ asm volatile (std.fmt.bufPrint(buf[0..], "csrw {s}, %[bits]", .{csr}) catch unreachable
+ :
+ : [bits] "r" (bits),
+ );
+ }
+ }.setCsr;
+}
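
A sketch of how these helpers are meant to be combined (hypothetical call site; EID 0x10 / FID 0 is the SBI base extension's "get specification version" call):

    const instructions = @import("instructions.zig");

    fn sbiSpecVersion() isize {
        // a0..a2 are unused by this SBI function, so pass zeroes.
        const ret = instructions.ecall(0x10, 0, 0, 0, 0);
        return ret.val;
    }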
diff --git a/src/lib/interrupts.zig b/src/lib/interrupts.zig
new file mode 100644
index 0000000..35a20d4
--- /dev/null
+++ b/src/lib/interrupts.zig
@@ -0,0 +1,332 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const Console = @import("Console.zig");
+const instructions = @import("instructions.zig");
+const paging = @import("paging.zig");
+const plic = @import("plic.zig");
+const process = @import("process.zig");
+const syscall = @import("syscall.zig");
+const time = @import("sbi/time.zig");
+const trap = @import("trap.zig");
+
+pub var trap_frame: trap.Frame = undefined;
+
+pub const SupervisorTrapVector = packed struct(usize) {
+ pub const Mode = enum(u2) {
+ direct = 0,
+ vectored = 1,
+ };
+
+ mode: u2,
+ base_addr: u62,
+
+ pub fn fromBaseAddr(mode: Mode, base_addr: usize) SupervisorTrapVector {
+ return .{
+ .mode = @intFromEnum(mode),
+ .base_addr = @intCast(base_addr >> 2),
+ };
+ }
+};
+
+pub const Enable = packed struct(usize) {
+ u_software: u1,
+ s_software: u1,
+ reserved0: u2,
+ u_timer: u1,
+ s_timer: u1,
+ reserved1: u2,
+ u_external: u1,
+ s_external: u1,
+ reserved2: u54,
+
+ pub const all = .{
+ .u_software = 1,
+ .s_software = 1,
+ .reserved0 = 0,
+ .u_timer = 1,
+ .s_timer = 1,
+ .reserved1 = 0,
+ .u_external = 1,
+ .s_external = 1,
+ .reserved2 = 0,
+ };
+};
+
+pub const Cause = packed struct(usize) {
+ num: u63,
+ @"async": u1,
+
+ pub fn isAsync(self: Cause) bool {
+ return self.@"async" == 1;
+ }
+};
+
+pub const AsyncCause = enum(u63) {
+ user_software = 0,
+ supervisor_software = 1,
+ user_timer = 4,
+ supervisor_timer = 5,
+ user_external = 8,
+ supervisor_external = 9,
+ _,
+};
+
+pub const SyncCause = enum(u63) {
+ instruction_address_misaligned = 0,
+ instruction_access_fault = 1,
+ illegal_instruction = 2,
+ breakpoint = 3,
+ load_access_fault = 5,
+ amo_address_misaligned = 6,
+ store_or_amo_access_fault = 7,
+ ecall = 8,
+ instruction_page_fault = 12,
+ load_page_fault = 13,
+ store_or_amo_page_fault = 15,
+ _,
+};
+
+pub const Sstatus = packed struct(usize) {
+ u_interrupts: u1,
+ s_interrupts: u1,
+ reserved0: u2,
+ u_interrupts_previous: u1,
+ s_interrupts_previous: u1,
+ reserved1: u2,
+ previous_privilege: u1,
+ reserved2: u4,
+ fs: u2,
+ xs: u2,
+ reserved3: u1,
+ sum: u1,
+ mxr: u1,
+ reserved4: u12,
+ u_xlen: u2,
+ reserved5: u29,
+ sd: u1,
+};
+
+export fn handleTrap(epc: usize, tval: usize, cause_bits: usize, hart_id: usize, status: Sstatus, frame: *trap.Frame) usize {
+
+ const console = Console.autoChoose() orelse while (true) asm volatile ("wfi");
+ const w = console.writer();
+
+ const cause: Cause = @bitCast(cause_bits);
+
+ if (cause.isAsync()) {
+ switch (@as(AsyncCause, @enumFromInt(cause.num))) {
+ .supervisor_software => w.print("Hart {d}: Software interrupt\r\n", .{hart_id}) catch while (true) {},
+ .supervisor_timer => {
+ if (status.previous_privilege == 0) {
+ // Trapped from U-mode, update pc for next time slice.
+ //
+ // We can simply use the last node of the process list here
+ // because the scheduler moves a process to the end of the queue
+ // before returning into it.
+
+ process.list.last.?.data.pc = epc;
+ process.list.last.?.data.state = .waiting;
+
+ schedule() catch |err| {
+ std.debug.panic("Hart {d}: Unable to schedule next process: {any}", .{ hart_id, err });
+ };
+ }
+
+ // Don't interrupt kernel code, it may never run otherwise.
+ },
+ .supervisor_external => {
+ const context: u14 = @intCast(2 * hart_id + 1);
+
+ const external_cause = plic.default.claim(context) catch |err| {
+ std.debug.panic("Hart {d}: Unable to claim external interrupt: {any}", .{ hart_id, err });
+ };
+ if (external_cause) |source| {
+ w.print("Hart {d}: External interrupt: 0x{x}\r\n", .{ hart_id, source }) catch while (true) {};
+ handleExternal(source);
+ plic.default.complete(context, source) catch |err| {
+ std.debug.panic("Hart {d}: Unable to complete external interrupt: {any}", .{ hart_id, err });
+ };
+ }
+ },
+ else => {
+ std.debug.panic("Hart {d}: Unhandled asynchronous interrupt: {d}", .{ hart_id, cause.num });
+ },
+ }
+ } else {
+ switch (@as(SyncCause, @enumFromInt(cause.num))) {
+ .illegal_instruction => {
+ std.debug.panic("Hart {d}: Illegal instruction, EPC = 0x{x:0>16}", .{ hart_id, epc });
+ },
+ .instruction_access_fault => {
+ std.debug.panic("Hart {d}: Instruction access fault: EPC = 0x{x:0>16}, TVAL = 0x{x:0>16}", .{ hart_id, epc, tval });
+ },
+ .load_access_fault => {
+ std.debug.panic("Hart {d}: Load access fault: EPC = 0x{x:0>16}, TVAL = 0x{x:0>16}", .{ hart_id, epc, tval });
+ },
+ .store_or_amo_access_fault => {
+ std.debug.panic("Hart {d}: Store/AMO access fault: EPC = 0x{x:0>16}, TVAL = 0x{x:0>16}", .{ hart_id, epc, tval });
+ },
+ .ecall => {
+ syscall.handle(frame) catch |err| switch (err) {
+ syscall.Error.UnknownSyscall => {
+ const a0 = frame.general_purpose_registers[10];
+ w.print("Hart {d}: Unknown syscall, a0 = 0x{x:0>16}\r\n", .{ hart_id, a0 }) catch while (true) {};
+ },
+ };
+
+ return epc + 4;
+ },
+ .instruction_page_fault => {
+ std.debug.panic("Hart {d}: Instruction page fault: EPC = 0x{x:0>16}, TVAL = 0x{x:0>16}", .{ hart_id, epc, tval });
+ },
+ .load_page_fault => {
+ std.debug.panic("Hart {d}: Load page fault: EPC = 0x{x:0>16}, TVAL = 0x{x:0>16}", .{ hart_id, epc, tval });
+ },
+ .store_or_amo_page_fault => {
+ std.debug.panic("Hart {d}: Store/AMO page fault: EPC = 0x{x:0>16}, TVAL = 0x{x:0>16}", .{ hart_id, epc, tval });
+ },
+ else => {
+ std.debug.panic("Hart {d}: Unhandled synchronous interrupt: {d}, EPC = 0x{x:0>16}", .{ hart_id, cause.num, epc });
+ },
+ }
+ }
+
+ return epc;
+}
+
+fn handleExternal(interrupt: ?u10) void {
+ _ = &interrupt;
+}
+
+export fn supervisorTrapVector() align(4) callconv(.Naked) noreturn {
+ asm volatile (
+ \\ csrrw t6, sscratch, t6
+ \\
+ \\ sd x1, 8(t6)
+ \\ sd x2, 16(t6)
+ \\ sd x3, 24(t6)
+ \\ sd x4, 32(t6)
+ \\ sd x5, 40(t6)
+ \\ sd x6, 48(t6)
+ \\ sd x7, 56(t6)
+ \\ sd x8, 64(t6)
+ \\ sd x9, 72(t6)
+ \\ sd x10, 80(t6)
+ \\ sd x11, 88(t6)
+ \\ sd x12, 96(t6)
+ \\ sd x13, 104(t6)
+ \\ sd x14, 112(t6)
+ \\ sd x15, 120(t6)
+ \\ sd x16, 128(t6)
+ \\ sd x17, 136(t6)
+ \\ sd x18, 144(t6)
+ \\ sd x19, 152(t6)
+ \\ sd x20, 160(t6)
+ \\ sd x21, 168(t6)
+ \\ sd x22, 176(t6)
+ \\ sd x23, 184(t6)
+ \\ sd x24, 192(t6)
+ \\ sd x25, 200(t6)
+ \\ sd x26, 208(t6)
+ \\ sd x27, 216(t6)
+ \\ sd x28, 224(t6)
+ \\ sd x29, 232(t6)
+ \\ sd x30, 240(t6)
+ \\
+ \\ mv t5, t6
+ \\ csrr t6, sscratch
+ \\
+ \\ sd x31, 248(t5)
+ \\
+ \\ csrw sscratch, t5
+ \\
+ \\ csrr a0, sepc
+ \\ csrr a1, stval
+ \\ csrr a2, scause
+ // \\ csrr a3, mhartid
+ // Use zero for the hart id until a solution is found.
+ \\ mv a3, zero
+ \\ csrr a4, sstatus
+ \\ mv a5, t5
+ \\ la sp, _stvec_stack_end
+ \\ call handleTrap
+ \\
+ \\ csrw sepc, a0
+ \\
+ \\ csrr t6, sscratch
+ \\
+ \\ ld x1, 8(t6)
+ \\ ld x2, 16(t6)
+ \\ ld x3, 24(t6)
+ \\ ld x4, 32(t6)
+ \\ ld x5, 40(t6)
+ \\ ld x6, 48(t6)
+ \\ ld x7, 56(t6)
+ \\ ld x8, 64(t6)
+ \\ ld x9, 72(t6)
+ \\ ld x10, 80(t6)
+ \\ ld x11, 88(t6)
+ \\ ld x12, 96(t6)
+ \\ ld x13, 104(t6)
+ \\ ld x14, 112(t6)
+ \\ ld x15, 120(t6)
+ \\ ld x16, 128(t6)
+ \\ ld x17, 136(t6)
+ \\ ld x18, 144(t6)
+ \\ ld x19, 152(t6)
+ \\ ld x20, 160(t6)
+ \\ ld x21, 168(t6)
+ \\ ld x22, 176(t6)
+ \\ ld x23, 184(t6)
+ \\ ld x24, 192(t6)
+ \\ ld x25, 200(t6)
+ \\ ld x26, 208(t6)
+ \\ ld x27, 216(t6)
+ \\ ld x28, 224(t6)
+ \\ ld x29, 232(t6)
+ \\ ld x30, 240(t6)
+ \\ ld x31, 248(t6)
+ \\
+ \\ sret
+ );
+}
+
+fn schedule() !noreturn {
+ if (process.next()) |next| {
+ try time.interruptInMillis(process.schedule_interval_millis);
+ process.switchTo(next);
+ }
+
+ return process.Error.EmptySchedule;
+}
+
+pub fn init() void {
+ trap_frame = .{
+ .general_purpose_registers = [_]usize{0} ** 32,
+ .floating_point_registers = [_]usize{0} ** 32,
+ .satp = 0,
+ .stack_pointer = @ptrFromInt(instructions.stackPointer()),
+ .hart_id = 0,
+ };
+
+ asm volatile (
+ \\ csrw sscratch, %[trapframe]
+ \\
+ \\ la t0, supervisorTrapVector
+ \\ csrw stvec, t0
+ \\
+ \\ csrr t0, sstatus
+ \\ ori t0, t0, 2
+ \\ csrw sstatus, t0
+ :
+ : [trapframe] "r" (&trap_frame),
+ );
+}
+
+pub const setEnabled = instructions.setCsrFn(Enable, "sie").?;
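
A sketch of the boot-time sequence this module expects (hypothetical caller, not part of this commit):

    const interrupts = @import("interrupts.zig");

    fn enableTraps() void {
        // Installs the trap frame in sscratch, points stvec at
        // supervisorTrapVector and sets SSTATUS.SIE.
        interrupts.init();
        // Unmasks software, timer and external interrupts in the sie CSR.
        interrupts.setEnabled(interrupts.Enable.all);
    }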
diff --git a/src/lib/mem.zig b/src/lib/mem.zig
new file mode 100644
index 0000000..639ac39
--- /dev/null
+++ b/src/lib/mem.zig
@@ -0,0 +1,196 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const paging = @import("paging.zig");
+
+const Chunk = struct {
+ flags: Flags,
+ len: usize,
+
+ const Flags = packed struct(u8) {
+ active: u1,
+ reserved: u7,
+ };
+
+ pub fn next(self: *const Chunk) [*]Chunk {
+ const byte_ptr: [*]const u8 = @ptrCast(@as([*]const Chunk, @ptrCast(self)) + 1);
+ return @constCast(@ptrCast(@alignCast(byte_ptr + self.len)));
+ }
+
+ pub fn take(self: *Chunk) void {
+ self.flags.active = 1;
+ }
+
+ pub fn clear(self: *Chunk) void {
+ self.flags = std.mem.zeroInit(Flags, .{});
+ }
+};
+
+pub const ChunkAllocatorConfig = struct {
+ auto_merge_free: bool = true,
+};
+
+pub fn ChunkAllocator(comptime config: ChunkAllocatorConfig) type {
+ return struct {
+ head: ?[*]Chunk,
+ pages: usize,
+
+ const Self = @This();
+
+ pub fn init(pages: usize) !Self {
+ const chunks: [*]Chunk = @ptrCast(@alignCast(try paging.zeroedAlloc(pages)));
+ chunks[0].len = (pages * paging.page_size) - @sizeOf(Chunk);
+ return .{ .head = chunks, .pages = pages };
+ }
+
+ pub fn deinit(self: *Self) void {
+ if (self.head) |head| {
+ paging.free(head);
+ }
+ }
+
+ pub fn allocator(self: *Self) std.mem.Allocator {
+ return .{
+ .ptr = self,
+ .vtable = &.{
+ .alloc = alloc,
+ .resize = resize,
+ .free = free,
+ },
+ };
+ }
+
+ pub fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
+ _ = ret_addr;
+
+ const self: *Self = @ptrCast(@alignCast(ctx));
+
+ const ptr_align = @as(usize, 1) << @as(std.mem.Allocator.Log2Align, @intCast(log2_ptr_align));
+
+ var chunk = self.head orelse return null;
+ const bound = @intFromPtr(chunk) + (self.pages * paging.page_size);
+
+ while (@intFromPtr(chunk) < bound) : (chunk = chunk[0].next()) {
+ const adjust_off = std.mem.alignPointerOffset(@as([*]u8, @ptrCast(chunk + 1)), ptr_align) orelse return null;
+ const aligned_len = len + adjust_off;
+
+ // Is this chunk free and large enough to hold the requested allocation?
+ if (!@bitCast(chunk[0].flags.active) and chunk[0].len >= aligned_len) {
+ const remaining = chunk[0].len - aligned_len;
+
+ chunk[0].take();
+
+ if (remaining > @sizeOf(Chunk)) {
+ const new_successor: *Chunk = @ptrCast(@alignCast(@as([*]u8, @ptrCast(chunk + 1)) + aligned_len));
+
+ new_successor.clear();
+ new_successor.len = remaining - @sizeOf(Chunk);
+
+ chunk[0].len = aligned_len;
+ }
+
+ return std.mem.alignPointer(@as([*]u8, @ptrCast(chunk + 1)), ptr_align);
+ }
+ }
+
+ return null;
+ }
+
+ // Only expands into the next free chunk (if there is one).
+ // You may want to call mergeFree first if auto_merge_free was configured to false.
+ pub fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
+ _ = ret_addr;
+
+ const self: *Self = @ptrCast(@alignCast(ctx));
+
+ const ptr_align = @as(usize, 1) << @as(std.mem.Allocator.Log2Align, @intCast(log2_buf_align));
+
+ const head = self.head orelse return false;
+ const bound = @intFromPtr(head) + (self.pages * paging.page_size);
+
+ const chunk = @as([*]Chunk, @ptrCast(@alignCast(buf.ptr))) - 1;
+
+ const adjust_off = std.mem.alignPointerOffset(@as([*]u8, @ptrCast(chunk + 1)), ptr_align) orelse return false;
+ const aligned_new_len = new_len + adjust_off;
+
+ if (aligned_new_len < chunk[0].len) {
+ const regained = chunk[0].len - aligned_new_len;
+ if (regained > @sizeOf(Chunk)) {
+ const new_successor: *Chunk = @ptrCast(@alignCast(@as([*]u8, @ptrCast(chunk + 1)) + aligned_new_len));
+
+ new_successor.clear();
+ new_successor.len = regained - @sizeOf(Chunk);
+
+ chunk[0].len = aligned_new_len;
+ }
+
+ return true;
+ } else if (aligned_new_len > chunk[0].len) {
+ const successor = chunk[0].next();
+ if (@intFromPtr(successor) >= bound) return false;
+
+ const total_len = chunk[0].len + @sizeOf(Chunk) + successor[0].len;
+
+ if (!@bitCast(successor[0].flags.active) and aligned_new_len <= total_len) {
+ const remaining = total_len - aligned_new_len;
+
+ if (remaining > @sizeOf(Chunk)) {
+ const new_successor: *Chunk = @ptrCast(@alignCast(@as([*]u8, @ptrCast(chunk + 1)) + aligned_new_len));
+
+ new_successor.clear();
+ new_successor.len = remaining - @sizeOf(Chunk);
+
+ chunk[0].len = aligned_new_len;
+ } else {
+ chunk[0].len = total_len;
+ }
+
+ return true;
+ }
+
+ return false;
+ } else return true;
+ }
+
+ pub fn free(ctx: *anyopaque, old_mem: []u8, log2_old_align: u8, ret_addr: usize) void {
+ _ = log2_old_align;
+ _ = ret_addr;
+
+ const self: *Self = @ptrCast(@alignCast(ctx));
+
+ // Safety check. Do not free memory in uninitialized / undefined pages.
+ if (self.head == null) return;
+
+ const chunk = @as([*]Chunk, @ptrCast(@alignCast(old_mem.ptr))) - 1;
+ chunk[0].clear();
+
+ if (config.auto_merge_free) {
+ self.mergeFree();
+ }
+ }
+
+ pub fn mergeFree(self: *Self) void {
+ var chunk = self.head orelse return;
+ const bound = @intFromPtr(chunk) + (self.pages * paging.page_size);
+
+ while (@intFromPtr(chunk) < bound) : (chunk = chunk[0].next()) {
+ const successor = chunk[0].next();
+
+ if (@intFromPtr(successor) >= bound) {
+ // Safety check.
+ // Should never run if the implementation is working correctly.
+ //
+ // Ensure that there is a successor within bounds.
+ // The loop condition is not sufficient here, it only detects
+ // non-erroneous list ends (i.e. chunk == bound).
+ break;
+ } else if (!@bitCast(chunk[0].flags.active) and !@bitCast(successor[0].flags.active)) {
+ chunk[0].len += @sizeOf(Chunk) + successor[0].len;
+ }
+ }
+ }
+ };
+}
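
A sketch of how the chunk allocator is wired up as a std.mem.Allocator (hypothetical caller, not part of this commit):

    const mem = @import("mem.zig");

    fn heapExample() !void {
        // Back the allocator with a single kernel heap page.
        var chunk_allocator = try mem.ChunkAllocator(.{}).init(1);
        defer chunk_allocator.deinit();

        const allocator = chunk_allocator.allocator();
        const buf = try allocator.alloc(u8, 64);
        defer allocator.free(buf);
    }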
diff --git a/src/lib/paging.zig b/src/lib/paging.zig
new file mode 100644
index 0000000..1e785e7
--- /dev/null
+++ b/src/lib/paging.zig
@@ -0,0 +1,557 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+// This is an implementation of Sv39 paging, meaning that the virtual addresses
+// are 39 bits wide. Sv32 and Sv48 are currently not implemented.
+
+const hwinfo = @import("hwinfo.zig");
+
+// Defined by linker script.
+pub const text_start = @extern(*anyopaque, .{ .name = "_text_start" });
+pub const text_end = @extern(*anyopaque, .{ .name = "_text_end" });
+pub const rodata_start = @extern(*anyopaque, .{ .name = "_rodata_start" });
+pub const rodata_end = @extern(*anyopaque, .{ .name = "_rodata_end" });
+pub const data_start = @extern(*anyopaque, .{ .name = "_data_start" });
+pub const data_end = @extern(*anyopaque, .{ .name = "_data_end" });
+pub const sdata_start = @extern(*anyopaque, .{ .name = "_sdata_start" });
+pub const sdata_end = @extern(*anyopaque, .{ .name = "_sdata_end" });
+pub const bss_start = @extern(*anyopaque, .{ .name = "_bss_start" });
+pub const bss_end = @extern(*anyopaque, .{ .name = "_bss_end" });
+pub const sbss_start = @extern(*anyopaque, .{ .name = "_sbss_start" });
+pub const sbss_end = @extern(*anyopaque, .{ .name = "_sbss_end" });
+pub const stack_start = @extern(*anyopaque, .{ .name = "_stack_start" });
+pub const stack_end = @extern(*anyopaque, .{ .name = "_stack_end" });
+pub const stvec_stack_start = @extern(*anyopaque, .{ .name = "_stvec_stack_start" });
+pub const stvec_stack_end = @extern(*anyopaque, .{ .name = "_stvec_stack_end" });
+pub const heap_start = @extern(*anyopaque, .{ .name = "_heap_start" });
+pub const heap_end = @extern(*anyopaque, .{ .name = "_heap_end" });
+
+inline fn heapSize() usize {
+ return @intFromPtr(heap_end) - @intFromPtr(heap_start);
+}
+
+pub const page_size: usize = 0x1000; // 4096 bytes
+
+pub var next_mmio_vaddr: usize = 0xff000000;
+
+// Rounds an address up to the next page boundary.
+// Addresses that are already aligned are returned unchanged.
+fn pageAlign(addr: usize) usize {
+ return (addr + (page_size - 1)) & ~(page_size - 1);
+}
+
+pub const AllocError = error{
+ ZeroSize,
+ OutOfMemory,
+ OutOfRange,
+ DoubleFree,
+ AlreadyTaken,
+};
+
+pub const TableError = error{
+ NotALeaf,
+};
+
+pub const Mode = enum(u4) {
+ bare = 0,
+ sv39 = 8,
+ sv48 = 9,
+};
+
+// SATP register, configures and enables the MMU (and thus paging).
+pub const Satp = packed struct(usize) {
+ pub const Asid = u16;
+
+ // Reverse field order so that @bitCast yields a usize with the right order.
+ // Without this writing the value to the SATP register enables an invalid page table,
+ // leaves the MMU disabled or causes other bugs.
+ ppn: u44,
+ asid: Asid,
+ mode: Mode,
+};
+
+// A page descriptor for use by the heap allocator.
+pub const Page = struct {
+ flags: Flags,
+
+ pub const Flags = packed struct {
+ active: u1,
+ last: u1, // Last page of contiguous allocation
+
+ pub const clear = .{
+ .active = 0,
+ .last = 0,
+ };
+ };
+
+ // Marks a page as taken, optionally flagging it as the last page of an allocation.
+ // Fails with AllocError.AlreadyTaken if the page is already taken.
+ pub fn take(self: *Page, last: bool) !void {
+ if (@bitCast(self.flags.active)) return AllocError.AlreadyTaken;
+
+ self.flags.active = 1;
+ if (last) self.flags.last = 1;
+ }
+};
+
+// Returns the offset from the page base. Works with both physical and virtual addresses.
+// Offsets are never translated.
+fn offsetOf(addr: usize) usize {
+ // Offset is in bottom 12 bits of both physical and virtual addresses.
+ return addr & 0xfff;
+}
+
+// Returns the virtual page numbers of a virtual address by paging level.
+fn virtualPageNumbers(vaddr: usize) [3]usize {
+ // Virtual address format:
+ //
+ // VPN[2] | VPN[1] | VPN[0] | offset
+ // 9 bits | 9 bits | 9 bits | 12 bits
+ //
+ // Virtual page numbers are indexes into the page table of their level,
+ // i.e. VPN[2] is an index to the root page table on level 2
+ // whereas VPN[1] is an index to the page table on level 1 specified by VPN[2].
+ //
+ // Offsets are never translated.
+
+ return [3]usize{
+ (vaddr >> 12) & 0x1ff,
+ (vaddr >> 21) & 0x1ff,
+ (vaddr >> 30) & 0x1ff,
+ };
+}
+
+// Returns the physical page numbers of a physical address by paging level.
+fn physicalPageNumbers(paddr: usize) [3]usize {
+ // Physical address format:
+ //
+ // PPN[2] | PPN[1] | PPN[0] | offset
+ // 26 bits | 9 bits | 9 bits | 12 bits
+ //
+ // PPN[i] is what to map VPN[i] to.
+ //
+ // Offsets are never translated.
+
+ return [3]usize{
+ (paddr >> 12) & 0x1ff,
+ (paddr >> 21) & 0x1ff,
+ (paddr >> 30) & 0x3ff_ffff,
+ };
+}
+
+// Returns the page numbers of an address as a single integer.
+fn pageNumber(addr: usize) usize {
+ return addr >> 12;
+}
+
+pub const EntryFlags = packed struct(u8) {
+ valid: u1,
+ read: u1,
+ write: u1,
+ exec: u1,
+ user: u1,
+ global: u1,
+ accessed: u1,
+ dirty: u1,
+
+ pub const branch = EntryFlags{
+ .valid = 1,
+ .read = 0,
+ .write = 0,
+ .exec = 0,
+ .user = 0,
+ .global = 0,
+ .accessed = 0,
+ .dirty = 0,
+ };
+
+ pub const readOnly = EntryFlags{
+ .valid = 1,
+ .read = 1,
+ .write = 0,
+ .exec = 0,
+ .user = 0,
+ .global = 0,
+ .accessed = 1,
+ .dirty = 0,
+ };
+
+ pub const readWrite = EntryFlags{
+ .valid = 1,
+ .read = 1,
+ .write = 1,
+ .exec = 0,
+ .user = 0,
+ .global = 0,
+ .accessed = 1,
+ .dirty = 1,
+ };
+
+ pub const readExec = EntryFlags{
+ .valid = 1,
+ .read = 1,
+ .write = 0,
+ .exec = 1,
+ .user = 0,
+ .global = 0,
+ .accessed = 1,
+ .dirty = 0,
+ };
+
+ pub const userReadOnly = EntryFlags{
+ .valid = 1,
+ .read = 1,
+ .write = 0,
+ .exec = 0,
+ .user = 1,
+ .global = 0,
+ .accessed = 1,
+ .dirty = 0,
+ };
+
+ pub const userReadWrite = EntryFlags{
+ .valid = 1,
+ .read = 1,
+ .write = 1,
+ .exec = 0,
+ .user = 1,
+ .global = 0,
+ .accessed = 1,
+ .dirty = 1,
+ };
+
+ pub const userReadExec = EntryFlags{
+ .valid = 1,
+ .read = 1,
+ .write = 0,
+ .exec = 1,
+ .user = 1,
+ .global = 0,
+ .accessed = 1,
+ .dirty = 0,
+ };
+
+ pub fn isLeaf(self: EntryFlags) bool {
+ return @bitCast(self.read) or @bitCast(self.write) or @bitCast(self.exec);
+ }
+};
+
+pub const Entry = packed struct(u64) {
+ // Reverse field order so that @bitCast yields a u64 with the right order.
+ // Without this writing the value to a page table creates an invalid entry,
+ // thus resulting in page faults or hanging.
+ flags: EntryFlags,
+ rsw: u2, // Reserved for supervisor use. Currently unused.
+ mapping: u44,
+ reserved: u10,
+
+ // Returns the physical page numbers to map to by paging level.
+ pub fn physicalPageNumbers(self: Entry) [3]usize {
+ // Mapping format:
+ //
+ // PPN[2] | PPN[1] | PPN[0]
+ // 26 bits | 9 bits | 9 bits
+ //
+ // PPN[i] is what to map VPN[i] to.
+
+ return [3]usize{
+ self.mapping & 0x1ff,
+ (self.mapping >> 9) & 0x1ff,
+ (self.mapping >> 18) & 0x3ff_ffff,
+ };
+ }
+
+ pub fn mappingAddr(self: Entry) usize {
+ // Apply an offset of zero since entries always point to an aligned page
+ // and this function should return a usable memory address.
+ // Callers can change the offset if needed.
+ return self.mapping << 12;
+ }
+
+ pub fn isValid(self: Entry) bool {
+ return @bitCast(self.flags.valid);
+ }
+
+ // Returns whether the entry is a mapping (true) or another page table (false).
+ pub fn isLeaf(self: Entry) bool {
+ return self.flags.isLeaf();
+ }
+};
+
+pub const Table = struct {
+ // Do not add any fields. The unmap function relies on mappings pointing to page tables,
+ // casting them to this data structure. This cast becomes invalid if additional fields
+ // are added, especially if they precede the entries field.
+
+ entries: [512]Entry,
+
+ // Create a mapping of a certain virtual page address to a physical page address,
+ // discarding offsets. The mapping is written to the specified level,
+ // creating page tables as needed.
+ //
+ // The mapping must be a leaf, meaning that passing flags
+ // that indicate no access permissions at all will return an error.
+ //
+ // This function internally uses zeroedAlloc to allocate memory for the required page tables,
+ // but assumes that the physical address to map to has already been allocated by the caller.
+ pub fn map(root: *Table, vaddr: usize, paddr: usize, flags: EntryFlags, level: usize) !void {
+ if (!flags.isLeaf()) return TableError.NotALeaf;
+
+ const vpn = virtualPageNumbers(vaddr);
+
+ // Grab the entry in the root (level 2) page table.
+ var v = &root.entries[vpn[2]];
+
+ // Walk the page table levels from high to low under the assumption that root is valid.
+ for (level..2) |iInv| {
+ // i runs from 1 down to `level`, so the leaf entry ends up at vpn[level].
+ const i = 1 + level - iInv;
+
+ // If this entry doesn't point to a lower-level page table or memory page yet,
+ // allocate one.
+ if (!v.isValid()) {
+ const page = try zeroedAlloc(1);
+ v.* = .{
+ .flags = EntryFlags.branch,
+ .rsw = 0,
+ .mapping = @intCast(pageNumber(@intFromPtr(page))), // Remove the offset, a mapping is just the PPN.
+ .reserved = 0,
+ };
+ }
+
+ // Get the entries of the existing or newly created page table.
+ // This cast is safe because the only field of a Table is its entries.
+ const table: *Table = @ptrFromInt(v.mappingAddr());
+ // Grab the entry of the table by indexing it according to the corresponding VPN.
+ v = &table.entries[vpn[i]];
+ }
+
+ // Write the actual mapping to the correct table on the requested level.
+ v.* = .{
+ .flags = flags,
+ .rsw = 0,
+ .mapping = @intCast(pageNumber(paddr)), // Remove the offset, a mapping is just the PPN.
+ .reserved = 0,
+ };
+ }
+
+ // Deallocate child page tables recursively. The provided table itself is not affected,
+ // allowing partial unmapping of multi-level tables.
+ //
+ // This function does not deallocate memory pages mapped by the provided table
+ // or any of its (recursive) children.
+ pub fn unmap(table: *Table) void {
+ for (&table.entries) |*entry| {
+ if (entry.isValid() and !entry.isLeaf()) {
+ // This cast is safe because the only field of a Table is its entries.
+ const lowerLevelTable: *Table = @ptrFromInt(entry.mappingAddr());
+ lowerLevelTable.unmap();
+ entry.flags.valid = 0;
+ free(lowerLevelTable);
+ }
+ }
+ }
+
+ // Returns the physical address to a virtual address using the provided level 2 page table.
+ // This can be used to access virtual addresses whose page table isn't active
+ // in the MMU / SATP CSR (Control and Status Register), making it possible
+ // to access the memory space of a user mode process (from its perspective)
+ // from supervisor mode cleanly.
+ //
+ // The absence of a return value is equivalent to a page fault.
+ pub fn translate(root: *const Table, vaddr: usize) ?usize {
+ const vpn = virtualPageNumbers(vaddr);
+
+ // Grab the entry in the root (level 2) page table.
+ var v = &root.entries[vpn[2]];
+
+ // Walk the page table levels from high to low.
+ for (0..3) |iInv| {
+ const i = 2 - iInv;
+
+ if (!v.isValid()) {
+ break;
+ } else if (v.isLeaf()) {
+ // Mapping found.
+
+ // Create a mask starting directly below / after PN[i].
+ // Since all levels can have leaves i is not guaranteed to be zero.
+ const offsetMask = (@as(usize, 1) << @intCast(12 + 9 * i)) - 1;
+ const offset = vaddr & offsetMask;
+ const ppnJoined = v.mappingAddr() & ~offsetMask;
+
+ return ppnJoined | offset;
+ }
+
+ // Get the entries of the page table of the current level.
+ const entry: *[512]Entry = @ptrFromInt(v.mappingAddr());
+ // Grab the entry of the table by indexing it according to the corresponding VPN.
+ v = &entry[vpn[i - 1]];
+ }
+
+ return null;
+ }
+
+ // Creates an identity mapping for all pages needed for the specified range
+ // using the map function. An identity mapping doesn't actually translate
+ // memory addresses, virtual addresses and physical addresses are the same.
+ //
+ // The start address is inclusive while end is exclusive.
+ //
+ // This is still useful because it can be used to prevent the kernel
+ // from accessing machine-reserved memory by accident.
+ pub fn identityMapRange(root: *Table, start: usize, end: usize, flags: EntryFlags) !void {
+ // Mask out the offset within the starting page.
+ const startPage = start & ~(page_size - 1);
+ // Mask out the offset within the ending page, but ensure the returned page address
+ // is always the last required page for the mapping (end is exclusive,
+ // so subtracting 1 ends up in the previous page on boundaries,
+ // eliminating one useless mapping). The resulting value is inclusive.
+ const endPage = (end - 1) & ~(page_size - 1);
+
+ var page = startPage;
+ while (page <= endPage) : (page += page_size) {
+ try root.map(page, page, flags, 0);
+ }
+ }
+
+ // Constructs the SATP register value needed to activate the specified page table
+ // using the provided Address Space Identifier (ASID).
+ //
+ // The kernel page table always has ASID 0 (not mandated by the RISC-V specification).
+ pub fn satp(root: *const Table, asid: Satp.Asid) Satp {
+ return .{
+ .ppn = @intCast(pageNumber(@intFromPtr(root))),
+ .asid = asid,
+ .mode = .sv39,
+ };
+ }
+
+ pub fn mapKernel(root: *Table) !void {
+ try root.identityMapRange(@intFromPtr(text_start), @intFromPtr(text_end), EntryFlags.readExec);
+ try root.identityMapRange(@intFromPtr(rodata_start), @intFromPtr(rodata_end), EntryFlags.readOnly);
+ try root.identityMapRange(@intFromPtr(data_start), @intFromPtr(data_end), EntryFlags.readWrite);
+ try root.identityMapRange(@intFromPtr(sdata_start), @intFromPtr(sdata_end), EntryFlags.readWrite);
+ try root.identityMapRange(@intFromPtr(bss_start), @intFromPtr(bss_end), EntryFlags.readWrite);
+ try root.identityMapRange(@intFromPtr(sbss_start), @intFromPtr(sbss_end), EntryFlags.readWrite);
+ try root.identityMapRange(@intFromPtr(stack_start), @intFromPtr(stack_end), EntryFlags.readWrite);
+ try root.identityMapRange(@intFromPtr(stvec_stack_start), @intFromPtr(stvec_stack_end), EntryFlags.readWrite);
+ try root.identityMapRange(@intFromPtr(heap_start), @intFromPtr(heap_end), EntryFlags.readWrite);
+ }
+
+ pub fn mapDevice(root: *Table, reg: *hwinfo.Reg) !void {
+ // Capture the physical base (hwinfo.Reg only has addr and len)
+ // before reg.addr is rewritten to the virtual address below.
+ const physical = reg.addr;
+ const physical_start = physical & ~(page_size - 1);
+ const physical_end = (physical + reg.len - 1) & ~(page_size - 1);
+
+ reg.addr = next_mmio_vaddr | (physical & (page_size - 1));
+
+ var paddr = physical_start;
+ while (paddr <= physical_end) : (paddr += page_size) {
+ try root.map(next_mmio_vaddr, paddr, EntryFlags.readWrite, 0);
+ next_mmio_vaddr += page_size;
+ }
+ }
+};
+
+pub fn init() void {
+ const num_pages = heapSize() / page_size;
+ const pages: [*]Page = @ptrCast(heap_start);
+
+ for (0..num_pages) |i| {
+ pages[i].flags = Page.Flags.clear;
+ }
+}
+
+// Allocate memory pages. Passing n <= 0 results in an error.
+pub fn alloc(n: usize) !*void {
+ if (n <= 0) return AllocError.ZeroSize;
+
+ const num_pages = heapSize() / page_size;
+ // Start allocating beyond page descriptors.
+ const alloc_start = pageAlign(@intFromPtr(heap_start) + num_pages * @sizeOf(Page));
+
+ const pages: [*]Page = @ptrCast(heap_start);
+
+ // Iterate over potential starting points.
+ // The subtraction of n prevents unnecessary iterations for starting points
+ // that don't leave enough space for the whole allocation.
+ for (0..num_pages - n) |i| {
+ if (!@bitCast(pages[i].flags.active)) {
+ // Free starting page found.
+
+ var insufficient = false;
+
+ // Check if there is enough contiguous free space for the whole allocation.
+ // If not, move on to the next potential starting point.
+ for (i..n + i) |j| {
+ if (@bitCast(pages[j].flags.active)) {
+ insufficient = true;
+ break;
+ }
+ }
+
+ if (!insufficient) {
+ // Mark all allocated pages as taken.
+ for (i..n + i - 1) |j| {
+ try pages[j].take(false);
+ }
+ try pages[n + i - 1].take(true);
+
+ // Construct a pointer to the first page using its descriptor number.
+ return @ptrFromInt(alloc_start + i * page_size);
+ }
+ }
+ }
+
+ return AllocError.OutOfMemory;
+}
+
+// Free (contiguous) memory page(s). Provides limited protection against double-frees.
+pub fn free(ptr: *void) !void {
+ const num_pages = heapSize() / page_size;
+ // Start allocating beyond page descriptors.
+ const alloc_start = pageAlign(@intFromPtr(heap_start) + num_pages * @sizeOf(Page));
+
+ // Restore the address to the page descriptor flags from the address of its contents
+ // by restoring the descriptor number and indexing the descriptor table
+ // at the start of the heap using it.
+ const addr = @intFromPtr(heap_start) + (@intFromPtr(ptr) - alloc_start) / page_size;
+
+ // Ensure basic address sanity.
+ // Does not check descriptor table bounds.
+ if (addr < @intFromPtr(heap_start) or addr >= @intFromPtr(heap_start) + heapSize()) return AllocError.OutOfRange;
+
+ var page: [*]Page = @ptrFromInt(addr);
+
+ // Mark all but the last page as free.
+ // A double-free check is performed on the last page before it is freed.
+ while (@bitCast(page[0].flags.active) and !@bitCast(page[0].flags.last)) : (page += 1) {
+ page[0].flags = Page.Flags.clear;
+ }
+
+ // Free page encountered, but it isn't marked as the last. Potential double-free.
+ if (!@bitCast(page[0].flags.last)) return AllocError.DoubleFree;
+
+ // Mark the last page as free.
+ page[0].flags = Page.Flags.clear;
+}
+
+// Allocate memory pages and overwrite their contents with zeroes for added security.
+// Passing n <= 0 results in an error.
+pub fn zeroedAlloc(n: usize) !*void {
+ const ret = try alloc(n);
+
+ // Write zeroes in batches of 64-bit to reduce the amount of store instructions.
+ // The remainder / remaining bytes don't need to be accounted for
+ // because page_size (4096) is divisible by 8.
+
+ const size = (n * page_size) / 8;
+ const ptr: [*]volatile u64 = @alignCast(@ptrCast(ret));
+
+ for (0..size) |i| {
+ ptr[i] = 0;
+ }
+
+ return ret;
+}
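
A sketch of kernel page table setup using this module (hypothetical boot code; setSatp comes from instructions.zig above):

    const instructions = @import("instructions.zig");
    const paging = @import("paging.zig");

    fn enablePaging() !void {
        // Initialize the page descriptor table at the start of the heap.
        paging.init();

        // One zeroed page holds the 512-entry root (level 2) table.
        const root: *paging.Table = @ptrCast(@alignCast(try paging.zeroedAlloc(1)));
        try root.mapKernel();

        // ASID 0 is used for the kernel table by convention in this codebase.
        instructions.setSatp(root.satp(0));
    }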
diff --git a/src/lib/pci.zig b/src/lib/pci.zig
new file mode 100644
index 0000000..7b862ee
--- /dev/null
+++ b/src/lib/pci.zig
@@ -0,0 +1,761 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const hwinfo = @import("hwinfo.zig");
+
+pub const Error = error{
+ NoRootNode,
+ NoSoCnode,
+ NoAddrCells,
+ NoSizeCells,
+ NoPcinode,
+ NoUnitAddr,
+ NoReg,
+ NoCompatInfo,
+ Incompatible,
+};
+
+pub const CamType = enum {
+ conventional,
+ enhanced,
+};
+
+pub const Controller = union(CamType) {
+ conventional: CamController,
+ enhanced: EcamController,
+
+ pub fn cfgSpace(self: Controller, bus: u8, device: u5, function: u3) *volatile CfgSpace {
+ return switch (self) {
+ .conventional => |controller| controller.cfgSpace(bus, device, function),
+ .enhanced => |controller| controller.cfgSpace(bus, device, function),
+ };
+ }
+};
+
+pub const CamController = struct {
+ reg: []volatile u8,
+
+ pub fn cfgSpace(self: CamController, bus: u8, device: u5, function: u3) *volatile CfgSpace {
+ const mmio_base = @intFromPtr(self.reg.ptr);
+ // Conventional CAM exposes 256 bytes of configuration space per function,
+ // so the per-function stride is 0x100 rather than ECAM's 0x1000.
+ const addr = mmio_base + ((@as(usize, bus) * 0x100) + (@as(usize, device) * 0x08) + function) * 0x100;
+ return @ptrFromInt(addr);
+ }
+};
+
+pub const EcamController = struct {
+ reg: []volatile u8,
+
+ pub fn cfgSpace(self: EcamController, bus: u8, device: u5, function: u3) *volatile CfgSpace {
+ const mmio_base = @intFromPtr(self.reg.ptr);
+ const addr = mmio_base + ((@as(usize, bus) * 0x100) + (@as(usize, device) * 0x08) + function) * 0x1000;
+ return @ptrFromInt(addr);
+ }
+};
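
For reference, the multiply chain in EcamController.cfgSpace is the standard ECAM layout: (bus * 0x100 + device * 0x08 + function) * 0x1000 equals bus << 20 | device << 15 | function << 12, so bus 1, device 2, function 3 resolves to offset 0x113000 from the MMIO base.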
+
+pub const HeaderType = enum(u8) {
+ general = 0x0,
+ pci2pci = 0x1,
+ pci2cardbus = 0x2,
+};
+
+pub const Header = packed union {
+ general: GeneralHeader,
+ pci2pci: Pci2Pciheader,
+ pci2cardbus: Pci2CardBusHeader,
+};
+
+pub const GeneralHeader = packed struct(u384) {
+ base_addr0: u32,
+ base_addr1: u32,
+ base_addr2: u32,
+ base_addr3: u32,
+ base_addr4: u32,
+ base_addr5: u32,
+ cardbus_cis_pointer: u32,
+ subsystem_vendor_id: u16,
+ subsystem_id: u16,
+ expansion_rom_base_addr: u32,
+ capabilities_pointer: u8,
+ reserved0: u24,
+ reserved1: u32,
+ interrupt_line: u8,
+ interrupt_pin: u8,
+ min_grant: u8,
+ max_latency: u8,
+
+ pub fn getBaseAddr0(self: *const volatile GeneralHeader) u32 {
+ return std.mem.littleToNative(u32, self.base_addr0);
+ }
+
+ pub fn getBaseAddr1(self: *const volatile GeneralHeader) u32 {
+ return std.mem.littleToNative(u32, self.base_addr1);
+ }
+
+ pub fn getBaseAddr2(self: *const volatile GeneralHeader) u32 {
+ return std.mem.littleToNative(u32, self.base_addr2);
+ }
+
+ pub fn getBaseAddr3(self: *const volatile GeneralHeader) u32 {
+ return std.mem.littleToNative(u32, self.base_addr3);
+ }
+
+ pub fn getBaseAddr4(self: *const volatile GeneralHeader) u32 {
+ return std.mem.littleToNative(u32, self.base_addr4);
+ }
+
+ pub fn getBaseAddr5(self: *const volatile GeneralHeader) u32 {
+ return std.mem.littleToNative(u32, self.base_addr5);
+ }
+
+ pub fn getCardBusCisPointer(self: *const volatile GeneralHeader) u32 {
+ return std.mem.littleToNative(u32, self.cardbus_cis_pointer);
+ }
+
+ pub fn getSubsystemVendorId(self: *const volatile GeneralHeader) u16 {
+ return std.mem.littleToNative(u16, self.subsystem_vendor_id);
+ }
+
+ pub fn getSubsystemId(self: *const volatile GeneralHeader) u16 {
+ return std.mem.littleToNative(u16, self.subsystem_id);
+ }
+
+ pub fn getExpansionRomBaseAddr(self: *const volatile GeneralHeader) u32 {
+ return std.mem.littleToNative(u32, self.expansion_rom_base_addr);
+ }
+
+ pub fn getCapabilitiesPointer(self: *const volatile GeneralHeader) u8 {
+ return std.mem.littleToNative(u8, self.capabilities_pointer);
+ }
+
+ pub fn getReserved0(self: *const volatile GeneralHeader) u24 {
+ return std.mem.littleToNative(u24, self.reserved0);
+ }
+
+ pub fn getReserved1(self: *const volatile GeneralHeader) u32 {
+ return std.mem.littleToNative(u32, self.reserved1);
+ }
+
+ pub fn getInterruptLine(self: *const volatile GeneralHeader) u8 {
+ return std.mem.littleToNative(u8, self.interrupt_line);
+ }
+
+ pub fn getInterruptPin(self: *const volatile GeneralHeader) u8 {
+ return std.mem.littleToNative(u8, self.interrupt_pin);
+ }
+
+ pub fn getMinGrant(self: *const volatile GeneralHeader) u8 {
+ return std.mem.littleToNative(u8, self.min_grant);
+ }
+
+ pub fn getMaxLatency(self: *const volatile GeneralHeader) u8 {
+ return std.mem.littleToNative(u8, self.max_latency);
+ }
+
+ pub fn setBaseAddr0(self: *volatile GeneralHeader, new_base_addr0: u32) void {
+ self.base_addr0 = std.mem.nativeToLittle(u32, new_base_addr0);
+ }
+
+ pub fn setBaseAddr1(self: *volatile GeneralHeader, new_base_addr1: u32) void {
+ self.base_addr1 = std.mem.nativeToLittle(u32, new_base_addr1);
+ }
+
+ pub fn setBaseAddr2(self: *volatile GeneralHeader, new_base_addr2: u32) void {
+ self.base_addr2 = std.mem.nativeToLittle(u32, new_base_addr2);
+ }
+
+ pub fn setBaseAddr3(self: *volatile GeneralHeader, new_base_addr3: u32) void {
+ self.base_addr3 = std.mem.nativeToLittle(u32, new_base_addr3);
+ }
+
+ pub fn setBaseAddr4(self: *volatile GeneralHeader, new_base_addr4: u32) void {
+ self.base_addr4 = std.mem.nativeToLittle(u32, new_base_addr4);
+ }
+
+ pub fn setBaseAddr5(self: *volatile GeneralHeader, new_base_addr5: u32) void {
+ self.base_addr5 = std.mem.nativeToLittle(u32, new_base_addr5);
+ }
+
+ pub fn setCardBusCisPointer(self: *volatile GeneralHeader, new_cardbus_cis_pointer: u32) void {
+ self.cardbus_cis_pointer = std.mem.nativeToLittle(u32, new_cardbus_cis_pointer);
+ }
+
+ pub fn setSubsystemVendorId(self: *volatile GeneralHeader, new_subsystem_vendor_id: u16) void {
+ self.subsystem_vendor_id = std.mem.nativeToLittle(u16, new_subsystem_vendor_id);
+ }
+
+ pub fn setSubsystemId(self: *volatile GeneralHeader, new_subsystem_id: u16) void {
+ self.subsystem_id = std.mem.nativeToLittle(u16, new_subsystem_id);
+ }
+
+ pub fn setExpansionRomBaseAddr(self: *volatile GeneralHeader, new_expansion_rom_base_addr: u32) void {
+ self.expansion_rom_base_addr = std.mem.nativeToLittle(u32, new_expansion_rom_base_addr);
+ }
+
+ pub fn setCapabilitiesPointer(self: *volatile GeneralHeader, new_capabilities_pointer: u8) void {
+ self.capabilities_pointer = std.mem.nativeToLittle(u8, new_capabilities_pointer);
+ }
+
+ pub fn setReserved0(self: *volatile GeneralHeader, new_reserved0: u24) void {
+ self.reserved0 = std.mem.nativeToLittle(u24, new_reserved0);
+ }
+
+ pub fn setReserved1(self: *volatile GeneralHeader, new_reserved1: u32) void {
+ self.reserved1 = std.mem.nativeToLittle(u32, new_reserved1);
+ }
+
+ pub fn setInterruptLine(self: *volatile GeneralHeader, new_interrupt_line: u8) void {
+ self.interrupt_line = std.mem.nativeToLittle(u8, new_interrupt_line);
+ }
+
+ pub fn setInterruptPin(self: *volatile GeneralHeader, new_interrupt_pin: u8) void {
+ self.interrupt_pin = std.mem.nativeToLittle(u8, new_interrupt_pin);
+ }
+
+ pub fn setMinGrant(self: *volatile GeneralHeader, new_min_grant: u8) void {
+ self.min_grant = std.mem.nativeToLittle(u8, new_min_grant);
+ }
+
+ pub fn setMaxLatency(self: *volatile GeneralHeader, new_max_latency: u8) void {
+ self.max_latency = std.mem.nativeToLittle(u8, new_max_latency);
+ }
+};
+
+pub const Pci2Pciheader = packed struct(u384) {
+ base_addr0: u32,
+ base_addr1: u32,
+ primary_bus_number: u8,
+ secondary_bus_number: u8,
+ subordinate_bus_number: u8,
+ secondary_latency_timer: u8,
+ io_base: u8,
+ io_limit: u8,
+ secondary_status: u16,
+ memory_base: u16,
+ memory_limit: u16,
+ prefetchable_memory_base: u16,
+ prefetchable_memory_limit: u16,
+ prefetchable_base_upper_32_bits: u32,
+ prefetchable_limit_upper_32_bits: u32,
+ io_base_upper_16_bits: u16,
+ io_limit_upper_16_bits: u16,
+ capability_pointer: u8,
+ reserved0: u24,
+ expansion_rom_base_addr: u32,
+ interrupt_line: u8,
+ interrupt_pin: u8,
+ bridge_control: u16,
+
+ pub fn getBaseAddr0(self: *const volatile Pci2Pciheader) u32 {
+ return std.mem.littleToNative(u32, self.base_addr0);
+ }
+
+ pub fn getBaseAddr1(self: *const volatile Pci2Pciheader) u32 {
+ return std.mem.littleToNative(u32, self.base_addr1);
+ }
+
+ pub fn getPrimaryBusNumber(self: *const volatile Pci2Pciheader) u8 {
+ return std.mem.littleToNative(u8, self.primary_bus_number);
+ }
+
+ pub fn getSecondaryBusNumber(self: *const volatile Pci2Pciheader) u8 {
+ return std.mem.littleToNative(u8, self.secondary_bus_number);
+ }
+
+ pub fn getSubordinateBusNumber(self: *const volatile Pci2Pciheader) u8 {
+ return std.mem.littleToNative(u8, self.subordinate_bus_number);
+ }
+
+ pub fn getSecondaryLatencyTimer(self: *const volatile Pci2Pciheader) u8 {
+ return std.mem.littleToNative(u8, self.secondary_latency_timer);
+ }
+
+ pub fn getIoBase(self: *const volatile Pci2Pciheader) u8 {
+ return std.mem.littleToNative(u8, self.io_base);
+ }
+
+ pub fn getIoLimit(self: *const volatile Pci2Pciheader) u8 {
+ return std.mem.littleToNative(u8, self.io_limit);
+ }
+
+ pub fn getSecondaryStatus(self: *const volatile Pci2Pciheader) u16 {
+ return std.mem.littleToNative(u16, self.secondary_status);
+ }
+
+ pub fn getMemoryBase(self: *const volatile Pci2Pciheader) u16 {
+ return std.mem.littleToNative(u16, self.memory_base);
+ }
+
+ pub fn getMemoryLimit(self: *const volatile Pci2Pciheader) u16 {
+ return std.mem.littleToNative(u16, self.memory_limit);
+ }
+
+ pub fn getPrefetchableMemoryBase(self: *const volatile Pci2Pciheader) u16 {
+ return std.mem.littleToNative(u16, self.prefetchable_memory_base);
+ }
+
+ pub fn getPrefetchableMemoryLimit(self: *const volatile Pci2Pciheader) u16 {
+ return std.mem.littleToNative(u16, self.prefetchable_memory_limit);
+ }
+
+ pub fn getPrefetchableBaseUpper32Bits(self: *const volatile Pci2Pciheader) u32 {
+ return std.mem.littleToNative(u32, self.prefetchable_base_upper_32_bits);
+ }
+
+ pub fn getPrefetchableLimitUpper32Bits(self: *const volatile Pci2Pciheader) u32 {
+ return std.mem.littleToNative(u32, self.prefetchable_limit_upper_32_bits);
+ }
+
+ pub fn getIoBaseUpper16Bits(self: *const volatile Pci2Pciheader) u16 {
+ return std.mem.littleToNative(u16, self.io_base_upper_16_bits);
+ }
+
+ pub fn getIoLimitUpper16Bits(self: *const volatile Pci2Pciheader) u16 {
+ return std.mem.littleToNative(u16, self.io_limit_upper_16_bits);
+ }
+
+ pub fn getCapabilityPointer(self: *const volatile Pci2Pciheader) u8 {
+ return std.mem.littleToNative(u8, self.capability_pointer);
+ }
+
+ pub fn getReserved0(self: *const volatile Pci2Pciheader) u24 {
+ return std.mem.littleToNative(u24, self.reserved0);
+ }
+
+ pub fn getExpansionRomBaseAddr(self: *const volatile Pci2Pciheader) u32 {
+ return std.mem.littleToNative(u32, self.expansion_rom_base_addr);
+ }
+
+ pub fn getInterruptLine(self: *const volatile Pci2Pciheader) u8 {
+ return std.mem.littleToNative(u8, self.interrupt_line);
+ }
+
+ pub fn getInterruptPin(self: *const volatile Pci2Pciheader) u8 {
+ return std.mem.littleToNative(u8, self.interrupt_pin);
+ }
+
+ pub fn getBridgeControl(self: *const volatile Pci2Pciheader) u16 {
+ return std.mem.littleToNative(u16, self.bridge_control);
+ }
+
+ pub fn setBaseAddr0(self: *volatile Pci2Pciheader, new_base_addr0: u32) void {
+ self.base_addr0 = std.mem.nativeToLittle(u32, new_base_addr0);
+ }
+
+ pub fn setBaseAddr1(self: *volatile Pci2Pciheader, new_base_addr1: u32) void {
+ self.base_addr1 = std.mem.nativeToLittle(u32, new_base_addr1);
+ }
+
+ pub fn setPrimaryBusNumber(self: *volatile Pci2Pciheader, new_primary_bus_number: u8) void {
+ self.primary_bus_number = std.mem.nativeToLittle(u8, new_primary_bus_number);
+ }
+
+ pub fn setSecondaryBusNumber(self: *volatile Pci2Pciheader, new_secondary_bus_number: u8) void {
+ self.secondary_bus_number = std.mem.nativeToLittle(u8, new_secondary_bus_number);
+ }
+
+ pub fn setSubordinateBusNumber(self: *volatile Pci2Pciheader, new_subordinate_bus_number: u8) void {
+ self.subordinate_bus_number = std.mem.nativeToLittle(u8, new_subordinate_bus_number);
+ }
+
+ pub fn setSecondaryLatencyTimer(self: *volatile Pci2Pciheader, new_secondary_latency_timer: u8) void {
+ self.secondary_latency_timer = std.mem.nativeToLittle(u8, new_secondary_latency_timer);
+ }
+
+ pub fn setIoBase(self: *volatile Pci2Pciheader, new_io_base: u8) void {
+ self.io_base = std.mem.nativeToLittle(u8, new_io_base);
+ }
+
+ pub fn setIoLimit(self: *volatile Pci2Pciheader, new_io_limit: u8) void {
+ self.io_limit = std.mem.nativeToLittle(u8, new_io_limit);
+ }
+
+ pub fn setSecondaryStatus(self: *volatile Pci2Pciheader, new_secondary_status: u16) void {
+ self.secondary_status = std.mem.nativeToLittle(u16, new_secondary_status);
+ }
+
+ pub fn setMemoryBase(self: *volatile Pci2Pciheader, new_memory_base: u16) void {
+ self.memory_base = std.mem.nativeToLittle(u16, new_memory_base);
+ }
+
+ pub fn setMemoryLimit(self: *volatile Pci2Pciheader, new_memory_limit: u16) void {
+ self.memory_limit = std.mem.nativeToLittle(u16, new_memory_limit);
+ }
+
+ pub fn setPrefetchableMemoryBase(self: *volatile Pci2Pciheader, new_prefetchable_memory_base: u16) void {
+ self.prefetchable_memory_base = std.mem.nativeToLittle(u16, new_prefetchable_memory_base);
+ }
+
+ pub fn setPrefetchableMemoryLimit(self: *volatile Pci2Pciheader, new_prefetchable_memory_limit: u16) void {
+ self.prefetchable_memory_limit = std.mem.nativeToLittle(u16, new_prefetchable_memory_limit);
+ }
+
+ pub fn setPrefetchableBaseUpper32Bits(self: *volatile Pci2Pciheader, new_prefetchable_base_upper_32_bits: u32) void {
+ self.prefetchable_base_upper_32_bits = std.mem.nativeToLittle(u32, new_prefetchable_base_upper_32_bits);
+ }
+
+ pub fn setPrefetchableLimitUpper32Bits(self: *volatile Pci2Pciheader, new_prefetchable_limit_upper_32_bits: u32) void {
+ self.prefetchable_limit_upper_32_bits = std.mem.nativeToLittle(u32, new_prefetchable_limit_upper_32_bits);
+ }
+
+ pub fn setIoBaseUpper16Bits(self: *volatile Pci2Pciheader, new_io_base_upper_16_bits: u16) void {
+ self.io_base_upper_16_bits = std.mem.nativeToLittle(u16, new_io_base_upper_16_bits);
+ }
+
+ pub fn setIoLimitUpper16Bits(self: *volatile Pci2Pciheader, new_io_limit_upper_16_bits: u16) void {
+ self.io_limit_upper_16_bits = std.mem.nativeToLittle(u16, new_io_limit_upper_16_bits);
+ }
+
+ pub fn setCapabilityPointer(self: *volatile Pci2Pciheader, new_capability_pointer: u8) void {
+ self.capability_pointer = std.mem.nativeToLittle(u8, new_capability_pointer);
+ }
+
+ pub fn setReserved0(self: *volatile Pci2Pciheader, new_reserved0: u24) void {
+ self.reserved0 = std.mem.nativeToLittle(u24, new_reserved0);
+ }
+
+ pub fn setExpansionRomBaseAddr(self: *volatile Pci2Pciheader, new_expansion_rom_base_addr: u32) void {
+ self.expansion_rom_base_addr = std.mem.nativeToLittle(u32, new_expansion_rom_base_addr);
+ }
+
+ pub fn setInterruptLine(self: *volatile Pci2Pciheader, new_interrupt_line: u8) void {
+ self.interrupt_line = std.mem.nativeToLittle(u8, new_interrupt_line);
+ }
+
+ pub fn setInterruptPin(self: *volatile Pci2Pciheader, new_interrupt_pin: u8) void {
+ self.interrupt_pin = std.mem.nativeToLittle(u8, new_interrupt_pin);
+ }
+
+ pub fn setBridgeControl(self: *volatile Pci2Pciheader, new_bridge_control: u16) void {
+ self.bridge_control = std.mem.nativeToLittle(u16, new_bridge_control);
+ }
+};
+
+pub const Pci2CardBusHeader = packed struct(u448) {
+ cardbus_socket_exca_base_addr: u32,
+ capabilities_list_offset: u8,
+ reserved0: u8,
+ secondary_status: u16,
+ pci_bus_number: u8,
+ cardbus_bus_number: u8,
+ subordinate_bus_number: u8,
+ cardbus_latency_timer: u8,
+ memory_base_addr0: u32,
+ memory_limit0: u32,
+ memory_base_addr1: u32,
+ memory_limit1: u32,
+ io_base_addr0: u32,
+ io_limit0: u32,
+ io_base_addr1: u32,
+ io_limit1: u32,
+ interrupt_line: u8,
+ interrupt_pin: u8,
+ bridge_control: u16,
+ subsystem_device_id: u16,
+ subsystem_vendor_id: u16,
+ pc_card_legacy_mode_16_bit_base_addr: u32,
+
+ pub fn getCardBusSocketExCaBaseAddr(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.cardbus_socket_exca_base_addr);
+ }
+
+ pub fn getCapabilitiesListOffset(self: *const volatile Pci2CardBusHeader) u8 {
+ return std.mem.littleToNative(u8, self.capabilities_list_offset);
+ }
+
+ pub fn getReserved0(self: *const volatile Pci2CardBusHeader) u8 {
+ return std.mem.littleToNative(u8, self.reserved0);
+ }
+
+ pub fn getSecondaryStatus(self: *const volatile Pci2CardBusHeader) u16 {
+ return std.mem.littleToNative(u16, self.secondary_status);
+ }
+
+ pub fn getPciBusNumber(self: *const volatile Pci2CardBusHeader) u8 {
+ return std.mem.littleToNative(u8, self.pci_bus_number);
+ }
+
+ pub fn getCardBusBusNumber(self: *const volatile Pci2CardBusHeader) u8 {
+ return std.mem.littleToNative(u8, self.cardbus_bus_number);
+ }
+
+ pub fn getSubordinateBusNumber(self: *const volatile Pci2CardBusHeader) u8 {
+ return std.mem.littleToNative(u8, self.subordinate_bus_number);
+ }
+
+ pub fn getCardBusLatencyTimer(self: *const volatile Pci2CardBusHeader) u8 {
+ return std.mem.littleToNative(u8, self.cardbus_latency_timer);
+ }
+
+ pub fn getMemoryBaseAddr0(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.memory_base_addr0);
+ }
+
+ pub fn getMemoryLimit0(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.memory_limit0);
+ }
+
+ pub fn getMemoryBaseAddr1(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.memory_base_addr1);
+ }
+
+ pub fn getMemoryLimit1(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.memory_limit1);
+ }
+
+ pub fn getIoBaseAddr0(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.io_base_addr0);
+ }
+
+ pub fn getIoLimit0(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.io_limit0);
+ }
+
+ pub fn getIoBaseAddr1(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.io_base_addr1);
+ }
+
+ pub fn getIoLimit1(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.io_limit1);
+ }
+
+ pub fn getInterruptLine(self: *const volatile Pci2CardBusHeader) u8 {
+ return std.mem.littleToNative(u8, self.interrupt_line);
+ }
+
+ pub fn getInterruptPin(self: *const volatile Pci2CardBusHeader) u8 {
+ return std.mem.littleToNative(u8, self.interrupt_pin);
+ }
+
+ pub fn getBridgeControl(self: *const volatile Pci2CardBusHeader) u16 {
+ return std.mem.littleToNative(u16, self.bridge_control);
+ }
+
+ pub fn getSubsystemDeviceId(self: *const volatile Pci2CardBusHeader) u16 {
+ return std.mem.littleToNative(u16, self.subsystem_device_id);
+ }
+
+ pub fn getSubsystemVendorId(self: *const volatile Pci2CardBusHeader) u16 {
+ return std.mem.littleToNative(u16, self.subsystem_vendor_id);
+ }
+
+ pub fn getPcCardLegacyMode16BitBaseAddr(self: *const volatile Pci2CardBusHeader) u32 {
+ return std.mem.littleToNative(u32, self.pc_card_legacy_mode_16_bit_base_addr);
+ }
+
+ pub fn setCardBusSocketExCaBaseAddr(self: *volatile Pci2CardBusHeader, new_cardbus_socket_exca_base_addr: u32) void {
+ self.cardbus_socket_exca_base_addr = std.mem.nativeToLittle(u32, new_cardbus_socket_exca_base_addr);
+ }
+
+ pub fn setCapabilitiesListOffset(self: *volatile Pci2CardBusHeader, new_capabilities_list_offset: u8) void {
+ self.capabilities_list_offset = std.mem.nativeToLittle(u8, new_capabilities_list_offset);
+ }
+
+ pub fn setReserved0(self: *volatile Pci2CardBusHeader, new_reserved0: u8) void {
+ self.reserved0 = std.mem.nativeToLittle(u8, new_reserved0);
+ }
+
+ pub fn setSecondaryStatus(self: *volatile Pci2CardBusHeader, new_secondary_status: u16) void {
+ self.secondary_status = std.mem.nativeToLittle(u16, new_secondary_status);
+ }
+
+ pub fn setPciBusNumber(self: *volatile Pci2CardBusHeader, new_pci_bus_number: u8) void {
+ self.pci_bus_number = std.mem.nativeToLittle(u8, new_pci_bus_number);
+ }
+
+ pub fn setCardBusBusNumber(self: *volatile Pci2CardBusHeader, new_cardbus_bus_number: u8) void {
+ self.cardbus_bus_number = std.mem.nativeToLittle(u8, new_cardbus_bus_number);
+ }
+
+ pub fn setSubordinateBusNumber(self: *volatile Pci2CardBusHeader, new_subordinate_bus_number: u8) void {
+ self.subordinate_bus_number = std.mem.nativeToLittle(u8, new_subordinate_bus_number);
+ }
+
+ pub fn setCardBusLatencyTimer(self: *volatile Pci2CardBusHeader, new_cardbus_latency_timer: u8) void {
+ self.cardbus_latency_timer = std.mem.nativeToLittle(u8, new_cardbus_latency_timer);
+ }
+
+ pub fn setMemoryBaseAddr0(self: *volatile Pci2CardBusHeader, new_memory_base_addr0: u32) void {
+ self.memory_base_addr0 = std.mem.nativeToLittle(u32, new_memory_base_addr0);
+ }
+
+ pub fn setMemoryLimit0(self: *volatile Pci2CardBusHeader, new_memory_limit0: u32) void {
+ self.memory_limit0 = std.mem.nativeToLittle(u32, new_memory_limit0);
+ }
+
+ pub fn setMemoryBaseAddr1(self: *volatile Pci2CardBusHeader, new_memory_base_addr1: u32) void {
+ self.memory_base_addr1 = std.mem.nativeToLittle(u32, new_memory_base_addr1);
+ }
+
+ pub fn setMemoryLimit1(self: *volatile Pci2CardBusHeader, new_memory_limit1: u32) void {
+ self.memory_limit1 = std.mem.nativeToLittle(u32, new_memory_limit1);
+ }
+
+ pub fn setIoBaseAddr0(self: *volatile Pci2CardBusHeader, new_io_base_addr0: u32) void {
+ self.io_base_addr0 = std.mem.nativeToLittle(u32, new_io_base_addr0);
+ }
+
+ pub fn setIoLimit0(self: *volatile Pci2CardBusHeader, new_io_limit0: u32) void {
+ self.io_limit0 = std.mem.nativeToLittle(u32, new_io_limit0);
+ }
+
+ pub fn setIoBaseAddr1(self: *volatile Pci2CardBusHeader, new_io_base_addr1: u32) void {
+ self.io_base_addr1 = std.mem.nativeToLittle(u32, new_io_base_addr1);
+ }
+
+ pub fn setIoLimit1(self: *volatile Pci2CardBusHeader, new_io_limit1: u32) void {
+ self.io_limit1 = std.mem.nativeToLittle(u32, new_io_limit1);
+ }
+
+ pub fn setInterruptLine(self: *volatile Pci2CardBusHeader, new_interrupt_line: u8) void {
+ self.interrupt_line = std.mem.nativeToLittle(u8, new_interrupt_line);
+ }
+
+ pub fn setInterruptPin(self: *volatile Pci2CardBusHeader, new_interrupt_pin: u8) void {
+ self.interrupt_pin = std.mem.nativeToLittle(u8, new_interrupt_pin);
+ }
+
+ pub fn setBridgeControl(self: *volatile Pci2CardBusHeader, new_bridge_control: u16) void {
+ self.bridge_control = std.mem.nativeToLittle(u16, new_bridge_control);
+ }
+
+ pub fn setSubsystemDeviceId(self: *volatile Pci2CardBusHeader, new_subsystem_device_id: u16) void {
+ self.subsystem_device_id = std.mem.nativeToLittle(u16, new_subsystem_device_id);
+ }
+
+ pub fn setSubsystemVendorId(self: *volatile Pci2CardBusHeader, new_subsystem_vendor_id: u16) void {
+ self.subsystem_vendor_id = std.mem.nativeToLittle(u16, new_subsystem_vendor_id);
+ }
+
+ pub fn setPcCardLegacyMode16BitBaseAddr(self: *volatile Pci2CardBusHeader, new_pc_card_legacy_mode_16_bit_base_addr: u32) void {
+ self.pc_card_legacy_mode_16_bit_base_addr = std.mem.nativeToLittle(u32, new_pc_card_legacy_mode_16_bit_base_addr);
+ }
+};
+
+pub const CfgSpace = packed struct(u576) {
+ vendor_id: u16,
+ device_id: u16,
+ command: u16,
+ status: u16,
+ revision_id: u8,
+ prog_if: u8,
+ subclass: u8,
+ class: u8,
+ cache_line_size: u8,
+ latency_timer: u8,
+ header_type: u8,
+ bist: u8, // built-in self test
+
+ header: Header,
+
+ pub fn getDeviceId(self: *const volatile CfgSpace) u16 {
+ return std.mem.littleToNative(u16, self.device_id);
+ }
+
+ pub fn getVendorId(self: *const volatile CfgSpace) u16 {
+ return std.mem.littleToNative(u16, self.vendor_id);
+ }
+
+ pub fn getStatus(self: *const volatile CfgSpace) u16 {
+ return std.mem.littleToNative(u16, self.status);
+ }
+
+ pub fn getCommand(self: *const volatile CfgSpace) u16 {
+ return std.mem.littleToNative(u16, self.command);
+ }
+
+ pub fn getClass(self: *const volatile CfgSpace) u8 {
+ return std.mem.littleToNative(u8, self.class);
+ }
+
+ pub fn getSubclass(self: *const volatile CfgSpace) u8 {
+ return std.mem.littleToNative(u8, self.subclass);
+ }
+
+ pub fn getProgIf(self: *const volatile CfgSpace) u8 {
+ return std.mem.littleToNative(u8, self.prog_if);
+ }
+
+ pub fn getRevisionId(self: *const volatile CfgSpace) u8 {
+ return std.mem.littleToNative(u8, self.revision_id);
+ }
+
+ pub fn getBist(self: *const volatile CfgSpace) u8 {
+ return std.mem.littleToNative(u8, self.bist);
+ }
+
+ pub fn getHeaderType(self: *const volatile CfgSpace) HeaderType {
+ return @enumFromInt(std.mem.littleToNative(u8, self.header_type));
+ }
+
+ pub fn getLatencyTimer(self: *const volatile CfgSpace) u8 {
+ return std.mem.littleToNative(u8, self.latency_timer);
+ }
+
+ pub fn getCacheLineSize(self: *const volatile CfgSpace) u8 {
+ return std.mem.littleToNative(u8, self.cache_line_size);
+ }
+
+ pub fn setDeviceId(self: *volatile CfgSpace, new_device_id: u16) void {
+ self.device_id = std.mem.nativeToLittle(u16, new_device_id);
+ }
+
+ pub fn setVendorId(self: *volatile CfgSpace, new_vendor_id: u16) void {
+ self.vendor_id = std.mem.nativeToLittle(u16, new_vendor_id);
+ }
+
+ pub fn setStatus(self: *volatile CfgSpace, new_status: u16) void {
+ self.status = std.mem.nativeToLittle(u16, new_status);
+ }
+
+ pub fn setCommand(self: *volatile CfgSpace, new_command: u16) void {
+ self.command = std.mem.nativeToLittle(u16, new_command);
+ }
+
+ pub fn setClass(self: *volatile CfgSpace, new_class: u8) void {
+ self.class = std.mem.nativeToLittle(u8, new_class);
+ }
+
+ pub fn setSubclass(self: *volatile CfgSpace, new_subclass: u8) void {
+ self.subclass = std.mem.nativeToLittle(u8, new_subclass);
+ }
+
+ pub fn setProgIf(self: *volatile CfgSpace, new_prog_if: u8) void {
+ self.prog_if = std.mem.nativeToLittle(u8, new_prog_if);
+ }
+
+ pub fn setRevisionId(self: *volatile CfgSpace, new_revision_id: u8) void {
+ self.revision_id = std.mem.nativeToLittle(u8, new_revision_id);
+ }
+
+ pub fn setBist(self: *volatile CfgSpace, new_bist: u8) void {
+ self.bist = std.mem.nativeToLittle(u8, new_bist);
+ }
+
+ pub fn setHeaderType(self: *volatile CfgSpace, new_header_type: HeaderType) void {
+ self.header_type = std.mem.nativeToLittle(u8, @intFromEnum(new_header_type));
+ }
+
+ pub fn setLatencyTimer(self: *volatile CfgSpace, new_latency_timer: u8) void {
+ self.latency_timer = std.mem.nativeToLittle(u8, new_latency_timer);
+ }
+
+ pub fn setCacheLineSize(self: *volatile CfgSpace, new_cache_line_size: u8) void {
+ self.cache_line_size = std.mem.nativeToLittle(u8, new_cache_line_size);
+ }
+};
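+
+// Usage sketch, assuming `cfg` is a *volatile CfgSpace pointer into a mapped
+// configuration space; an all-ones vendor ID means no device is present:
+//
+//     if (cfg.getVendorId() == 0xffff) return;
+//     const header_type = cfg.getHeaderType();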
+
+pub fn controllerFromHwInfo() !?Controller {
+ var pcie = hwinfo.byKind(.pcie);
+ var pci = hwinfo.byKind(.pci);
+
+ if (try pcie.next()) |ecam| {
+ return Controller{ .enhanced = .{ .reg = ecam.reg.slice(u8) } };
+ } else if (try pci.next()) |cam| {
+ return Controller{ .conventional = .{ .reg = cam.reg.slice(u8) } };
+ } else {
+ return null;
+ }
+}
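+
+// Usage sketch (the error name is illustrative; callers decide how to handle
+// a machine without any PCI(e) controller):
+//
+//     const controller = (try controllerFromHwInfo()) orelse return error.NoPciController;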
diff --git a/src/lib/plic.zig b/src/lib/plic.zig
new file mode 100644
index 0000000..227b8b8
--- /dev/null
+++ b/src/lib/plic.zig
@@ -0,0 +1,101 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const hwinfo = @import("hwinfo.zig");
+
+pub var default: Plic = undefined;
+
+pub const Error = error{
+ NoPlic,
+ PlicIncompatible,
+ NoPlicReg,
+ InterruptOutOfRange,
+ ContextOutOfRange,
+};
+
+pub const Context = packed struct {
+ priority_threshold: u32,
+ claim_or_complete: u32,
+};
+
+pub const Plic = struct {
+ mmio_register: hwinfo.Reg,
+
+ const priority_offset = 0x0;
+ const enable_offset = 0x2000;
+ const context_offset_zero = 0x200000;
+ const context_offset_nonzero = 0x201000;
+
+ pub const num_contexts = 15872;
+
+ // A value greater than or equal to num_contexts for context is an error.
+ // A value of 0 for interrupt results in an error.
+ pub fn setEnabled(self: Plic, context: u14, interrupt: u10, enable: bool) !void {
+ if (context >= num_contexts) return Error.ContextOutOfRange;
+ if (interrupt == 0) return Error.InterruptOutOfRange;
+
+ const mmio_slice = self.mmio_register.slice(u8);
+ const enable_ptr: *volatile [num_contexts][32]u32 = @alignCast(@ptrCast(&mmio_slice[enable_offset]));
+
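+ // Each context owns 32 u32 enable words covering interrupt sources 0-1023;
+ // e.g. source 33 maps to word 1, bit 1.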
+ const register = interrupt / 32;
+ const bit = @as(u32, 1) << @intCast(interrupt & 0x1f);
+
+ if (enable) {
+ enable_ptr[context][register] |= bit;
+ } else {
+ enable_ptr[context][register] &= ~bit;
+ }
+ }
+
+ // A value of 0 for interrupt results in an error.
+ pub fn setPriority(self: Plic, interrupt: u10, priority: u3) !void {
+ if (interrupt == 0) return Error.InterruptOutOfRange;
+
+ const mmio_slice = self.mmio_register.slice(u8);
+ const priority_ptr: *volatile [1024]u32 = @alignCast(@ptrCast(&mmio_slice[priority_offset]));
+
+ priority_ptr[interrupt] = @intCast(priority);
+ }
+
+ // A value greater than or equal to num_contexts for context is an error.
+ pub fn setPriorityThreshold(self: Plic, context: u14, threshold: u3) !void {
+ const context_ptr = try self.contextPtr(context);
+ context_ptr.priority_threshold = threshold;
+ }
+
+ // A value greater than or equal to num_contexts for context is an error.
+ // Non-null interrupts are guaranteed to be non-zero.
+ pub fn claim(self: Plic, context: u14) !?u10 {
+ const context_ptr = try self.contextPtr(context);
+ const interrupt = context_ptr.claim_or_complete;
+
+ if (interrupt != 0) return @intCast(interrupt) else return null;
+ }
+
+ // A value greater than or equal to num_contexts for context is an error.
+ // A value of 0 for interrupt results in an error.
+ pub fn complete(self: Plic, context: u14, interrupt: u10) !void {
+ if (interrupt == 0) return Error.InterruptOutOfRange;
+
+ const context_ptr = try self.contextPtr(context);
+ context_ptr.claim_or_complete = interrupt;
+ }
+
+ fn contextPtr(self: Plic, context: u14) !*volatile Context {
+ if (context >= num_contexts) return Error.ContextOutOfRange;
+
+ const mmio_slice = self.mmio_register.slice(u8);
+
+ if (context == 0) {
+ return @alignCast(@ptrCast(&mmio_slice[context_offset_zero]));
+ } else {
+ // Context register blocks are 0x1000 bytes apart per the PLIC specification.
+ const context_offset: usize = context - 1;
+ const ptr_offset = context_offset * 0x1000;
+ const context_ptr = &mmio_slice[context_offset_nonzero + ptr_offset];
+ return @alignCast(@ptrCast(context_ptr));
+ }
+ }
+};
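+
+// Usage sketch, assuming context 1 is this hart's S-mode context and the
+// device of interest is PLIC source 10 (both values are platform-specific):
+//
+//     try default.setPriority(10, 1);
+//     try default.setPriorityThreshold(1, 0);
+//     try default.setEnabled(1, 10, true);
+//     // In the external interrupt handler:
+//     while (try default.claim(1)) |interrupt| {
+//         // ...dispatch to the driver...
+//         try default.complete(1, interrupt);
+//     }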
diff --git a/src/lib/process.zig b/src/lib/process.zig
new file mode 100644
index 0000000..7b3e696
--- /dev/null
+++ b/src/lib/process.zig
@@ -0,0 +1,173 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const instructions = @import("instructions.zig");
+const paging = @import("paging.zig");
+const time = @import("sbi/time.zig");
+const trap = @import("trap.zig");
+
+pub const schedule_interval_millis = 10;
+
+pub var list = std.mem.zeroInit(std.DoublyLinkedList(Info), .{});
+
+const num_stack_pages = 2;
+
+var next_pid: u16 = 1;
+
+pub const Error = error{
+ EmptySchedule,
+};
+
+pub const State = enum(u8) {
+ waiting,
+ active,
+ sleeping,
+ terminated,
+};
+
+pub const Info = extern struct {
+ id: u16,
+ trap_frame: trap.Frame,
+ stack: [*]u8,
+ pc: usize,
+ page_table: *paging.Table,
+ state: State,
+
+ pub fn destroy(self: *Info) !void {
+ try paging.free(self.stack);
+ try self.page_table.unmap();
+ try paging.free(self.page_table);
+ }
+
+ pub fn satp(self: *const Info) paging.Satp {
+ return self.page_table.satp(self.id);
+ }
+};
+
+fn new(entry: usize) !Info {
+ const stack = try paging.alloc(num_stack_pages);
+ errdefer paging.free(stack) catch {};
+
+ const procmem: *paging.Table = @alignCast(@ptrCast(try paging.zeroedAlloc(1)));
+ errdefer paging.free(@ptrCast(procmem)) catch {};
+
+ var proc = Info{
+ .id = next_pid,
+ .trap_frame = std.mem.zeroInit(trap.Frame, .{}),
+ .stack = @alignCast(@ptrCast(stack)),
+ .pc = entry,
+ .page_table = procmem,
+ .state = .waiting,
+ };
+
+ const stack_top = @intFromPtr(proc.stack) + num_stack_pages * paging.page_size;
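+ // x2 is the stack pointer; point it at the top of the freshly allocated stack.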
+ proc.trap_frame.general_purpose_registers[2] = stack_top;
+
+ try procmem.mapKernel();
+
+ try procmem.map(entry, entry, paging.EntryFlags.userReadExec, 0);
+ // Not using identityMapRange because this is going to be expanded for non-relocatable binaries.
+ for (0..num_stack_pages) |page| {
+ const vaddr = @intFromPtr(proc.stack) + page * paging.page_size;
+ const paddr = @intFromPtr(proc.stack) + page * paging.page_size;
+
+ try procmem.map(vaddr, paddr, paging.EntryFlags.userReadWrite, 0);
+ }
+
+ next_pid += 1;
+ return proc;
+}
+
+pub fn next() ?*Info {
+ if (list.popFirst()) |info| {
+ list.append(info);
+ return &info.data;
+ }
+
+ return null;
+}
+
+pub fn switchTo(proc: *Info) noreturn {
+ proc.state = .active;
+
+ instructions.setSscratch(@intFromPtr(&proc.trap_frame));
+
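+ // Clear SPP (sstatus bit 8) so the upcoming sret drops to U-mode.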
+ asm volatile (
+ \\ csrr t0, sstatus
+ \\ li t1, 0x100
+ \\ not t1, t1
+ \\ and t0, t0, t1
+ \\ csrw sstatus, t0
+ ::: "t0", "t1");
+
+ instructions.setSepc(proc.pc);
+ instructions.setSatp(proc.satp());
+
+ // The TLB flush below may be redundant right after the satp write, but flush conservatively until that is verified.
+ asm volatile (
+ \\ sfence.vma
+ );
+
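+ // Restore every GPR from the trap frame; t6 (x31) holds the frame base and is therefore restored last.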
+ asm volatile (
+ \\ csrr t6, sscratch
+ \\
+ \\ ld x1, 8(t6)
+ \\ ld x2, 16(t6)
+ \\ ld x3, 24(t6)
+ \\ ld x4, 32(t6)
+ \\ ld x5, 40(t6)
+ \\ ld x6, 48(t6)
+ \\ ld x7, 56(t6)
+ \\ ld x8, 64(t6)
+ \\ ld x9, 72(t6)
+ \\ ld x10, 80(t6)
+ \\ ld x11, 88(t6)
+ \\ ld x12, 96(t6)
+ \\ ld x13, 104(t6)
+ \\ ld x14, 112(t6)
+ \\ ld x15, 120(t6)
+ \\ ld x16, 128(t6)
+ \\ ld x17, 136(t6)
+ \\ ld x18, 144(t6)
+ \\ ld x19, 152(t6)
+ \\ ld x20, 160(t6)
+ \\ ld x21, 168(t6)
+ \\ ld x22, 176(t6)
+ \\ ld x23, 184(t6)
+ \\ ld x24, 192(t6)
+ \\ ld x25, 200(t6)
+ \\ ld x26, 208(t6)
+ \\ ld x27, 216(t6)
+ \\ ld x28, 224(t6)
+ \\ ld x29, 232(t6)
+ \\ ld x30, 240(t6)
+ \\ ld x31, 248(t6)
+ \\
+ \\ sret
+ );
+
+ unreachable;
+}
+
+pub fn demo(allocator: std.mem.Allocator) !void {
+ const entry: [*]u8 = @alignCast(@ptrCast(try paging.zeroedAlloc(1)));
+ defer paging.free(@ptrCast(entry)) catch {};
+
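+ // 0x00000073 is the little-endian encoding of `ecall`, so the demo process traps into the kernel immediately.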
+ entry[0] = 0x73;
+ entry[1] = 0x00;
+ entry[2] = 0x00;
+ entry[3] = 0x00;
+
+ const proc = try new(@intFromPtr(entry));
+ const proc_node = try allocator.create(std.DoublyLinkedList(Info).Node);
+ proc_node.data = proc;
+ list.prepend(proc_node);
+
+ try time.interruptInMillis(schedule_interval_millis);
+ switchTo(&proc_node.data);
+}
diff --git a/src/lib/sbi.zig b/src/lib/sbi.zig
new file mode 100644
index 0000000..2072b90
--- /dev/null
+++ b/src/lib/sbi.zig
@@ -0,0 +1,123 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const instructions = @import("instructions.zig");
+
+pub const Error = error{
+ Success,
+ Failed,
+ NotSupported,
+ InvalidParam,
+ Denied,
+ InvalidAddr,
+ AlreadyAvail,
+ AlreadyStarted,
+ AlreadyStopped,
+ NoSharedMem,
+ Unknown,
+};
+
+pub fn errorFromCode(code: isize) Error {
+ return switch (code) {
+ 0 => Error.Success,
+ -1 => Error.Failed,
+ -2 => Error.NotSupported,
+ -3 => Error.InvalidParam,
+ -4 => Error.Denied,
+ -5 => Error.InvalidAddr,
+ -6 => Error.AlreadyAvail,
+ -7 => Error.AlreadyStarted,
+ -8 => Error.AlreadyStopped,
+ -9 => Error.NoSharedMem,
+ else => Error.Unknown,
+ };
+}
+
+const BaseExtId: usize = 0x10;
+
+const BaseFnId = enum(usize) {
+ GetSpecVer = 0,
+ GetImpId = 1,
+ GetImpVer = 2,
+ ProbeExt = 3,
+ GetMVendorId = 4,
+ GetMArchId = 5,
+ GetMImpId = 6,
+};
+
+pub const ImpId = enum(isize) {
+ Bbl = 0,
+ OpenSbi = 1,
+ Xvisor = 2,
+ Kvm = 3,
+ RustSbi = 4,
+ Diosix = 5,
+ Coffer = 6,
+ Xen = 7,
+ PolarFire = 8,
+ _,
+};
+
+pub fn specVer() !isize {
+ const ret = instructions.ecall(BaseExtId, @intFromEnum(BaseFnId.GetSpecVer), 0, 0, 0);
+ if (ret.err != 0) {
+ return errorFromCode(ret.err);
+ }
+
+ return ret.val;
+}
+
+pub fn impId() !ImpId {
+ const ret = instructions.ecall(BaseExtId, @intFromEnum(BaseFnId.GetImpId), 0, 0, 0);
+ if (ret.err != 0) {
+ return errorFromCode(ret.err);
+ }
+
+ return @enumFromInt(ret.val);
+}
+
+pub fn impVer() !isize {
+ const ret = instructions.ecall(BaseExtId, @intFromEnum(BaseFnId.GetImpVer), 0, 0, 0);
+ if (ret.err != 0) {
+ return errorFromCode(ret.err);
+ }
+
+ return ret.val;
+}
+
+pub fn probeExt(ext_id: usize) !bool {
+ const ret = instructions.ecall(BaseExtId, @intFromEnum(BaseFnId.ProbeExt), ext_id, 0, 0);
+ if (ret.err != 0) {
+ return errorFromCode(ret.err);
+ }
+
+ return ret.val != 0;
+}
+
+pub fn mVendorId() !isize {
+ const ret = instructions.ecall(BaseExtId, @intFromEnum(BaseFnId.GetMVendorId), 0, 0, 0);
+ if (ret.err != 0) {
+ return errorFromCode(ret.err);
+ }
+
+ return ret.val;
+}
+
+pub fn mArchId() !isize {
+ const ret = instructions.ecall(BaseExtId, @intFromEnum(BaseFnId.GetMArchId), 0, 0, 0);
+ if (ret.err != 0) {
+ return errorFromCode(ret.err);
+ }
+
+ return ret.val;
+}
+
+pub fn mImpId() !isize {
+ const ret = instructions.ecall(BaseExtId, @intFromEnum(BaseFnId.GetMImpId), 0, 0, 0);
+ if (ret.err != 0) {
+ return errorFromCode(ret.err);
+ }
+
+ return ret.val;
+}
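+
+// Usage sketch: identify the SBI implementation during early boot:
+//
+//     if (try impId() == .OpenSbi) {
+//         // OpenSBI-specific behavior may be relied on here.
+//     }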
diff --git a/src/lib/sbi/debug_console.zig b/src/lib/sbi/debug_console.zig
new file mode 100644
index 0000000..afd249f
--- /dev/null
+++ b/src/lib/sbi/debug_console.zig
@@ -0,0 +1,35 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const instructions = @import("../instructions.zig");
+const sbi = @import("../sbi.zig");
+
+const ExtId: usize = 0x4442434E;
+
+const FnId = enum(usize) {
+ Write = 0,
+ Read = 1,
+ WriteByte = 2,
+};
+
+pub const Writer = std.io.Writer(void, sbi.Error, write);
+
+fn write(_: void, bytes: []const u8) !usize {
+ const ret = instructions.ecall(ExtId, @intFromEnum(FnId.Write), bytes.len, @intFromPtr(bytes.ptr), 0);
+ if (ret.err != 0) {
+ return sbi.errorFromCode(ret.err);
+ }
+
+ return @intCast(ret.val);
+}
+
+pub fn writer() !Writer {
+ if (!try sbi.probeExt(ExtId)) {
+ return sbi.Error.NotSupported;
+ }
+
+ return .{ .context = {} };
+}
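+
+// Usage sketch: the returned Writer supports the std.io formatting API:
+//
+//     const w = try writer();
+//     try w.print("hello via SBI debug console ({d})\n", .{42});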
diff --git a/src/lib/sbi/legacy.zig b/src/lib/sbi/legacy.zig
new file mode 100644
index 0000000..e544367
--- /dev/null
+++ b/src/lib/sbi/legacy.zig
@@ -0,0 +1,41 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const instructions = @import("../instructions.zig");
+const sbi = @import("../sbi.zig");
+
+const ExtId = enum(usize) {
+ SetTimer = 0x00,
+ ConsolePutchar = 0x01,
+ ConsoleGetchar = 0x02,
+ ClearIpi = 0x03,
+ SendIpi = 0x04,
+ RemoteFenceI = 0x05,
+ RemoteSFenceVma = 0x06,
+ RemoteSFenceVmaAsid = 0x07,
+ Shutdown = 0x08,
+};
+
+pub const Writer = std.io.Writer(void, sbi.Error, write);
+
+fn write(_: void, bytes: []const u8) !usize {
+ for (bytes) |byte| {
+ const ret = instructions.ecall(@intFromEnum(ExtId.ConsolePutchar), 0, byte, 0, 0);
+ if (ret.err != 0) {
+ return sbi.errorFromCode(ret.err);
+ }
+ }
+
+ return bytes.len;
+}
+
+pub fn writer() !Writer {
+ if (!try sbi.probeExt(@intFromEnum(ExtId.ConsolePutchar))) {
+ return sbi.Error.NotSupported;
+ }
+
+ return .{ .context = {} };
+}
diff --git a/src/lib/sbi/sys_reset.zig b/src/lib/sbi/sys_reset.zig
new file mode 100644
index 0000000..5651fba
--- /dev/null
+++ b/src/lib/sbi/sys_reset.zig
@@ -0,0 +1,37 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const instructions = @import("../instructions.zig");
+const sbi = @import("../sbi.zig");
+
+const ExtId: usize = 0x53525354;
+
+const FnId = enum(usize) {
+ Reset = 0,
+};
+
+pub const Type = enum(u32) {
+ Shutdown = 0,
+ ColdReboot = 1,
+ WarmReboot = 2,
+};
+
+pub const Reason = enum(u32) {
+ None = 0,
+ SysErr = 1,
+};
+
+pub fn reset(@"type": Type, reset_reason: Reason) !void {
+ if (!try sbi.probeExt(ExtId)) {
+ return sbi.Error.NotSupported;
+ }
+
+ const typeId = @intFromEnum(reset_type);
+ const reasonId = @intFromEnum(reset_reason);
+
+ const ret = instructions.ecall(ExtId, @intFromEnum(FnId.Reset), ty, reason, 0);
+ if (ret.err != 0) {
+ return sbierr.errorFromCode(ret.err);
+ }
+}
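+
+// Usage sketch: power off, reporting a regular shutdown:
+//
+//     try reset(.Shutdown, .None);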
diff --git a/src/lib/sbi/time.zig b/src/lib/sbi/time.zig
new file mode 100644
index 0000000..2fad3e8
--- /dev/null
+++ b/src/lib/sbi/time.zig
@@ -0,0 +1,39 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const hwinfo = @import("../hwinfo.zig");
+const instructions = @import("../instructions.zig");
+const sbi = @import("../sbi.zig");
+
+const ExtId: usize = 0x54494d45;
+
+const FnId = enum(usize) {
+ SetTimer = 0,
+};
+
+pub const Error = error{
+ NoCpusHwInfo,
+};
+
+pub fn setTimer(stime_absolute: u64) !void {
+ if (!try sbi.probeExt(ExtId)) return sbi.Error.NotSupported;
+
+ const ret = instructions.ecall(ExtId, @intFromEnum(FnId.SetTimer), stime_absolute, 0, 0);
+ if (ret.err != 0) return sbi.errorFromCode(ret.err);
+}
+
+pub fn interruptInMillis(millis: u64) !void {
+ const stime = asm volatile (
+ \\ csrr %[stime], time
+ : [stime] "=r" (-> u64),
+ );
+
+ var cpus = hwinfo.byKind(.cpus);
+ const frequency = try cpus.next() orelse return error.NoCpusHwInfo;
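+ // frequency.value is assumed to be the timebase tick rate in Hz; dividing by
+ // 1000 before multiplying avoids overflow at the cost of sub-kHz precision.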
+ const cycles = frequency.value / 1000 * millis;
+
+ try setTimer(stime + cycles);
+}
diff --git a/src/lib/syscall.zig b/src/lib/syscall.zig
new file mode 100644
index 0000000..692507e
--- /dev/null
+++ b/src/lib/syscall.zig
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const trap = @import("trap.zig");
+
+pub const Error = error{
+ UnknownSyscall,
+};
+
+pub fn handle(trap_frame: *const trap.Frame) !void {
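+ // This kernel dispatches on the value in a0 (x10) as the syscall number.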
+ switch (trap_frame.general_purpose_registers[10]) {
+ else => return Error.UnknownSyscall,
+ }
+}
diff --git a/src/lib/trap.zig b/src/lib/trap.zig
new file mode 100644
index 0000000..4460cfd
--- /dev/null
+++ b/src/lib/trap.zig
@@ -0,0 +1,11 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+pub const Frame = extern struct {
+ general_purpose_registers: [32]usize, // Offset: 0
+ floating_point_registers: [32]usize, // Offset: 256
+ satp: usize, // Offset: 512
+ stack_pointer: *allowzero u8, // Offset: 520
+ hart_id: usize, // Offset: 528
+};