From c22a1c2214d5f3256684fa08262501cfd70be915 Mon Sep 17 00:00:00 2001 From: Himbeer Date: Thu, 1 Aug 2024 13:07:30 +0200 Subject: Flatten 'lib' directory into main 'src' tree --- .gitignore | 2 +- README.md | 8 +- src/Console.zig | 38 +++ src/TrapFrame.zig | 38 +++ src/cfg/platform/lpi4a.hwi | Bin 0 -> 65 bytes src/cfg/platform/lpi4a.hwi.license | 3 + src/cfg/platform/lpi4a.txt | 2 + src/cfg/platform/lpi4a.txt.license | 3 + src/cfg/platform/qemu.hwi | Bin 0 -> 97 bytes src/cfg/platform/qemu.hwi.license | 3 + src/cfg/platform/qemu.txt | 3 + src/cfg/platform/qemu.txt.license | 3 + src/channel.zig | 123 +++++++ src/hwi.zig | 2 +- src/hwinfo.zig | 102 ++++++ src/interrupts.zig | 301 +++++++++++++++++ src/kernel.zig | 20 +- src/lib/Console.zig | 38 --- src/lib/TrapFrame.zig | 38 --- src/lib/cfg/platform/lpi4a.hwi | Bin 65 -> 0 bytes src/lib/cfg/platform/lpi4a.hwi.license | 3 - src/lib/cfg/platform/lpi4a.txt | 2 - src/lib/cfg/platform/lpi4a.txt.license | 3 - src/lib/cfg/platform/qemu.hwi | Bin 97 -> 0 bytes src/lib/cfg/platform/qemu.hwi.license | 3 - src/lib/cfg/platform/qemu.txt | 3 - src/lib/cfg/platform/qemu.txt.license | 3 - src/lib/channel.zig | 123 ------- src/lib/hwinfo.zig | 102 ------ src/lib/interrupts.zig | 301 ----------------- src/lib/mem.zig | 261 -------------- src/lib/paging.zig | 597 --------------------------------- src/lib/plic.zig | 96 ------ src/lib/process.zig | 380 --------------------- src/lib/riscv.zig | 112 ------- src/lib/sbi.zig | 127 ------- src/lib/sbi/debug_console.zig | 34 -- src/lib/sbi/legacy.zig | 40 --- src/lib/sbi/sys_reset.zig | 37 -- src/lib/sbi/time.zig | 34 -- src/lib/syscall.zig | 200 ----------- src/mem.zig | 261 ++++++++++++++ src/paging.zig | 597 +++++++++++++++++++++++++++++++++ src/plic.zig | 96 ++++++ src/process.zig | 380 +++++++++++++++++++++ src/riscv.zig | 112 +++++++ src/sbi.zig | 127 +++++++ src/sbi/debug_console.zig | 34 ++ src/sbi/legacy.zig | 40 +++ src/sbi/sys_reset.zig | 37 ++ src/sbi/time.zig | 34 ++ 
src/syscall.zig | 200 +++++++++++ 52 files changed, 2553 insertions(+), 2553 deletions(-) create mode 100644 src/Console.zig create mode 100644 src/TrapFrame.zig create mode 100644 src/cfg/platform/lpi4a.hwi create mode 100644 src/cfg/platform/lpi4a.hwi.license create mode 100644 src/cfg/platform/lpi4a.txt create mode 100644 src/cfg/platform/lpi4a.txt.license create mode 100644 src/cfg/platform/qemu.hwi create mode 100644 src/cfg/platform/qemu.hwi.license create mode 100644 src/cfg/platform/qemu.txt create mode 100644 src/cfg/platform/qemu.txt.license create mode 100644 src/channel.zig create mode 100644 src/hwinfo.zig create mode 100644 src/interrupts.zig delete mode 100644 src/lib/Console.zig delete mode 100644 src/lib/TrapFrame.zig delete mode 100644 src/lib/cfg/platform/lpi4a.hwi delete mode 100644 src/lib/cfg/platform/lpi4a.hwi.license delete mode 100644 src/lib/cfg/platform/lpi4a.txt delete mode 100644 src/lib/cfg/platform/lpi4a.txt.license delete mode 100644 src/lib/cfg/platform/qemu.hwi delete mode 100644 src/lib/cfg/platform/qemu.hwi.license delete mode 100644 src/lib/cfg/platform/qemu.txt delete mode 100644 src/lib/cfg/platform/qemu.txt.license delete mode 100644 src/lib/channel.zig delete mode 100644 src/lib/hwinfo.zig delete mode 100644 src/lib/interrupts.zig delete mode 100644 src/lib/mem.zig delete mode 100644 src/lib/paging.zig delete mode 100644 src/lib/plic.zig delete mode 100644 src/lib/process.zig delete mode 100644 src/lib/riscv.zig delete mode 100644 src/lib/sbi.zig delete mode 100644 src/lib/sbi/debug_console.zig delete mode 100644 src/lib/sbi/legacy.zig delete mode 100644 src/lib/sbi/sys_reset.zig delete mode 100644 src/lib/sbi/time.zig delete mode 100644 src/lib/syscall.zig create mode 100644 src/mem.zig create mode 100644 src/paging.zig create mode 100644 src/plic.zig create mode 100644 src/process.zig create mode 100644 src/riscv.zig create mode 100644 src/sbi.zig create mode 100644 src/sbi/debug_console.zig create mode 100644 
src/sbi/legacy.zig create mode 100644 src/sbi/sys_reset.zig create mode 100644 src/sbi/time.zig create mode 100644 src/syscall.zig diff --git a/.gitignore b/.gitignore index 5d30384..0c695bf 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,4 @@ /.zig-*/ /hwi /hwi.o -/src/lib/cfg/init +/src/cfg/init diff --git a/README.md b/README.md index 349f792..3930240 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ zig build -Dplatform= Replace `` with the platform you want to build for. Supported options include `qemu` and `lpi4a`. -See the `src/lib/cfg/platform` directory for the full list. +See the `src/cfg/platform` directory for the full list. You can also use any other Zig build mode, e.g. `--release=fast`. @@ -59,7 +59,7 @@ You may `strip(1)` this file if you want to. ### init executable The init executable is expected to be a statically linked ELF -(with program headers) at `src/lib/cfg/init`. It is embedded in the kernel +(with program headers) at `src/cfg/init`. It is embedded in the kernel binary to avoid running (filesystem) drivers in S-mode, meaning that a kernel rebuild is required to apply modifications. 
@@ -283,14 +283,14 @@ You can use the `hwi(1)` command to convert from the textual representation to the binary representation: ``` -hwi < src/lib/cfg/platform/.txt > src/lib/cfg/platform/.hwi +hwi < src/cfg/platform/.txt > src/cfg/platform/.hwi ``` Omitting the stdin pipe allows you to type out the textual representation manually: ``` -hwi > src/lib/cfg/platform/.hwi +hwi > src/cfg/platform/.hwi ``` Press Control+D after finishing the last line (and pressing Enter) diff --git a/src/Console.zig b/src/Console.zig new file mode 100644 index 0000000..8a64406 --- /dev/null +++ b/src/Console.zig @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const std = @import("std"); +const debug_console = @import("sbi/debug_console.zig"); +const legacy = @import("sbi/legacy.zig"); + +provider: Provider, + +const Self = @This(); + +pub const Provider = union(enum) { + sbi_debug: debug_console.Writer, + sbi_legacy: legacy.Writer, +}; + +pub fn autoChoose() ?Self { + if (debug_console.writer()) |sbi_con| { + return .{ + .provider = .{ .sbi_debug = sbi_con }, + }; + } else |_| {} + if (legacy.writer()) |sbi_legacy_con| { + return .{ + .provider = .{ .sbi_legacy = sbi_legacy_con }, + }; + } else |_| {} + + return null; +} + +pub fn writer(console: *const Self) std.io.AnyWriter { + switch (console.provider) { + .sbi_debug => return console.provider.sbi_debug.any(), + .sbi_legacy => return console.provider.sbi_legacy.any(), + } +} diff --git a/src/TrapFrame.zig b/src/TrapFrame.zig new file mode 100644 index 0000000..71556ed --- /dev/null +++ b/src/TrapFrame.zig @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +general_purpose_registers: [32]usize, // Offset: 0 +floating_point_registers: [32]usize, // Offset: 256 +satp: usize, // Offset: 512 +stack_pointer: *allowzero u8, // Offset: 520 +hart_id: usize, // Offset: 528 + +const Self = @This(); + +pub fn 
setReturnValue(self: *Self, value: anytype) void { + switch (@typeInfo(@TypeOf(value))) { + .ErrorUnion => self.returnErrorUnion(value), + .ErrorSet => self.returnError(value), + else => self.returnValue(value), + } +} + +fn returnErrorUnion(self: *Self, error_union: anytype) void { + if (error_union) |value| { + self.returnValue(value); + } else |err| { + self.returnError(err); + } +} + +fn returnError(self: *Self, err: anyerror) void { + self.general_purpose_registers[11] = @intFromError(err); +} + +fn returnValue(self: *Self, value: anytype) void { + self.general_purpose_registers[11] = 0; + if (@typeInfo(@TypeOf(value)) != .Void) { + self.general_purpose_registers[10] = @bitCast(value); + } +} diff --git a/src/cfg/platform/lpi4a.hwi b/src/cfg/platform/lpi4a.hwi new file mode 100644 index 0000000..f52d767 Binary files /dev/null and b/src/cfg/platform/lpi4a.hwi differ diff --git a/src/cfg/platform/lpi4a.hwi.license b/src/cfg/platform/lpi4a.hwi.license new file mode 100644 index 0000000..b2d2485 --- /dev/null +++ b/src/cfg/platform/lpi4a.hwi.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2024 Himbeer + +SPDX-License-Identifier: CC0-1.0 diff --git a/src/cfg/platform/lpi4a.txt b/src/cfg/platform/lpi4a.txt new file mode 100644 index 0000000..6213698 --- /dev/null +++ b/src/cfg/platform/lpi4a.txt @@ -0,0 +1,2 @@ +cpus 0 0 0x2dc6c0 +plic 0xffd8000000 0x4000000 diff --git a/src/cfg/platform/lpi4a.txt.license b/src/cfg/platform/lpi4a.txt.license new file mode 100644 index 0000000..b2d2485 --- /dev/null +++ b/src/cfg/platform/lpi4a.txt.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2024 Himbeer + +SPDX-License-Identifier: CC0-1.0 diff --git a/src/cfg/platform/qemu.hwi b/src/cfg/platform/qemu.hwi new file mode 100644 index 0000000..a951898 Binary files /dev/null and b/src/cfg/platform/qemu.hwi differ diff --git a/src/cfg/platform/qemu.hwi.license b/src/cfg/platform/qemu.hwi.license new file mode 100644 index 0000000..b2d2485 --- /dev/null +++ 
b/src/cfg/platform/qemu.hwi.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2024 Himbeer + +SPDX-License-Identifier: CC0-1.0 diff --git a/src/cfg/platform/qemu.txt b/src/cfg/platform/qemu.txt new file mode 100644 index 0000000..352e5d4 --- /dev/null +++ b/src/cfg/platform/qemu.txt @@ -0,0 +1,3 @@ +cpus 0 0 0x989680 +plic 0xc000000 0x600000 +pcie 0x30000000 0x10000000 diff --git a/src/cfg/platform/qemu.txt.license b/src/cfg/platform/qemu.txt.license new file mode 100644 index 0000000..b2d2485 --- /dev/null +++ b/src/cfg/platform/qemu.txt.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2024 Himbeer + +SPDX-License-Identifier: CC0-1.0 diff --git a/src/channel.zig b/src/channel.zig new file mode 100644 index 0000000..7687afa --- /dev/null +++ b/src/channel.zig @@ -0,0 +1,123 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const std = @import("std"); +const Allocator = std.mem.Allocator; + +pub const Error = error{ + NotJoined, + WouldBlock, +}; + +pub const Message = struct { + bytes: []const u8, + refcount: usize = 1, + + fn addReference(self: *Message) !void { + self.refcount = try std.math.add(usize, self.refcount, 1); + } + + fn dropReference(self: *Message) void { + self.refcount -= 1; + if (self.refcount == 0) { + defer alloc.free(self.bytes); + defer alloc.destroy(self); + } + } +}; + +pub const Messages = std.TailQueue(*Message); +var alloc: Allocator = undefined; + +const Queues = std.AutoArrayHashMap(usize, Messages); + +const Processes = std.AutoArrayHashMap(usize, Queues); +var joined: Processes = undefined; + +pub fn join(pid: usize, id: usize) !void { + const queues = try joined.getOrPut(pid); + if (!queues.found_existing) { + initProcess(queues.value_ptr); + } + + const messages = try queues.value_ptr.getOrPut(id); + if (!messages.found_existing) { + initQueue(messages.value_ptr); + } +} + +pub fn leave(pid: usize, id: usize) void { + const queues = joined.getPtr(pid) orelse return; + 
freeQueues(queues); + queues.clearAndFree(); + _ = queues.swapRemove(id); +} + +// The channel takes ownership of `bytes`. +pub fn pass(id: usize, bytes: []const u8) !void { + const message = try alloc.create(Message); + defer message.dropReference(); + + message.* = .{ .bytes = bytes }; + + var it = joined.iterator(); + while (it.next()) |queues| { + if (queues.value_ptr.getPtr(id)) |messages| { + try message.addReference(); + errdefer message.dropReference(); + + try enqueue(messages, message); + } + } +} + +pub fn receive(pid: usize, id: usize, buffer: []u8) !usize { + const queues = joined.getPtr(pid) orelse return Error.NotJoined; + const messages = queues.getPtr(id) orelse return Error.NotJoined; + const message = messages.popFirst() orelse return Error.WouldBlock; + + defer alloc.destroy(message); + defer message.data.dropReference(); + + const len = @min(buffer.len, message.data.bytes.len); + @memcpy(buffer[0..len], message.data.bytes[0..len]); + + return len; +} + +fn initQueue(messages: *Messages) void { + messages.* = .{}; +} + +fn initProcess(queues: *Queues) void { + queues.* = Queues.init(alloc); +} + +fn freeQueues(queues: *Queues) void { + var it = queues.iterator(); + while (it.next()) |messages| { + freeMessages(messages.value_ptr); + } +} + +fn freeMessages(messages: *Messages) void { + while (messages.popFirst()) |message| { + message.data.dropReference(); + } +} + +fn enqueue(messages: *Messages, message: *Message) !void { + const node = try alloc.create(Messages.Node); + node.data = message; + messages.append(node); +} + +pub fn init(with_allocator: Allocator) void { + joined = Processes.init(with_allocator); + alloc = with_allocator; +} + +pub fn allocator() Allocator { + return alloc; +} diff --git a/src/hwi.zig b/src/hwi.zig index b8736bb..8157edc 100644 --- a/src/hwi.zig +++ b/src/hwi.zig @@ -4,7 +4,7 @@ const builtin = @import("builtin"); const std = @import("std"); -const hwinfo = @import("lib/hwinfo.zig"); +const hwinfo = 
@import("hwinfo.zig"); pub fn main() !void { const stdin = std.io.getStdIn(); diff --git a/src/hwinfo.zig b/src/hwinfo.zig new file mode 100644 index 0000000..62eacf2 --- /dev/null +++ b/src/hwinfo.zig @@ -0,0 +1,102 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const config = @import("config"); +const std = @import("std"); + +const hw_info = @embedFile("cfg/platform/" ++ config.platform ++ ".hwi"); +const devices = std.io.FixedBufferStream([]const u8){ .buffer = hw_info, .pos = 0 }; + +pub const ParseError = error{ + MissingKind, + MissingRegAddr, + MissingRegLen, + UnknownDevKind, +}; + +pub const DevKind = enum(u32) { + cpus, + plic, + pcie, + pci, + _, + + pub fn parse(buf: []const u8) !DevKind { + if (std.mem.eql(u8, buf, "cpus")) { + return .cpus; + } else if (std.mem.eql(u8, buf, "plic")) { + return .plic; + } else if (std.mem.eql(u8, buf, "pcie")) { + return .pcie; + } else if (std.mem.eql(u8, buf, "pci")) { + return .pci; + } + + return ParseError.UnknownDevKind; + } +}; + +pub const Dev = extern struct { + kind: DevKind, + reg: Reg, + value: u64, + + pub fn parse(buf: []const u8) !Dev { + var columns = std.mem.tokenizeScalar(u8, buf, ' '); + const kind_buf = columns.next() orelse return ParseError.MissingKind; + const reg_addr_buf = columns.next() orelse return ParseError.MissingRegAddr; + const reg_len_buf = columns.next() orelse return ParseError.MissingRegLen; + const value_buf = columns.next() orelse "0"; + + return .{ + .kind = try DevKind.parse(kind_buf), + .reg = .{ + .addr = try std.fmt.parseUnsigned(u64, reg_addr_buf, 0), + .len = try std.fmt.parseUnsigned(u64, reg_len_buf, 0), + }, + .value = try std.fmt.parseUnsigned(u64, value_buf, 0), + }; + } +}; + +pub const Reg = extern struct { + addr: u64, + len: u64, + + pub fn slice(self: Reg, comptime T: type) []volatile T { + const ptr: [*]volatile T = @ptrFromInt(self.addr); + return ptr[0 .. 
self.len / @sizeOf(T)]; + } +}; + +pub const ByKind = struct { + kind: DevKind, + stream: std.io.FixedBufferStream([]const u8), + big_endian: bool, + + pub fn init(kind: DevKind, stream: std.io.FixedBufferStream([]const u8)) !ByKind { + var fbs = stream; + const endian = try fbs.reader().readByte(); + return .{ .kind = kind, .stream = fbs, .big_endian = endian != 0 }; + } + + pub fn next(it: *ByKind) !?Dev { + const endian: std.builtin.Endian = if (it.big_endian) .big else .little; + + const reader = it.stream.reader(); + while (reader.readStructEndian(Dev, endian)) |device| { + if (device.kind == it.kind) return device; + } else |err| return err; + + return null; + } + + pub fn reset(it: *ByKind) !void { + try it.stream.seekTo(1); + } +}; + +pub fn byKind(kind: DevKind) !ByKind { + return ByKind.init(kind, devices); +} diff --git a/src/interrupts.zig b/src/interrupts.zig new file mode 100644 index 0000000..66c8acd --- /dev/null +++ b/src/interrupts.zig @@ -0,0 +1,301 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const std = @import("std"); +const Console = @import("Console.zig"); +const TrapFrame = @import("TrapFrame.zig"); +const plic = @import("plic.zig"); +const process = @import("process.zig"); +const riscv = @import("riscv.zig"); +const syscall = @import("syscall.zig"); + +pub var trap_frame: TrapFrame = undefined; + +pub const SupervisorTrapVector = packed struct(usize) { + pub const Mode = enum(u2) { + direct, + vectored, + }; + + mode: u2, + base_addr: u62, + + pub fn fromBaseAddr(mode: Mode, base_addr: usize) SupervisorTrapVector { + return .{ + .mode = @intFromEnum(mode), + .base_addr = base_addr >> 2, + }; + } +}; + +pub const Enable = packed struct(usize) { + u_software: u1, + s_software: u1, + reserved0: u2, + u_timer: u1, + s_timer: u1, + reserved1: u2, + u_external: u1, + s_external: u1, + reserved2: u54, + + pub const none = std.mem.zeroInit(Enable, .{}); + pub const all = std.mem.zeroInit(Enable, 
.{ + .u_software = 1, + .s_software = 1, + .u_timer = 1, + .s_timer = 1, + .u_external = 1, + .s_external = 1, + }); +}; + +pub const Cause = packed struct(usize) { + num: u63, + @"async": u1, + + pub fn isAsync(self: Cause) bool { + return self.@"async" == 1; + } +}; + +pub const AsyncCause = enum(u63) { + user_software, + supervisor_software, + user_timer = 4, + supervisor_timer, + user_external = 8, + supervisor_external, + _, +}; + +pub const SyncCause = enum(u63) { + instruction_address_misaligned, + instruction_access_fault, + illegal_instruction, + breakpoint, + load_access_fault = 5, + amo_address_misaligned, + store_or_amo_access_fault, + ecall, + instruction_page_fault = 12, + load_page_fault, + store_or_amo_page_fault = 15, + _, +}; + +export fn handleTrap(epc: usize, cause: Cause, frame: *TrapFrame) usize { + const w = Console.autoChoose().?.writer(); + + const status = riscv.sstatus.read(); + + if (cause.isAsync()) { + switch (@as(AsyncCause, @enumFromInt(cause.num))) { + .supervisor_software => w.print("Hart {d}: Software interrupt\r\n", .{frame.hart_id}) catch unreachable, + .supervisor_timer => { + if (status.previous_privilege == .user) { + // Trapped from U-mode, update pc for next time slice. + // + // We can simply use the last node of the process list here + // because the scheduler moves a process to the end of the queue + // before returning into it. + + process.list.last.?.data.pc = epc; + process.list.last.?.data.state = .waiting; + + process.schedule() catch |err| { + std.debug.panic("schedule error: {}", .{err}); + }; + } + + // Don't interrupt kernel code, it may never run otherwise. 
+ }, + .supervisor_external => { + const context: u14 = @intCast(2 * frame.hart_id + 1); + + const external_cause = plic.default.claim(context) catch |err| { + std.debug.panic("plic claim error: {}", .{err}); + }; + if (external_cause) |source| { + w.print("Hart {d}: External interrupt: 0x{x}\r\n", .{ frame.hart_id, source }) catch unreachable; + handleExternal(source); + plic.default.complete(context, source) catch |err| { + std.debug.panic("plic complete error: {}", .{err}); + }; + } + }, + else => { + std.debug.panic("unhandled interrupt {}", .{cause.num}); + }, + } + } else { + const pid = if (status.previous_privilege == .user) blk: { + const proc = &process.list.last.?.data; + proc.pc = epc; + proc.state = .waiting; + break :blk proc.id; + } else 0; + + switch (@as(SyncCause, @enumFromInt(cause.num))) { + .illegal_instruction => { + std.debug.panic("illegal instruction", .{}); + }, + .instruction_access_fault => { + std.debug.panic("instruction access fault", .{}); + }, + .load_access_fault => { + std.debug.panic("load access fault", .{}); + }, + .store_or_amo_access_fault => { + std.debug.panic("store/amo access fault", .{}); + }, + .ecall => { + const proc = &process.list.last.?.data; + syscall.handler(proc, frame) catch |err| switch (err) { + syscall.HandleError.UnknownSyscall => { + const a7 = frame.general_purpose_registers[17]; + w.print("Hart {d}, PID = {d}: Unknown syscall, a7 = 0x{x:0>16}\r\n", .{ frame.hart_id, pid, a7 }) catch unreachable; + }, + }; + + return epc + 4; + }, + .instruction_page_fault => { + std.debug.panic("instruction page fault", .{}); + }, + .load_page_fault => { + std.debug.panic("load page fault", .{}); + }, + .store_or_amo_page_fault => { + std.debug.panic("store/amo page fault", .{}); + }, + else => { + std.debug.panic("unhandled exception {d}", .{cause.num}); + }, + } + } + + return epc; +} + +fn handleExternal(interrupt: ?u10) void { + _ = &interrupt; +} + +export fn supervisorTrapVector() align(4) callconv(.Naked) noreturn 
{ + asm volatile ( + \\ csrrw t6, sscratch, t6 + \\ + \\ sd x1, 8(t6) + \\ sd x2, 16(t6) + \\ sd x3, 24(t6) + \\ sd x4, 32(t6) + \\ sd x5, 40(t6) + \\ sd x6, 48(t6) + \\ sd x7, 56(t6) + \\ sd x8, 64(t6) + \\ sd x9, 72(t6) + \\ sd x10, 80(t6) + \\ sd x11, 88(t6) + \\ sd x12, 96(t6) + \\ sd x13, 104(t6) + \\ sd x14, 112(t6) + \\ sd x15, 120(t6) + \\ sd x16, 128(t6) + \\ sd x17, 136(t6) + \\ sd x18, 144(t6) + \\ sd x19, 152(t6) + \\ sd x20, 160(t6) + \\ sd x21, 168(t6) + \\ sd x22, 176(t6) + \\ sd x23, 184(t6) + \\ sd x24, 192(t6) + \\ sd x25, 200(t6) + \\ sd x26, 208(t6) + \\ sd x27, 216(t6) + \\ sd x28, 224(t6) + \\ sd x29, 232(t6) + \\ sd x30, 240(t6) + \\ + \\ mv t5, t6 + \\ csrr t6, sscratch + \\ + \\ sd x31, 248(t5) + \\ + \\ csrw sscratch, t5 + \\ + \\ .option push + \\ .option norelax + \\ la gp, _global_pointer + \\ .option pop + \\ + \\ csrr a0, sepc + \\ csrr a1, scause + \\ mv a2, t5 + \\ + \\ la sp, _stvec_stack_end + \\ call handleTrap + \\ + \\ csrw sepc, a0 + \\ + \\ csrr t6, sscratch + \\ + \\ ld x1, 8(t6) + \\ ld x2, 16(t6) + \\ ld x3, 24(t6) + \\ ld x4, 32(t6) + \\ ld x5, 40(t6) + \\ ld x6, 48(t6) + \\ ld x7, 56(t6) + \\ ld x8, 64(t6) + \\ ld x9, 72(t6) + \\ ld x10, 80(t6) + \\ ld x11, 88(t6) + \\ ld x12, 96(t6) + \\ ld x13, 104(t6) + \\ ld x14, 112(t6) + \\ ld x15, 120(t6) + \\ ld x16, 128(t6) + \\ ld x17, 136(t6) + \\ ld x18, 144(t6) + \\ ld x19, 152(t6) + \\ ld x20, 160(t6) + \\ ld x21, 168(t6) + \\ ld x22, 176(t6) + \\ ld x23, 184(t6) + \\ ld x24, 192(t6) + \\ ld x25, 200(t6) + \\ ld x26, 208(t6) + \\ ld x27, 216(t6) + \\ ld x28, 224(t6) + \\ ld x29, 232(t6) + \\ ld x30, 240(t6) + \\ ld x31, 248(t6) + \\ + \\ sret + ); +} + +pub fn init(hart_id: usize) void { + trap_frame = .{ + .general_purpose_registers = [_]usize{0} ** 32, + .floating_point_registers = [_]usize{0} ** 32, + .satp = 0, + .stack_pointer = @ptrFromInt(riscv.stackPointer()), + .hart_id = hart_id, + }; + + asm volatile ( + \\ csrw sscratch, %[trapframe] + \\ + \\ la t0, 
supervisorTrapVector + \\ csrw stvec, t0 + \\ + \\ csrr t0, sstatus + \\ ori t0, t0, 2 + \\ csrw sstatus, t0 + : + : [trapframe] "r" (&trap_frame), + ); +} diff --git a/src/kernel.zig b/src/kernel.zig index b415d17..41ef9a6 100644 --- a/src/kernel.zig +++ b/src/kernel.zig @@ -3,16 +3,16 @@ // SPDX-License-Identifier: AGPL-3.0-or-later const std = @import("std"); -const Console = @import("lib/Console.zig"); -const hwinfo = @import("lib/hwinfo.zig"); -const interrupts = @import("lib/interrupts.zig"); -const mem = @import("lib/mem.zig"); -const paging = @import("lib/paging.zig"); -const plic = @import("lib/plic.zig"); -const process = @import("lib/process.zig"); -const riscv = @import("lib/riscv.zig"); - -const init = @embedFile("lib/cfg/init"); +const Console = @import("Console.zig"); +const hwinfo = @import("hwinfo.zig"); +const interrupts = @import("interrupts.zig"); +const mem = @import("mem.zig"); +const paging = @import("paging.zig"); +const plic = @import("plic.zig"); +const process = @import("process.zig"); +const riscv = @import("riscv.zig"); + +const init = @embedFile("cfg/init"); const Error = error{ HartIdOutOfRange, diff --git a/src/lib/Console.zig b/src/lib/Console.zig deleted file mode 100644 index 8a64406..0000000 --- a/src/lib/Console.zig +++ /dev/null @@ -1,38 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const std = @import("std"); -const debug_console = @import("sbi/debug_console.zig"); -const legacy = @import("sbi/legacy.zig"); - -provider: Provider, - -const Self = @This(); - -pub const Provider = union(enum) { - sbi_debug: debug_console.Writer, - sbi_legacy: legacy.Writer, -}; - -pub fn autoChoose() ?Self { - if (debug_console.writer()) |sbi_con| { - return .{ - .provider = .{ .sbi_debug = sbi_con }, - }; - } else |_| {} - if (legacy.writer()) |sbi_legacy_con| { - return .{ - .provider = .{ .sbi_legacy = sbi_legacy_con }, - }; - } else |_| {} - - return null; -} - -pub fn writer(console: 
*const Self) std.io.AnyWriter { - switch (console.provider) { - .sbi_debug => return console.provider.sbi_debug.any(), - .sbi_legacy => return console.provider.sbi_legacy.any(), - } -} diff --git a/src/lib/TrapFrame.zig b/src/lib/TrapFrame.zig deleted file mode 100644 index 71556ed..0000000 --- a/src/lib/TrapFrame.zig +++ /dev/null @@ -1,38 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -general_purpose_registers: [32]usize, // Offset: 0 -floating_point_registers: [32]usize, // Offset: 256 -satp: usize, // Offset: 512 -stack_pointer: *allowzero u8, // Offset: 520 -hart_id: usize, // Offset: 528 - -const Self = @This(); - -pub fn setReturnValue(self: *Self, value: anytype) void { - switch (@typeInfo(@TypeOf(value))) { - .ErrorUnion => self.returnErrorUnion(value), - .ErrorSet => self.returnError(value), - else => self.returnValue(value), - } -} - -fn returnErrorUnion(self: *Self, error_union: anytype) void { - if (error_union) |value| { - self.returnValue(value); - } else |err| { - self.returnError(err); - } -} - -fn returnError(self: *Self, err: anyerror) void { - self.general_purpose_registers[11] = @intFromError(err); -} - -fn returnValue(self: *Self, value: anytype) void { - self.general_purpose_registers[11] = 0; - if (@typeInfo(@TypeOf(value)) != .Void) { - self.general_purpose_registers[10] = @bitCast(value); - } -} diff --git a/src/lib/cfg/platform/lpi4a.hwi b/src/lib/cfg/platform/lpi4a.hwi deleted file mode 100644 index f52d767..0000000 Binary files a/src/lib/cfg/platform/lpi4a.hwi and /dev/null differ diff --git a/src/lib/cfg/platform/lpi4a.hwi.license b/src/lib/cfg/platform/lpi4a.hwi.license deleted file mode 100644 index b2d2485..0000000 --- a/src/lib/cfg/platform/lpi4a.hwi.license +++ /dev/null @@ -1,3 +0,0 @@ -SPDX-FileCopyrightText: 2024 Himbeer - -SPDX-License-Identifier: CC0-1.0 diff --git a/src/lib/cfg/platform/lpi4a.txt b/src/lib/cfg/platform/lpi4a.txt deleted file mode 100644 index 
6213698..0000000 --- a/src/lib/cfg/platform/lpi4a.txt +++ /dev/null @@ -1,2 +0,0 @@ -cpus 0 0 0x2dc6c0 -plic 0xffd8000000 0x4000000 diff --git a/src/lib/cfg/platform/lpi4a.txt.license b/src/lib/cfg/platform/lpi4a.txt.license deleted file mode 100644 index b2d2485..0000000 --- a/src/lib/cfg/platform/lpi4a.txt.license +++ /dev/null @@ -1,3 +0,0 @@ -SPDX-FileCopyrightText: 2024 Himbeer - -SPDX-License-Identifier: CC0-1.0 diff --git a/src/lib/cfg/platform/qemu.hwi b/src/lib/cfg/platform/qemu.hwi deleted file mode 100644 index a951898..0000000 Binary files a/src/lib/cfg/platform/qemu.hwi and /dev/null differ diff --git a/src/lib/cfg/platform/qemu.hwi.license b/src/lib/cfg/platform/qemu.hwi.license deleted file mode 100644 index b2d2485..0000000 --- a/src/lib/cfg/platform/qemu.hwi.license +++ /dev/null @@ -1,3 +0,0 @@ -SPDX-FileCopyrightText: 2024 Himbeer - -SPDX-License-Identifier: CC0-1.0 diff --git a/src/lib/cfg/platform/qemu.txt b/src/lib/cfg/platform/qemu.txt deleted file mode 100644 index 352e5d4..0000000 --- a/src/lib/cfg/platform/qemu.txt +++ /dev/null @@ -1,3 +0,0 @@ -cpus 0 0 0x989680 -plic 0xc000000 0x600000 -pcie 0x30000000 0x10000000 diff --git a/src/lib/cfg/platform/qemu.txt.license b/src/lib/cfg/platform/qemu.txt.license deleted file mode 100644 index b2d2485..0000000 --- a/src/lib/cfg/platform/qemu.txt.license +++ /dev/null @@ -1,3 +0,0 @@ -SPDX-FileCopyrightText: 2024 Himbeer - -SPDX-License-Identifier: CC0-1.0 diff --git a/src/lib/channel.zig b/src/lib/channel.zig deleted file mode 100644 index 7687afa..0000000 --- a/src/lib/channel.zig +++ /dev/null @@ -1,123 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const std = @import("std"); -const Allocator = std.mem.Allocator; - -pub const Error = error{ - NotJoined, - WouldBlock, -}; - -pub const Message = struct { - bytes: []const u8, - refcount: usize = 1, - - fn addReference(self: *Message) !void { - self.refcount = try std.math.add(usize, 
self.refcount, 1); - } - - fn dropReference(self: *Message) void { - self.refcount -= 1; - if (self.refcount == 0) { - defer alloc.free(self.bytes); - defer alloc.destroy(self); - } - } -}; - -pub const Messages = std.TailQueue(*Message); -var alloc: Allocator = undefined; - -const Queues = std.AutoArrayHashMap(usize, Messages); - -const Processes = std.AutoArrayHashMap(usize, Queues); -var joined: Processes = undefined; - -pub fn join(pid: usize, id: usize) !void { - const queues = try joined.getOrPut(pid); - if (!queues.found_existing) { - initProcess(queues.value_ptr); - } - - const messages = try queues.value_ptr.getOrPut(id); - if (!messages.found_existing) { - initQueue(messages.value_ptr); - } -} - -pub fn leave(pid: usize, id: usize) void { - const queues = joined.getPtr(pid) orelse return; - freeQueues(queues); - queues.clearAndFree(); - _ = queues.swapRemove(id); -} - -// The channel takes ownership of `bytes`. -pub fn pass(id: usize, bytes: []const u8) !void { - const message = try alloc.create(Message); - defer message.dropReference(); - - message.* = .{ .bytes = bytes }; - - var it = joined.iterator(); - while (it.next()) |queues| { - if (queues.value_ptr.getPtr(id)) |messages| { - try message.addReference(); - errdefer message.dropReference(); - - try enqueue(messages, message); - } - } -} - -pub fn receive(pid: usize, id: usize, buffer: []u8) !usize { - const queues = joined.getPtr(pid) orelse return Error.NotJoined; - const messages = queues.getPtr(id) orelse return Error.NotJoined; - const message = messages.popFirst() orelse return Error.WouldBlock; - - defer alloc.destroy(message); - defer message.data.dropReference(); - - const len = @min(buffer.len, message.data.bytes.len); - @memcpy(buffer[0..len], message.data.bytes[0..len]); - - return len; -} - -fn initQueue(messages: *Messages) void { - messages.* = .{}; -} - -fn initProcess(queues: *Queues) void { - queues.* = Queues.init(alloc); -} - -fn freeQueues(queues: *Queues) void { - var it = 
queues.iterator(); - while (it.next()) |messages| { - freeMessages(messages.value_ptr); - } -} - -fn freeMessages(messages: *Messages) void { - while (messages.popFirst()) |message| { - message.data.dropReference(); - } -} - -fn enqueue(messages: *Messages, message: *Message) !void { - const node = try alloc.create(Messages.Node); - node.data = message; - messages.append(node); -} - -pub fn init(with_allocator: Allocator) void { - joined = Processes.init(with_allocator); - alloc = with_allocator; -} - -pub fn allocator() Allocator { - return alloc; -} diff --git a/src/lib/hwinfo.zig b/src/lib/hwinfo.zig deleted file mode 100644 index 62eacf2..0000000 --- a/src/lib/hwinfo.zig +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const config = @import("config"); -const std = @import("std"); - -const hw_info = @embedFile("cfg/platform/" ++ config.platform ++ ".hwi"); -const devices = std.io.FixedBufferStream([]const u8){ .buffer = hw_info, .pos = 0 }; - -pub const ParseError = error{ - MissingKind, - MissingRegAddr, - MissingRegLen, - UnknownDevKind, -}; - -pub const DevKind = enum(u32) { - cpus, - plic, - pcie, - pci, - _, - - pub fn parse(buf: []const u8) !DevKind { - if (std.mem.eql(u8, buf, "cpus")) { - return .cpus; - } else if (std.mem.eql(u8, buf, "plic")) { - return .plic; - } else if (std.mem.eql(u8, buf, "pcie")) { - return .pcie; - } else if (std.mem.eql(u8, buf, "pci")) { - return .pci; - } - - return ParseError.UnknownDevKind; - } -}; - -pub const Dev = extern struct { - kind: DevKind, - reg: Reg, - value: u64, - - pub fn parse(buf: []const u8) !Dev { - var columns = std.mem.tokenizeScalar(u8, buf, ' '); - const kind_buf = columns.next() orelse return ParseError.MissingKind; - const reg_addr_buf = columns.next() orelse return ParseError.MissingRegAddr; - const reg_len_buf = columns.next() orelse return ParseError.MissingRegLen; - const value_buf = columns.next() orelse "0"; - - 
return .{ - .kind = try DevKind.parse(kind_buf), - .reg = .{ - .addr = try std.fmt.parseUnsigned(u64, reg_addr_buf, 0), - .len = try std.fmt.parseUnsigned(u64, reg_len_buf, 0), - }, - .value = try std.fmt.parseUnsigned(u64, value_buf, 0), - }; - } -}; - -pub const Reg = extern struct { - addr: u64, - len: u64, - - pub fn slice(self: Reg, comptime T: type) []volatile T { - const ptr: [*]volatile T = @ptrFromInt(self.addr); - return ptr[0 .. self.len / @sizeOf(T)]; - } -}; - -pub const ByKind = struct { - kind: DevKind, - stream: std.io.FixedBufferStream([]const u8), - big_endian: bool, - - pub fn init(kind: DevKind, stream: std.io.FixedBufferStream([]const u8)) !ByKind { - var fbs = stream; - const endian = try fbs.reader().readByte(); - return .{ .kind = kind, .stream = fbs, .big_endian = endian != 0 }; - } - - pub fn next(it: *ByKind) !?Dev { - const endian: std.builtin.Endian = if (it.big_endian) .big else .little; - - const reader = it.stream.reader(); - while (reader.readStructEndian(Dev, endian)) |device| { - if (device.kind == it.kind) return device; - } else |err| return err; - - return null; - } - - pub fn reset(it: *ByKind) !void { - try it.stream.seekTo(1); - } -}; - -pub fn byKind(kind: DevKind) !ByKind { - return ByKind.init(kind, devices); -} diff --git a/src/lib/interrupts.zig b/src/lib/interrupts.zig deleted file mode 100644 index 66c8acd..0000000 --- a/src/lib/interrupts.zig +++ /dev/null @@ -1,301 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const std = @import("std"); -const Console = @import("Console.zig"); -const TrapFrame = @import("TrapFrame.zig"); -const plic = @import("plic.zig"); -const process = @import("process.zig"); -const riscv = @import("riscv.zig"); -const syscall = @import("syscall.zig"); - -pub var trap_frame: TrapFrame = undefined; - -pub const SupervisorTrapVector = packed struct(usize) { - pub const Mode = enum(u2) { - direct, - vectored, - }; - - mode: u2, - base_addr: 
u62, - - pub fn fromBaseAddr(mode: Mode, base_addr: usize) SupervisorTrapVector { - return .{ - .mode = @intFromEnum(mode), - .base_addr = base_addr >> 2, - }; - } -}; - -pub const Enable = packed struct(usize) { - u_software: u1, - s_software: u1, - reserved0: u2, - u_timer: u1, - s_timer: u1, - reserved1: u2, - u_external: u1, - s_external: u1, - reserved2: u54, - - pub const none = std.mem.zeroInit(Enable, .{}); - pub const all = std.mem.zeroInit(Enable, .{ - .u_software = 1, - .s_software = 1, - .u_timer = 1, - .s_timer = 1, - .u_external = 1, - .s_external = 1, - }); -}; - -pub const Cause = packed struct(usize) { - num: u63, - @"async": u1, - - pub fn isAsync(self: Cause) bool { - return self.@"async" == 1; - } -}; - -pub const AsyncCause = enum(u63) { - user_software, - supervisor_software, - user_timer = 4, - supervisor_timer, - user_external = 8, - supervisor_external, - _, -}; - -pub const SyncCause = enum(u63) { - instruction_address_misaligned, - instruction_access_fault, - illegal_instruction, - breakpoint, - load_access_fault = 5, - amo_address_misaligned, - store_or_amo_access_fault, - ecall, - instruction_page_fault = 12, - load_page_fault, - store_or_amo_page_fault = 15, - _, -}; - -export fn handleTrap(epc: usize, cause: Cause, frame: *TrapFrame) usize { - const w = Console.autoChoose().?.writer(); - - const status = riscv.sstatus.read(); - - if (cause.isAsync()) { - switch (@as(AsyncCause, @enumFromInt(cause.num))) { - .supervisor_software => w.print("Hart {d}: Software interrupt\r\n", .{frame.hart_id}) catch unreachable, - .supervisor_timer => { - if (status.previous_privilege == .user) { - // Trapped from U-mode, update pc for next time slice. - // - // We can simply use the last node of the process list here - // because the scheduler moves a process to the end of the queue - // before returning into it. 
- - process.list.last.?.data.pc = epc; - process.list.last.?.data.state = .waiting; - - process.schedule() catch |err| { - std.debug.panic("schedule error: {}", .{err}); - }; - } - - // Don't interrupt kernel code, it may never run otherwise. - }, - .supervisor_external => { - const context: u14 = @intCast(2 * frame.hart_id + 1); - - const external_cause = plic.default.claim(context) catch |err| { - std.debug.panic("plic claim error: {}", .{err}); - }; - if (external_cause) |source| { - w.print("Hart {d}: External interrupt: 0x{x}\r\n", .{ frame.hart_id, source }) catch unreachable; - handleExternal(source); - plic.default.complete(context, source) catch |err| { - std.debug.panic("plic complete error: {}", .{err}); - }; - } - }, - else => { - std.debug.panic("unhandled interrupt {}", .{cause.num}); - }, - } - } else { - const pid = if (status.previous_privilege == .user) blk: { - const proc = &process.list.last.?.data; - proc.pc = epc; - proc.state = .waiting; - break :blk proc.id; - } else 0; - - switch (@as(SyncCause, @enumFromInt(cause.num))) { - .illegal_instruction => { - std.debug.panic("illegal instruction", .{}); - }, - .instruction_access_fault => { - std.debug.panic("instruction access fault", .{}); - }, - .load_access_fault => { - std.debug.panic("load access fault", .{}); - }, - .store_or_amo_access_fault => { - std.debug.panic("store/amo access fault", .{}); - }, - .ecall => { - const proc = &process.list.last.?.data; - syscall.handler(proc, frame) catch |err| switch (err) { - syscall.HandleError.UnknownSyscall => { - const a7 = frame.general_purpose_registers[17]; - w.print("Hart {d}, PID = {d}: Unknown syscall, a7 = 0x{x:0>16}\r\n", .{ frame.hart_id, pid, a7 }) catch unreachable; - }, - }; - - return epc + 4; - }, - .instruction_page_fault => { - std.debug.panic("instruction page fault", .{}); - }, - .load_page_fault => { - std.debug.panic("load page fault", .{}); - }, - .store_or_amo_page_fault => { - std.debug.panic("store/amo page fault", .{}); - 
}, - else => { - std.debug.panic("unhandled exception {d}", .{cause.num}); - }, - } - } - - return epc; -} - -fn handleExternal(interrupt: ?u10) void { - _ = &interrupt; -} - -export fn supervisorTrapVector() align(4) callconv(.Naked) noreturn { - asm volatile ( - \\ csrrw t6, sscratch, t6 - \\ - \\ sd x1, 8(t6) - \\ sd x2, 16(t6) - \\ sd x3, 24(t6) - \\ sd x4, 32(t6) - \\ sd x5, 40(t6) - \\ sd x6, 48(t6) - \\ sd x7, 56(t6) - \\ sd x8, 64(t6) - \\ sd x9, 72(t6) - \\ sd x10, 80(t6) - \\ sd x11, 88(t6) - \\ sd x12, 96(t6) - \\ sd x13, 104(t6) - \\ sd x14, 112(t6) - \\ sd x15, 120(t6) - \\ sd x16, 128(t6) - \\ sd x17, 136(t6) - \\ sd x18, 144(t6) - \\ sd x19, 152(t6) - \\ sd x20, 160(t6) - \\ sd x21, 168(t6) - \\ sd x22, 176(t6) - \\ sd x23, 184(t6) - \\ sd x24, 192(t6) - \\ sd x25, 200(t6) - \\ sd x26, 208(t6) - \\ sd x27, 216(t6) - \\ sd x28, 224(t6) - \\ sd x29, 232(t6) - \\ sd x30, 240(t6) - \\ - \\ mv t5, t6 - \\ csrr t6, sscratch - \\ - \\ sd x31, 248(t5) - \\ - \\ csrw sscratch, t5 - \\ - \\ .option push - \\ .option norelax - \\ la gp, _global_pointer - \\ .option pop - \\ - \\ csrr a0, sepc - \\ csrr a1, scause - \\ mv a2, t5 - \\ - \\ la sp, _stvec_stack_end - \\ call handleTrap - \\ - \\ csrw sepc, a0 - \\ - \\ csrr t6, sscratch - \\ - \\ ld x1, 8(t6) - \\ ld x2, 16(t6) - \\ ld x3, 24(t6) - \\ ld x4, 32(t6) - \\ ld x5, 40(t6) - \\ ld x6, 48(t6) - \\ ld x7, 56(t6) - \\ ld x8, 64(t6) - \\ ld x9, 72(t6) - \\ ld x10, 80(t6) - \\ ld x11, 88(t6) - \\ ld x12, 96(t6) - \\ ld x13, 104(t6) - \\ ld x14, 112(t6) - \\ ld x15, 120(t6) - \\ ld x16, 128(t6) - \\ ld x17, 136(t6) - \\ ld x18, 144(t6) - \\ ld x19, 152(t6) - \\ ld x20, 160(t6) - \\ ld x21, 168(t6) - \\ ld x22, 176(t6) - \\ ld x23, 184(t6) - \\ ld x24, 192(t6) - \\ ld x25, 200(t6) - \\ ld x26, 208(t6) - \\ ld x27, 216(t6) - \\ ld x28, 224(t6) - \\ ld x29, 232(t6) - \\ ld x30, 240(t6) - \\ ld x31, 248(t6) - \\ - \\ sret - ); -} - -pub fn init(hart_id: usize) void { - trap_frame = .{ - .general_purpose_registers 
= [_]usize{0} ** 32, - .floating_point_registers = [_]usize{0} ** 32, - .satp = 0, - .stack_pointer = @ptrFromInt(riscv.stackPointer()), - .hart_id = hart_id, - }; - - asm volatile ( - \\ csrw sscratch, %[trapframe] - \\ - \\ la t0, supervisorTrapVector - \\ csrw stvec, t0 - \\ - \\ csrr t0, sstatus - \\ ori t0, t0, 2 - \\ csrw sstatus, t0 - : - : [trapframe] "r" (&trap_frame), - ); -} diff --git a/src/lib/mem.zig b/src/lib/mem.zig deleted file mode 100644 index eefa452..0000000 --- a/src/lib/mem.zig +++ /dev/null @@ -1,261 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const std = @import("std"); -const paging = @import("paging.zig"); -const Allocator = std.mem.Allocator; -const assert = std.debug.assert; -const maxInt = std.math.maxInt; -const mem = std.mem; - -const Chunk = struct { - flags: Flags, - len: usize, - - const Flags = packed struct(u8) { - active: u1, - reserved: u7, - }; - - pub fn next(self: *align(1) Chunk) *align(1) Chunk { - const byte_ptr: [*]u8 = @ptrCast(self); - return @ptrCast(byte_ptr + @sizeOf(Chunk) + self.len); - } - - pub fn take(self: *align(1) Chunk) void { - self.flags.active = 1; - } - - pub fn clear(self: *align(1) Chunk) void { - self.flags = mem.zeroInit(Flags, .{}); - } - - pub fn data(self: *align(1) Chunk) []u8 { - const byte_ptr: [*]u8 = @ptrCast(self); - return byte_ptr[@sizeOf(Chunk)..self.len]; - } -}; - -pub const ChunkAllocatorConfig = struct { - auto_merge_free: bool = true, -}; - -pub fn ChunkAllocator(comptime config: ChunkAllocatorConfig) type { - return struct { - head: ?*align(1) Chunk, - pages: usize, - - const Self = @This(); - - pub fn init(pages: usize) !Self { - const head: *align(1) Chunk = @ptrCast(try paging.zeroedAlloc(pages)); - head.len = (pages * paging.page_size) - @sizeOf(Chunk); - return .{ .head = head, .pages = pages }; - } - - pub fn deinit(self: *Self) void { - if (self.head) |head| { - paging.free(head); - self.head = null; - } - } - - pub 
fn allocator(self: *Self) Allocator { - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .free = free, - }, - }; - } - - pub fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 { - _ = ret_addr; - - const self: *Self = @ptrCast(@alignCast(ctx)); - - const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); - - var chunk = self.head orelse return null; - const bound = @intFromPtr(chunk) + (self.pages * paging.page_size); - - var predecessor: ?*align(1) Chunk = null; - while (@intFromPtr(chunk) < bound) : (chunk = chunk.next()) { - const adjust_off = mem.alignPointerOffset(chunk.data().ptr, ptr_align) orelse return null; - const aligned_len = len + adjust_off; - - // Is this chunk free and large enough to hold the requested allocation? - if (!@bitCast(chunk.flags.active) and chunk.len >= aligned_len) { - const remaining = chunk.len - aligned_len; - - if (predecessor) |*pred| { - pred.*.len += adjust_off; - } else if (adjust_off != 0) return null; - - chunk = @ptrFromInt(@intFromPtr(chunk) + adjust_off); - chunk.clear(); - chunk.take(); - - if (remaining > @sizeOf(Chunk)) { - chunk.len = len; - - const new_successor = chunk.next(); - - new_successor.clear(); - new_successor.len = remaining - @sizeOf(Chunk); - } - - return chunk.data().ptr; - } - - predecessor = chunk; - } - - return null; - } - - // Only expands into the next free chunk (if there is one). - // You may want to call mergeFree first if auto_merge_free was configured to false. 
- pub fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { - _ = ret_addr; - - const self: *Self = @ptrCast(@alignCast(ctx)); - - const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); - - const head = self.head orelse return false; - const bound = @intFromPtr(head) + (self.pages * paging.page_size); - - const chunk = @as(*align(1) Chunk, @ptrCast(buf.ptr - @sizeOf(Chunk))); - - const adjust_off = mem.alignPointerOffset(buf.ptr, ptr_align) orelse return false; - const aligned_new_len = new_len + adjust_off; - - if (aligned_new_len < chunk.len) { - const regained = chunk.len - aligned_new_len; - if (regained > @sizeOf(Chunk)) { - chunk.len = aligned_new_len; - - const new_successor = chunk.next(); - - new_successor.clear(); - new_successor.len = regained - @sizeOf(Chunk); - } - - return true; - } else if (aligned_new_len > chunk.len) { - const successor = chunk.next(); - if (@intFromPtr(successor) >= bound) return false; - - const total_len = chunk.len + @sizeOf(Chunk) + successor.len; - - if (!@bitCast(successor.flags.active) and aligned_new_len <= total_len) { - const remaining = total_len - aligned_new_len; - - if (remaining > @sizeOf(Chunk)) { - chunk.len = aligned_new_len; - - const new_successor = chunk.next(); - - new_successor.clear(); - new_successor.len = remaining - @sizeOf(Chunk); - } else { - chunk.len = total_len; - } - - return true; - } - - return false; - } else return true; - } - - pub fn free(ctx: *anyopaque, old_mem: []u8, log2_old_align: u8, ret_addr: usize) void { - _ = log2_old_align; - _ = ret_addr; - - const self: *Self = @ptrCast(@alignCast(ctx)); - - // Safety check. Do not free memory in uninitialized / undefined pages. 
- if (self.head == null) return; - - const chunk = @as([*]Chunk, @ptrCast(@alignCast(old_mem.ptr))) - 1; - chunk[0].clear(); - - if (config.auto_merge_free) { - self.mergeFree(); - } - } - - pub fn mergeFree(self: *Self) void { - var chunk = self.head orelse return; - const bound = @intFromPtr(chunk) + (self.pages * paging.page_size); - - while (@intFromPtr(chunk) < bound) : (chunk = chunk.next()) { - const successor = chunk.next(); - - if (@intFromPtr(successor) >= bound) { - // Safety check. - // Should never run if the implementation is working correctly. - // - // Ensure that there is a successor within bounds. - // The loop condition is not sufficient here, it only detects - // non-erroneous list ends (i.e. chunk == bound). - break; - } else if (!@bitCast(chunk.flags.active) and !@bitCast(successor.flags.active)) { - chunk.len += @sizeOf(Chunk) + successor.len; - } - } - } - }; -} - -pub const PageAllocator = struct { - pub const vtable = Allocator.VTable{ - .alloc = alloc, - .resize = resize, - .free = free, - }; - - fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { - _ = ra; - _ = log2_align; - assert(n > 0); - if (n > maxInt(usize) - (paging.page_size - 1)) return null; - const aligned_len = mem.alignForward(usize, n, paging.page_size); - const num_pages = @divExact(aligned_len, paging.page_size); - - const slice = paging.zeroedAlloc(num_pages) catch return null; - assert(mem.isAligned(@intFromPtr(slice.ptr), paging.page_size)); - return slice.ptr; - } - - fn resize(_: *anyopaque, buf_unaligned: []u8, log2_buf_align: u8, new_size: usize, return_address: usize) bool { - _ = log2_buf_align; - _ = return_address; - const new_size_aligned = mem.alignForward(usize, new_size, paging.page_size); - - const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, paging.page_size); - if (new_size_aligned == buf_aligned_len) { - return true; - } - - return false; - } - - fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: 
usize) void { - _ = log2_buf_align; - _ = return_address; - const buf_aligned_len = mem.alignForward(usize, slice.len, paging.page_size); - - paging.free(slice.ptr[0..buf_aligned_len]); - } -}; - -pub const page_allocator = Allocator{ - .ptr = undefined, - .vtable = &PageAllocator.vtable, -}; diff --git a/src/lib/paging.zig b/src/lib/paging.zig deleted file mode 100644 index dfe44cc..0000000 --- a/src/lib/paging.zig +++ /dev/null @@ -1,597 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -// This is an implementation of Sv39 paging, meaning that the virtual addresses -// are 39 bits wide. Sv32 and Sv48 are currently not implemented. - -const std = @import("std"); -const hwinfo = @import("hwinfo.zig"); -const riscv = @import("riscv.zig"); - -// Defined by linker script. -pub const text_start = @extern(*anyopaque, .{ .name = "_text_start" }); -pub const text_end = @extern(*anyopaque, .{ .name = "_text_end" }); -pub const rodata_start = @extern(*anyopaque, .{ .name = "_rodata_start" }); -pub const rodata_end = @extern(*anyopaque, .{ .name = "_rodata_end" }); -pub const data_start = @extern(*anyopaque, .{ .name = "_data_start" }); -pub const data_end = @extern(*anyopaque, .{ .name = "_data_end" }); -pub const bss_start = @extern(*anyopaque, .{ .name = "_bss_start" }); -pub const bss_end = @extern(*anyopaque, .{ .name = "_bss_end" }); -pub const stack_start = @extern(*anyopaque, .{ .name = "_stack_start" }); -pub const stack_end = @extern(*anyopaque, .{ .name = "_stack_end" }); -pub const stvec_stack_start = @extern(*anyopaque, .{ .name = "_stvec_stack_start" }); -pub const stvec_stack_end = @extern(*anyopaque, .{ .name = "_stvec_stack_end" }); -pub const heap_start = @extern(*anyopaque, .{ .name = "_heap_start" }); -pub const heap_end = @extern(*anyopaque, .{ .name = "_heap_end" }); - -inline fn heapSize() usize { - return @intFromPtr(heap_end) - @intFromPtr(heap_start); -} - -pub const page_size: usize = 0x1000; // 
4096 bytes - -var num_pages: usize = undefined; -var next_mmio_vaddr: usize = 0xff000000; - -pub var alloc_start: usize = undefined; -pub var kmem: *Table = undefined; - -pub const Error = error{ - ZeroSize, - OutOfMemory, - AlreadyTaken, - NotALeaf, -}; - -pub const Mode = enum(u4) { - bare, - sv39 = 8, - sv48, -}; - -// SATP register, configures and enables the MMU (and thus paging). -pub const Satp = packed struct(usize) { - pub const Asid = u16; - - // Reverse field order so that @bitCast yields a usize with the right order. - // Without this writing the value to the SATP register enables an invalid page table, - // leaves the MMU disabled or causes other bugs. - ppn: u44, - asid: Asid, - mode: Mode, -}; - -// A page descriptor for use by the heap allocator. -pub const Page = struct { - flags: Flags, - - pub const Flags = packed struct { - active: u1, - last: u1, // Last page of contiguous allocation - - pub const clear = .{ - .active = 0, - .last = 0, - }; - }; - - // Marks a page as taken, optionally flagging it as the last page of an allocation. - // Fails if the page is already taken. - // Returns whether the operation was successful. - pub fn take(self: *Page, last: bool) !void { - if (@bitCast(self.flags.active)) return Error.AlreadyTaken; - - self.flags.active = 1; - if (last) self.flags.last = 1; - } -}; - -// Returns the offset from the page base. Works with both physical and virtual addresses. -// Offsets are never translated. -pub fn offsetOf(addr: usize) usize { - // Offset is in bottom 12 bits of both physical and virtual addresses. - return addr & 0xfff; -} - -// Returns the virtual page numbers of a virtual address by paging level. -fn virtualPageNumbers(vaddr: usize) [3]usize { - // Virtual address format: - // - // VPN[2] | VPN[1] | VPN[0] | offset - // 9 bits | 9 bits | 9 bits | 12 bits - // - // Virtual page numbers are indexes into the page table of their level, - // i.e. 
VPN[2] is an index to the root page table on level 2 - // whereas VPN[1] is an index to the page table on level 1 specified by VPN[2]. - // - // Offsets are never translated. - - return [3]usize{ - (vaddr >> 12) & 0x1ff, - (vaddr >> 21) & 0x1ff, - (vaddr >> 30) & 0x1ff, - }; -} - -// Returns the physical page numbers of a physical address by paging level. -fn physicalPageNumbers(paddr: usize) [3]usize { - // Physical address format: - // - // PPN[2] | PPN[1] | PPN[0] | offset - // 26 bits | 9 bits | 9 bits | 12 bits - // - // PPN[i] is what to map VPN[i] to. - // - // Offsets are never translated. - - return [3]usize{ - (paddr >> 12) & 0x1ff, - (paddr >> 21) & 0x1ff, - (paddr >> 30) & 0x3ff_ffff, - }; -} - -// Returns the page numbers of an address as a single integer. -fn pageNumber(addr: usize) u44 { - return @intCast(addr >> 12); -} - -pub const EntryFlags = packed struct(u8) { - valid: u1, - read: u1, - write: u1, - exec: u1, - user: u1, - global: u1, - accessed: u1, - dirty: u1, - - pub const branch = EntryFlags{ - .valid = 1, - .read = 0, - .write = 0, - .exec = 0, - .user = 0, - .global = 0, - .accessed = 0, - .dirty = 0, - }; - - pub const readOnly = EntryFlags{ - .valid = 1, - .read = 1, - .write = 0, - .exec = 0, - .user = 0, - .global = 0, - .accessed = 1, - .dirty = 0, - }; - - pub const readWrite = EntryFlags{ - .valid = 1, - .read = 1, - .write = 1, - .exec = 0, - .user = 0, - .global = 0, - .accessed = 1, - .dirty = 1, - }; - - pub const readExec = EntryFlags{ - .valid = 1, - .read = 1, - .write = 0, - .exec = 1, - .user = 0, - .global = 0, - .accessed = 1, - .dirty = 0, - }; - - pub const userReadOnly = EntryFlags{ - .valid = 1, - .read = 1, - .write = 0, - .exec = 0, - .user = 1, - .global = 0, - .accessed = 1, - .dirty = 0, - }; - - pub const userReadWrite = EntryFlags{ - .valid = 1, - .read = 1, - .write = 1, - .exec = 0, - .user = 1, - .global = 0, - .accessed = 1, - .dirty = 1, - }; - - pub const userReadExec = EntryFlags{ - .valid = 1, - .read 
= 1, - .write = 0, - .exec = 1, - .user = 1, - .global = 0, - .accessed = 1, - .dirty = 0, - }; - - pub fn isLeaf(self: EntryFlags) bool { - return @bitCast(self.read) or @bitCast(self.write) or @bitCast(self.exec); - } - - // Returns whether the entry permissions allow the accesses - // specified in the `requested` argument. - pub fn allowAccess(self: EntryFlags, requested: EntryFlags) bool { - if (self.user != requested.user) return false; - if (self.read < requested.read) return false; - if (self.write < requested.write) return false; - if (self.exec < requested.exec) return false; - return true; - } -}; - -pub const Entry = packed struct(u64) { - // Reverse field order so that @bitCast yields a u64 with the right order. - // Without this writing the value to a page table creates an invalid entry, - // thus resulting in page faults or hanging. - flags: EntryFlags, - rsw: u2, // Reserved for supervisor use. Currently unused. - mapping: u44, - reserved: u10, - - // Returns the physical page numbers to map to by paging level. - pub fn physicalPageNumbers(self: Entry) [3]usize { - // Mapping format: - // - // PPN[2] | PPN[1] | PPN[0] - // 26 bits | 9 bits | 9 bits - // - // PPN[i] is what to map VPN[i] to. - - return [3]usize{ - self.mapping & 0x1ff, - (self.mapping >> 9) & 0x1ff, - (self.mapping >> 18) & 0x3ff_ffff, - }; - } - - pub fn mappingAddr(self: Entry) usize { - // Apply an offset of zero since entries always point to an aligned page - // and this function should return a usable memory address. - // Callers can change the offset if needed. - return self.mapping << 12; - } - - pub fn isValid(self: Entry) bool { - return @bitCast(self.flags.valid); - } - - // Returns whether the entry is a mapping (true) or another page table (false). - pub fn isLeaf(self: Entry) bool { - return self.flags.isLeaf(); - } -}; - -pub const Table = struct { - // Do not add any fields. 
The unmap function relies on mappings pointing to page tables, - // casting them to this data structure. This cast becomes invalid if additional fields - // are added, especially if they preceed the entries field. - - entries: [512]Entry, - - // Create a mapping of a certain virtual page address to a physical page address, - // discarding offsets. The mapping is written to the specified level, - // creating page tables as needed. - // - // The mapping must be a leaf, meaning that passing flags - // that indicate no access permissions at all will return an error. - // - // This function internally uses zeroedAlloc to allocate memory for the required page tables, - // but assumes that the physical address to map to has already been allocated by the caller. - pub fn map(root: *Table, vaddr: usize, paddr: usize, flags: EntryFlags, level: usize) !void { - if (!flags.isLeaf()) return Error.NotALeaf; - - const vpn = virtualPageNumbers(vaddr); - - // Grab the entry in the root (level 2) page table. - var v = &root.entries[vpn[2]]; - - // Walk the page table levels from high to low under the assumption that root is valid. - for (level..2) |iInv| { - const i = 1 - iInv; - - // If this entry doesn't point to a lower-level page table or memory page yet, - // allocate one. - if (!v.isValid()) { - const page = try zeroedAlloc(1); - v.* = .{ - .flags = EntryFlags.branch, - .rsw = 0, - .mapping = pageNumber(@intFromPtr(page.ptr)), // Remove the offset, a mapping is just the PPN. - .reserved = 0, - }; - } - - // Get the entries of the existing or newly created page table. - // This cast is safe because the only field of a Table is its entries. - const table: *Table = @ptrFromInt(v.mappingAddr()); - // Grab the entry of the table by indexing it according to the corresponding VPN. - v = &table.entries[vpn[i]]; - } - - // Write the actual mapping to the correct table on the requested level. 
- v.* = .{ - .flags = flags, - .rsw = 0, - .mapping = @intCast(pageNumber(paddr)), // Remove the offset, a mapping is just the PPN. - .reserved = 0, - }; - } - - // Deallocate child page tables recursively. The provided table itself is not affected, - // allowing partial unmapping of multi-level tables. - // - // This function does not deallocate memory pages mapped by the provided table - // or any of its (recursive) children. - pub fn unmap(table: *Table) void { - for (&table.entries) |*entry| { - if (entry.isValid() and !entry.isLeaf()) { - // This cast is safe because the only field of a Table is its entries. - const lowerLevelTable: *Table = @ptrFromInt(entry.mappingAddr()); - lowerLevelTable.unmap(); - entry.flags.valid = 0; - free(lowerLevelTable); - } - } - } - - pub fn unmapEntry(root: *Table, vaddr: usize) void { - const vpn = virtualPageNumbers(vaddr); - - var v = &root.entries[vpn[2]]; - - for (0..3) |iInv| { - const i = 2 - iInv; - - if (!v.isValid()) { - break; - } else if (v.isLeaf()) { - v.flags.valid = 0; - // IMPORTANT: Flush TLB correctly - // if sfence.vma in process.switchTo is ever optimized. - return; - } - - const entry: *[512]Entry = @ptrFromInt(v.mappingAddr()); - v = &entry[vpn[i - 1]]; - } - } - - // Returns the physical address to a virtual address using the provided level 2 page table. - // This can be used to access virtual addresses whose page table isn't active - // in the MMU / SATP CSR (Control and Status Register), making it possible - // to access the memory space of a user mode process (from its perspective) - // from supervisor or machine mode cleanly. - // - // If the permissions requested using the `flags` argument exceed those - // found in the page table entry, no value is returned. - // - // The absence of a return value is equivalent to a page fault. 
- pub fn translate(root: *const Table, vaddr: usize, flags: EntryFlags) ?usize { - const vpn = virtualPageNumbers(vaddr); - - // Grab the entry in the root (level 2) page table. - var v = &root.entries[vpn[2]]; - - // Walk the page table levels from high to low. - for (0..3) |iInv| { - const i = 2 - iInv; - - if (!v.isValid()) { - break; - } else if (v.isLeaf()) { - // Mapping found. - - if (!v.flags.allowAccess(flags)) return null; - - // Create a mask starting directly below / after PN[i]. - // Since all levels can have leaves i is not guaranteed to be zero. - const offset_mask = (@as(usize, 1) << @intCast(12 + 9 * i)) - 1; - const offset = vaddr & offset_mask; - const ppn_joined = v.mappingAddr() & ~offset_mask; - - return ppn_joined | offset; - } - - // Get the entries of the page table of the current level. - const entry: *[512]Entry = @ptrFromInt(v.mappingAddr()); - // Grab the entry of the table by indexing it according to the corresponding VPN. - v = &entry[vpn[i - 1]]; - } - - return null; - } - - // Creates an identity mapping for all pages needed for the specified range - // using the map function. An identity mapping doesn't actually translate - // memory addresses, virtual addresses and physical addresses are the same. - // - // The start address is inclusive while end is exclusive. - // - // This is still useful because it can be used to prevent the kernel - // from accessing machine-reserved memory by accident. - pub fn identityMapRange(root: *Table, start: usize, end: usize, flags: EntryFlags) !void { - // Mask out the offset within the starting page. - const start_page = start & ~(page_size - 1); - // Mask out the offset within the ending page, but ensure the returned page address - // is always the last required page for the mapping (end is exclusive, - // so subtracting 1 ends up in the previous page on boundaries, - // eliminating one useless mapping). The resulting value is inclusive. 
- const end_page = (end - 1) & ~(page_size - 1); - - var page = start_page; - while (page <= end_page) : (page += page_size) { - try root.map(page, page, flags, 0); - } - } - - // Constructs the SATP register value needed to activate the specified page table - // using the provided Address Space Identifier (ASID). - // - // The kernel page table always has ASID 0 (not mandated by the RISC-V specification). - pub fn satp(root: *const Table, asid: Satp.Asid) Satp { - return .{ - .ppn = @intCast(pageNumber(@intFromPtr(root))), - .asid = asid, - .mode = .sv39, - }; - } - - pub fn mapKernel(root: *Table) !void { - try root.identityMapRange(@intFromPtr(text_start), @intFromPtr(text_end), EntryFlags.readExec); - try root.identityMapRange(@intFromPtr(rodata_start), @intFromPtr(rodata_end), EntryFlags.readOnly); - try root.identityMapRange(@intFromPtr(data_start), @intFromPtr(data_end), EntryFlags.readWrite); - try root.identityMapRange(@intFromPtr(bss_start), @intFromPtr(bss_end), EntryFlags.readWrite); - try root.identityMapRange(@intFromPtr(stack_start), @intFromPtr(stack_end), EntryFlags.readWrite); - try root.identityMapRange(@intFromPtr(stvec_stack_start), @intFromPtr(stvec_stack_end), EntryFlags.readWrite); - try root.identityMapRange(@intFromPtr(heap_start), @intFromPtr(heap_end), EntryFlags.readWrite); - } - - pub fn mapDevice(root: *Table, reg: *hwinfo.Reg) !void { - const physical_start = reg.addr & ~(page_size - 1); - const physical_end = (reg.addr + reg.len - 1) & ~(page_size - 1); - - reg.addr = next_mmio_vaddr | (reg.addr & (page_size - 1)); - - var paddr = physical_start; - while (paddr <= physical_end) : (paddr += page_size) { - try root.map(next_mmio_vaddr, paddr, EntryFlags.readWrite, 0); - next_mmio_vaddr += page_size; - } - } -}; - -pub fn init() !void { - num_pages = heapSize() / page_size; - const pages: [*]Page = @ptrCast(heap_start); - - for (0..num_pages) |i| { - pages[i].flags = Page.Flags.clear; - } - - // Start allocating beyond page 
descriptors. - const descriptors_end = @intFromPtr(heap_start) + num_pages * @sizeOf(Page); - alloc_start = std.mem.alignForward(usize, descriptors_end, page_size); - - kmem = @ptrCast(try zeroedAlloc(1)); - try kmem.mapKernel(); -} - -// Allocate memory pages. Passing n <= 0 results in an error. -pub fn alloc(n: usize) ![]align(page_size) u8 { - if (n <= 0) return Error.ZeroSize; - - const descriptors: [*]Page = @ptrCast(heap_start); - - // Iterate over potential starting points. - // The subtraction of n prevents unnecessary iterations for starting points - // that don't leave enough space for the whole allocation. - for (0..num_pages - n) |i| { - if (!@bitCast(descriptors[i].flags.active)) { - // Free starting page found. - - var insufficient = false; - - // Check if there is enough contiguous free space for the whole allocation. - // If not, move on to the next potential starting point. - for (i..n + i) |j| { - if (@bitCast(descriptors[j].flags.active)) { - insufficient = true; - break; - } - } - - if (!insufficient) { - // Mark all allocated pages as taken. - for (i..n + i - 1) |j| { - try descriptors[j].take(false); - } - try descriptors[n + i - 1].take(true); - - // Construct a pointer to the first page using its descriptor number. - const first = alloc_start + i * page_size; - const allocation: [*]align(page_size) u8 = @ptrFromInt(first); - return allocation[0 .. n * page_size]; - } - } - } - - return Error.OutOfMemory; -} - -// Free (contiguous) memory page(s). -pub fn free(memory: anytype) void { - const Slice = @typeInfo(@TypeOf(memory)).Pointer; - const bytes = std.mem.asBytes(memory); - const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0; - if (bytes_len == 0) return; - - // Restore the address to the page descriptor flags from the address of its contents - // by restoring the descriptor number and indexing the descriptor table - // at the start of the heap using it. 
- const descriptor_offset = (@intFromPtr(bytes.ptr) - alloc_start) / page_size; - const addr = @intFromPtr(heap_start) + descriptor_offset; - - var page: [*]Page = @ptrFromInt(addr); - - // Mark all but the last page as free. - // A double-free check is performed on the last page before it is freed. - while (@bitCast(page[0].flags.active) and !@bitCast(page[0].flags.last)) : (page += 1) { - page[0].flags = Page.Flags.clear; - } - - // Mark the last page as free. - page[0].flags = Page.Flags.clear; -} - -// Allocate memory pages and overwrite their contents with zeroes for added security. -// Passing n <= 0 results in an error. -pub fn zeroedAlloc(n: usize) Error![]align(page_size) u8 { - const ret = try alloc(n); - - const satp = riscv.satp.read(); - if (satp.mode != .bare) { - const page_table: *Table = @ptrFromInt(satp.ppn << 12); - const start = @intFromPtr(ret.ptr); - const end = start + ret.len; - try page_table.identityMapRange(start, end, EntryFlags.readWrite); - } - - // Write zeroes in batches of 64-bit to reduce the amount of store instructions. - // The remainder / remaining bytes don't need to be accounted for - // because page_size (4096) is divisible by 8. 
- - const len = (n * page_size) / 8; - const ptr: []volatile u64 = @as([*]volatile u64, @ptrCast(ret))[0..len]; - - for (0..len) |i| { - ptr[i] = 0; - } - - return ret; -} - -pub fn setUserMemoryAccess(enable: bool) void { - var sstatus = riscv.sstatus.read(); - sstatus.supervisor_user_memory_access = @bitCast(enable); - riscv.sstatus.write(sstatus); -} diff --git a/src/lib/plic.zig b/src/lib/plic.zig deleted file mode 100644 index 7e54992..0000000 --- a/src/lib/plic.zig +++ /dev/null @@ -1,96 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const hwinfo = @import("hwinfo.zig"); - -pub var default: Plic = undefined; - -pub const Error = error{ - InterruptOutOfRange, - ContextOutOfRange, -}; - -pub const Context = packed struct { - priority_threshold: u32, - claim_or_complete: u32, -}; - -pub const Plic = struct { - mmio_register: hwinfo.Reg, - - const priority_offset = 0x0; - const enable_offset = 0x2000; - const context_offset_zero = 0x200000; - const context_offset_nonzero = 0x201000; - - pub const num_contexts = 15872; - - // A value greater than or equal to num_contexts for context is an error. - // A value of 0 for interrupt results in an error. - pub fn setEnabled(self: Plic, context: u14, interrupt: u10, enable: bool) !void { - if (context >= num_contexts) return Error.ContextOutOfRange; - if (interrupt == 0) return Error.InterruptOutOfRange; - - const mmio_slice = self.mmioSlice(); - const enable_ptr: *volatile [num_contexts][32]u32 = @alignCast(@ptrCast(&mmio_slice[enable_offset])); - - const register = interrupt / 32; - const bit = @as(u32, 1) << @intCast(interrupt & 0x1f); - - if (enable) { - enable_ptr[context][register] |= bit; - } else { - enable_ptr[context][register] &= ~bit; - } - } - - // A value of 0 for interrupt results in an error. 
- pub fn setPriority(self: Plic, interrupt: u10, priority: u3) !void { - if (interrupt == 0) return Error.InterruptOutOfRange; - - const mmio_slice = self.mmioSlice(); - const priority_ptr: *volatile [1024]u32 = @alignCast(@ptrCast(&mmio_slice[priority_offset])); - - priority_ptr[interrupt] = @intCast(priority); - } - - // A value greater than or equal to num_contexts for context is an error. - pub fn setPriorityThreshold(self: Plic, context: u14, threshold: u3) !void { - const context_ptr = try self.contextPtr(context); - context_ptr.priority_threshold = threshold; - } - - // A value greater than or equal to num_contexts for context is an error. - // Non-null interrupts are guaranteed to be non-zero. - pub fn claim(self: Plic, context: u14) !?u10 { - const context_ptr = try self.contextPtr(context); - const interrupt = context_ptr.claim_or_complete; - - if (interrupt != 0) return @intCast(interrupt) else return null; - } - - // A value greater than or equal to num_contexts for context is an error. - // A value of 0 for interrupt results in an error. 
- pub fn complete(self: Plic, context: u14, interrupt: u10) !void { - if (interrupt == 0) return Error.InterruptOutOfRange; - - const context_ptr = try self.contextPtr(context); - context_ptr.claim_or_complete = interrupt; - } - - fn contextPtr(self: Plic, context: u14) !*volatile Context { - if (context >= num_contexts) return Error.ContextOutOfRange; - - const mmio_slice = self.mmio_register.slice(u8); - - if (context == 0) { - return @alignCast(@ptrCast(&mmio_slice[context_offset_zero])); - } else { - const context_offset: usize = context - 1; - const ptr_offset = context_offset * (@sizeOf(u32) + @sizeOf(Context)); - const context_ptr = &mmio_slice[context_offset_nonzero + ptr_offset]; - return @alignCast(@ptrCast(context_ptr)); - } - } -}; diff --git a/src/lib/process.zig b/src/lib/process.zig deleted file mode 100644 index b5afade..0000000 --- a/src/lib/process.zig +++ /dev/null @@ -1,380 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const builtin = @import("builtin"); -const std = @import("std"); -const TrapFrame = @import("TrapFrame.zig"); -const paging = @import("paging.zig"); -const riscv = @import("riscv.zig"); -const time = @import("sbi/time.zig"); -const Allocator = std.mem.Allocator; -const elf = std.elf; - -pub const schedule_interval_millis = 1; - -pub var list = std.mem.zeroInit(std.DoublyLinkedList(Info), .{}); -var next_pid: u16 = 1; - -const num_stack_pages = 2; - -pub const Error = error{ - EmptySchedule, - NoInit, - TooManyThreads, -}; - -pub const ExeError = error{ - TooSmall, - BadEndian, - BadArch, - BadBitLen, - NotStaticExe, - LengthOutOfBounds, - ReservedMemMapping, - BranchPerms, - WritableCode, -}; - -pub const State = enum(u8) { - waiting, - active, - sleeping, - terminated, -}; - -pub const Info = struct { - allocator: Allocator, - id: u16, - thread_id: usize, - trap_frame: TrapFrame, - sections: std.ArrayList([]align(paging.page_size) u8), - stack: []align(paging.page_size) u8, 
- pc: usize, - page_table: *paging.Table, - state: State, - - pub fn satp(self: *const Info) paging.Satp { - return self.page_table.satp(self.id); - } - - pub fn createThread(self: *const Info, allocator: ?Allocator, entry: usize) !*Info { - const alloc = allocator orelse self.allocator; - - var trap_frame = std.mem.zeroInit(TrapFrame, .{}); - - const stack = try paging.zeroedAlloc(num_stack_pages); - errdefer paging.free(stack); - - const stack_top = @intFromPtr(stack.ptr) + num_stack_pages * paging.page_size; - try self.page_table.identityMapRange(@intFromPtr(stack.ptr), stack_top, paging.EntryFlags.userReadWrite); - - trap_frame.general_purpose_registers[2] = stack_top; - - const thread_id = std.math.add(usize, self.thread_id, 1) catch { - return Error.TooManyThreads; - }; - - const proc = Info{ - .allocator = alloc, - .id = self.id, - .thread_id = thread_id, - .trap_frame = trap_frame, - .pages = self.pages, - .stack = stack, - .pc = entry, - .cleanup_hook = null, - .page_table = self.page_table, - .state = .waiting, - }; - - const proc_node = try alloc.create(std.DoublyLinkedList(Info).Node); - proc_node.data = proc; - list.prepend(proc_node); - - return &proc_node.data; - } - - pub fn terminate( - self: *Info, - ) void { - riscv.satp.write(paging.kmem.satp(0)); - - var node = list.first; - while (node) |proc_node| : (node = proc_node.next) { - if (self.shouldTerminate(&proc_node.data)) { - if (proc_node.data.thread_id != self.thread_id) { - proc_node.data.terminate(); - } - - list.remove(proc_node); - self.allocator.destroy(proc_node); - } - } - - paging.free(self.stack); - - if (self.thread_id == 0) { - self.page_table.unmap(); - paging.free(self.page_table); - self.freeSections(); - } - } - - fn freeSections(self: *Info) void { - defer self.sections.deinit(); - for (self.sections.items) |section| { - paging.free(section); - } - } - - pub fn allowResume(self: *Info) void { - self.pc += 4; // Skip ecall instruction - self.state = .waiting; - } - - pub fn 
shouldTerminate(self: *const Info, candidate: *const Info) bool { - return candidate.id == self.id and self.shouldTerminateThread(candidate); - } - - fn shouldTerminateThread(self: *const Info, candidate: *const Info) bool { - return candidate.thread_id == self.thread_id or self.thread_id == 0; - } -}; - -pub fn next() ?*Info { - if (list.popFirst()) |info| { - list.append(info); - - if (info.data.state != .waiting) return next(); - return &info.data; - } - - return null; -} - -pub fn schedule() !noreturn { - if (next()) |proc| { - try time.interruptInMillis(schedule_interval_millis); - switchTo(proc); - } - - return Error.EmptySchedule; -} - -pub fn switchTo(proc: *Info) noreturn { - proc.state = .active; - - var sstatus = riscv.sstatus.read(); - sstatus.previous_privilege = .user; - sstatus.user_interrupt_enable = 0; - sstatus.supervisor_interrupt_enable = 0; - sstatus.user_prior_interrupt_enable = 1; - sstatus.supervisor_prior_interrupt_enable = 1; - riscv.sstatus.write(sstatus); - - riscv.sscratch.write(@intFromPtr(&proc.trap_frame)); - riscv.sepc.write(proc.pc); - riscv.satp.write(proc.satp()); - - // Probably not always needed. Let's not take the risk for now. 
- asm volatile ( - \\ sfence.vma - ); - - asm volatile ( - \\ csrr t6, sscratch - \\ - \\ ld x1, 8(t6) - \\ ld x2, 16(t6) - \\ ld x3, 24(t6) - \\ ld x4, 32(t6) - \\ ld x5, 40(t6) - \\ ld x6, 48(t6) - \\ ld x7, 56(t6) - \\ ld x8, 64(t6) - \\ ld x9, 72(t6) - \\ ld x10, 80(t6) - \\ ld x11, 88(t6) - \\ ld x12, 96(t6) - \\ ld x13, 104(t6) - \\ ld x14, 112(t6) - \\ ld x15, 120(t6) - \\ ld x16, 128(t6) - \\ ld x17, 136(t6) - \\ ld x18, 144(t6) - \\ ld x19, 152(t6) - \\ ld x20, 160(t6) - \\ ld x21, 168(t6) - \\ ld x22, 176(t6) - \\ ld x23, 184(t6) - \\ ld x24, 192(t6) - \\ ld x25, 200(t6) - \\ ld x26, 208(t6) - \\ ld x27, 216(t6) - \\ ld x28, 224(t6) - \\ ld x29, 232(t6) - \\ ld x30, 240(t6) - \\ ld x31, 248(t6) - \\ - \\ sret - ); - - unreachable; -} - -const HdrBuf = *align(@alignOf(elf.Elf64_Ehdr)) const [@sizeOf(elf.Elf64_Ehdr)]u8; - -pub fn create(allocator: Allocator, elf_buf: []align(@alignOf(elf.Elf64_Ehdr)) const u8) !*Info { - if (elf_buf.len < @sizeOf(elf.Elf64_Ehdr)) return ExeError.TooSmall; - - const hdr_buf: HdrBuf = elf_buf[0..@sizeOf(elf.Elf64_Ehdr)]; - const hdr = try elf.Header.parse(@ptrCast(hdr_buf)); - - try validateElfHeader(hdr, hdr_buf); - - const procmem: *paging.Table = @ptrCast(try paging.zeroedAlloc(1)); - errdefer paging.free(procmem); - - try procmem.mapKernel(); - - const parse_source = std.io.fixedBufferStream(elf_buf); - - var sections = std.ArrayList([]align(paging.page_size) u8).init(allocator); - - var it = hdr.program_header_iterator(parse_source); - while (try it.next()) |phdr| { - if (phdr.p_type != elf.PT_LOAD) continue; - if (phdr.p_filesz == 0 or phdr.p_memsz == 0) continue; - if (phdr.p_offset + phdr.p_filesz >= elf_buf.len) { - return ExeError.LengthOutOfBounds; - } - - const offset = paging.offsetOf(phdr.p_vaddr); - const memsz_aligned = std.mem.alignForward(usize, offset + phdr.p_memsz, paging.page_size); - const num_pages = @divExact(memsz_aligned, paging.page_size); - - const pages = try paging.zeroedAlloc(num_pages); - 
errdefer paging.free(pages); - - try sections.append(pages); - - const sz = @min(phdr.p_filesz, phdr.p_memsz); - @memcpy(pages[offset .. offset + sz], elf_buf[phdr.p_offset .. phdr.p_offset + sz]); - - for (0..num_pages) |page| { - const vaddr = phdr.p_vaddr + page * paging.page_size; - const paddr = @intFromPtr(pages.ptr) + page * paging.page_size; - const flags = paging.EntryFlags{ - .valid = 1, - .read = @bitCast(phdr.p_flags & elf.PF_R != 0), - .write = @bitCast(phdr.p_flags & elf.PF_W != 0), - .exec = @bitCast(phdr.p_flags & elf.PF_X != 0), - .user = 1, - .global = 0, - .accessed = 1, - .dirty = @bitCast(phdr.p_flags & elf.PF_W != 0), - }; - - if (vaddr >= @intFromPtr(paging.text_start) and vaddr < paging.alloc_start) { - return ExeError.ReservedMemMapping; - } - if (!@bitCast(flags.read) and !@bitCast(flags.write) and !@bitCast(flags.exec)) { - return ExeError.BranchPerms; - } - if (@bitCast(flags.write) and @bitCast(flags.exec)) { - return ExeError.WritableCode; - } - - try procmem.map(vaddr, paddr, flags, 0); - } - } - - const stack = try paging.zeroedAlloc(num_stack_pages); - errdefer paging.free(stack); - - const stack_top = @intFromPtr(stack.ptr) + num_stack_pages * paging.page_size; - try procmem.identityMapRange(@intFromPtr(stack.ptr), stack_top, paging.EntryFlags.userReadWrite); - - var proc = Info{ - .allocator = allocator, - .id = next_pid, - .thread_id = 0, - .trap_frame = std.mem.zeroInit(TrapFrame, .{}), - .sections = sections, - .stack = @ptrCast(stack), - .pc = hdr.entry, - .page_table = procmem, - .state = .waiting, - }; - proc.trap_frame.general_purpose_registers[2] = stack_top; - - next_pid += 1; - - const proc_node = try allocator.create(std.DoublyLinkedList(Info).Node); - proc_node.data = proc; - list.prepend(proc_node); - - return &proc_node.data; -} - -pub fn run(allocator: Allocator, bytes: []align(@alignOf(elf.Elf64_Ehdr)) const u8) !noreturn { - const proc = try create(allocator, @alignCast(bytes[0..])); - - try 
time.interruptInMillis(schedule_interval_millis); - switchTo(proc); -} - -fn validateElfHeader(hdr: elf.Header, hdr_buf: *align(@alignOf(elf.Elf64_Ehdr)) const [@sizeOf(elf.Elf64_Ehdr)]u8) !void { - const arch = builtin.cpu.arch; - - if (hdr.endian != arch.endian()) return ExeError.BadEndian; - if (hdr.machine != arch.toElfMachine()) return ExeError.BadArch; - if (!hdr.is_64) return ExeError.BadBitLen; - - const hdr64 = @as(*const elf.Elf64_Ehdr, @ptrCast(hdr_buf)); - - if (hdr64.e_type != .EXEC) return ExeError.NotStaticExe; -} - -fn usizeFromArg(arg: anytype) usize { - return switch (@typeInfo(@TypeOf(arg))) { - .Pointer => |ptr| switch (ptr.size) { - .Slice => @intFromPtr(arg.ptr), - else => @intFromPtr(arg), - }, - else => arg, - }; -} - -pub fn findThread(pid: u16, thread_id: usize) ?*Info { - var node = list.first; - while (node) |proc_node| : (node = proc_node.next) { - if (proc_node.data.id == pid and proc_node.data.thread_id == thread_id) { - return &proc_node.data; - } - } - - return null; -} - -pub fn mainThread(pid: u16) ?*Info { - return findThread(pid, 0); -} - -pub fn latestThread(pid: u16) ?*Info { - var latest: ?*Info = null; - - var node = list.first; - while (node) |proc_node| : (node = proc_node.next) { - if (proc_node.data.id == pid) { - if (latest) |proc| { - if (proc_node.data.thread_id > proc.thread_id) { - latest = &proc_node.data; - } - } else latest = &proc_node.data; - } - } - - return latest; -} diff --git a/src/lib/riscv.zig b/src/lib/riscv.zig deleted file mode 100644 index e68b3fb..0000000 --- a/src/lib/riscv.zig +++ /dev/null @@ -1,112 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const std = @import("std"); -const interrupts = @import("interrupts.zig"); -const paging = @import("paging.zig"); - -pub const Privilege = enum(u1) { - user, - supervisor, -}; - -pub const ExtensionState = enum(u2) { - off, - initial, - clean, - dirty, -}; - -pub const Xlen = enum(u2) { - rv32 = 1, - 
rv64, - rv128, -}; - -pub const Sstatus = packed struct(usize) { - user_interrupt_enable: u1, - supervisor_interrupt_enable: u1, - reserved0: u2, - user_prior_interrupt_enable: u1, - supervisor_prior_interrupt_enable: u1, - reserved1: u2, - previous_privilege: Privilege, - reserved2: u4, - floating_point_state: ExtensionState, - user_extension_state: ExtensionState, - reserved3: u1, - supervisor_user_memory_access: u1, - make_executable_readable: u1, - reserved4: u12, - user_xlen: Xlen, - reserved5: u29, - need_state_saving: u1, // Read-only. -}; - -pub const SbiRet = struct { - err: isize, - val: isize, -}; - -pub fn ecall(ext_id: usize, fn_id: usize, a0: usize, a1: usize, a2: usize) SbiRet { - var ret = SbiRet{ .err = 0, .val = 0 }; - - asm volatile ( - \\ ecall - \\ sw a0, 0(%[err]) - \\ sw a1, 0(%[val]) - : - : [err] "r" (&ret.err), - [val] "r" (&ret.val), - [eid] "{a7}" (ext_id), - [fid] "{a6}" (fn_id), - [a0] "{a0}" (a0), - [a1] "{a1}" (a1), - [a2] "{a2}" (a2), - ); - - return ret; -} - -pub fn stackPointer() usize { - return asm volatile ("" - : [value] "={sp}" (-> usize), - ); -} - -pub const satp = Csr(paging.Satp, "satp"); -pub const sstatus = Csr(Sstatus, "sstatus"); -pub const sie = Csr(interrupts.Enable, "sie"); -pub const sip = Csr(interrupts.Enable, "sip"); -pub const sscratch = Csr(usize, "sscratch"); -pub const sepc = Csr(usize, "sepc"); -pub const stval = Csr(usize, "stval"); -pub const time = Csr(usize, "time"); - -pub fn Csr(comptime T: type, csr: []const u8) type { - if (csr.len > 8) @compileError("CSR name length exceeds 8 characters"); - - return struct { - pub inline fn read() T { - comptime var buf = [_]u8{0} ** 23; - - const bits = asm volatile (std.fmt.bufPrint(buf[0..], "csrr %[bits], {s}", .{csr}) catch unreachable - : [bits] "=r" (-> usize), - ); - - return @bitCast(bits); - } - - pub inline fn write(value: T) void { - const bits: usize = @bitCast(value); - - comptime var buf = [_]u8{0} ** 23; - - asm volatile 
(std.fmt.bufPrint(buf[0..], "csrw {s}, %[bits]", .{csr}) catch unreachable - : - : [bits] "r" (bits), - ); - } - }; -} diff --git a/src/lib/sbi.zig b/src/lib/sbi.zig deleted file mode 100644 index fc8afde..0000000 --- a/src/lib/sbi.zig +++ /dev/null @@ -1,127 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const riscv = @import("riscv.zig"); - -pub const Error = error{ - Success, - Failed, - NotSupported, - InvalidParam, - Denied, - InvalidAddr, - AlreadyAvail, - AlreadyStarted, - AlreadyStopped, - NoSharedMem, - InvalidState, - BadRange, - SbiUnknown, -}; - -pub fn errorFromCode(code: isize) Error { - return switch (code) { - 0 => Error.Success, - -1 => Error.Failed, - -2 => Error.NotSupported, - -3 => Error.InvalidParam, - -4 => Error.Denied, - -5 => Error.InvalidAddr, - -6 => Error.AlreadyAvail, - -7 => Error.AlreadyStarted, - -8 => Error.AlreadyStopped, - -9 => Error.NoSharedMem, - -10 => Error.InvalidState, - -11 => Error.BadRange, - else => Error.SbiUnknown, - }; -} - -const BaseExtId: usize = 0x10; - -const BaseFnId = enum(usize) { - GetSpecVer, - GetImpId, - GetImpVer, - ProbeExt, - GetMVendorId, - GetMArchId, - GetMImpId, -}; - -pub const ImpId = enum(isize) { - Bbl, - OpenSbi, - Xvisor, - Kvm, - RustSbi, - Diosix, - Coffer, - Xen, - PolarFire, - _, -}; - -pub fn specVer() !isize { - const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetSpecVer), 0, 0, 0); - if (ret.err != 0) { - return errorFromCode(ret.err); - } - - return ret.val; -} - -pub fn impId() !ImpId { - const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetImpId), 0, 0, 0); - if (ret.err != 0) { - return errorFromCode(ret.err); - } - - return @enumFromInt(ret.val); -} - -pub fn impVer() !isize { - const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetImpVer), 0, 0, 0); - if (ret.err != 0) { - return errorFromCode(ret.err); - } - - return ret.val; -} - -pub fn probeExt(ext_id: usize) !bool { - const ret = 
riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.ProbeExt), ext_id, 0, 0); - if (ret.err != 0) { - return errorFromCode(ret.err); - } - - return ret.val != 0; -} - -pub fn mVendorId() !isize { - const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetMVendorId), 0, 0, 0); - if (ret.err != 0) { - return errorFromCode(ret.err); - } - - return ret.val; -} - -pub fn mArchId() !isize { - const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetMarchId), 0, 0, 0); - if (ret.err != 0) { - return errorFromCode(ret.err); - } - - return ret.val; -} - -pub fn mImpId() !isize { - const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetMImpId), 0, 0, 0); - if (ret.err != 0) { - return errorFromCode(ret.err); - } - - return ret.val; -} diff --git a/src/lib/sbi/debug_console.zig b/src/lib/sbi/debug_console.zig deleted file mode 100644 index 2bcc097..0000000 --- a/src/lib/sbi/debug_console.zig +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const std = @import("std"); -const riscv = @import("../riscv.zig"); -const sbi = @import("../sbi.zig"); - -const ExtId: usize = 0x4442434E; - -const FnId = enum(usize) { - Write, - Read, - WriteByte, -}; - -pub const Writer = std.io.Writer(void, sbi.Error, write); - -fn write(_: void, bytes: []const u8) !usize { - const ret = riscv.ecall(ExtId, @intFromEnum(FnId.Write), bytes.len, @intFromPtr(bytes.ptr), 0); - if (ret.err != 0) { - return sbi.errorFromCode(ret.err); - } - - return @intCast(ret.val); -} - -pub fn writer() !Writer { - if (!try sbi.probeExt(ExtId)) { - return sbi.Error.NotSupported; - } - - return .{ .context = {} }; -} diff --git a/src/lib/sbi/legacy.zig b/src/lib/sbi/legacy.zig deleted file mode 100644 index 858899c..0000000 --- a/src/lib/sbi/legacy.zig +++ /dev/null @@ -1,40 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const std = @import("std"); -const riscv = @import("../riscv.zig"); 
-const sbi = @import("../sbi.zig"); - -const ExtId = enum(usize) { - SetTimer, - ConsolePutchar, - ConsoleGetchar, - ClearIpi, - SendIpi, - RemoteFenceI, - RemoteSFenceVma, - RemoteSFenceVmaAsid, - Shutdown, -}; - -pub const Writer = std.io.Writer(void, sbi.Error, write); - -fn write(_: void, bytes: []const u8) !usize { - for (bytes) |byte| { - const ret = riscv.ecall(@intFromEnum(ExtId.ConsolePutchar), 0, byte, 0, 0); - if (ret.err != 0) { - return sbi.errorFromCode(ret.err); - } - } - - return bytes.len; -} - -pub fn writer() !Writer { - if (!try sbi.probeExt(@intFromEnum(ExtId.ConsolePutchar))) { - return sbi.Error.NotSupported; - } - - return .{ .context = {} }; -} diff --git a/src/lib/sbi/sys_reset.zig b/src/lib/sbi/sys_reset.zig deleted file mode 100644 index 9a3d62b..0000000 --- a/src/lib/sbi/sys_reset.zig +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const riscv = @import("../riscv.zig"); -const sbi = @import("../sbi.zig"); - -const ExtId: usize = 0x53525354; - -const FnId = enum(usize) { - Reset, -}; - -pub const Type = enum(u32) { - Shutdown, - ColdReboot, - WarmReboot, -}; - -pub const Reason = enum(u32) { - None, - SysErr, -}; - -pub fn reset(@"type": Type, reason: Reason) !void { - if (!try sbi.probeExt(ExtId)) { - return sbi.Error.NotSupported; - } - - const type_id = @intFromEnum(@"type"); - const reason_id = @intFromEnum(reason); - - const ret = riscv.ecall(ExtId, @intFromEnum(FnId.Reset), type_id, reason_id, 0); - if (ret.err != 0) { - return sbierr.errorFromCode(ret.err); - } -} diff --git a/src/lib/sbi/time.zig b/src/lib/sbi/time.zig deleted file mode 100644 index eed5324..0000000 --- a/src/lib/sbi/time.zig +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const std = @import("std"); -const hwinfo = @import("../hwinfo.zig"); -const riscv = @import("../riscv.zig"); -const sbi = 
@import("../sbi.zig"); - -const ExtId: usize = 0x54494d45; - -const FnId = enum(usize) { - SetTimer, -}; - -pub const Error = error{ - NoCpusHwInfo, -}; - -pub fn setTimer(stime_absolute: u64) !void { - if (!try sbi.probeExt(ExtId)) return sbi.Error.NotSupported; - - const ret = riscv.ecall(ExtId, @intFromEnum(FnId.SetTimer), stime_absolute, 0, 0); - if (ret.err != 0) return sbi.errorFromCode(ret.err); -} - -pub fn interruptInMillis(millis: u64) !void { - var cpus = try hwinfo.byKind(.cpus); - const frequency = try cpus.next() orelse return error.NoCpusHwInfo; - const cycles = frequency.value / 1000 * millis; - - const time = riscv.time.read(); - try setTimer(time + cycles); -} diff --git a/src/lib/syscall.zig b/src/lib/syscall.zig deleted file mode 100644 index 6b94e16..0000000 --- a/src/lib/syscall.zig +++ /dev/null @@ -1,200 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Himbeer -// -// SPDX-License-Identifier: AGPL-3.0-or-later - -const std = @import("std"); -const Console = @import("Console.zig"); -const TrapFrame = @import("TrapFrame.zig"); -const channel = @import("channel.zig"); -const hwinfo = @import("hwinfo.zig"); -const mem = @import("mem.zig"); -const paging = @import("paging.zig"); -const process = @import("process.zig"); -const riscv = @import("riscv.zig"); - -pub const Error = error{ - ZeroAddressSupplied, -}; - -pub const HandleError = error{ - UnknownSyscall, -}; - -pub fn handler(proc: *process.Info, trap_frame: *TrapFrame) !void { - switch (trap_frame.general_purpose_registers[17]) { - 100000 => trap_frame.setReturnValue(errorName(trap_frame)), - 100001 => trap_frame.setReturnValue(consoleWrite(trap_frame)), - 100002 => trap_frame.setReturnValue(launch(trap_frame)), - 100003 => trap_frame.setReturnValue(end(proc)), - 100004 => trap_frame.setReturnValue(terminate(proc, trap_frame)), - 100005 => trap_frame.setReturnValue(processId(proc)), - 100006 => trap_frame.setReturnValue(threadId(proc)), - 100008 => 
trap_frame.setReturnValue(devicesByKind(trap_frame)), - 100009 => trap_frame.setReturnValue(join(proc, trap_frame)), - 100010 => trap_frame.setReturnValue(leave(proc, trap_frame)), - 100011 => trap_frame.setReturnValue(pass(trap_frame)), - 100012 => trap_frame.setReturnValue(receive(proc, trap_frame)), - else => return HandleError.UnknownSyscall, - } -} - -pub const ErrorNameError = error{ErrorCodeOutOfRange}; - -// errorName(code: u16, buffer: [*]u8, len: usize) !usize -fn errorName(trap_frame: *const TrapFrame) !usize { - const code_wide = trap_frame.general_purpose_registers[10]; - const buffer_opt: ?[*]u8 = @ptrFromInt(trap_frame.general_purpose_registers[11]); - const buffer_ptr = buffer_opt orelse return Error.ZeroAddressSupplied; - const len = trap_frame.general_purpose_registers[12]; - - const code = std.math.cast(u16, code_wide) orelse { - return ErrorNameError.ErrorCodeOutOfRange; - }; - const buffer = buffer_ptr[0..len]; - - if (code == 0) return 0; - - const error_name = @errorName(@errorFromInt(code)); - - const n = @min(buffer.len, error_name.len); - - paging.setUserMemoryAccess(true); - defer paging.setUserMemoryAccess(false); - - @memcpy(buffer[0..n], error_name[0..n]); - return n; -} - -// consoleWrite(bytes: [*]const u8, len: usize) !usize -fn consoleWrite(trap_frame: *const TrapFrame) !usize { - const vaddr = trap_frame.general_purpose_registers[10]; - const len = trap_frame.general_purpose_registers[11]; - - const procmem: *paging.Table = @ptrFromInt(riscv.satp.read().ppn << 12); - - const flags = paging.EntryFlags.userReadOnly; - const paddr = procmem.translate(vaddr, flags) orelse { - const faulter: *volatile u8 = @ptrFromInt(vaddr); - _ = faulter.*; - unreachable; - }; - - const bytes_ptr: [*]const u8 = @ptrFromInt(paddr); - const bytes = bytes_ptr[0..len]; - - const w = Console.autoChoose().?.writer(); - return w.write(bytes); -} - -// launch(bytes: [*]align(@alignOf(std.elf.Elf64_Ehdr)) const u8, len: usize) !usize -fn launch(trap_frame: 
*const TrapFrame) !usize { - const alignment = @alignOf(std.elf.Elf64_Ehdr); - const bytes_addr = trap_frame.general_purpose_registers[10]; - const bytes_opt: ?[*]const u8 = @ptrFromInt(bytes_addr); - const bytes_noalign = bytes_opt orelse return Error.ZeroAddressSupplied; - const bytes_ptr = try std.math.alignCast(alignment, bytes_noalign); - const len = trap_frame.general_purpose_registers[11]; - - const bytes = bytes_ptr[0..len]; - - paging.setUserMemoryAccess(true); - defer paging.setUserMemoryAccess(false); - - const new_proc = try process.create(mem.page_allocator, bytes); - return new_proc.id; -} - -// end() noreturn -fn end(proc: *process.Info) noreturn { - proc.terminate(); - process.schedule() catch |err| { - std.debug.panic("schedule error: {}", .{err}); - }; -} - -pub const TerminateError = error{ - PidOutOfRange, - ProcessNotFound, -}; - -// terminate(pid: u16, tid: usize) !void -fn terminate(proc: *const process.Info, trap_frame: *const TrapFrame) !void { - const pid_wide = trap_frame.general_purpose_registers[10]; - const pid = std.math.cast(u16, pid_wide) orelse { - return TerminateError.PidOutOfRange; - }; - const tid = trap_frame.general_purpose_registers[11]; - - const target = process.findThread(pid, tid) orelse { - return TerminateError.ProcessNotFound; - }; - target.terminate(); - - if (target.shouldTerminate(proc)) { - process.schedule() catch |err| { - std.debug.panic("schedule error: {}", .{err}); - }; - } -} - -// processId() u16 -fn processId(proc: *const process.Info) usize { - return proc.id; -} - -// threadId() usize -fn threadId(proc: *const process.Info) usize { - return proc.thread_id; -} - -// devicesByKind(kind: hwinfo.DevKind, devices: [*]hwinfo.Dev, len: usize) !usize -fn devicesByKind(trap_frame: *const TrapFrame) !usize { - const kind: hwinfo.DevKind = @enumFromInt(trap_frame.general_purpose_registers[10]); - const devices: [*]hwinfo.Dev = @ptrFromInt(trap_frame.general_purpose_registers[11]); - const len = 
trap_frame.general_purpose_registers[12]; - - var i: usize = 0; - var devs = try hwinfo.byKind(kind); - while (try devs.next()) |dev| { - if (i >= len) break; - - devices[i] = dev; - i += 1; - } - - return i; -} - -// join(channel_id: usize) !void -fn join(proc: *const process.Info, trap_frame: *const TrapFrame) !void { - const id = trap_frame.general_purpose_registers[10]; - return channel.join(proc.id, id); -} - -// leave(channel_id: usize) void -fn leave(proc: *const process.Info, trap_frame: *const TrapFrame) void { - const id = trap_frame.general_purpose_registers[10]; - channel.leave(proc.id, id); -} - -// pass(channel_id: usize, bytes: [*]const u8, len: usize) !void -fn pass(trap_frame: *const TrapFrame) !void { - const id = trap_frame.general_purpose_registers[10]; - const bytes_ptr: [*]const u8 = @ptrFromInt(trap_frame.general_purpose_registers[11]); - const len = trap_frame.general_purpose_registers[12]; - - const bytes = bytes_ptr[0..len]; - const copy = try channel.allocator().alloc(u8, bytes.len); - @memcpy(copy, bytes); - try channel.pass(id, copy); -} - -// receive(channel_id: usize, buffer: [*]u8, len: usize) !usize -fn receive(proc: *const process.Info, trap_frame: *const TrapFrame) !usize { - const id = trap_frame.general_purpose_registers[10]; - const buffer_ptr: [*]u8 = @ptrFromInt(trap_frame.general_purpose_registers[11]); - const len = trap_frame.general_purpose_registers[12]; - - const buffer = buffer_ptr[0..len]; - return channel.receive(proc.id, id, buffer); -} diff --git a/src/mem.zig b/src/mem.zig new file mode 100644 index 0000000..eefa452 --- /dev/null +++ b/src/mem.zig @@ -0,0 +1,261 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const std = @import("std"); +const paging = @import("paging.zig"); +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const maxInt = std.math.maxInt; +const mem = std.mem; + +const Chunk = struct { + flags: Flags, + len: usize, + + const 
Flags = packed struct(u8) { + active: u1, + reserved: u7, + }; + + pub fn next(self: *align(1) Chunk) *align(1) Chunk { + const byte_ptr: [*]u8 = @ptrCast(self); + return @ptrCast(byte_ptr + @sizeOf(Chunk) + self.len); + } + + pub fn take(self: *align(1) Chunk) void { + self.flags.active = 1; + } + + pub fn clear(self: *align(1) Chunk) void { + self.flags = mem.zeroInit(Flags, .{}); + } + + pub fn data(self: *align(1) Chunk) []u8 { + const byte_ptr: [*]u8 = @ptrCast(self); + return byte_ptr[@sizeOf(Chunk)..self.len]; + } +}; + +pub const ChunkAllocatorConfig = struct { + auto_merge_free: bool = true, +}; + +pub fn ChunkAllocator(comptime config: ChunkAllocatorConfig) type { + return struct { + head: ?*align(1) Chunk, + pages: usize, + + const Self = @This(); + + pub fn init(pages: usize) !Self { + const head: *align(1) Chunk = @ptrCast(try paging.zeroedAlloc(pages)); + head.len = (pages * paging.page_size) - @sizeOf(Chunk); + return .{ .head = head, .pages = pages }; + } + + pub fn deinit(self: *Self) void { + if (self.head) |head| { + paging.free(head); + self.head = null; + } + } + + pub fn allocator(self: *Self) Allocator { + return .{ + .ptr = self, + .vtable = &.{ + .alloc = alloc, + .resize = resize, + .free = free, + }, + }; + } + + pub fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 { + _ = ret_addr; + + const self: *Self = @ptrCast(@alignCast(ctx)); + + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); + + var chunk = self.head orelse return null; + const bound = @intFromPtr(chunk) + (self.pages * paging.page_size); + + var predecessor: ?*align(1) Chunk = null; + while (@intFromPtr(chunk) < bound) : (chunk = chunk.next()) { + const adjust_off = mem.alignPointerOffset(chunk.data().ptr, ptr_align) orelse return null; + const aligned_len = len + adjust_off; + + // Is this chunk free and large enough to hold the requested allocation? 
+ if (!@bitCast(chunk.flags.active) and chunk.len >= aligned_len) { + const remaining = chunk.len - aligned_len; + + if (predecessor) |*pred| { + pred.*.len += adjust_off; + } else if (adjust_off != 0) return null; + + chunk = @ptrFromInt(@intFromPtr(chunk) + adjust_off); + chunk.clear(); + chunk.take(); + + if (remaining > @sizeOf(Chunk)) { + chunk.len = len; + + const new_successor = chunk.next(); + + new_successor.clear(); + new_successor.len = remaining - @sizeOf(Chunk); + } + + return chunk.data().ptr; + } + + predecessor = chunk; + } + + return null; + } + + // Only expands into the next free chunk (if there is one). + // You may want to call mergeFree first if auto_merge_free was configured to false. + pub fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { + _ = ret_addr; + + const self: *Self = @ptrCast(@alignCast(ctx)); + + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); + + const head = self.head orelse return false; + const bound = @intFromPtr(head) + (self.pages * paging.page_size); + + const chunk = @as(*align(1) Chunk, @ptrCast(buf.ptr - @sizeOf(Chunk))); + + const adjust_off = mem.alignPointerOffset(buf.ptr, ptr_align) orelse return false; + const aligned_new_len = new_len + adjust_off; + + if (aligned_new_len < chunk.len) { + const regained = chunk.len - aligned_new_len; + if (regained > @sizeOf(Chunk)) { + chunk.len = aligned_new_len; + + const new_successor = chunk.next(); + + new_successor.clear(); + new_successor.len = regained - @sizeOf(Chunk); + } + + return true; + } else if (aligned_new_len > chunk.len) { + const successor = chunk.next(); + if (@intFromPtr(successor) >= bound) return false; + + const total_len = chunk.len + @sizeOf(Chunk) + successor.len; + + if (!@bitCast(successor.flags.active) and aligned_new_len <= total_len) { + const remaining = total_len - aligned_new_len; + + if (remaining > @sizeOf(Chunk)) { + chunk.len = aligned_new_len; + + const 
new_successor = chunk.next(); + + new_successor.clear(); + new_successor.len = remaining - @sizeOf(Chunk); + } else { + chunk.len = total_len; + } + + return true; + } + + return false; + } else return true; + } + + pub fn free(ctx: *anyopaque, old_mem: []u8, log2_old_align: u8, ret_addr: usize) void { + _ = log2_old_align; + _ = ret_addr; + + const self: *Self = @ptrCast(@alignCast(ctx)); + + // Safety check. Do not free memory in uninitialized / undefined pages. + if (self.head == null) return; + + const chunk = @as([*]Chunk, @ptrCast(@alignCast(old_mem.ptr))) - 1; + chunk[0].clear(); + + if (config.auto_merge_free) { + self.mergeFree(); + } + } + + pub fn mergeFree(self: *Self) void { + var chunk = self.head orelse return; + const bound = @intFromPtr(chunk) + (self.pages * paging.page_size); + + while (@intFromPtr(chunk) < bound) : (chunk = chunk.next()) { + const successor = chunk.next(); + + if (@intFromPtr(successor) >= bound) { + // Safety check. + // Should never run if the implementation is working correctly. + // + // Ensure that there is a successor within bounds. + // The loop condition is not sufficient here, it only detects + // non-erroneous list ends (i.e. chunk == bound). 
+ break; + } else if (!@bitCast(chunk.flags.active) and !@bitCast(successor.flags.active)) { + chunk.len += @sizeOf(Chunk) + successor.len; + } + } + } + }; +} + +pub const PageAllocator = struct { + pub const vtable = Allocator.VTable{ + .alloc = alloc, + .resize = resize, + .free = free, + }; + + fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { + _ = ra; + _ = log2_align; + assert(n > 0); + if (n > maxInt(usize) - (paging.page_size - 1)) return null; + const aligned_len = mem.alignForward(usize, n, paging.page_size); + const num_pages = @divExact(aligned_len, paging.page_size); + + const slice = paging.zeroedAlloc(num_pages) catch return null; + assert(mem.isAligned(@intFromPtr(slice.ptr), paging.page_size)); + return slice.ptr; + } + + fn resize(_: *anyopaque, buf_unaligned: []u8, log2_buf_align: u8, new_size: usize, return_address: usize) bool { + _ = log2_buf_align; + _ = return_address; + const new_size_aligned = mem.alignForward(usize, new_size, paging.page_size); + + const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, paging.page_size); + if (new_size_aligned == buf_aligned_len) { + return true; + } + + return false; + } + + fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void { + _ = log2_buf_align; + _ = return_address; + const buf_aligned_len = mem.alignForward(usize, slice.len, paging.page_size); + + paging.free(slice.ptr[0..buf_aligned_len]); + } +}; + +pub const page_allocator = Allocator{ + .ptr = undefined, + .vtable = &PageAllocator.vtable, +}; diff --git a/src/paging.zig b/src/paging.zig new file mode 100644 index 0000000..dfe44cc --- /dev/null +++ b/src/paging.zig @@ -0,0 +1,597 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +// This is an implementation of Sv39 paging, meaning that the virtual addresses +// are 39 bits wide. Sv32 and Sv48 are currently not implemented. 
+ +const std = @import("std"); +const hwinfo = @import("hwinfo.zig"); +const riscv = @import("riscv.zig"); + +// Defined by linker script. +pub const text_start = @extern(*anyopaque, .{ .name = "_text_start" }); +pub const text_end = @extern(*anyopaque, .{ .name = "_text_end" }); +pub const rodata_start = @extern(*anyopaque, .{ .name = "_rodata_start" }); +pub const rodata_end = @extern(*anyopaque, .{ .name = "_rodata_end" }); +pub const data_start = @extern(*anyopaque, .{ .name = "_data_start" }); +pub const data_end = @extern(*anyopaque, .{ .name = "_data_end" }); +pub const bss_start = @extern(*anyopaque, .{ .name = "_bss_start" }); +pub const bss_end = @extern(*anyopaque, .{ .name = "_bss_end" }); +pub const stack_start = @extern(*anyopaque, .{ .name = "_stack_start" }); +pub const stack_end = @extern(*anyopaque, .{ .name = "_stack_end" }); +pub const stvec_stack_start = @extern(*anyopaque, .{ .name = "_stvec_stack_start" }); +pub const stvec_stack_end = @extern(*anyopaque, .{ .name = "_stvec_stack_end" }); +pub const heap_start = @extern(*anyopaque, .{ .name = "_heap_start" }); +pub const heap_end = @extern(*anyopaque, .{ .name = "_heap_end" }); + +inline fn heapSize() usize { + return @intFromPtr(heap_end) - @intFromPtr(heap_start); +} + +pub const page_size: usize = 0x1000; // 4096 bytes + +var num_pages: usize = undefined; +var next_mmio_vaddr: usize = 0xff000000; + +pub var alloc_start: usize = undefined; +pub var kmem: *Table = undefined; + +pub const Error = error{ + ZeroSize, + OutOfMemory, + AlreadyTaken, + NotALeaf, +}; + +pub const Mode = enum(u4) { + bare, + sv39 = 8, + sv48, +}; + +// SATP register, configures and enables the MMU (and thus paging). +pub const Satp = packed struct(usize) { + pub const Asid = u16; + + // Reverse field order so that @bitCast yields a usize with the right order. + // Without this writing the value to the SATP register enables an invalid page table, + // leaves the MMU disabled or causes other bugs. 
+ ppn: u44, + asid: Asid, + mode: Mode, +}; + +// A page descriptor for use by the heap allocator. +pub const Page = struct { + flags: Flags, + + pub const Flags = packed struct { + active: u1, + last: u1, // Last page of contiguous allocation + + pub const clear = .{ + .active = 0, + .last = 0, + }; + }; + + // Marks a page as taken, optionally flagging it as the last page of an allocation. + // Fails if the page is already taken. + // Returns whether the operation was successful. + pub fn take(self: *Page, last: bool) !void { + if (@bitCast(self.flags.active)) return Error.AlreadyTaken; + + self.flags.active = 1; + if (last) self.flags.last = 1; + } +}; + +// Returns the offset from the page base. Works with both physical and virtual addresses. +// Offsets are never translated. +pub fn offsetOf(addr: usize) usize { + // Offset is in bottom 12 bits of both physical and virtual addresses. + return addr & 0xfff; +} + +// Returns the virtual page numbers of a virtual address by paging level. +fn virtualPageNumbers(vaddr: usize) [3]usize { + // Virtual address format: + // + // VPN[2] | VPN[1] | VPN[0] | offset + // 9 bits | 9 bits | 9 bits | 12 bits + // + // Virtual page numbers are indexes into the page table of their level, + // i.e. VPN[2] is an index to the root page table on level 2 + // whereas VPN[1] is an index to the page table on level 1 specified by VPN[2]. + // + // Offsets are never translated. + + return [3]usize{ + (vaddr >> 12) & 0x1ff, + (vaddr >> 21) & 0x1ff, + (vaddr >> 30) & 0x1ff, + }; +} + +// Returns the physical page numbers of a physical address by paging level. +fn physicalPageNumbers(paddr: usize) [3]usize { + // Physical address format: + // + // PPN[2] | PPN[1] | PPN[0] | offset + // 26 bits | 9 bits | 9 bits | 12 bits + // + // PPN[i] is what to map VPN[i] to. + // + // Offsets are never translated. 
+ + return [3]usize{ + (paddr >> 12) & 0x1ff, + (paddr >> 21) & 0x1ff, + (paddr >> 30) & 0x3ff_ffff, + }; +} + +// Returns the page numbers of an address as a single integer. +fn pageNumber(addr: usize) u44 { + return @intCast(addr >> 12); +} + +pub const EntryFlags = packed struct(u8) { + valid: u1, + read: u1, + write: u1, + exec: u1, + user: u1, + global: u1, + accessed: u1, + dirty: u1, + + pub const branch = EntryFlags{ + .valid = 1, + .read = 0, + .write = 0, + .exec = 0, + .user = 0, + .global = 0, + .accessed = 0, + .dirty = 0, + }; + + pub const readOnly = EntryFlags{ + .valid = 1, + .read = 1, + .write = 0, + .exec = 0, + .user = 0, + .global = 0, + .accessed = 1, + .dirty = 0, + }; + + pub const readWrite = EntryFlags{ + .valid = 1, + .read = 1, + .write = 1, + .exec = 0, + .user = 0, + .global = 0, + .accessed = 1, + .dirty = 1, + }; + + pub const readExec = EntryFlags{ + .valid = 1, + .read = 1, + .write = 0, + .exec = 1, + .user = 0, + .global = 0, + .accessed = 1, + .dirty = 0, + }; + + pub const userReadOnly = EntryFlags{ + .valid = 1, + .read = 1, + .write = 0, + .exec = 0, + .user = 1, + .global = 0, + .accessed = 1, + .dirty = 0, + }; + + pub const userReadWrite = EntryFlags{ + .valid = 1, + .read = 1, + .write = 1, + .exec = 0, + .user = 1, + .global = 0, + .accessed = 1, + .dirty = 1, + }; + + pub const userReadExec = EntryFlags{ + .valid = 1, + .read = 1, + .write = 0, + .exec = 1, + .user = 1, + .global = 0, + .accessed = 1, + .dirty = 0, + }; + + pub fn isLeaf(self: EntryFlags) bool { + return @bitCast(self.read) or @bitCast(self.write) or @bitCast(self.exec); + } + + // Returns whether the entry permissions allow the accesses + // specified in the `requested` argument. 
+ pub fn allowAccess(self: EntryFlags, requested: EntryFlags) bool { + if (self.user != requested.user) return false; + if (self.read < requested.read) return false; + if (self.write < requested.write) return false; + if (self.exec < requested.exec) return false; + return true; + } +}; + +pub const Entry = packed struct(u64) { + // Reverse field order so that @bitCast yields a u64 with the right order. + // Without this writing the value to a page table creates an invalid entry, + // thus resulting in page faults or hanging. + flags: EntryFlags, + rsw: u2, // Reserved for supervisor use. Currently unused. + mapping: u44, + reserved: u10, + + // Returns the physical page numbers to map to by paging level. + pub fn physicalPageNumbers(self: Entry) [3]usize { + // Mapping format: + // + // PPN[2] | PPN[1] | PPN[0] + // 26 bits | 9 bits | 9 bits + // + // PPN[i] is what to map VPN[i] to. + + return [3]usize{ + self.mapping & 0x1ff, + (self.mapping >> 9) & 0x1ff, + (self.mapping >> 18) & 0x3ff_ffff, + }; + } + + pub fn mappingAddr(self: Entry) usize { + // Apply an offset of zero since entries always point to an aligned page + // and this function should return a usable memory address. + // Callers can change the offset if needed. + return self.mapping << 12; + } + + pub fn isValid(self: Entry) bool { + return @bitCast(self.flags.valid); + } + + // Returns whether the entry is a mapping (true) or another page table (false). + pub fn isLeaf(self: Entry) bool { + return self.flags.isLeaf(); + } +}; + +pub const Table = struct { + // Do not add any fields. The unmap function relies on mappings pointing to page tables, + // casting them to this data structure. This cast becomes invalid if additional fields + // are added, especially if they preceed the entries field. + + entries: [512]Entry, + + // Create a mapping of a certain virtual page address to a physical page address, + // discarding offsets. 
The mapping is written to the specified level, + // creating page tables as needed. + // + // The mapping must be a leaf, meaning that passing flags + // that indicate no access permissions at all will return an error. + // + // This function internally uses zeroedAlloc to allocate memory for the required page tables, + // but assumes that the physical address to map to has already been allocated by the caller. + pub fn map(root: *Table, vaddr: usize, paddr: usize, flags: EntryFlags, level: usize) !void { + if (!flags.isLeaf()) return Error.NotALeaf; + + const vpn = virtualPageNumbers(vaddr); + + // Grab the entry in the root (level 2) page table. + var v = &root.entries[vpn[2]]; + + // Walk the page table levels from high to low under the assumption that root is valid. + for (level..2) |iInv| { + const i = 1 - iInv; + + // If this entry doesn't point to a lower-level page table or memory page yet, + // allocate one. + if (!v.isValid()) { + const page = try zeroedAlloc(1); + v.* = .{ + .flags = EntryFlags.branch, + .rsw = 0, + .mapping = pageNumber(@intFromPtr(page.ptr)), // Remove the offset, a mapping is just the PPN. + .reserved = 0, + }; + } + + // Get the entries of the existing or newly created page table. + // This cast is safe because the only field of a Table is its entries. + const table: *Table = @ptrFromInt(v.mappingAddr()); + // Grab the entry of the table by indexing it according to the corresponding VPN. + v = &table.entries[vpn[i]]; + } + + // Write the actual mapping to the correct table on the requested level. + v.* = .{ + .flags = flags, + .rsw = 0, + .mapping = @intCast(pageNumber(paddr)), // Remove the offset, a mapping is just the PPN. + .reserved = 0, + }; + } + + // Deallocate child page tables recursively. The provided table itself is not affected, + // allowing partial unmapping of multi-level tables. + // + // This function does not deallocate memory pages mapped by the provided table + // or any of its (recursive) children. 
+ pub fn unmap(table: *Table) void { + for (&table.entries) |*entry| { + if (entry.isValid() and !entry.isLeaf()) { + // This cast is safe because the only field of a Table is its entries. + const lowerLevelTable: *Table = @ptrFromInt(entry.mappingAddr()); + lowerLevelTable.unmap(); + entry.flags.valid = 0; + free(lowerLevelTable); + } + } + } + + pub fn unmapEntry(root: *Table, vaddr: usize) void { + const vpn = virtualPageNumbers(vaddr); + + var v = &root.entries[vpn[2]]; + + for (0..3) |iInv| { + const i = 2 - iInv; + + if (!v.isValid()) { + break; + } else if (v.isLeaf()) { + v.flags.valid = 0; + // IMPORTANT: Flush TLB correctly + // if sfence.vma in process.switchTo is ever optimized. + return; + } + + const entry: *[512]Entry = @ptrFromInt(v.mappingAddr()); + v = &entry[vpn[i - 1]]; + } + } + + // Returns the physical address to a virtual address using the provided level 2 page table. + // This can be used to access virtual addresses whose page table isn't active + // in the MMU / SATP CSR (Control and Status Register), making it possible + // to access the memory space of a user mode process (from its perspective) + // from supervisor or machine mode cleanly. + // + // If the permissions requested using the `flags` argument exceed those + // found in the page table entry, no value is returned. + // + // The absence of a return value is equivalent to a page fault. + pub fn translate(root: *const Table, vaddr: usize, flags: EntryFlags) ?usize { + const vpn = virtualPageNumbers(vaddr); + + // Grab the entry in the root (level 2) page table. + var v = &root.entries[vpn[2]]; + + // Walk the page table levels from high to low. + for (0..3) |iInv| { + const i = 2 - iInv; + + if (!v.isValid()) { + break; + } else if (v.isLeaf()) { + // Mapping found. + + if (!v.flags.allowAccess(flags)) return null; + + // Create a mask starting directly below / after PN[i]. + // Since all levels can have leaves i is not guaranteed to be zero. 
+ const offset_mask = (@as(usize, 1) << @intCast(12 + 9 * i)) - 1; + const offset = vaddr & offset_mask; + const ppn_joined = v.mappingAddr() & ~offset_mask; + + return ppn_joined | offset; + } + + // Get the entries of the page table of the current level. + const entry: *[512]Entry = @ptrFromInt(v.mappingAddr()); + // Grab the entry of the table by indexing it according to the corresponding VPN. + v = &entry[vpn[i - 1]]; + } + + return null; + } + + // Creates an identity mapping for all pages needed for the specified range + // using the map function. An identity mapping doesn't actually translate + // memory addresses, virtual addresses and physical addresses are the same. + // + // The start address is inclusive while end is exclusive. + // + // This is still useful because it can be used to prevent the kernel + // from accessing machine-reserved memory by accident. + pub fn identityMapRange(root: *Table, start: usize, end: usize, flags: EntryFlags) !void { + // Mask out the offset within the starting page. + const start_page = start & ~(page_size - 1); + // Mask out the offset within the ending page, but ensure the returned page address + // is always the last required page for the mapping (end is exclusive, + // so subtracting 1 ends up in the previous page on boundaries, + // eliminating one useless mapping). The resulting value is inclusive. + const end_page = (end - 1) & ~(page_size - 1); + + var page = start_page; + while (page <= end_page) : (page += page_size) { + try root.map(page, page, flags, 0); + } + } + + // Constructs the SATP register value needed to activate the specified page table + // using the provided Address Space Identifier (ASID). + // + // The kernel page table always has ASID 0 (not mandated by the RISC-V specification). 
+ pub fn satp(root: *const Table, asid: Satp.Asid) Satp { + return .{ + .ppn = @intCast(pageNumber(@intFromPtr(root))), + .asid = asid, + .mode = .sv39, + }; + } + + pub fn mapKernel(root: *Table) !void { + try root.identityMapRange(@intFromPtr(text_start), @intFromPtr(text_end), EntryFlags.readExec); + try root.identityMapRange(@intFromPtr(rodata_start), @intFromPtr(rodata_end), EntryFlags.readOnly); + try root.identityMapRange(@intFromPtr(data_start), @intFromPtr(data_end), EntryFlags.readWrite); + try root.identityMapRange(@intFromPtr(bss_start), @intFromPtr(bss_end), EntryFlags.readWrite); + try root.identityMapRange(@intFromPtr(stack_start), @intFromPtr(stack_end), EntryFlags.readWrite); + try root.identityMapRange(@intFromPtr(stvec_stack_start), @intFromPtr(stvec_stack_end), EntryFlags.readWrite); + try root.identityMapRange(@intFromPtr(heap_start), @intFromPtr(heap_end), EntryFlags.readWrite); + } + + pub fn mapDevice(root: *Table, reg: *hwinfo.Reg) !void { + const physical_start = reg.addr & ~(page_size - 1); + const physical_end = (reg.addr + reg.len - 1) & ~(page_size - 1); + + reg.addr = next_mmio_vaddr | (reg.addr & (page_size - 1)); + + var paddr = physical_start; + while (paddr <= physical_end) : (paddr += page_size) { + try root.map(next_mmio_vaddr, paddr, EntryFlags.readWrite, 0); + next_mmio_vaddr += page_size; + } + } +}; + +pub fn init() !void { + num_pages = heapSize() / page_size; + const pages: [*]Page = @ptrCast(heap_start); + + for (0..num_pages) |i| { + pages[i].flags = Page.Flags.clear; + } + + // Start allocating beyond page descriptors. + const descriptors_end = @intFromPtr(heap_start) + num_pages * @sizeOf(Page); + alloc_start = std.mem.alignForward(usize, descriptors_end, page_size); + + kmem = @ptrCast(try zeroedAlloc(1)); + try kmem.mapKernel(); +} + +// Allocate memory pages. Passing n <= 0 results in an error. 
pub fn alloc(n: usize) ![]align(page_size) u8 {
    // n is unsigned, so "n <= 0" can only mean n == 0.
    if (n == 0) return Error.ZeroSize;

    // Guard against unsigned underflow below: requesting more pages than the
    // heap holds previously computed `num_pages - n` as a huge wrapped value
    // and scanned far past the descriptor table.
    if (n > num_pages) return Error.OutOfMemory;

    const descriptors: [*]Page = @ptrCast(heap_start);

    // Iterate over potential starting points. The bound leaves exactly enough
    // room for the whole allocation and includes the last viable start
    // (i == num_pages - n); the previous exclusive bound of `num_pages - n`
    // wrongly skipped it, making an allocation of the entire heap impossible.
    for (0..num_pages - n + 1) |i| {
        if (!@bitCast(descriptors[i].flags.active)) {
            // Free starting page found.

            var insufficient = false;

            // Check if there is enough contiguous free space for the whole
            // allocation. If not, move on to the next potential starting point.
            for (i..n + i) |j| {
                if (@bitCast(descriptors[j].flags.active)) {
                    insufficient = true;
                    break;
                }
            }

            if (!insufficient) {
                // Mark all allocated pages as taken,
                // flagging the final one as the end of the allocation.
                for (i..n + i - 1) |j| {
                    try descriptors[j].take(false);
                }
                try descriptors[n + i - 1].take(true);

                // Construct a pointer to the first page using its descriptor number.
                const first = alloc_start + i * page_size;
                const allocation: [*]align(page_size) u8 = @ptrFromInt(first);
                return allocation[0 .. n * page_size];
            }
        }
    }

    return Error.OutOfMemory;
}

// Free (contiguous) memory page(s).
pub fn free(memory: anytype) void {
    const Slice = @typeInfo(@TypeOf(memory)).Pointer;
    const bytes = std.mem.asBytes(memory);
    const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
    if (bytes_len == 0) return;

    // Restore the address of the page descriptor from the address of its
    // contents by recovering the descriptor number and indexing the
    // descriptor table at the start of the heap with it.
    const descriptor_offset = (@intFromPtr(bytes.ptr) - alloc_start) / page_size;
    // Scale by the descriptor size explicitly; the old code implicitly
    // relied on @sizeOf(Page) == 1, which currently holds but is fragile.
    const addr = @intFromPtr(heap_start) + descriptor_offset * @sizeOf(Page);

    var page: [*]Page = @ptrFromInt(addr);

    // Mark all but the last page as free.
    // A double-free check is performed on the last page before it is freed.
+ while (@bitCast(page[0].flags.active) and !@bitCast(page[0].flags.last)) : (page += 1) { + page[0].flags = Page.Flags.clear; + } + + // Mark the last page as free. + page[0].flags = Page.Flags.clear; +} + +// Allocate memory pages and overwrite their contents with zeroes for added security. +// Passing n <= 0 results in an error. +pub fn zeroedAlloc(n: usize) Error![]align(page_size) u8 { + const ret = try alloc(n); + + const satp = riscv.satp.read(); + if (satp.mode != .bare) { + const page_table: *Table = @ptrFromInt(satp.ppn << 12); + const start = @intFromPtr(ret.ptr); + const end = start + ret.len; + try page_table.identityMapRange(start, end, EntryFlags.readWrite); + } + + // Write zeroes in batches of 64-bit to reduce the amount of store instructions. + // The remainder / remaining bytes don't need to be accounted for + // because page_size (4096) is divisible by 8. + + const len = (n * page_size) / 8; + const ptr: []volatile u64 = @as([*]volatile u64, @ptrCast(ret))[0..len]; + + for (0..len) |i| { + ptr[i] = 0; + } + + return ret; +} + +pub fn setUserMemoryAccess(enable: bool) void { + var sstatus = riscv.sstatus.read(); + sstatus.supervisor_user_memory_access = @bitCast(enable); + riscv.sstatus.write(sstatus); +} diff --git a/src/plic.zig b/src/plic.zig new file mode 100644 index 0000000..7e54992 --- /dev/null +++ b/src/plic.zig @@ -0,0 +1,96 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const hwinfo = @import("hwinfo.zig"); + +pub var default: Plic = undefined; + +pub const Error = error{ + InterruptOutOfRange, + ContextOutOfRange, +}; + +pub const Context = packed struct { + priority_threshold: u32, + claim_or_complete: u32, +}; + +pub const Plic = struct { + mmio_register: hwinfo.Reg, + + const priority_offset = 0x0; + const enable_offset = 0x2000; + const context_offset_zero = 0x200000; + const context_offset_nonzero = 0x201000; + + pub const num_contexts = 15872; + + // A value greater than or 
equal to num_contexts for context is an error. + // A value of 0 for interrupt results in an error. + pub fn setEnabled(self: Plic, context: u14, interrupt: u10, enable: bool) !void { + if (context >= num_contexts) return Error.ContextOutOfRange; + if (interrupt == 0) return Error.InterruptOutOfRange; + + const mmio_slice = self.mmioSlice(); + const enable_ptr: *volatile [num_contexts][32]u32 = @alignCast(@ptrCast(&mmio_slice[enable_offset])); + + const register = interrupt / 32; + const bit = @as(u32, 1) << @intCast(interrupt & 0x1f); + + if (enable) { + enable_ptr[context][register] |= bit; + } else { + enable_ptr[context][register] &= ~bit; + } + } + + // A value of 0 for interrupt results in an error. + pub fn setPriority(self: Plic, interrupt: u10, priority: u3) !void { + if (interrupt == 0) return Error.InterruptOutOfRange; + + const mmio_slice = self.mmioSlice(); + const priority_ptr: *volatile [1024]u32 = @alignCast(@ptrCast(&mmio_slice[priority_offset])); + + priority_ptr[interrupt] = @intCast(priority); + } + + // A value greater than or equal to num_contexts for context is an error. + pub fn setPriorityThreshold(self: Plic, context: u14, threshold: u3) !void { + const context_ptr = try self.contextPtr(context); + context_ptr.priority_threshold = threshold; + } + + // A value greater than or equal to num_contexts for context is an error. + // Non-null interrupts are guaranteed to be non-zero. + pub fn claim(self: Plic, context: u14) !?u10 { + const context_ptr = try self.contextPtr(context); + const interrupt = context_ptr.claim_or_complete; + + if (interrupt != 0) return @intCast(interrupt) else return null; + } + + // A value greater than or equal to num_contexts for context is an error. + // A value of 0 for interrupt results in an error. 
+ pub fn complete(self: Plic, context: u14, interrupt: u10) !void { + if (interrupt == 0) return Error.InterruptOutOfRange; + + const context_ptr = try self.contextPtr(context); + context_ptr.claim_or_complete = interrupt; + } + + fn contextPtr(self: Plic, context: u14) !*volatile Context { + if (context >= num_contexts) return Error.ContextOutOfRange; + + const mmio_slice = self.mmio_register.slice(u8); + + if (context == 0) { + return @alignCast(@ptrCast(&mmio_slice[context_offset_zero])); + } else { + const context_offset: usize = context - 1; + const ptr_offset = context_offset * (@sizeOf(u32) + @sizeOf(Context)); + const context_ptr = &mmio_slice[context_offset_nonzero + ptr_offset]; + return @alignCast(@ptrCast(context_ptr)); + } + } +}; diff --git a/src/process.zig b/src/process.zig new file mode 100644 index 0000000..b5afade --- /dev/null +++ b/src/process.zig @@ -0,0 +1,380 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const builtin = @import("builtin"); +const std = @import("std"); +const TrapFrame = @import("TrapFrame.zig"); +const paging = @import("paging.zig"); +const riscv = @import("riscv.zig"); +const time = @import("sbi/time.zig"); +const Allocator = std.mem.Allocator; +const elf = std.elf; + +pub const schedule_interval_millis = 1; + +pub var list = std.mem.zeroInit(std.DoublyLinkedList(Info), .{}); +var next_pid: u16 = 1; + +const num_stack_pages = 2; + +pub const Error = error{ + EmptySchedule, + NoInit, + TooManyThreads, +}; + +pub const ExeError = error{ + TooSmall, + BadEndian, + BadArch, + BadBitLen, + NotStaticExe, + LengthOutOfBounds, + ReservedMemMapping, + BranchPerms, + WritableCode, +}; + +pub const State = enum(u8) { + waiting, + active, + sleeping, + terminated, +}; + +pub const Info = struct { + allocator: Allocator, + id: u16, + thread_id: usize, + trap_frame: TrapFrame, + sections: std.ArrayList([]align(paging.page_size) u8), + stack: []align(paging.page_size) u8, + pc: usize, + 
page_table: *paging.Table,
    state: State,

    // SATP register value that activates this process's page table,
    // using the process id as the address space identifier.
    pub fn satp(self: *const Info) paging.Satp {
        return self.page_table.satp(self.id);
    }

    // Create an additional thread of this process starting at `entry`.
    // Uses `allocator` if provided, otherwise the process's own allocator.
    // The new thread shares the process's page table and ELF sections but
    // gets its own freshly mapped stack.
    pub fn createThread(self: *const Info, allocator: ?Allocator, entry: usize) !*Info {
        const alloc = allocator orelse self.allocator;

        var trap_frame = std.mem.zeroInit(TrapFrame, .{});

        const stack = try paging.zeroedAlloc(num_stack_pages);
        errdefer paging.free(stack);

        const stack_top = @intFromPtr(stack.ptr) + num_stack_pages * paging.page_size;
        try self.page_table.identityMapRange(@intFromPtr(stack.ptr), stack_top, paging.EntryFlags.userReadWrite);

        // x2 is the stack pointer.
        trap_frame.general_purpose_registers[2] = stack_top;

        const thread_id = std.math.add(usize, self.thread_id, 1) catch {
            return Error.TooManyThreads;
        };

        const proc = Info{
            .allocator = alloc,
            .id = self.id,
            .thread_id = thread_id,
            .trap_frame = trap_frame,
            // Fixed: Info has no `pages` or `cleanup_hook` fields; the struct
            // literal must initialize `sections` instead. Sections are shared
            // with the main thread, which owns and eventually frees them.
            .sections = self.sections,
            .stack = stack,
            .pc = entry,
            .page_table = self.page_table,
            .state = .waiting,
        };

        const proc_node = try alloc.create(std.DoublyLinkedList(Info).Node);
        proc_node.data = proc;
        list.prepend(proc_node);

        return &proc_node.data;
    }

    // Terminate this thread. If this is the main thread (thread id 0), all
    // threads of the process are terminated and the page table and ELF
    // sections are released as well.
    pub fn terminate(
        self: *Info,
    ) void {
        // Switch to the kernel page table before tearing this one down.
        riscv.satp.write(paging.kmem.satp(0));

        // Copy everything still needed out of `self` up front: `self`
        // typically points into one of the list nodes removed below and must
        // not be dereferenced once that node has been destroyed (the old
        // code read self.stack etc. after the free).
        const own_id = self.id;
        const own_thread_id = self.thread_id;
        const own_stack = self.stack;
        const own_page_table = self.page_table;
        var own_sections = self.sections;

        var node = list.first;
        while (node) |proc_node| {
            // Advance before any removal: destroy() frees proc_node, so the
            // old loop's continue expression read proc_node.next after free.
            node = proc_node.next;

            const candidate = &proc_node.data;
            // Same predicate as shouldTerminate, evaluated on copied fields
            // so it stays valid after self's own node is destroyed.
            const matches = candidate.id == own_id and
                (candidate.thread_id == own_thread_id or own_thread_id == 0);

            if (matches) {
                if (candidate.thread_id != own_thread_id) {
                    // Release the sibling thread's stack directly. The old
                    // recursive candidate.terminate() call also removed and
                    // destroyed proc_node itself, which this loop then
                    // removed and destroyed a second time (double free).
                    paging.free(candidate.stack);
                }

                list.remove(proc_node);
                // Destroy with the allocator that created the node
                // (createThread may have used a different one than self's).
                candidate.allocator.destroy(proc_node);
            }
        }

        paging.free(own_stack);

        if (own_thread_id == 0) {
            own_page_table.unmap();
            paging.free(own_page_table);

            // Free all ELF section allocations and the list tracking them.
            for (own_sections.items) |section| {
                paging.free(section);
            }
            own_sections.deinit();
        }
    }

    // Free all ELF section allocations and the list that tracks them.
    // NOTE(review): retained for API compatibility; terminate() now frees
    // sections via copied state to avoid touching a destroyed node.
    fn freeSections(self: *Info) void {
        defer self.sections.deinit();
        for (self.sections.items) |section| {
            paging.free(section);
        }
    }

    // Resume execution after a syscall: skip the ecall instruction and make
    // the thread schedulable again.
    pub fn allowResume(self: *Info) void {
        self.pc += 4; // Skip ecall instruction
        self.state = .waiting;
    }

    pub fn
shouldTerminate(self: *const Info, candidate: *const Info) bool { + return candidate.id == self.id and self.shouldTerminateThread(candidate); + } + + fn shouldTerminateThread(self: *const Info, candidate: *const Info) bool { + return candidate.thread_id == self.thread_id or self.thread_id == 0; + } +}; + +pub fn next() ?*Info { + if (list.popFirst()) |info| { + list.append(info); + + if (info.data.state != .waiting) return next(); + return &info.data; + } + + return null; +} + +pub fn schedule() !noreturn { + if (next()) |proc| { + try time.interruptInMillis(schedule_interval_millis); + switchTo(proc); + } + + return Error.EmptySchedule; +} + +pub fn switchTo(proc: *Info) noreturn { + proc.state = .active; + + var sstatus = riscv.sstatus.read(); + sstatus.previous_privilege = .user; + sstatus.user_interrupt_enable = 0; + sstatus.supervisor_interrupt_enable = 0; + sstatus.user_prior_interrupt_enable = 1; + sstatus.supervisor_prior_interrupt_enable = 1; + riscv.sstatus.write(sstatus); + + riscv.sscratch.write(@intFromPtr(&proc.trap_frame)); + riscv.sepc.write(proc.pc); + riscv.satp.write(proc.satp()); + + // Probably not always needed. Let's not take the risk for now. 
+ asm volatile ( + \\ sfence.vma + ); + + asm volatile ( + \\ csrr t6, sscratch + \\ + \\ ld x1, 8(t6) + \\ ld x2, 16(t6) + \\ ld x3, 24(t6) + \\ ld x4, 32(t6) + \\ ld x5, 40(t6) + \\ ld x6, 48(t6) + \\ ld x7, 56(t6) + \\ ld x8, 64(t6) + \\ ld x9, 72(t6) + \\ ld x10, 80(t6) + \\ ld x11, 88(t6) + \\ ld x12, 96(t6) + \\ ld x13, 104(t6) + \\ ld x14, 112(t6) + \\ ld x15, 120(t6) + \\ ld x16, 128(t6) + \\ ld x17, 136(t6) + \\ ld x18, 144(t6) + \\ ld x19, 152(t6) + \\ ld x20, 160(t6) + \\ ld x21, 168(t6) + \\ ld x22, 176(t6) + \\ ld x23, 184(t6) + \\ ld x24, 192(t6) + \\ ld x25, 200(t6) + \\ ld x26, 208(t6) + \\ ld x27, 216(t6) + \\ ld x28, 224(t6) + \\ ld x29, 232(t6) + \\ ld x30, 240(t6) + \\ ld x31, 248(t6) + \\ + \\ sret + ); + + unreachable; +} + +const HdrBuf = *align(@alignOf(elf.Elf64_Ehdr)) const [@sizeOf(elf.Elf64_Ehdr)]u8; + +pub fn create(allocator: Allocator, elf_buf: []align(@alignOf(elf.Elf64_Ehdr)) const u8) !*Info { + if (elf_buf.len < @sizeOf(elf.Elf64_Ehdr)) return ExeError.TooSmall; + + const hdr_buf: HdrBuf = elf_buf[0..@sizeOf(elf.Elf64_Ehdr)]; + const hdr = try elf.Header.parse(@ptrCast(hdr_buf)); + + try validateElfHeader(hdr, hdr_buf); + + const procmem: *paging.Table = @ptrCast(try paging.zeroedAlloc(1)); + errdefer paging.free(procmem); + + try procmem.mapKernel(); + + const parse_source = std.io.fixedBufferStream(elf_buf); + + var sections = std.ArrayList([]align(paging.page_size) u8).init(allocator); + + var it = hdr.program_header_iterator(parse_source); + while (try it.next()) |phdr| { + if (phdr.p_type != elf.PT_LOAD) continue; + if (phdr.p_filesz == 0 or phdr.p_memsz == 0) continue; + if (phdr.p_offset + phdr.p_filesz >= elf_buf.len) { + return ExeError.LengthOutOfBounds; + } + + const offset = paging.offsetOf(phdr.p_vaddr); + const memsz_aligned = std.mem.alignForward(usize, offset + phdr.p_memsz, paging.page_size); + const num_pages = @divExact(memsz_aligned, paging.page_size); + + const pages = try paging.zeroedAlloc(num_pages); + 
errdefer paging.free(pages); + + try sections.append(pages); + + const sz = @min(phdr.p_filesz, phdr.p_memsz); + @memcpy(pages[offset .. offset + sz], elf_buf[phdr.p_offset .. phdr.p_offset + sz]); + + for (0..num_pages) |page| { + const vaddr = phdr.p_vaddr + page * paging.page_size; + const paddr = @intFromPtr(pages.ptr) + page * paging.page_size; + const flags = paging.EntryFlags{ + .valid = 1, + .read = @bitCast(phdr.p_flags & elf.PF_R != 0), + .write = @bitCast(phdr.p_flags & elf.PF_W != 0), + .exec = @bitCast(phdr.p_flags & elf.PF_X != 0), + .user = 1, + .global = 0, + .accessed = 1, + .dirty = @bitCast(phdr.p_flags & elf.PF_W != 0), + }; + + if (vaddr >= @intFromPtr(paging.text_start) and vaddr < paging.alloc_start) { + return ExeError.ReservedMemMapping; + } + if (!@bitCast(flags.read) and !@bitCast(flags.write) and !@bitCast(flags.exec)) { + return ExeError.BranchPerms; + } + if (@bitCast(flags.write) and @bitCast(flags.exec)) { + return ExeError.WritableCode; + } + + try procmem.map(vaddr, paddr, flags, 0); + } + } + + const stack = try paging.zeroedAlloc(num_stack_pages); + errdefer paging.free(stack); + + const stack_top = @intFromPtr(stack.ptr) + num_stack_pages * paging.page_size; + try procmem.identityMapRange(@intFromPtr(stack.ptr), stack_top, paging.EntryFlags.userReadWrite); + + var proc = Info{ + .allocator = allocator, + .id = next_pid, + .thread_id = 0, + .trap_frame = std.mem.zeroInit(TrapFrame, .{}), + .sections = sections, + .stack = @ptrCast(stack), + .pc = hdr.entry, + .page_table = procmem, + .state = .waiting, + }; + proc.trap_frame.general_purpose_registers[2] = stack_top; + + next_pid += 1; + + const proc_node = try allocator.create(std.DoublyLinkedList(Info).Node); + proc_node.data = proc; + list.prepend(proc_node); + + return &proc_node.data; +} + +pub fn run(allocator: Allocator, bytes: []align(@alignOf(elf.Elf64_Ehdr)) const u8) !noreturn { + const proc = try create(allocator, @alignCast(bytes[0..])); + + try 
time.interruptInMillis(schedule_interval_millis); + switchTo(proc); +} + +fn validateElfHeader(hdr: elf.Header, hdr_buf: *align(@alignOf(elf.Elf64_Ehdr)) const [@sizeOf(elf.Elf64_Ehdr)]u8) !void { + const arch = builtin.cpu.arch; + + if (hdr.endian != arch.endian()) return ExeError.BadEndian; + if (hdr.machine != arch.toElfMachine()) return ExeError.BadArch; + if (!hdr.is_64) return ExeError.BadBitLen; + + const hdr64 = @as(*const elf.Elf64_Ehdr, @ptrCast(hdr_buf)); + + if (hdr64.e_type != .EXEC) return ExeError.NotStaticExe; +} + +fn usizeFromArg(arg: anytype) usize { + return switch (@typeInfo(@TypeOf(arg))) { + .Pointer => |ptr| switch (ptr.size) { + .Slice => @intFromPtr(arg.ptr), + else => @intFromPtr(arg), + }, + else => arg, + }; +} + +pub fn findThread(pid: u16, thread_id: usize) ?*Info { + var node = list.first; + while (node) |proc_node| : (node = proc_node.next) { + if (proc_node.data.id == pid and proc_node.data.thread_id == thread_id) { + return &proc_node.data; + } + } + + return null; +} + +pub fn mainThread(pid: u16) ?*Info { + return findThread(pid, 0); +} + +pub fn latestThread(pid: u16) ?*Info { + var latest: ?*Info = null; + + var node = list.first; + while (node) |proc_node| : (node = proc_node.next) { + if (proc_node.data.id == pid) { + if (latest) |proc| { + if (proc_node.data.thread_id > proc.thread_id) { + latest = &proc_node.data; + } + } else latest = &proc_node.data; + } + } + + return latest; +} diff --git a/src/riscv.zig b/src/riscv.zig new file mode 100644 index 0000000..e68b3fb --- /dev/null +++ b/src/riscv.zig @@ -0,0 +1,112 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const std = @import("std"); +const interrupts = @import("interrupts.zig"); +const paging = @import("paging.zig"); + +pub const Privilege = enum(u1) { + user, + supervisor, +}; + +pub const ExtensionState = enum(u2) { + off, + initial, + clean, + dirty, +}; + +pub const Xlen = enum(u2) { + rv32 = 1, + rv64, + rv128, 
+}; + +pub const Sstatus = packed struct(usize) { + user_interrupt_enable: u1, + supervisor_interrupt_enable: u1, + reserved0: u2, + user_prior_interrupt_enable: u1, + supervisor_prior_interrupt_enable: u1, + reserved1: u2, + previous_privilege: Privilege, + reserved2: u4, + floating_point_state: ExtensionState, + user_extension_state: ExtensionState, + reserved3: u1, + supervisor_user_memory_access: u1, + make_executable_readable: u1, + reserved4: u12, + user_xlen: Xlen, + reserved5: u29, + need_state_saving: u1, // Read-only. +}; + +pub const SbiRet = struct { + err: isize, + val: isize, +}; + +pub fn ecall(ext_id: usize, fn_id: usize, a0: usize, a1: usize, a2: usize) SbiRet { + var ret = SbiRet{ .err = 0, .val = 0 }; + + asm volatile ( + \\ ecall + \\ sw a0, 0(%[err]) + \\ sw a1, 0(%[val]) + : + : [err] "r" (&ret.err), + [val] "r" (&ret.val), + [eid] "{a7}" (ext_id), + [fid] "{a6}" (fn_id), + [a0] "{a0}" (a0), + [a1] "{a1}" (a1), + [a2] "{a2}" (a2), + ); + + return ret; +} + +pub fn stackPointer() usize { + return asm volatile ("" + : [value] "={sp}" (-> usize), + ); +} + +pub const satp = Csr(paging.Satp, "satp"); +pub const sstatus = Csr(Sstatus, "sstatus"); +pub const sie = Csr(interrupts.Enable, "sie"); +pub const sip = Csr(interrupts.Enable, "sip"); +pub const sscratch = Csr(usize, "sscratch"); +pub const sepc = Csr(usize, "sepc"); +pub const stval = Csr(usize, "stval"); +pub const time = Csr(usize, "time"); + +pub fn Csr(comptime T: type, csr: []const u8) type { + if (csr.len > 8) @compileError("CSR name length exceeds 8 characters"); + + return struct { + pub inline fn read() T { + comptime var buf = [_]u8{0} ** 23; + + const bits = asm volatile (std.fmt.bufPrint(buf[0..], "csrr %[bits], {s}", .{csr}) catch unreachable + : [bits] "=r" (-> usize), + ); + + return @bitCast(bits); + } + + pub inline fn write(value: T) void { + const bits: usize = @bitCast(value); + + comptime var buf = [_]u8{0} ** 23; + + asm volatile (std.fmt.bufPrint(buf[0..], "csrw {s}, 
%[bits]", .{csr}) catch unreachable + : + : [bits] "r" (bits), + ); + } + }; +} diff --git a/src/sbi.zig b/src/sbi.zig new file mode 100644 index 0000000..fc8afde --- /dev/null +++ b/src/sbi.zig @@ -0,0 +1,127 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const riscv = @import("riscv.zig"); + +pub const Error = error{ + Success, + Failed, + NotSupported, + InvalidParam, + Denied, + InvalidAddr, + AlreadyAvail, + AlreadyStarted, + AlreadyStopped, + NoSharedMem, + InvalidState, + BadRange, + SbiUnknown, +}; + +pub fn errorFromCode(code: isize) Error { + return switch (code) { + 0 => Error.Success, + -1 => Error.Failed, + -2 => Error.NotSupported, + -3 => Error.InvalidParam, + -4 => Error.Denied, + -5 => Error.InvalidAddr, + -6 => Error.AlreadyAvail, + -7 => Error.AlreadyStarted, + -8 => Error.AlreadyStopped, + -9 => Error.NoSharedMem, + -10 => Error.InvalidState, + -11 => Error.BadRange, + else => Error.SbiUnknown, + }; +} + +const BaseExtId: usize = 0x10; + +const BaseFnId = enum(usize) { + GetSpecVer, + GetImpId, + GetImpVer, + ProbeExt, + GetMVendorId, + GetMArchId, + GetMImpId, +}; + +pub const ImpId = enum(isize) { + Bbl, + OpenSbi, + Xvisor, + Kvm, + RustSbi, + Diosix, + Coffer, + Xen, + PolarFire, + _, +}; + +pub fn specVer() !isize { + const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetSpecVer), 0, 0, 0); + if (ret.err != 0) { + return errorFromCode(ret.err); + } + + return ret.val; +} + +pub fn impId() !ImpId { + const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetImpId), 0, 0, 0); + if (ret.err != 0) { + return errorFromCode(ret.err); + } + + return @enumFromInt(ret.val); +} + +pub fn impVer() !isize { + const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetImpVer), 0, 0, 0); + if (ret.err != 0) { + return errorFromCode(ret.err); + } + + return ret.val; +} + +pub fn probeExt(ext_id: usize) !bool { + const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.ProbeExt), ext_id, 0, 0); + 
if (ret.err != 0) { + return errorFromCode(ret.err); + } + + return ret.val != 0; +} + +pub fn mVendorId() !isize { + const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetMVendorId), 0, 0, 0); + if (ret.err != 0) { + return errorFromCode(ret.err); + } + + return ret.val; +} + +pub fn mArchId() !isize { + const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetMarchId), 0, 0, 0); + if (ret.err != 0) { + return errorFromCode(ret.err); + } + + return ret.val; +} + +pub fn mImpId() !isize { + const ret = riscv.ecall(BaseExtId, @intFromEnum(BaseFnId.GetMImpId), 0, 0, 0); + if (ret.err != 0) { + return errorFromCode(ret.err); + } + + return ret.val; +} diff --git a/src/sbi/debug_console.zig b/src/sbi/debug_console.zig new file mode 100644 index 0000000..2bcc097 --- /dev/null +++ b/src/sbi/debug_console.zig @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const std = @import("std"); +const riscv = @import("../riscv.zig"); +const sbi = @import("../sbi.zig"); + +const ExtId: usize = 0x4442434E; + +const FnId = enum(usize) { + Write, + Read, + WriteByte, +}; + +pub const Writer = std.io.Writer(void, sbi.Error, write); + +fn write(_: void, bytes: []const u8) !usize { + const ret = riscv.ecall(ExtId, @intFromEnum(FnId.Write), bytes.len, @intFromPtr(bytes.ptr), 0); + if (ret.err != 0) { + return sbi.errorFromCode(ret.err); + } + + return @intCast(ret.val); +} + +pub fn writer() !Writer { + if (!try sbi.probeExt(ExtId)) { + return sbi.Error.NotSupported; + } + + return .{ .context = {} }; +} diff --git a/src/sbi/legacy.zig b/src/sbi/legacy.zig new file mode 100644 index 0000000..858899c --- /dev/null +++ b/src/sbi/legacy.zig @@ -0,0 +1,40 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const std = @import("std"); +const riscv = @import("../riscv.zig"); +const sbi = @import("../sbi.zig"); + +const ExtId = enum(usize) { + SetTimer, + ConsolePutchar, + 
ConsoleGetchar, + ClearIpi, + SendIpi, + RemoteFenceI, + RemoteSFenceVma, + RemoteSFenceVmaAsid, + Shutdown, +}; + +pub const Writer = std.io.Writer(void, sbi.Error, write); + +fn write(_: void, bytes: []const u8) !usize { + for (bytes) |byte| { + const ret = riscv.ecall(@intFromEnum(ExtId.ConsolePutchar), 0, byte, 0, 0); + if (ret.err != 0) { + return sbi.errorFromCode(ret.err); + } + } + + return bytes.len; +} + +pub fn writer() !Writer { + if (!try sbi.probeExt(@intFromEnum(ExtId.ConsolePutchar))) { + return sbi.Error.NotSupported; + } + + return .{ .context = {} }; +} diff --git a/src/sbi/sys_reset.zig b/src/sbi/sys_reset.zig new file mode 100644 index 0000000..9a3d62b --- /dev/null +++ b/src/sbi/sys_reset.zig @@ -0,0 +1,37 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const riscv = @import("../riscv.zig"); +const sbi = @import("../sbi.zig"); + +const ExtId: usize = 0x53525354; + +const FnId = enum(usize) { + Reset, +}; + +pub const Type = enum(u32) { + Shutdown, + ColdReboot, + WarmReboot, +}; + +pub const Reason = enum(u32) { + None, + SysErr, +}; + +pub fn reset(@"type": Type, reason: Reason) !void { + if (!try sbi.probeExt(ExtId)) { + return sbi.Error.NotSupported; + } + + const type_id = @intFromEnum(@"type"); + const reason_id = @intFromEnum(reason); + + const ret = riscv.ecall(ExtId, @intFromEnum(FnId.Reset), type_id, reason_id, 0); + if (ret.err != 0) { + return sbierr.errorFromCode(ret.err); + } +} diff --git a/src/sbi/time.zig b/src/sbi/time.zig new file mode 100644 index 0000000..eed5324 --- /dev/null +++ b/src/sbi/time.zig @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const std = @import("std"); +const hwinfo = @import("../hwinfo.zig"); +const riscv = @import("../riscv.zig"); +const sbi = @import("../sbi.zig"); + +const ExtId: usize = 0x54494d45; + +const FnId = enum(usize) { + SetTimer, +}; + +pub const Error = error{ + 
NoCpusHwInfo, +}; + +pub fn setTimer(stime_absolute: u64) !void { + if (!try sbi.probeExt(ExtId)) return sbi.Error.NotSupported; + + const ret = riscv.ecall(ExtId, @intFromEnum(FnId.SetTimer), stime_absolute, 0, 0); + if (ret.err != 0) return sbi.errorFromCode(ret.err); +} + +pub fn interruptInMillis(millis: u64) !void { + var cpus = try hwinfo.byKind(.cpus); + const frequency = try cpus.next() orelse return error.NoCpusHwInfo; + const cycles = frequency.value / 1000 * millis; + + const time = riscv.time.read(); + try setTimer(time + cycles); +} diff --git a/src/syscall.zig b/src/syscall.zig new file mode 100644 index 0000000..6b94e16 --- /dev/null +++ b/src/syscall.zig @@ -0,0 +1,200 @@ +// SPDX-FileCopyrightText: 2024 Himbeer +// +// SPDX-License-Identifier: AGPL-3.0-or-later + +const std = @import("std"); +const Console = @import("Console.zig"); +const TrapFrame = @import("TrapFrame.zig"); +const channel = @import("channel.zig"); +const hwinfo = @import("hwinfo.zig"); +const mem = @import("mem.zig"); +const paging = @import("paging.zig"); +const process = @import("process.zig"); +const riscv = @import("riscv.zig"); + +pub const Error = error{ + ZeroAddressSupplied, +}; + +pub const HandleError = error{ + UnknownSyscall, +}; + +pub fn handler(proc: *process.Info, trap_frame: *TrapFrame) !void { + switch (trap_frame.general_purpose_registers[17]) { + 100000 => trap_frame.setReturnValue(errorName(trap_frame)), + 100001 => trap_frame.setReturnValue(consoleWrite(trap_frame)), + 100002 => trap_frame.setReturnValue(launch(trap_frame)), + 100003 => trap_frame.setReturnValue(end(proc)), + 100004 => trap_frame.setReturnValue(terminate(proc, trap_frame)), + 100005 => trap_frame.setReturnValue(processId(proc)), + 100006 => trap_frame.setReturnValue(threadId(proc)), + 100008 => trap_frame.setReturnValue(devicesByKind(trap_frame)), + 100009 => trap_frame.setReturnValue(join(proc, trap_frame)), + 100010 => trap_frame.setReturnValue(leave(proc, trap_frame)), + 100011 => 
trap_frame.setReturnValue(pass(trap_frame)), + 100012 => trap_frame.setReturnValue(receive(proc, trap_frame)), + else => return HandleError.UnknownSyscall, + } +} + +pub const ErrorNameError = error{ErrorCodeOutOfRange}; + +// errorName(code: u16, buffer: [*]u8, len: usize) !usize +fn errorName(trap_frame: *const TrapFrame) !usize { + const code_wide = trap_frame.general_purpose_registers[10]; + const buffer_opt: ?[*]u8 = @ptrFromInt(trap_frame.general_purpose_registers[11]); + const buffer_ptr = buffer_opt orelse return Error.ZeroAddressSupplied; + const len = trap_frame.general_purpose_registers[12]; + + const code = std.math.cast(u16, code_wide) orelse { + return ErrorNameError.ErrorCodeOutOfRange; + }; + const buffer = buffer_ptr[0..len]; + + if (code == 0) return 0; + + const error_name = @errorName(@errorFromInt(code)); + + const n = @min(buffer.len, error_name.len); + + paging.setUserMemoryAccess(true); + defer paging.setUserMemoryAccess(false); + + @memcpy(buffer[0..n], error_name[0..n]); + return n; +} + +// consoleWrite(bytes: [*]const u8, len: usize) !usize +fn consoleWrite(trap_frame: *const TrapFrame) !usize { + const vaddr = trap_frame.general_purpose_registers[10]; + const len = trap_frame.general_purpose_registers[11]; + + const procmem: *paging.Table = @ptrFromInt(riscv.satp.read().ppn << 12); + + const flags = paging.EntryFlags.userReadOnly; + const paddr = procmem.translate(vaddr, flags) orelse { + const faulter: *volatile u8 = @ptrFromInt(vaddr); + _ = faulter.*; + unreachable; + }; + + const bytes_ptr: [*]const u8 = @ptrFromInt(paddr); + const bytes = bytes_ptr[0..len]; + + const w = Console.autoChoose().?.writer(); + return w.write(bytes); +} + +// launch(bytes: [*]align(@alignOf(std.elf.Elf64_Ehdr)) const u8, len: usize) !usize +fn launch(trap_frame: *const TrapFrame) !usize { + const alignment = @alignOf(std.elf.Elf64_Ehdr); + const bytes_addr = trap_frame.general_purpose_registers[10]; + const bytes_opt: ?[*]const u8 = 
@ptrFromInt(bytes_addr); + const bytes_noalign = bytes_opt orelse return Error.ZeroAddressSupplied; + const bytes_ptr = try std.math.alignCast(alignment, bytes_noalign); + const len = trap_frame.general_purpose_registers[11]; + + const bytes = bytes_ptr[0..len]; + + paging.setUserMemoryAccess(true); + defer paging.setUserMemoryAccess(false); + + const new_proc = try process.create(mem.page_allocator, bytes); + return new_proc.id; +} + +// end() noreturn +fn end(proc: *process.Info) noreturn { + proc.terminate(); + process.schedule() catch |err| { + std.debug.panic("schedule error: {}", .{err}); + }; +} + +pub const TerminateError = error{ + PidOutOfRange, + ProcessNotFound, +}; + +// terminate(pid: u16, tid: usize) !void +fn terminate(proc: *const process.Info, trap_frame: *const TrapFrame) !void { + const pid_wide = trap_frame.general_purpose_registers[10]; + const pid = std.math.cast(u16, pid_wide) orelse { + return TerminateError.PidOutOfRange; + }; + const tid = trap_frame.general_purpose_registers[11]; + + const target = process.findThread(pid, tid) orelse { + return TerminateError.ProcessNotFound; + }; + target.terminate(); + + if (target.shouldTerminate(proc)) { + process.schedule() catch |err| { + std.debug.panic("schedule error: {}", .{err}); + }; + } +} + +// processId() u16 +fn processId(proc: *const process.Info) usize { + return proc.id; +} + +// threadId() usize +fn threadId(proc: *const process.Info) usize { + return proc.thread_id; +} + +// devicesByKind(kind: hwinfo.DevKind, devices: [*]hwinfo.Dev, len: usize) !usize +fn devicesByKind(trap_frame: *const TrapFrame) !usize { + const kind: hwinfo.DevKind = @enumFromInt(trap_frame.general_purpose_registers[10]); + const devices: [*]hwinfo.Dev = @ptrFromInt(trap_frame.general_purpose_registers[11]); + const len = trap_frame.general_purpose_registers[12]; + + var i: usize = 0; + var devs = try hwinfo.byKind(kind); + while (try devs.next()) |dev| { + if (i >= len) break; + + devices[i] = dev; + i += 1; + 
} + + return i; +} + +// join(channel_id: usize) !void +fn join(proc: *const process.Info, trap_frame: *const TrapFrame) !void { + const id = trap_frame.general_purpose_registers[10]; + return channel.join(proc.id, id); +} + +// leave(channel_id: usize) void +fn leave(proc: *const process.Info, trap_frame: *const TrapFrame) void { + const id = trap_frame.general_purpose_registers[10]; + channel.leave(proc.id, id); +} + +// pass(channel_id: usize, bytes: [*]const u8, len: usize) !void +fn pass(trap_frame: *const TrapFrame) !void { + const id = trap_frame.general_purpose_registers[10]; + const bytes_ptr: [*]const u8 = @ptrFromInt(trap_frame.general_purpose_registers[11]); + const len = trap_frame.general_purpose_registers[12]; + + const bytes = bytes_ptr[0..len]; + const copy = try channel.allocator().alloc(u8, bytes.len); + @memcpy(copy, bytes); + try channel.pass(id, copy); +} + +// receive(channel_id: usize, buffer: [*]u8, len: usize) !usize +fn receive(proc: *const process.Info, trap_frame: *const TrapFrame) !usize { + const id = trap_frame.general_purpose_registers[10]; + const buffer_ptr: [*]u8 = @ptrFromInt(trap_frame.general_purpose_registers[11]); + const len = trap_frame.general_purpose_registers[12]; + + const buffer = buffer_ptr[0..len]; + return channel.receive(proc.id, id, buffer); +} -- cgit v1.2.3