-rw-r--r--  src/lib/paging.zig   | 29
-rw-r--r--  src/lib/process.zig  | 57
-rw-r--r--  src/lib/syscall.zig  | 34
-rw-r--r--  src/lib/vfs.zig      | 30
4 files changed, 124 insertions(+), 26 deletions(-)
diff --git a/src/lib/paging.zig b/src/lib/paging.zig
index 8c2e521..7dc25dd 100644
--- a/src/lib/paging.zig
+++ b/src/lib/paging.zig
@@ -8,6 +8,7 @@
const std = @import("std");
const hwinfo = @import("hwinfo.zig");
+const instructions = @import("instructions.zig");
// Defined by linker script.
pub const text_start = @extern(*anyopaque, .{ .name = "_text_start" });
@@ -348,6 +349,28 @@ pub const Table = struct {
}
}
+ pub fn unmapEntry(root: *Table, vaddr: usize) void {
+ const vpn = virtualPageNumbers(vaddr);
+
+ var v = &root.entries[vpn[2]];
+
+ for (0..3) |iInv| {
+ const i = 2 - iInv;
+
+ if (!v.isValid()) {
+ break;
+ } else if (v.isLeaf()) {
+ v.flags.valid = 0;
+ // IMPORTANT: flush the TLB for this mapping (sfence.vma) here
+ // if the sfence.vma in process.switchTo is ever optimized away.
+ return;
+ }
+
+ const entry: *[512]Entry = @ptrFromInt(v.mappingAddr());
+ v = &entry[vpn[i - 1]];
+ }
+ }
+
// Returns the physical address for a virtual address using the provided level 2 page table.
// This can be used to access virtual addresses whose page table isn't active
// in the MMU / SATP CSR (Control and Status Register), making it possible
@@ -552,3 +575,9 @@ pub fn zeroedAlloc(n: usize) ![]align(page_size) u8 {
return ret;
}
+
+pub fn setUserMemoryAccess(enable: bool) void {
+ var sstatus = instructions.sstatus.read();
+ sstatus.supervisor_user_memory_access = @bitCast(enable);
+ instructions.sstatus.write(sstatus);
+}
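
paging.setUserMemoryAccess toggles the SUM ("permit Supervisor User Memory access") bit in sstatus, so supervisor-mode code can temporarily dereference user-space mappings. A minimal sketch of how a caller might wrap a user-pointer access with it; copyFromUser and its arguments are hypothetical and shown for illustration only:

    const paging = @import("paging.zig");

    // Hypothetical helper (illustration only): copy from a user-space pointer
    // while running in supervisor mode.
    fn copyFromUser(dst: []u8, user_src: [*]const u8) void {
        paging.setUserMemoryAccess(true); // set sstatus.SUM so S-mode may load from U pages
        defer paging.setUserMemoryAccess(false); // drop the permission again when done
        @memcpy(dst, user_src[0..dst.len]); // destination and source lengths must match
    }
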
diff --git a/src/lib/process.zig b/src/lib/process.zig
index c155243..aa3568c 100644
--- a/src/lib/process.zig
+++ b/src/lib/process.zig
@@ -58,11 +58,18 @@ pub const Info = struct {
pages: []align(paging.page_size) u8,
stack: []align(paging.page_size) u8,
pc: usize,
+ cleanup_hook: ?CleanupHook,
term_hook: ?TermHook,
page_table: *paging.Table,
state: State,
rds: std.AutoArrayHashMap(usize, vfs.ResourceDescriptor),
+ pub const CleanupHook = struct {
+ cleanupFn: *const fn (proc: *const Info, buffer: []u8, copy: []const u8) void,
+ buffer: []u8,
+ copy: []const u8,
+ };
+
pub const TermHook = struct {
hookFn: *const fn (context: *anyopaque, proc: *const Info) void,
context: *anyopaque,
@@ -122,6 +129,7 @@ pub const Info = struct {
.pages = self.pages,
.stack = stack,
.pc = self.pc,
+ .cleanup_hook = null,
.term_hook = null,
.page_table = self.page_table,
.state = .suspended,
@@ -135,7 +143,7 @@ pub const Info = struct {
return &proc_node.data;
}
- pub fn call(self: *Info, function: usize, args: anytype, term_hook: ?TermHook) noreturn {
+ pub fn call(self: *Info, function: usize, args: anytype, cleanup_hook: ?CleanupHook, term_hook: ?TermHook) noreturn {
const Container = struct {
fn terminate() linksection(".rethooks") callconv(.Naked) noreturn {
// Syscall #100011 is "terminate".
@@ -147,6 +155,7 @@ pub const Info = struct {
};
self.pc = function;
+ self.cleanup_hook = cleanup_hook;
self.term_hook = term_hook;
self.trap_frame.general_purpose_registers[1] = @intFromPtr(&Container.terminate);
inline for (args, 0..) |arg, i| {
@@ -184,6 +193,9 @@ pub const Info = struct {
self.rds.deinit();
}
+ if (self.cleanup_hook) |cleanup_hook| {
+ cleanup_hook.cleanupFn(self, cleanup_hook.buffer, cleanup_hook.copy);
+ }
if (self.term_hook) |term_hook| {
term_hook.hookFn(term_hook.context, self);
}
@@ -196,6 +208,48 @@ pub const Info = struct {
fn shouldRemoveThread(self: *const Info, candidate: *const Info) bool {
return candidate.thread_id == self.thread_id or self.thread_id == 0;
}
+
+ pub fn copyBytes(self: *const Info, bytes: []const u8) ![]align(paging.page_size) const u8 {
+ const aligned_len = std.mem.alignForward(usize, bytes.len, paging.page_size);
+ const num_pages = @divExact(aligned_len, paging.page_size);
+
+ const copy = try paging.zeroedAlloc(num_pages);
+ errdefer paging.free(copy);
+
+ var addr = @intFromPtr(copy.ptr);
+ const limit = addr + copy.len;
+ while (addr < limit) : (addr += paging.page_size) {
+ try self.page_table.map(addr, addr, paging.EntryFlags.userReadOnly, 0);
+ errdefer self.page_table.unmapEntry(addr);
+ }
+
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
+
+ @memcpy(copy[0..bytes.len], bytes);
+ return copy[0..bytes.len];
+ }
+
+ pub fn copyBuffer(self: *const Info, buffer: []u8) ![]align(paging.page_size) u8 {
+ const aligned_len = std.mem.alignForward(usize, buffer.len, paging.page_size);
+ const num_pages = @divExact(aligned_len, paging.page_size);
+
+ const copy = try paging.zeroedAlloc(num_pages);
+ errdefer paging.free(copy);
+
+ var addr = @intFromPtr(copy.ptr);
+ const limit = addr + copy.len;
+ while (addr < limit) : (addr += paging.page_size) {
+ try self.page_table.map(addr, addr, paging.EntryFlags.userReadWrite, 0);
+ errdefer self.page_table.unmapEntry(addr);
+ }
+
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
+
+ @memcpy(copy[0..buffer.len], buffer);
+ return copy[0..buffer.len];
+ }
};
pub fn next() ?*Info {
@@ -344,6 +398,7 @@ pub fn create(allocator: std.mem.Allocator, elf_buf: []align(@alignOf(elf.Elf64_
.pages = pages,
.stack = @ptrCast(stack),
.pc = hdr.entry,
+ .cleanup_hook = null,
.term_hook = null,
.page_table = procmem,
.state = .waiting,
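
copyBytes and copyBuffer differ only in the mapping flags: copyBytes maps the duplicated pages userReadOnly (data the driver merely consumes, as in the write() path), while copyBuffer maps them userReadWrite (data the driver fills in, as in the read() path, which is why only that path installs a cleanup hook to copy the result back and unmap). A rough sketch of the two call sites; driver, bytes, buffer and term_hook are hypothetical stand-ins for the names used in vfs.zig below:

    // write() path: the driver only reads the caller's data, so no copy-back is needed.
    const out = try driver.copyBytes(bytes); // mapped userReadOnly in the driver's table
    try call(driver, writeFn, .{ out.ptr, out.len }, null, term_hook);

    // read() path: the driver writes into the pages, so a cleanup hook restores the result.
    const in = try driver.copyBuffer(buffer); // mapped userReadWrite in the driver's table
    try call(driver, readFn, .{ in.ptr, in.len }, .{
        .cleanupFn = moveBack, // memcpy the copy back into `buffer`, then unmap the pages
        .buffer = buffer,
        .copy = in,
    }, term_hook);
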
diff --git a/src/lib/syscall.zig b/src/lib/syscall.zig
index 9eb5199..3c01879 100644
--- a/src/lib/syscall.zig
+++ b/src/lib/syscall.zig
@@ -15,12 +15,6 @@ pub const Error = error{
UnknownSyscall,
};
-fn setUserMemoryAccess(enable: bool) void {
- var sstatus = instructions.sstatus.read();
- sstatus.supervisor_user_memory_access = @bitCast(enable);
- instructions.sstatus.write(sstatus);
-}
-
pub fn handler(proc: *process.Info, trap_frame: *trap.Frame) !void {
switch (trap_frame.general_purpose_registers[17]) {
100000 => uprint(trap_frame),
@@ -54,8 +48,8 @@ fn uprint(trap_frame: *const trap.Frame) void {
// open(path_c: [*:0]const u8, data: usize) Result(usize) // FIXME: kernel panics if given a null pointer
fn open(proc: *process.Info, trap_frame: *trap.Frame) void {
- setUserMemoryAccess(true);
- defer setUserMemoryAccess(false);
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
const path_c: [*:0]const u8 = @ptrFromInt(trap_frame.general_purpose_registers[10]);
const data = trap_frame.general_purpose_registers[11];
@@ -80,8 +74,8 @@ fn close(proc: *process.Info, trap_frame: *const trap.Frame) void {
// writeFn: ?vfs.Stream.WriteFn,
// ) Result(void)
fn provideStream(proc: *const process.Info, trap_frame: *trap.Frame) void {
- setUserMemoryAccess(true);
- defer setUserMemoryAccess(false);
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
const path_c: [*:0]const u8 = @ptrFromInt(trap_frame.general_purpose_registers[10]);
const readFn: ?vfs.Stream.ReadFn = @ptrFromInt(trap_frame.general_purpose_registers[11]);
@@ -103,8 +97,8 @@ fn provideStream(proc: *const process.Info, trap_frame: *trap.Frame) void {
// closeFn: ?vfs.File.CloseFn,
// ) Result(void)
fn provideFile(proc: *const process.Info, trap_frame: *trap.Frame) void {
- setUserMemoryAccess(true);
- defer setUserMemoryAccess(false);
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
const path_c: [*:0]const u8 = @ptrFromInt(trap_frame.general_purpose_registers[10]);
const openFn: vfs.File.OpenFn = @ptrFromInt(trap_frame.general_purpose_registers[11]);
@@ -127,8 +121,8 @@ fn provideFile(proc: *const process.Info, trap_frame: *trap.Frame) void {
// callback: vfs.Hook.Callback,
// ) Result(void)
fn provideHook(proc: *const process.Info, trap_frame: *trap.Frame) void {
- setUserMemoryAccess(true);
- defer setUserMemoryAccess(false);
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
const path_c: [*:0]const u8 = @ptrFromInt(trap_frame.general_purpose_registers[10]);
const callback: vfs.Hook.Callback = @ptrFromInt(trap_frame.general_purpose_registers[11]);
@@ -157,8 +151,8 @@ fn mkdir(trap_frame: *trap.Frame) void {
// removeFn: vfs.DirHook.RemoveFn,
// ) Result(void)
fn provideDirHook(proc: *const process.Info, trap_frame: *trap.Frame) void {
- setUserMemoryAccess(true);
- defer setUserMemoryAccess(false);
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
const path_c: [*:0]const u8 = @ptrFromInt(trap_frame.general_purpose_registers[10]);
const provideFn: vfs.DirHook.ProvideFn = @ptrFromInt(trap_frame.general_purpose_registers[11]);
@@ -184,8 +178,8 @@ fn remove(trap_frame: *const trap.Frame) void {
// read(handle: usize, buffer: [*]u8, len: usize) Result(usize)
fn read(proc: *process.Info, trap_frame: *trap.Frame) void {
- setUserMemoryAccess(true);
- defer setUserMemoryAccess(false);
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
const handle = trap_frame.general_purpose_registers[10];
const buffer: [*]u8 = @ptrFromInt(trap_frame.general_purpose_registers[11]);
@@ -200,8 +194,8 @@ fn read(proc: *process.Info, trap_frame: *trap.Frame) void {
// write(handle: usize, bytes: [*]const u8, len: usize) Result(usize)
fn write(proc: *process.Info, trap_frame: *trap.Frame) void {
- setUserMemoryAccess(true);
- defer setUserMemoryAccess(false);
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
const handle = trap_frame.general_purpose_registers[10];
const bytes: [*]const u8 = @ptrFromInt(trap_frame.general_purpose_registers[11]);
diff --git a/src/lib/vfs.zig b/src/lib/vfs.zig
index d9a7b90..b6646bf 100644
--- a/src/lib/vfs.zig
+++ b/src/lib/vfs.zig
@@ -4,6 +4,7 @@
const std = @import("std");
+const paging = @import("paging.zig");
const process = @import("process.zig");
const sysexchange = @import("sysexchange.zig");
@@ -133,10 +134,15 @@ pub const ResourceDescriptor = struct {
return switch (self.inode.resource) {
.stream => |stream| {
const driver = process.latestThread(self.inode.pid).?;
+ const copy = try driver.copyBuffer(buffer);
const readFn = stream.readFn orelse return Error.ReadNotSupported;
proc.state = .suspended;
- try call(driver, readFn, .{ buffer.ptr, buffer.len }, .{
+ try call(driver, readFn, .{ copy.ptr, copy.len }, .{
+ .cleanupFn = moveBack,
+ .buffer = buffer,
+ .copy = copy,
+ }, .{
.hookFn = crossProcessReturn,
.context = proc,
});
@@ -150,10 +156,11 @@ pub const ResourceDescriptor = struct {
return switch (self.inode.resource) {
.stream => |stream| {
const driver = process.latestThread(self.inode.pid).?;
+ const copy = try driver.copyBytes(bytes);
const writeFn = stream.writeFn orelse return Error.WriteNotSupported;
proc.state = .suspended;
- try call(driver, writeFn, .{ bytes.ptr, bytes.len }, .{
+ try call(driver, writeFn, .{ copy.ptr, copy.len }, null, .{
.hookFn = crossProcessReturn,
.context = proc,
});
@@ -162,6 +169,19 @@ pub const ResourceDescriptor = struct {
else => Error.WriteNotSupported,
};
}
+
+ fn moveBack(driver: *const process.Info, buffer: []u8, copy: []const u8) void {
+ paging.setUserMemoryAccess(true);
+ defer paging.setUserMemoryAccess(false);
+
+ @memcpy(buffer, copy);
+
+ var addr = @intFromPtr(copy.ptr);
+ const limit = addr + copy.len;
+ while (addr < limit) : (addr += paging.page_size) {
+ driver.page_table.unmapEntry(addr);
+ }
+ }
};
pub const UserInfo = union(enum) {
@@ -222,7 +242,7 @@ pub fn open(proc: *process.Info, path: []const u8, pid: u16, data: usize) !Resou
const driver = process.latestThread(node.data.pid).?;
proc.state = .suspended;
- try call(driver, hook.callback, .{ pid, data }, .{
+ try call(driver, hook.callback, .{ pid, data }, null, .{
.hookFn = crossProcessReturn,
.context = proc,
});
@@ -235,9 +255,9 @@ pub fn openZ(proc: *process.Info, path_c: [*:0]const u8, pid: u16, data: usize)
return open(proc, mem.sliceTo(path_c, 0), pid, data);
}
-fn call(proc: *process.Info, function: *const anyopaque, args: anytype, termHook: ?process.Info.TermHook) !noreturn {
+fn call(proc: *process.Info, function: *const anyopaque, args: anytype, cleanup_hook: ?process.Info.CleanupHook, term_hook: ?process.Info.TermHook) !noreturn {
const callback_thread = try proc.createThread(null);
- callback_thread.call(@intFromPtr(function), args, termHook);
+ callback_thread.call(@intFromPtr(function), args, cleanup_hook, term_hook);
}
fn crossProcessReturn(context: *anyopaque, driver: *const process.Info) void {