-rw-r--r--  src/paging.zig | 82
1 file changed, 54 insertions(+), 28 deletions(-)
diff --git a/src/paging.zig b/src/paging.zig
index 904eb05..b32c634 100644
--- a/src/paging.zig
+++ b/src/paging.zig
@@ -6,13 +6,22 @@
 // are 39 bits wide. Sv32 and Sv48 are currently not implemented.
 
 // Defined by linker script.
-const heap_start = @extern(*void, .{ .name = "_heap_start" });
-const heap_size = @extern(*void, .{ .name = "_heap_size" });
-
-const num_pages = @intFromPtr(heap_size) / page_size;
-const alloc_start: *void = @ptrFromInt(heap_start + num_pages * page_size); // Beyond page descriptors
+pub const text_start = @extern(*anyopaque, .{ .name = "_text_start" });
+pub const text_end = @extern(*anyopaque, .{ .name = "_text_end" });
+pub const rodata_start = @extern(*anyopaque, .{ .name = "_rodata_start" });
+pub const rodata_end = @extern(*anyopaque, .{ .name = "_rodata_end" });
+pub const data_start = @extern(*anyopaque, .{ .name = "_data_start" });
+pub const data_end = @extern(*anyopaque, .{ .name = "_data_end" });
+pub const bss_start = @extern(*anyopaque, .{ .name = "_bss_start" });
+pub const bss_end = @extern(*anyopaque, .{ .name = "_bss_end" });
+pub const heap_start = @extern(*anyopaque, .{ .name = "_heap_start" });
+pub const heap_end = @extern(*anyopaque, .{ .name = "_heap_end" });
+
+pub inline fn heapSize() usize {
+    return @intFromPtr(heap_end) - @intFromPtr(heap_start);
+}
 
-pub const page_size = 0x1000; // 4096 bytes
+pub const page_size: usize = 0x1000; // 4096 bytes
 
 pub const AllocError = error{
     ZeroSize,
@@ -38,7 +47,7 @@ pub const Page = struct {
     flags: u8,
 
     pub fn isFree(self: Page) bool {
-        return !(self.flags & PageFlags.active);
+        return (self.flags & @intFromEnum(PageFlags.active)) == 0;
     }
 
     // Reports whether this is the last page of a contiguous allocation.
@@ -52,8 +61,8 @@ pub const Page = struct {
     pub fn take(self: *Page, last: bool) !void {
         if (!self.isFree()) return AllocError.AlreadyTaken;
 
-        self.flags |= PageFlags.active;
-        if (last) self.flags |= PageFlags.last;
+        self.flags |= @intFromEnum(PageFlags.active);
+        if (last) self.flags |= @intFromEnum(PageFlags.last);
     }
 
     // Clears all flags to mark a page as free.
@@ -107,7 +116,12 @@ fn physicalPageNumbers(paddr: usize) [3]usize {
     };
 }
 
-pub const EntryFlags = enum(i64) {
+// Returns the page numbers of an address as a single integer.
+fn pageNumber(addr: usize) usize {
+    return addr >> 12;
+}
+
+pub const EntryFlags = enum(u8) {
     valid = 1 << 0,
     read = 1 << 1,
     write = 1 << 2,
@@ -116,9 +130,13 @@
     global = 1 << 5,
     accessed = 1 << 6,
     dirty = 1 << 7,
+
+    pub fn isLeaf(flags: u8) bool {
+        return (flags & (@intFromEnum(EntryFlags.read) | @intFromEnum(EntryFlags.write) | @intFromEnum(EntryFlags.exec))) != 0;
+    }
 };
 
-pub const Entry = packed struct(i64) {
+pub const Entry = packed struct(u64) {
     reserved: u10,
     mapping: u44,
     rsw: u2, // Reserved for supervisor use. Currently unused.
@@ -148,12 +166,12 @@
     }
 
     pub fn isValid(self: Entry) bool {
-        return self.flags & EntryFlags.valid;
+        return (self.flags & @intFromEnum(EntryFlags.valid)) != 0;
     }
 
     // Returns whether the entry is a mapping (true) or another page table (false).
     pub fn isLeaf(self: Entry) bool {
-        return self.flags & (EntryFlags.read | EntryFlags.write | EntryFlags.exec);
+        return EntryFlags.isLeaf(self.flags);
     }
 };
 
@@ -173,8 +191,8 @@ pub const Table = struct {
     //
     // This function internally uses zeroedAlloc to allocate memory for the required page tables,
     // but assumes that the physical address to map to has already been allocated by the caller.
-    pub fn map(root: *Table, vaddr: usize, paddr: usize, flags: i64, level: usize) !void {
-        if (!.{ .flags = flags }.isLeaf()) return TableError.NotALeaf;
+    pub fn map(root: *Table, vaddr: usize, paddr: usize, flags: u8, level: usize) !void {
+        if (!EntryFlags.isLeaf(flags)) return TableError.NotALeaf;
 
         const vpn = virtualPageNumbers(vaddr);
 
@@ -188,12 +206,12 @@ pub const Table = struct {
            // If this entry doesn't point to a lower-level page table or memory page yet,
            // allocate one.
            if (!v.isValid()) {
-                const page = zeroedAlloc(1);
+                const page = try zeroedAlloc(1);
                v.* = .{
                    .reserved = 0,
-                    .mapping = page >> 12, // Remove the offset, a mapping is just the PPN.
+                    .mapping = @intCast(pageNumber(@intFromPtr(page))), // Remove the offset, a mapping is just the PPN.
                    .rsw = 0,
-                    .flags = EntryFlags.valid, // No permissions, this is a branch to another table.
+                    .flags = @intFromEnum(EntryFlags.valid), // No permissions, this is a branch to another table.
                };
            }
 
@@ -201,15 +219,15 @@ pub const Table = struct {
            // This cast is safe because the only field of a Table is its entries.
            const table: *Table = @ptrFromInt(v.mappingAddr());
            // Grab the entry of the table by indexing it according to the corresponding VPN.
-            v = &table[vpn[i]];
+            v = &table.entries[vpn[i]];
        }
 
        // Write the actual mapping to the correct table on the requested level.
        v.* = .{
            .reserved = 0,
-            .mapping = paddr >> 12, // Remove the offset, a mapping is just the PPN.
+            .mapping = @intCast(pageNumber(paddr)), // Remove the offset, a mapping is just the PPN.
            .rsw = 0,
-            .flags = flags | EntryFlags.accessed | EntryFlags.dirty, // Prevent page faults on platforms that require the A and D flags.
+            .flags = flags | @intFromEnum(EntryFlags.accessed) | @intFromEnum(EntryFlags.dirty), // Prevent page faults on platforms that require the A and D flags.
        };
    }
 
@@ -278,7 +296,7 @@ pub const Table = struct {
    //
    // This is still useful because it can be used to prevent the kernel
    // from accessing machine-reserved memory by accident.
-    pub fn identityMapRange(root: *Table, start: usize, end: usize, flags: i64) !void {
+    pub fn identityMapRange(root: *Table, start: usize, end: usize, flags: u8) !void {
        // Mask out the offset within the starting page.
        const startPage = start & ~(page_size - 1);
        // Mask out the offset within the ending page, but ensure the returned page address
@@ -298,7 +316,11 @@
 pub fn alloc(n: usize) !*void {
    if (n <= 0) return AllocError.ZeroSize;
 
-    const pages: *[num_pages]Page = @ptrCast(heap_start);
+    const num_pages = heapSize() / page_size;
+    // Start allocating beyond page descriptors.
+    const alloc_start = @intFromPtr(heap_start) + num_pages * page_size;
+
+    const pages: [*]Page = @ptrCast(heap_start);
 
    // Iterate over potential starting points.
    // The subtraction of n prevents unnecessary iterations for starting points
@@ -321,12 +343,12 @@ pub fn alloc(n: usize) !*void {
            if (!insufficient) {
                // Mark all allocated pages as taken.
                for (i..n + i - 1) |j| {
-                    pages[j].take(false);
+                    try pages[j].take(false);
                }
-                pages[n + i - 1].take(true);
+                try pages[n + i - 1].take(true);
 
                // Construct a pointer to the first page using its descriptor number.
-                return alloc_start + i * page_size;
+                return @ptrFromInt(alloc_start + i * page_size);
            }
        }
    }
 
@@ -336,6 +358,10 @@
 
 // Free (contiguous) memory page(s). Provides limited protection against double-frees.
 pub fn free(ptr: *void) !void {
+    const num_pages = heapSize() / page_size;
+    // Start allocating beyond page descriptors.
+    const alloc_start: *void = @ptrFromInt(@intFromPtr(heap_start) + num_pages * page_size);
+
    // Restore the address to the page descriptor flags from the address of its contents
    // by restoring the descriptor number and indexing the descriptor table
    // at the start of the heap using it.
@@ -343,7 +369,7 @@
 
    // Ensure basic address sanity.
    // Does not check descriptor table bounds.
-    if (addr < @intFromPtr(heap_start) or addr >= @intFromPtr(heap_start) + @intFromPtr(heap_size)) return AllocError.OutOfRange;
+    if (addr < @intFromPtr(heap_start) or addr >= @intFromPtr(heap_start) + heapSize()) return AllocError.OutOfRange;
 
    const page: [*]Page = @ptrCast(addr);
 
@@ -371,7 +397,7 @@ pub fn zeroedAlloc(n: usize) !*void {
    // because page_size (4096) is divisible by 8.
    const size = (n * page_size) / 8;
 
-    const ptr: *volatile [size]u64 = @ptrCast(ret);
+    const ptr: [*]volatile u64 = @alignCast(@ptrCast(ret));
 
    for (0..size) |i| {
        ptr[i] = 0;
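The following is a hypothetical usage sketch, not part of this commit: it shows how the new linker-script symbols, the u8 flag values, and Table.identityMapRange might be combined to identity-map the kernel's sections into a freshly allocated root table. The import path, the setupRootTable name, and the chosen flag combinations are illustrative assumptions rather than code from this repository.

const paging = @import("paging.zig");

// Illustrative only: build a root page table that identity-maps the kernel sections.
pub fn setupRootTable() !*paging.Table {
    // One zeroed page holds the root table (512 eight-byte entries = 4096 bytes).
    const root: *paging.Table = @alignCast(@ptrCast(try paging.zeroedAlloc(1)));

    // Flag combinations built from the u8-backed EntryFlags values.
    const rx: u8 = @intFromEnum(paging.EntryFlags.valid) |
        @intFromEnum(paging.EntryFlags.read) |
        @intFromEnum(paging.EntryFlags.exec);
    const ro: u8 = @intFromEnum(paging.EntryFlags.valid) |
        @intFromEnum(paging.EntryFlags.read);
    const rw: u8 = ro | @intFromEnum(paging.EntryFlags.write);

    // Code is mapped read+execute, read-only data read-only, data/bss/heap read+write.
    try root.identityMapRange(@intFromPtr(paging.text_start), @intFromPtr(paging.text_end), rx);
    try root.identityMapRange(@intFromPtr(paging.rodata_start), @intFromPtr(paging.rodata_end), ro);
    try root.identityMapRange(@intFromPtr(paging.data_start), @intFromPtr(paging.data_end), rw);
    try root.identityMapRange(@intFromPtr(paging.bss_start), @intFromPtr(paging.bss_end), rw);
    try root.identityMapRange(@intFromPtr(paging.heap_start), @intFromPtr(paging.heap_end), rw);

    return root;
}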