Diffstat (limited to 'src/lib/mem.zig')
-rw-r--r-- | src/lib/mem.zig | 196
1 file changed, 196 insertions, 0 deletions
diff --git a/src/lib/mem.zig b/src/lib/mem.zig
new file mode 100644
index 0000000..639ac39
--- /dev/null
+++ b/src/lib/mem.zig
@@ -0,0 +1,196 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+
+const paging = @import("paging.zig");
+
+const Chunk = struct {
+    flags: Flags,
+    len: usize,
+
+    const Flags = packed struct(u8) {
+        active: u1,
+        reserved: u7,
+    };
+
+    pub fn next(self: *const Chunk) [*]Chunk {
+        const byte_ptr: [*]const u8 = @ptrCast(@as([*]const Chunk, @ptrCast(self)) + 1);
+        return @constCast(@ptrCast(@alignCast(byte_ptr + self.len)));
+    }
+
+    pub fn take(self: *Chunk) void {
+        self.flags.active = 1;
+    }
+
+    pub fn clear(self: *Chunk) void {
+        self.flags = std.mem.zeroInit(Flags, .{});
+    }
+};
+
+pub const ChunkAllocatorConfig = struct {
+    auto_merge_free: bool = true,
+};
+
+pub fn ChunkAllocator(comptime config: ChunkAllocatorConfig) type {
+    return struct {
+        head: ?[*]Chunk,
+        pages: usize,
+
+        const Self = @This();
+
+        pub fn init(pages: usize) !Self {
+            const chunks: [*]Chunk = @ptrCast(@alignCast(try paging.zeroedAlloc(pages)));
+            chunks[0].len = (pages * paging.page_size) - @sizeOf(Chunk);
+            return .{ .head = chunks, .pages = pages };
+        }
+
+        pub fn deinit(self: *Self) void {
+            if (self.head) |head| {
+                paging.free(head);
+            }
+        }
+
+        pub fn allocator(self: *Self) std.mem.Allocator {
+            return .{
+                .ptr = self,
+                .vtable = &.{
+                    .alloc = alloc,
+                    .resize = resize,
+                    .free = free,
+                },
+            };
+        }
+
+        pub fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
+            _ = ret_addr;
+
+            const self: *Self = @ptrCast(@alignCast(ctx));
+
+            const ptr_align = @as(usize, 1) << @as(std.mem.Allocator.Log2Align, @intCast(log2_ptr_align));
+
+            var chunk = self.head orelse return null;
+            const bound = @intFromPtr(chunk) + (self.pages * paging.page_size);
+
+            while (@intFromPtr(chunk) < bound) : (chunk = chunk[0].next()) {
+                const adjust_off = std.mem.alignPointerOffset(@as([*]u8, @ptrCast(chunk + 1)), ptr_align) orelse return null;
+                const aligned_len = len + adjust_off;
+
+                // Is this chunk free and large enough to hold the requested allocation?
+                if (!@bitCast(chunk[0].flags.active) and chunk[0].len >= aligned_len) {
+                    const remaining = chunk[0].len - aligned_len;
+
+                    chunk[0].take();
+
+                    if (remaining > @sizeOf(Chunk)) {
+                        const new_successor: *Chunk = @ptrCast(@alignCast(@as([*]u8, @ptrCast(chunk + 1)) + aligned_len));
+
+                        new_successor.clear();
+                        new_successor.len = remaining - @sizeOf(Chunk);
+
+                        chunk[0].len = aligned_len;
+                    }
+
+                    return std.mem.alignPointer(@as([*]u8, @ptrCast(chunk + 1)), ptr_align);
+                }
+            }
+
+            return null;
+        }
+
+        // Only expands into the next free chunk (if there is one).
+        // You may want to call mergeFree first if auto_merge_free was configured to false.
+        pub fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
+            _ = ret_addr;
+
+            const self: *Self = @ptrCast(@alignCast(ctx));
+
+            const ptr_align = @as(usize, 1) << @as(std.mem.Allocator.Log2Align, @intCast(log2_buf_align));
+
+            const head = self.head orelse return false;
+            const bound = @intFromPtr(head) + (self.pages * paging.page_size);
+
+            const chunk = @as([*]Chunk, @ptrCast(@alignCast(buf.ptr))) - 1;
+
+            const adjust_off = std.mem.alignPointerOffset(@as([*]u8, @ptrCast(chunk + 1)), ptr_align) orelse return false;
+            const aligned_new_len = new_len + adjust_off;
+
+            if (aligned_new_len < chunk[0].len) {
+                const regained = chunk[0].len - aligned_new_len;
+                if (regained > @sizeOf(Chunk)) {
+                    const new_successor: *Chunk = @ptrCast(@alignCast(@as([*]u8, @ptrCast(chunk + 1)) + aligned_new_len));
+
+                    new_successor.clear();
+                    new_successor.len = regained - @sizeOf(Chunk);
+
+                    chunk[0].len = aligned_new_len;
+                }
+
+                return true;
+            } else if (aligned_new_len > chunk[0].len) {
+                const successor = chunk[0].next();
+                if (@intFromPtr(successor) >= bound) return false;
+
+                const total_len = chunk[0].len + @sizeOf(Chunk) + successor[0].len;
+
+                if (!@bitCast(successor[0].flags.active) and aligned_new_len <= total_len) {
+                    const remaining = total_len - aligned_new_len;
+
+                    if (remaining > @sizeOf(Chunk)) {
+                        const new_successor: *Chunk = @ptrCast(@alignCast(@as([*]u8, @ptrCast(chunk + 1)) + aligned_new_len));
+
+                        new_successor.clear();
+                        new_successor.len = remaining - @sizeOf(Chunk);
+
+                        chunk[0].len = aligned_new_len;
+                    } else {
+                        chunk[0].len = total_len;
+                    }
+
+                    return true;
+                }
+
+                return false;
+            } else return true;
+        }
+
+        pub fn free(ctx: *anyopaque, old_mem: []u8, log2_old_align: u8, ret_addr: usize) void {
+            _ = log2_old_align;
+            _ = ret_addr;
+
+            const self: *Self = @ptrCast(@alignCast(ctx));
+
+            // Safety check. Do not free memory in uninitialized / undefined pages.
+            if (self.head == null) return;
+
+            const chunk = @as([*]Chunk, @ptrCast(@alignCast(old_mem.ptr))) - 1;
+            chunk[0].clear();
+
+            if (config.auto_merge_free) {
+                self.mergeFree();
+            }
+        }
+
+        pub fn mergeFree(self: *Self) void {
+            var chunk = self.head orelse return;
+            const bound = @intFromPtr(chunk) + (self.pages * paging.page_size);
+
+            while (@intFromPtr(chunk) < bound) : (chunk = chunk[0].next()) {
+                const successor = chunk[0].next();
+
+                if (@intFromPtr(successor) >= bound) {
+                    // Safety check.
+                    // Should never run if the implementation is working correctly.
+                    //
+                    // Ensure that there is a successor within bounds.
+                    // The loop condition is not sufficient here; it only detects
+                    // non-erroneous list ends (i.e. chunk == bound).
+                    break;
+                } else if (!@bitCast(chunk[0].flags.active) and !@bitCast(successor[0].flags.active)) {
+                    chunk[0].len += @sizeOf(Chunk) + successor[0].len;
+                }
+            }
+        }
+    };
+}
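
A minimal usage sketch, not part of the commit: it assumes a kernel context in which the paging backend (paging.zeroedAlloc / paging.free) is functional, that this file is reachable as @import("mem.zig"), and the example function name is hypothetical.

    const std = @import("std");
    const mem = @import("mem.zig");

    fn example() !void {
        // Back the allocator with one page; the chunk headers live inside it.
        var chunk_allocator = try mem.ChunkAllocator(.{}).init(1);
        defer chunk_allocator.deinit();

        const allocator = chunk_allocator.allocator();

        // From here on this is the regular std.mem.Allocator interface.
        const buf = try allocator.alloc(u8, 64);
        defer allocator.free(buf);
    }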
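
The doc comment on resize points at the manual-merge mode: with auto_merge_free = false, freed neighbours stay fragmented until mergeFree is called. A hedged sketch of that flow, under the same assumptions as above:

    fn exampleManualMerge() !void {
        var chunk_allocator = try mem.ChunkAllocator(.{ .auto_merge_free = false }).init(1);
        defer chunk_allocator.deinit();

        const allocator = chunk_allocator.allocator();

        const a = try allocator.alloc(u8, 32);
        const b = try allocator.alloc(u8, 32);
        allocator.free(b);

        // free() only cleared b's chunk; coalesce it with the free tail
        // so the resize below can expand a into the reclaimed space.
        chunk_allocator.mergeFree();
        _ = allocator.resize(a, 96);

        allocator.free(a);
    }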