Diffstat (limited to 'src/mem.zig')
-rw-r--r--  src/mem.zig  261
1 file changed, 261 insertions(+), 0 deletions(-)
diff --git a/src/mem.zig b/src/mem.zig
new file mode 100644
index 0000000..eefa452
--- /dev/null
+++ b/src/mem.zig
@@ -0,0 +1,261 @@
+// SPDX-FileCopyrightText: 2024 Himbeer <himbeer@disroot.org>
+//
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+const std = @import("std");
+const paging = @import("paging.zig");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const maxInt = std.math.maxInt;
+const mem = std.mem;
+
+const Chunk = struct {
+    flags: Flags,
+    len: usize,
+
+    const Flags = packed struct(u8) {
+        active: u1,
+        reserved: u7,
+    };
+
+    pub fn next(self: *align(1) Chunk) *align(1) Chunk {
+        const byte_ptr: [*]u8 = @ptrCast(self);
+        return @ptrCast(byte_ptr + @sizeOf(Chunk) + self.len);
+    }
+
+    pub fn take(self: *align(1) Chunk) void {
+        self.flags.active = 1;
+    }
+
+    pub fn clear(self: *align(1) Chunk) void {
+        self.flags = mem.zeroInit(Flags, .{});
+    }
+
+    pub fn data(self: *align(1) Chunk) []u8 {
+        const byte_ptr: [*]u8 = @ptrCast(self);
+        return byte_ptr[@sizeOf(Chunk) .. @sizeOf(Chunk) + self.len];
+    }
+};
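+
+// Illustrative sketch of the header/payload layout that next() relies on,
+// using a plain byte buffer as a stand-in for a mapped page.
+test "chunk layout (illustrative)" {
+    var backing = [_]u8{0} ** 64;
+
+    const first: *align(1) Chunk = @ptrCast(&backing);
+    first.clear();
+    first.len = 8; // eight payload bytes follow the header
+
+    // next() hops over the header and the payload in a single step.
+    try std.testing.expectEqual(
+        @intFromPtr(&backing) + @sizeOf(Chunk) + 8,
+        @intFromPtr(first.next()),
+    );
+}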
+
+pub const ChunkAllocatorConfig = struct {
+    auto_merge_free: bool = true,
+};
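+
+// For example, ChunkAllocator(.{ .auto_merge_free = false }) defers coalescing
+// of adjacent free chunks until mergeFree() is called explicitly.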
+
+pub fn ChunkAllocator(comptime config: ChunkAllocatorConfig) type {
+    return struct {
+        head: ?*align(1) Chunk,
+        pages: usize,
+
+        const Self = @This();
+
+        pub fn init(pages: usize) !Self {
+            const head: *align(1) Chunk = @ptrCast(try paging.zeroedAlloc(pages));
+            head.len = (pages * paging.page_size) - @sizeOf(Chunk);
+            return .{ .head = head, .pages = pages };
+        }
+
+        pub fn deinit(self: *Self) void {
+            if (self.head) |head| {
+                paging.free(head);
+                self.head = null;
+            }
+        }
+
+        pub fn allocator(self: *Self) Allocator {
+            return .{
+                .ptr = self,
+                .vtable = &.{
+                    .alloc = alloc,
+                    .resize = resize,
+                    .free = free,
+                },
+            };
+        }
+
+        pub fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
+            _ = ret_addr;
+
+            const self: *Self = @ptrCast(@alignCast(ctx));
+
+            const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
+
+            var chunk = self.head orelse return null;
+            const bound = @intFromPtr(chunk) + (self.pages * paging.page_size);
+
+            var predecessor: ?*align(1) Chunk = null;
+            while (@intFromPtr(chunk) < bound) : (chunk = chunk.next()) {
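+                // Worked example (illustrative, assuming a 64-bit target where
+                // @sizeOf(Chunk) == 16): a header at 0x...38 puts its payload at
+                // 0x...48. For ptr_align == 16 that yields adjust_off == 8, so the
+                // header slides forward to 0x...40, the eight skipped bytes are
+                // donated to the predecessor's len, and the payload starts on the
+                // aligned 0x...50 boundary.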
+                const adjust_off = mem.alignPointerOffset(chunk.data().ptr, ptr_align) orelse return null;
+                const aligned_len = len + adjust_off;
+
+                // Is this chunk free and large enough to hold the requested allocation?
+                if (!@bitCast(chunk.flags.active) and chunk.len >= aligned_len) {
+                    const remaining = chunk.len - aligned_len;
+
+                    if (predecessor) |*pred| {
+                        pred.*.len += adjust_off;
+                    } else if (adjust_off != 0) return null;
+
+                    chunk = @ptrFromInt(@intFromPtr(chunk) + adjust_off);
+                    chunk.clear();
+                    chunk.take();
+
+                    if (remaining > @sizeOf(Chunk)) {
+                        chunk.len = len;
+
+                        const new_successor = chunk.next();
+
+                        new_successor.clear();
+                        new_successor.len = remaining - @sizeOf(Chunk);
+                    } else {
+                        // Too small to split; the allocation absorbs the remainder.
+                        chunk.len = len + remaining;
+                    }
+
+                    return chunk.data().ptr;
+                }
+
+                predecessor = chunk;
+            }
+
+            return null;
+        }
+
+        // Only expands into the next free chunk (if there is one).
+        // You may want to call mergeFree first if auto_merge_free was configured to false.
+        pub fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
+            _ = ret_addr;
+
+            const self: *Self = @ptrCast(@alignCast(ctx));
+
+            const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
+
+            const head = self.head orelse return false;
+            const bound = @intFromPtr(head) + (self.pages * paging.page_size);
+
+            const chunk = @as(*align(1) Chunk, @ptrCast(buf.ptr - @sizeOf(Chunk)));
+
+            const adjust_off = mem.alignPointerOffset(buf.ptr, ptr_align) orelse return false;
+            const aligned_new_len = new_len + adjust_off;
+
+            if (aligned_new_len < chunk.len) {
+                const regained = chunk.len - aligned_new_len;
+                if (regained > @sizeOf(Chunk)) {
+                    chunk.len = aligned_new_len;
+
+                    const new_successor = chunk.next();
+
+                    new_successor.clear();
+                    new_successor.len = regained - @sizeOf(Chunk);
+                }
+
+                return true;
+            } else if (aligned_new_len > chunk.len) {
+                const successor = chunk.next();
+                if (@intFromPtr(successor) >= bound) return false;
+
+                const total_len = chunk.len + @sizeOf(Chunk) + successor.len;
+
+                if (!@bitCast(successor.flags.active) and aligned_new_len <= total_len) {
+                    const remaining = total_len - aligned_new_len;
+
+                    if (remaining > @sizeOf(Chunk)) {
+                        chunk.len = aligned_new_len;
+
+                        const new_successor = chunk.next();
+
+                        new_successor.clear();
+                        new_successor.len = remaining - @sizeOf(Chunk);
+                    } else {
+                        chunk.len = total_len;
+                    }
+
+                    return true;
+                }
+
+                return false;
+            } else return true;
+        }
+
+        pub fn free(ctx: *anyopaque, old_mem: []u8, log2_old_align: u8, ret_addr: usize) void {
+            _ = log2_old_align;
+            _ = ret_addr;
+
+            const self: *Self = @ptrCast(@alignCast(ctx));
+
+            // Safety check. Do not free memory in uninitialized / undefined pages.
+            if (self.head == null) return;
+
+            // The chunk header may not be naturally aligned, so go through an
+            // align(1) pointer instead of relying on @alignCast.
+            const chunk: *align(1) Chunk = @ptrCast(old_mem.ptr - @sizeOf(Chunk));
+            chunk.clear();
+
+            if (config.auto_merge_free) {
+                self.mergeFree();
+            }
+        }
+
+        pub fn mergeFree(self: *Self) void {
+            var chunk = self.head orelse return;
+            const bound = @intFromPtr(chunk) + (self.pages * paging.page_size);
+
+            while (@intFromPtr(chunk) < bound) : (chunk = chunk.next()) {
+                const successor = chunk.next();
+
+                if (@intFromPtr(successor) >= bound) {
+                    // Safety check.
+                    // Should never run if the implementation is working correctly.
+                    //
+                    // Ensure that there is a successor within bounds.
+                    // The loop condition is not sufficient here; it only detects
+                    // non-erroneous list ends (i.e. chunk == bound).
+                    break;
+                } else if (!@bitCast(chunk.flags.active) and !@bitCast(successor.flags.active)) {
+                    chunk.len += @sizeOf(Chunk) + successor.len;
+                }
+            }
+        }
+    };
+}
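+
+// Usage sketch (illustrative; assumes paging.zeroedAlloc can provide backing
+// pages in the target environment):
+//
+//     var chunk_allocator = try ChunkAllocator(.{}).init(1);
+//     defer chunk_allocator.deinit();
+//
+//     const allocator = chunk_allocator.allocator();
+//     const buf = try allocator.alloc(u8, 32);
+//     defer allocator.free(buf);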
+
+pub const PageAllocator = struct {
+    pub const vtable = Allocator.VTable{
+        .alloc = alloc,
+        .resize = resize,
+        .free = free,
+    };
+
+    fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
+        _ = ra;
+        _ = log2_align;
+        assert(n > 0);
+        if (n > maxInt(usize) - (paging.page_size - 1)) return null;
+        const aligned_len = mem.alignForward(usize, n, paging.page_size);
+        const num_pages = @divExact(aligned_len, paging.page_size);
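+        // For example, with 4 KiB pages a 5000-byte request rounds up to an
+        // aligned_len of 8192 and therefore num_pages == 2.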
+
+        const slice = paging.zeroedAlloc(num_pages) catch return null;
+        assert(mem.isAligned(@intFromPtr(slice.ptr), paging.page_size));
+        return slice.ptr;
+    }
+
+    fn resize(_: *anyopaque, buf_unaligned: []u8, log2_buf_align: u8, new_size: usize, return_address: usize) bool {
+        _ = log2_buf_align;
+        _ = return_address;
+        const new_size_aligned = mem.alignForward(usize, new_size, paging.page_size);
+
+        const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, paging.page_size);
+        if (new_size_aligned == buf_aligned_len) {
+            return true;
+        }
+
+        return false;
+    }
+
+    fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
+        _ = log2_buf_align;
+        _ = return_address;
+        const buf_aligned_len = mem.alignForward(usize, slice.len, paging.page_size);
+
+        paging.free(slice.ptr[0..buf_aligned_len]);
+    }
+};
+
+pub const page_allocator = Allocator{
+    .ptr = undefined,
+    .vtable = &PageAllocator.vtable,
+};
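+
+// Usage sketch (illustrative): page_allocator always hands out whole zeroed
+// pages, so it is best suited to large or page-granular buffers.
+//
+//     const buf = try page_allocator.alloc(u8, 2 * paging.page_size);
+//     defer page_allocator.free(buf);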