author    Himbeer <himbeer@disroot.org>    2024-06-21 17:59:44 +0200
committer Himbeer <himbeer@disroot.org>    2024-06-21 17:59:44 +0200
commit    75ca4a878f99846782422cf99a72a22ebeb9529c (patch)
tree      3f328f0fd73d6b4879919a5f64fff65fe4af088a /src
parent    056ccf2b2a72e5fb30262438f3979a20414ca381 (diff)
mem: Add page allocator
This allocator is more robust than the existing chunk allocator. It is backed by the page-grained allocator in paging.zig and does not support in-place resizing. Its CPU and memory overhead is considerably higher than the chunk allocator's.
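A minimal usage sketch of the new allocator through the standard std.mem.Allocator interface (the import path, wrapper function and buffer size below are illustrative assumptions, not part of this commit):

    const std = @import("std");
    const mem = @import("lib/mem.zig"); // hypothetical import path

    fn example() !void {
        // page_allocator carries no per-instance state, so the constant can be used directly.
        const allocator: std.mem.Allocator = mem.page_allocator;

        // alloc() rounds the request up to a whole number of pages and hands out zeroed memory.
        const buf = try allocator.alloc(u8, 4096);
        defer allocator.free(buf);
    }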
Diffstat (limited to 'src')
-rw-r--r--   src/lib/mem.zig   65
1 file changed, 57 insertions(+), 8 deletions(-)
diff --git a/src/lib/mem.zig b/src/lib/mem.zig
index 6f6ef5f..25adc84 100644
--- a/src/lib/mem.zig
+++ b/src/lib/mem.zig
@@ -2,9 +2,12 @@
//
// SPDX-License-Identifier: AGPL-3.0-or-later
-const std = @import("std");
-
const paging = @import("paging.zig");
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const mem = std.mem;
+const maxInt = std.math.maxInt;
+const assert = std.debug.assert;
const Chunk = struct {
flags: Flags,
@@ -25,7 +28,7 @@ const Chunk = struct {
}
pub fn clear(self: *align(1) Chunk) void {
- self.flags = std.mem.zeroInit(Flags, .{});
+ self.flags = mem.zeroInit(Flags, .{});
}
pub fn data(self: *align(1) Chunk) []u8 {
@@ -58,7 +61,7 @@ pub fn ChunkAllocator(comptime config: ChunkAllocatorConfig) type {
}
}
- pub fn allocator(self: *Self) std.mem.Allocator {
+ pub fn allocator(self: *Self) mem.Allocator {
return .{
.ptr = self,
.vtable = &.{
@@ -74,14 +77,14 @@ pub fn ChunkAllocator(comptime config: ChunkAllocatorConfig) type {
const self: *Self = @ptrCast(@alignCast(ctx));
- const ptr_align = @as(usize, 1) << @as(std.mem.Allocator.Log2Align, @intCast(log2_ptr_align));
+ const ptr_align = @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_ptr_align));
var chunk = self.head orelse return null;
const bound = @intFromPtr(chunk) + (self.pages * paging.page_size);
var predecessor: ?*align(1) Chunk = null;
while (@intFromPtr(chunk) < bound) : (chunk = chunk.next()) {
- const adjust_off = std.mem.alignPointerOffset(chunk.data().ptr, ptr_align) orelse return null;
+ const adjust_off = mem.alignPointerOffset(chunk.data().ptr, ptr_align) orelse return null;
const aligned_len = len + adjust_off;
// Is this chunk free and large enough to hold the requested allocation?
@@ -121,14 +124,14 @@ pub fn ChunkAllocator(comptime config: ChunkAllocatorConfig) type {
const self: *Self = @ptrCast(@alignCast(ctx));
- const ptr_align = @as(usize, 1) << @as(std.mem.Allocator.Log2Align, @intCast(log2_buf_align));
+ const ptr_align = @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_buf_align));
const head = self.head orelse return false;
const bound = @intFromPtr(head) + (self.pages * paging.page_size);
const chunk = @as(*align(1) Chunk, @ptrCast(buf.ptr - @sizeOf(Chunk)));
- const adjust_off = std.mem.alignPointerOffset(buf.ptr, ptr_align) orelse return false;
+ const adjust_off = mem.alignPointerOffset(buf.ptr, ptr_align) orelse return false;
const aligned_new_len = new_len + adjust_off;
if (aligned_new_len < chunk.len) {
@@ -209,3 +212,49 @@ pub fn ChunkAllocator(comptime config: ChunkAllocatorConfig) type {
}
};
}
+
+pub const PageAllocator = struct {
+ pub const vtable = Allocator.VTable{
+ .alloc = alloc,
+ .resize = resize,
+ .free = free,
+ };
+
+ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
+ _ = ra;
+ _ = log2_align;
+ assert(n > 0);
+ if (n > maxInt(usize) - (paging.page_size - 1)) return null;
+ const aligned_len = mem.alignForward(usize, n, paging.page_size);
+
+ const slice = paging.zeroedAlloc(aligned_len) catch return null;
+ assert(mem.isAligned(@intFromPtr(slice.ptr), paging.page_size));
+ return slice.ptr;
+ }
+
+ fn resize(_: *anyopaque, buf_unaligned: []u8, log2_buf_align: u8, new_size: usize, return_address: usize) bool {
+ _ = log2_buf_align;
+ _ = return_address;
+ const new_size_aligned = mem.alignForward(usize, new_size, paging.page_size);
+
+ const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, paging.page_size);
+ if (new_size_aligned == buf_aligned_len) {
+ return true;
+ }
+
+ return false;
+ }
+
+ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
+ _ = log2_buf_align;
+ _ = return_address;
+ const buf_aligned_len = mem.alignForward(usize, slice.len, paging.page_size);
+
+ paging.free(slice.ptr[0..buf_aligned_len]);
+ }
+};
+
+pub const page_allocator: Allocator = .{
+ .ptr = undefined,
+ .vtable = &PageAllocator.vtable,
+};
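
Because resize() above only reports success when the old and new lengths round up to the same number of pages, growing an allocation past a page boundary has to move it. std.mem.Allocator.realloc() already performs that fallback automatically; the grow-only helper below just spells it out and is purely illustrative (its name and parameters are assumptions, not part of this commit):

    fn grow(allocator: std.mem.Allocator, buf: []u8, new_len: usize) ![]u8 {
        // In-place growth works only while the rounded page count stays the same.
        if (allocator.resize(buf, new_len)) return buf.ptr[0..new_len];

        // Otherwise move the data to a fresh, larger allocation (assumes new_len >= buf.len).
        const bigger = try allocator.alloc(u8, new_len);
        @memcpy(bigger[0..buf.len], buf);
        allocator.free(buf);
        return bigger;
    }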