@@ Lines 48-54 @@
 //!
 //! ## Basic Design:
 //!
-//! Small allocations are divided into buckets:
+//! Small allocations are divided into buckets. For a max page size of 4K:
 //!
 //! ```
 //! index obj_size
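The table that follows in the doc comment maps each bucket index to its object size: index n holds objects of 2^n bytes. A quick illustrative check of that mapping (not part of the patch), using the same `ceilPowerOfTwoAssert`/`log2` pairing the allocator applies later in its resize and `allocSlot` paths:

```zig
const std = @import("std");

test "bucket index for a requested size (illustrative)" {
    // A request is rounded up to its power-of-two size class, and the
    // bucket index is the log2 of that class: 24 bytes -> class 32 -> index 5.
    const requested: usize = 24;
    const size_class = std.math.ceilPowerOfTwoAssert(usize, requested);
    try std.testing.expectEqual(@as(usize, 32), size_class);
    try std.testing.expectEqual(@as(usize, 5), std.math.log2(size_class));
}
```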
@@ Lines 75-80 @@
 //! BucketHeader, followed by "used bits", and two stack traces for each slot
 //! (allocation trace and free trace).
 //!
+//! The buckets array contains buckets for every size class below `max_page_size`.
+//! At runtime, only size classes below `pageSize()` will actually be used for allocations.
+//!
 //! The "used bits" are 1 bit per slot representing whether the slot is used.
 //! Allocations use the data to iterate to find a free slot. Frees assert that the
 //! corresponding bit is 1 and set it to 0.
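A standalone sketch of the used-bits bookkeeping described above, assuming a fixed 64 slots for illustration (the real allocator derives the slot count from the size class and stores the bits directly after the BucketHeader):

```zig
const std = @import("std");

var used = [_]u8{0} ** 8; // 1 bit per slot: 64 slots / 8 bits per byte

fn isUsed(slot: usize) bool {
    const mask = @as(u8, 1) << @intCast(slot % 8);
    return (used[slot / 8] & mask) != 0;
}

fn markUsed(slot: usize) void {
    used[slot / 8] |= @as(u8, 1) << @intCast(slot % 8);
}

fn markFree(slot: usize) void {
    // Mirrors the double-free check: the bit must be 1 before clearing it.
    std.debug.assert(isUsed(slot));
    used[slot / 8] &= ~(@as(u8, 1) << @intCast(slot % 8));
}

test "used bits round-trip" {
    markUsed(9);
    try std.testing.expect(isUsed(9));
    markFree(9);
    try std.testing.expect(!isUsed(9));
}
```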
@@ Lines 99-109 @@ const math = std.math;
 const assert = std.debug.assert;
 const mem = std.mem;
 const Allocator = std.mem.Allocator;
-const page_size = std.mem.page_size;
+const min_page_size = std.heap.min_page_size;
+const max_page_size = std.heap.max_page_size;
+const pageSize = std.heap.pageSize;
 const StackTrace = std.builtin.StackTrace;

 /// Integer type for pointing to slots in a small allocation
-const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);
+const SlotIndex = std.meta.Int(.unsigned, math.log2(max_page_size) + 1);

 const default_test_stack_trace_frames: usize = if (builtin.is_test) 10 else 6;
 const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) default_test_stack_trace_frames else 0;
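The `+ 1` in `SlotIndex` is easy to misread: the type must hold not only the largest slot index but the slot count itself, since a full bucket of 1-byte slots sets `alloc_cursor` to the page size. A comptime check, assuming the 4K max page size used in the doc comment:

```zig
const std = @import("std");

test "SlotIndex also holds a full slot count" {
    const max_page_size = 4096; // illustrative; matches the doc comment's 4K
    const SlotIndex = std.meta.Int(.unsigned, std.math.log2(max_page_size) + 1);
    // log2(4096) + 1 = 13 bits: u12 holds indexes 0..4095 but not the
    // slot count 4096 that a full bucket's alloc_cursor reaches.
    try std.testing.expect(SlotIndex == u13);
    try std.testing.expect(std.math.maxInt(SlotIndex) >= max_page_size);
}
```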
@@ Lines 157-162 @@ pub const Config = struct {

 pub const Check = enum { ok, leak };

+var used_small_bucket_count_cache = std.atomic.Value(usize).init(0);
+var largest_used_bucket_object_size_cache = std.atomic.Value(usize).init(0);
+
 /// Default initialization of this struct is deprecated; use `.init` instead.
 pub fn GeneralPurposeAllocator(comptime config: Config) type {
     return struct {
@@ Lines 206-214 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {

 pub const Error = mem.Allocator.Error;

-const small_bucket_count = math.log2(page_size);
+const small_bucket_count = math.log2(max_page_size);
 const largest_bucket_object_size = 1 << (small_bucket_count - 1);
 const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);
+fn used_small_bucket_count() usize {
+    const cached = used_small_bucket_count_cache.load(.monotonic);
+    if (cached != 0) {
+        return cached;
+    }
+    const val = math.log2(pageSize());
+    used_small_bucket_count_cache.store(val, .monotonic);
+    return val;
+}
+fn largest_used_bucket_object_size() usize {
+    const cached = largest_used_bucket_object_size_cache.load(.monotonic);
+    if (cached != 0) {
+        return cached;
+    }
+    const val = @as(usize, 1) << @truncate(used_small_bucket_count() - 1);
+    largest_used_bucket_object_size_cache.store(val, .monotonic);
+    return val;
+}

 const bucketCompare = struct {
     fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
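The two new helpers use a racy-but-benign caching pattern: zero means "not computed yet", and any number of threads may race to fill the cache because each derives the identical value from `pageSize()`, so `.monotonic` ordering suffices. A minimal standalone sketch of the idiom (the `expensive()` body is a stand-in, not the allocator's code):

```zig
const std = @import("std");

var cache = std.atomic.Value(usize).init(0); // 0 = not yet computed

fn expensive() usize {
    // Stand-in for a runtime query such as pageSize(); the race below is
    // only benign because every caller computes the same nonzero value.
    return 4096;
}

fn cached() usize {
    const v = cache.load(.monotonic);
    if (v != 0) return v;
    const computed = expensive();
    // Concurrent stores all write the same value, so no stronger
    // ordering or compare-and-swap is needed.
    cache.store(computed, .monotonic);
    return computed;
}

test "cache returns a stable value" {
    try std.testing.expectEqual(cached(), cached());
}
```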
@@ Lines 261-267 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 // * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation

 const BucketHeader = struct {
-    page: [*]align(page_size) u8,
+    page: [*]align(min_page_size) u8,
     alloc_cursor: SlotIndex,
     used_count: SlotIndex,

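One subtlety in the `page` field: pointer alignment in Zig is a comptime property, so `min_page_size` is the strongest guarantee the type can carry, even though the memory itself is allocated at the runtime `pageSize()`, which is at least as strict (see `createBucket` further down). A sketch of the shape, assuming the `std.heap.min_page_size`/`pageSize` names this patch introduces:

```zig
const std = @import("std");

// The type can only promise the comptime-known minimum page alignment;
// the allocation itself is still one full runtime page.
fn allocPage(gpa: std.mem.Allocator) ![*]align(std.heap.min_page_size) u8 {
    const page = try gpa.alignedAlloc(u8, std.heap.min_page_size, std.heap.pageSize());
    return page.ptr;
}
```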
@@ Lines 273-286 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
     if (!config.safety) @compileError("requested size is only stored when safety is enabled");
     const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(size_class);
     const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(pageSize(), size_class);
     return sizes[0..slot_count];
 }

 fn log2PtrAligns(bucket: *BucketHeader, size_class: usize) []u8 {
     if (!config.safety) @compileError("requested size is only stored when safety is enabled");
     const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(size_class);
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(pageSize(), size_class);
     return aligns_ptr[0..slot_count];
 }

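`@divExact` remains sound with the runtime page size: every size class is a power of two no larger than the page size, and page sizes are themselves powers of two, so the division never has a remainder (in safe builds a remainder would panic). Illustrative check:

```zig
const std = @import("std");

test "power-of-two size classes divide the page size exactly" {
    const page: usize = 4096; // stands in for pageSize()
    var size_class: usize = 1;
    while (size_class <= page) : (size_class *= 2) {
        const slot_count = @divExact(page, size_class); // never a remainder
        try std.testing.expect(slot_count * size_class == page);
    }
}
```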
@@ Lines 312-318 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 /// Only valid for buckets within `empty_buckets`, and relies on the `alloc_cursor`
 /// of empty buckets being set to `slot_count` when they are added to `empty_buckets`
 fn emptyBucketSizeClass(bucket: *BucketHeader) usize {
-    return @divExact(page_size, bucket.alloc_cursor);
+    return @divExact(pageSize(), bucket.alloc_cursor);
 }
 };

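The invariant behind `emptyBucketSizeClass`: when a bucket is retired to `empty_buckets`, its `alloc_cursor` is overwritten with the slot count (see the `retain_metadata` branch later in this diff), and since slot_count = pageSize() / size_class, dividing again recovers the size class. Illustrative round-trip:

```zig
const std = @import("std");

test "size class round-trips through the stored slot count" {
    const page: usize = 4096; // stands in for pageSize()
    const size_class: usize = 64;
    const slot_count = @divExact(page, size_class); // stashed in alloc_cursor
    try std.testing.expectEqual(size_class, @divExact(page, slot_count));
}
```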
@@ Lines 355-367 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {

 fn bucketAlignsStart(size_class: usize) usize {
     if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(pageSize(), size_class);
     return bucketRequestedSizesStart(size_class) + (@sizeOf(LargestSizeClassInt) * slot_count);
 }

 fn bucketStackFramesStart(size_class: usize) usize {
     const unaligned_start = if (config.safety) blk: {
-        const slot_count = @divExact(page_size, size_class);
+        const slot_count = @divExact(pageSize(), size_class);
         break :blk bucketAlignsStart(size_class) + slot_count;
     } else @sizeOf(BucketHeader) + usedBitsCount(size_class);
     return mem.alignForward(
@@ Lines 372-383 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 }

 fn bucketSize(size_class: usize) usize {
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(pageSize(), size_class);
     return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
 }

 fn usedBitsCount(size_class: usize) usize {
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(pageSize(), size_class);
     if (slot_count < 8) return 1;
     return @divExact(slot_count, 8);
 }
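Concretely, for a 4 KiB runtime page (illustrative; the helper below is a page-size-parameterized copy of `usedBitsCount`): a size class of 1024 yields 4 slots, which still costs one whole byte, while a size class of 8 yields 512 slots and 64 bytes of used bits:

```zig
const std = @import("std");

fn usedBitsCountFor(page: usize, size_class: usize) usize {
    const slot_count = @divExact(page, size_class);
    if (slot_count < 8) return 1; // a partial byte still occupies one byte
    return @divExact(slot_count, 8);
}

test "used-bits storage for a 4 KiB page" {
    try std.testing.expectEqual(@as(usize, 1), usedBitsCountFor(4096, 4096)); // 1 slot
    try std.testing.expectEqual(@as(usize, 1), usedBitsCountFor(4096, 1024)); // 4 slots
    try std.testing.expectEqual(@as(usize, 64), usedBitsCountFor(4096, 8)); // 512 slots
}
```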
@@ Lines 416-422 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 pub fn detectLeaks(self: *Self) bool {
     var leaks = false;

-    for (&self.buckets, 0..) |*buckets, bucket_i| {
+    for (0..used_small_bucket_count()) |bucket_i| {
+        const buckets = &self.buckets[bucket_i];
         if (buckets.root == null) continue;
         const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i));
         const used_bits_count = usedBitsCount(size_class);
@@ Lines 464-470 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 var bucket = node.key;
 if (config.never_unmap) {
     // free page that was intentionally leaked by never_unmap
-    self.backing_allocator.free(bucket.page[0..page_size]);
+    self.backing_allocator.free(bucket.page[0..pageSize()]);
 }
 // alloc_cursor was set to slot count when bucket added to empty_buckets
 self.freeBucket(bucket, bucket.emptyBucketSizeClass());
@@ Lines 531-537 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error!Slot {
     const bucket_index = math.log2(size_class);
     var buckets = &self.buckets[bucket_index];
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(pageSize(), size_class);
     if (self.cur_buckets[bucket_index] == null or self.cur_buckets[bucket_index].?.alloc_cursor == slot_count) {
         const new_bucket = try self.createBucket(size_class);
         errdefer self.freeBucket(new_bucket, size_class);
@@ Lines 564-570 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
     addr: usize,
     current_bucket: ?*BucketHeader,
 ) ?*BucketHeader {
-    const search_page: [*]align(page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, page_size));
+    const search_page: [*]align(min_page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, pageSize()));
     if (current_bucket != null and current_bucket.?.page == search_page) {
         return current_bucket;
     }
@@ Lines 729-742 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 assert(old_mem.len != 0);

 const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
-if (aligned_size > largest_bucket_object_size) {
+if (aligned_size > largest_used_bucket_object_size()) {
     return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
 }
 const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);

 var bucket_index = math.log2(size_class_hint);
 var size_class: usize = size_class_hint;
-const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
+const bucket = while (bucket_index < used_small_bucket_count()) : (bucket_index += 1) {
     if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
         break bucket;
     }
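The resize path now compares against the runtime-derived largest small size class instead of the comptime bound. Worked example of the size-class hint (illustrative values): a 48-byte allocation with 8-byte alignment has an aligned size of 48 and lands in the 64-byte class, bucket index 6:

```zig
const std = @import("std");

test "size class hint rounds the aligned size up to a power of two" {
    const log2_old_align: std.math.Log2Int(usize) = 3; // 8-byte alignment
    const aligned_size = @max(@as(usize, 48), @as(usize, 1) << log2_old_align);
    const size_class_hint = std.math.ceilPowerOfTwoAssert(usize, aligned_size);
    try std.testing.expectEqual(@as(usize, 64), size_class_hint);
    try std.testing.expectEqual(@as(usize, 6), std.math.log2(size_class_hint));
}
```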
@@ Lines 847-853 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 assert(old_mem.len != 0);

 const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
-if (aligned_size > largest_bucket_object_size) {
+if (aligned_size > largest_used_bucket_object_size()) {
     self.freeLarge(old_mem, log2_old_align, ret_addr);
     return;
 }
@@ Lines 855-861 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {

 var bucket_index = math.log2(size_class_hint);
 var size_class: usize = size_class_hint;
-const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
+const bucket = while (bucket_index < used_small_bucket_count()) : (bucket_index += 1) {
     if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
         break bucket;
     }
@@ Lines 944-957 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
     self.cur_buckets[bucket_index] = null;
 }
 if (!config.never_unmap) {
-    self.backing_allocator.free(bucket.page[0..page_size]);
+    self.backing_allocator.free(bucket.page[0..pageSize()]);
 }
 if (!config.retain_metadata) {
     self.freeBucket(bucket, size_class);
     self.bucket_node_pool.destroy(node);
 } else {
     // move alloc_cursor to end so we can tell size_class later
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(pageSize(), size_class);
     bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
     var empty_entry = self.empty_buckets.getEntryFor(node.key);
     empty_entry.set(node);
@@ Lines 992-998 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
     ret_addr: usize,
 ) Allocator.Error![*]u8 {
     const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
-    if (new_aligned_size > largest_bucket_object_size) {
+    if (new_aligned_size > largest_used_bucket_object_size()) {
         try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
         const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
             return error.OutOfMemory;
@@ Lines 1035-1041 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 }

 fn createBucket(self: *Self, size_class: usize) Error!*BucketHeader {
-    const page = try self.backing_allocator.alignedAlloc(u8, page_size, page_size);
+    const page = try self.backing_allocator.alignedAlloc(u8, min_page_size, pageSize());
     errdefer self.backing_allocator.free(page);

     const bucket_size = bucketSize(size_class);
@@ Lines 1175-1191 @@ test "large object - grow" {
 defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
 const allocator = gpa.allocator();

-var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
+var slice1 = try allocator.alloc(u8, pageSize() * 2 - 20);
 defer allocator.free(slice1);

 const old = slice1;
-slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
+slice1 = try allocator.realloc(slice1, pageSize() * 2 - 10);
 try std.testing.expect(slice1.ptr == old.ptr);

-slice1 = try allocator.realloc(slice1, page_size * 2);
+slice1 = try allocator.realloc(slice1, pageSize() * 2);
 try std.testing.expect(slice1.ptr == old.ptr);

-slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
+slice1 = try allocator.realloc(slice1, pageSize() * 2 + 1);
 }

 test "realloc small object to large object" {
@@ Lines 1199-1205 @@ test "realloc small object to large object" {
 slice[60] = 0x34;

 // This requires upgrading to a large object
-const large_object_size = page_size * 2 + 50;
+const large_object_size = pageSize() * 2 + 50;
 slice = try allocator.realloc(slice, large_object_size);
 try std.testing.expect(slice[0] == 0x12);
 try std.testing.expect(slice[60] == 0x34);
@@ Lines 1210-1231 @@ test "shrink large object to large object" {
 defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
 const allocator = gpa.allocator();

-var slice = try allocator.alloc(u8, page_size * 2 + 50);
+var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
 defer allocator.free(slice);
 slice[0] = 0x12;
 slice[60] = 0x34;

-if (!allocator.resize(slice, page_size * 2 + 1)) return;
-slice = slice.ptr[0 .. page_size * 2 + 1];
+if (!allocator.resize(slice, pageSize() * 2 + 1)) return;
+slice = slice.ptr[0 .. pageSize() * 2 + 1];
 try std.testing.expect(slice[0] == 0x12);
 try std.testing.expect(slice[60] == 0x34);

-try std.testing.expect(allocator.resize(slice, page_size * 2 + 1));
-slice = slice[0 .. page_size * 2 + 1];
+try std.testing.expect(allocator.resize(slice, pageSize() * 2 + 1));
+slice = slice[0 .. pageSize() * 2 + 1];
 try std.testing.expect(slice[0] == 0x12);
 try std.testing.expect(slice[60] == 0x34);

-slice = try allocator.realloc(slice, page_size * 2);
+slice = try allocator.realloc(slice, pageSize() * 2);
 try std.testing.expect(slice[0] == 0x12);
 try std.testing.expect(slice[60] == 0x34);
 }
@@ Lines 1239-1251 @@ test "shrink large object to large object with larger alignment" {
 var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
 const debug_allocator = fba.allocator();

-const alloc_size = page_size * 2 + 50;
+const alloc_size = pageSize() * 2 + 50;
 var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
 defer allocator.free(slice);

 const big_alignment: usize = switch (builtin.os.tag) {
-    .windows => page_size * 32, // Windows aligns to 64K.
-    else => page_size * 2,
+    .windows => pageSize() * 32, // Windows aligns to 64K.
+    else => pageSize() * 2,
 };
 // This loop allocates until we find a page that is not aligned to the big
 // alignment. Then we shrink the allocation after the loop, but increase the
@@ Lines 1271-1277 @@ test "realloc large object to small object" {
 defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
 const allocator = gpa.allocator();

-var slice = try allocator.alloc(u8, page_size * 2 + 50);
+var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
 defer allocator.free(slice);
 slice[0] = 0x12;
 slice[16] = 0x34;
@@ Lines 1311-1328 @@ test "realloc large object to larger alignment" {
 var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
 const debug_allocator = fba.allocator();

-var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
+var slice = try allocator.alignedAlloc(u8, 16, pageSize() * 2 + 50);
 defer allocator.free(slice);

 const big_alignment: usize = switch (builtin.os.tag) {
-    .windows => page_size * 32, // Windows aligns to 64K.
-    else => page_size * 2,
+    .windows => pageSize() * 32, // Windows aligns to 64K.
+    else => pageSize() * 2,
 };
 // This loop allocates until we find a page that is not aligned to the big alignment.
 var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
 while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
     try stuff_to_free.append(slice);
-    slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
+    slice = try allocator.alignedAlloc(u8, 16, pageSize() * 2 + 50);
 }
 while (stuff_to_free.popOrNull()) |item| {
     allocator.free(item);
@@ Lines 1330-1344 @@ test "realloc large object to larger alignment" {
 slice[0] = 0x12;
 slice[16] = 0x34;

-slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100);
+slice = try allocator.reallocAdvanced(slice, 32, pageSize() * 2 + 100);
 try std.testing.expect(slice[0] == 0x12);
 try std.testing.expect(slice[16] == 0x34);

-slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25);
+slice = try allocator.reallocAdvanced(slice, 32, pageSize() * 2 + 25);
 try std.testing.expect(slice[0] == 0x12);
 try std.testing.expect(slice[16] == 0x34);

-slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100);
+slice = try allocator.reallocAdvanced(slice, big_alignment, pageSize() * 2 + 100);
 try std.testing.expect(slice[0] == 0x12);
 try std.testing.expect(slice[16] == 0x34);
 }
@@ Lines 1349-1355 @@ test "large object shrinks to small but allocation fails during shrink" {
 defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
 const allocator = gpa.allocator();

-var slice = try allocator.alloc(u8, page_size * 2 + 50);
+var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
 defer allocator.free(slice);
 slice[0] = 0x12;
 slice[3] = 0x34;
@@ Lines 1420-1426 @@ test "double frees" {
 try std.testing.expect(GPA.searchBucket(&gpa.empty_buckets, @intFromPtr(small.ptr), null) != null);

 // detect a large allocation double free
-const large = try allocator.alloc(u8, 2 * page_size);
+const large = try allocator.alloc(u8, 2 * pageSize());
 try std.testing.expect(gpa.large_allocations.contains(@intFromPtr(large.ptr)));
 try std.testing.expectEqual(gpa.large_allocations.getEntry(@intFromPtr(large.ptr)).?.value_ptr.bytes, large);
 allocator.free(large);
@@ Lines 1429-1435 @@ test "double frees" {

 const normal_small = try allocator.alloc(u8, size_class);
 defer allocator.free(normal_small);
-const normal_large = try allocator.alloc(u8, 2 * page_size);
+const normal_large = try allocator.alloc(u8, 2 * pageSize());
 defer allocator.free(normal_large);

 // check that flushing retained metadata doesn't disturb live allocations
@@ Lines 1462-1469 @@ test "bug 9995 fix, large allocs count requested size not backing size" {
 var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
 const allocator = gpa.allocator();

-var buf = try allocator.alignedAlloc(u8, 1, page_size + 1);
-try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
+var buf = try allocator.alignedAlloc(u8, 1, pageSize() + 1);
+try std.testing.expect(gpa.total_requested_bytes == pageSize() + 1);
 buf = try allocator.realloc(buf, 1);
 try std.testing.expect(gpa.total_requested_bytes == 1);
 buf = try allocator.realloc(buf, 2);