Gentoo's Bugzilla – Attachment 915301 Details for Bug 947025
dev-lang/zig-0.13.0-r2 fails to compile: thread 127 panic: reached unreachable code
Description: Upstream PR rebased against 42dac40b3feeabe39b5f191d1e72d247327133ba
Filename:    asahi-rebased-9999-2.patch
MIME Type:   text/plain
Creator:     Eric Joldasov
Created:     2024-12-27 17:57:11 UTC
Size:        89.77 KB
Flags:       patch, obsolete
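For readers following the patch: it retires the comptime constant std.mem.page_size in favour of comptime bounds (std.heap.min_page_size and std.heap.max_page_size) plus a runtime query std.heap.pageSize(), so one binary can run on kernels with different page sizes, which is the point of the Asahi rebase (Apple Silicon uses 16K pages). Below is a minimal sketch of how calling code would use the new API, assuming a standard library with this patch applied; only the std.heap names are taken from the hunks that follow, the surrounding program is illustrative.

    const std = @import("std");
    const builtin = @import("builtin");

    pub fn main() void {
        // Comptime lower bound: pointers returned by mmap/VirtualAlloc are
        // aligned to at least min_page_size, so it can serve as a comptime
        // alignment where std.mem.page_size used to be used.
        var page_buf: [64]u8 align(std.heap.min_page_size) = undefined;
        page_buf[0] = 0xAA;

        // Runtime truth: on targets where the bounds differ (e.g. 4K..64K on
        // aarch64-linux), only pageSize() yields the real page size, either
        // comptime-folded or via std.options.queryPageSizeFn.
        const size = std.heap.pageSize();
        std.debug.assert(size >= std.heap.min_page_size);
        std.debug.assert(size <= std.heap.max_page_size);
        std.debug.print("{s}: page size = {d}\n", .{ @tagName(builtin.os.tag), size });
    }

Since an exact page-size constant no longer exists at comptime, the diff splits old call sites between min_page_size, max_page_size, pageSize(), and fixed literals (e.g. the 4096-byte buffers substituted in debug.zig and fifo.zig).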
diff --git a/lib/fuzzer.zig b/lib/fuzzer.zig
index 3c00ec7b48..eb37e5e2fd 100644
--- a/lib/fuzzer.zig
+++ b/lib/fuzzer.zig
@@ -480,7 +480,7 @@ pub const MemoryMappedList = struct {
     /// of this ArrayList in accordance with the respective documentation. In
     /// all cases, "invalidated" means that the memory has been passed to this
     /// allocator's resize or free function.
-    items: []align(std.mem.page_size) volatile u8,
+    items: []align(std.heap.min_page_size) volatile u8,
     /// How many bytes this list can hold without allocating additional memory.
     capacity: usize,

diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig
index ac4336476e..0563d6782a 100644
--- a/lib/std/Build/Fuzz/WebServer.zig
+++ b/lib/std/Build/Fuzz/WebServer.zig
@@ -41,7 +41,7 @@ const fuzzer_arch_os_abi = "wasm32-freestanding";
 const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";

 const CoverageMap = struct {
-    mapped_memory: []align(std.mem.page_size) const u8,
+    mapped_memory: []align(std.heap.min_page_size) const u8,
     coverage: Coverage,
     source_locations: []Coverage.SourceLocation,
     /// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index aa21a8a0ea..8b904ea8de 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -767,7 +767,7 @@ const PosixThreadImpl = struct {
         // Use the same set of parameters used by the libc-less impl.
         const stack_size = @max(config.stack_size, 16 * 1024);
         assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS);
-        assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS);
+        assert(c.pthread_attr_setguardsize(&attr, std.heap.pageSize()) == .SUCCESS);

         var handle: c.pthread_t = undefined;
         switch (c.pthread_create(
@@ -1150,7 +1150,7 @@ const LinuxThreadImpl = struct {
         completion: Completion = Completion.init(.running),
         child_tid: std.atomic.Value(i32) = std.atomic.Value(i32).init(1),
         parent_tid: i32 = undefined,
-        mapped: []align(std.mem.page_size) u8,
+        mapped: []align(std.heap.min_page_size) u8,

         /// Calls `munmap(mapped.ptr, mapped.len)` then `exit(1)` without touching the stack (which lives in `mapped.ptr`).
         /// Ported over from musl libc's pthread detached implementation:
@@ -1357,7 +1357,7 @@ const LinuxThreadImpl = struct {
    };

    fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl {
-        const page_size = std.mem.page_size;
+        const page_size = std.heap.pageSize();
        const Args = @TypeOf(args);
        const Instance = struct {
            fn_args: Args,
diff --git a/lib/std/c.zig b/lib/std/c.zig
index e31d90c79f..077fea51c1 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -3,7 +3,7 @@ const builtin = @import("builtin");
 const c = @This();
 const maxInt = std.math.maxInt;
 const assert = std.debug.assert;
-const page_size = std.mem.page_size;
+const min_page_size = std.heap.min_page_size;
 const native_abi = builtin.abi;
 const native_arch = builtin.cpu.arch;
 const native_os = builtin.os.tag;
@@ -2209,6 +2209,39 @@ pub const SC = switch (native_os) {
     .linux => linux.SC,
     else => void,
 };
+
+pub const _SC = switch (native_os) {
+    .bridgeos, .driverkit, .ios, .macos, .tvos, .visionos, .watchos => enum(c_int) {
+        PAGESIZE = 29,
+    },
+    .dragonfly => enum(c_int) {
+        PAGESIZE = 47,
+    },
+    .freebsd => enum(c_int) {
+        PAGESIZE = 47,
+    },
+    .fuchsia => enum(c_int) {
+        PAGESIZE = 30,
+    },
+    .haiku => enum(c_int) {
+        PAGESIZE = 27,
+    },
+    .linux => enum(c_int) {
+        PAGESIZE = 30,
+    },
+    .netbsd => enum(c_int) {
+        PAGESIZE = 28,
+    },
+    .openbsd => enum(c_int) {
+        PAGESIZE = 28,
+    },
+    .solaris, .illumos => enum(c_int) {
+        PAGESIZE = 11,
+        NPROCESSORS_ONLN = 15,
+    },
+    else => void,
+};
+
 pub const SEEK = switch (native_os) {
     .linux => linux.SEEK,
     .emscripten => emscripten.SEEK,
@@ -9038,7 +9071,7 @@ pub extern "c" fn getpwnam(name: [*:0]const u8) ?*passwd;
 pub extern "c" fn getpwuid(uid: uid_t) ?*passwd;
 pub extern "c" fn getrlimit64(resource: rlimit_resource, rlim: *rlimit) c_int;
 pub extern "c" fn lseek64(fd: fd_t, offset: i64, whence: c_int) i64;
-pub extern "c" fn mmap64(addr: ?*align(std.mem.page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
+pub extern "c" fn mmap64(addr: ?*align(min_page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
 pub extern "c" fn open64(path: [*:0]const u8, oflag: O, ...) c_int;
 pub extern "c" fn openat64(fd: c_int, path: [*:0]const u8, oflag: O, ...) c_int;
 pub extern "c" fn pread64(fd: fd_t, buf: [*]u8, nbyte: usize, offset: i64) isize;
@@ -9126,13 +9159,13 @@ pub extern "c" fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) c_int;

 pub extern "c" fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: *const rlimit, old_limit: *rlimit) c_int;
 pub extern "c" fn mincore(
-    addr: *align(std.mem.page_size) anyopaque,
+    addr: *align(min_page_size) anyopaque,
     length: usize,
     vec: [*]u8,
 ) c_int;

 pub extern "c" fn madvise(
-    addr: *align(std.mem.page_size) anyopaque,
+    addr: *align(min_page_size) anyopaque,
     length: usize,
     advice: u32,
 ) c_int;
@@ -9230,6 +9263,10 @@ pub const posix_memalign = switch (native_os) {
     .dragonfly, .netbsd, .freebsd, .solaris, .openbsd, .linux, .macos, .ios, .tvos, .watchos, .visionos => private.posix_memalign,
     else => {},
 };
+pub const sysconf = switch (native_os) {
+    .solaris => solaris.sysconf,
+    else => private.sysconf,
+};

 pub const sf_hdtr = switch (native_os) {
     .freebsd, .macos, .ios, .tvos, .watchos, .visionos => extern struct {
@@ -9271,9 +9308,9 @@ pub extern "c" fn writev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint) isize;
 pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: off_t) isize;
 pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
 pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: off_t) isize;
-pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
-pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int;
-pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int;
+pub extern "c" fn mmap(addr: ?*align(min_page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
+pub extern "c" fn munmap(addr: *align(min_page_size) const anyopaque, len: usize) c_int;
+pub extern "c" fn mprotect(addr: *align(min_page_size) anyopaque, len: usize, prot: c_uint) c_int;
 pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) c_int;
 pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_int) c_int;
 pub extern "c" fn unlink(path: [*:0]const u8) c_int;
@@ -9625,7 +9662,6 @@ pub const SCM = solaris.SCM;
 pub const SETCONTEXT = solaris.SETCONTEXT;
 pub const SETUSTACK = solaris.GETUSTACK;
 pub const SFD = solaris.SFD;
-pub const _SC = solaris._SC;
 pub const cmsghdr = solaris.cmsghdr;
 pub const ctid_t = solaris.ctid_t;
 pub const file_obj = solaris.file_obj;
@@ -9642,7 +9678,6 @@ pub const priority = solaris.priority;
 pub const procfs = solaris.procfs;
 pub const projid_t = solaris.projid_t;
 pub const signalfd_siginfo = solaris.signalfd_siginfo;
-pub const sysconf = solaris.sysconf;
 pub const taskid_t = solaris.taskid_t;
 pub const zoneid_t = solaris.zoneid_t;

@@ -9797,6 +9832,7 @@ pub const fcopyfile = darwin.fcopyfile;
 pub const ipc_space_t = darwin.ipc_space_t;
 pub const ipc_space_port_t = darwin.ipc_space_port_t;
 pub const kern_return_t = darwin.kern_return_t;
+pub const vm_size_t = darwin.vm_size_t;
 pub const kevent64 = darwin.kevent64;
 pub const mach_absolute_time = darwin.mach_absolute_time;
 pub const mach_continuous_time = darwin.mach_continuous_time;
@@ -9953,7 +9989,7 @@ const private = struct {
     };
     extern "c" fn getrusage(who: c_int, usage: *rusage) c_int;
     extern "c" fn gettimeofday(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
-    extern "c" fn msync(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
+    extern "c" fn msync(addr: *align(min_page_size) const anyopaque, len: usize, flags: c_int) c_int;
     extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
     extern "c" fn pipe2(fds: *[2]fd_t, flags: O) c_int;
     extern "c" fn readdir(dir: *DIR) ?*dirent;
@@ -9966,6 +10002,7 @@ const private = struct {
     extern "c" fn socket(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int;
     extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *Stat) c_int;
     extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
+    extern "c" fn sysconf(sc: c_int) c_long;

     extern "c" fn pthread_setname_np(thread: pthread_t, name: [*:0]const u8) c_int;
     extern "c" fn getcontext(ucp: *ucontext_t) c_int;
@@ -10000,7 +10037,7 @@ const private = struct {
     extern "c" fn __getrusage50(who: c_int, usage: *rusage) c_int;
     extern "c" fn __gettimeofday50(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
     extern "c" fn __libc_thr_yield() c_int;
-    extern "c" fn __msync13(addr: *align(std.mem.page_size) const anyopaque, len: usize, flags: c_int) c_int;
+    extern "c" fn __msync13(addr: *align(min_page_size) const anyopaque, len: usize, flags: c_int) c_int;
     extern "c" fn __nanosleep50(rqtp: *const timespec, rmtp: ?*timespec) c_int;
     extern "c" fn __sigaction14(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
     extern "c" fn __sigfillset14(set: ?*sigset_t) void;
diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig
index c84478e2c0..df7dbca16a 100644
--- a/lib/std/c/solaris.zig
+++ b/lib/std/c/solaris.zig
@@ -154,10 +154,6 @@ pub const AF_SUN = struct {
     pub const NOPLM = 0x00000004;
 };

-pub const _SC = struct {
-    pub const NPROCESSORS_ONLN = 15;
-};
-
 pub const procfs = struct {
     pub const misc_header = extern struct {
         size: u32,
diff --git a/lib/std/crypto/tlcsprng.zig b/lib/std/crypto/tlcsprng.zig
index 672d6c2ecb..07df6d9a80 100644
--- a/lib/std/crypto/tlcsprng.zig
+++ b/lib/std/crypto/tlcsprng.zig
@@ -6,6 +6,7 @@
 const std = @import("std");
 const builtin = @import("builtin");
 const mem = std.mem;
+const heap = std.heap;
 const native_os = builtin.os.tag;
 const posix = std.posix;

@@ -42,7 +43,7 @@ var install_atfork_handler = std.once(struct {
     }
 }.do);

-threadlocal var wipe_mem: []align(mem.page_size) u8 = &[_]u8{};
+threadlocal var wipe_mem: []align(heap.min_page_size) u8 = &[_]u8{};

 fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
     if (os_has_arc4random) {
@@ -77,7 +78,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
     } else {
         // Use a static thread-local buffer.
         const S = struct {
-            threadlocal var buf: Context align(mem.page_size) = .{
+            threadlocal var buf: Context align(heap.min_page_size) = .{
                 .init_state = .uninitialized,
                 .rng = undefined,
             };
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index e8855f5d1a..a56b4430b6 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -2,6 +2,7 @@ const builtin = @import("builtin");
 const std = @import("std.zig");
 const math = std.math;
 const mem = std.mem;
+const heap = std.heap;
 const io = std.io;
 const posix = std.posix;
 const fs = std.fs;
@@ -1021,7 +1022,7 @@ fn printLineFromFileAnyOs(out_stream: anytype, source_location: SourceLocation)
     defer f.close();
     // TODO fstat and make sure that the file has the correct size

-    var buf: [mem.page_size]u8 = undefined;
+    var buf: [4096]u8 = undefined;
     var amt_read = try f.read(buf[0..]);
     const line_start = seek: {
         var current_line_start: usize = 0;
@@ -1124,7 +1125,7 @@ test printLineFromFileAnyOs {

         const overlap = 10;
         var writer = file.writer();
-        try writer.writeByteNTimes('a', mem.page_size - overlap);
+        try writer.writeByteNTimes('a', heap.min_page_size - overlap);
         try writer.writeByte('\n');
         try writer.writeByteNTimes('a', overlap);

@@ -1139,10 +1140,10 @@ test printLineFromFileAnyOs {
         defer allocator.free(path);

         var writer = file.writer();
-        try writer.writeByteNTimes('a', mem.page_size);
+        try writer.writeByteNTimes('a', heap.max_page_size);

         try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
-        try expectEqualStrings(("a" ** mem.page_size) ++ "\n", output.items);
+        try expectEqualStrings(("a" ** heap.max_page_size) ++ "\n", output.items);
         output.clearRetainingCapacity();
     }
     {
@@ -1152,18 +1153,18 @@ test printLineFromFileAnyOs {
         defer allocator.free(path);

         var writer = file.writer();
-        try writer.writeByteNTimes('a', 3 * mem.page_size);
+        try writer.writeByteNTimes('a', 3 * heap.max_page_size);

         try expectError(error.EndOfFile, printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));

         try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
-        try expectEqualStrings(("a" ** (3 * mem.page_size)) ++ "\n", output.items);
+        try expectEqualStrings(("a" ** (3 * heap.max_page_size)) ++ "\n", output.items);
         output.clearRetainingCapacity();

         try writer.writeAll("a\na");

         try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
-        try expectEqualStrings(("a" ** (3 * mem.page_size)) ++ "a\n", output.items);
+        try expectEqualStrings(("a" ** (3 * heap.max_page_size)) ++ "a\n", output.items);
         output.clearRetainingCapacity();

         try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
@@ -1177,7 +1178,7 @@ test printLineFromFileAnyOs {
         defer allocator.free(path);

         var writer = file.writer();
-        const real_file_start = 3 * mem.page_size;
+        const real_file_start = 3 * heap.min_page_size;
         try writer.writeByteNTimes('\n', real_file_start);
         try writer.writeAll("abc\ndef");

diff --git a/lib/std/debug/Dwarf.zig b/lib/std/debug/Dwarf.zig
index 73b1871c46..10ef029d76 100644
--- a/lib/std/debug/Dwarf.zig
+++ b/lib/std/debug/Dwarf.zig
@@ -2110,8 +2110,8 @@ fn pcRelBase(field_ptr: usize, pc_rel_offset: i64) !usize {
 pub const ElfModule = struct {
     base_address: usize,
     dwarf: Dwarf,
-    mapped_memory: []align(std.mem.page_size) const u8,
-    external_mapped_memory: ?[]align(std.mem.page_size) const u8,
+    mapped_memory: []align(std.heap.min_page_size) const u8,
+    external_mapped_memory: ?[]align(std.heap.min_page_size) const u8,

     pub fn deinit(self: *@This(), allocator: Allocator) void {
         self.dwarf.deinit(allocator);
@@ -2157,11 +2157,11 @@ pub const ElfModule = struct {
     /// sections from an external file.
     pub fn load(
         gpa: Allocator,
-        mapped_mem: []align(std.mem.page_size) const u8,
+        mapped_mem: []align(std.heap.min_page_size) const u8,
         build_id: ?[]const u8,
         expected_crc: ?u32,
         parent_sections: *Dwarf.SectionArray,
-        parent_mapped_mem: ?[]align(std.mem.page_size) const u8,
+        parent_mapped_mem: ?[]align(std.heap.min_page_size) const u8,
         elf_filename: ?[]const u8,
     ) LoadError!Dwarf.ElfModule {
         if (expected_crc) |crc| if (crc != std.hash.crc.Crc32.hash(mapped_mem)) return error.InvalidDebugInfo;
@@ -2413,7 +2413,7 @@ pub const ElfModule = struct {
         build_id: ?[]const u8,
         expected_crc: ?u32,
         parent_sections: *Dwarf.SectionArray,
-        parent_mapped_mem: ?[]align(std.mem.page_size) const u8,
+        parent_mapped_mem: ?[]align(std.heap.min_page_size) const u8,
     ) LoadError!Dwarf.ElfModule {
         const elf_file = elf_file_path.root_dir.handle.openFile(elf_file_path.sub_path, .{}) catch |err| switch (err) {
             error.FileNotFound => return missing(),
diff --git a/lib/std/debug/Info.zig b/lib/std/debug/Info.zig
index 0a07d9ba15..c809547f73 100644
--- a/lib/std/debug/Info.zig
+++ b/lib/std/debug/Info.zig
@@ -10,7 +10,6 @@ const std = @import("../std.zig");
 const Allocator = std.mem.Allocator;
 const Path = std.Build.Cache.Path;
 const Dwarf = std.debug.Dwarf;
-const page_size = std.mem.page_size;
 const assert = std.debug.assert;
 const Coverage = std.debug.Coverage;
 const SourceLocation = std.debug.Coverage.SourceLocation;
diff --git a/lib/std/debug/MemoryAccessor.zig b/lib/std/debug/MemoryAccessor.zig
index 9f112262be..b07ee16187 100644
--- a/lib/std/debug/MemoryAccessor.zig
+++ b/lib/std/debug/MemoryAccessor.zig
@@ -7,7 +7,7 @@ const native_os = builtin.os.tag;
 const std = @import("../std.zig");
 const posix = std.posix;
 const File = std.fs.File;
-const page_size = std.mem.page_size;
+const min_page_size = std.heap.min_page_size;

 const MemoryAccessor = @This();

@@ -82,9 +82,10 @@ pub fn isValidMemory(address: usize) bool {
     // We are unable to determine validity of memory for freestanding targets
     if (native_os == .freestanding or native_os == .other or native_os == .uefi) return true;

-    const aligned_address = address & ~@as(usize, @intCast((page_size - 1)));
+    const page_size = std.heap.pageSize();
+    const aligned_address = address & ~(page_size - 1);
     if (aligned_address == 0) return false;
-    const aligned_memory = @as([*]align(page_size) u8, @ptrFromInt(aligned_address))[0..page_size];
+    const aligned_memory = @as([*]align(min_page_size) u8, @ptrFromInt(aligned_address))[0..page_size];

     if (native_os == .windows) {
         const windows = std.os.windows;
@@ -93,7 +94,7 @@ pub fn isValidMemory(address: usize) bool {

         // The only error this function can throw is ERROR_INVALID_PARAMETER.
         // supply an address that invalid i'll be thrown.
-        const rc = windows.VirtualQuery(aligned_memory, &memory_info, aligned_memory.len) catch {
+        const rc = windows.VirtualQuery(@ptrCast(aligned_memory), &memory_info, aligned_memory.len) catch {
             return false;
         };

diff --git a/lib/std/debug/SelfInfo.zig b/lib/std/debug/SelfInfo.zig
index 544cf0ac6f..d39fcd249f 100644
--- a/lib/std/debug/SelfInfo.zig
+++ b/lib/std/debug/SelfInfo.zig
@@ -504,7 +504,7 @@ pub const Module = switch (native_os) {
     .macos, .ios, .watchos, .tvos, .visionos => struct {
         base_address: usize,
         vmaddr_slide: usize,
-        mapped_memory: []align(mem.page_size) const u8,
+        mapped_memory: []align(std.heap.min_page_size) const u8,
         symbols: []const MachoSymbol,
         strings: [:0]const u8,
         ofiles: OFileTable,
@@ -1046,7 +1046,7 @@ pub fn readElfDebugInfo(
     build_id: ?[]const u8,
     expected_crc: ?u32,
     parent_sections: *Dwarf.SectionArray,
-    parent_mapped_mem: ?[]align(mem.page_size) const u8,
+    parent_mapped_mem: ?[]align(std.heap.min_page_size) const u8,
 ) !Dwarf.ElfModule {
     nosuspend {
         const elf_file = (if (elf_filename) |filename| blk: {
@@ -1088,7 +1088,7 @@ const MachoSymbol = struct {

 /// Takes ownership of file, even on error.
 /// TODO it's weird to take ownership even on error, rework this code.
-fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
+fn mapWholeFile(file: File) ![]align(std.heap.min_page_size) const u8 {
     nosuspend {
         defer file.close();

diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index 8f07db68da..38511f7f29 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -1,6 +1,7 @@
 const std = @import("std.zig");
 const builtin = @import("builtin");
 const mem = std.mem;
+const heap = std.heap;
 const testing = std.testing;
 const elf = std.elf;
 const windows = std.os.windows;
@@ -143,7 +144,7 @@ pub const ElfDynLib = struct {
     hashtab: [*]posix.Elf_Symndx,
     versym: ?[*]elf.Versym,
     verdef: ?*elf.Verdef,
-    memory: []align(mem.page_size) u8,
+    memory: []align(heap.min_page_size) u8,

     pub const Error = ElfDynLibError;

@@ -223,7 +224,7 @@ pub const ElfDynLib = struct {
         // corresponding to the actual LOAD sections.
         const file_bytes = try posix.mmap(
             null,
-            mem.alignForward(usize, size, mem.page_size),
+            mem.alignForward(usize, size, heap.pageSize()),
             posix.PROT.READ,
             .{ .TYPE = .PRIVATE },
             fd,
@@ -284,10 +285,10 @@ pub const ElfDynLib = struct {
                 elf.PT_LOAD => {
                     // The VirtAddr may not be page-aligned; in such case there will be
                     // extra nonsense mapped before/after the VirtAddr,MemSiz
-                    const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1);
+                    const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, heap.pageSize()) - 1);
                     const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
-                    const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size);
-                    const ptr = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_addr));
+                    const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, heap.pageSize());
+                    const ptr = @as([*]align(heap.min_page_size) u8, @ptrFromInt(aligned_addr));
                     const prot = elfToMmapProt(ph.p_flags);
                     if ((ph.p_flags & elf.PF_W) == 0) {
                         // If it does not need write access, it can be mapped from the fd.
diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig
index b07e870f04..fa5357cd6c 100644
--- a/lib/std/fifo.zig
+++ b/lib/std/fifo.zig
@@ -91,7 +91,7 @@ pub fn LinearFifo(
                 mem.copyForwards(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
                 self.head = 0;
             } else {
-                var tmp: [mem.page_size / 2 / @sizeOf(T)]T = undefined;
+                var tmp: [4096 / 2 / @sizeOf(T)]T = undefined;

                 while (self.head != 0) {
                     const n = @min(self.head, tmp.len);
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 3d19d8daa6..b3e8d2ab20 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -8,6 +8,376 @@ const c = std.c;
 const Allocator = std.mem.Allocator;
 const windows = std.os.windows;

+const default_min_page_size: ?usize = switch (builtin.os.tag) {
+    .driverkit, .ios, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
+        .x86_64 => 4 << 10,
+        .aarch64 => 16 << 10,
+        else => null,
+    },
+    .windows => switch (builtin.cpu.arch) {
+        // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
+        .x86, .x86_64 => 4 << 10,
+        // SuperH => 4 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
+        // DEC Alpha => 8 << 10,
+        // Itanium => 8 << 10,
+        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
+        else => null,
+    },
+    .wasi => switch (builtin.cpu.arch) {
+        .wasm32, .wasm64 => 64 << 10,
+        else => null,
+    },
+    // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
+    .uefi => 4 << 10,
+    .freebsd => switch (builtin.cpu.arch) {
+        // FreeBSD/sys/*
+        .x86, .x86_64 => 4 << 10,
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 4 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+        .riscv32, .riscv64 => 4 << 10,
+        else => null,
+    },
+    .netbsd => switch (builtin.cpu.arch) {
+        // NetBSD/sys/arch/*
+        .x86, .x86_64 => 4 << 10,
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 4 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+        .sparc => 4 << 10,
+        .sparc64 => 8 << 10,
+        .riscv32, .riscv64 => 4 << 10,
+        // Sun-2
+        .m68k => 2 << 10,
+        else => null,
+    },
+    .dragonfly => switch (builtin.cpu.arch) {
+        .x86, .x86_64 => 4 << 10,
+        else => null,
+    },
+    .openbsd => switch (builtin.cpu.arch) {
+        // OpenBSD/sys/arch/*
+        .x86, .x86_64 => 4 << 10,
+        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
+        .mips64, .mips64el => 4 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+        .riscv64 => 4 << 10,
+        .sparc64 => 8 << 10,
+        else => null,
+    },
+    .solaris, .illumos => switch (builtin.cpu.arch) {
+        // src/uts/*/sys/machparam.h
+        .x86, .x86_64 => 4 << 10,
+        .sparc, .sparc64 => 8 << 10,
+        else => null,
+    },
+    .fuchsia => switch (builtin.cpu.arch) {
+        // fuchsia/kernel/arch/*/include/arch/defines.h
+        .x86_64 => 4 << 10,
+        .aarch64, .aarch64_be => 4 << 10,
+        .riscv64 => 4 << 10,
+        else => null,
+    },
+    // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
+    .serenity => 4 << 10,
+    .haiku => switch (builtin.cpu.arch) {
+        // haiku/headers/posix/arch/*/limits.h
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 4 << 10,
+        .m68k => 4 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+        .riscv64 => 4 << 10,
+        .sparc64 => 8 << 10,
+        .x86, .x86_64 => 4 << 10,
+        else => null,
+    },
+    .hurd => switch (builtin.cpu.arch) {
+        // gnumach/*/include/mach/*/vm_param.h
+        .x86, .x86_64 => 4 << 10,
+        .aarch64 => null,
+        else => null,
+    },
+    .plan9 => switch (builtin.cpu.arch) {
+        // 9front/sys/src/9/*/mem.h
+        .x86, .x86_64 => 4 << 10,
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 4 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
+        .sparc => 4 << 10,
+        else => null,
+    },
+    .ps3 => switch (builtin.cpu.arch) {
+        // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
+        .powerpc64 => 1 << 20, // 1 MiB
+        else => null,
+    },
+    .ps4 => switch (builtin.cpu.arch) {
+        // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
+        .x86, .x86_64 => 4 << 10,
+        else => null,
+    },
+    .ps5 => switch (builtin.cpu.arch) {
+        // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
+        .x86, .x86_64 => 16 << 10,
+        else => null,
+    },
+    // system/lib/libc/musl/arch/emscripten/bits/limits.h
+    .emscripten => 64 << 10,
+    .linux => switch (builtin.cpu.arch) {
+        // Linux/arch/*/Kconfig
+        .arc => 4 << 10,
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 4 << 10,
+        .csky => 4 << 10,
+        .hexagon => 4 << 10,
+        .loongarch32, .loongarch64 => 4 << 10,
+        .m68k => 4 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+        .riscv32, .riscv64 => 4 << 10,
+        .s390x => 4 << 10,
+        .sparc => 4 << 10,
+        .sparc64 => 8 << 10,
+        .x86, .x86_64 => 4 << 10,
+        .xtensa => 4 << 10,
+        else => null,
+    },
+    .freestanding => switch (builtin.cpu.arch) {
+        .wasm32, .wasm64 => 64 << 10,
+        else => null,
+    },
+    else => null,
+};
+
+const default_max_page_size: ?usize = switch (builtin.os.tag) {
+    .driverkit, .ios, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
+        .x86_64 => 4 << 10,
+        .aarch64 => 16 << 10,
+        else => null,
+    },
+    .windows => switch (builtin.cpu.arch) {
+        // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
+        .x86, .x86_64 => 4 << 10,
+        // SuperH => 4 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
+        // DEC Alpha => 8 << 10,
+        // Itanium => 8 << 10,
+        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
+        else => null,
+    },
+    .wasi => switch (builtin.cpu.arch) {
+        .wasm32, .wasm64 => 64 << 10,
+        else => null,
+    },
+    // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
+    .uefi => 4 << 10,
+    .freebsd => switch (builtin.cpu.arch) {
+        // FreeBSD/sys/*
+        .x86, .x86_64 => 4 << 10,
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 4 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+        .riscv32, .riscv64 => 4 << 10,
+        else => null,
+    },
+    .netbsd => switch (builtin.cpu.arch) {
+        // NetBSD/sys/arch/*
+        .x86, .x86_64 => 4 << 10,
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 64 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 16 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 16 << 10,
+        .sparc => 8 << 10,
+        .sparc64 => 8 << 10,
+        .riscv32, .riscv64 => 4 << 10,
+        .m68k => 8 << 10,
+        else => null,
+    },
+    .dragonfly => switch (builtin.cpu.arch) {
+        .x86, .x86_64 => 4 << 10,
+        else => null,
+    },
+    .openbsd => switch (builtin.cpu.arch) {
+        // OpenBSD/sys/arch/*
+        .x86, .x86_64 => 4 << 10,
+        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
+        .mips64, .mips64el => 16 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+        .riscv64 => 4 << 10,
+        .sparc64 => 8 << 10,
+        else => null,
+    },
+    .solaris, .illumos => switch (builtin.cpu.arch) {
+        // src/uts/*/sys/machparam.h
+        .x86, .x86_64 => 4 << 10,
+        .sparc, .sparc64 => 8 << 10,
+        else => null,
+    },
+    .fuchsia => switch (builtin.cpu.arch) {
+        // fuchsia/kernel/arch/*/include/arch/defines.h
+        .x86_64 => 4 << 10,
+        .aarch64, .aarch64_be => 4 << 10,
+        .riscv64 => 4 << 10,
+        else => null,
+    },
+    // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
+    .serenity => 4 << 10,
+    .haiku => switch (builtin.cpu.arch) {
+        // haiku/headers/posix/arch/*/limits.h
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 4 << 10,
+        .m68k => 4 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+        .riscv64 => 4 << 10,
+        .sparc64 => 8 << 10,
+        .x86, .x86_64 => 4 << 10,
+        else => null,
+    },
+    .hurd => switch (builtin.cpu.arch) {
+        // gnumach/*/include/mach/*/vm_param.h
+        .x86, .x86_64 => 4 << 10,
+        .aarch64 => null,
+        else => null,
+    },
+    .plan9 => switch (builtin.cpu.arch) {
+        // 9front/sys/src/9/*/mem.h
+        .x86, .x86_64 => 4 << 10,
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 64 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 16 << 10,
+        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
+        .sparc => 4 << 10,
+        else => null,
+    },
+    .ps3 => switch (builtin.cpu.arch) {
+        // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
+        .powerpc64 => 1 << 20, // 1 MiB
+        else => null,
+    },
+    .ps4 => switch (builtin.cpu.arch) {
+        // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
+        .x86, .x86_64 => 4 << 10,
+        else => null,
+    },
+    .ps5 => switch (builtin.cpu.arch) {
+        // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
+        .x86, .x86_64 => 16 << 10,
+        else => null,
+    },
+    // system/lib/libc/musl/arch/emscripten/bits/limits.h
+    .emscripten => 64 << 10,
+    .linux => switch (builtin.cpu.arch) {
+        // Linux/arch/*/Kconfig
+        .arc => 16 << 10,
+        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+        .aarch64, .aarch64_be => 64 << 10,
+        .csky => 4 << 10,
+        .hexagon => 256 << 10,
+        .loongarch32, .loongarch64 => 64 << 10,
+        .m68k => 8 << 10,
+        .mips, .mipsel, .mips64, .mips64el => 64 << 10,
+        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 256 << 10,
+        .riscv32, .riscv64 => 4 << 10,
+        .s390x => 4 << 10,
+        .sparc => 4 << 10,
+        .sparc64 => 8 << 10,
+        .x86, .x86_64 => 4 << 10,
+        .xtensa => 4 << 10,
+        else => null,
+    },
+    .freestanding => switch (builtin.cpu.arch) {
+        .wasm32, .wasm64 => 64 << 10,
+        else => null,
+    },
+    else => null,
+};
+
+/// The compile-time minimum page size that the target might have.
+/// All pointers from `mmap` or `VirtualAlloc` are aligned to at least `min_page_size`, but their
+/// actual alignment may be much bigger.
+/// This value can be overridden via `std.options.min_page_size`.
+/// On many systems, the actual page size can only be determined at runtime with `pageSize()`.
+pub const min_page_size: usize = std.options.min_page_size orelse (default_min_page_size orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
+    @compileError("freestanding/other explicitly has no min_page_size. One can be provided with std.options.min_page_size")
+else
+    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has no min_page_size. One can be provided with std.options.min_page_size"));
+
+/// The compile-time maximum page size that the target might have.
+/// Targeting a system with a larger page size may require overriding `std.options.max_page_size`,
+/// as well as using the linker arugment `-z max-page-size=`.
+/// The actual page size can only be determined at runtime with `pageSize()`.
+pub const max_page_size: usize = std.options.max_page_size orelse (default_max_page_size orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
+    @compileError("freestanding/other explicitly has no max_page_size. One can be provided with std.options.max_page_size")
+else
+    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has no max_page_size. One can be provided with std.options.max_page_size"));
+
+/// Returns the system page size.
+/// If the page size is comptime-known, `pageSize()` returns it directly.
+/// Otherwise, `pageSize()` defers to `std.options.queryPageSizeFn()`.
+pub fn pageSize() usize {
+    if (min_page_size == max_page_size) {
+        return min_page_size;
+    }
+    return std.options.queryPageSizeFn();
+}
+
+// A cache used by `defaultQueryPageSize()` to avoid repeating syscalls.
+var page_size_cache = std.atomic.Value(usize).init(0);
+
+// The default implementation in `std.options.queryPageSizeFn`.
+// The first time it is called, it asserts that the page size is within the comptime bounds.
+pub fn defaultQueryPageSize() usize {
+    var size = page_size_cache.load(.unordered);
+    if (size > 0) return size;
+    size = switch (builtin.os.tag) {
+        .linux => if (builtin.link_libc) @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE))) else std.os.linux.getauxval(std.elf.AT_PAGESZ),
+        .driverkit, .ios, .macos, .tvos, .visionos, .watchos => blk: {
+            const task_port = std.c.mach_task_self();
+            // mach_task_self may fail "if there are any resource failures or other errors".
+            if (task_port == std.c.TASK_NULL)
+                break :blk 0;
+            var info_count = std.c.TASK_VM_INFO_COUNT;
+            var vm_info: std.c.task_vm_info_data_t = undefined;
+            vm_info.page_size = 0;
+            _ = std.c.task_info(
+                task_port,
+                std.c.TASK_VM_INFO,
+                @as(std.c.task_info_t, @ptrCast(&vm_info)),
+                &info_count,
+            );
+            assert(vm_info.page_size != 0);
+            break :blk @as(usize, @intCast(vm_info.page_size));
+        },
+        .windows => blk: {
+            var info: std.os.windows.SYSTEM_INFO = undefined;
+            std.os.windows.kernel32.GetSystemInfo(&info);
+            break :blk info.dwPageSize;
+        },
+        else => if (builtin.link_libc)
+            if (std.c._SC != void and @hasDecl(std.c._SC, "PAGESIZE"))
+                @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)))
+            else
+                @compileError("missing _SC.PAGESIZE declaration for " ++ @tagName(builtin.os.tag) ++ "-" ++ @tagName(builtin.os.tag))
+        else if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
+            @compileError("pageSize on freestanding/other is not supported with the default std.options.queryPageSizeFn")
+        else
+            @compileError("pageSize on " ++ @tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " is not supported without linking libc, using the default implementation"),
+    };
+
+    assert(size >= min_page_size);
+    assert(size <= max_page_size);
+    page_size_cache.store(size, .unordered);
+
+    return size;
+}
+
 pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
 pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
 pub const ScopedLoggingAllocator = @import("heap/logging_allocator.zig").ScopedLoggingAllocator;
@@ -30,7 +400,7 @@ pub const MemoryPoolExtra = memory_pool.MemoryPoolExtra;
 pub const MemoryPoolOptions = memory_pool.Options;

 /// TODO Utilize this on Windows.
-pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
+pub var next_mmap_addr_hint: ?[*]align(min_page_size) u8 = null;

 const CAllocator = struct {
     comptime {
@@ -258,7 +628,7 @@ pub const wasm_allocator = Allocator{
 /// Verifies that the adjusted length will still map to the full length
 pub fn alignPageAllocLen(full_len: usize, len: usize) usize {
     const aligned_len = mem.alignAllocLen(full_len, len);
-    assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len);
+    assert(mem.alignForward(usize, aligned_len, pageSize()) == full_len);
     return aligned_len;
 }

@@ -617,13 +987,13 @@ test "PageAllocator" {
     }

     if (builtin.os.tag == .windows) {
-        const slice = try allocator.alignedAlloc(u8, mem.page_size, 128);
+        const slice = try allocator.alignedAlloc(u8, min_page_size, 128);
         slice[0] = 0x12;
         slice[127] = 0x34;
         allocator.free(slice);
     }
     {
-        var buf = try allocator.alloc(u8, mem.page_size + 1);
+        var buf = try allocator.alloc(u8, pageSize() + 1);
         defer allocator.free(buf);
         buf = try allocator.realloc(buf, 1); // shrink past the page boundary
     }
@@ -826,7 +1196,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
     var validationAllocator = mem.validationWrap(base_allocator);
     const allocator = validationAllocator.allocator();

-    const large_align: usize = mem.page_size / 2;
+    const large_align: usize = min_page_size / 2;

     var align_mask: usize = undefined;
     align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0];
@@ -859,7 +1229,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
     var fib = FixedBufferAllocator.init(&debug_buffer);
     const debug_allocator = fib.allocator();

-    const alloc_size = mem.page_size * 2 + 50;
+    const alloc_size = pageSize() * 2 + 50;
     var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
     defer allocator.free(slice);

@@ -868,7 +1238,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
     // which is 16 pages, hence the 32. This test may require to increase
     // the size of the allocations feeding the `allocator` parameter if they
    // fail, because of this high over-alignment we want to have.
-    while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), mem.page_size * 32)) {
+    while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), pageSize() * 32)) {
         try stuff_to_free.append(slice);
         slice = try allocator.alignedAlloc(u8, 16, alloc_size);
     }
@@ -883,6 +1253,20 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
     try testing.expect(slice[60] == 0x34);
 }

+test "pageSize() smoke test" {
+    const size = std.heap.pageSize();
+    // Check that pageSize is a power of 2.
+    std.debug.assert(size & (size - 1) == 0);
+}
+
+test "defaultQueryPageSize() smoke test" {
+    // queryPageSize() does not always get called by pageSize()
+    if (builtin.cpu.arch.isWasm()) return error.SkipZigTest;
+    const size = defaultQueryPageSize();
+    // Check that pageSize is a power of 2.
+    std.debug.assert(size & (size - 1) == 0);
+}
+
 test {
     _ = LoggingAllocator;
     _ = LogToWriterAllocator;
diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig
index 4188c25528..1e9058717e 100644
--- a/lib/std/heap/PageAllocator.zig
+++ b/lib/std/heap/PageAllocator.zig
@@ -2,6 +2,7 @@ const std = @import("../std.zig");
 const builtin = @import("builtin");
 const Allocator = std.mem.Allocator;
 const mem = std.mem;
+const heap = std.heap;
 const maxInt = std.math.maxInt;
 const assert = std.debug.assert;
 const native_os = builtin.os.tag;
@@ -18,7 +19,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
     _ = ra;
     _ = log2_align;
     assert(n > 0);
-    if (n > maxInt(usize) - (mem.page_size - 1)) return null;
+    if (n > maxInt(usize) - (heap.pageSize() - 1)) return null;

     if (native_os == .windows) {
         const addr = windows.VirtualAlloc(
@@ -34,7 +35,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
         return @ptrCast(addr);
     }

-    const aligned_len = mem.alignForward(usize, n, mem.page_size);
+    const aligned_len = mem.alignForward(usize, n, heap.pageSize());
     const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
     const slice = posix.mmap(
         hint,
@@ -44,8 +45,8 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
         -1,
         0,
     ) catch return null;
-    assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
-    const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
+    assert(mem.isAligned(@intFromPtr(slice.ptr), heap.pageSize()));
+    const new_hint: [*]align(heap.min_page_size) u8 = @alignCast(slice.ptr + aligned_len);
     _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic);
     return slice.ptr;
 }
@@ -59,13 +60,13 @@ fn resize(
 ) bool {
     _ = log2_buf_align;
     _ = return_address;
-    const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size);
+    const new_size_aligned = mem.alignForward(usize, new_size, heap.pageSize());

     if (native_os == .windows) {
         if (new_size <= buf_unaligned.len) {
             const base_addr = @intFromPtr(buf_unaligned.ptr);
             const old_addr_end = base_addr + buf_unaligned.len;
-            const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size);
+            const new_addr_end = mem.alignForward(usize, base_addr + new_size, heap.pageSize());
             if (old_addr_end > new_addr_end) {
                 // For shrinking that is not releasing, we will only
                 // decommit the pages not needed anymore.
@@ -77,14 +78,14 @@ fn resize(
             }
             return true;
         }
-        const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
+        const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, heap.pageSize());
         if (new_size_aligned <= old_size_aligned) {
             return true;
         }
         return false;
     }

-    const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
+    const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, heap.pageSize());
     if (new_size_aligned == buf_aligned_len)
         return true;

@@ -107,7 +108,7 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v
     if (native_os == .windows) {
         windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE);
     } else {
-        const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size);
+        const buf_aligned_len = mem.alignForward(usize, slice.len, heap.pageSize());
         posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
     }
 }
diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig
index ca625e43ed..0c7d25d6b7 100644
--- a/lib/std/heap/WasmPageAllocator.zig
+++ b/lib/std/heap/WasmPageAllocator.zig
@@ -5,6 +5,7 @@ const Allocator = std.mem.Allocator;
 const mem = std.mem;
 const maxInt = std.math.maxInt;
 const assert = std.debug.assert;
+const page_size = std.wasm.page_size;

 comptime {
     if (!builtin.target.isWasm()) {
@@ -71,7 +72,7 @@ const FreeBlock = struct {
             var count: usize = 0;
             while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
                 count += 1;
-                const addr = j * mem.page_size;
+                const addr = j * page_size;
                 if (count >= num_pages and mem.isAlignedLog2(addr, log2_align)) {
                     self.setBits(j, num_pages, .used);
                     return j;
@@ -98,16 +99,16 @@ fn extendedOffset() usize {
 }

 fn nPages(memsize: usize) usize {
-    return mem.alignForward(usize, memsize, mem.page_size) / mem.page_size;
+    return mem.alignForward(usize, memsize, page_size) / page_size;
 }

 fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
     _ = ctx;
     _ = ra;
-    if (len > maxInt(usize) - (mem.page_size - 1)) return null;
+    if (len > maxInt(usize) - (page_size - 1)) return null;
     const page_count = nPages(len);
     const page_idx = allocPages(page_count, log2_align) catch return null;
-    return @as([*]u8, @ptrFromInt(page_idx * mem.page_size));
+    return @as([*]u8, @ptrFromInt(page_idx * page_size));
 }

 fn allocPages(page_count: usize, log2_align: u8) !usize {
@@ -124,9 +125,9 @@ fn allocPages(page_count: usize, log2_align: u8) !usize {
     }

     const next_page_idx = @wasmMemorySize(0);
-    const next_page_addr = next_page_idx * mem.page_size;
+    const next_page_addr = next_page_idx * page_size;
     const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align);
-    const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
+    const drop_page_count = @divExact(aligned_addr - next_page_addr, page_size);
     const result = @wasmMemoryGrow(0, @as(u32, @intCast(drop_page_count + page_count)));
     if (result <= 0)
         return error.OutOfMemory;
@@ -149,7 +150,7 @@ fn freePages(start: usize, end: usize) void {
         // TODO: would it be better if we use the first page instead?
         new_end -= 1;

-        extended.data = @as([*]u128, @ptrFromInt(new_end * mem.page_size))[0 .. mem.page_size / @sizeOf(u128)];
+        extended.data = @as([*]u128, @ptrFromInt(new_end * page_size))[0 .. page_size / @sizeOf(u128)];
         // Since this is the first page being freed and we consume it, assume *nothing* is free.
         @memset(extended.data, PageStatus.none_free);
     }
@@ -168,7 +169,7 @@ fn resize(
     _ = ctx;
     _ = log2_buf_align;
     _ = return_address;
-    const aligned_len = mem.alignForward(usize, buf.len, mem.page_size);
+    const aligned_len = mem.alignForward(usize, buf.len, page_size);
     if (new_len > aligned_len) return false;
     const current_n = nPages(aligned_len);
     const new_n = nPages(new_len);
@@ -188,7 +189,7 @@ fn free(
     _ = ctx;
     _ = log2_buf_align;
     _ = return_address;
-    const aligned_len = mem.alignForward(usize, buf.len, mem.page_size);
+    const aligned_len = mem.alignForward(usize, buf.len, page_size);
     const current_n = nPages(aligned_len);
     const base = nPages(@intFromPtr(buf.ptr));
     freePages(base, base + current_n);
@@ -198,8 +199,8 @@ test "internals" {
     const page_allocator = std.heap.page_allocator;
     const testing = std.testing;

-    const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
-    const initial = try page_allocator.alloc(u8, mem.page_size);
+    const conventional_memsize = WasmPageAllocator.conventional.totalPages() * page_size;
+    const initial = try page_allocator.alloc(u8, page_size);
     try testing.expect(@intFromPtr(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.

     var inplace = try page_allocator.realloc(initial, 1);
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index b760c9d85d..fa1b1b0737 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -48,7 +48,7 @@
 //!
 //! ## Basic Design:
 //!
-//! Small allocations are divided into buckets:
+//! Small allocations are divided into buckets. For a max page size of 4K:
 //!
 //! ```
 //! index obj_size
@@ -75,6 +75,9 @@
 //! BucketHeader, followed by "used bits", and two stack traces for each slot
 //! (allocation trace and free trace).
 //!
+//! The buckets array contains buckets for every size class below `max_page_size`.
+//! At runtime, only size classes below `pageSize()` will actually be used for allocations.
+//!
 //! The "used bits" are 1 bit per slot representing whether the slot is used.
 //! Allocations use the data to iterate to find a free slot. Frees assert that the
 //! corresponding bit is 1 and set it to 0.
@@ -99,11 +102,13 @@ const math = std.math;
 const assert = std.debug.assert;
 const mem = std.mem;
 const Allocator = std.mem.Allocator;
-const page_size = std.mem.page_size;
+const min_page_size = std.heap.min_page_size;
+const max_page_size = std.heap.max_page_size;
+const pageSize = std.heap.pageSize;
 const StackTrace = std.builtin.StackTrace;

 /// Integer type for pointing to slots in a small allocation
-const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);
+const SlotIndex = std.meta.Int(.unsigned, math.log2(max_page_size) + 1);

 const default_test_stack_trace_frames: usize = if (builtin.is_test) 10 else 6;
 const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) default_test_stack_trace_frames else 0;
@@ -157,6 +162,9 @@ pub const Config = struct {

 pub const Check = enum { ok, leak };

+var used_small_bucket_count_cache = std.atomic.Value(usize).init(0);
+var largest_used_bucket_object_size_cache = std.atomic.Value(usize).init(0);
+
 /// Default initialization of this struct is deprecated; use `.init` instead.
 pub fn GeneralPurposeAllocator(comptime config: Config) type {
     return struct {
@@ -206,9 +214,27 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {

         pub const Error = mem.Allocator.Error;

-        const small_bucket_count = math.log2(page_size);
+        const small_bucket_count = math.log2(max_page_size);
         const largest_bucket_object_size = 1 << (small_bucket_count - 1);
         const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);
+        fn used_small_bucket_count() usize {
+            const cached = used_small_bucket_count_cache.load(.monotonic);
+            if (cached != 0) {
+                return cached;
+            }
+            const val = math.log2(pageSize());
+            used_small_bucket_count_cache.store(val, .monotonic);
+            return val;
+        }
+        fn largest_used_bucket_object_size() usize {
+            const cached = largest_used_bucket_object_size_cache.load(.monotonic);
+            if (cached != 0) {
+                return cached;
+            }
+            const val = @as(usize, 1) << @truncate(used_small_bucket_count() - 1);
+            largest_used_bucket_object_size_cache.store(val, .monotonic);
+            return val;
+        }

         const bucketCompare = struct {
             fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
@@ -261,7 +287,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         // * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation

         const BucketHeader = struct {
-            page: [*]align(page_size) u8,
+            page: [*]align(min_page_size) u8,
             alloc_cursor: SlotIndex,
             used_count: SlotIndex,

@@ -273,14 +299,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 if (!config.safety) @compileError("requested size is only stored when safety is enabled");
                 const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(size_class);
                 const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
-                const slot_count = @divExact(page_size, size_class);
+                const slot_count = @divExact(pageSize(), size_class);
                 return sizes[0..slot_count];
             }

             fn log2PtrAligns(bucket: *BucketHeader, size_class: usize) []u8 {
                 if (!config.safety) @compileError("requested size is only stored when safety is enabled");
                 const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(size_class);
-                const slot_count = @divExact(page_size, size_class);
+                const slot_count = @divExact(pageSize(), size_class);
                 return aligns_ptr[0..slot_count];
             }

@@ -312,7 +338,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             /// Only valid for buckets within `empty_buckets`, and relies on the `alloc_cursor`
             /// of empty buckets being set to `slot_count` when they are added to `empty_buckets`
             fn emptyBucketSizeClass(bucket: *BucketHeader) usize {
-                return @divExact(page_size, bucket.alloc_cursor);
+                return @divExact(pageSize(), bucket.alloc_cursor);
             }
         };

@@ -355,13 +381,13 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {

         fn bucketAlignsStart(size_class: usize) usize {
             if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
-            const slot_count = @divExact(page_size, size_class);
+            const slot_count = @divExact(pageSize(), size_class);
             return bucketRequestedSizesStart(size_class) + (@sizeOf(LargestSizeClassInt) * slot_count);
         }

         fn bucketStackFramesStart(size_class: usize) usize {
             const unaligned_start = if (config.safety) blk: {
-                const slot_count = @divExact(page_size, size_class);
+                const slot_count = @divExact(pageSize(), size_class);
                 break :blk bucketAlignsStart(size_class) + slot_count;
             } else @sizeOf(BucketHeader) + usedBitsCount(size_class);
             return mem.alignForward(
@@ -372,12 +398,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         }

         fn bucketSize(size_class: usize) usize {
-            const slot_count = @divExact(page_size, size_class);
+            const slot_count = @divExact(pageSize(), size_class);
             return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
         }

         fn usedBitsCount(size_class: usize) usize {
-            const slot_count = @divExact(page_size, size_class);
+            const slot_count = @divExact(pageSize(), size_class);
             if (slot_count < 8) return 1;
             return @divExact(slot_count, 8);
         }
@@ -416,7 +442,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         pub fn detectLeaks(self: *Self) bool {
             var leaks = false;

-            for (&self.buckets, 0..) |*buckets, bucket_i| {
+            for (0..used_small_bucket_count()) |bucket_i| {
+                const buckets = &self.buckets[bucket_i];
                 if (buckets.root == null) continue;
                 const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i));
                 const used_bits_count = usedBitsCount(size_class);
@@ -464,7 +491,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 var bucket = node.key;
                 if (config.never_unmap) {
                     // free page that was intentionally leaked by never_unmap
-                    self.backing_allocator.free(bucket.page[0..page_size]);
+                    self.backing_allocator.free(bucket.page[0..pageSize()]);
                 }
                 // alloc_cursor was set to slot count when bucket added to empty_buckets
                 self.freeBucket(bucket, bucket.emptyBucketSizeClass());
@@ -531,7 +558,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error!Slot {
             const bucket_index = math.log2(size_class);
             var buckets = &self.buckets[bucket_index];
-            const slot_count = @divExact(page_size, size_class);
+            const slot_count = @divExact(pageSize(), size_class);
             if (self.cur_buckets[bucket_index] == null or self.cur_buckets[bucket_index].?.alloc_cursor == slot_count) {
                 const new_bucket = try self.createBucket(size_class);
                 errdefer self.freeBucket(new_bucket, size_class);
@@ -564,7 +591,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             addr: usize,
             current_bucket: ?*BucketHeader,
         ) ?*BucketHeader {
-            const search_page: [*]align(page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, page_size));
+            const search_page: [*]align(min_page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, pageSize()));
             if (current_bucket != null and current_bucket.?.page == search_page) {
                 return current_bucket;
             }
@@ -729,14 +756,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             assert(old_mem.len != 0);

             const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
-            if (aligned_size > largest_bucket_object_size) {
+            if (aligned_size > largest_used_bucket_object_size()) {
                 return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
             }
             const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);

             var bucket_index = math.log2(size_class_hint);
             var size_class: usize = size_class_hint;
-            const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
+            const bucket = while (bucket_index < used_small_bucket_count()) : (bucket_index += 1) {
                 if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
                     break bucket;
                 }
@@ -847,7 +874,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             assert(old_mem.len != 0);

             const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
-            if (aligned_size > largest_bucket_object_size) {
+            if (aligned_size > largest_used_bucket_object_size()) {
                 self.freeLarge(old_mem, log2_old_align, ret_addr);
                 return;
             }
             const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);

             var bucket_index = math.log2(size_class_hint);
             var size_class: usize = size_class_hint;
-            const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
+            const bucket = while (bucket_index < used_small_bucket_count()) : (bucket_index += 1) {
                 if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
                     break bucket;
                 }
@@ -944,14 +971,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 self.cur_buckets[bucket_index] = null;
             }
             if (!config.never_unmap) {
-                self.backing_allocator.free(bucket.page[0..page_size]);
+                self.backing_allocator.free(bucket.page[0..pageSize()]);
             }
             if (!config.retain_metadata) {
                 self.freeBucket(bucket, size_class);
                 self.bucket_node_pool.destroy(node);
             } else {
                 // move alloc_cursor to end so we can tell size_class later
-                const slot_count = @divExact(page_size, size_class);
+                const slot_count = @divExact(pageSize(), size_class);
                 bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
                 var empty_entry = self.empty_buckets.getEntryFor(node.key);
                 empty_entry.set(node);
@@ -992,7 +1019,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             ret_addr: usize,
         ) Allocator.Error![*]u8 {
             const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
-            if (new_aligned_size > largest_bucket_object_size) {
+            if (new_aligned_size > largest_used_bucket_object_size()) {
                 try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
                 const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
                     return error.OutOfMemory;
@@ -1035,7 +1062,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         }

         fn createBucket(self: *Self, size_class: usize) Error!*BucketHeader {
-            const page = try self.backing_allocator.alignedAlloc(u8, page_size, page_size);
+            const page = try self.backing_allocator.alignedAlloc(u8, min_page_size, pageSize());
             errdefer self.backing_allocator.free(page);

             const bucket_size = bucketSize(size_class);
@@ -1175,17 +1202,17 @@ test "large object - grow" {
     defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
     const allocator = gpa.allocator();

-    var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
+    var slice1 = try allocator.alloc(u8, pageSize() * 2 - 20);
     defer allocator.free(slice1);

     const old = slice1;
-    slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
+    slice1 = try allocator.realloc(slice1, pageSize() * 2 - 10);
     try std.testing.expect(slice1.ptr == old.ptr);

-    slice1 = try allocator.realloc(slice1, page_size * 2);
+    slice1 = try allocator.realloc(slice1, pageSize() * 2);
     try std.testing.expect(slice1.ptr == old.ptr);

-    slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
+    slice1 = try allocator.realloc(slice1, pageSize() * 2 + 1);
 }

 test "realloc small object to large object" {
@@ -1199,7 +1226,7 @@ test "realloc small object to large object" {
     slice[60] = 0x34;

     // This requires upgrading to a large object
-    const large_object_size = page_size * 2 + 50;
+    const large_object_size = pageSize() * 2 + 50;
     slice = try allocator.realloc(slice, large_object_size);
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[60] == 0x34);
@@ -1210,22 +1237,22 @@ test "shrink large object to large object" {
     defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
     const allocator = gpa.allocator();

-    var slice = try allocator.alloc(u8, page_size * 2 + 50);
+    var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
     defer allocator.free(slice);
     slice[0] = 0x12;
     slice[60] = 0x34;

-    if (!allocator.resize(slice, page_size * 2 + 1)) return;
-    slice = slice.ptr[0 .. page_size * 2 + 1];
+    if (!allocator.resize(slice, pageSize() * 2 + 1)) return;
+    slice = slice.ptr[0 .. pageSize() * 2 + 1];
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[60] == 0x34);

-    try std.testing.expect(allocator.resize(slice, page_size * 2 + 1));
-    slice = slice[0 .. page_size * 2 + 1];
+    try std.testing.expect(allocator.resize(slice, pageSize() * 2 + 1));
+    slice = slice[0 .. pageSize() * 2 + 1];
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[60] == 0x34);

-    slice = try allocator.realloc(slice, page_size * 2);
+    slice = try allocator.realloc(slice, pageSize() * 2);
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[60] == 0x34);
 }
@@ -1239,13 +1266,13 @@ test "shrink large object to large object with larger alignment" {
     var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
     const debug_allocator = fba.allocator();

-    const alloc_size = page_size * 2 + 50;
+    const alloc_size = pageSize() * 2 + 50;
     var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
     defer allocator.free(slice);

     const big_alignment: usize = switch (builtin.os.tag) {
-        .windows => page_size * 32, // Windows aligns to 64K.
-        else => page_size * 2,
+        .windows => pageSize() * 32, // Windows aligns to 64K.
+        else => pageSize() * 2,
     };
     // This loop allocates until we find a page that is not aligned to the big
     // alignment. Then we shrink the allocation after the loop, but increase the
@@ -1271,7 +1298,7 @@ test "realloc large object to small object" {
     defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
     const allocator = gpa.allocator();

-    var slice = try allocator.alloc(u8, page_size * 2 + 50);
+    var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
     defer allocator.free(slice);
     slice[0] = 0x12;
     slice[16] = 0x34;
@@ -1311,18 +1338,18 @@ test "realloc large object to larger alignment" {
     var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
     const debug_allocator = fba.allocator();

-    var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
+    var slice = try allocator.alignedAlloc(u8, 16, pageSize() * 2 + 50);
     defer allocator.free(slice);

     const big_alignment: usize = switch (builtin.os.tag) {
-        .windows => page_size * 32, // Windows aligns to 64K.
-        else => page_size * 2,
+        .windows => pageSize() * 32, // Windows aligns to 64K.
+        else => pageSize() * 2,
     };
     // This loop allocates until we find a page that is not aligned to the big alignment.
> var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator); > while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) { > try stuff_to_free.append(slice); >- slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50); >+ slice = try allocator.alignedAlloc(u8, 16, pageSize() * 2 + 50); > } > while (stuff_to_free.popOrNull()) |item| { > allocator.free(item); >@@ -1330,15 +1357,15 @@ test "realloc large object to larger alignment" { > slice[0] = 0x12; > slice[16] = 0x34; > >- slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100); >+ slice = try allocator.reallocAdvanced(slice, 32, pageSize() * 2 + 100); > try std.testing.expect(slice[0] == 0x12); > try std.testing.expect(slice[16] == 0x34); > >- slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25); >+ slice = try allocator.reallocAdvanced(slice, 32, pageSize() * 2 + 25); > try std.testing.expect(slice[0] == 0x12); > try std.testing.expect(slice[16] == 0x34); > >- slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100); >+ slice = try allocator.reallocAdvanced(slice, big_alignment, pageSize() * 2 + 100); > try std.testing.expect(slice[0] == 0x12); > try std.testing.expect(slice[16] == 0x34); > } >@@ -1349,7 +1376,7 @@ test "large object shrinks to small but allocation fails during shrink" { > defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); > const allocator = gpa.allocator(); > >- var slice = try allocator.alloc(u8, page_size * 2 + 50); >+ var slice = try allocator.alloc(u8, pageSize() * 2 + 50); > defer allocator.free(slice); > slice[0] = 0x12; > slice[3] = 0x34; >@@ -1420,7 +1447,7 @@ test "double frees" { > try std.testing.expect(GPA.searchBucket(&gpa.empty_buckets, @intFromPtr(small.ptr), null) != null); > > // detect a large allocation double free >- const large = try allocator.alloc(u8, 2 * page_size); >+ const large = try allocator.alloc(u8, 2 * pageSize()); > try std.testing.expect(gpa.large_allocations.contains(@intFromPtr(large.ptr))); > try std.testing.expectEqual(gpa.large_allocations.getEntry(@intFromPtr(large.ptr)).?.value_ptr.bytes, large); > allocator.free(large); >@@ -1429,7 +1456,7 @@ test "double frees" { > > const normal_small = try allocator.alloc(u8, size_class); > defer allocator.free(normal_small); >- const normal_large = try allocator.alloc(u8, 2 * page_size); >+ const normal_large = try allocator.alloc(u8, 2 * pageSize()); > defer allocator.free(normal_large); > > // check that flushing retained metadata doesn't disturb live allocations >@@ -1462,8 +1489,8 @@ test "bug 9995 fix, large allocs count requested size not backing size" { > var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){}; > const allocator = gpa.allocator(); > >- var buf = try allocator.alignedAlloc(u8, 1, page_size + 1); >- try std.testing.expect(gpa.total_requested_bytes == page_size + 1); >+ var buf = try allocator.alignedAlloc(u8, 1, pageSize() + 1); >+ try std.testing.expect(gpa.total_requested_bytes == pageSize() + 1); > buf = try allocator.realloc(buf, 1); > try std.testing.expect(gpa.total_requested_bytes == 1); > buf = try allocator.realloc(buf, 2); >diff --git a/lib/std/heap/sbrk_allocator.zig b/lib/std/heap/sbrk_allocator.zig >index 08933fed52..b39f8ac344 100644 >--- a/lib/std/heap/sbrk_allocator.zig >+++ b/lib/std/heap/sbrk_allocator.zig >@@ -3,6 +3,7 @@ const builtin = @import("builtin"); > const math = std.math; > const Allocator = std.mem.Allocator; > const mem = std.mem; >+const heap = std.heap; > const assert = 
std.debug.assert;
>
> pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
>@@ -18,7 +19,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
> const max_usize = math.maxInt(usize);
> const ushift = math.Log2Int(usize);
> const bigpage_size = 64 * 1024;
>- const pages_per_bigpage = bigpage_size / mem.page_size;
>+ const pages_per_bigpage = bigpage_size / heap.pageSize();
> const bigpage_count = max_usize / bigpage_size;
>
> /// Because of storing free list pointers, the minimum size class is 3.
>@@ -58,7 +59,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
> }
>
> const next_addr = next_addrs[class];
>- if (next_addr % mem.page_size == 0) {
>+ if (next_addr % heap.pageSize() == 0) {
> const addr = allocBigPages(1);
> if (addr == 0) return null;
> //std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
>@@ -153,7 +154,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
> big_frees[class] = node.*;
> return top_free_ptr;
> }
>- return sbrk(pow2_pages * pages_per_bigpage * mem.page_size);
>+ return sbrk(pow2_pages * pages_per_bigpage * heap.pageSize());
> }
> };
> }
>diff --git a/lib/std/mem.zig b/lib/std/mem.zig
>index 1f1e925f54..96654fd904 100644
>--- a/lib/std/mem.zig
>+++ b/lib/std/mem.zig
>@@ -8,26 +8,6 @@ const testing = std.testing;
> const Endian = std.builtin.Endian;
> const native_endian = builtin.cpu.arch.endian();
>
>-/// Compile time known minimum page size.
>-/// https://github.com/ziglang/zig/issues/4082
>-pub const page_size = switch (builtin.cpu.arch) {
>- .wasm32, .wasm64 => 64 * 1024,
>- .aarch64 => switch (builtin.os.tag) {
>- .macos, .ios, .watchos, .tvos, .visionos => 16 * 1024,
>- else => 4 * 1024,
>- },
>- .sparc64 => 8 * 1024,
>- .loongarch32, .loongarch64 => switch (builtin.os.tag) {
>- // Linux default KConfig value is 16KiB
>- .linux => 16 * 1024,
>- // FIXME:
>- // There is no other OS supported yet. Use the same value
>- // as Linux for now.
>- else => 16 * 1024,
>- },
>- else => 4 * 1024,
>-};
>-
> /// The standard library currently thoroughly depends on byte size
> /// being 8 bits. (see the use of u8 throughout allocation code as
> /// the "byte" type.) Code which depends on this can reference this
>@@ -1085,12 +1065,13 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
> const Block = @Vector(block_len, T);
> const mask: Block = @splat(sentinel);
>
>- comptime std.debug.assert(std.mem.page_size % block_size == 0);
>+ comptime std.debug.assert(std.heap.max_page_size % @sizeOf(Block) == 0);
>+ std.debug.assert(std.heap.pageSize() % @sizeOf(Block) == 0);
>
> // First block may be unaligned
> const start_addr = @intFromPtr(&p[i]);
>- const offset_in_page = start_addr & (std.mem.page_size - 1);
>- if (offset_in_page <= std.mem.page_size - block_size) {
>+ const offset_in_page = start_addr & (std.heap.pageSize() - 1);
>+ if (offset_in_page <= std.heap.pageSize() - @sizeOf(Block)) {
> // Will not read past the end of a page, full block.
> const block: Block = p[i..][0..block_len].*; > const matches = block == mask; >@@ -1138,18 +1119,18 @@ test "indexOfSentinel vector paths" { > const block_len = std.simd.suggestVectorLength(T) orelse continue; > > // Allocate three pages so we guarantee a page-crossing address with a full page after >- const memory = try allocator.alloc(T, 3 * std.mem.page_size / @sizeOf(T)); >+ const memory = try allocator.alloc(T, 3 * std.heap.pageSize() / @sizeOf(T)); > defer allocator.free(memory); > @memset(memory, 0xaa); > > // Find starting page-alignment = 0 > var start: usize = 0; > const start_addr = @intFromPtr(&memory); >- start += (std.mem.alignForward(usize, start_addr, std.mem.page_size) - start_addr) / @sizeOf(T); >- try testing.expect(start < std.mem.page_size / @sizeOf(T)); >+ start += (std.mem.alignForward(usize, start_addr, std.heap.pageSize()) - start_addr) / @sizeOf(T); >+ try testing.expect(start < std.heap.pageSize() / @sizeOf(T)); > > // Validate all sub-block alignments >- const search_len = std.mem.page_size / @sizeOf(T); >+ const search_len = std.heap.pageSize() / @sizeOf(T); > memory[start + search_len] = 0; > for (0..block_len) |offset| { > try testing.expectEqual(search_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start + offset]))); >@@ -1157,7 +1138,7 @@ test "indexOfSentinel vector paths" { > memory[start + search_len] = 0xaa; > > // Validate page boundary crossing >- const start_page_boundary = start + (std.mem.page_size / @sizeOf(T)); >+ const start_page_boundary = start + (std.heap.pageSize() / @sizeOf(T)); > memory[start_page_boundary + block_len] = 0; > for (0..block_len) |offset| { > try testing.expectEqual(2 * block_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start_page_boundary - block_len + offset]))); >diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig >index 8aea197d6a..ba03da1812 100644 >--- a/lib/std/mem/Allocator.zig >+++ b/lib/std/mem/Allocator.zig >@@ -215,7 +215,7 @@ fn allocBytesWithAlignment(self: Allocator, comptime alignment: u29, byte_count: > // The Zig Allocator interface is not intended to solve alignments beyond > // the minimum OS page size. For these use cases, the caller must use OS > // APIs directly. >- comptime assert(alignment <= mem.page_size); >+ if (!@inComptime() and alignment > std.heap.pageSize()) @panic("Alignment must be smaller than page size."); > > if (byte_count == 0) { > const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), alignment); >diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig >index 731877e5ae..d01fc6e0ec 100644 >--- a/lib/std/os/linux/IoUring.zig >+++ b/lib/std/os/linux/IoUring.zig >@@ -3,6 +3,7 @@ const std = @import("std"); > const builtin = @import("builtin"); > const assert = std.debug.assert; > const mem = std.mem; >+const heap = std.heap; > const net = std.net; > const posix = std.posix; > const linux = std.os.linux; >@@ -1341,8 +1342,8 @@ pub const SubmissionQueue = struct { > dropped: *u32, > array: []u32, > sqes: []linux.io_uring_sqe, >- mmap: []align(mem.page_size) u8, >- mmap_sqes: []align(mem.page_size) u8, >+ mmap: []align(heap.min_page_size) u8, >+ mmap_sqes: []align(heap.min_page_size) u8, > > // We use `sqe_head` and `sqe_tail` in the same way as liburing: > // We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`. >@@ -1460,7 +1461,7 @@ pub const BufferGroup = struct { > /// Pointer to the memory shared by the kernel. > /// `buffers_count` of `io_uring_buf` structures are shared by the kernel. 
> /// First `io_uring_buf` is overlaid by `io_uring_buf_ring` struct. >- br: *align(mem.page_size) linux.io_uring_buf_ring, >+ br: *align(heap.min_page_size) linux.io_uring_buf_ring, > /// Contiguous block of memory of size (buffers_count * buffer_size). > buffers: []u8, > /// Size of each buffer in buffers. >@@ -1555,7 +1556,7 @@ pub const BufferGroup = struct { > /// `fd` is IO_Uring.fd for which the provided buffer ring is being registered. > /// `entries` is the number of entries requested in the buffer ring, must be power of 2. > /// `group_id` is the chosen buffer group ID, unique in IO_Uring. >-pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(mem.page_size) linux.io_uring_buf_ring { >+pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(heap.min_page_size) linux.io_uring_buf_ring { > if (entries == 0 or entries > 1 << 15) return error.EntriesNotInRange; > if (!std.math.isPowerOfTwo(entries)) return error.EntriesNotPowerOfTwo; > >@@ -1571,7 +1572,7 @@ pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(mem.p > errdefer posix.munmap(mmap); > assert(mmap.len == mmap_size); > >- const br: *align(mem.page_size) linux.io_uring_buf_ring = @ptrCast(mmap.ptr); >+ const br: *align(heap.min_page_size) linux.io_uring_buf_ring = @ptrCast(mmap.ptr); > try register_buf_ring(fd, @intFromPtr(br), entries, group_id); > return br; > } >@@ -1613,9 +1614,9 @@ fn handle_register_buf_ring_result(res: usize) !void { > } > > // Unregisters a previously registered shared buffer ring, returned from io_uring_setup_buf_ring. >-pub fn free_buf_ring(fd: posix.fd_t, br: *align(mem.page_size) linux.io_uring_buf_ring, entries: u32, group_id: u16) void { >+pub fn free_buf_ring(fd: posix.fd_t, br: *align(heap.min_page_size) linux.io_uring_buf_ring, entries: u32, group_id: u16) void { > unregister_buf_ring(fd, group_id) catch {}; >- var mmap: []align(mem.page_size) u8 = undefined; >+ var mmap: []align(heap.min_page_size) u8 = undefined; > mmap.ptr = @ptrCast(br); > mmap.len = entries * @sizeOf(linux.io_uring_buf); > posix.munmap(mmap); >diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig >index d1292e86dd..7917fe9d1b 100644 >--- a/lib/std/os/linux/tls.zig >+++ b/lib/std/os/linux/tls.zig >@@ -11,6 +11,7 @@ > > const std = @import("std"); > const mem = std.mem; >+const heap = std.heap; > const elf = std.elf; > const math = std.math; > const assert = std.debug.assert; >@@ -490,7 +491,7 @@ pub fn prepareArea(area: []u8) usize { > // and LLVM or LLD is not smart enough to lay out the TLS data in a space-conserving way. Anyway, I > // think it's fine because it's less than 3 pages of memory, and putting it in the ELF like this is > // equivalent to moving the `mmap` call below into the kernel, avoiding syscall overhead. >-var main_thread_area_buffer: [0x2100]u8 align(mem.page_size) = undefined; >+var main_thread_area_buffer: [0x2100]u8 align(heap.min_page_size) = undefined; > > /// Computes the layout of the static TLS area, allocates the area, initializes all of its fields, > /// and assigns the architecture-specific value to the TP register. >@@ -503,7 +504,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void { > const area = blk: { > // Fast path for the common case where the TLS data is really small, avoid an allocation and > // use our local buffer. 
>- if (area_desc.alignment <= mem.page_size and area_desc.size <= main_thread_area_buffer.len) { >+ if (area_desc.alignment <= heap.min_page_size and area_desc.size <= main_thread_area_buffer.len) { > break :blk main_thread_area_buffer[0..area_desc.size]; > } > >@@ -517,7 +518,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void { > ); > if (@as(isize, @bitCast(begin_addr)) < 0) @trap(); > >- const area_ptr: [*]align(mem.page_size) u8 = @ptrFromInt(begin_addr); >+ const area_ptr: [*]align(heap.min_page_size) u8 = @ptrFromInt(begin_addr); > > // Make sure the slice is correctly aligned. > const begin_aligned_addr = alignForward(begin_addr, area_desc.alignment); >diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig >index 0c48493b4c..0c034de7c7 100644 >--- a/lib/std/os/plan9.zig >+++ b/lib/std/os/plan9.zig >@@ -367,8 +367,8 @@ pub fn sbrk(n: usize) usize { > bloc = @intFromPtr(&ExecData.end); > bloc_max = @intFromPtr(&ExecData.end); > } >- const bl = std.mem.alignForward(usize, bloc, std.mem.page_size); >- const n_aligned = std.mem.alignForward(usize, n, std.mem.page_size); >+ const bl = std.mem.alignForward(usize, bloc, std.heap.pageSize()); >+ const n_aligned = std.mem.alignForward(usize, n, std.heap.pageSize()); > if (bl + n_aligned > bloc_max) { > // we need to allocate > if (brk_(bl + n_aligned) < 0) return 0; >diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig >index f2da7957a8..2cae65af1d 100644 >--- a/lib/std/os/windows/kernel32.zig >+++ b/lib/std/os/windows/kernel32.zig >@@ -42,6 +42,7 @@ const WCHAR = windows.WCHAR; > const WIN32_FIND_DATAW = windows.WIN32_FIND_DATAW; > const Win32Error = windows.Win32Error; > const WORD = windows.WORD; >+const SYSTEM_INFO = windows.SYSTEM_INFO; > > // I/O - Filesystem > >@@ -670,3 +671,7 @@ pub extern "kernel32" fn SetLastError( > pub extern "kernel32" fn GetSystemTimeAsFileTime( > lpSystemTimeAsFileTime: *FILETIME, > ) callconv(.winapi) void; >+ >+pub extern "kernel32" fn GetSystemInfo( >+ lpSystemInfo: *SYSTEM_INFO, >+) callconv(.winapi) void; >diff --git a/lib/std/posix.zig b/lib/std/posix.zig >index 100500bec4..b14fba542c 100644 >--- a/lib/std/posix.zig >+++ b/lib/std/posix.zig >@@ -18,6 +18,7 @@ const builtin = @import("builtin"); > const root = @import("root"); > const std = @import("std.zig"); > const mem = std.mem; >+const heap = std.heap; > const fs = std.fs; > const max_path_bytes = fs.max_path_bytes; > const maxInt = std.math.maxInt; >@@ -4663,7 +4664,7 @@ pub const MProtectError = error{ > OutOfMemory, > } || UnexpectedError; > >-pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void { >+pub fn mprotect(memory: []align(heap.min_page_size) u8, protection: u32) MProtectError!void { > if (native_os == .windows) { > const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) { > 0b000 => windows.PAGE_NOACCESS, >@@ -4728,21 +4729,21 @@ pub const MMapError = error{ > /// * SIGSEGV - Attempted write into a region mapped as read-only. 
> /// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file > pub fn mmap( >- ptr: ?[*]align(mem.page_size) u8, >+ ptr: ?[*]align(heap.min_page_size) u8, > length: usize, > prot: u32, > flags: system.MAP, > fd: fd_t, > offset: u64, >-) MMapError![]align(mem.page_size) u8 { >+) MMapError![]align(heap.min_page_size) u8 { > const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap; > const rc = mmap_sym(ptr, length, prot, @bitCast(flags), fd, @bitCast(offset)); > const err: E = if (builtin.link_libc) blk: { >- if (rc != std.c.MAP_FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length]; >+ if (rc != std.c.MAP_FAILED) return @as([*]align(heap.min_page_size) u8, @ptrCast(@alignCast(rc)))[0..length]; > break :blk @enumFromInt(system._errno().*); > } else blk: { > const err = errno(rc); >- if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length]; >+ if (err == .SUCCESS) return @as([*]align(heap.min_page_size) u8, @ptrFromInt(rc))[0..length]; > break :blk err; > }; > switch (err) { >@@ -4768,7 +4769,7 @@ pub fn mmap( > /// Zig's munmap function does not, for two reasons: > /// * It violates the Zig principle that resource deallocation must succeed. > /// * The Windows function, VirtualFree, has this restriction. >-pub fn munmap(memory: []align(mem.page_size) const u8) void { >+pub fn munmap(memory: []align(heap.min_page_size) const u8) void { > switch (errno(system.munmap(memory.ptr, memory.len))) { > .SUCCESS => return, > .INVAL => unreachable, // Invalid parameters. >@@ -4782,7 +4783,7 @@ pub const MSyncError = error{ > PermissionDenied, > } || UnexpectedError; > >-pub fn msync(memory: []align(mem.page_size) u8, flags: i32) MSyncError!void { >+pub fn msync(memory: []align(heap.min_page_size) u8, flags: i32) MSyncError!void { > switch (errno(system.msync(memory.ptr, memory.len, flags))) { > .SUCCESS => return, > .PERM => return error.PermissionDenied, >@@ -7093,7 +7094,7 @@ pub const MincoreError = error{ > } || UnexpectedError; > > /// Determine whether pages are resident in memory. >-pub fn mincore(ptr: [*]align(mem.page_size) u8, length: usize, vec: [*]u8) MincoreError!void { >+pub fn mincore(ptr: [*]align(heap.min_page_size) u8, length: usize, vec: [*]u8) MincoreError!void { > return switch (errno(system.mincore(ptr, length, vec))) { > .SUCCESS => {}, > .AGAIN => error.SystemResources, >@@ -7139,7 +7140,7 @@ pub const MadviseError = error{ > > /// Give advice about use of memory. > /// This syscall is optional and is sometimes configured to be disabled. 
>-pub fn madvise(ptr: [*]align(mem.page_size) u8, length: usize, advice: u32) MadviseError!void { >+pub fn madvise(ptr: [*]align(heap.min_page_size) u8, length: usize, advice: u32) MadviseError!void { > switch (errno(system.madvise(ptr, length, advice))) { > .SUCCESS => return, > .PERM => return error.PermissionDenied, >diff --git a/lib/std/process.zig b/lib/std/process.zig >index eca3a26c29..9355d5eca3 100644 >--- a/lib/std/process.zig >+++ b/lib/std/process.zig >@@ -1525,7 +1525,7 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo { > ReadGroupId, > }; > >- var buf: [std.mem.page_size]u8 = undefined; >+ var buf: [std.heap.min_page_size]u8 = undefined; > var name_index: usize = 0; > var state = State.Start; > var uid: posix.uid_t = 0; >diff --git a/lib/std/start.zig b/lib/std/start.zig >index 9da0cb2ec6..c86b67cda8 100644 >--- a/lib/std/start.zig >+++ b/lib/std/start.zig >@@ -576,7 +576,7 @@ fn expandStackSize(phdrs: []elf.Phdr) void { > switch (phdr.p_type) { > elf.PT_GNU_STACK => { > if (phdr.p_memsz == 0) break; >- assert(phdr.p_memsz % std.mem.page_size == 0); >+ assert(phdr.p_memsz % std.heap.pageSize() == 0); > > // Silently fail if we are unable to get limits. > const limits = std.posix.getrlimit(.STACK) catch break; >diff --git a/lib/std/std.zig b/lib/std/std.zig >index cc61111746..0c6af592c1 100644 >--- a/lib/std/std.zig >+++ b/lib/std/std.zig >@@ -118,6 +118,10 @@ pub const Options = struct { > args: anytype, > ) void = log.defaultLog, > >+ min_page_size: ?usize = null, >+ max_page_size: ?usize = null, >+ queryPageSizeFn: fn () usize = heap.defaultQueryPageSize, >+ > fmt_max_depth: usize = fmt.default_max_depth, > > cryptoRandomSeed: fn (buffer: []u8) void = @import("crypto/tlcsprng.zig").defaultRandomSeed, >diff --git a/lib/std/zip.zig b/lib/std/zip.zig >index 8c1b55216b..2b9228b626 100644 >--- a/lib/std/zip.zig >+++ b/lib/std/zip.zig >@@ -162,7 +162,7 @@ pub fn decompress( > var total_uncompressed: u64 = 0; > switch (method) { > .store => { >- var buf: [std.mem.page_size]u8 = undefined; >+ var buf: [4096]u8 = undefined; > while (true) { > const len = try reader.read(&buf); > if (len == 0) break; >diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig >index 1229ca524c..ae38b710b6 100644 >--- a/src/Package/Fetch.zig >+++ b/src/Package/Fetch.zig >@@ -1249,7 +1249,7 @@ fn unzip(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackResult { > .{@errorName(err)}, > )); > defer zip_file.close(); >- var buf: [std.mem.page_size]u8 = undefined; >+ var buf: [std.heap.min_page_size]u8 = undefined; > while (true) { > const len = reader.readAll(&buf) catch |err| return f.fail(f.location_tok, try eb.printString( > "read zip stream failed: {s}",
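
The `GeneralPurposeAllocator` hunks above replace every `@divExact(page_size, size_class)` with `@divExact(pageSize(), size_class)`: a bucket still spans exactly one page, so slot counts become runtime quantities derived from the detected page size. A minimal sketch of that arithmetic against the patched `std.heap.pageSize()`; `slotCount` is a hypothetical helper for illustration, not part of the patch:

    const std = @import("std");

    /// One bucket occupies exactly one page, so the number of slots in a
    /// bucket is the runtime page size divided by the size class. Both are
    /// powers of two, so the division is always exact.
    fn slotCount(size_class: usize) usize {
        return @divExact(std.heap.pageSize(), size_class);
    }

    test slotCount {
        // On a 4 KiB page this is 64; on a 16 KiB page (e.g. Apple
        // Silicon) the same size class yields 256 slots.
        try std.testing.expectEqual(std.heap.pageSize() / 64, slotCount(64));
    }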
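
The `indexOfSentinel` hunk keeps a comptime divisibility assert against `std.heap.max_page_size` and adds a runtime one, because the guard that keeps a vector load from straying into a possibly unmapped page must now be evaluated against the runtime page size. The invariant it relies on, as a self-contained sketch (`blockStaysInPage` is a hypothetical name for illustration):

    const std = @import("std");

    /// A `block_size`-byte load starting at `addr` stays within a single
    /// page exactly when the offset inside the page leaves room for the
    /// whole block. Requires `page_size` to be a power of two.
    fn blockStaysInPage(addr: usize, block_size: usize, page_size: usize) bool {
        const offset_in_page = addr & (page_size - 1);
        return offset_in_page <= page_size - block_size;
    }

    test blockStaysInPage {
        const page: usize = 4096;
        const block: usize = 16;
        try std.testing.expect(blockStaysInPage(page - block, block, page)); // last safe start
        try std.testing.expect(!blockStaysInPage(page - block + 1, block, page)); // would cross
    }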
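
Throughout `std.posix`, pointer alignment annotations switch to the comptime lower bound `std.heap.min_page_size`, while byte counts use the runtime `std.heap.pageSize()`; any real page boundary is at least `min_page_size`-aligned, so the annotations remain sound. A sketch of the resulting `posix.mmap` usage under the patch, assuming a POSIX target:

    const std = @import("std");
    const posix = std.posix;

    pub fn main() !void {
        // Length comes from the runtime page size; the returned slice is
        // aligned to the comptime minimum, which the kernel's page-aligned
        // address always satisfies.
        const mapped: []align(std.heap.min_page_size) u8 = try posix.mmap(
            null,
            std.heap.pageSize(),
            posix.PROT.READ | posix.PROT.WRITE,
            .{ .TYPE = .PRIVATE, .ANONYMOUS = true },
            -1,
            0,
        );
        defer posix.munmap(mapped);
        mapped[0] = 0x12; // writable private mapping
    }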
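
Finally, the new `std.Options` fields (`min_page_size`, `max_page_size`, `queryPageSizeFn`) let the root module pin the page-size bounds so that `std.heap.pageSize()` can fold to a constant instead of querying the OS. A minimal sketch, assuming the override flows through the usual `std_options` declaration; that pinning both bounds to the same value skips the runtime query is the upstream PR's proposed behavior, not confirmed here:

    const std = @import("std");

    pub const std_options: std.Options = .{
        // Pinning both bounds to 16 KiB (e.g. an Apple Silicon target)
        // makes min == max, leaving pageSize() nothing to query.
        .min_page_size = 16 * 1024,
        .max_page_size = 16 * 1024,
    };

    pub fn main() void {
        std.debug.print("page size: {d}\n", .{std.heap.pageSize()});
    }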