Gentoo Websites Logo
Go to: Gentoo Home Documentation Forums Lists Bugs Planet Store Wiki Get Gentoo!
View | Details | Raw Unified | Return to bug 947025
Collapse All | Expand All

(-)a/lib/fuzzer.zig (-1 / +1 lines)
Lines 480-486 pub const MemoryMappedList = struct { Link Here
480
    /// of this ArrayList in accordance with the respective documentation. In
480
    /// of this ArrayList in accordance with the respective documentation. In
481
    /// all cases, "invalidated" means that the memory has been passed to this
481
    /// all cases, "invalidated" means that the memory has been passed to this
482
    /// allocator's resize or free function.
482
    /// allocator's resize or free function.
483
    items: []align(std.mem.page_size) volatile u8,
483
    items: []align(std.heap.min_page_size) volatile u8,
484
    /// How many bytes this list can hold without allocating additional memory.
484
    /// How many bytes this list can hold without allocating additional memory.
485
    capacity: usize,
485
    capacity: usize,
486
486
(-)a/lib/std/Build/Fuzz/WebServer.zig (-1 / +1 lines)
Lines 41-47 const fuzzer_arch_os_abi = "wasm32-freestanding"; Link Here
41
const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";
41
const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";
42
42
43
const CoverageMap = struct {
43
const CoverageMap = struct {
44
    mapped_memory: []align(std.mem.page_size) const u8,
44
    mapped_memory: []align(std.heap.min_page_size) const u8,
45
    coverage: Coverage,
45
    coverage: Coverage,
46
    source_locations: []Coverage.SourceLocation,
46
    source_locations: []Coverage.SourceLocation,
47
    /// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
47
    /// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
(-)a/lib/std/Thread.zig (-3 / +3 lines)
Lines 767-773 const PosixThreadImpl = struct { Link Here
767
        // Use the same set of parameters used by the libc-less impl.
767
        // Use the same set of parameters used by the libc-less impl.
768
        const stack_size = @max(config.stack_size, 16 * 1024);
768
        const stack_size = @max(config.stack_size, 16 * 1024);
769
        assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS);
769
        assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS);
770
        assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS);
770
        assert(c.pthread_attr_setguardsize(&attr, std.heap.pageSize()) == .SUCCESS);
771
771
772
        var handle: c.pthread_t = undefined;
772
        var handle: c.pthread_t = undefined;
773
        switch (c.pthread_create(
773
        switch (c.pthread_create(
Lines 1150-1156 const LinuxThreadImpl = struct { Link Here
1150
        completion: Completion = Completion.init(.running),
1150
        completion: Completion = Completion.init(.running),
1151
        child_tid: std.atomic.Value(i32) = std.atomic.Value(i32).init(1),
1151
        child_tid: std.atomic.Value(i32) = std.atomic.Value(i32).init(1),
1152
        parent_tid: i32 = undefined,
1152
        parent_tid: i32 = undefined,
1153
        mapped: []align(std.mem.page_size) u8,
1153
        mapped: []align(std.heap.min_page_size) u8,
1154
1154
1155
        /// Calls `munmap(mapped.ptr, mapped.len)` then `exit(1)` without touching the stack (which lives in `mapped.ptr`).
1155
        /// Calls `munmap(mapped.ptr, mapped.len)` then `exit(1)` without touching the stack (which lives in `mapped.ptr`).
1156
        /// Ported over from musl libc's pthread detached implementation:
1156
        /// Ported over from musl libc's pthread detached implementation:
Lines 1357-1363 const LinuxThreadImpl = struct { Link Here
1357
    };
1357
    };
1358
1358
1359
    fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl {
1359
    fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl {
1360
        const page_size = std.mem.page_size;
1360
        const page_size = std.heap.pageSize();
1361
        const Args = @TypeOf(args);
1361
        const Args = @TypeOf(args);
1362
        const Instance = struct {
1362
        const Instance = struct {
1363
            fn_args: Args,
1363
            fn_args: Args,
(-)a/lib/std/c.zig (-11 / +48 lines)
Lines 3-9 const builtin = @import("builtin"); Link Here
3
const c = @This();
3
const c = @This();
4
const maxInt = std.math.maxInt;
4
const maxInt = std.math.maxInt;
5
const assert = std.debug.assert;
5
const assert = std.debug.assert;
6
const page_size = std.mem.page_size;
6
const min_page_size = std.heap.min_page_size;
7
const native_abi = builtin.abi;
7
const native_abi = builtin.abi;
8
const native_arch = builtin.cpu.arch;
8
const native_arch = builtin.cpu.arch;
9
const native_os = builtin.os.tag;
9
const native_os = builtin.os.tag;
Lines 2209-2214 pub const SC = switch (native_os) { Link Here
2209
    .linux => linux.SC,
2209
    .linux => linux.SC,
2210
    else => void,
2210
    else => void,
2211
};
2211
};
2212
2213
pub const _SC = switch (native_os) {
2214
    .bridgeos, .driverkit, .ios, .macos, .tvos, .visionos, .watchos => enum(c_int) {
2215
        PAGESIZE = 29,
2216
    },
2217
    .dragonfly => enum(c_int) {
2218
        PAGESIZE = 47,
2219
    },
2220
    .freebsd => enum(c_int) {
2221
        PAGESIZE = 47,
2222
    },
2223
    .fuchsia => enum(c_int) {
2224
        PAGESIZE = 30,
2225
    },
2226
    .haiku => enum(c_int) {
2227
        PAGESIZE = 27,
2228
    },
2229
    .linux => enum(c_int) {
2230
        PAGESIZE = 30,
2231
    },
2232
    .netbsd => enum(c_int) {
2233
        PAGESIZE = 28,
2234
    },
2235
    .openbsd => enum(c_int) {
2236
        PAGESIZE = 28,
2237
    },
2238
    .solaris, .illumos => enum(c_int) {
2239
        PAGESIZE = 11,
2240
        NPROCESSORS_ONLN = 15,
2241
    },
2242
    else => void,
2243
};
2244
2212
pub const SEEK = switch (native_os) {
2245
pub const SEEK = switch (native_os) {
2213
    .linux => linux.SEEK,
2246
    .linux => linux.SEEK,
2214
    .emscripten => emscripten.SEEK,
2247
    .emscripten => emscripten.SEEK,
Lines 9038-9044 pub extern "c" fn getpwnam(name: [*:0]const u8) ?*passwd; Link Here
9038
pub extern "c" fn getpwuid(uid: uid_t) ?*passwd;
9071
pub extern "c" fn getpwuid(uid: uid_t) ?*passwd;
9039
pub extern "c" fn getrlimit64(resource: rlimit_resource, rlim: *rlimit) c_int;
9072
pub extern "c" fn getrlimit64(resource: rlimit_resource, rlim: *rlimit) c_int;
9040
pub extern "c" fn lseek64(fd: fd_t, offset: i64, whence: c_int) i64;
9073
pub extern "c" fn lseek64(fd: fd_t, offset: i64, whence: c_int) i64;
9041
pub extern "c" fn mmap64(addr: ?*align(std.mem.page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
9074
pub extern "c" fn mmap64(addr: ?*align(min_page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
9042
pub extern "c" fn open64(path: [*:0]const u8, oflag: O, ...) c_int;
9075
pub extern "c" fn open64(path: [*:0]const u8, oflag: O, ...) c_int;
9043
pub extern "c" fn openat64(fd: c_int, path: [*:0]const u8, oflag: O, ...) c_int;
9076
pub extern "c" fn openat64(fd: c_int, path: [*:0]const u8, oflag: O, ...) c_int;
9044
pub extern "c" fn pread64(fd: fd_t, buf: [*]u8, nbyte: usize, offset: i64) isize;
9077
pub extern "c" fn pread64(fd: fd_t, buf: [*]u8, nbyte: usize, offset: i64) isize;
Lines 9126-9138 pub extern "c" fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) c_int; Link Here
9126
9159
9127
pub extern "c" fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: *const rlimit, old_limit: *rlimit) c_int;
9160
pub extern "c" fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: *const rlimit, old_limit: *rlimit) c_int;
9128
pub extern "c" fn mincore(
9161
pub extern "c" fn mincore(
9129
    addr: *align(std.mem.page_size) anyopaque,
9162
    addr: *align(min_page_size) anyopaque,
9130
    length: usize,
9163
    length: usize,
9131
    vec: [*]u8,
9164
    vec: [*]u8,
9132
) c_int;
9165
) c_int;
9133
9166
9134
pub extern "c" fn madvise(
9167
pub extern "c" fn madvise(
9135
    addr: *align(std.mem.page_size) anyopaque,
9168
    addr: *align(min_page_size) anyopaque,
9136
    length: usize,
9169
    length: usize,
9137
    advice: u32,
9170
    advice: u32,
9138
) c_int;
9171
) c_int;
Lines 9230-9235 pub const posix_memalign = switch (native_os) { Link Here
9230
    .dragonfly, .netbsd, .freebsd, .solaris, .openbsd, .linux, .macos, .ios, .tvos, .watchos, .visionos => private.posix_memalign,
9263
    .dragonfly, .netbsd, .freebsd, .solaris, .openbsd, .linux, .macos, .ios, .tvos, .watchos, .visionos => private.posix_memalign,
9231
    else => {},
9264
    else => {},
9232
};
9265
};
9266
pub const sysconf = switch (native_os) {
9267
    .solaris => solaris.sysconf,
9268
    else => private.sysconf,
9269
};
9233
9270
9234
pub const sf_hdtr = switch (native_os) {
9271
pub const sf_hdtr = switch (native_os) {
9235
    .freebsd, .macos, .ios, .tvos, .watchos, .visionos => extern struct {
9272
    .freebsd, .macos, .ios, .tvos, .watchos, .visionos => extern struct {
Lines 9271-9279 pub extern "c" fn writev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint) i Link Here
9271
pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: off_t) isize;
9308
pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: off_t) isize;
9272
pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
9309
pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
9273
pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: off_t) isize;
9310
pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: off_t) isize;
9274
pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
9311
pub extern "c" fn mmap(addr: ?*align(min_page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
9275
pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int;
9312
pub extern "c" fn munmap(addr: *align(min_page_size) const anyopaque, len: usize) c_int;
9276
pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int;
9313
pub extern "c" fn mprotect(addr: *align(min_page_size) anyopaque, len: usize, prot: c_uint) c_int;
9277
pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) c_int;
9314
pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) c_int;
9278
pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_int) c_int;
9315
pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_int) c_int;
9279
pub extern "c" fn unlink(path: [*:0]const u8) c_int;
9316
pub extern "c" fn unlink(path: [*:0]const u8) c_int;
Lines 9625-9631 pub const SCM = solaris.SCM; Link Here
9625
pub const SETCONTEXT = solaris.SETCONTEXT;
9662
pub const SETCONTEXT = solaris.SETCONTEXT;
9626
pub const SETUSTACK = solaris.GETUSTACK;
9663
pub const SETUSTACK = solaris.GETUSTACK;
9627
pub const SFD = solaris.SFD;
9664
pub const SFD = solaris.SFD;
9628
pub const _SC = solaris._SC;
9629
pub const cmsghdr = solaris.cmsghdr;
9665
pub const cmsghdr = solaris.cmsghdr;
9630
pub const ctid_t = solaris.ctid_t;
9666
pub const ctid_t = solaris.ctid_t;
9631
pub const file_obj = solaris.file_obj;
9667
pub const file_obj = solaris.file_obj;
Lines 9642-9648 pub const priority = solaris.priority; Link Here
9642
pub const procfs = solaris.procfs;
9678
pub const procfs = solaris.procfs;
9643
pub const projid_t = solaris.projid_t;
9679
pub const projid_t = solaris.projid_t;
9644
pub const signalfd_siginfo = solaris.signalfd_siginfo;
9680
pub const signalfd_siginfo = solaris.signalfd_siginfo;
9645
pub const sysconf = solaris.sysconf;
9646
pub const taskid_t = solaris.taskid_t;
9681
pub const taskid_t = solaris.taskid_t;
9647
pub const zoneid_t = solaris.zoneid_t;
9682
pub const zoneid_t = solaris.zoneid_t;
9648
9683
Lines 9797-9802 pub const fcopyfile = darwin.fcopyfile; Link Here
9797
pub const ipc_space_t = darwin.ipc_space_t;
9832
pub const ipc_space_t = darwin.ipc_space_t;
9798
pub const ipc_space_port_t = darwin.ipc_space_port_t;
9833
pub const ipc_space_port_t = darwin.ipc_space_port_t;
9799
pub const kern_return_t = darwin.kern_return_t;
9834
pub const kern_return_t = darwin.kern_return_t;
9835
pub const vm_size_t = darwin.vm_size_t;
9800
pub const kevent64 = darwin.kevent64;
9836
pub const kevent64 = darwin.kevent64;
9801
pub const mach_absolute_time = darwin.mach_absolute_time;
9837
pub const mach_absolute_time = darwin.mach_absolute_time;
9802
pub const mach_continuous_time = darwin.mach_continuous_time;
9838
pub const mach_continuous_time = darwin.mach_continuous_time;
Lines 9953-9959 const private = struct { Link Here
9953
    };
9989
    };
9954
    extern "c" fn getrusage(who: c_int, usage: *rusage) c_int;
9990
    extern "c" fn getrusage(who: c_int, usage: *rusage) c_int;
9955
    extern "c" fn gettimeofday(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
9991
    extern "c" fn gettimeofday(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
9956
    extern "c" fn msync(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
9992
    extern "c" fn msync(addr: *align(min_page_size) const anyopaque, len: usize, flags: c_int) c_int;
9957
    extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
9993
    extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
9958
    extern "c" fn pipe2(fds: *[2]fd_t, flags: O) c_int;
9994
    extern "c" fn pipe2(fds: *[2]fd_t, flags: O) c_int;
9959
    extern "c" fn readdir(dir: *DIR) ?*dirent;
9995
    extern "c" fn readdir(dir: *DIR) ?*dirent;
Lines 9966-9971 const private = struct { Link Here
9966
    extern "c" fn socket(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int;
10002
    extern "c" fn socket(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int;
9967
    extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *Stat) c_int;
10003
    extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *Stat) c_int;
9968
    extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
10004
    extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
10005
    extern "c" fn sysconf(sc: c_int) c_long;
9969
10006
9970
    extern "c" fn pthread_setname_np(thread: pthread_t, name: [*:0]const u8) c_int;
10007
    extern "c" fn pthread_setname_np(thread: pthread_t, name: [*:0]const u8) c_int;
9971
    extern "c" fn getcontext(ucp: *ucontext_t) c_int;
10008
    extern "c" fn getcontext(ucp: *ucontext_t) c_int;
Lines 10000-10006 const private = struct { Link Here
10000
    extern "c" fn __getrusage50(who: c_int, usage: *rusage) c_int;
10037
    extern "c" fn __getrusage50(who: c_int, usage: *rusage) c_int;
10001
    extern "c" fn __gettimeofday50(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
10038
    extern "c" fn __gettimeofday50(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
10002
    extern "c" fn __libc_thr_yield() c_int;
10039
    extern "c" fn __libc_thr_yield() c_int;
10003
    extern "c" fn __msync13(addr: *align(std.mem.page_size) const anyopaque, len: usize, flags: c_int) c_int;
10040
    extern "c" fn __msync13(addr: *align(min_page_size) const anyopaque, len: usize, flags: c_int) c_int;
10004
    extern "c" fn __nanosleep50(rqtp: *const timespec, rmtp: ?*timespec) c_int;
10041
    extern "c" fn __nanosleep50(rqtp: *const timespec, rmtp: ?*timespec) c_int;
10005
    extern "c" fn __sigaction14(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
10042
    extern "c" fn __sigaction14(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
10006
    extern "c" fn __sigfillset14(set: ?*sigset_t) void;
10043
    extern "c" fn __sigfillset14(set: ?*sigset_t) void;
(-)a/lib/std/c/solaris.zig (-4 lines)
Lines 154-163 pub const AF_SUN = struct { Link Here
154
    pub const NOPLM = 0x00000004;
154
    pub const NOPLM = 0x00000004;
155
};
155
};
156
156
157
pub const _SC = struct {
158
    pub const NPROCESSORS_ONLN = 15;
159
};
160
161
pub const procfs = struct {
157
pub const procfs = struct {
162
    pub const misc_header = extern struct {
158
    pub const misc_header = extern struct {
163
        size: u32,
159
        size: u32,
(-)a/lib/std/crypto/tlcsprng.zig (-2 / +3 lines)
Lines 6-11 Link Here
6
const std = @import("std");
6
const std = @import("std");
7
const builtin = @import("builtin");
7
const builtin = @import("builtin");
8
const mem = std.mem;
8
const mem = std.mem;
9
const heap = std.heap;
9
const native_os = builtin.os.tag;
10
const native_os = builtin.os.tag;
10
const posix = std.posix;
11
const posix = std.posix;
11
12
Lines 42-48 var install_atfork_handler = std.once(struct { Link Here
42
    }
43
    }
43
}.do);
44
}.do);
44
45
45
threadlocal var wipe_mem: []align(mem.page_size) u8 = &[_]u8{};
46
threadlocal var wipe_mem: []align(heap.min_page_size) u8 = &[_]u8{};
46
47
47
fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
48
fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
48
    if (os_has_arc4random) {
49
    if (os_has_arc4random) {
Lines 77-83 fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void { Link Here
77
        } else {
78
        } else {
78
            // Use a static thread-local buffer.
79
            // Use a static thread-local buffer.
79
            const S = struct {
80
            const S = struct {
80
                threadlocal var buf: Context align(mem.page_size) = .{
81
                threadlocal var buf: Context align(heap.min_page_size) = .{
81
                    .init_state = .uninitialized,
82
                    .init_state = .uninitialized,
82
                    .rng = undefined,
83
                    .rng = undefined,
83
                };
84
                };
(-)a/lib/std/debug.zig (-8 / +9 lines)
Lines 2-7 const builtin = @import("builtin"); Link Here
2
const std = @import("std.zig");
2
const std = @import("std.zig");
3
const math = std.math;
3
const math = std.math;
4
const mem = std.mem;
4
const mem = std.mem;
5
const heap = std.heap;
5
const io = std.io;
6
const io = std.io;
6
const posix = std.posix;
7
const posix = std.posix;
7
const fs = std.fs;
8
const fs = std.fs;
Lines 1021-1027 fn printLineFromFileAnyOs(out_stream: anytype, source_location: SourceLocation) Link Here
1021
    defer f.close();
1022
    defer f.close();
1022
    // TODO fstat and make sure that the file has the correct size
1023
    // TODO fstat and make sure that the file has the correct size
1023
1024
1024
    var buf: [mem.page_size]u8 = undefined;
1025
    var buf: [4096]u8 = undefined;
1025
    var amt_read = try f.read(buf[0..]);
1026
    var amt_read = try f.read(buf[0..]);
1026
    const line_start = seek: {
1027
    const line_start = seek: {
1027
        var current_line_start: usize = 0;
1028
        var current_line_start: usize = 0;
Lines 1124-1130 test printLineFromFileAnyOs { Link Here
1124
1125
1125
        const overlap = 10;
1126
        const overlap = 10;
1126
        var writer = file.writer();
1127
        var writer = file.writer();
1127
        try writer.writeByteNTimes('a', mem.page_size - overlap);
1128
        try writer.writeByteNTimes('a', heap.min_page_size - overlap);
1128
        try writer.writeByte('\n');
1129
        try writer.writeByte('\n');
1129
        try writer.writeByteNTimes('a', overlap);
1130
        try writer.writeByteNTimes('a', overlap);
1130
1131
Lines 1139-1148 test printLineFromFileAnyOs { Link Here
1139
        defer allocator.free(path);
1140
        defer allocator.free(path);
1140
1141
1141
        var writer = file.writer();
1142
        var writer = file.writer();
1142
        try writer.writeByteNTimes('a', mem.page_size);
1143
        try writer.writeByteNTimes('a', heap.max_page_size);
1143
1144
1144
        try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
1145
        try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
1145
        try expectEqualStrings(("a" ** mem.page_size) ++ "\n", output.items);
1146
        try expectEqualStrings(("a" ** heap.max_page_size) ++ "\n", output.items);
1146
        output.clearRetainingCapacity();
1147
        output.clearRetainingCapacity();
1147
    }
1148
    }
1148
    {
1149
    {
Lines 1152-1169 test printLineFromFileAnyOs { Link Here
1152
        defer allocator.free(path);
1153
        defer allocator.free(path);
1153
1154
1154
        var writer = file.writer();
1155
        var writer = file.writer();
1155
        try writer.writeByteNTimes('a', 3 * mem.page_size);
1156
        try writer.writeByteNTimes('a', 3 * heap.max_page_size);
1156
1157
1157
        try expectError(error.EndOfFile, printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
1158
        try expectError(error.EndOfFile, printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
1158
1159
1159
        try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
1160
        try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
1160
        try expectEqualStrings(("a" ** (3 * mem.page_size)) ++ "\n", output.items);
1161
        try expectEqualStrings(("a" ** (3 * heap.max_page_size)) ++ "\n", output.items);
1161
        output.clearRetainingCapacity();
1162
        output.clearRetainingCapacity();
1162
1163
1163
        try writer.writeAll("a\na");
1164
        try writer.writeAll("a\na");
1164
1165
1165
        try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
1166
        try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
1166
        try expectEqualStrings(("a" ** (3 * mem.page_size)) ++ "a\n", output.items);
1167
        try expectEqualStrings(("a" ** (3 * heap.max_page_size)) ++ "a\n", output.items);
1167
        output.clearRetainingCapacity();
1168
        output.clearRetainingCapacity();
1168
1169
1169
        try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
1170
        try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
Lines 1177-1183 test printLineFromFileAnyOs { Link Here
1177
        defer allocator.free(path);
1178
        defer allocator.free(path);
1178
1179
1179
        var writer = file.writer();
1180
        var writer = file.writer();
1180
        const real_file_start = 3 * mem.page_size;
1181
        const real_file_start = 3 * heap.min_page_size;
1181
        try writer.writeByteNTimes('\n', real_file_start);
1182
        try writer.writeByteNTimes('\n', real_file_start);
1182
        try writer.writeAll("abc\ndef");
1183
        try writer.writeAll("abc\ndef");
1183
1184
(-)a/lib/std/debug/Dwarf.zig (-5 / +5 lines)
Lines 2110-2117 fn pcRelBase(field_ptr: usize, pc_rel_offset: i64) !usize { Link Here
2110
pub const ElfModule = struct {
2110
pub const ElfModule = struct {
2111
    base_address: usize,
2111
    base_address: usize,
2112
    dwarf: Dwarf,
2112
    dwarf: Dwarf,
2113
    mapped_memory: []align(std.mem.page_size) const u8,
2113
    mapped_memory: []align(std.heap.min_page_size) const u8,
2114
    external_mapped_memory: ?[]align(std.mem.page_size) const u8,
2114
    external_mapped_memory: ?[]align(std.heap.min_page_size) const u8,
2115
2115
2116
    pub fn deinit(self: *@This(), allocator: Allocator) void {
2116
    pub fn deinit(self: *@This(), allocator: Allocator) void {
2117
        self.dwarf.deinit(allocator);
2117
        self.dwarf.deinit(allocator);
Lines 2157-2167 pub const ElfModule = struct { Link Here
2157
    /// sections from an external file.
2157
    /// sections from an external file.
2158
    pub fn load(
2158
    pub fn load(
2159
        gpa: Allocator,
2159
        gpa: Allocator,
2160
        mapped_mem: []align(std.mem.page_size) const u8,
2160
        mapped_mem: []align(std.heap.min_page_size) const u8,
2161
        build_id: ?[]const u8,
2161
        build_id: ?[]const u8,
2162
        expected_crc: ?u32,
2162
        expected_crc: ?u32,
2163
        parent_sections: *Dwarf.SectionArray,
2163
        parent_sections: *Dwarf.SectionArray,
2164
        parent_mapped_mem: ?[]align(std.mem.page_size) const u8,
2164
        parent_mapped_mem: ?[]align(std.heap.min_page_size) const u8,
2165
        elf_filename: ?[]const u8,
2165
        elf_filename: ?[]const u8,
2166
    ) LoadError!Dwarf.ElfModule {
2166
    ) LoadError!Dwarf.ElfModule {
2167
        if (expected_crc) |crc| if (crc != std.hash.crc.Crc32.hash(mapped_mem)) return error.InvalidDebugInfo;
2167
        if (expected_crc) |crc| if (crc != std.hash.crc.Crc32.hash(mapped_mem)) return error.InvalidDebugInfo;
Lines 2413-2419 pub const ElfModule = struct { Link Here
2413
        build_id: ?[]const u8,
2413
        build_id: ?[]const u8,
2414
        expected_crc: ?u32,
2414
        expected_crc: ?u32,
2415
        parent_sections: *Dwarf.SectionArray,
2415
        parent_sections: *Dwarf.SectionArray,
2416
        parent_mapped_mem: ?[]align(std.mem.page_size) const u8,
2416
        parent_mapped_mem: ?[]align(std.heap.min_page_size) const u8,
2417
    ) LoadError!Dwarf.ElfModule {
2417
    ) LoadError!Dwarf.ElfModule {
2418
        const elf_file = elf_file_path.root_dir.handle.openFile(elf_file_path.sub_path, .{}) catch |err| switch (err) {
2418
        const elf_file = elf_file_path.root_dir.handle.openFile(elf_file_path.sub_path, .{}) catch |err| switch (err) {
2419
            error.FileNotFound => return missing(),
2419
            error.FileNotFound => return missing(),
(-)a/lib/std/debug/Info.zig (-1 lines)
Lines 10-16 const std = @import("../std.zig"); Link Here
10
const Allocator = std.mem.Allocator;
10
const Allocator = std.mem.Allocator;
11
const Path = std.Build.Cache.Path;
11
const Path = std.Build.Cache.Path;
12
const Dwarf = std.debug.Dwarf;
12
const Dwarf = std.debug.Dwarf;
13
const page_size = std.mem.page_size;
14
const assert = std.debug.assert;
13
const assert = std.debug.assert;
15
const Coverage = std.debug.Coverage;
14
const Coverage = std.debug.Coverage;
16
const SourceLocation = std.debug.Coverage.SourceLocation;
15
const SourceLocation = std.debug.Coverage.SourceLocation;
(-)a/lib/std/debug/MemoryAccessor.zig (-4 / +5 lines)
Lines 7-13 const native_os = builtin.os.tag; Link Here
7
const std = @import("../std.zig");
7
const std = @import("../std.zig");
8
const posix = std.posix;
8
const posix = std.posix;
9
const File = std.fs.File;
9
const File = std.fs.File;
10
const page_size = std.mem.page_size;
10
const min_page_size = std.heap.min_page_size;
11
11
12
const MemoryAccessor = @This();
12
const MemoryAccessor = @This();
13
13
Lines 82-90 pub fn isValidMemory(address: usize) bool { Link Here
82
    // We are unable to determine validity of memory for freestanding targets
82
    // We are unable to determine validity of memory for freestanding targets
83
    if (native_os == .freestanding or native_os == .other or native_os == .uefi) return true;
83
    if (native_os == .freestanding or native_os == .other or native_os == .uefi) return true;
84
84
85
    const aligned_address = address & ~@as(usize, @intCast((page_size - 1)));
85
    const page_size = std.heap.pageSize();
86
    const aligned_address = address & ~(page_size - 1);
86
    if (aligned_address == 0) return false;
87
    if (aligned_address == 0) return false;
87
    const aligned_memory = @as([*]align(page_size) u8, @ptrFromInt(aligned_address))[0..page_size];
88
    const aligned_memory = @as([*]align(min_page_size) u8, @ptrFromInt(aligned_address))[0..page_size];
88
89
89
    if (native_os == .windows) {
90
    if (native_os == .windows) {
90
        const windows = std.os.windows;
91
        const windows = std.os.windows;
Lines 93-99 pub fn isValidMemory(address: usize) bool { Link Here
93
94
94
        // The only error this function can throw is ERROR_INVALID_PARAMETER.
95
        // The only error this function can throw is ERROR_INVALID_PARAMETER.
95
        // supply an address that invalid i'll be thrown.
96
        // supply an address that invalid i'll be thrown.
96
        const rc = windows.VirtualQuery(aligned_memory, &memory_info, aligned_memory.len) catch {
97
        const rc = windows.VirtualQuery(@ptrCast(aligned_memory), &memory_info, aligned_memory.len) catch {
97
            return false;
98
            return false;
98
        };
99
        };
99
100
(-)a/lib/std/debug/SelfInfo.zig (-3 / +3 lines)
Lines 504-510 pub const Module = switch (native_os) { Link Here
504
    .macos, .ios, .watchos, .tvos, .visionos => struct {
504
    .macos, .ios, .watchos, .tvos, .visionos => struct {
505
        base_address: usize,
505
        base_address: usize,
506
        vmaddr_slide: usize,
506
        vmaddr_slide: usize,
507
        mapped_memory: []align(mem.page_size) const u8,
507
        mapped_memory: []align(std.heap.min_page_size) const u8,
508
        symbols: []const MachoSymbol,
508
        symbols: []const MachoSymbol,
509
        strings: [:0]const u8,
509
        strings: [:0]const u8,
510
        ofiles: OFileTable,
510
        ofiles: OFileTable,
Lines 1046-1052 pub fn readElfDebugInfo( Link Here
1046
    build_id: ?[]const u8,
1046
    build_id: ?[]const u8,
1047
    expected_crc: ?u32,
1047
    expected_crc: ?u32,
1048
    parent_sections: *Dwarf.SectionArray,
1048
    parent_sections: *Dwarf.SectionArray,
1049
    parent_mapped_mem: ?[]align(mem.page_size) const u8,
1049
    parent_mapped_mem: ?[]align(std.heap.min_page_size) const u8,
1050
) !Dwarf.ElfModule {
1050
) !Dwarf.ElfModule {
1051
    nosuspend {
1051
    nosuspend {
1052
        const elf_file = (if (elf_filename) |filename| blk: {
1052
        const elf_file = (if (elf_filename) |filename| blk: {
Lines 1088-1094 const MachoSymbol = struct { Link Here
1088
1088
1089
/// Takes ownership of file, even on error.
1089
/// Takes ownership of file, even on error.
1090
/// TODO it's weird to take ownership even on error, rework this code.
1090
/// TODO it's weird to take ownership even on error, rework this code.
1091
fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
1091
fn mapWholeFile(file: File) ![]align(std.heap.min_page_size) const u8 {
1092
    nosuspend {
1092
    nosuspend {
1093
        defer file.close();
1093
        defer file.close();
1094
1094
(-)a/lib/std/dynamic_library.zig (-5 / +6 lines)
Lines 1-6 Link Here
1
const std = @import("std.zig");
1
const std = @import("std.zig");
2
const builtin = @import("builtin");
2
const builtin = @import("builtin");
3
const mem = std.mem;
3
const mem = std.mem;
4
const heap = std.heap;
4
const testing = std.testing;
5
const testing = std.testing;
5
const elf = std.elf;
6
const elf = std.elf;
6
const windows = std.os.windows;
7
const windows = std.os.windows;
Lines 143-149 pub const ElfDynLib = struct { Link Here
143
    hashtab: [*]posix.Elf_Symndx,
144
    hashtab: [*]posix.Elf_Symndx,
144
    versym: ?[*]elf.Versym,
145
    versym: ?[*]elf.Versym,
145
    verdef: ?*elf.Verdef,
146
    verdef: ?*elf.Verdef,
146
    memory: []align(mem.page_size) u8,
147
    memory: []align(heap.min_page_size) u8,
147
148
148
    pub const Error = ElfDynLibError;
149
    pub const Error = ElfDynLibError;
149
150
Lines 223-229 pub const ElfDynLib = struct { Link Here
223
        // corresponding to the actual LOAD sections.
224
        // corresponding to the actual LOAD sections.
224
        const file_bytes = try posix.mmap(
225
        const file_bytes = try posix.mmap(
225
            null,
226
            null,
226
            mem.alignForward(usize, size, mem.page_size),
227
            mem.alignForward(usize, size, heap.pageSize()),
227
            posix.PROT.READ,
228
            posix.PROT.READ,
228
            .{ .TYPE = .PRIVATE },
229
            .{ .TYPE = .PRIVATE },
229
            fd,
230
            fd,
Lines 284-293 pub const ElfDynLib = struct { Link Here
284
                    elf.PT_LOAD => {
285
                    elf.PT_LOAD => {
285
                        // The VirtAddr may not be page-aligned; in such case there will be
286
                        // The VirtAddr may not be page-aligned; in such case there will be
286
                        // extra nonsense mapped before/after the VirtAddr,MemSiz
287
                        // extra nonsense mapped before/after the VirtAddr,MemSiz
287
                        const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1);
288
                        const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, heap.pageSize()) - 1);
288
                        const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
289
                        const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
289
                        const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size);
290
                        const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, heap.pageSize());
290
                        const ptr = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_addr));
291
                        const ptr = @as([*]align(heap.min_page_size) u8, @ptrFromInt(aligned_addr));
291
                        const prot = elfToMmapProt(ph.p_flags);
292
                        const prot = elfToMmapProt(ph.p_flags);
292
                        if ((ph.p_flags & elf.PF_W) == 0) {
293
                        if ((ph.p_flags & elf.PF_W) == 0) {
293
                            // If it does not need write access, it can be mapped from the fd.
294
                            // If it does not need write access, it can be mapped from the fd.
(-)a/lib/std/fifo.zig (-1 / +1 lines)
Lines 91-97 pub fn LinearFifo( Link Here
91
                mem.copyForwards(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
91
                mem.copyForwards(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
92
                self.head = 0;
92
                self.head = 0;
93
            } else {
93
            } else {
94
                var tmp: [mem.page_size / 2 / @sizeOf(T)]T = undefined;
94
                var tmp: [4096 / 2 / @sizeOf(T)]T = undefined;
95
95
96
                while (self.head != 0) {
96
                while (self.head != 0) {
97
                    const n = @min(self.head, tmp.len);
97
                    const n = @min(self.head, tmp.len);
(-)a/lib/std/heap.zig (-7 / +391 lines)
Lines 8-13 const c = std.c; Link Here
8
const Allocator = std.mem.Allocator;
8
const Allocator = std.mem.Allocator;
9
const windows = std.os.windows;
9
const windows = std.os.windows;
10
10
11
const default_min_page_size: ?usize = switch (builtin.os.tag) {
12
    .driverkit, .ios, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
13
        .x86_64 => 4 << 10,
14
        .aarch64 => 16 << 10,
15
        else => null,
16
    },
17
    .windows => switch (builtin.cpu.arch) {
18
        // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
19
        .x86, .x86_64 => 4 << 10,
20
        // SuperH => 4 << 10,
21
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
22
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
23
        // DEC Alpha => 8 << 10,
24
        // Itanium => 8 << 10,
25
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
26
        else => null,
27
    },
28
    .wasi => switch (builtin.cpu.arch) {
29
        .wasm32, .wasm64 => 64 << 10,
30
        else => null,
31
    },
32
    // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
33
    .uefi => 4 << 10,
34
    .freebsd => switch (builtin.cpu.arch) {
35
        // FreeBSD/sys/*
36
        .x86, .x86_64 => 4 << 10,
37
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
38
        .aarch64, .aarch64_be => 4 << 10,
39
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
40
        .riscv32, .riscv64 => 4 << 10,
41
        else => null,
42
    },
43
    .netbsd => switch (builtin.cpu.arch) {
44
        // NetBSD/sys/arch/*
45
        .x86, .x86_64 => 4 << 10,
46
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
47
        .aarch64, .aarch64_be => 4 << 10,
48
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
49
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
50
        .sparc => 4 << 10,
51
        .sparc64 => 8 << 10,
52
        .riscv32, .riscv64 => 4 << 10,
53
        // Sun-2
54
        .m68k => 2 << 10,
55
        else => null,
56
    },
57
    .dragonfly => switch (builtin.cpu.arch) {
58
        .x86, .x86_64 => 4 << 10,
59
        else => null,
60
    },
61
    .openbsd => switch (builtin.cpu.arch) {
62
        // OpenBSD/sys/arch/*
63
        .x86, .x86_64 => 4 << 10,
64
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
65
        .mips64, .mips64el => 4 << 10,
66
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
67
        .riscv64 => 4 << 10,
68
        .sparc64 => 8 << 10,
69
        else => null,
70
    },
71
    .solaris, .illumos => switch (builtin.cpu.arch) {
72
        // src/uts/*/sys/machparam.h
73
        .x86, .x86_64 => 4 << 10,
74
        .sparc, .sparc64 => 8 << 10,
75
        else => null,
76
    },
77
    .fuchsia => switch (builtin.cpu.arch) {
78
        // fuchsia/kernel/arch/*/include/arch/defines.h
79
        .x86_64 => 4 << 10,
80
        .aarch64, .aarch64_be => 4 << 10,
81
        .riscv64 => 4 << 10,
82
        else => null,
83
    },
84
    // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
85
    .serenity => 4 << 10,
86
    .haiku => switch (builtin.cpu.arch) {
87
        // haiku/headers/posix/arch/*/limits.h
88
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
89
        .aarch64, .aarch64_be => 4 << 10,
90
        .m68k => 4 << 10,
91
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
92
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
93
        .riscv64 => 4 << 10,
94
        .sparc64 => 8 << 10,
95
        .x86, .x86_64 => 4 << 10,
96
        else => null,
97
    },
98
    .hurd => switch (builtin.cpu.arch) {
99
        // gnumach/*/include/mach/*/vm_param.h
100
        .x86, .x86_64 => 4 << 10,
101
        .aarch64 => null,
102
        else => null,
103
    },
104
    .plan9 => switch (builtin.cpu.arch) {
105
        // 9front/sys/src/9/*/mem.h
106
        .x86, .x86_64 => 4 << 10,
107
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
108
        .aarch64, .aarch64_be => 4 << 10,
109
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
110
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
111
        .sparc => 4 << 10,
112
        else => null,
113
    },
114
    .ps3 => switch (builtin.cpu.arch) {
115
        // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
116
        .powerpc64 => 1 << 20, // 1 MiB
117
        else => null,
118
    },
119
    .ps4 => switch (builtin.cpu.arch) {
120
        // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
121
        .x86, .x86_64 => 4 << 10,
122
        else => null,
123
    },
124
    .ps5 => switch (builtin.cpu.arch) {
125
        // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
126
        .x86, .x86_64 => 16 << 10,
127
        else => null,
128
    },
129
    // system/lib/libc/musl/arch/emscripten/bits/limits.h
130
    .emscripten => 64 << 10,
131
    .linux => switch (builtin.cpu.arch) {
132
        // Linux/arch/*/Kconfig
133
        .arc => 4 << 10,
134
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
135
        .aarch64, .aarch64_be => 4 << 10,
136
        .csky => 4 << 10,
137
        .hexagon => 4 << 10,
138
        .loongarch32, .loongarch64 => 4 << 10,
139
        .m68k => 4 << 10,
140
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
141
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
142
        .riscv32, .riscv64 => 4 << 10,
143
        .s390x => 4 << 10,
144
        .sparc => 4 << 10,
145
        .sparc64 => 8 << 10,
146
        .x86, .x86_64 => 4 << 10,
147
        .xtensa => 4 << 10,
148
        else => null,
149
    },
150
    .freestanding => switch (builtin.cpu.arch) {
151
        .wasm32, .wasm64 => 64 << 10,
152
        else => null,
153
    },
154
    else => null,
155
};
156
157
const default_max_page_size: ?usize = switch (builtin.os.tag) {
158
    .driverkit, .ios, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
159
        .x86_64 => 4 << 10,
160
        .aarch64 => 16 << 10,
161
        else => null,
162
    },
163
    .windows => switch (builtin.cpu.arch) {
164
        // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
165
        .x86, .x86_64 => 4 << 10,
166
        // SuperH => 4 << 10,
167
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
168
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
169
        // DEC Alpha => 8 << 10,
170
        // Itanium => 8 << 10,
171
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
172
        else => null,
173
    },
174
    .wasi => switch (builtin.cpu.arch) {
175
        .wasm32, .wasm64 => 64 << 10,
176
        else => null,
177
    },
178
    // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
179
    .uefi => 4 << 10,
180
    .freebsd => switch (builtin.cpu.arch) {
181
        // FreeBSD/sys/*
182
        .x86, .x86_64 => 4 << 10,
183
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
184
        .aarch64, .aarch64_be => 4 << 10,
185
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
186
        .riscv32, .riscv64 => 4 << 10,
187
        else => null,
188
    },
189
    .netbsd => switch (builtin.cpu.arch) {
190
        // NetBSD/sys/arch/*
191
        .x86, .x86_64 => 4 << 10,
192
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
193
        .aarch64, .aarch64_be => 64 << 10,
194
        .mips, .mipsel, .mips64, .mips64el => 16 << 10,
195
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 16 << 10,
196
        .sparc => 8 << 10,
197
        .sparc64 => 8 << 10,
198
        .riscv32, .riscv64 => 4 << 10,
199
        .m68k => 8 << 10,
200
        else => null,
201
    },
202
    .dragonfly => switch (builtin.cpu.arch) {
203
        .x86, .x86_64 => 4 << 10,
204
        else => null,
205
    },
206
    .openbsd => switch (builtin.cpu.arch) {
207
        // OpenBSD/sys/arch/*
208
        .x86, .x86_64 => 4 << 10,
209
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
210
        .mips64, .mips64el => 16 << 10,
211
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
212
        .riscv64 => 4 << 10,
213
        .sparc64 => 8 << 10,
214
        else => null,
215
    },
216
    .solaris, .illumos => switch (builtin.cpu.arch) {
217
        // src/uts/*/sys/machparam.h
218
        .x86, .x86_64 => 4 << 10,
219
        .sparc, .sparc64 => 8 << 10,
220
        else => null,
221
    },
222
    .fuchsia => switch (builtin.cpu.arch) {
223
        // fuchsia/kernel/arch/*/include/arch/defines.h
224
        .x86_64 => 4 << 10,
225
        .aarch64, .aarch64_be => 4 << 10,
226
        .riscv64 => 4 << 10,
227
        else => null,
228
    },
229
    // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
230
    .serenity => 4 << 10,
231
    .haiku => switch (builtin.cpu.arch) {
232
        // haiku/headers/posix/arch/*/limits.h
233
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
234
        .aarch64, .aarch64_be => 4 << 10,
235
        .m68k => 4 << 10,
236
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
237
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
238
        .riscv64 => 4 << 10,
239
        .sparc64 => 8 << 10,
240
        .x86, .x86_64 => 4 << 10,
241
        else => null,
242
    },
243
    .hurd => switch (builtin.cpu.arch) {
244
        // gnumach/*/include/mach/*/vm_param.h
245
        .x86, .x86_64 => 4 << 10,
246
        .aarch64 => null,
247
        else => null,
248
    },
249
    .plan9 => switch (builtin.cpu.arch) {
250
        // 9front/sys/src/9/*/mem.h
251
        .x86, .x86_64 => 4 << 10,
252
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
253
        .aarch64, .aarch64_be => 64 << 10,
254
        .mips, .mipsel, .mips64, .mips64el => 16 << 10,
255
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
256
        .sparc => 4 << 10,
257
        else => null,
258
    },
259
    .ps3 => switch (builtin.cpu.arch) {
260
        // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
261
        .powerpc64 => 1 << 20, // 1 MiB
262
        else => null,
263
    },
264
    .ps4 => switch (builtin.cpu.arch) {
265
        // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
266
        .x86, .x86_64 => 4 << 10,
267
        else => null,
268
    },
269
    .ps5 => switch (builtin.cpu.arch) {
270
        // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
271
        .x86, .x86_64 => 16 << 10,
272
        else => null,
273
    },
274
    // system/lib/libc/musl/arch/emscripten/bits/limits.h
275
    .emscripten => 64 << 10,
276
    .linux => switch (builtin.cpu.arch) {
277
        // Linux/arch/*/Kconfig
278
        .arc => 16 << 10,
279
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
280
        .aarch64, .aarch64_be => 64 << 10,
281
        .csky => 4 << 10,
282
        .hexagon => 256 << 10,
283
        .loongarch32, .loongarch64 => 64 << 10,
284
        .m68k => 8 << 10,
285
        .mips, .mipsel, .mips64, .mips64el => 64 << 10,
286
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 256 << 10,
287
        .riscv32, .riscv64 => 4 << 10,
288
        .s390x => 4 << 10,
289
        .sparc => 4 << 10,
290
        .sparc64 => 8 << 10,
291
        .x86, .x86_64 => 4 << 10,
292
        .xtensa => 4 << 10,
293
        else => null,
294
    },
295
    .freestanding => switch (builtin.cpu.arch) {
296
        .wasm32, .wasm64 => 64 << 10,
297
        else => null,
298
    },
299
    else => null,
300
};
301
302
/// The compile-time minimum page size that the target might have.
303
/// All pointers from `mmap` or `VirtualAlloc` are aligned to at least `min_page_size`, but their
304
/// actual alignment may be much bigger.
305
/// This value can be overridden via `std.options.min_page_size`.
306
/// On many systems, the actual page size can only be determined at runtime with `pageSize()`.
307
pub const min_page_size: usize = std.options.min_page_size orelse (default_min_page_size orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
308
    @compileError("freestanding/other explicitly has no min_page_size. One can be provided with std.options.min_page_size")
309
else
310
    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has no min_page_size. One can be provided with std.options.min_page_size"));
311
312
/// The compile-time maximum page size that the target might have.
313
/// Targeting a system with a larger page size may require overriding `std.options.max_page_size`,
314
/// as well as using the linker arugment `-z max-page-size=`.
315
/// The actual page size can only be determined at runtime with `pageSize()`.
316
pub const max_page_size: usize = std.options.max_page_size orelse (default_max_page_size orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
317
    @compileError("freestanding/other explicitly has no max_page_size. One can be provided with std.options.max_page_size")
318
else
319
    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has no max_page_size. One can be provided with std.options.max_page_size"));
320
321
/// Returns the system page size.
322
/// If the page size is comptime-known, `pageSize()` returns it directly.
323
/// Otherwise, `pageSize()` defers to `std.options.queryPageSizeFn()`.
324
pub fn pageSize() usize {
325
    if (min_page_size == max_page_size) {
326
        return min_page_size;
327
    }
328
    return std.options.queryPageSizeFn();
329
}
330
331
// A cache used by `defaultQueryPageSize()` to avoid repeating syscalls.
332
var page_size_cache = std.atomic.Value(usize).init(0);
333
334
// The default implementation in `std.options.queryPageSizeFn`.
335
// The first time it is called, it asserts that the page size is within the comptime bounds.
336
pub fn defaultQueryPageSize() usize {
337
    var size = page_size_cache.load(.unordered);
338
    if (size > 0) return size;
339
    size = switch (builtin.os.tag) {
340
        .linux => if (builtin.link_libc) @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE))) else std.os.linux.getauxval(std.elf.AT_PAGESZ),
341
        .driverkit, .ios, .macos, .tvos, .visionos, .watchos => blk: {
342
            const task_port = std.c.mach_task_self();
343
            // mach_task_self may fail "if there are any resource failures or other errors".
344
            if (task_port == std.c.TASK_NULL)
345
                break :blk 0;
346
            var info_count = std.c.TASK_VM_INFO_COUNT;
347
            var vm_info: std.c.task_vm_info_data_t = undefined;
348
            vm_info.page_size = 0;
349
            _ = std.c.task_info(
350
                task_port,
351
                std.c.TASK_VM_INFO,
352
                @as(std.c.task_info_t, @ptrCast(&vm_info)),
353
                &info_count,
354
            );
355
            assert(vm_info.page_size != 0);
356
            break :blk @as(usize, @intCast(vm_info.page_size));
357
        },
358
        .windows => blk: {
359
            var info: std.os.windows.SYSTEM_INFO = undefined;
360
            std.os.windows.kernel32.GetSystemInfo(&info);
361
            break :blk info.dwPageSize;
362
        },
363
        else => if (builtin.link_libc)
364
            if (std.c._SC != void and @hasDecl(std.c._SC, "PAGESIZE"))
365
                @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)))
366
            else
367
                @compileError("missing _SC.PAGESIZE declaration for " ++ @tagName(builtin.os.tag) ++ "-" ++ @tagName(builtin.os.tag))
368
        else if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
369
            @compileError("pageSize on freestanding/other is not supported with the default std.options.queryPageSizeFn")
370
        else
371
            @compileError("pageSize on " ++ @tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " is not supported without linking libc, using the default implementation"),
372
    };
373
374
    assert(size >= min_page_size);
375
    assert(size <= max_page_size);
376
    page_size_cache.store(size, .unordered);
377
378
    return size;
379
}
380
11
pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
381
pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
12
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
382
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
13
pub const ScopedLoggingAllocator = @import("heap/logging_allocator.zig").ScopedLoggingAllocator;
383
pub const ScopedLoggingAllocator = @import("heap/logging_allocator.zig").ScopedLoggingAllocator;
Lines 30-36 pub const MemoryPoolExtra = memory_pool.MemoryPoolExtra; Link Here
30
pub const MemoryPoolOptions = memory_pool.Options;
400
pub const MemoryPoolOptions = memory_pool.Options;
31
401
32
/// TODO Utilize this on Windows.
402
/// TODO Utilize this on Windows.
33
pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
403
pub var next_mmap_addr_hint: ?[*]align(min_page_size) u8 = null;
34
404
35
const CAllocator = struct {
405
const CAllocator = struct {
36
    comptime {
406
    comptime {
Lines 258-264 pub const wasm_allocator = Allocator{ Link Here
258
/// Verifies that the adjusted length will still map to the full length
628
/// Verifies that the adjusted length will still map to the full length
259
pub fn alignPageAllocLen(full_len: usize, len: usize) usize {
629
pub fn alignPageAllocLen(full_len: usize, len: usize) usize {
260
    const aligned_len = mem.alignAllocLen(full_len, len);
630
    const aligned_len = mem.alignAllocLen(full_len, len);
261
    assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len);
631
    assert(mem.alignForward(usize, aligned_len, pageSize()) == full_len);
262
    return aligned_len;
632
    return aligned_len;
263
}
633
}
264
634
Lines 617-629 test "PageAllocator" { Link Here
617
    }
987
    }
618
988
619
    if (builtin.os.tag == .windows) {
989
    if (builtin.os.tag == .windows) {
620
        const slice = try allocator.alignedAlloc(u8, mem.page_size, 128);
990
        const slice = try allocator.alignedAlloc(u8, min_page_size, 128);
621
        slice[0] = 0x12;
991
        slice[0] = 0x12;
622
        slice[127] = 0x34;
992
        slice[127] = 0x34;
623
        allocator.free(slice);
993
        allocator.free(slice);
624
    }
994
    }
625
    {
995
    {
626
        var buf = try allocator.alloc(u8, mem.page_size + 1);
996
        var buf = try allocator.alloc(u8, pageSize() + 1);
627
        defer allocator.free(buf);
997
        defer allocator.free(buf);
628
        buf = try allocator.realloc(buf, 1); // shrink past the page boundary
998
        buf = try allocator.realloc(buf, 1); // shrink past the page boundary
629
    }
999
    }
Lines 826-832 pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void { Link Here
826
    var validationAllocator = mem.validationWrap(base_allocator);
1196
    var validationAllocator = mem.validationWrap(base_allocator);
827
    const allocator = validationAllocator.allocator();
1197
    const allocator = validationAllocator.allocator();
828
1198
829
    const large_align: usize = mem.page_size / 2;
1199
    const large_align: usize = min_page_size / 2;
830
1200
831
    var align_mask: usize = undefined;
1201
    var align_mask: usize = undefined;
832
    align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0];
1202
    align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0];
Lines 859-865 pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { Link Here
859
    var fib = FixedBufferAllocator.init(&debug_buffer);
1229
    var fib = FixedBufferAllocator.init(&debug_buffer);
860
    const debug_allocator = fib.allocator();
1230
    const debug_allocator = fib.allocator();
861
1231
862
    const alloc_size = mem.page_size * 2 + 50;
1232
    const alloc_size = pageSize() * 2 + 50;
863
    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
1233
    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
864
    defer allocator.free(slice);
1234
    defer allocator.free(slice);
865
1235
Lines 868-874 pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { Link Here
868
    // which is 16 pages, hence the 32. This test may require to increase
1238
    // which is 16 pages, hence the 32. This test may require to increase
869
    // the size of the allocations feeding the `allocator` parameter if they
1239
    // the size of the allocations feeding the `allocator` parameter if they
870
    // fail, because of this high over-alignment we want to have.
1240
    // fail, because of this high over-alignment we want to have.
871
    while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), mem.page_size * 32)) {
1241
    while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), pageSize() * 32)) {
872
        try stuff_to_free.append(slice);
1242
        try stuff_to_free.append(slice);
873
        slice = try allocator.alignedAlloc(u8, 16, alloc_size);
1243
        slice = try allocator.alignedAlloc(u8, 16, alloc_size);
874
    }
1244
    }
Lines 883-888 pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { Link Here
883
    try testing.expect(slice[60] == 0x34);
1253
    try testing.expect(slice[60] == 0x34);
884
}
1254
}
885
1255
1256
test "pageSize() smoke test" {
1257
    const size = std.heap.pageSize();
1258
    // Check that pageSize is a power of 2.
1259
    std.debug.assert(size & (size - 1) == 0);
1260
}
1261
1262
test "defaultQueryPageSize() smoke test" {
1263
    // queryPageSize() does not always get called by pageSize()
1264
    if (builtin.cpu.arch.isWasm()) return error.SkipZigTest;
1265
    const size = defaultQueryPageSize();
1266
    // Check that pageSize is a power of 2.
1267
    std.debug.assert(size & (size - 1) == 0);
1268
}
1269
886
test {
1270
test {
887
    _ = LoggingAllocator;
1271
    _ = LoggingAllocator;
888
    _ = LogToWriterAllocator;
1272
    _ = LogToWriterAllocator;
(-)a/lib/std/heap/PageAllocator.zig (-9 / +10 lines)
Lines 2-7 const std = @import("../std.zig"); Link Here
2
const builtin = @import("builtin");
2
const builtin = @import("builtin");
3
const Allocator = std.mem.Allocator;
3
const Allocator = std.mem.Allocator;
4
const mem = std.mem;
4
const mem = std.mem;
5
const heap = std.heap;
5
const maxInt = std.math.maxInt;
6
const maxInt = std.math.maxInt;
6
const assert = std.debug.assert;
7
const assert = std.debug.assert;
7
const native_os = builtin.os.tag;
8
const native_os = builtin.os.tag;
Lines 18-24 fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { Link Here
18
    _ = ra;
19
    _ = ra;
19
    _ = log2_align;
20
    _ = log2_align;
20
    assert(n > 0);
21
    assert(n > 0);
21
    if (n > maxInt(usize) - (mem.page_size - 1)) return null;
22
    if (n > maxInt(usize) - (heap.pageSize() - 1)) return null;
22
23
23
    if (native_os == .windows) {
24
    if (native_os == .windows) {
24
        const addr = windows.VirtualAlloc(
25
        const addr = windows.VirtualAlloc(
Lines 34-40 fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { Link Here
34
        return @ptrCast(addr);
35
        return @ptrCast(addr);
35
    }
36
    }
36
37
37
    const aligned_len = mem.alignForward(usize, n, mem.page_size);
38
    const aligned_len = mem.alignForward(usize, n, heap.pageSize());
38
    const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
39
    const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
39
    const slice = posix.mmap(
40
    const slice = posix.mmap(
40
        hint,
41
        hint,
Lines 44-51 fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { Link Here
44
        -1,
45
        -1,
45
        0,
46
        0,
46
    ) catch return null;
47
    ) catch return null;
47
    assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
48
    assert(mem.isAligned(@intFromPtr(slice.ptr), heap.pageSize()));
48
    const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
49
    const new_hint: [*]align(heap.min_page_size) u8 = @alignCast(slice.ptr + aligned_len);
49
    _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic);
50
    _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic);
50
    return slice.ptr;
51
    return slice.ptr;
51
}
52
}
Lines 59-71 fn resize( Link Here
59
) bool {
60
) bool {
60
    _ = log2_buf_align;
61
    _ = log2_buf_align;
61
    _ = return_address;
62
    _ = return_address;
62
    const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size);
63
    const new_size_aligned = mem.alignForward(usize, new_size, heap.pageSize());
63
64
64
    if (native_os == .windows) {
65
    if (native_os == .windows) {
65
        if (new_size <= buf_unaligned.len) {
66
        if (new_size <= buf_unaligned.len) {
66
            const base_addr = @intFromPtr(buf_unaligned.ptr);
67
            const base_addr = @intFromPtr(buf_unaligned.ptr);
67
            const old_addr_end = base_addr + buf_unaligned.len;
68
            const old_addr_end = base_addr + buf_unaligned.len;
68
            const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size);
69
            const new_addr_end = mem.alignForward(usize, base_addr + new_size, heap.pageSize());
69
            if (old_addr_end > new_addr_end) {
70
            if (old_addr_end > new_addr_end) {
70
                // For shrinking that is not releasing, we will only
71
                // For shrinking that is not releasing, we will only
71
                // decommit the pages not needed anymore.
72
                // decommit the pages not needed anymore.
Lines 77-90 fn resize( Link Here
77
            }
78
            }
78
            return true;
79
            return true;
79
        }
80
        }
80
        const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
81
        const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, heap.pageSize());
81
        if (new_size_aligned <= old_size_aligned) {
82
        if (new_size_aligned <= old_size_aligned) {
82
            return true;
83
            return true;
83
        }
84
        }
84
        return false;
85
        return false;
85
    }
86
    }
86
87
87
    const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
88
    const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, heap.pageSize());
88
    if (new_size_aligned == buf_aligned_len)
89
    if (new_size_aligned == buf_aligned_len)
89
        return true;
90
        return true;
90
91
Lines 107-113 fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v Link Here
107
    if (native_os == .windows) {
108
    if (native_os == .windows) {
108
        windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE);
109
        windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE);
109
    } else {
110
    } else {
110
        const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size);
111
        const buf_aligned_len = mem.alignForward(usize, slice.len, heap.pageSize());
111
        posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
112
        posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
112
    }
113
    }
113
}
114
}
(-)a/lib/std/heap/WasmPageAllocator.zig (-11 / +12 lines)
Lines 5-10 const Allocator = std.mem.Allocator; Link Here
5
const mem = std.mem;
5
const mem = std.mem;
6
const maxInt = std.math.maxInt;
6
const maxInt = std.math.maxInt;
7
const assert = std.debug.assert;
7
const assert = std.debug.assert;
8
const page_size = std.wasm.page_size;
8
9
9
comptime {
10
comptime {
10
    if (!builtin.target.isWasm()) {
11
    if (!builtin.target.isWasm()) {
Lines 71-77 const FreeBlock = struct { Link Here
71
                var count: usize = 0;
72
                var count: usize = 0;
72
                while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
73
                while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
73
                    count += 1;
74
                    count += 1;
74
                    const addr = j * mem.page_size;
75
                    const addr = j * page_size;
75
                    if (count >= num_pages and mem.isAlignedLog2(addr, log2_align)) {
76
                    if (count >= num_pages and mem.isAlignedLog2(addr, log2_align)) {
76
                        self.setBits(j, num_pages, .used);
77
                        self.setBits(j, num_pages, .used);
77
                        return j;
78
                        return j;
Lines 98-113 fn extendedOffset() usize { Link Here
98
}
99
}
99
100
100
fn nPages(memsize: usize) usize {
101
fn nPages(memsize: usize) usize {
101
    return mem.alignForward(usize, memsize, mem.page_size) / mem.page_size;
102
    return mem.alignForward(usize, memsize, page_size) / page_size;
102
}
103
}
103
104
104
fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
105
fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
105
    _ = ctx;
106
    _ = ctx;
106
    _ = ra;
107
    _ = ra;
107
    if (len > maxInt(usize) - (mem.page_size - 1)) return null;
108
    if (len > maxInt(usize) - (page_size - 1)) return null;
108
    const page_count = nPages(len);
109
    const page_count = nPages(len);
109
    const page_idx = allocPages(page_count, log2_align) catch return null;
110
    const page_idx = allocPages(page_count, log2_align) catch return null;
110
    return @as([*]u8, @ptrFromInt(page_idx * mem.page_size));
111
    return @as([*]u8, @ptrFromInt(page_idx * page_size));
111
}
112
}
112
113
113
fn allocPages(page_count: usize, log2_align: u8) !usize {
114
fn allocPages(page_count: usize, log2_align: u8) !usize {
Lines 124-132 fn allocPages(page_count: usize, log2_align: u8) !usize { Link Here
124
    }
125
    }
125
126
126
    const next_page_idx = @wasmMemorySize(0);
127
    const next_page_idx = @wasmMemorySize(0);
127
    const next_page_addr = next_page_idx * mem.page_size;
128
    const next_page_addr = next_page_idx * page_size;
128
    const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align);
129
    const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align);
129
    const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
130
    const drop_page_count = @divExact(aligned_addr - next_page_addr, page_size);
130
    const result = @wasmMemoryGrow(0, @as(u32, @intCast(drop_page_count + page_count)));
131
    const result = @wasmMemoryGrow(0, @as(u32, @intCast(drop_page_count + page_count)));
131
    if (result <= 0)
132
    if (result <= 0)
132
        return error.OutOfMemory;
133
        return error.OutOfMemory;
Lines 149-155 fn freePages(start: usize, end: usize) void { Link Here
149
            // TODO: would it be better if we use the first page instead?
150
            // TODO: would it be better if we use the first page instead?
150
            new_end -= 1;
151
            new_end -= 1;
151
152
152
            extended.data = @as([*]u128, @ptrFromInt(new_end * mem.page_size))[0 .. mem.page_size / @sizeOf(u128)];
153
            extended.data = @as([*]u128, @ptrFromInt(new_end * page_size))[0 .. page_size / @sizeOf(u128)];
153
            // Since this is the first page being freed and we consume it, assume *nothing* is free.
154
            // Since this is the first page being freed and we consume it, assume *nothing* is free.
154
            @memset(extended.data, PageStatus.none_free);
155
            @memset(extended.data, PageStatus.none_free);
155
        }
156
        }
Lines 168-174 fn resize( Link Here
168
    _ = ctx;
169
    _ = ctx;
169
    _ = log2_buf_align;
170
    _ = log2_buf_align;
170
    _ = return_address;
171
    _ = return_address;
171
    const aligned_len = mem.alignForward(usize, buf.len, mem.page_size);
172
    const aligned_len = mem.alignForward(usize, buf.len, page_size);
172
    if (new_len > aligned_len) return false;
173
    if (new_len > aligned_len) return false;
173
    const current_n = nPages(aligned_len);
174
    const current_n = nPages(aligned_len);
174
    const new_n = nPages(new_len);
175
    const new_n = nPages(new_len);
Lines 188-194 fn free( Link Here
188
    _ = ctx;
189
    _ = ctx;
189
    _ = log2_buf_align;
190
    _ = log2_buf_align;
190
    _ = return_address;
191
    _ = return_address;
191
    const aligned_len = mem.alignForward(usize, buf.len, mem.page_size);
192
    const aligned_len = mem.alignForward(usize, buf.len, page_size);
192
    const current_n = nPages(aligned_len);
193
    const current_n = nPages(aligned_len);
193
    const base = nPages(@intFromPtr(buf.ptr));
194
    const base = nPages(@intFromPtr(buf.ptr));
194
    freePages(base, base + current_n);
195
    freePages(base, base + current_n);
Lines 198-205 test "internals" { Link Here
198
    const page_allocator = std.heap.page_allocator;
199
    const page_allocator = std.heap.page_allocator;
199
    const testing = std.testing;
200
    const testing = std.testing;
200
201
201
    const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
202
    const conventional_memsize = WasmPageAllocator.conventional.totalPages() * page_size;
202
    const initial = try page_allocator.alloc(u8, mem.page_size);
203
    const initial = try page_allocator.alloc(u8, page_size);
203
    try testing.expect(@intFromPtr(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.
204
    try testing.expect(@intFromPtr(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.
204
205
205
    var inplace = try page_allocator.realloc(initial, 1);
206
    var inplace = try page_allocator.realloc(initial, 1);
(-)a/lib/std/heap/general_purpose_allocator.zig (-51 / +78 lines)
Lines 48-54 Link Here
48
//!
48
//!
49
//! ## Basic Design:
49
//! ## Basic Design:
50
//!
50
//!
51
//! Small allocations are divided into buckets:
51
//! Small allocations are divided into buckets. For a max page size of 4K:
52
//!
52
//!
53
//! ```
53
//! ```
54
//! index obj_size
54
//! index obj_size
Lines 75-80 Link Here
75
//! BucketHeader, followed by "used bits", and two stack traces for each slot
75
//! BucketHeader, followed by "used bits", and two stack traces for each slot
76
//! (allocation trace and free trace).
76
//! (allocation trace and free trace).
77
//!
77
//!
78
//! The buckets array contains buckets for every size class below `max_page_size`.
79
//! At runtime, only size classes below `pageSize()` will actually be used for allocations.
80
//!
78
//! The "used bits" are 1 bit per slot representing whether the slot is used.
81
//! The "used bits" are 1 bit per slot representing whether the slot is used.
79
//! Allocations use the data to iterate to find a free slot. Frees assert that the
82
//! Allocations use the data to iterate to find a free slot. Frees assert that the
80
//! corresponding bit is 1 and set it to 0.
83
//! corresponding bit is 1 and set it to 0.
Lines 99-109 const math = std.math; Link Here
99
const assert = std.debug.assert;
102
const assert = std.debug.assert;
100
const mem = std.mem;
103
const mem = std.mem;
101
const Allocator = std.mem.Allocator;
104
const Allocator = std.mem.Allocator;
102
const page_size = std.mem.page_size;
105
const min_page_size = std.heap.min_page_size;
106
const max_page_size = std.heap.max_page_size;
107
const pageSize = std.heap.pageSize;
103
const StackTrace = std.builtin.StackTrace;
108
const StackTrace = std.builtin.StackTrace;
104
109
105
/// Integer type for pointing to slots in a small allocation
110
/// Integer type for pointing to slots in a small allocation
106
const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);
111
const SlotIndex = std.meta.Int(.unsigned, math.log2(max_page_size) + 1);
107
112
108
const default_test_stack_trace_frames: usize = if (builtin.is_test) 10 else 6;
113
const default_test_stack_trace_frames: usize = if (builtin.is_test) 10 else 6;
109
const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) default_test_stack_trace_frames else 0;
114
const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) default_test_stack_trace_frames else 0;
Lines 157-162 pub const Config = struct { Link Here
157
162
158
pub const Check = enum { ok, leak };
163
pub const Check = enum { ok, leak };
159
164
165
var used_small_bucket_count_cache = std.atomic.Value(usize).init(0);
166
var largest_used_bucket_object_size_cache = std.atomic.Value(usize).init(0);
167
160
/// Default initialization of this struct is deprecated; use `.init` instead.
168
/// Default initialization of this struct is deprecated; use `.init` instead.
161
pub fn GeneralPurposeAllocator(comptime config: Config) type {
169
pub fn GeneralPurposeAllocator(comptime config: Config) type {
162
    return struct {
170
    return struct {
Lines 206-214 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
206
214
207
        pub const Error = mem.Allocator.Error;
215
        pub const Error = mem.Allocator.Error;
208
216
209
        const small_bucket_count = math.log2(page_size);
217
        const small_bucket_count = math.log2(max_page_size);
210
        const largest_bucket_object_size = 1 << (small_bucket_count - 1);
218
        const largest_bucket_object_size = 1 << (small_bucket_count - 1);
211
        const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);
219
        const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);
220
        fn used_small_bucket_count() usize {
221
            const cached = used_small_bucket_count_cache.load(.monotonic);
222
            if (cached != 0) {
223
                return cached;
224
            }
225
            const val = math.log2(pageSize());
226
            used_small_bucket_count_cache.store(val, .monotonic);
227
            return val;
228
        }
229
        fn largest_used_bucket_object_size() usize {
230
            const cached = largest_used_bucket_object_size_cache.load(.monotonic);
231
            if (cached != 0) {
232
                return cached;
233
            }
234
            const val = @as(usize, 1) << @truncate(used_small_bucket_count() - 1);
235
            largest_used_bucket_object_size_cache.store(val, .monotonic);
236
            return val;
237
        }
212
238
213
        const bucketCompare = struct {
239
        const bucketCompare = struct {
214
            fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
240
            fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
Lines 261-267 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
261
        // * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
287
        // * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
262
288
263
        const BucketHeader = struct {
289
        const BucketHeader = struct {
264
            page: [*]align(page_size) u8,
290
            page: [*]align(min_page_size) u8,
265
            alloc_cursor: SlotIndex,
291
            alloc_cursor: SlotIndex,
266
            used_count: SlotIndex,
292
            used_count: SlotIndex,
267
293
Lines 273-286 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
273
                if (!config.safety) @compileError("requested size is only stored when safety is enabled");
299
                if (!config.safety) @compileError("requested size is only stored when safety is enabled");
274
                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(size_class);
300
                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(size_class);
275
                const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
301
                const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
276
                const slot_count = @divExact(page_size, size_class);
302
                const slot_count = @divExact(pageSize(), size_class);
277
                return sizes[0..slot_count];
303
                return sizes[0..slot_count];
278
            }
304
            }
279
305
280
            fn log2PtrAligns(bucket: *BucketHeader, size_class: usize) []u8 {
306
            fn log2PtrAligns(bucket: *BucketHeader, size_class: usize) []u8 {
281
                if (!config.safety) @compileError("requested size is only stored when safety is enabled");
307
                if (!config.safety) @compileError("requested size is only stored when safety is enabled");
282
                const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(size_class);
308
                const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(size_class);
283
                const slot_count = @divExact(page_size, size_class);
309
                const slot_count = @divExact(pageSize(), size_class);
284
                return aligns_ptr[0..slot_count];
310
                return aligns_ptr[0..slot_count];
285
            }
311
            }
286
312
Lines 312-318 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
312
            /// Only valid for buckets within `empty_buckets`, and relies on the `alloc_cursor`
338
            /// Only valid for buckets within `empty_buckets`, and relies on the `alloc_cursor`
313
            /// of empty buckets being set to `slot_count` when they are added to `empty_buckets`
339
            /// of empty buckets being set to `slot_count` when they are added to `empty_buckets`
314
            fn emptyBucketSizeClass(bucket: *BucketHeader) usize {
340
            fn emptyBucketSizeClass(bucket: *BucketHeader) usize {
315
                return @divExact(page_size, bucket.alloc_cursor);
341
                return @divExact(pageSize(), bucket.alloc_cursor);
316
            }
342
            }
317
        };
343
        };
318
344
Lines 355-367 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
355
381
356
        fn bucketAlignsStart(size_class: usize) usize {
382
        fn bucketAlignsStart(size_class: usize) usize {
357
            if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
383
            if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
358
            const slot_count = @divExact(page_size, size_class);
384
            const slot_count = @divExact(pageSize(), size_class);
359
            return bucketRequestedSizesStart(size_class) + (@sizeOf(LargestSizeClassInt) * slot_count);
385
            return bucketRequestedSizesStart(size_class) + (@sizeOf(LargestSizeClassInt) * slot_count);
360
        }
386
        }
361
387
362
        fn bucketStackFramesStart(size_class: usize) usize {
388
        fn bucketStackFramesStart(size_class: usize) usize {
363
            const unaligned_start = if (config.safety) blk: {
389
            const unaligned_start = if (config.safety) blk: {
364
                const slot_count = @divExact(page_size, size_class);
390
                const slot_count = @divExact(pageSize(), size_class);
365
                break :blk bucketAlignsStart(size_class) + slot_count;
391
                break :blk bucketAlignsStart(size_class) + slot_count;
366
            } else @sizeOf(BucketHeader) + usedBitsCount(size_class);
392
            } else @sizeOf(BucketHeader) + usedBitsCount(size_class);
367
            return mem.alignForward(
393
            return mem.alignForward(
Lines 372-383 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
372
        }
398
        }
373
399
374
        fn bucketSize(size_class: usize) usize {
400
        fn bucketSize(size_class: usize) usize {
375
            const slot_count = @divExact(page_size, size_class);
401
            const slot_count = @divExact(pageSize(), size_class);
376
            return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
402
            return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
377
        }
403
        }
378
404
379
        fn usedBitsCount(size_class: usize) usize {
405
        fn usedBitsCount(size_class: usize) usize {
380
            const slot_count = @divExact(page_size, size_class);
406
            const slot_count = @divExact(pageSize(), size_class);
381
            if (slot_count < 8) return 1;
407
            if (slot_count < 8) return 1;
382
            return @divExact(slot_count, 8);
408
            return @divExact(slot_count, 8);
383
        }
409
        }
Lines 416-422 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
416
        pub fn detectLeaks(self: *Self) bool {
442
        pub fn detectLeaks(self: *Self) bool {
417
            var leaks = false;
443
            var leaks = false;
418
444
419
            for (&self.buckets, 0..) |*buckets, bucket_i| {
445
            for (0..used_small_bucket_count()) |bucket_i| {
446
                const buckets = &self.buckets[bucket_i];
420
                if (buckets.root == null) continue;
447
                if (buckets.root == null) continue;
421
                const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i));
448
                const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i));
422
                const used_bits_count = usedBitsCount(size_class);
449
                const used_bits_count = usedBitsCount(size_class);
Lines 464-470 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
464
                    var bucket = node.key;
491
                    var bucket = node.key;
465
                    if (config.never_unmap) {
492
                    if (config.never_unmap) {
466
                        // free page that was intentionally leaked by never_unmap
493
                        // free page that was intentionally leaked by never_unmap
467
                        self.backing_allocator.free(bucket.page[0..page_size]);
494
                        self.backing_allocator.free(bucket.page[0..pageSize()]);
468
                    }
495
                    }
469
                    // alloc_cursor was set to slot count when bucket added to empty_buckets
496
                    // alloc_cursor was set to slot count when bucket added to empty_buckets
470
                    self.freeBucket(bucket, bucket.emptyBucketSizeClass());
497
                    self.freeBucket(bucket, bucket.emptyBucketSizeClass());
Lines 531-537 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
531
        fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error!Slot {
558
        fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error!Slot {
532
            const bucket_index = math.log2(size_class);
559
            const bucket_index = math.log2(size_class);
533
            var buckets = &self.buckets[bucket_index];
560
            var buckets = &self.buckets[bucket_index];
534
            const slot_count = @divExact(page_size, size_class);
561
            const slot_count = @divExact(pageSize(), size_class);
535
            if (self.cur_buckets[bucket_index] == null or self.cur_buckets[bucket_index].?.alloc_cursor == slot_count) {
562
            if (self.cur_buckets[bucket_index] == null or self.cur_buckets[bucket_index].?.alloc_cursor == slot_count) {
536
                const new_bucket = try self.createBucket(size_class);
563
                const new_bucket = try self.createBucket(size_class);
537
                errdefer self.freeBucket(new_bucket, size_class);
564
                errdefer self.freeBucket(new_bucket, size_class);
Lines 564-570 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
564
            addr: usize,
591
            addr: usize,
565
            current_bucket: ?*BucketHeader,
592
            current_bucket: ?*BucketHeader,
566
        ) ?*BucketHeader {
593
        ) ?*BucketHeader {
567
            const search_page: [*]align(page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, page_size));
594
            const search_page: [*]align(min_page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, pageSize()));
568
            if (current_bucket != null and current_bucket.?.page == search_page) {
595
            if (current_bucket != null and current_bucket.?.page == search_page) {
569
                return current_bucket;
596
                return current_bucket;
570
            }
597
            }
Lines 729-742 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
729
            assert(old_mem.len != 0);
756
            assert(old_mem.len != 0);
730
757
731
            const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
758
            const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
732
            if (aligned_size > largest_bucket_object_size) {
759
            if (aligned_size > largest_used_bucket_object_size()) {
733
                return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
760
                return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
734
            }
761
            }
735
            const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
762
            const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
736
763
737
            var bucket_index = math.log2(size_class_hint);
764
            var bucket_index = math.log2(size_class_hint);
738
            var size_class: usize = size_class_hint;
765
            var size_class: usize = size_class_hint;
739
            const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
766
            const bucket = while (bucket_index < used_small_bucket_count()) : (bucket_index += 1) {
740
                if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
767
                if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
741
                    break bucket;
768
                    break bucket;
742
                }
769
                }
Lines 847-853 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
847
            assert(old_mem.len != 0);
874
            assert(old_mem.len != 0);
848
875
849
            const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
876
            const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
850
            if (aligned_size > largest_bucket_object_size) {
877
            if (aligned_size > largest_used_bucket_object_size()) {
851
                self.freeLarge(old_mem, log2_old_align, ret_addr);
878
                self.freeLarge(old_mem, log2_old_align, ret_addr);
852
                return;
879
                return;
853
            }
880
            }
Lines 855-861 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
855
882
856
            var bucket_index = math.log2(size_class_hint);
883
            var bucket_index = math.log2(size_class_hint);
857
            var size_class: usize = size_class_hint;
884
            var size_class: usize = size_class_hint;
858
            const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
885
            const bucket = while (bucket_index < used_small_bucket_count()) : (bucket_index += 1) {
859
                if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
886
                if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
860
                    break bucket;
887
                    break bucket;
861
                }
888
                }
Lines 944-957 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
944
                    self.cur_buckets[bucket_index] = null;
971
                    self.cur_buckets[bucket_index] = null;
945
                }
972
                }
946
                if (!config.never_unmap) {
973
                if (!config.never_unmap) {
947
                    self.backing_allocator.free(bucket.page[0..page_size]);
974
                    self.backing_allocator.free(bucket.page[0..pageSize()]);
948
                }
975
                }
949
                if (!config.retain_metadata) {
976
                if (!config.retain_metadata) {
950
                    self.freeBucket(bucket, size_class);
977
                    self.freeBucket(bucket, size_class);
951
                    self.bucket_node_pool.destroy(node);
978
                    self.bucket_node_pool.destroy(node);
952
                } else {
979
                } else {
953
                    // move alloc_cursor to end so we can tell size_class later
980
                    // move alloc_cursor to end so we can tell size_class later
954
                    const slot_count = @divExact(page_size, size_class);
981
                    const slot_count = @divExact(pageSize(), size_class);
955
                    bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
982
                    bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
956
                    var empty_entry = self.empty_buckets.getEntryFor(node.key);
983
                    var empty_entry = self.empty_buckets.getEntryFor(node.key);
957
                    empty_entry.set(node);
984
                    empty_entry.set(node);
Lines 992-998 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
992
            ret_addr: usize,
1019
            ret_addr: usize,
993
        ) Allocator.Error![*]u8 {
1020
        ) Allocator.Error![*]u8 {
994
            const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
1021
            const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
995
            if (new_aligned_size > largest_bucket_object_size) {
1022
            if (new_aligned_size > largest_used_bucket_object_size()) {
996
                try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
1023
                try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
997
                const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
1024
                const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
998
                    return error.OutOfMemory;
1025
                    return error.OutOfMemory;
Lines 1035-1041 pub fn GeneralPurposeAllocator(comptime config: Config) type { Link Here
1035
        }
1062
        }
1036
1063
1037
        fn createBucket(self: *Self, size_class: usize) Error!*BucketHeader {
1064
        fn createBucket(self: *Self, size_class: usize) Error!*BucketHeader {
1038
            const page = try self.backing_allocator.alignedAlloc(u8, page_size, page_size);
1065
            const page = try self.backing_allocator.alignedAlloc(u8, min_page_size, pageSize());
1039
            errdefer self.backing_allocator.free(page);
1066
            errdefer self.backing_allocator.free(page);
1040
1067
1041
            const bucket_size = bucketSize(size_class);
1068
            const bucket_size = bucketSize(size_class);
Lines 1175-1191 test "large object - grow" { Link Here
1175
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1202
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1176
    const allocator = gpa.allocator();
1203
    const allocator = gpa.allocator();
1177
1204
1178
    var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
1205
    var slice1 = try allocator.alloc(u8, pageSize() * 2 - 20);
1179
    defer allocator.free(slice1);
1206
    defer allocator.free(slice1);
1180
1207
1181
    const old = slice1;
1208
    const old = slice1;
1182
    slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
1209
    slice1 = try allocator.realloc(slice1, pageSize() * 2 - 10);
1183
    try std.testing.expect(slice1.ptr == old.ptr);
1210
    try std.testing.expect(slice1.ptr == old.ptr);
1184
1211
1185
    slice1 = try allocator.realloc(slice1, page_size * 2);
1212
    slice1 = try allocator.realloc(slice1, pageSize() * 2);
1186
    try std.testing.expect(slice1.ptr == old.ptr);
1213
    try std.testing.expect(slice1.ptr == old.ptr);
1187
1214
1188
    slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
1215
    slice1 = try allocator.realloc(slice1, pageSize() * 2 + 1);
1189
}
1216
}
1190
1217
1191
test "realloc small object to large object" {
1218
test "realloc small object to large object" {
Lines 1199-1205 test "realloc small object to large object" { Link Here
1199
    slice[60] = 0x34;
1226
    slice[60] = 0x34;
1200
1227
1201
    // This requires upgrading to a large object
1228
    // This requires upgrading to a large object
1202
    const large_object_size = page_size * 2 + 50;
1229
    const large_object_size = pageSize() * 2 + 50;
1203
    slice = try allocator.realloc(slice, large_object_size);
1230
    slice = try allocator.realloc(slice, large_object_size);
1204
    try std.testing.expect(slice[0] == 0x12);
1231
    try std.testing.expect(slice[0] == 0x12);
1205
    try std.testing.expect(slice[60] == 0x34);
1232
    try std.testing.expect(slice[60] == 0x34);
Lines 1210-1231 test "shrink large object to large object" { Link Here
1210
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1237
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1211
    const allocator = gpa.allocator();
1238
    const allocator = gpa.allocator();
1212
1239
1213
    var slice = try allocator.alloc(u8, page_size * 2 + 50);
1240
    var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
1214
    defer allocator.free(slice);
1241
    defer allocator.free(slice);
1215
    slice[0] = 0x12;
1242
    slice[0] = 0x12;
1216
    slice[60] = 0x34;
1243
    slice[60] = 0x34;
1217
1244
1218
    if (!allocator.resize(slice, page_size * 2 + 1)) return;
1245
    if (!allocator.resize(slice, pageSize() * 2 + 1)) return;
1219
    slice = slice.ptr[0 .. page_size * 2 + 1];
1246
    slice = slice.ptr[0 .. pageSize() * 2 + 1];
1220
    try std.testing.expect(slice[0] == 0x12);
1247
    try std.testing.expect(slice[0] == 0x12);
1221
    try std.testing.expect(slice[60] == 0x34);
1248
    try std.testing.expect(slice[60] == 0x34);
1222
1249
1223
    try std.testing.expect(allocator.resize(slice, page_size * 2 + 1));
1250
    try std.testing.expect(allocator.resize(slice, pageSize() * 2 + 1));
1224
    slice = slice[0 .. page_size * 2 + 1];
1251
    slice = slice[0 .. pageSize() * 2 + 1];
1225
    try std.testing.expect(slice[0] == 0x12);
1252
    try std.testing.expect(slice[0] == 0x12);
1226
    try std.testing.expect(slice[60] == 0x34);
1253
    try std.testing.expect(slice[60] == 0x34);
1227
1254
1228
    slice = try allocator.realloc(slice, page_size * 2);
1255
    slice = try allocator.realloc(slice, pageSize() * 2);
1229
    try std.testing.expect(slice[0] == 0x12);
1256
    try std.testing.expect(slice[0] == 0x12);
1230
    try std.testing.expect(slice[60] == 0x34);
1257
    try std.testing.expect(slice[60] == 0x34);
1231
}
1258
}
Lines 1239-1251 test "shrink large object to large object with larger alignment" { Link Here
1239
    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
1266
    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
1240
    const debug_allocator = fba.allocator();
1267
    const debug_allocator = fba.allocator();
1241
1268
1242
    const alloc_size = page_size * 2 + 50;
1269
    const alloc_size = pageSize() * 2 + 50;
1243
    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
1270
    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
1244
    defer allocator.free(slice);
1271
    defer allocator.free(slice);
1245
1272
1246
    const big_alignment: usize = switch (builtin.os.tag) {
1273
    const big_alignment: usize = switch (builtin.os.tag) {
1247
        .windows => page_size * 32, // Windows aligns to 64K.
1274
        .windows => pageSize() * 32, // Windows aligns to 64K.
1248
        else => page_size * 2,
1275
        else => pageSize() * 2,
1249
    };
1276
    };
1250
    // This loop allocates until we find a page that is not aligned to the big
1277
    // This loop allocates until we find a page that is not aligned to the big
1251
    // alignment. Then we shrink the allocation after the loop, but increase the
1278
    // alignment. Then we shrink the allocation after the loop, but increase the
Lines 1271-1277 test "realloc large object to small object" { Link Here
1271
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1298
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1272
    const allocator = gpa.allocator();
1299
    const allocator = gpa.allocator();
1273
1300
1274
    var slice = try allocator.alloc(u8, page_size * 2 + 50);
1301
    var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
1275
    defer allocator.free(slice);
1302
    defer allocator.free(slice);
1276
    slice[0] = 0x12;
1303
    slice[0] = 0x12;
1277
    slice[16] = 0x34;
1304
    slice[16] = 0x34;
Lines 1311-1328 test "realloc large object to larger alignment" { Link Here
1311
    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
1338
    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
1312
    const debug_allocator = fba.allocator();
1339
    const debug_allocator = fba.allocator();
1313
1340
1314
    var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
1341
    var slice = try allocator.alignedAlloc(u8, 16, pageSize() * 2 + 50);
1315
    defer allocator.free(slice);
1342
    defer allocator.free(slice);
1316
1343
1317
    const big_alignment: usize = switch (builtin.os.tag) {
1344
    const big_alignment: usize = switch (builtin.os.tag) {
1318
        .windows => page_size * 32, // Windows aligns to 64K.
1345
        .windows => pageSize() * 32, // Windows aligns to 64K.
1319
        else => page_size * 2,
1346
        else => pageSize() * 2,
1320
    };
1347
    };
1321
    // This loop allocates until we find a page that is not aligned to the big alignment.
1348
    // This loop allocates until we find a page that is not aligned to the big alignment.
1322
    var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
1349
    var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
1323
    while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
1350
    while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
1324
        try stuff_to_free.append(slice);
1351
        try stuff_to_free.append(slice);
1325
        slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
1352
        slice = try allocator.alignedAlloc(u8, 16, pageSize() * 2 + 50);
1326
    }
1353
    }
1327
    while (stuff_to_free.popOrNull()) |item| {
1354
    while (stuff_to_free.popOrNull()) |item| {
1328
        allocator.free(item);
1355
        allocator.free(item);
Lines 1330-1344 test "realloc large object to larger alignment" { Link Here
1330
    slice[0] = 0x12;
1357
    slice[0] = 0x12;
1331
    slice[16] = 0x34;
1358
    slice[16] = 0x34;
1332
1359
1333
    slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100);
1360
    slice = try allocator.reallocAdvanced(slice, 32, pageSize() * 2 + 100);
1334
    try std.testing.expect(slice[0] == 0x12);
1361
    try std.testing.expect(slice[0] == 0x12);
1335
    try std.testing.expect(slice[16] == 0x34);
1362
    try std.testing.expect(slice[16] == 0x34);
1336
1363
1337
    slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25);
1364
    slice = try allocator.reallocAdvanced(slice, 32, pageSize() * 2 + 25);
1338
    try std.testing.expect(slice[0] == 0x12);
1365
    try std.testing.expect(slice[0] == 0x12);
1339
    try std.testing.expect(slice[16] == 0x34);
1366
    try std.testing.expect(slice[16] == 0x34);
1340
1367
1341
    slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100);
1368
    slice = try allocator.reallocAdvanced(slice, big_alignment, pageSize() * 2 + 100);
1342
    try std.testing.expect(slice[0] == 0x12);
1369
    try std.testing.expect(slice[0] == 0x12);
1343
    try std.testing.expect(slice[16] == 0x34);
1370
    try std.testing.expect(slice[16] == 0x34);
1344
}
1371
}
Lines 1349-1355 test "large object shrinks to small but allocation fails during shrink" { Link Here
1349
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1376
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1350
    const allocator = gpa.allocator();
1377
    const allocator = gpa.allocator();
1351
1378
1352
    var slice = try allocator.alloc(u8, page_size * 2 + 50);
1379
    var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
1353
    defer allocator.free(slice);
1380
    defer allocator.free(slice);
1354
    slice[0] = 0x12;
1381
    slice[0] = 0x12;
1355
    slice[3] = 0x34;
1382
    slice[3] = 0x34;
Lines 1420-1426 test "double frees" { Link Here
1420
    try std.testing.expect(GPA.searchBucket(&gpa.empty_buckets, @intFromPtr(small.ptr), null) != null);
1447
    try std.testing.expect(GPA.searchBucket(&gpa.empty_buckets, @intFromPtr(small.ptr), null) != null);
1421
1448
1422
    // detect a large allocation double free
1449
    // detect a large allocation double free
1423
    const large = try allocator.alloc(u8, 2 * page_size);
1450
    const large = try allocator.alloc(u8, 2 * pageSize());
1424
    try std.testing.expect(gpa.large_allocations.contains(@intFromPtr(large.ptr)));
1451
    try std.testing.expect(gpa.large_allocations.contains(@intFromPtr(large.ptr)));
1425
    try std.testing.expectEqual(gpa.large_allocations.getEntry(@intFromPtr(large.ptr)).?.value_ptr.bytes, large);
1452
    try std.testing.expectEqual(gpa.large_allocations.getEntry(@intFromPtr(large.ptr)).?.value_ptr.bytes, large);
1426
    allocator.free(large);
1453
    allocator.free(large);
Lines 1429-1435 test "double frees" { Link Here
1429
1456
1430
    const normal_small = try allocator.alloc(u8, size_class);
1457
    const normal_small = try allocator.alloc(u8, size_class);
1431
    defer allocator.free(normal_small);
1458
    defer allocator.free(normal_small);
1432
    const normal_large = try allocator.alloc(u8, 2 * page_size);
1459
    const normal_large = try allocator.alloc(u8, 2 * pageSize());
1433
    defer allocator.free(normal_large);
1460
    defer allocator.free(normal_large);
1434
1461
1435
    // check that flushing retained metadata doesn't disturb live allocations
1462
    // check that flushing retained metadata doesn't disturb live allocations
Lines 1462-1469 test "bug 9995 fix, large allocs count requested size not backing size" { Link Here
1462
    var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
1489
    var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
1463
    const allocator = gpa.allocator();
1490
    const allocator = gpa.allocator();
1464
1491
1465
    var buf = try allocator.alignedAlloc(u8, 1, page_size + 1);
1492
    var buf = try allocator.alignedAlloc(u8, 1, pageSize() + 1);
1466
    try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
1493
    try std.testing.expect(gpa.total_requested_bytes == pageSize() + 1);
1467
    buf = try allocator.realloc(buf, 1);
1494
    buf = try allocator.realloc(buf, 1);
1468
    try std.testing.expect(gpa.total_requested_bytes == 1);
1495
    try std.testing.expect(gpa.total_requested_bytes == 1);
1469
    buf = try allocator.realloc(buf, 2);
1496
    buf = try allocator.realloc(buf, 2);
(-)a/lib/std/heap/sbrk_allocator.zig (-3 / +4 lines)
Lines 3-8 const builtin = @import("builtin"); Link Here
3
const math = std.math;
3
const math = std.math;
4
const Allocator = std.mem.Allocator;
4
const Allocator = std.mem.Allocator;
5
const mem = std.mem;
5
const mem = std.mem;
6
const heap = std.heap;
6
const assert = std.debug.assert;
7
const assert = std.debug.assert;
7
8
8
pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
9
pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
Lines 18-24 pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type { Link Here
18
        const max_usize = math.maxInt(usize);
19
        const max_usize = math.maxInt(usize);
19
        const ushift = math.Log2Int(usize);
20
        const ushift = math.Log2Int(usize);
20
        const bigpage_size = 64 * 1024;
21
        const bigpage_size = 64 * 1024;
21
        const pages_per_bigpage = bigpage_size / mem.page_size;
22
        const pages_per_bigpage = bigpage_size / heap.pageSize();
22
        const bigpage_count = max_usize / bigpage_size;
23
        const bigpage_count = max_usize / bigpage_size;
23
24
24
        /// Because of storing free list pointers, the minimum size class is 3.
25
        /// Because of storing free list pointers, the minimum size class is 3.
Lines 58-64 pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type { Link Here
58
                    }
59
                    }
59
60
60
                    const next_addr = next_addrs[class];
61
                    const next_addr = next_addrs[class];
61
                    if (next_addr % mem.page_size == 0) {
62
                    if (next_addr % heap.pageSize() == 0) {
62
                        const addr = allocBigPages(1);
63
                        const addr = allocBigPages(1);
63
                        if (addr == 0) return null;
64
                        if (addr == 0) return null;
64
                        //std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
65
                        //std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
Lines 153-159 pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type { Link Here
153
                big_frees[class] = node.*;
154
                big_frees[class] = node.*;
154
                return top_free_ptr;
155
                return top_free_ptr;
155
            }
156
            }
156
            return sbrk(pow2_pages * pages_per_bigpage * mem.page_size);
157
            return sbrk(pow2_pages * pages_per_bigpage * heap.pageSize());
157
        }
158
        }
158
    };
159
    };
159
}
160
}
(-)a/lib/std/mem.zig (-28 / +9 lines)
Lines 8-33 const testing = std.testing; Link Here
8
const Endian = std.builtin.Endian;
8
const Endian = std.builtin.Endian;
9
const native_endian = builtin.cpu.arch.endian();
9
const native_endian = builtin.cpu.arch.endian();
10
10
11
/// Compile time known minimum page size.
12
/// https://github.com/ziglang/zig/issues/4082
13
pub const page_size = switch (builtin.cpu.arch) {
14
    .wasm32, .wasm64 => 64 * 1024,
15
    .aarch64 => switch (builtin.os.tag) {
16
        .macos, .ios, .watchos, .tvos, .visionos => 16 * 1024,
17
        else => 4 * 1024,
18
    },
19
    .sparc64 => 8 * 1024,
20
    .loongarch32, .loongarch64 => switch (builtin.os.tag) {
21
        // Linux default KConfig value is 16KiB
22
        .linux => 16 * 1024,
23
        // FIXME:
24
        // There is no other OS supported yet. Use the same value
25
        // as Linux for now.
26
        else => 16 * 1024,
27
    },
28
    else => 4 * 1024,
29
};
30
31
/// The standard library currently thoroughly depends on byte size
11
/// The standard library currently thoroughly depends on byte size
32
/// being 8 bits.  (see the use of u8 throughout allocation code as
12
/// being 8 bits.  (see the use of u8 throughout allocation code as
33
/// the "byte" type.)  Code which depends on this can reference this
13
/// the "byte" type.)  Code which depends on this can reference this
Lines 1085-1096 pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co Link Here
1085
                const Block = @Vector(block_len, T);
1065
                const Block = @Vector(block_len, T);
1086
                const mask: Block = @splat(sentinel);
1066
                const mask: Block = @splat(sentinel);
1087
1067
1088
                comptime std.debug.assert(std.mem.page_size % block_size == 0);
1068
                comptime std.debug.assert(std.heap.max_page_size % @sizeOf(Block) == 0);
1069
                std.debug.assert(std.heap.pageSize() % @sizeOf(Block) == 0);
1089
1070
1090
                // First block may be unaligned
1071
                // First block may be unaligned
1091
                const start_addr = @intFromPtr(&p[i]);
1072
                const start_addr = @intFromPtr(&p[i]);
1092
                const offset_in_page = start_addr & (std.mem.page_size - 1);
1073
                const offset_in_page = start_addr & (std.heap.pageSize() - 1);
1093
                if (offset_in_page <= std.mem.page_size - block_size) {
1074
                if (offset_in_page <= std.heap.pageSize() - @sizeOf(Block)) {
1094
                    // Will not read past the end of a page, full block.
1075
                    // Will not read past the end of a page, full block.
1095
                    const block: Block = p[i..][0..block_len].*;
1076
                    const block: Block = p[i..][0..block_len].*;
1096
                    const matches = block == mask;
1077
                    const matches = block == mask;
Lines 1138-1155 test "indexOfSentinel vector paths" { Link Here
1138
        const block_len = std.simd.suggestVectorLength(T) orelse continue;
1119
        const block_len = std.simd.suggestVectorLength(T) orelse continue;
1139
1120
1140
        // Allocate three pages so we guarantee a page-crossing address with a full page after
1121
        // Allocate three pages so we guarantee a page-crossing address with a full page after
1141
        const memory = try allocator.alloc(T, 3 * std.mem.page_size / @sizeOf(T));
1122
        const memory = try allocator.alloc(T, 3 * std.heap.pageSize() / @sizeOf(T));
1142
        defer allocator.free(memory);
1123
        defer allocator.free(memory);
1143
        @memset(memory, 0xaa);
1124
        @memset(memory, 0xaa);
1144
1125
1145
        // Find starting page-alignment = 0
1126
        // Find starting page-alignment = 0
1146
        var start: usize = 0;
1127
        var start: usize = 0;
1147
        const start_addr = @intFromPtr(&memory);
1128
        const start_addr = @intFromPtr(&memory);
1148
        start += (std.mem.alignForward(usize, start_addr, std.mem.page_size) - start_addr) / @sizeOf(T);
1129
        start += (std.mem.alignForward(usize, start_addr, std.heap.pageSize()) - start_addr) / @sizeOf(T);
1149
        try testing.expect(start < std.mem.page_size / @sizeOf(T));
1130
        try testing.expect(start < std.heap.pageSize() / @sizeOf(T));
1150
1131
1151
        // Validate all sub-block alignments
1132
        // Validate all sub-block alignments
1152
        const search_len = std.mem.page_size / @sizeOf(T);
1133
        const search_len = std.heap.pageSize() / @sizeOf(T);
1153
        memory[start + search_len] = 0;
1134
        memory[start + search_len] = 0;
1154
        for (0..block_len) |offset| {
1135
        for (0..block_len) |offset| {
1155
            try testing.expectEqual(search_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start + offset])));
1136
            try testing.expectEqual(search_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start + offset])));
Lines 1157-1163 test "indexOfSentinel vector paths" { Link Here
1157
        memory[start + search_len] = 0xaa;
1138
        memory[start + search_len] = 0xaa;
1158
1139
1159
        // Validate page boundary crossing
1140
        // Validate page boundary crossing
1160
        const start_page_boundary = start + (std.mem.page_size / @sizeOf(T));
1141
        const start_page_boundary = start + (std.heap.pageSize() / @sizeOf(T));
1161
        memory[start_page_boundary + block_len] = 0;
1142
        memory[start_page_boundary + block_len] = 0;
1162
        for (0..block_len) |offset| {
1143
        for (0..block_len) |offset| {
1163
            try testing.expectEqual(2 * block_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start_page_boundary - block_len + offset])));
1144
            try testing.expectEqual(2 * block_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start_page_boundary - block_len + offset])));
(-)a/lib/std/mem/Allocator.zig (-1 / +1 lines)
Lines 215-221 fn allocBytesWithAlignment(self: Allocator, comptime alignment: u29, byte_count: Link Here
215
    // The Zig Allocator interface is not intended to solve alignments beyond
215
    // The Zig Allocator interface is not intended to solve alignments beyond
216
    // the minimum OS page size. For these use cases, the caller must use OS
216
    // the minimum OS page size. For these use cases, the caller must use OS
217
    // APIs directly.
217
    // APIs directly.
218
    comptime assert(alignment <= mem.page_size);
218
    if (!@inComptime() and alignment > std.heap.pageSize()) @panic("Alignment must be smaller than page size.");
219
219
220
    if (byte_count == 0) {
220
    if (byte_count == 0) {
221
        const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), alignment);
221
        const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), alignment);
(-)a/lib/std/os/linux/IoUring.zig (-7 / +8 lines)
Lines 3-8 const std = @import("std"); Link Here
3
const builtin = @import("builtin");
3
const builtin = @import("builtin");
4
const assert = std.debug.assert;
4
const assert = std.debug.assert;
5
const mem = std.mem;
5
const mem = std.mem;
6
const heap = std.heap;
6
const net = std.net;
7
const net = std.net;
7
const posix = std.posix;
8
const posix = std.posix;
8
const linux = std.os.linux;
9
const linux = std.os.linux;
Lines 1341-1348 pub const SubmissionQueue = struct { Link Here
1341
    dropped: *u32,
1342
    dropped: *u32,
1342
    array: []u32,
1343
    array: []u32,
1343
    sqes: []linux.io_uring_sqe,
1344
    sqes: []linux.io_uring_sqe,
1344
    mmap: []align(mem.page_size) u8,
1345
    mmap: []align(heap.min_page_size) u8,
1345
    mmap_sqes: []align(mem.page_size) u8,
1346
    mmap_sqes: []align(heap.min_page_size) u8,
1346
1347
1347
    // We use `sqe_head` and `sqe_tail` in the same way as liburing:
1348
    // We use `sqe_head` and `sqe_tail` in the same way as liburing:
1348
    // We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`.
1349
    // We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`.
Lines 1460-1466 pub const BufferGroup = struct { Link Here
1460
    /// Pointer to the memory shared by the kernel.
1461
    /// Pointer to the memory shared by the kernel.
1461
    /// `buffers_count` of `io_uring_buf` structures are shared by the kernel.
1462
    /// `buffers_count` of `io_uring_buf` structures are shared by the kernel.
1462
    /// First `io_uring_buf` is overlaid by `io_uring_buf_ring` struct.
1463
    /// First `io_uring_buf` is overlaid by `io_uring_buf_ring` struct.
1463
    br: *align(mem.page_size) linux.io_uring_buf_ring,
1464
    br: *align(heap.min_page_size) linux.io_uring_buf_ring,
1464
    /// Contiguous block of memory of size (buffers_count * buffer_size).
1465
    /// Contiguous block of memory of size (buffers_count * buffer_size).
1465
    buffers: []u8,
1466
    buffers: []u8,
1466
    /// Size of each buffer in buffers.
1467
    /// Size of each buffer in buffers.
Lines 1555-1561 pub const BufferGroup = struct { Link Here
1555
/// `fd` is IO_Uring.fd for which the provided buffer ring is being registered.
1556
/// `fd` is IO_Uring.fd for which the provided buffer ring is being registered.
1556
/// `entries` is the number of entries requested in the buffer ring, must be power of 2.
1557
/// `entries` is the number of entries requested in the buffer ring, must be power of 2.
1557
/// `group_id` is the chosen buffer group ID, unique in IO_Uring.
1558
/// `group_id` is the chosen buffer group ID, unique in IO_Uring.
1558
pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(mem.page_size) linux.io_uring_buf_ring {
1559
pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(heap.min_page_size) linux.io_uring_buf_ring {
1559
    if (entries == 0 or entries > 1 << 15) return error.EntriesNotInRange;
1560
    if (entries == 0 or entries > 1 << 15) return error.EntriesNotInRange;
1560
    if (!std.math.isPowerOfTwo(entries)) return error.EntriesNotPowerOfTwo;
1561
    if (!std.math.isPowerOfTwo(entries)) return error.EntriesNotPowerOfTwo;
1561
1562
Lines 1571-1577 pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(mem.p Link Here
1571
    errdefer posix.munmap(mmap);
1572
    errdefer posix.munmap(mmap);
1572
    assert(mmap.len == mmap_size);
1573
    assert(mmap.len == mmap_size);
1573
1574
1574
    const br: *align(mem.page_size) linux.io_uring_buf_ring = @ptrCast(mmap.ptr);
1575
    const br: *align(heap.min_page_size) linux.io_uring_buf_ring = @ptrCast(mmap.ptr);
1575
    try register_buf_ring(fd, @intFromPtr(br), entries, group_id);
1576
    try register_buf_ring(fd, @intFromPtr(br), entries, group_id);
1576
    return br;
1577
    return br;
1577
}
1578
}
Lines 1613-1621 fn handle_register_buf_ring_result(res: usize) !void { Link Here
1613
}
1614
}
1614
1615
1615
// Unregisters a previously registered shared buffer ring, returned from io_uring_setup_buf_ring.
1616
// Unregisters a previously registered shared buffer ring, returned from io_uring_setup_buf_ring.
1616
pub fn free_buf_ring(fd: posix.fd_t, br: *align(mem.page_size) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
1617
pub fn free_buf_ring(fd: posix.fd_t, br: *align(heap.min_page_size) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
1617
    unregister_buf_ring(fd, group_id) catch {};
1618
    unregister_buf_ring(fd, group_id) catch {};
1618
    var mmap: []align(mem.page_size) u8 = undefined;
1619
    var mmap: []align(heap.min_page_size) u8 = undefined;
1619
    mmap.ptr = @ptrCast(br);
1620
    mmap.ptr = @ptrCast(br);
1620
    mmap.len = entries * @sizeOf(linux.io_uring_buf);
1621
    mmap.len = entries * @sizeOf(linux.io_uring_buf);
1621
    posix.munmap(mmap);
1622
    posix.munmap(mmap);
(-)a/lib/std/os/linux/tls.zig (-3 / +4 lines)
Lines 11-16 Link Here
11
11
12
const std = @import("std");
12
const std = @import("std");
13
const mem = std.mem;
13
const mem = std.mem;
14
const heap = std.heap;
14
const elf = std.elf;
15
const elf = std.elf;
15
const math = std.math;
16
const math = std.math;
16
const assert = std.debug.assert;
17
const assert = std.debug.assert;
Lines 490-496 pub fn prepareArea(area: []u8) usize { Link Here
490
// and LLVM or LLD is not smart enough to lay out the TLS data in a space-conserving way. Anyway, I
491
// and LLVM or LLD is not smart enough to lay out the TLS data in a space-conserving way. Anyway, I
491
// think it's fine because it's less than 3 pages of memory, and putting it in the ELF like this is
492
// think it's fine because it's less than 3 pages of memory, and putting it in the ELF like this is
492
// equivalent to moving the `mmap` call below into the kernel, avoiding syscall overhead.
493
// equivalent to moving the `mmap` call below into the kernel, avoiding syscall overhead.
493
var main_thread_area_buffer: [0x2100]u8 align(mem.page_size) = undefined;
494
var main_thread_area_buffer: [0x2100]u8 align(heap.min_page_size) = undefined;
494
495
495
/// Computes the layout of the static TLS area, allocates the area, initializes all of its fields,
496
/// Computes the layout of the static TLS area, allocates the area, initializes all of its fields,
496
/// and assigns the architecture-specific value to the TP register.
497
/// and assigns the architecture-specific value to the TP register.
Lines 503-509 pub fn initStatic(phdrs: []elf.Phdr) void { Link Here
503
    const area = blk: {
504
    const area = blk: {
504
        // Fast path for the common case where the TLS data is really small, avoid an allocation and
505
        // Fast path for the common case where the TLS data is really small, avoid an allocation and
505
        // use our local buffer.
506
        // use our local buffer.
506
        if (area_desc.alignment <= mem.page_size and area_desc.size <= main_thread_area_buffer.len) {
507
        if (area_desc.alignment <= heap.min_page_size and area_desc.size <= main_thread_area_buffer.len) {
507
            break :blk main_thread_area_buffer[0..area_desc.size];
508
            break :blk main_thread_area_buffer[0..area_desc.size];
508
        }
509
        }
509
510
Lines 517-523 pub fn initStatic(phdrs: []elf.Phdr) void { Link Here
517
        );
518
        );
518
        if (@as(isize, @bitCast(begin_addr)) < 0) @trap();
519
        if (@as(isize, @bitCast(begin_addr)) < 0) @trap();
519
520
520
        const area_ptr: [*]align(mem.page_size) u8 = @ptrFromInt(begin_addr);
521
        const area_ptr: [*]align(heap.min_page_size) u8 = @ptrFromInt(begin_addr);
521
522
522
        // Make sure the slice is correctly aligned.
523
        // Make sure the slice is correctly aligned.
523
        const begin_aligned_addr = alignForward(begin_addr, area_desc.alignment);
524
        const begin_aligned_addr = alignForward(begin_addr, area_desc.alignment);
(-)a/lib/std/os/plan9.zig (-2 / +2 lines)
Lines 367-374 pub fn sbrk(n: usize) usize { Link Here
367
        bloc = @intFromPtr(&ExecData.end);
367
        bloc = @intFromPtr(&ExecData.end);
368
        bloc_max = @intFromPtr(&ExecData.end);
368
        bloc_max = @intFromPtr(&ExecData.end);
369
    }
369
    }
370
    const bl = std.mem.alignForward(usize, bloc, std.mem.page_size);
370
    const bl = std.mem.alignForward(usize, bloc, std.heap.pageSize());
371
    const n_aligned = std.mem.alignForward(usize, n, std.mem.page_size);
371
    const n_aligned = std.mem.alignForward(usize, n, std.heap.pageSize());
372
    if (bl + n_aligned > bloc_max) {
372
    if (bl + n_aligned > bloc_max) {
373
        // we need to allocate
373
        // we need to allocate
374
        if (brk_(bl + n_aligned) < 0) return 0;
374
        if (brk_(bl + n_aligned) < 0) return 0;
(-)a/lib/std/os/windows/kernel32.zig (+5 lines)
Lines 42-47 const WCHAR = windows.WCHAR; Link Here
42
const WIN32_FIND_DATAW = windows.WIN32_FIND_DATAW;
42
const WIN32_FIND_DATAW = windows.WIN32_FIND_DATAW;
43
const Win32Error = windows.Win32Error;
43
const Win32Error = windows.Win32Error;
44
const WORD = windows.WORD;
44
const WORD = windows.WORD;
45
const SYSTEM_INFO = windows.SYSTEM_INFO;
45
46
46
// I/O - Filesystem
47
// I/O - Filesystem
47
48
Lines 670-672 pub extern "kernel32" fn SetLastError( Link Here
670
pub extern "kernel32" fn GetSystemTimeAsFileTime(
671
pub extern "kernel32" fn GetSystemTimeAsFileTime(
671
    lpSystemTimeAsFileTime: *FILETIME,
672
    lpSystemTimeAsFileTime: *FILETIME,
672
) callconv(.winapi) void;
673
) callconv(.winapi) void;
674
675
pub extern "kernel32" fn GetSystemInfo(
676
    lpSystemInfo: *SYSTEM_INFO,
677
) callconv(.winapi) void;
(-)a/lib/std/posix.zig (-9 / +10 lines)
Lines 18-23 const builtin = @import("builtin"); Link Here
18
const root = @import("root");
18
const root = @import("root");
19
const std = @import("std.zig");
19
const std = @import("std.zig");
20
const mem = std.mem;
20
const mem = std.mem;
21
const heap = std.heap;
21
const fs = std.fs;
22
const fs = std.fs;
22
const max_path_bytes = fs.max_path_bytes;
23
const max_path_bytes = fs.max_path_bytes;
23
const maxInt = std.math.maxInt;
24
const maxInt = std.math.maxInt;
Lines 4663-4669 pub const MProtectError = error{ Link Here
4663
    OutOfMemory,
4664
    OutOfMemory,
4664
} || UnexpectedError;
4665
} || UnexpectedError;
4665
4666
4666
pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
4667
pub fn mprotect(memory: []align(heap.min_page_size) u8, protection: u32) MProtectError!void {
4667
    if (native_os == .windows) {
4668
    if (native_os == .windows) {
4668
        const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) {
4669
        const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) {
4669
            0b000 => windows.PAGE_NOACCESS,
4670
            0b000 => windows.PAGE_NOACCESS,
Lines 4728-4748 pub const MMapError = error{ Link Here
4728
/// * SIGSEGV - Attempted write into a region mapped as read-only.
4729
/// * SIGSEGV - Attempted write into a region mapped as read-only.
4729
/// * SIGBUS - Attempted  access to a portion of the buffer that does not correspond to the file
4730
/// * SIGBUS - Attempted  access to a portion of the buffer that does not correspond to the file
4730
pub fn mmap(
4731
pub fn mmap(
4731
    ptr: ?[*]align(mem.page_size) u8,
4732
    ptr: ?[*]align(heap.min_page_size) u8,
4732
    length: usize,
4733
    length: usize,
4733
    prot: u32,
4734
    prot: u32,
4734
    flags: system.MAP,
4735
    flags: system.MAP,
4735
    fd: fd_t,
4736
    fd: fd_t,
4736
    offset: u64,
4737
    offset: u64,
4737
) MMapError![]align(mem.page_size) u8 {
4738
) MMapError![]align(heap.min_page_size) u8 {
4738
    const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap;
4739
    const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap;
4739
    const rc = mmap_sym(ptr, length, prot, @bitCast(flags), fd, @bitCast(offset));
4740
    const rc = mmap_sym(ptr, length, prot, @bitCast(flags), fd, @bitCast(offset));
4740
    const err: E = if (builtin.link_libc) blk: {
4741
    const err: E = if (builtin.link_libc) blk: {
4741
        if (rc != std.c.MAP_FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length];
4742
        if (rc != std.c.MAP_FAILED) return @as([*]align(heap.min_page_size) u8, @ptrCast(@alignCast(rc)))[0..length];
4742
        break :blk @enumFromInt(system._errno().*);
4743
        break :blk @enumFromInt(system._errno().*);
4743
    } else blk: {
4744
    } else blk: {
4744
        const err = errno(rc);
4745
        const err = errno(rc);
4745
        if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length];
4746
        if (err == .SUCCESS) return @as([*]align(heap.min_page_size) u8, @ptrFromInt(rc))[0..length];
4746
        break :blk err;
4747
        break :blk err;
4747
    };
4748
    };
4748
    switch (err) {
4749
    switch (err) {
Lines 4768-4774 pub fn mmap( Link Here
4768
/// Zig's munmap function does not, for two reasons:
4769
/// Zig's munmap function does not, for two reasons:
4769
/// * It violates the Zig principle that resource deallocation must succeed.
4770
/// * It violates the Zig principle that resource deallocation must succeed.
4770
/// * The Windows function, VirtualFree, has this restriction.
4771
/// * The Windows function, VirtualFree, has this restriction.
4771
pub fn munmap(memory: []align(mem.page_size) const u8) void {
4772
pub fn munmap(memory: []align(heap.min_page_size) const u8) void {
4772
    switch (errno(system.munmap(memory.ptr, memory.len))) {
4773
    switch (errno(system.munmap(memory.ptr, memory.len))) {
4773
        .SUCCESS => return,
4774
        .SUCCESS => return,
4774
        .INVAL => unreachable, // Invalid parameters.
4775
        .INVAL => unreachable, // Invalid parameters.
Lines 4782-4788 pub const MSyncError = error{ Link Here
4782
    PermissionDenied,
4783
    PermissionDenied,
4783
} || UnexpectedError;
4784
} || UnexpectedError;
4784
4785
4785
pub fn msync(memory: []align(mem.page_size) u8, flags: i32) MSyncError!void {
4786
pub fn msync(memory: []align(heap.min_page_size) u8, flags: i32) MSyncError!void {
4786
    switch (errno(system.msync(memory.ptr, memory.len, flags))) {
4787
    switch (errno(system.msync(memory.ptr, memory.len, flags))) {
4787
        .SUCCESS => return,
4788
        .SUCCESS => return,
4788
        .PERM => return error.PermissionDenied,
4789
        .PERM => return error.PermissionDenied,
Lines 7093-7099 pub const MincoreError = error{ Link Here
7093
} || UnexpectedError;
7094
} || UnexpectedError;
7094
7095
7095
/// Determine whether pages are resident in memory.
7096
/// Determine whether pages are resident in memory.
7096
pub fn mincore(ptr: [*]align(mem.page_size) u8, length: usize, vec: [*]u8) MincoreError!void {
7097
pub fn mincore(ptr: [*]align(heap.min_page_size) u8, length: usize, vec: [*]u8) MincoreError!void {
7097
    return switch (errno(system.mincore(ptr, length, vec))) {
7098
    return switch (errno(system.mincore(ptr, length, vec))) {
7098
        .SUCCESS => {},
7099
        .SUCCESS => {},
7099
        .AGAIN => error.SystemResources,
7100
        .AGAIN => error.SystemResources,
Lines 7139-7145 pub const MadviseError = error{ Link Here
7139
7140
7140
/// Give advice about use of memory.
7141
/// Give advice about use of memory.
7141
/// This syscall is optional and is sometimes configured to be disabled.
7142
/// This syscall is optional and is sometimes configured to be disabled.
7142
pub fn madvise(ptr: [*]align(mem.page_size) u8, length: usize, advice: u32) MadviseError!void {
7143
pub fn madvise(ptr: [*]align(heap.min_page_size) u8, length: usize, advice: u32) MadviseError!void {
7143
    switch (errno(system.madvise(ptr, length, advice))) {
7144
    switch (errno(system.madvise(ptr, length, advice))) {
7144
        .SUCCESS => return,
7145
        .SUCCESS => return,
7145
        .PERM => return error.PermissionDenied,
7146
        .PERM => return error.PermissionDenied,
(-)a/lib/std/process.zig (-1 / +1 lines)
Lines 1525-1531 pub fn posixGetUserInfo(name: []const u8) !UserInfo { Link Here
1525
        ReadGroupId,
1525
        ReadGroupId,
1526
    };
1526
    };
1527
1527
1528
    var buf: [std.mem.page_size]u8 = undefined;
1528
    var buf: [std.heap.min_page_size]u8 = undefined;
1529
    var name_index: usize = 0;
1529
    var name_index: usize = 0;
1530
    var state = State.Start;
1530
    var state = State.Start;
1531
    var uid: posix.uid_t = 0;
1531
    var uid: posix.uid_t = 0;
(-)a/lib/std/start.zig (-1 / +1 lines)
Lines 576-582 fn expandStackSize(phdrs: []elf.Phdr) void { Link Here
576
        switch (phdr.p_type) {
576
        switch (phdr.p_type) {
577
            elf.PT_GNU_STACK => {
577
            elf.PT_GNU_STACK => {
578
                if (phdr.p_memsz == 0) break;
578
                if (phdr.p_memsz == 0) break;
579
                assert(phdr.p_memsz % std.mem.page_size == 0);
579
                assert(phdr.p_memsz % std.heap.pageSize() == 0);
580
580
581
                // Silently fail if we are unable to get limits.
581
                // Silently fail if we are unable to get limits.
582
                const limits = std.posix.getrlimit(.STACK) catch break;
582
                const limits = std.posix.getrlimit(.STACK) catch break;
(-)a/lib/std/std.zig (+4 lines)
Lines 118-123 pub const Options = struct { Link Here
118
        args: anytype,
118
        args: anytype,
119
    ) void = log.defaultLog,
119
    ) void = log.defaultLog,
120
120
121
    min_page_size: ?usize = null,
122
    max_page_size: ?usize = null,
123
    queryPageSizeFn: fn () usize = heap.defaultQueryPageSize,
124
121
    fmt_max_depth: usize = fmt.default_max_depth,
125
    fmt_max_depth: usize = fmt.default_max_depth,
122
126
123
    cryptoRandomSeed: fn (buffer: []u8) void = @import("crypto/tlcsprng.zig").defaultRandomSeed,
127
    cryptoRandomSeed: fn (buffer: []u8) void = @import("crypto/tlcsprng.zig").defaultRandomSeed,
(-)a/lib/std/zip.zig (-1 / +1 lines)
Lines 162-168 pub fn decompress( Link Here
162
    var total_uncompressed: u64 = 0;
162
    var total_uncompressed: u64 = 0;
163
    switch (method) {
163
    switch (method) {
164
        .store => {
164
        .store => {
165
            var buf: [std.mem.page_size]u8 = undefined;
165
            var buf: [4096]u8 = undefined;
166
            while (true) {
166
            while (true) {
167
                const len = try reader.read(&buf);
167
                const len = try reader.read(&buf);
168
                if (len == 0) break;
168
                if (len == 0) break;
(-)a/src/Package/Fetch.zig (-1 / +1 lines)
Lines 1249-1255 fn unzip(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackResult { Link Here
1249
            .{@errorName(err)},
1249
            .{@errorName(err)},
1250
        ));
1250
        ));
1251
        defer zip_file.close();
1251
        defer zip_file.close();
1252
        var buf: [std.mem.page_size]u8 = undefined;
1252
        var buf: [std.heap.min_page_size]u8 = undefined;
1253
        while (true) {
1253
        while (true) {
1254
            const len = reader.readAll(&buf) catch |err| return f.fail(f.location_tok, try eb.printString(
1254
            const len = reader.readAll(&buf) catch |err| return f.fail(f.location_tok, try eb.printString(
1255
                "read zip stream failed: {s}",
1255
                "read zip stream failed: {s}",

Return to bug 947025