diff --git a/internal/alloc/alloc_unix.go b/internal/alloc/alloc_unix.go index 157b544..203d847 100644 --- a/internal/alloc/alloc_unix.go +++ b/internal/alloc/alloc_unix.go @@ -9,24 +9,31 @@ import ( "golang.org/x/sys/unix" ) -func NewMemory(_, max uint64) experimental.LinearMemory { +func NewMemory(cap, max uint64) experimental.LinearMemory { // Round up to the page size. rnd := uint64(unix.Getpagesize() - 1) - max = (max + rnd) &^ rnd + res := (max + rnd) &^ rnd - if max > math.MaxInt { - // This ensures int(max) overflows to a negative value, + if res > math.MaxInt { + // This ensures int(res) overflows to a negative value, // and unix.Mmap returns EINVAL. - max = math.MaxUint64 + res = math.MaxUint64 } - // Reserve max bytes of address space, to ensure we won't need to move it. + com := res + prot := unix.PROT_READ | unix.PROT_WRITE + if cap < max { // Commit memory eagerly only when cap == max. + com = 0 + prot = unix.PROT_NONE + } + + // Reserve res bytes of address space, to ensure we won't need to move it. // A protected, private, anonymous mapping should not commit memory. - b, err := unix.Mmap(-1, 0, int(max), unix.PROT_NONE, unix.MAP_PRIVATE|unix.MAP_ANON) + b, err := unix.Mmap(-1, 0, int(res), prot, unix.MAP_PRIVATE|unix.MAP_ANON) if err != nil { panic(err) } - return &mmappedMemory{buf: b[:0]} + return &mmappedMemory{buf: b[:com]} } // The slice covers the entire mmapped memory: @@ -52,8 +59,7 @@ func (m *mmappedMemory) Reallocate(size uint64) []byte { return nil } - // Update committed memory. - m.buf = m.buf[:new] + m.buf = m.buf[:new] // Update committed memory. } // Limit returned capacity because bytes beyond // len(m.buf) have not yet been committed.
diff --git a/internal/alloc/alloc_windows.go b/internal/alloc/alloc_windows.go index 2d753e2..ef83f01 100644 --- a/internal/alloc/alloc_windows.go +++ b/internal/alloc/alloc_windows.go @@ -9,20 +9,26 @@ import ( "golang.org/x/sys/windows" ) -func NewMemory(_, max uint64) experimental.LinearMemory { +func NewMemory(cap, max uint64) experimental.LinearMemory { // Round up to the page size. rnd := uint64(windows.Getpagesize() - 1) - max = (max + rnd) &^ rnd + res := (max + rnd) &^ rnd - if max > math.MaxInt { - // This ensures uintptr(max) overflows to a large value, + if res > math.MaxInt { + // This ensures uintptr(res) overflows to a large value, // and windows.VirtualAlloc returns an error. - max = math.MaxUint64 + res = math.MaxUint64 } - // Reserve max bytes of address space, to ensure we won't need to move it. - // This does not commit memory. - r, err := windows.VirtualAlloc(0, uintptr(max), windows.MEM_RESERVE, windows.PAGE_READWRITE) + com := res + kind := windows.MEM_COMMIT | windows.MEM_RESERVE + if cap < max { // Commit memory eagerly only when cap == max. + com = 0 + kind = windows.MEM_RESERVE + } + + // Reserve res bytes of address space, to ensure we won't need to move it. + r, err := windows.VirtualAlloc(0, uintptr(res), uint32(kind), windows.PAGE_READWRITE) if err != nil { panic(err) } @@ -30,8 +36,9 @@ func NewMemory(_, max uint64) experimental.LinearMemory { mem := virtualMemory{addr: r} // SliceHeader, although deprecated, avoids a go vet warning. sh := (*reflect.SliceHeader)(unsafe.Pointer(&mem.buf)) - sh.Cap = int(max) sh.Data = r + sh.Len = int(com) + sh.Cap = int(res) return &mem } @@ -59,8 +66,7 @@ func (m *virtualMemory) Reallocate(size uint64) []byte { return nil } - // Update committed memory. - m.buf = m.buf[:new] + m.buf = m.buf[:new] // Update committed memory. } // Limit returned capacity because bytes beyond // len(m.buf) have not yet been committed.