/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=78:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/Assertions.h"

#include "jstypes.h"

#include "js/Utility.h"
#include "gc/Memory.h"

namespace js {
namespace gc {

#if defined(XP_WIN)
#include "jswin.h"
#include <psapi.h>
static size_t AllocationGranularity = 0;

void
InitMemorySubsystem()
{
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);
    if (sysinfo.dwPageSize != PageSize)
        MOZ_CRASH();
    AllocationGranularity = sysinfo.dwAllocationGranularity;
}
void *
MapAlignedPages(size_t size, size_t alignment)
{
    JS_ASSERT(size >= alignment);
    JS_ASSERT(size % alignment == 0);
    JS_ASSERT(size % PageSize == 0);
    JS_ASSERT(alignment % AllocationGranularity == 0);

    /* Special case: If we want allocation alignment, no further work is needed. */
    if (alignment == AllocationGranularity) {
        return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    }
    /*
     * Windows requires that there be a 1:1 mapping between VM allocation
     * and deallocation operations. Therefore, take care here to acquire the
     * final result via one mapping operation. This means unmapping any
     * preliminary result that is not correctly aligned.
     */
    void *p = NULL;
    while (!p) {
        /*
         * Over-allocate in order to map a memory region that is
         * definitely large enough, then deallocate and allocate again the
         * correct size, within the over-sized mapping.
         *
         * Since we're going to unmap the whole thing anyway, the first
         * mapping doesn't have to commit pages.
         */
        p = VirtualAlloc(NULL, size * 2, MEM_RESERVE, PAGE_READWRITE);
        if (!p)
            return NULL;
        void *chunkStart = (void *)(uintptr_t(p) + (alignment - (uintptr_t(p) % alignment)));
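        /*
         * chunkStart rounds p up to the next alignment boundary: e.g. with
         * alignment = 0x10000 and p = 0x00341000, chunkStart is 0x00350000.
         * If p is already aligned this overshoots by one full alignment,
         * which still leaves room for size bytes inside the doubled
         * reservation, since size >= alignment.
         */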
        UnmapPages(p, size * 2);
        p = VirtualAlloc(chunkStart, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

        /* Failure here indicates a race with another thread, so try again. */
    }

    JS_ASSERT(uintptr_t(p) % alignment == 0);
    return p;
}
void
UnmapPages(void *p, size_t size)
{
    JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}
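/*
 * MEM_RESET is a decommit-style hint: it tells the VM system that the page
 * contents are no longer needed without releasing the address range, so
 * Windows may reclaim the physical pages lazily under memory pressure.
 */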
bool
MarkPagesUnused(void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % PageSize == 0);
    LPVOID p2 = VirtualAlloc(p, size, MEM_RESET, PAGE_READWRITE);
    return p2 == p;
}
bool
MarkPagesInUse(void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % PageSize == 0);
    /* Pages reset with MEM_RESET stay committed; nothing to do here. */
    return true;
}
size_t
GetPageFaultCount()
{
    PROCESS_MEMORY_COUNTERS pmc;
    if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
        return 0;
    return pmc.PageFaultCount;
}
#elif defined(XP_OS2)

#define INCL_DOSMEMMGR
#include <os2.h>

#define JS_GC_HAS_MAP_ALIGN 1
/* Cap the recursive alignment strategy so fragmentation can't blow the stack. */
#define OS2_MAX_RECURSIONS 16
void
InitMemorySubsystem()
{
}
void
UnmapPages(void *addr, size_t size)
{
    if (!DosFreeMem(addr))
        return;

    /*
     * If DosFreeMem() failed, 'addr' is probably part of an "expensive"
     * allocation, so calculate the base address and try again.
     */
    unsigned long cb = 2 * size;
    unsigned long flags;
    if (DosQueryMem(addr, &cb, &flags) || cb < size)
        return;

    uintptr_t base = reinterpret_cast<uintptr_t>(addr) - ((2 * size) - cb);
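    /*
     * An "expensive" allocation spans 2 * size bytes in total (see
     * MapAlignedPages below), and DosQueryMem reported in 'cb' the bytes
     * from 'addr' to the end of the block, so the block must have started
     * (2 * size - cb) bytes below 'addr'.
     */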
    DosFreeMem(reinterpret_cast<void*>(base));
}
static void *
MapAlignedPagesRecursively(size_t size, size_t alignment, int& recursions)
{
    if (++recursions >= OS2_MAX_RECURSIONS)
        return NULL;

    void *tmp;
    /* OBJ_ANY requests high memory; if that fails, retry in low memory. */
    if (DosAllocMem(&tmp, size,
                    OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
        JS_ALWAYS_TRUE(DosAllocMem(&tmp, size,
                                   PAG_COMMIT | PAG_READ | PAG_WRITE) == 0);
    }
    size_t offset = reinterpret_cast<uintptr_t>(tmp) & (alignment - 1);
    if (!offset)
        return tmp;
    /*
     * If there are 'filler' bytes of free space above 'tmp', free 'tmp',
     * then reallocate it as a 'filler'-sized block; assuming we're not
     * in a race with another thread, the next recursion should succeed.
     */
    size_t filler = size + alignment - offset;
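    /*
     * Since size is a multiple of alignment (asserted by the caller), a
     * filler-sized block starting at tmp ends exactly on an alignment
     * boundary: tmp + filler = (tmp - offset) + alignment + size. The next
     * allocation, placed immediately above the filler, therefore comes
     * back aligned.
     */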
    unsigned long cb = filler;
    unsigned long flags = 0;
    unsigned long rc = DosQueryMem(&(static_cast<char*>(tmp))[size],
                                   &cb, &flags);
    if (!rc && (flags & PAG_FREE) && cb >= filler) {
        UnmapPages(tmp, 0);
        if (DosAllocMem(&tmp, filler,
                        OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
            JS_ALWAYS_TRUE(DosAllocMem(&tmp, filler,
                                       PAG_COMMIT | PAG_READ | PAG_WRITE) == 0);
        }
    }

    void *p = MapAlignedPagesRecursively(size, alignment, recursions);
    /* Whether or not the recursion succeeded, the filler is no longer needed. */
    UnmapPages(tmp, 0);

    return p;
}
void *
MapAlignedPages(size_t size, size_t alignment)
{
    JS_ASSERT(size >= alignment);
    JS_ASSERT(size % alignment == 0);
    JS_ASSERT(size % PageSize == 0);
    JS_ASSERT(alignment % PageSize == 0);

    int recursions = -1;

    /*
     * Make up to OS2_MAX_RECURSIONS attempts to get an aligned block
     * of the right size by recursively allocating blocks of unaligned
     * free memory until only an aligned allocation is possible.
     */
    void *p = MapAlignedPagesRecursively(size, alignment, recursions);
    if (p)
        return p;
    /*
     * If memory is heavily fragmented, the recursive strategy may fail;
     * instead, use the "expensive" strategy: allocate twice as much
     * as requested and return an aligned address within this block.
     */
    if (DosAllocMem(&p, 2 * size,
                    OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
        JS_ALWAYS_TRUE(DosAllocMem(&p, 2 * size,
                                   PAG_COMMIT | PAG_READ | PAG_WRITE) == 0);
    }

    uintptr_t addr = reinterpret_cast<uintptr_t>(p);
    addr = (addr + (alignment - 1)) & ~(alignment - 1);
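    /*
     * Standard round-up trick for power-of-two alignments: e.g. with
     * alignment = 0x10000, addr = 0x0020f300 becomes 0x00210000, while an
     * already aligned addr is unchanged. Doubling the allocation above
     * guarantees that size bytes remain past the rounded address.
     */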
    return reinterpret_cast<void *>(addr);
}
bool
MarkPagesUnused(void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % PageSize == 0);
    return true;
}
bool
MarkPagesInUse(void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % PageSize == 0);
    return true;
}

size_t
GetPageFaultCount()
{
    return 0;
}
#elif defined(SOLARIS)

#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_NOSYNC
# define MAP_NOSYNC 0
#endif
void
InitMemorySubsystem()
{
}
void *
MapAlignedPages(size_t size, size_t alignment)
{
    JS_ASSERT(size >= alignment);
    JS_ASSERT(size % alignment == 0);
    JS_ASSERT(size % PageSize == 0);
    JS_ASSERT(alignment % PageSize == 0);

    int prot = PROT_READ | PROT_WRITE;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
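    /*
     * With MAP_ALIGN, Solaris interprets the address argument as the
     * requested alignment rather than as a placement hint, so the kernel
     * returns a suitably aligned mapping directly.
     */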
    void *p = mmap((caddr_t)alignment, size, prot, flags, -1, 0);
    if (p == MAP_FAILED)
        return NULL;
    return p;
}

void
UnmapPages(void *p, size_t size)
{
    JS_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
}
bool
MarkPagesUnused(void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % PageSize == 0);
    return true;
}
bool
MarkPagesInUse(void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % PageSize == 0);
    return true;
}

size_t
GetPageFaultCount()
{
    return 0;
}
#elif defined(XP_UNIX) || defined(XP_MACOSX) || defined(DARWIN)

#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <unistd.h>
void
InitMemorySubsystem()
{
    /* The compile-time PageSize constant must match the system page size. */
    if (size_t(sysconf(_SC_PAGESIZE)) != PageSize)
        MOZ_CRASH();
}
void *
MapAlignedPages(size_t size, size_t alignment)
{
    JS_ASSERT(size >= alignment);
    JS_ASSERT(size % alignment == 0);
    JS_ASSERT(size % PageSize == 0);
    JS_ASSERT(alignment % PageSize == 0);

    int prot = PROT_READ | PROT_WRITE;
    int flags = MAP_PRIVATE | MAP_ANON;
    /* Special case: If we want page alignment, no further work is needed. */
    if (alignment == PageSize) {
        return mmap(NULL, size, prot, flags, -1, 0);
    }

    /* Overallocate and unmap the region's edges. */
    size_t reqSize = Min(size + 2 * alignment, 2 * size);
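    /*
     * Either bound leaves room for an aligned size-byte chunk after
     * trimming at most one alignment's worth of slack off the front:
     * size + 2 * alignment does trivially, and 2 * size does because
     * size >= alignment. Min() keeps the temporary reservation small.
     */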
    void *region = mmap(NULL, reqSize, prot, flags, -1, 0);
    if (region == MAP_FAILED)
        return NULL;

    uintptr_t regionEnd = uintptr_t(region) + reqSize;
    uintptr_t offset = uintptr_t(region) % alignment;
    JS_ASSERT(offset < reqSize - size);

    void *front = (void *)(uintptr_t(region) + (alignment - offset));
    void *end = (void *)(uintptr_t(front) + size);
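    /*
     * Trim the unaligned head and the unused tail so that exactly
     * [front, front + size) remains mapped: e.g. with alignment = 0x100000
     * and region = 0x7f0000321000, front is 0x7f0000400000.
     */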
    JS_ALWAYS_TRUE(0 == munmap(region, alignment - offset));
    if (uintptr_t(end) != regionEnd)
        JS_ALWAYS_TRUE(0 == munmap(end, regionEnd - uintptr_t(end)));

    JS_ASSERT(uintptr_t(front) % alignment == 0);
    return front;
}
void
UnmapPages(void *p, size_t size)
{
    JS_ALWAYS_TRUE(0 == munmap(p, size));
}
bool
MarkPagesUnused(void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % PageSize == 0);
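    /*
     * MADV_DONTNEED lets the kernel discard the backing pages; for an
     * anonymous mapping the next touch faults in fresh zero-filled pages.
     */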
    int result = madvise(p, size, MADV_DONTNEED);
    return result != -1;
}

bool
MarkPagesInUse(void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % PageSize == 0);
    return true;
}
size_t
GetPageFaultCount()
{
    struct rusage usage;
    int err = getrusage(RUSAGE_SELF, &usage);
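    /*
     * ru_minflt counts soft faults served without I/O; ru_majflt counts
     * hard faults that had to read a page back in.
     */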
    if (err)
        return 0;
    return usage.ru_minflt + usage.ru_majflt;
}
#else
#error "Memory mapping functions are not defined for your OS."
#endif

} /* namespace gc */
} /* namespace js */