~mmach/netext73/webkit2gtk

Viewing changes to Source/JavaScriptCore/heap/PreciseAllocation.cpp

  • Committer: mmach
  • Date: 2023-06-16 17:21:37 UTC
  • Revision ID: netbit73@gmail.com-20230616172137-2rqx6yr96ga9g3kp
 
/*
 * Copyright (C) 2016-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PreciseAllocation.h"

#include "AlignedMemoryAllocator.h"
#include "Heap.h"
#include "IsoCellSetInlines.h"
#include "JSCInlines.h"
#include "Operations.h"
#include "SubspaceInlines.h"

namespace JSC {

static inline bool isAlignedForPreciseAllocation(void* memory)
{
    uintptr_t allocatedPointer = bitwise_cast<uintptr_t>(memory);
    return !(allocatedPointer & (PreciseAllocation::alignment - 1));
}

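// Editor's note: the allocator only guarantees halfAlignment (8-byte)
// alignment, while a PreciseAllocation header must sit on a
// PreciseAllocation::alignment boundary (twice halfAlignment, judging by the
// static_assert and the shift below). tryCreate therefore over-allocates by
// halfAlignment and, if needed, shifts the header forward by halfAlignment,
// recording the shift in m_adjustedAlignment so the original base pointer
// can be recovered later (see destroy() and reuseForLowerTier()).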
PreciseAllocation* PreciseAllocation::tryCreate(Heap& heap, size_t size, Subspace* subspace, unsigned indexInSpace)
{
    if (validateDFGDoesGC)
        RELEASE_ASSERT(heap.expectDoesGC());

    size_t adjustedAlignmentAllocationSize = headerSize() + size + halfAlignment;
    static_assert(halfAlignment == 8, "We assume that memory returned by malloc has alignment >= 8.");

    // We must use tryAllocateMemory instead of tryAllocateAlignedMemory since we want to use the "realloc" feature.
    void* space = subspace->alignedMemoryAllocator()->tryAllocateMemory(adjustedAlignmentAllocationSize);
    if (!space)
        return nullptr;

    bool adjustedAlignment = false;
    if (!isAlignedForPreciseAllocation(space)) {
        space = bitwise_cast<void*>(bitwise_cast<uintptr_t>(space) + halfAlignment);
        adjustedAlignment = true;
        ASSERT(isAlignedForPreciseAllocation(space));
    }

    if (scribbleFreeCells())
        scribble(space, size);
    return new (NotNull, space) PreciseAllocation(heap, size, subspace, indexInSpace, adjustedAlignment);
}

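// Editor's note: reallocation goes through the plain realloc-style
// tryReallocateMemory, so the new base pointer may need the halfAlignment
// adjustment even if the old one did not (or vice versa). When the
// adjustment state flips, the header plus cell contents are memmoved by
// halfAlignment; the four possible cases are enumerated in the comment
// inside the function.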
PreciseAllocation* PreciseAllocation::tryReallocate(size_t size, Subspace* subspace)
{
    ASSERT(!isLowerTier());
    size_t adjustedAlignmentAllocationSize = headerSize() + size + halfAlignment;
    static_assert(halfAlignment == 8, "We assume that memory returned by malloc has alignment >= 8.");

    ASSERT(subspace == m_subspace);

    unsigned oldCellSize = m_cellSize;
    bool oldAdjustedAlignment = m_adjustedAlignment;
    void* oldBasePointer = basePointer();

    void* newBasePointer = subspace->alignedMemoryAllocator()->tryReallocateMemory(oldBasePointer, adjustedAlignmentAllocationSize);
    if (!newBasePointer)
        return nullptr;

    PreciseAllocation* newAllocation = bitwise_cast<PreciseAllocation*>(newBasePointer);
    bool newAdjustedAlignment = false;
    if (!isAlignedForPreciseAllocation(newBasePointer)) {
        newAdjustedAlignment = true;
        newAllocation = bitwise_cast<PreciseAllocation*>(bitwise_cast<uintptr_t>(newBasePointer) + halfAlignment);
        ASSERT(isAlignedForPreciseAllocation(static_cast<void*>(newAllocation)));
    }

    // We have 4 patterns.
    // oldAdjustedAlignment = true  newAdjustedAlignment = true  => Do nothing.
    // oldAdjustedAlignment = true  newAdjustedAlignment = false => Shift forward by halfAlignment
    // oldAdjustedAlignment = false newAdjustedAlignment = true  => Shift backward by halfAlignment
    // oldAdjustedAlignment = false newAdjustedAlignment = false => Do nothing.

    if (oldAdjustedAlignment != newAdjustedAlignment) {
        if (oldAdjustedAlignment) {
            ASSERT(!newAdjustedAlignment);
            ASSERT(newAllocation == newBasePointer);
            // Old   [ 8 ][  content  ]
            // Now   [   ][  content  ]
            // New   [  content  ]...
            memmove(newBasePointer, bitwise_cast<char*>(newBasePointer) + halfAlignment, oldCellSize + PreciseAllocation::headerSize());
        } else {
            ASSERT(newAdjustedAlignment);
            ASSERT(newAllocation != newBasePointer);
            ASSERT(newAllocation == bitwise_cast<void*>(bitwise_cast<char*>(newBasePointer) + halfAlignment));
            // Old   [  content  ]
            // Now   [  content  ][   ]
            // New   [ 8 ][  content  ]
            memmove(bitwise_cast<char*>(newBasePointer) + halfAlignment, newBasePointer, oldCellSize + PreciseAllocation::headerSize());
        }
    }

    newAllocation->m_cellSize = size;
    newAllocation->m_adjustedAlignment = newAdjustedAlignment;
    return newAllocation;
}

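// Editor's note: unlike tryCreate, this lower-tier path treats allocation
// failure as fatal (RELEASE_ASSERT rather than returning nullptr), always
// uses index 0 in the space, and stashes the caller-supplied lowerTierIndex
// so the allocation can later be recycled by reuseForLowerTier().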
PreciseAllocation* PreciseAllocation::createForLowerTier(Heap& heap, size_t size, Subspace* subspace, uint8_t lowerTierIndex)
{
    if (validateDFGDoesGC)
        RELEASE_ASSERT(heap.expectDoesGC());

    size_t adjustedAlignmentAllocationSize = headerSize() + size + halfAlignment;
    static_assert(halfAlignment == 8, "We assume that memory returned by malloc has alignment >= 8.");

    void* space = subspace->alignedMemoryAllocator()->tryAllocateMemory(adjustedAlignmentAllocationSize);
    RELEASE_ASSERT(space);

    bool adjustedAlignment = false;
    if (!isAlignedForPreciseAllocation(space)) {
        space = bitwise_cast<void*>(bitwise_cast<uintptr_t>(space) + halfAlignment);
        adjustedAlignment = true;
        ASSERT(isAlignedForPreciseAllocation(space));
    }

    if (scribbleFreeCells())
        scribble(space, size);
    PreciseAllocation* preciseAllocation = new (NotNull, space) PreciseAllocation(heap, size, subspace, 0, adjustedAlignment);
    preciseAllocation->m_lowerTierIndex = lowerTierIndex;
    return preciseAllocation;
}

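// Editor's note: recycles a dead lower-tier allocation. The header is
// destructed and then re-constructed in place over the same memory,
// preserving the recorded alignment adjustment; m_hasValidCell is cleared
// because the recycled allocation holds no live cell until it is handed out
// again.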
PreciseAllocation* PreciseAllocation::reuseForLowerTier()
{
    Heap& heap = *this->heap();
    size_t size = m_cellSize;
    Subspace* subspace = m_subspace;
    bool adjustedAlignment = m_adjustedAlignment;
    uint8_t lowerTierIndex = m_lowerTierIndex;
    void* basePointer = this->basePointer();

    this->~PreciseAllocation();

    void* space = basePointer;
    ASSERT((!isAlignedForPreciseAllocation(basePointer)) == adjustedAlignment);
    if (adjustedAlignment)
        space = bitwise_cast<void*>(bitwise_cast<uintptr_t>(basePointer) + halfAlignment);

    PreciseAllocation* preciseAllocation = new (NotNull, space) PreciseAllocation(heap, size, subspace, 0, adjustedAlignment);
    preciseAllocation->m_lowerTierIndex = lowerTierIndex;
    preciseAllocation->m_hasValidCell = false;
    return preciseAllocation;
}

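// Editor's note: the trailing ASSERT checks that the cell address is
// recognizable as a PreciseAllocation cell; HeapCell::isPreciseAllocation()
// presumably keys off the cell pointer's alignment, which is why the header
// placement above is so careful about halfAlignment.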
PreciseAllocation::PreciseAllocation(Heap& heap, size_t size, Subspace* subspace, unsigned indexInSpace, bool adjustedAlignment)
    : m_indexInSpace(indexInSpace)
    , m_cellSize(size)
    , m_isNewlyAllocated(true)
    , m_hasValidCell(true)
    , m_adjustedAlignment(adjustedAlignment)
    , m_attributes(subspace->attributes())
    , m_subspace(subspace)
    , m_weakSet(heap.vm())
{
    m_isMarked.store(0);
    ASSERT(cell()->isPreciseAllocation());
}

PreciseAllocation::~PreciseAllocation()
{
    if (isOnList())
        remove();
}

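// Editor's note: VM-teardown path. Clearing the mark and newly-allocated
// bits makes the cell look dead to sweep(), so the sweep below is guaranteed
// to run the cell's destructor.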
void PreciseAllocation::lastChanceToFinalize()
{
    m_weakSet.lastChanceToFinalize();
    clearMarked();
    clearNewlyAllocated();
    sweep();
}

void PreciseAllocation::shrink()
{
    m_weakSet.shrink();
}

void PreciseAllocation::visitWeakSet(SlotVisitor& visitor)
{
    m_weakSet.visit(visitor);
}

void PreciseAllocation::reapWeakSet()
{
    return m_weakSet.reap();
}

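// Editor's note: full-collection hook (asserted below). Mark bits are
// cleared at the start of a full GC so that every live cell must be
// re-proven reachable during the new marking cycle.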
void PreciseAllocation::flip()
{
    ASSERT(heap()->collectionScope() == CollectionScope::Full);
    clearMarked();
}

bool PreciseAllocation::isEmpty()
{
    return !isMarked() && m_weakSet.isEmpty() && !isNewlyAllocated();
}

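// Editor's note: sweeping destroys a dead cell at most once. m_hasValidCell
// is cleared after destruction so that a later sweep (for example the one
// triggered by lastChanceToFinalize()) does not run the destructor a second
// time.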
void PreciseAllocation::sweep()
{
    m_weakSet.sweep();

    if (m_hasValidCell && !isLive()) {
        if (m_attributes.destruction == NeedsDestruction)
            m_subspace->destroy(vm(), static_cast<JSCell*>(cell()));
        // We should clear IsoCellSet's bit before actually destroying PreciseAllocation
        // since PreciseAllocation's destruction can be delayed until its WeakSet is cleared.
        if (isLowerTier())
            static_cast<IsoSubspace*>(m_subspace)->clearIsoCellSetBit(this);
        m_hasValidCell = false;
    }
}

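// Editor's note: freeMemory() must receive the allocator's original pointer,
// so the base pointer (which, as reuseForLowerTier() shows, undoes any
// halfAlignment shift recorded in m_adjustedAlignment) is captured before
// the header is torn down.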
void PreciseAllocation::destroy()
{
    AlignedMemoryAllocator* allocator = m_subspace->alignedMemoryAllocator();
    void* basePointer = this->basePointer();
    this->~PreciseAllocation();
    allocator->freeMemory(basePointer);
}

void PreciseAllocation::dump(PrintStream& out) const
{
    out.print(RawPointer(this), ":(cell at ", RawPointer(cell()), " with size ", m_cellSize, " and attributes ", m_attributes, ")");
}

#if ASSERT_ENABLED
void PreciseAllocation::assertValidCell(VM& vm, HeapCell* cell) const
{
    ASSERT(&vm == &this->vm());
    ASSERT(cell == this->cell());
    ASSERT(m_hasValidCell);
}
#endif

} // namespace JSC
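
For reference, a minimal standalone sketch (not part of the WebKit sources; constants and addresses are illustrative) of the alignment fix-up that tryCreate, tryReallocate, and destroy cooperate on:

#include <assert.h>
#include <stdint.h>

int main()
{
    const uintptr_t alignment = 16;    // stands in for PreciseAllocation::alignment
    const uintptr_t halfAlignment = 8; // the alignment malloc already guarantees

    // An 8-byte-aligned allocator can return either a 0 mod 16 or an 8 mod 16 pointer.
    const uintptr_t bases[] = { 0x1000, 0x1008 };
    for (uintptr_t base : bases) {
        uintptr_t header = base;
        bool adjustedAlignment = false;
        if (header & (alignment - 1)) { // misaligned: 8 mod 16
            header += halfAlignment;    // shift onto the next 16-byte boundary
            adjustedAlignment = true;
        }
        assert(!(header & (alignment - 1)));

        // destroy() must free the pointer the allocator returned, so the
        // shift has to be reversible from the recorded flag alone.
        uintptr_t recovered = header - (adjustedAlignment ? halfAlignment : 0);
        assert(recovered == base);
    }
    return 0;
}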