~evarlast/ubuntu/utopic/mongodb/upstart-workaround-debian-bug-718702

Viewing changes to src/third_party/v8/src/incremental-marking.cc

  • Committer: Package Import Robot
  • Author(s): James Page, James Page, Robie Basak
  • Date: 2013-05-29 17:44:42 UTC
  • mfrom: (44.1.7 sid)
  • Revision ID: package-import@ubuntu.com-20130529174442-z0a4qmoww4y0t458
Tags: 1:2.4.3-1ubuntu1
[ James Page ]
* Merge from Debian unstable, remaining changes:
  - Enable SSL support:
    + d/control: Add libssl-dev to BD's.
    + d/rules: Enabled --ssl option.
    + d/mongodb.conf: Add example SSL configuration options.
  - d/mongodb-server.mongodb.upstart: Add upstart configuration.
  - d/rules: Don't strip binaries during scons build for Ubuntu.
  - d/control: Add armhf to target archs.
  - d/p/SConscript.client.patch: fixup install of client libraries.
  - d/p/0010-install-libs-to-usr-lib-not-usr-lib64-Closes-588557.patch:
    Install libraries to lib not lib64.
* Dropped changes:
  - d/p/arm-support.patch: Included in Debian.
  - d/p/double-alignment.patch: Included in Debian.
  - d/rules,control: Debian also builds with available system libraries
    now.
* Fix FTBFS due to gcc and boost upgrades in saucy:
  - d/p/0008-ignore-unused-local-typedefs.patch: Add -Wno-unused-local-typedefs
    to unbreak building with g++-4.8 (see the illustrative snippet after
    this list).
  - d/p/0009-boost-1.53.patch: Fixup signed/unsigned casting issue.
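
For context, a minimal illustrative sketch (not taken from the 0008 patch; the
file name and function are hypothetical) of the kind of code that g++ 4.8's new
-Wunused-local-typedefs warning rejects under -Wall -Werror, which is why the
suppression flag is needed:

    // Hypothetical demo.cc; compile with: g++-4.8 -Wall -Werror -c demo.cc
    #include <cstddef>

    void check_layout() {
      // Older Boost assertion-style macros expand to a local typedef that is
      // never referenced again; g++ 4.8 reports
      // "typedef 'size_check' locally defined but not used".
      typedef char size_check[sizeof(std::size_t) >= 4 ? 1 : -1];
    }

    int main() {
      check_layout();
      return 0;
    }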

[ Robie Basak ]
* d/p/0011-Use-a-signed-char-to-store-BSONType-enumerations.patch: Fix
  build failure on ARM caused by a char cast that loses signedness (plain
  char is unsigned on ARM); see the illustrative snippet below.
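
For context, an illustrative sketch (not the patch itself; the enum and names
are hypothetical stand-ins for MongoDB's BSONType) of why storing BSON type
codes in a plain char misbehaves where char is unsigned, as it is by default
on ARM:

    #include <iostream>

    // Stand-in for BSONType: the real enumeration also starts at -1 (MinKey).
    enum BsonTypeLike { MinKeyLike = -1, EooLike = 0, NumberDoubleLike = 1 };

    int main() {
      char plain = static_cast<char>(MinKeyLike);                 // 255 where char is unsigned
      signed char fixed = static_cast<signed char>(MinKeyLike);   // -1 on every architecture

      // The first comparison is false where char is unsigned (e.g. ARM) and
      // true where it is signed (e.g. x86); the second is true everywhere,
      // which is why the patch stores the enum in a signed char.
      std::cout << (plain == MinKeyLike) << "\n";
      std::cout << (fixed == MinKeyLike) << "\n";
      return 0;
    }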

// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "incremental-marking.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      marker_(this, heap->mark_compact_collector()),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      allocation_marking_factor_(0),
      allocated_(0),
      no_marking_scope_depth_(0) {
}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object* value,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());

  // Fast cases should already be covered by RecordWriteStub.
  ASSERT(value->IsHeapObject());
  ASSERT(!value->IsHeapNumber());
  ASSERT(!value->IsString() ||
         value->IsConsString() ||
         value->IsSlicedString());
  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));

  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(!marking->is_compacting_);
  marking->RecordWrite(obj, NULL, value);
}


void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                          Object** slot,
                                                          Isolate* isolate) {
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(marking->is_compacting_);
  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                Object** slot,
                                                Code* value) {
  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white.  It will be scanned if survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();

    table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);

    table_.Register(kVisitJSFunction, &VisitJSFunction);

    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    if (target->NonFailureIsHeapObject()) {
      heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
      MarkObject(heap, target);
    }
  }

  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
        && (target->ic_age() != heap->global_ic_age())) {
      IC::Clear(rinfo->pc());
      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    }
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
    MarkObject(heap, target);
  }

  static void VisitCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    Code* code = reinterpret_cast<Code*>(object);
    code->CodeIterateBody<IncrementalMarkingMarkingVisitor>(heap);
  }

  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    VisitPointers(heap,
                  HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
                  HeapObject::RawField(object, JSWeakMap::kSize));
  }

  static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
    if (shared->ic_age() != heap->global_ic_age()) {
      shared->ResetForNewContext(heap->global_ic_age());
    }
    FixedBodyVisitor<IncrementalMarkingMarkingVisitor,
                     SharedFunctionInfo::BodyDescriptor,
                     void>::Visit(map, object);
  }

  static inline void VisitJSFunction(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    // Iterate over all fields in the body but take care in dealing with
    // the code entry and skip weak fields.
    VisitPointers(heap,
                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
    VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
    VisitPointers(heap,
                  HeapObject::RawField(object,
                      JSFunction::kCodeEntryOffset + kPointerSize),
                  HeapObject::RawField(object,
                      JSFunction::kNonWeakFieldsEndOffset));
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->NonFailureIsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }
};


class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingRootMarkingVisitor(Heap* heap,
                                       IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
          MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                                heap_object->Size());
      }
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  Heap* heap_;
  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif

  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
      !Serializer::enabled() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}

void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}


void IncrementalMarking::Start() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(state_ == STOPPED);

  ResetStepCounters();

  if (heap_->old_pointer_space()->IsSweepingComplete() &&
      heap_->old_data_space()->IsSweepingComplete()) {
    StartMarking(ALLOW_COMPACTION);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

#ifdef DEBUG
  // Marking bits are cleared by the sweeper.
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
  longest_step_ = 0.0;
}


void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Hurry\n");
      start = OS::TimeCurrentMillis();
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    while (!marking_deque_.IsEmpty()) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) {
        continue;
      } else if (map == global_context_map) {
        // Global contexts have weak fields.
        IncrementalMarkingMarkingVisitor::VisitGlobalContext(map, obj);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through map's
        // transitions and back pointers in a special way to make these links
        // weak.  Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          IncrementalMarkingMarkingVisitor::VisitPointers(
              heap_,
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else {
        MarkBit map_mark_bit = Marking::MarkBitFrom(map);
        if (Marking::IsWhite(map_mark_bit)) {
          WhiteToGreyAndPush(map, map_mark_bit);
        }
        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
      }

      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(!Marking::IsBlack(mark_bit));
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
    }
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking) {
      double end = OS::TimeCurrentMillis();
      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
             static_cast<int>(end - start));
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->global_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold) return;

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
  bytes_scanned_ += bytes_to_process;

  double start = 0;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
      bytes_scanned_ = 0;
      StartMarking(PREVENT_COMPACTION);
    }
  } else if (state_ == MARKING) {
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) continue;

      int size = obj->SizeFromMap(map);
      bytes_to_process -= size;
      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
      if (Marking::IsWhite(map_mark_bit)) {
        WhiteToGreyAndPush(map, map_mark_bit);
      }

      // TODO(gc) switch to static visitor instead of normal visitor.
      if (map == global_context_map) {
        // Global contexts have weak fields.
        Context* ctx = Context::cast(obj);

        // We will mark cache black with a separate pass
        // when we finish marking.
        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());

        IncrementalMarkingMarkingVisitor::VisitGlobalContext(map, ctx);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through map's
        // transitions and back pointers in a special way to make these links
        // weak.  Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          IncrementalMarkingMarkingVisitor::VisitPointers(
              heap_,
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else {
        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
      }

      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
      Marking::MarkBlack(obj_mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
    }
    if (marking_deque_.IsEmpty()) MarkingComplete(action);
  }

  allocated_ = 0;

  steps_count_++;
  steps_count_since_last_gc_++;

  bool speed_up = false;

  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (allocation_marking_factor_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = allocation_marking_factor_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
      allocation_marking_factor_ = static_cast<int>(
          Min(kMaxAllocationMarkingFactor,
              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", allocation_marking_factor_);
      }
    }
  }

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
  bytes_scanned_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

} }  // namespace v8::internal