~james-page/ubuntu/precise/nodejs/0.6.x-merge

« back to all changes in this revision

Viewing changes to deps/v8/src/v8threads.cc

  • Committer: James Page
  • Date: 2012-03-30 12:09:16 UTC
  • mfrom: (7.1.23 sid)
  • Revision ID: james.page@canonical.com-20120330120916-40hfu9o00qr5t87b
* Merge from Debian unstable:
  - New upstream release (LP: #892034).
  - This package is x86/arm only. Update control to match
  - d/patches/2009_increase_test_timeout.patch: Increased default test
    timeout from 60 to 120 seconds to support reliable execution of all
    tests on armhf/armel architectures.
  - d/patches/2005_expected_failing_tests.patch: 
  - Allow racy tests to fail: test-cluster-kill-workers,
      test-child-process-fork2
    - Allow test-fs-watch to fail as LP buildds don't support
      inotify.
    - Revert all other Ubuntu changes as no longer required.
* Update Standards-Version to 3.9.3.
* Patch wscript to enable build on mipsel arch, libv8 being available.
  Upstream does not support that arch, failure expected.
* test-cluster-kill-workers is expected to fail on armhf;
  Bug#660802 will be closed when the test passes.
* test-buffer is expected to fail on armel;
  Bug#660800 will be closed when the test passes.
* Add epoch to dependency on libev >= 1:4.11. Closes: bug#658441.
* Remove tools/doc because node-doc-generator has no license for now.
* Add copyright for doc/sh* files (shjs).
* source.lintian-overrides: override source-contains-waf-binary for
  tools/node-waf, as it is simply not the case here.
* test-stream-pipe-multi expected to time out sometimes on busy builds.
* New upstream release.
* Remove upstream patches.
* test-dgram-pingpong expected to time out; the test itself is buggy.
* test-buffer expected to fail on armel, allow building package to make
  it easier to find the cause of the failure.
  Closes: bug#639636.
* Expect tests dgram-multicast and broadcast to fail.
  debian/patches/2005_expected_failing_tests.patch
* Drop dpkg-source local-options: Defaults since dpkg-source 1.16.1.
* New upstream release.
* Depend on libev-dev 4.11, see bug#657080.
* Bump dependency on openssl to 1.0.0g.
* Remove useless uv_loop_refcount from libuv,
  refreshed 2009_fix_shared_ev.patch.
* Apply two upstream patches landed after the 0.6.10 release,
  to fix debugger repl and http client.
* New upstream release. Closes: bug#650661
* Repackage to remove non-dfsg font files ./deps/npm/html/*/*.ttf
* Remove unneeded bundled dependencies: lighter tarball,
  debian/copyright is easier to maintain.
* Drop unneeded build-dependency on scons.
* Depend on zlib1g, libc-ares, libev.
  Patches done to support building with those shared libs.
* Fix DEB_UPSTREAM_URL in debian/rules, and debian/watch.
* nodejs.pc file for pkgconfig is no longer available.
* Build-depend on procps package, a test is using /bin/ps.
* Refreshed debian/patches/2005_expected_failing_tests.patch,
  only for tests that need networking.

Show diffs side-by-side

added added

removed removed

Lines of Context:
36
36
 
37
37
namespace v8 {
38
38
 
39
 
static internal::Thread::LocalStorageKey thread_state_key =
40
 
    internal::Thread::CreateThreadLocalKey();
41
 
static internal::Thread::LocalStorageKey thread_id_key =
42
 
    internal::Thread::CreateThreadLocalKey();
43
 
 
44
39
 
45
40
// Track whether this V8 instance has ever called v8::Locker. This allows the
46
41
// API code to verify that the lock is always held when V8 is being entered.
48
43
 
49
44
 
50
45
// Constructor for the Locker object.  Once the Locker is constructed the
51
 
// current thread will be guaranteed to have the big V8 lock.
52
 
Locker::Locker() : has_lock_(false), top_level_(true) {
 
46
// current thread will be guaranteed to have the lock for a given isolate.
 
47
Locker::Locker(v8::Isolate* isolate)
 
48
  : has_lock_(false),
 
49
    top_level_(true),
 
50
    isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
 
51
  if (isolate_ == NULL) {
 
52
    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
 
53
  }
53
54
  // Record that the Locker has been used at least once.
54
55
  active_ = true;
55
56
  // Get the big lock if necessary.
56
 
  if (!internal::ThreadManager::IsLockedByCurrentThread()) {
57
 
    internal::ThreadManager::Lock();
 
57
  if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
 
58
    isolate_->thread_manager()->Lock();
58
59
    has_lock_ = true;
 
60
 
59
61
    // Make sure that V8 is initialized.  Archiving of threads interferes
60
62
    // with deserialization by adding additional root pointers, so we must
61
63
    // initialize here, before anyone can call ~Locker() or Unlocker().
62
 
    if (!internal::V8::IsRunning()) {
 
64
    if (!isolate_->IsInitialized()) {
 
65
      isolate_->Enter();
63
66
      V8::Initialize();
 
67
      isolate_->Exit();
64
68
    }
 
69
 
65
70
    // This may be a locker within an unlocker in which case we have to
66
71
    // get the saved state for this thread and restore it.
67
 
    if (internal::ThreadManager::RestoreThread()) {
 
72
    if (isolate_->thread_manager()->RestoreThread()) {
68
73
      top_level_ = false;
69
74
    } else {
70
 
      internal::ExecutionAccess access;
71
 
      internal::StackGuard::ClearThread(access);
72
 
      internal::StackGuard::InitThread(access);
73
 
    }
74
 
  }
75
 
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
76
 
 
77
 
  // Make sure this thread is assigned a thread id.
78
 
  internal::ThreadManager::AssignId();
79
 
}
80
 
 
81
 
 
82
 
bool Locker::IsLocked() {
83
 
  return internal::ThreadManager::IsLockedByCurrentThread();
 
75
      internal::ExecutionAccess access(isolate_);
 
76
      isolate_->stack_guard()->ClearThread(access);
 
77
      isolate_->stack_guard()->InitThread(access);
 
78
    }
 
79
    if (isolate_->IsDefaultIsolate()) {
 
80
      // This only enters if not yet entered.
 
81
      internal::Isolate::EnterDefaultIsolate();
 
82
    }
 
83
  }
 
84
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
 
85
}
 
86
 
 
87
 
 
88
bool Locker::IsLocked(v8::Isolate* isolate) {
 
89
  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
 
90
  if (internal_isolate == NULL) {
 
91
    internal_isolate = i::Isolate::GetDefaultIsolateForLocking();
 
92
  }
 
93
  return internal_isolate->thread_manager()->IsLockedByCurrentThread();
 
94
}
 
95
 
 
96
 
 
97
bool Locker::IsActive() {
 
98
  return active_;
84
99
}
85
100
 
86
101
 
87
102
Locker::~Locker() {
88
 
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
 
103
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
89
104
  if (has_lock_) {
 
105
    if (isolate_->IsDefaultIsolate()) {
 
106
      isolate_->Exit();
 
107
    }
90
108
    if (top_level_) {
91
 
      internal::ThreadManager::FreeThreadResources();
 
109
      isolate_->thread_manager()->FreeThreadResources();
92
110
    } else {
93
 
      internal::ThreadManager::ArchiveThread();
 
111
      isolate_->thread_manager()->ArchiveThread();
94
112
    }
95
 
    internal::ThreadManager::Unlock();
 
113
    isolate_->thread_manager()->Unlock();
96
114
  }
97
115
}
98
116
 
99
117
 
100
 
Unlocker::Unlocker() {
101
 
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
102
 
  internal::ThreadManager::ArchiveThread();
103
 
  internal::ThreadManager::Unlock();
 
118
Unlocker::Unlocker(v8::Isolate* isolate)
 
119
  : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
 
120
  if (isolate_ == NULL) {
 
121
    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
 
122
  }
 
123
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
 
124
  if (isolate_->IsDefaultIsolate()) {
 
125
    isolate_->Exit();
 
126
  }
 
127
  isolate_->thread_manager()->ArchiveThread();
 
128
  isolate_->thread_manager()->Unlock();
104
129
}
105
130
 
106
131
 
107
132
Unlocker::~Unlocker() {
108
 
  ASSERT(!internal::ThreadManager::IsLockedByCurrentThread());
109
 
  internal::ThreadManager::Lock();
110
 
  internal::ThreadManager::RestoreThread();
 
133
  ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
 
134
  isolate_->thread_manager()->Lock();
 
135
  isolate_->thread_manager()->RestoreThread();
 
136
  if (isolate_->IsDefaultIsolate()) {
 
137
    isolate_->Enter();
 
138
  }
111
139
}
112
140
 
113
141
 
125
153
 
126
154
 
127
155
bool ThreadManager::RestoreThread() {
 
156
  ASSERT(IsLockedByCurrentThread());
128
157
  // First check whether the current thread has been 'lazily archived', ie
129
158
  // not archived at all.  If that is the case we put the state storage we
130
159
  // had prepared back in the free list, since we didn't need it after all.
131
 
  if (lazily_archived_thread_.IsSelf()) {
132
 
    lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
133
 
    ASSERT(Thread::GetThreadLocal(thread_state_key) ==
134
 
           lazily_archived_thread_state_);
135
 
    lazily_archived_thread_state_->set_id(kInvalidId);
 
160
  if (lazily_archived_thread_.Equals(ThreadId::Current())) {
 
161
    lazily_archived_thread_ = ThreadId::Invalid();
 
162
    Isolate::PerIsolateThreadData* per_thread =
 
163
        isolate_->FindPerThreadDataForThisThread();
 
164
    ASSERT(per_thread != NULL);
 
165
    ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
 
166
    lazily_archived_thread_state_->set_id(ThreadId::Invalid());
136
167
    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
137
168
    lazily_archived_thread_state_ = NULL;
138
 
    Thread::SetThreadLocal(thread_state_key, NULL);
 
169
    per_thread->set_thread_state(NULL);
139
170
    return true;
140
171
  }
141
172
 
142
173
  // Make sure that the preemption thread cannot modify the thread state while
143
174
  // it is being archived or restored.
144
 
  ExecutionAccess access;
 
175
  ExecutionAccess access(isolate_);
145
176
 
146
177
  // If there is another thread that was lazily archived then we have to really
147
178
  // archive it now.
148
179
  if (lazily_archived_thread_.IsValid()) {
149
180
    EagerlyArchiveThread();
150
181
  }
151
 
  ThreadState* state =
152
 
      reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
153
 
  if (state == NULL) {
 
182
  Isolate::PerIsolateThreadData* per_thread =
 
183
      isolate_->FindPerThreadDataForThisThread();
 
184
  if (per_thread == NULL || per_thread->thread_state() == NULL) {
154
185
    // This is a new thread.
155
 
    StackGuard::InitThread(access);
 
186
    isolate_->stack_guard()->InitThread(access);
156
187
    return false;
157
188
  }
 
189
  ThreadState* state = per_thread->thread_state();
158
190
  char* from = state->data();
159
 
  from = HandleScopeImplementer::RestoreThread(from);
160
 
  from = Top::RestoreThread(from);
161
 
  from = Relocatable::RestoreState(from);
 
191
  from = isolate_->handle_scope_implementer()->RestoreThread(from);
 
192
  from = isolate_->RestoreThread(from);
 
193
  from = Relocatable::RestoreState(isolate_, from);
162
194
#ifdef ENABLE_DEBUGGER_SUPPORT
163
 
  from = Debug::RestoreDebug(from);
 
195
  from = isolate_->debug()->RestoreDebug(from);
164
196
#endif
165
 
  from = StackGuard::RestoreStackGuard(from);
166
 
  from = RegExpStack::RestoreStack(from);
167
 
  from = Bootstrapper::RestoreState(from);
168
 
  Thread::SetThreadLocal(thread_state_key, NULL);
 
197
  from = isolate_->stack_guard()->RestoreStackGuard(from);
 
198
  from = isolate_->regexp_stack()->RestoreStack(from);
 
199
  from = isolate_->bootstrapper()->RestoreState(from);
 
200
  per_thread->set_thread_state(NULL);
169
201
  if (state->terminate_on_restore()) {
170
 
    StackGuard::TerminateExecution();
 
202
    isolate_->stack_guard()->TerminateExecution();
171
203
    state->set_terminate_on_restore(false);
172
204
  }
173
 
  state->set_id(kInvalidId);
 
205
  state->set_id(ThreadId::Invalid());
174
206
  state->Unlink();
175
207
  state->LinkInto(ThreadState::FREE_LIST);
176
208
  return true;
179
211
 
180
212
void ThreadManager::Lock() {
181
213
  mutex_->Lock();
182
 
  mutex_owner_.Initialize(ThreadHandle::SELF);
 
214
  mutex_owner_ = ThreadId::Current();
183
215
  ASSERT(IsLockedByCurrentThread());
184
216
}
185
217
 
186
218
 
187
219
void ThreadManager::Unlock() {
188
 
  mutex_owner_.Initialize(ThreadHandle::INVALID);
 
220
  mutex_owner_ = ThreadId::Invalid();
189
221
  mutex_->Unlock();
190
222
}
191
223
 
192
224
 
193
225
static int ArchiveSpacePerThread() {
194
226
  return HandleScopeImplementer::ArchiveSpacePerThread() +
195
 
                            Top::ArchiveSpacePerThread() +
 
227
                        Isolate::ArchiveSpacePerThread() +
196
228
#ifdef ENABLE_DEBUGGER_SUPPORT
197
229
                          Debug::ArchiveSpacePerThread() +
198
230
#endif
203
235
}
204
236
 
205
237
 
206
 
ThreadState* ThreadState::free_anchor_ = new ThreadState();
207
 
ThreadState* ThreadState::in_use_anchor_ = new ThreadState();
208
 
 
209
 
 
210
 
ThreadState::ThreadState() : id_(ThreadManager::kInvalidId),
211
 
                             terminate_on_restore_(false),
212
 
                             next_(this), previous_(this) {
 
238
ThreadState::ThreadState(ThreadManager* thread_manager)
 
239
    : id_(ThreadId::Invalid()),
 
240
      terminate_on_restore_(false),
 
241
      next_(this),
 
242
      previous_(this),
 
243
      thread_manager_(thread_manager) {
213
244
}
214
245
 
215
246
 
226
257
 
227
258
void ThreadState::LinkInto(List list) {
228
259
  ThreadState* flying_anchor =
229
 
      list == FREE_LIST ? free_anchor_ : in_use_anchor_;
 
260
      list == FREE_LIST ? thread_manager_->free_anchor_
 
261
                        : thread_manager_->in_use_anchor_;
230
262
  next_ = flying_anchor->next_;
231
263
  previous_ = flying_anchor;
232
264
  flying_anchor->next_ = this;
234
266
}
235
267
 
236
268
 
237
 
ThreadState* ThreadState::GetFree() {
 
269
ThreadState* ThreadManager::GetFreeThreadState() {
238
270
  ThreadState* gotten = free_anchor_->next_;
239
271
  if (gotten == free_anchor_) {
240
 
    ThreadState* new_thread_state = new ThreadState();
 
272
    ThreadState* new_thread_state = new ThreadState(this);
241
273
    new_thread_state->AllocateSpace();
242
274
    return new_thread_state;
243
275
  }
246
278
 
247
279
 
248
280
// Gets the first in the list of archived threads.
249
 
ThreadState* ThreadState::FirstInUse() {
 
281
ThreadState* ThreadManager::FirstThreadStateInUse() {
250
282
  return in_use_anchor_->Next();
251
283
}
252
284
 
253
285
 
254
286
ThreadState* ThreadState::Next() {
255
 
  if (next_ == in_use_anchor_) return NULL;
 
287
  if (next_ == thread_manager_->in_use_anchor_) return NULL;
256
288
  return next_;
257
289
}
258
290
 
260
292
// Thread ids must start with 1, because in TLS having thread id 0 can't
261
293
// be distinguished from not having a thread id at all (since NULL is
262
294
// defined as 0.)
263
 
int ThreadManager::last_id_ = 0;
264
 
Mutex* ThreadManager::mutex_ = OS::CreateMutex();
265
 
ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID);
266
 
ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID);
267
 
ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL;
 
295
ThreadManager::ThreadManager()
 
296
    : mutex_(OS::CreateMutex()),
 
297
      mutex_owner_(ThreadId::Invalid()),
 
298
      lazily_archived_thread_(ThreadId::Invalid()),
 
299
      lazily_archived_thread_state_(NULL),
 
300
      free_anchor_(NULL),
 
301
      in_use_anchor_(NULL) {
 
302
  free_anchor_ = new ThreadState(this);
 
303
  in_use_anchor_ = new ThreadState(this);
 
304
}
 
305
 
 
306
 
 
307
ThreadManager::~ThreadManager() {
 
308
  delete mutex_;
 
309
  delete free_anchor_;
 
310
  delete in_use_anchor_;
 
311
}
268
312
 
269
313
 
270
314
void ThreadManager::ArchiveThread() {
271
 
  ASSERT(!lazily_archived_thread_.IsValid());
 
315
  ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
272
316
  ASSERT(!IsArchived());
273
 
  ThreadState* state = ThreadState::GetFree();
 
317
  ASSERT(IsLockedByCurrentThread());
 
318
  ThreadState* state = GetFreeThreadState();
274
319
  state->Unlink();
275
 
  Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state));
276
 
  lazily_archived_thread_.Initialize(ThreadHandle::SELF);
 
320
  Isolate::PerIsolateThreadData* per_thread =
 
321
      isolate_->FindOrAllocatePerThreadDataForThisThread();
 
322
  per_thread->set_thread_state(state);
 
323
  lazily_archived_thread_ = ThreadId::Current();
277
324
  lazily_archived_thread_state_ = state;
278
 
  ASSERT(state->id() == kInvalidId);
 
325
  ASSERT(state->id().Equals(ThreadId::Invalid()));
279
326
  state->set_id(CurrentId());
280
 
  ASSERT(state->id() != kInvalidId);
 
327
  ASSERT(!state->id().Equals(ThreadId::Invalid()));
281
328
}
282
329
 
283
330
 
284
331
void ThreadManager::EagerlyArchiveThread() {
 
332
  ASSERT(IsLockedByCurrentThread());
285
333
  ThreadState* state = lazily_archived_thread_state_;
286
334
  state->LinkInto(ThreadState::IN_USE_LIST);
287
335
  char* to = state->data();
288
336
  // Ensure that data containing GC roots are archived first, and handle them
289
337
  // in ThreadManager::Iterate(ObjectVisitor*).
290
 
  to = HandleScopeImplementer::ArchiveThread(to);
291
 
  to = Top::ArchiveThread(to);
292
 
  to = Relocatable::ArchiveState(to);
 
338
  to = isolate_->handle_scope_implementer()->ArchiveThread(to);
 
339
  to = isolate_->ArchiveThread(to);
 
340
  to = Relocatable::ArchiveState(isolate_, to);
293
341
#ifdef ENABLE_DEBUGGER_SUPPORT
294
 
  to = Debug::ArchiveDebug(to);
 
342
  to = isolate_->debug()->ArchiveDebug(to);
295
343
#endif
296
 
  to = StackGuard::ArchiveStackGuard(to);
297
 
  to = RegExpStack::ArchiveStack(to);
298
 
  to = Bootstrapper::ArchiveState(to);
299
 
  lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
 
344
  to = isolate_->stack_guard()->ArchiveStackGuard(to);
 
345
  to = isolate_->regexp_stack()->ArchiveStack(to);
 
346
  to = isolate_->bootstrapper()->ArchiveState(to);
 
347
  lazily_archived_thread_ = ThreadId::Invalid();
300
348
  lazily_archived_thread_state_ = NULL;
301
349
}
302
350
 
303
351
 
304
352
void ThreadManager::FreeThreadResources() {
305
 
  HandleScopeImplementer::FreeThreadResources();
306
 
  Top::FreeThreadResources();
 
353
  isolate_->handle_scope_implementer()->FreeThreadResources();
 
354
  isolate_->FreeThreadResources();
307
355
#ifdef ENABLE_DEBUGGER_SUPPORT
308
 
  Debug::FreeThreadResources();
 
356
  isolate_->debug()->FreeThreadResources();
309
357
#endif
310
 
  StackGuard::FreeThreadResources();
311
 
  RegExpStack::FreeThreadResources();
312
 
  Bootstrapper::FreeThreadResources();
 
358
  isolate_->stack_guard()->FreeThreadResources();
 
359
  isolate_->regexp_stack()->FreeThreadResources();
 
360
  isolate_->bootstrapper()->FreeThreadResources();
313
361
}
314
362
 
315
363
 
316
364
bool ThreadManager::IsArchived() {
317
 
  return Thread::HasThreadLocal(thread_state_key);
 
365
  Isolate::PerIsolateThreadData* data =
 
366
      isolate_->FindPerThreadDataForThisThread();
 
367
  return data != NULL && data->thread_state() != NULL;
318
368
}
319
369
 
320
 
 
321
370
void ThreadManager::Iterate(ObjectVisitor* v) {
322
371
  // Expecting no threads during serialization/deserialization
323
 
  for (ThreadState* state = ThreadState::FirstInUse();
 
372
  for (ThreadState* state = FirstThreadStateInUse();
324
373
       state != NULL;
325
374
       state = state->Next()) {
326
375
    char* data = state->data();
327
376
    data = HandleScopeImplementer::Iterate(v, data);
328
 
    data = Top::Iterate(v, data);
 
377
    data = isolate_->Iterate(v, data);
329
378
    data = Relocatable::Iterate(v, data);
330
379
  }
331
380
}
332
381
 
333
382
 
334
383
void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
335
 
  for (ThreadState* state = ThreadState::FirstInUse();
 
384
  for (ThreadState* state = FirstThreadStateInUse();
336
385
       state != NULL;
337
386
       state = state->Next()) {
338
387
    char* data = state->data();
339
388
    data += HandleScopeImplementer::ArchiveSpacePerThread();
340
 
    Top::IterateThread(v, data);
341
 
  }
342
 
}
343
 
 
344
 
 
345
 
int ThreadManager::CurrentId() {
346
 
  return Thread::GetThreadLocalInt(thread_id_key);
347
 
}
348
 
 
349
 
 
350
 
void ThreadManager::AssignId() {
351
 
  if (!HasId()) {
352
 
    ASSERT(Locker::IsLocked());
353
 
    int thread_id = ++last_id_;
354
 
    ASSERT(thread_id > 0);  // see the comment near last_id_ definition.
355
 
    Thread::SetThreadLocalInt(thread_id_key, thread_id);
356
 
    Top::set_thread_id(thread_id);
357
 
  }
358
 
}
359
 
 
360
 
 
361
 
bool ThreadManager::HasId() {
362
 
  return Thread::HasThreadLocal(thread_id_key);
363
 
}
364
 
 
365
 
 
366
 
void ThreadManager::TerminateExecution(int thread_id) {
367
 
  for (ThreadState* state = ThreadState::FirstInUse();
 
389
    isolate_->IterateThread(v, data);
 
390
  }
 
391
}
 
392
 
 
393
 
 
394
ThreadId ThreadManager::CurrentId() {
 
395
  return ThreadId::Current();
 
396
}
 
397
 
 
398
 
 
399
void ThreadManager::TerminateExecution(ThreadId thread_id) {
 
400
  for (ThreadState* state = FirstThreadStateInUse();
368
401
       state != NULL;
369
402
       state = state->Next()) {
370
 
    if (thread_id == state->id()) {
 
403
    if (thread_id.Equals(state->id())) {
371
404
      state->set_terminate_on_restore(true);
372
405
    }
373
406
  }
374
407
}
375
408
 
376
409
 
377
 
// This is the ContextSwitcher singleton. There is at most a single thread
378
 
// running which delivers preemption events to V8 threads.
379
 
ContextSwitcher* ContextSwitcher::singleton_ = NULL;
380
 
 
381
 
 
382
 
ContextSwitcher::ContextSwitcher(int every_n_ms)
 
410
ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
383
411
  : Thread("v8:CtxtSwitcher"),
384
412
    keep_going_(true),
385
 
    sleep_ms_(every_n_ms) {
 
413
    sleep_ms_(every_n_ms),
 
414
    isolate_(isolate) {
386
415
}
387
416
 
388
417
 
389
418
// Set the scheduling interval of V8 threads. This function starts the
390
419
// ContextSwitcher thread if needed.
391
420
void ContextSwitcher::StartPreemption(int every_n_ms) {
392
 
  ASSERT(Locker::IsLocked());
393
 
  if (singleton_ == NULL) {
 
421
  Isolate* isolate = Isolate::Current();
 
422
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
 
423
  if (isolate->context_switcher() == NULL) {
394
424
    // If the ContextSwitcher thread is not running at the moment start it now.
395
 
    singleton_ = new ContextSwitcher(every_n_ms);
396
 
    singleton_->Start();
 
425
    isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
 
426
    isolate->context_switcher()->Start();
397
427
  } else {
398
428
    // ContextSwitcher thread is already running, so we just change the
399
429
    // scheduling interval.
400
 
    singleton_->sleep_ms_ = every_n_ms;
 
430
    isolate->context_switcher()->sleep_ms_ = every_n_ms;
401
431
  }
402
432
}
403
433
 
405
435
// Disable preemption of V8 threads. If multiple threads want to use V8 they
406
436
// must cooperatively schedule amongst them from this point on.
407
437
void ContextSwitcher::StopPreemption() {
408
 
  ASSERT(Locker::IsLocked());
409
 
  if (singleton_ != NULL) {
 
438
  Isolate* isolate = Isolate::Current();
 
439
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
 
440
  if (isolate->context_switcher() != NULL) {
410
441
    // The ContextSwitcher thread is running. We need to stop it and release
411
442
    // its resources.
412
 
    singleton_->keep_going_ = false;
413
 
    singleton_->Join();  // Wait for the ContextSwitcher thread to exit.
 
443
    isolate->context_switcher()->keep_going_ = false;
 
444
    // Wait for the ContextSwitcher thread to exit.
 
445
    isolate->context_switcher()->Join();
414
446
    // Thread has exited, now we can delete it.
415
 
    delete(singleton_);
416
 
    singleton_ = NULL;
 
447
    delete(isolate->context_switcher());
 
448
    isolate->set_context_switcher(NULL);
417
449
  }
418
450
}
419
451
 
423
455
void ContextSwitcher::Run() {
424
456
  while (keep_going_) {
425
457
    OS::Sleep(sleep_ms_);
426
 
    StackGuard::Preempt();
 
458
    isolate()->stack_guard()->Preempt();
427
459
  }
428
460
}
429
461