1
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2
* vim: set ts=8 sts=4 et sw=4 tw=99:
3
* This Source Code Form is subject to the terms of the Mozilla Public
4
* License, v. 2.0. If a copy of the MPL was not distributed with this
5
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
12
#include "ParallelArrayAnalysis.h"
13
#include "IonSpewer.h"
14
#include "UnreachableCodeElimination.h"
15
#include "IonAnalysis.h"
23
using parallel::SpewMIR;
24
using parallel::SpewCompile;
27
virtual bool visit##op(M##op *prop) { return true; }
29
#define CUSTOM_OP(op) \
30
virtual bool visit##op(M##op *prop);
33
virtual bool visit##op(M##op *ins) { \
34
MBasicBlock *block = ins->block(); \
35
block->discard(ins); \
39
#define PERMIT(T) (1 << T)
41
#define PERMIT_NUMERIC (PERMIT(MIRType_Int32) | PERMIT(MIRType_Double))
43
#define SPECIALIZED_OP(op, flags) \
44
virtual bool visit##op(M##op *ins) { \
45
return visitSpecializedInstruction(ins, ins->specialization(), flags); \
48
#define UNSAFE_OP(op) \
49
virtual bool visit##op(M##op *ins) { \
50
SpewMIR(ins, "Unsafe"); \
51
return markUnsafe(); \
54
#define WRITE_GUARDED_OP(op, obj) \
55
virtual bool visit##op(M##op *prop) { \
56
return insertWriteGuard(prop, prop->obj()); \
59
#define MAYBE_WRITE_GUARDED_OP(op, obj) \
60
virtual bool visit##op(M##op *prop) { \
63
return insertWriteGuard(prop, prop->obj()); \
66
class ParallelArrayVisitor : public MInstructionVisitor
71
MDefinition *parSlice_;
73
bool insertWriteGuard(MInstruction *writeInstruction,
74
MDefinition *valueBeingWritten);
76
bool replaceWithParNew(MInstruction *newInstruction,
77
JSObject *templateObject);
79
bool replace(MInstruction *oldInstruction,
80
MInstruction *replacementInstruction);
82
bool visitSpecializedInstruction(MInstruction *ins, MIRType spec, uint32_t flags);
84
// Intended for use in a visitXyz() instruction like "return
85
// markUnsafe()". Sets the unsafe flag and returns true (since
86
// this does not indicate an unrecoverable compilation failure).
94
ParallelArrayVisitor(JSContext *cx, MIRGraph &graph)
101
void clearUnsafe() { unsafe_ = false; }
102
bool unsafe() { return unsafe_; }
103
MDefinition *parSlice() {
105
parSlice_ = graph_.parSlice();
109
bool convertToBailout(MBasicBlock *block, MInstruction *ins);
111
// I am taking the policy of blacklisting everything that's not
112
// obviously safe for now. We can loosen as we need.
124
UNSAFE_OP(OsrScopeChain)
125
UNSAFE_OP(ReturnFromCtor)
126
CUSTOM_OP(CheckOverRecursed)
129
UNSAFE_OP(CreateThis)
130
UNSAFE_OP(CreateThisWithTemplate)
131
UNSAFE_OP(CreateThisWithProto)
132
UNSAFE_OP(CreateArgumentsObject)
133
UNSAFE_OP(GetArgumentsObjectArg)
134
UNSAFE_OP(SetArgumentsObjectArg)
139
UNSAFE_OP(GetDynamicName)
140
UNSAFE_OP(FilterArguments)
141
UNSAFE_OP(CallDirectEval)
150
SPECIALIZED_OP(Ursh, PERMIT_NUMERIC)
151
SPECIALIZED_OP(MinMax, PERMIT_NUMERIC)
155
SAFE_OP(MathFunction)
156
SPECIALIZED_OP(Add, PERMIT_NUMERIC)
157
SPECIALIZED_OP(Sub, PERMIT_NUMERIC)
158
SPECIALIZED_OP(Mul, PERMIT_NUMERIC)
159
SPECIALIZED_OP(Div, PERMIT_NUMERIC)
160
SPECIALIZED_OP(Mod, PERMIT_NUMERIC)
162
UNSAFE_OP(CharCodeAt)
163
UNSAFE_OP(FromCharCode)
166
SAFE_OP(Box) // Boxing just creates a JSVal, doesn't alloc.
171
SAFE_OP(TruncateToInt32)
176
CUSTOM_OP(NewCallObject)
177
CUSTOM_OP(NewParallelArray)
185
UNSAFE_OP(ImplicitThis)
188
SAFE_OP(ConstantElements)
190
WRITE_GUARDED_OP(StoreSlot, slots)
191
SAFE_OP(FunctionEnvironment) // just a load of func env ptr
192
SAFE_OP(TypeBarrier) // causes a bailout if the type is not found: a-ok with us
193
SAFE_OP(MonitorTypes) // causes a bailout if the type is not found: a-ok with us
194
UNSAFE_OP(PostWriteBarrier)
195
SAFE_OP(GetPropertyCache)
196
SAFE_OP(GetPropertyPolymorphic)
197
UNSAFE_OP(SetPropertyPolymorphic)
198
UNSAFE_OP(GetElementCache)
199
UNSAFE_OP(SetElementCache)
200
UNSAFE_OP(BindNameCache)
202
SAFE_OP(GuardObjectType)
205
SAFE_OP(TypedArrayLength)
206
SAFE_OP(TypedArrayElements)
207
SAFE_OP(InitializedLength)
208
WRITE_GUARDED_OP(SetInitializedLength, elements)
211
SAFE_OP(BoundsCheckLower)
213
SAFE_OP(LoadElementHole)
214
MAYBE_WRITE_GUARDED_OP(StoreElement, elements)
215
WRITE_GUARDED_OP(StoreElementHole, elements)
216
UNSAFE_OP(ArrayPopShift)
218
SAFE_OP(LoadTypedArrayElement)
219
SAFE_OP(LoadTypedArrayElementHole)
220
SAFE_OP(LoadTypedArrayElementStatic)
221
MAYBE_WRITE_GUARDED_OP(StoreTypedArrayElement, elements)
222
WRITE_GUARDED_OP(StoreTypedArrayElementHole, elements)
223
UNSAFE_OP(StoreTypedArrayElementStatic)
224
UNSAFE_OP(ClampToUint8)
225
SAFE_OP(LoadFixedSlot)
226
WRITE_GUARDED_OP(StoreFixedSlot, object)
227
UNSAFE_OP(CallGetProperty)
228
UNSAFE_OP(GetNameCache)
229
UNSAFE_OP(CallGetIntrinsicValue)
230
UNSAFE_OP(CallsiteCloneCache)
231
UNSAFE_OP(CallGetElement)
232
UNSAFE_OP(CallSetElement)
233
UNSAFE_OP(CallInitElementArray)
234
UNSAFE_OP(CallSetProperty)
235
UNSAFE_OP(DeleteProperty)
236
UNSAFE_OP(SetPropertyCache)
237
UNSAFE_OP(IteratorStart)
238
UNSAFE_OP(IteratorNext)
239
UNSAFE_OP(IteratorMore)
240
UNSAFE_OP(IteratorEnd)
241
SAFE_OP(StringLength)
242
UNSAFE_OP(ArgumentsLength)
243
UNSAFE_OP(GetArgument)
244
UNSAFE_OP(RunOncePrologue)
249
UNSAFE_OP(InstanceOf)
250
CUSTOM_OP(InterruptCheck)
253
SAFE_OP(ParNewDenseArray)
254
SAFE_OP(ParNewCallObject)
258
UNSAFE_OP(ArrayConcat)
259
UNSAFE_OP(GetDOMProperty)
260
UNSAFE_OP(SetDOMProperty)
261
UNSAFE_OP(NewStringObject)
265
UNSAFE_OP(RegExpTest)
266
UNSAFE_OP(CallInstanceOf)
267
UNSAFE_OP(FunctionBoundary)
268
UNSAFE_OP(GuardString)
269
UNSAFE_OP(NewDeclEnvObject)
272
SAFE_OP(ParWriteGuard)
273
SAFE_OP(ParCheckInterrupt)
274
SAFE_OP(ParCheckOverRecursed)
275
SAFE_OP(PolyInlineDispatch)
276
SAFE_OP(FunctionDispatch)
277
SAFE_OP(TypeObjectDispatch)
279
SAFE_OP(HaveSameClass)
280
UNSAFE_OP(EffectiveAddress)
281
UNSAFE_OP(AsmJSUnsignedToDouble)
285
UNSAFE_OP(AsmJSLoadHeap)
286
UNSAFE_OP(AsmJSStoreHeap)
287
UNSAFE_OP(AsmJSLoadGlobalVar)
288
UNSAFE_OP(AsmJSStoreGlobalVar)
289
UNSAFE_OP(AsmJSLoadFuncPtr)
290
UNSAFE_OP(AsmJSLoadFFIFunc)
291
UNSAFE_OP(AsmJSReturn)
292
UNSAFE_OP(AsmJSVoidReturn)
293
UNSAFE_OP(AsmJSPassStackArg)
294
UNSAFE_OP(AsmJSParameter)
296
UNSAFE_OP(AsmJSCheckOverRecursed)
298
// It looks like this could easily be made safe:
299
UNSAFE_OP(ConvertElementsToDoubles)
303
ParallelArrayAnalysis::analyze()
305
// Walk the basic blocks in a DFS. When we encounter a block with an
306
// unsafe instruction, then we know that this block will bailout when
307
// executed. Therefore, we replace the block.
309
// We don't need a worklist, though, because the graph is sorted
310
// in RPO. Therefore, we just use the marked flags to tell us
311
// when we visited some predecessor of the current block.
312
JSContext *cx = GetIonContext()->cx;
313
ParallelArrayVisitor visitor(cx, graph_);
314
graph_.entryBlock()->mark(); // Note: in par. exec., we never enter from OSR.
316
for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
317
if (mir_->shouldCancel("ParallelArrayAnalysis"))
320
if (block->isMarked()) {
321
// Iterate through and transform the instructions. Stop
322
// if we encounter an inherently unsafe operation, in
323
// which case we will transform this block into a bailout
325
MInstruction *instr = NULL;
326
for (MInstructionIterator ins(block->begin());
327
ins != block->end() && !visitor.unsafe();)
329
if (mir_->shouldCancel("ParallelArrayAnalysis"))
332
// We may be removing or replacing the current
333
// instruction, so advance `ins` now. Remember the
334
// last instr. we looked at for use later if it should
338
if (!instr->accept(&visitor)) {
339
SpewMIR(instr, "Unaccepted");
344
if (!visitor.unsafe()) {
345
// Count the number of reachable blocks.
348
// Block consists of only safe instructions. Visit its successors.
349
for (uint32_t i = 0; i < block->numSuccessors(); i++)
350
block->getSuccessor(i)->mark();
352
// Block contains an unsafe instruction. That means that once
353
// we enter this block, we are guaranteed to bailout.
355
// If this is the entry block, then there is no point
356
// in even trying to execute this function as it will
358
if (*block == graph_.entryBlock()) {
359
Spew(SpewCompile, "Entry block contains unsafe MIR");
363
// Otherwise, create a replacement that will.
364
if (!visitor.convertToBailout(*block, instr))
367
JS_ASSERT(!block->isMarked());
372
Spew(SpewCompile, "Safe");
373
IonSpewPass("ParallelArrayAnalysis");
375
UnreachableCodeElimination uce(mir_, graph_);
376
if (!uce.removeUnmarkedBlocks(marked))
378
IonSpewPass("UCEAfterParallelArrayAnalysis");
379
AssertExtendedGraphCoherency(graph_);
381
if (!removeResumePointOperands())
383
IonSpewPass("RemoveResumePointOperands");
384
AssertExtendedGraphCoherency(graph_);
386
if (!EliminateDeadCode(mir_, graph_))
388
IonSpewPass("DCEAfterParallelArrayAnalysis");
389
AssertExtendedGraphCoherency(graph_);
395
ParallelArrayAnalysis::removeResumePointOperands()
397
// In parallel exec mode, nothing is effectful, therefore we do
398
// not need to reconstruct interpreter state and can simply
399
// bailout by returning a special code. Ideally we'd either
400
// remove the unused resume points or else never generate them in
401
// the first place, but I encountered various assertions and
402
// crashes attempting to do that, so for the time being I simply
403
// replace their operands with undefined. This prevents them from
404
// interfering with DCE and other optimizations. It is also *necessary*
405
// to handle cases like this:
407
// foo(a, b, c.bar())
409
// where `foo` was deemed to be an unsafe function to call. This
410
// is because without neutering the ResumePoints, they would still
411
// refer to the MPassArg nodes generated for the call to foo().
412
// But the call to foo() is dead and has been removed, leading to
413
// an inconsistent IR and assertions at codegen time.
415
MConstant *udef = NULL;
416
for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
418
replaceOperandsOnResumePoint(block->entryResumePoint(), udef);
420
for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++) {
421
if (ins->isStart()) {
422
JS_ASSERT(udef == NULL);
423
udef = MConstant::New(UndefinedValue());
424
block->insertAfter(*ins, udef);
426
if (MResumePoint *resumePoint = ins->resumePoint())
427
replaceOperandsOnResumePoint(resumePoint, udef);
435
// Overwrites every operand of |resumePoint| with |withDef| (the undefined
// constant built by the caller) so that dead instructions referenced only by
// resume points no longer block DCE. NOTE(review): the braces (source lines
// 437/440) are elided from this excerpt.
ParallelArrayAnalysis::replaceOperandsOnResumePoint(MResumePoint *resumePoint,
436
MDefinition *withDef)
438
for (size_t i = 0; i < resumePoint->numOperands(); i++)
439
resumePoint->replaceOperand(i, withDef);
443
// Branch tests — presumably always safe in parallel mode (returns true).
// NOTE(review): only the signature survives in this excerpt; the return type
// and body (source lines 442, 444+) are elided — confirm against the full
// source.
ParallelArrayVisitor::visitTest(MTest *)
449
ParallelArrayVisitor::convertToBailout(MBasicBlock *block, MInstruction *ins)
451
JS_ASSERT(unsafe()); // `block` must have contained unsafe items
452
JS_ASSERT(block->isMarked()); // `block` must have been reachable to get here
454
// Clear the unsafe flag for subsequent blocks.
457
// This block is no longer reachable.
460
// Create a bailout block for each predecessor. In principle, we
461
// only need one bailout block--in fact, only one per graph! But I
462
// found this approach easier to implement given the design of the
463
// MIR Graph construction routines. Besides, most often `block`
464
// has only one predecessor. Also, using multiple blocks helps to
465
// keep the PC information more accurate (though replacing `block`
466
// with exactly one bailout would be just as good).
467
for (size_t i = 0; i < block->numPredecessors(); i++) {
468
MBasicBlock *pred = block->getPredecessor(i);
470
// We only care about incoming edges from reachable predecessors.
471
if (!pred->isMarked())
474
// create bailout block to insert on this edge
475
MBasicBlock *bailBlock = MBasicBlock::NewParBailout(graph_, block->info(), pred,
476
block->pc(), block->entryResumePoint());
480
// if `block` had phis, we are replacing it with `bailBlock` which does not
481
if (pred->successorWithPhis() == block)
482
pred->setSuccessorWithPhis(NULL, 0);
484
// redirect the predecessor to the bailout block
485
uint32_t succIdx = pred->getSuccessorIndex(block);
486
pred->replaceSuccessor(succIdx, bailBlock);
488
// Insert the bailout block after `block` in the execution
489
// order. This should satisfy the RPO requirements and
490
// moreover ensures that we will visit this block in our outer
491
// walk, thus allowing us to keep the count of marked blocks
493
graph_.insertBlockAfter(block, bailBlock);
500
/////////////////////////////////////////////////////////////////////////////
503
// Simple memory allocation opcodes---those which ultimately compile
504
// down to a (possibly inlined) invocation of NewGCThing()---are
505
// replaced with MParNew, which is supplied with the thread context.
506
// These allocations will take place using per-helper-thread arenas.
509
// Rewrites MNewParallelArray to MParNew so the allocation is parameterized by
// the par slice (per-helper-thread arena allocation; see the banner comment
// above). NOTE(review): the trailing return and braces (source lines 510,
// 513+) are elided from this excerpt.
ParallelArrayVisitor::visitNewParallelArray(MNewParallelArray *ins)
511
MParNew *parNew = new MParNew(parSlice(), ins->templateObject());
512
replace(ins, parNew);
517
// Rewrites MNewCallObject to MParNewCallObject so the call object is
// allocated thread-locally via the par slice. NOTE(review): the final return
// and braces (source lines 518, 523+) are elided from this excerpt.
ParallelArrayVisitor::visitNewCallObject(MNewCallObject *ins)
519
// fast path: replace with ParNewCallObject op
520
MParNewCallObject *parNewCallObjectInstruction =
521
MParNewCallObject::New(parSlice(), ins);
522
replace(ins, parNewCallObjectInstruction);
527
// Lambdas whose function has a singleton type, or which must be freshly
// cloned per call site, cannot be created thread-locally and take the slow
// path; otherwise the instruction is rewritten to MParLambda (per-thread
// allocation). NOTE(review): the slow-path statement and the surrounding
// braces/return (source lines 531, 533-535, 539+) are elided from this
// excerpt.
ParallelArrayVisitor::visitLambda(MLambda *ins)
529
if (ins->fun()->hasSingletonType() ||
530
types::UseNewTypeForClone(ins->fun()))
532
// slow path: bail on parallel execution.
536
// fast path: replace with ParLambda op
537
MParLambda *parLambdaInstruction = MParLambda::New(parSlice(), ins);
538
replace(ins, parLambdaInstruction);
543
// Object allocation: if the instruction would take the VM path it cannot run
// in parallel (the elided source lines 547-549 presumably mark it unsafe —
// confirm against the full source); the inline path is rewritten to MParNew
// via replaceWithParNew.
ParallelArrayVisitor::visitNewObject(MNewObject *newInstruction)
545
if (newInstruction->shouldUseVM()) {
546
SpewMIR(newInstruction, "should use VM");
550
return replaceWithParNew(newInstruction,
551
newInstruction->templateObject());
555
// Array allocation: mirrors visitNewObject — VM-path allocations cannot run
// in parallel (the elided source lines 559-561 presumably mark the
// instruction unsafe — confirm against the full source); inline-path
// allocations are rewritten to MParNew via replaceWithParNew.
ParallelArrayVisitor::visitNewArray(MNewArray *newInstruction)
557
if (newInstruction->shouldUseVM()) {
558
SpewMIR(newInstruction, "should use VM");
562
return replaceWithParNew(newInstruction,
563
newInstruction->templateObject());
567
// Rewrites MRest to MParRest, keeping the actual/formal argument counts but
// substituting an untyped template array (rationale in the comment below).
// NOTE(review): the failure check on |templateObj| (source lines 575-577) is
// elided from this excerpt.
ParallelArrayVisitor::visitRest(MRest *ins)
569
// Construct a new template object that has a generic type object and not
570
// a pc-tracked one. This is because we cannot ensure that the arguments
571
// array's element types to contain the argument types in a threadsafe
572
// manner, so we might as well just not track its element types so that we
573
// can stay parallel.
574
JSObject *templateObj = NewDenseUnallocatedArray(cx_, 0, NULL, TenuredObject);
578
return replace(ins, MParRest::New(parSlice(), ins->numActuals(),
579
ins->numFormals(), templateObj));
583
// Shared helper for the allocation visitors: substitute an MParNew carrying
// |templateObject|, parameterized by the par slice so allocation uses the
// per-thread arena. NOTE(review): the braces and final return (source lines
// 585, 588+) are elided from this excerpt.
ParallelArrayVisitor::replaceWithParNew(MInstruction *newInstruction,
584
JSObject *templateObject)
586
MParNew *parNewInstruction = new MParNew(parSlice(), templateObject);
587
replace(newInstruction, parNewInstruction);
592
// Core replacement helper: insert |replacementInstruction| immediately before
// |oldInstruction| in the same block, forward all uses to it, then discard
// the old instruction. NOTE(review): the braces and final return (source
// lines 594, 599+) are elided from this excerpt.
ParallelArrayVisitor::replace(MInstruction *oldInstruction,
593
MInstruction *replacementInstruction)
595
MBasicBlock *block = oldInstruction->block();
596
block->insertBefore(oldInstruction, replacementInstruction);
597
oldInstruction->replaceAllUsesWith(replacementInstruction);
598
block->discard(oldInstruction);
602
/////////////////////////////////////////////////////////////////////////////
605
// We only want to permit writes to locally guarded objects.
606
// Furthermore, we want to avoid PICs and other non-thread-safe things
607
// (though perhaps we should support PICs at some point). If we
608
// cannot determine the origin of an object, we can insert a write
609
// guard which will check whether the object was allocated from the
610
// per-thread-arena or not.
613
ParallelArrayVisitor::insertWriteGuard(MInstruction *writeInstruction,
614
MDefinition *valueBeingWritten)
616
// Many of the write operations do not take the JS object
617
// but rather something derived from it, such as the elements.
618
// So we need to identify the JS object:
620
switch (valueBeingWritten->type()) {
622
object = valueBeingWritten;
626
switch (valueBeingWritten->op()) {
627
case MDefinition::Op_Slots:
628
object = valueBeingWritten->toSlots()->object();
631
case MDefinition::Op_NewSlots:
632
// Values produced by new slots will ALWAYS be
637
SpewMIR(writeInstruction, "cannot insert write guard for %s",
638
valueBeingWritten->opName());
643
case MIRType_Elements:
644
switch (valueBeingWritten->op()) {
645
case MDefinition::Op_Elements:
646
object = valueBeingWritten->toElements()->object();
649
case MDefinition::Op_TypedArrayElements:
650
object = valueBeingWritten->toTypedArrayElements()->object();
654
SpewMIR(writeInstruction, "cannot insert write guard for %s",
655
valueBeingWritten->opName());
661
SpewMIR(writeInstruction, "cannot insert write guard for MIR Type %d",
662
valueBeingWritten->type());
666
if (object->isUnbox())
667
object = object->toUnbox()->input();
669
switch (object->op()) {
670
case MDefinition::Op_ParNew:
671
// MParNew will always be creating something thread-local, omit the guard
672
SpewMIR(writeInstruction, "write to ParNew prop does not require guard");
678
MBasicBlock *block = writeInstruction->block();
679
MParWriteGuard *writeGuard = MParWriteGuard::New(parSlice(), object);
680
block->insertBefore(writeInstruction, writeGuard);
681
writeGuard->adjustInputs(writeGuard);
685
/////////////////////////////////////////////////////////////////////////////
688
// We only support calls to interpreted functions that that have already been
689
// Ion compiled. If a function has no IonScript, we bail out.
692
// Per the banner comment above, only calls to already-Ion-compiled
// interpreted functions are supported: DOM functions and native targets are
// rejected, and a constructing call with no known single target is rejected.
// NOTE(review): the markUnsafe() returns, the null-single-target handling,
// and the success path (elided source lines 697-699, 701-702, 705-709,
// 712+) are missing from this excerpt — confirm against the full source.
ParallelArrayVisitor::visitCall(MCall *ins)
695
if (ins->isDOMFunction()) {
696
SpewMIR(ins, "call to dom function");
700
RootedFunction target(cx_, ins->getSingleTarget());
703
if (target->isNative()) {
704
SpewMIR(ins, "call to native function");
710
if (ins->isConstructing()) {
711
SpewMIR(ins, "call to unknown constructor");
718
/////////////////////////////////////////////////////////////////////////////
719
// Stack limit, interrupts
721
// In sequential Ion code, the stack limit is stored in the JSRuntime.
722
// We store it in the thread context. We therefore need a separate
723
// instruction to access it, one parameterized by the thread context.
724
// Similar considerations apply to checking for interrupts.
727
// Sequential Ion reads the stack limit from the JSRuntime; in parallel mode
// substitute MParCheckOverRecursed, which is parameterized by the per-thread
// slice (see the banner comment above).
ParallelArrayVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins)
729
MParCheckOverRecursed *replacement = new MParCheckOverRecursed(parSlice());
730
return replace(ins, replacement);
734
// Same substitution as visitCheckOverRecursed, but for interrupt checks:
// MParCheckInterrupt consults the thread context rather than the runtime.
ParallelArrayVisitor::visitInterruptCheck(MInterruptCheck *ins)
736
MParCheckInterrupt *replacement = new MParCheckInterrupt(parSlice());
737
return replace(ins, replacement);
740
/////////////////////////////////////////////////////////////////////////////
743
// Some ops, like +, can be specialized to ints/doubles. Anything
744
// else is terrifying.
746
// TODO---Eventually, we should probably permit arbitrary + but bail
747
// if the operands are not both integers/floats.
750
// Accepts a specialized instruction only when its MIRType specialization is
// in the permitted bitmask (built with the PERMIT/PERMIT_NUMERIC macros
// defined earlier in this file). NOTE(review): the |flags| parameter line,
// the bitmask test, and the markUnsafe() fallthrough (elided source lines
// 751, 754-756, 758+) are missing from this excerpt.
ParallelArrayVisitor::visitSpecializedInstruction(MInstruction *ins, MIRType spec,
753
uint32_t flag = 1 << spec;
757
SpewMIR(ins, "specialized to unacceptable type %d", spec);
761
/////////////////////////////////////////////////////////////////////////////
765
// MThrow cannot run in parallel: drop the throw (asserted to be the block's
// terminator) and substitute an MParBailout. NOTE(review): the code that
// installs |bailout| as the new terminator (source lines 771+) is elided
// from this excerpt.
ParallelArrayVisitor::visitThrow(MThrow *thr)
767
MBasicBlock *block = thr->block();
768
JS_ASSERT(block->lastIns() == thr);
769
block->discardLastIns();
770
MParBailout *bailout = new MParBailout();
777
///////////////////////////////////////////////////////////////////////////
780
// See comments in header file.
783
GetPossibleCallees(JSContext *cx,
786
types::StackTypeSet *calleeTypes,
787
CallTargetVector &targets);
790
AddCallTarget(HandleScript script, CallTargetVector &targets);
793
AddPossibleCallees(MIRGraph &graph, CallTargetVector &targets)
795
JSContext *cx = GetIonContext()->cx;
797
for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
798
for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++)
803
MCall *callIns = ins->toCall();
805
RootedFunction target(cx, callIns->getSingleTarget());
807
RootedScript script(cx, target->nonLazyScript());
808
if (!AddCallTarget(script, targets))
813
types::StackTypeSet *calleeTypes = callIns->getFunction()->resultTypeSet();
814
RootedScript script(cx, callIns->block()->info().script());
815
if (!GetPossibleCallees(cx,
817
callIns->resumePoint()->pc(),
828
GetPossibleCallees(JSContext *cx,
831
types::StackTypeSet *calleeTypes,
832
CallTargetVector &targets)
834
if (!calleeTypes || calleeTypes->baseFlags() != 0)
837
unsigned objCount = calleeTypes->getObjectCount();
842
RootedFunction rootedFun(cx);
843
RootedScript rootedScript(cx);
844
for (unsigned i = 0; i < objCount; i++) {
845
JSObject *obj = calleeTypes->getSingleObject(i);
846
if (obj && obj->is<JSFunction>()) {
847
rootedFun = &obj->as<JSFunction>();
849
types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
852
rootedFun = typeObj->interpretedFunction;
857
if (!rootedFun->isInterpreted())
860
rootedScript = rootedFun->getOrCreateScript(cx);
864
if (rootedScript->shouldCloneAtCallsite) {
865
rootedFun = CloneFunctionAtCallsite(cx, rootedFun, script, pc);
868
rootedScript = rootedFun->nonLazyScript();
871
// check if this call target is already known
872
if (!AddCallTarget(rootedScript, targets))
880
// Appends |script| to |targets| unless it is already present (linear scan —
// target vectors are expected to be small). NOTE(review): the duplicate
// early-return, closing braces, and the failure/success returns (elided
// source lines 884-886, 888+) are missing from this excerpt.
AddCallTarget(HandleScript script, CallTargetVector &targets)
882
for (size_t i = 0; i < targets.length(); i++) {
883
if (targets[i] == script)
887
if (!targets.append(script))