// CHECK: @"\01?m_f_memptr@@3P8Multiple@@AEXXZA" = global { i8*, i32 } zeroinitializer, align 4
// CHECK: @"\01?v_f_memptr@@3P8Virtual@@AEXXZA" = global { i8*, i32, i32 } zeroinitializer, align 4
// We can define Unspecified after locking in the inheritance model.
struct Unspecified : Virtual {
// Test memptr emission in a constant expression.
// Single inheritance: the CHECK below shows this lowers to a bare function address (i8*).
void (Single ::*s_f_mp)() = &Single::foo;
// Multiple inheritance: taking &B2::foo bakes in a non-zero this-adjustment (i32 4 in the CHECK).
void (Multiple ::*m_f_mp)() = &B2::foo;
// Virtual inheritance model: { i8*, i32, i32 } representation, all offsets zero here (see CHECK).
void (Virtual ::*v_f_mp)() = &Virtual::foo;
// Unspecified model: widest representation, { i8*, i32, i32, i32 } (see CHECK).
void (Unspecified::*u_f_mp)() = &Unspecified::foo;
// CHECK: @"\01?s_f_mp@Const@@3P8Single@@AEXXZA" =
// CHECK: global i8* bitcast ({{.*}} @"\01?foo@Single@@QAEXXZ" to i8*), align 4
// CHECK: @"\01?m_f_mp@Const@@3P8Multiple@@AEXXZA" =
// CHECK: global { i8*, i32 } { i8* bitcast ({{.*}} @"\01?foo@B2@@QAEXXZ" to i8*), i32 4 }, align 4
// CHECK: @"\01?v_f_mp@Const@@3P8Virtual@@AEXXZA" =
// CHECK: global { i8*, i32, i32 } { i8* bitcast ({{.*}} @"\01?foo@Virtual@@QAEXXZ" to i8*), i32 0, i32 0 }, align 4
// CHECK: @"\01?u_f_mp@Const@@3P8Unspecified@@AEXXZA" =
// CHECK: global { i8*, i32, i32, i32 } { i8* bitcast ({{.*}} @"\01?foo@Unspecified@@QAEXXZ" to i8*), i32 0, i32 0, i32 0 }, align 4
// This exercises ConstExprEmitter instead of ValueDecl::evaluateValue. The
// extra reinterpret_cast for the parameter type requires more careful folding.
// FIXME: Or does it? If reinterpret_casts are no-ops, we should be able to
// strip them in evaluateValue() and just proceed as normal with an APValue.
// Multiple non-virtual bases (B and A defined earlier in the file, outside this chunk).
struct C : B, A { int c; };
// C-style cast reinterprets the parameter type (A* -> void*); still folds to a
// plain function pointer constant (see CHECK).
void (A::*ptr1)(void *) = (void (A::*)(void *)) &A::foo;
// CHECK: @"\01?ptr1@CastParam@@3P8A@1@AEXPAX@ZA" =
// CHECK: global i8* bitcast (void ({{.*}})* @"\01?foo@A@CastParam@@QAEXPAU12@@Z" to i8*), align 4
// Try a reinterpret_cast followed by a memptr conversion.
// The A::* -> C::* derived-to-base memptr conversion adds the non-virtual base
// adjustment (the i32 4 in the CHECK).
void (C::*ptr2)(void *) = (void (C::*)(void *)) (void (A::*)(void *)) &A::foo;
// CHECK: @"\01?ptr2@CastParam@@3P8C@1@AEXPAX@ZA" =
// CHECK: global { i8*, i32 } { i8* bitcast (void ({{.*}})* @"\01?foo@A@CastParam@@QAEXPAU12@@Z" to i8*), i32 4 }, align 4
// A null member pointer survives the whole cast chain: emitted as zeroinitializer (see CHECK).
void (C::*ptr3)(void *) = (void (C::*)(void *)) (void (A::*)(void *)) (void (A::*)(A *)) 0;
// CHECK: @"\01?ptr3@CastParam@@3P8C@1@AEXPAX@ZA" =
// CHECK: global { i8*, i32 } zeroinitializer, align 4
// NOTE(review): virtual member presumably forces a vfptr in the enclosing class
// (not visible in this chunk), which affects its member-pointer null value —
// cf. the "Null for D is 0" comment below. Confirm against the full file.
virtual void isPolymorphic();
// Try a cast that changes the inheritance model. Null for D is 0, but null for
// C is -1. We need the cast to long in order to hit the non-APValue path.
// Folds to -1, the null data-memptr value for C (see CHECK).
int C::*ptr4 = (int C::*) (int D::*) (long D::*) 0;
// CHECK: @"\01?ptr4@CastParam@@3PQC@1@HA" = global i32 -1, align 4
// MSVC rejects this but we accept it.
// Same as ptr4 but without the intermediate D-typed memptr cast; also folds to -1 (see CHECK).
int C::*ptr5 = (int C::*) (long D::*) 0;
129
// CHECK: @"\01?ptr5@CastParam@@3PQC@1@HA" = global i32 -1, align 4
132
// Forward declaration so the member pointer below is formed before the class is defined.
struct UnspecWithVBPtr;
// Declaring a memptr to the still-incomplete class locks in the unspecified
// inheritance model before the definition below adds a virtual base.
int UnspecWithVBPtr::*forceUnspecWithVBPtr;
struct UnspecWithVBPtr : B1, virtual B2 {
// Test emitting non-virtual member pointers in a non-constexpr setting.
void EmitNonVirtualMemberPointers() {
void (Single ::*s_f_memptr)() = &Single::foo;
void (Multiple ::*m_f_memptr)() = &Multiple::foo;
void (Virtual ::*v_f_memptr)() = &Virtual::foo;
void (Unspecified::*u_f_memptr)() = &Unspecified::foo;
void (UnspecWithVBPtr::*u2_f_memptr)() = &UnspecWithVBPtr::foo;
// CHECK: define void @"\01?EmitNonVirtualMemberPointers@@YAXXZ"() {{.*}} {
// CHECK: alloca i8*, align 4
// CHECK: alloca { i8*, i32 }, align 4
// CHECK: alloca { i8*, i32, i32 }, align 4
// CHECK: alloca { i8*, i32, i32, i32 }, align 4
// CHECK: store i8* bitcast (void (%{{.*}}*)* @"\01?foo@Single@@QAEXXZ" to i8*), i8** %{{.*}}, align 4
// CHECK: store { i8*, i32 }
// CHECK: { i8* bitcast (void (%{{.*}}*)* @"\01?foo@Multiple@@QAEXXZ" to i8*), i32 0 },
// CHECK: { i8*, i32 }* %{{.*}}, align 4
// CHECK: store { i8*, i32, i32 }
// CHECK: { i8* bitcast (void (%{{.*}}*)* @"\01?foo@Virtual@@QAEXXZ" to i8*), i32 0, i32 0 },
// CHECK: { i8*, i32, i32 }* %{{.*}}, align 4
// CHECK: store { i8*, i32, i32, i32 }
// CHECK: { i8* bitcast (void (%{{.*}}*)* @"\01?foo@Unspecified@@QAEXXZ" to i8*), i32 0, i32 0, i32 0 },
// CHECK: { i8*, i32, i32, i32 }* %{{.*}}, align 4
// CHECK: store { i8*, i32, i32, i32 }
// CHECK: { i8* bitcast (void (%{{.*}}*)* @"\01?foo@UnspecWithVBPtr@@QAEXXZ" to i8*),
// CHECK: i32 0, i32 4, i32 0 },
// CHECK: { i8*, i32, i32, i32 }* %{{.*}}, align 4
void podMemPtrs() {
// CHECK: ret void
bool compareSingleFunctionMemptr(void (Single::*l)(), void (Single::*r)()) {
// Should only be one comparison here.
// CHECK: define zeroext i1 @"\01?compareSingleFunctionMemptr@@YA_NP8Single@@AEXXZ0@Z"{{.*}} {
// CHECK: %[[r:.*]] = icmp eq
// CHECK: ret i1 %[[r]]
bool compareNeqSingleFunctionMemptr(void (Single::*l)(), void (Single::*r)()) {
// Should only be one comparison here.
// CHECK: define zeroext i1 @"\01?compareNeqSingleFunctionMemptr@@YA_NP8Single@@AEXXZ0@Z"{{.*}} {
// CHECK: %[[r:.*]] = icmp ne
// CHECK: ret i1 %[[r]]
bool unspecFuncMemptrEq(void (Unspecified::*l)(), void (Unspecified::*r)()) {
// CHECK: define zeroext i1 @"\01?unspecFuncMemptrEq@@YA_NP8Unspecified@@AEXXZ0@Z"{{.*}} {
// CHECK: %[[lhs0:.*]] = extractvalue { i8*, i32, i32, i32 } %[[l:.*]], 0
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r:.*]], 0
// CHECK: %[[cmp0:.*]] = icmp eq i8* %[[lhs0]], %{{.*}}
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 1
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 1
// CHECK: %[[cmp1:.*]] = icmp eq i32
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 2
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 2
// CHECK: %[[cmp2:.*]] = icmp eq i32
// CHECK: %[[res12:.*]] = and i1 %[[cmp1]], %[[cmp2]]
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 3
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 3
// CHECK: %[[cmp3:.*]] = icmp eq i32
// CHECK: %[[res123:.*]] = and i1 %[[res12]], %[[cmp3]]
// CHECK: %[[iszero:.*]] = icmp eq i8* %[[lhs0]], null
// CHECK: %[[bits_or_null:.*]] = or i1 %[[res123]], %[[iszero]]
// CHECK: %{{.*}} = and i1 %[[bits_or_null]], %[[cmp0]]
// CHECK: ret i1 %{{.*}}
bool unspecFuncMemptrNeq(void (Unspecified::*l)(), void (Unspecified::*r)()) {
// CHECK: define zeroext i1 @"\01?unspecFuncMemptrNeq@@YA_NP8Unspecified@@AEXXZ0@Z"{{.*}} {
// CHECK: %[[lhs0:.*]] = extractvalue { i8*, i32, i32, i32 } %[[l:.*]], 0
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r:.*]], 0
// CHECK: %[[cmp0:.*]] = icmp ne i8* %[[lhs0]], %{{.*}}
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 1
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 1
// CHECK: %[[cmp1:.*]] = icmp ne i32
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 2
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 2
// CHECK: %[[cmp2:.*]] = icmp ne i32
// CHECK: %[[res12:.*]] = or i1 %[[cmp1]], %[[cmp2]]
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 3
// CHECK: %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 3
// CHECK: %[[cmp3:.*]] = icmp ne i32
// CHECK: %[[res123:.*]] = or i1 %[[res12]], %[[cmp3]]
// CHECK: %[[iszero:.*]] = icmp ne i8* %[[lhs0]], null
// CHECK: %[[bits_or_null:.*]] = and i1 %[[res123]], %[[iszero]]
// CHECK: %{{.*}} = or i1 %[[bits_or_null]], %[[cmp0]]
// CHECK: ret i1 %{{.*}}
bool unspecDataMemptrEq(int Unspecified::*l, int Unspecified::*r) {
// CHECK: define zeroext i1 @"\01?unspecDataMemptrEq@@YA_NPQUnspecified@@H0@Z"{{.*}} {
// CHECK: extractvalue { i32, i32, i32 } %{{.*}}, 0
// CHECK: extractvalue { i32, i32, i32 } %{{.*}}, 0
// CHECK: icmp eq i32
// CHECK: extractvalue { i32, i32, i32 } %{{.*}}, 1
// CHECK: extractvalue { i32, i32, i32 } %{{.*}}, 1
// CHECK: icmp eq i32
// CHECK: extractvalue { i32, i32, i32 } %{{.*}}, 2
// CHECK: extractvalue { i32, i32, i32 } %{{.*}}, 2
// CHECK: icmp eq i32
void (Multiple::*convertB2FuncToMultiple(void (B2::*mp)()))() {
// CHECK: define i64 @"\01?convertB2FuncToMultiple@@YAP8Multiple@@AEXXZP8B2@@AEXXZ@Z"{{.*}} {
// CHECK: %[[mp:.*]] = load i8** %{{.*}}, align 4
// CHECK: icmp ne i8* %[[mp]], null
// CHECK: br i1 %{{.*}} label %{{.*}}, label %{{.*}}
// memptr.convert: ; preds = %entry
// CHECK: insertvalue { i8*, i32 } undef, i8* %[[mp]], 0
// CHECK: insertvalue { i8*, i32 } %{{.*}}, i32 4, 1
// memptr.converted: ; preds = %memptr.convert, %entry
// CHECK: phi { i8*, i32 } [ zeroinitializer, %{{.*}} ], [ {{.*}} ]
void (B2::*convertMultipleFuncToB2(void (Multiple::*mp)()))() {
// FIXME: cl emits warning C4407 on this code because of the representation
// change. We might want to do the same.
return static_cast<void (B2::*)()>(mp);
// FIXME: We should return i8* instead of i32 here. The ptrtoint cast prevents
// LLVM from optimizing away the branch. This is likely a bug in
// lib/CodeGen/TargetInfo.cpp with how we classify memptr types for returns.
// CHECK: define i32 @"\01?convertMultipleFuncToB2@@YAP8B2@@AEXXZP8Multiple@@AEXXZ@Z"{{.*}} {
// CHECK: %[[src:.*]] = load { i8*, i32 }* %{{.*}}, align 4
// CHECK: extractvalue { i8*, i32 } %[[src]], 0
// CHECK: icmp ne i8* %{{.*}}, null
// CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
// memptr.convert: ; preds = %entry
// CHECK: %[[fp:.*]] = extractvalue { i8*, i32 } %[[src]], 0
// memptr.converted: ; preds = %memptr.convert, %entry
// CHECK: phi i8* [ null, %{{.*}} ], [ %[[fp]], %{{.*}} ]
// Virtual base: the CHECKs below show C's function memptrs use the
// { i8*, i32, i32 } (virtual-inheritance) representation.
struct C : virtual A { int c; };
// D derives from B and C; converting a C::* to a D::* requires a this-adjustment
// (the `add nsw i32 %{{.*}}, 4` in the CHECKs below).
struct D : B, C { int d; };
void (D::*convertCToD(void (C::*mp)()))() {
// CHECK: define void @"\01?convertCToD@Test1@@YAP8D@1@AEXXZP8C@1@AEXXZ@Z"{{.*}} {
// CHECK: load { i8*, i32, i32 }* %{{.*}}, align 4
// CHECK: extractvalue { i8*, i32, i32 } %{{.*}}, 0
// CHECK: icmp ne i8* %{{.*}}, null
// CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
// memptr.convert: ; preds = %entry
// CHECK: extractvalue { i8*, i32, i32 } %{{.*}}, 0
// CHECK: extractvalue { i8*, i32, i32 } %{{.*}}, 1
// CHECK: extractvalue { i8*, i32, i32 } %{{.*}}, 2
// CHECK: %[[adj:.*]] = add nsw i32 %{{.*}}, 4
// CHECK: insertvalue { i8*, i32, i32 } undef, i8* {{.*}}, 0
// CHECK: insertvalue { i8*, i32, i32 } {{.*}}, i32 %[[adj]], 1
// CHECK: insertvalue { i8*, i32, i32 } {{.*}}, i32 {{.*}}, 2
// memptr.converted: ; preds = %memptr.convert, %entry
// CHECK: phi { i8*, i32, i32 } [ { i8* null, i32 0, i32 -1 }, {{.*}} ], [ {{.*}} ]
// Test that we dynamically convert between different null reps.
// (Test2) Single non-virtual inheritance from A; used by the reinterpret tests below.
struct B : A { int b; };
// NOTE(review): virtual member presumably gives the enclosing class (not visible
// in this chunk) a vfptr — see the select against -1 in the CHECKs below, which
// converts between the two null representations. Confirm against the full file.
virtual void hasVfPtr();
int A::*reinterpret(int B::*mp) {
return reinterpret_cast<int A::*>(mp);
// CHECK: define i32 @"\01?reinterpret@Test2@@YAPQA@1@HPQB@1@H@Z"{{.*}} {
int A::*reinterpret(int C::*mp) {
return reinterpret_cast<int A::*>(mp);
521
// CHECK: define i32 @"\01?reinterpret@Test2@@YAPQA@1@HPQC@1@H@Z"{{.*}} {
522
// CHECK: %[[mp:.*]] = load i32*
523
// CHECK: %[[cmp:.*]] = icmp ne i32 %[[mp]], 0
524
// CHECK: select i1 %[[cmp]], i32 %[[mp]], i32 -1