forked from JuliaLang/julia
-
Notifications
You must be signed in to change notification settings - Fork 0
/
cgutils.cpp
3613 lines (3411 loc) · 151 KB
/
cgutils.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// This file is a part of Julia. License is MIT: https://julialang.org/license
// utility procedures used in code generation
// Promote a plain jl_value_t* (addrspace 0) to the GC-tracked address space.
static Value *track_pjlvalue(jl_codectx_t &ctx, Value *V)
{
    assert(V->getType() == T_pjlvalue);
    Value *tracked = ctx.builder.CreateAddrSpaceCast(V, T_prjlvalue);
    return tracked;
}
// Take an arbitrary untracked value and make it gc-tracked.
// Values already in the tracked address space are returned unchanged.
static Value *maybe_decay_untracked(jl_codectx_t &ctx, Value *V)
{
    Type *ty = V->getType();
    if (ty != T_pjlvalue) {
        assert(ty == T_prjlvalue);
        return V;
    }
    return ctx.builder.CreateAddrSpaceCast(V, T_prjlvalue);
}
// Take any value and mark that it may be derived from a rooted value.
static Value *decay_derived(jl_codectx_t &ctx, Value *V)
{
    PointerType *PT = cast<PointerType>(V->getType());
    if (PT->getAddressSpace() == AddressSpace::Derived)
        return V; // already decayed
    // Once llvm deletes pointer element types, we won't need it here any more either.
    Type *DerivedT = PointerType::get(PT->getElementType(), AddressSpace::Derived);
    return ctx.builder.CreateAddrSpaceCast(V, DerivedT);
}
// Take any value and make it safe to pass to GEP: Tracked pointers decay to
// the Derived address space; everything else passes through untouched.
static Value *maybe_decay_tracked(jl_codectx_t &ctx, Value *V)
{
    PointerType *PT = cast<PointerType>(V->getType());
    if (PT->getAddressSpace() != AddressSpace::Tracked)
        return V;
    Type *DerivedT = PointerType::get(PT->getElementType(), AddressSpace::Derived);
    return ctx.builder.CreateAddrSpaceCast(V, DerivedT);
}
// Cast a boxed value into the CalleeRooted address space, i.e. promise that
// the callee keeps the value rooted for the duration of the call.
static Value *mark_callee_rooted(jl_codectx_t &ctx, Value *V)
{
    assert(V->getType() == T_pjlvalue || V->getType() == T_prjlvalue);
    Type *RootedT = PointerType::get(T_jlvalue, AddressSpace::CalleeRooted);
    return ctx.builder.CreateAddrSpaceCast(V, RootedT);
}
// Map a Julia memory-ordering enum onto the corresponding LLVM AtomicOrdering.
// Aborts on an out-of-range enum value (should be unreachable).
AtomicOrdering get_llvm_atomic_order(enum jl_memory_order order)
{
    switch (order) {
    case jl_memory_order_notatomic: return AtomicOrdering::NotAtomic;
    case jl_memory_order_unordered: return AtomicOrdering::Unordered;
    case jl_memory_order_monotonic: return AtomicOrdering::Monotonic;
    case jl_memory_order_acquire:   return AtomicOrdering::Acquire;
    case jl_memory_order_release:   return AtomicOrdering::Release;
    case jl_memory_order_acq_rel:   return AtomicOrdering::AcquireRelease;
    case jl_memory_order_seq_cst:   return AtomicOrdering::SequentiallyConsistent;
    default:
        // Bug fix: the previous `assert("invalid atomic ordering")` asserted a
        // string literal, which is always true, so it could never fire.
        assert(0 && "invalid atomic ordering");
        abort();
    }
}
// --- language feature checks ---
#define JL_FEAT_TEST(ctx, feature) ((ctx).params->feature)
// --- string constants ---
// Emit `txt` (with its terminating NUL) as a private module-level constant
// and return a pointer to its first byte.
static Value *stringConstPtr(
        jl_codegen_params_t &emission_context,
        IRBuilder<> &irbuilder,
        const std::string &txt)
{
    Module *M = jl_builderModule(irbuilder);
    // size() + 1 so the NUL terminator is part of the constant data
    StringRef ctxt(txt.c_str(), txt.size() + 1);
    Constant *Data = ConstantDataArray::get(jl_LLVMContext, arrayRefFromStringRef(ctxt));
    GlobalVariable *gv = get_pointer_to_constant(emission_context, Data, "_j_str", *M);
    // GEP {0, 0} decays the [N x i8] global to an i8* at its first element
    Value *zero = ConstantInt::get(Type::getInt32Ty(jl_LLVMContext), 0);
    Value *Args[] = { zero, zero };
    return irbuilder.CreateInBoundsGEP(gv->getValueType(), gv, Args);
}
// --- MDNode ---
// Recursively convert a Julia value (Symbol, Bool, Int, or Tuple thereof)
// into LLVM metadata.  `nothing` maps to nullptr (dropped by tuple callers);
// any other kind of value raises a Julia error.
Metadata *to_md_tree(jl_value_t *val) {
    if (val == jl_nothing)
        return nullptr;
    Metadata *MD = nullptr;
    if (jl_is_symbol(val)) {
        MD = MDString::get(jl_LLVMContext, jl_symbol_name((jl_sym_t*)val));
    } else if (jl_is_bool(val)) {
        MD = ConstantAsMetadata::get(ConstantInt::get(T_int1, jl_unbox_bool(val)));
    } else if (jl_is_long(val)) {
        MD = ConstantAsMetadata::get(ConstantInt::get(T_int64, jl_unbox_long(val)));
    } else if (jl_is_tuple(val)) {
        SmallVector<Metadata *, 8> MDs;
        for (int f = 0, nf = jl_nfields(val); f < nf; ++f) {
            // `nothing` fields yield nullptr and are skipped
            MD = to_md_tree(jl_fieldref(val, f));
            if (MD)
                MDs.push_back(MD);
        }
        MD = MDNode::get(jl_LLVMContext, MDs);
    } else {
        // Bug fix: previous message read "needs to Symbol/..." (missing verb)
        jl_error("LLVM metadata needs to be a Symbol/Bool/Int or Tuple thereof");
    }
    return MD;
}
// --- Debug info ---
// Translate a Julia type into a DWARF DIType for debug info.  Boxed values and
// non-concrete types are described as a generic boxed pointer
// (jl_pvalue_dillvmt); concrete primitives and transparent structs get a real
// layout.  Results are memoized in ctx->ditypes when a context is supplied.
static DIType *_julia_type_to_di(jl_codegen_params_t *ctx, jl_value_t *jt, DIBuilder *dbuilder, bool isboxed)
{
    jl_datatype_t *jdt = (jl_datatype_t*)jt;
    if (isboxed || !jl_is_datatype(jt) || !jdt->isconcretetype)
        return jl_pvalue_dillvmt;
    assert(jdt->layout);
    DIType* _ditype = NULL;
    // memoization slot: the context's cache when available, else a local dummy
    DIType* &ditype = (ctx ? ctx->ditypes[jdt] : _ditype);
    if (ditype)
        return ditype;
    const char *tname = jl_symbol_name(jdt->name->name);
    if (jl_is_primitivetype(jt)) {
        // primitives are all described as unsigned ints of the right width
        uint64_t SizeInBits = jl_datatype_nbits(jdt);
        ditype = dbuilder->createBasicType(tname, SizeInBits, llvm::dwarf::DW_ATE_unsigned);
    }
    else if (jl_is_structtype(jt) && !jl_is_layout_opaque(jdt->layout)) {
        size_t ntypes = jl_datatype_nfields(jdt);
        std::vector<llvm::Metadata*> Elements(ntypes);
        for (unsigned i = 0; i < ntypes; i++) {
            jl_value_t *el = jl_field_type_concrete(jdt, i);
            DIType *di;
            if (jl_field_isptr(jdt, i))
                di = jl_pvalue_dillvmt; // boxed field: generic pointer
            // TODO: elseif jl_islayout_inline
            else
                di = _julia_type_to_di(ctx, el, dbuilder, false);
            Elements[i] = di;
        }
        DINodeArray ElemArray = dbuilder->getOrCreateArray(Elements);
        // use the datatype's address as a unique identifier for the DWARF type
        std::string unique_name;
        raw_string_ostream(unique_name) << (uintptr_t)jdt;
        ditype = dbuilder->createStructType(
                NULL,                       // Scope
                tname,                      // Name
                NULL,                       // File
                0,                          // LineNumber
                jl_datatype_nbits(jdt),     // SizeInBits
                8 * jl_datatype_align(jdt), // AlignInBits
                DINode::FlagZero,           // Flags
                NULL,                       // DerivedFrom
                ElemArray,                  // Elements
                dwarf::DW_LANG_Julia,       // RuntimeLanguage
                nullptr,                    // VTableHolder
                unique_name                 // UniqueIdentifier
                );
    }
    else {
        // return a typealias for types with hidden content
        ditype = dbuilder->createTypedef(jl_pvalue_dillvmt, tname, NULL, 0, NULL);
    }
    return ditype;
}
// Thin wrapper over _julia_type_to_di using this context's emission params.
static DIType *julia_type_to_di(jl_codectx_t &ctx, jl_value_t *jt, DIBuilder *dbuilder, bool isboxed)
{
    jl_codegen_params_t *params = &ctx.emission_context;
    return _julia_type_to_di(params, jt, dbuilder, isboxed);
}
// Lower a GC-tracked/derived object reference to a raw pointer value via the
// pointer_from_objref intrinsic; pointers in other address spaces pass through.
static Value *emit_pointer_from_objref(jl_codectx_t &ctx, Value *V)
{
    unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
    if (AS != AddressSpace::Tracked && AS != AddressSpace::Derived)
        return V; // already an untracked pointer
    V = decay_derived(ctx, V);
    Type *T = PointerType::get(T_jlvalue, AddressSpace::Derived);
    if (V->getType() != T)
        V = ctx.builder.CreateBitCast(V, T);
    Function *F = prepare_call(pointer_from_objref_func);
    CallInst *Call = ctx.builder.CreateCall(F, V);
    // propagate the intrinsic's attributes onto the call site
    Call->setAttributes(F->getAttributes());
    return Call;
}
// Return the value (if any) that must be kept alive to root `x` for the GC:
// the boxed form if present, otherwise a Tracked/Derived interior pointer,
// otherwise nullptr (no rooting required).
static Value *get_gc_root_for(const jl_cgval_t &x)
{
    if (x.Vboxed)
        return x.Vboxed;
    if (!x.ispointer() || x.constant)
        return nullptr;
    assert(x.V);
    if (PointerType *PT = dyn_cast<PointerType>(x.V->getType())) {
        unsigned AS = PT->getAddressSpace();
        if (AS == AddressSpace::Tracked || AS == AddressSpace::Derived)
            return x.V;
    }
    return nullptr;
}
// --- emitting pointers directly into code ---
static inline Constant *literal_static_pointer_val(const void *p, Type *T = T_pjlvalue);
// Emit (or reuse) an uninitialized GlobalVariable standing in for the runtime
// jl_value_t at `addr`, named "cname<N>" so duplicates can be merged later.
static Value *julia_pgv(jl_codectx_t &ctx, const char *cname, void *addr)
{
    // emit a GlobalVariable for a jl_value_t named "cname"
    // store the name given so we can reuse it (facilitating merging later)
    // so first see if there already is a GlobalVariable for this address
    GlobalVariable* &gv = ctx.global_targets[addr];
    Module *M = jl_Module;
    StringRef localname;
    std::string gvname;
    if (!gv) {
        // first sighting: make the name unique with a running counter
        raw_string_ostream(gvname) << cname << ctx.global_targets.size();
        localname = StringRef(gvname);
    }
    else {
        localname = gv->getName();
        // the cached global may belong to a previous module; re-resolve by name
        if (gv->getParent() != M)
            gv = cast_or_null<GlobalVariable>(M->getNamedValue(localname));
    }
    if (gv == nullptr)
        gv = new GlobalVariable(*M, T_pjlvalue,
                                false, GlobalVariable::PrivateLinkage,
                                NULL, localname);
    // LLVM passes sometimes strip metadata when moving load around
    // since the load at the new location satisfy the same condition as the original one.
    // Mark the global as constant to LLVM code using our own metadata
    // which is much less likely to be striped.
    gv->setMetadata("julia.constgv", MDNode::get(gv->getContext(), None));
    assert(localname == gv->getName());
    assert(!gv->hasInitializer());
    return gv;
}
static Value *julia_pgv(jl_codectx_t &ctx, const char *prefix, jl_sym_t *name, jl_module_t *mod, void *addr)
{
    // emit a GlobalVariable for a jl_value_t, using the prefix, name, and module to
    // to create a readable name of the form prefixModA.ModB.name
    // total buffer: prefix + name + NUL, plus "Mod." for each enclosing module
    size_t len = strlen(jl_symbol_name(name)) + strlen(prefix) + 1;
    jl_module_t *parent = mod, *prev = NULL;
    // walk to the module root (Main is its own parent, hence the prev check)
    while (parent != NULL && parent != prev) {
        len += strlen(jl_symbol_name(parent->name))+1;
        prev = parent;
        parent = parent->parent;
    }
    char *fullname = (char*)alloca(len);
    strcpy(fullname, prefix);
    // place the symbol name at the very end of the buffer...
    len -= strlen(jl_symbol_name(name)) + 1;
    strcpy(fullname + len, jl_symbol_name(name));
    // ...then fill the module path right-to-left, innermost module first,
    // turning each name's NUL position into a '.' separator
    parent = mod;
    prev = NULL;
    while (parent != NULL && parent != prev) {
        size_t part = strlen(jl_symbol_name(parent->name)) + 1;
        strcpy(fullname + len - part, jl_symbol_name(parent->name));
        fullname[len - 1] = '.';
        len -= part;
        prev = parent;
        parent = parent->parent;
    }
    return julia_pgv(ctx, fullname, addr);
}
static JuliaVariable *julia_const_gv(jl_value_t *val);
// Return the address of a slot holding the jl_value_t* `p`, so the slot
// contents can be relocated when code is reloaded.  In imaging mode the slot
// gets a descriptive name (by kind of `p`) for easy identification in gdb.
static Value *literal_pointer_val_slot(jl_codectx_t &ctx, jl_value_t *p)
{
    // emit a pointer to a jl_value_t* which will allow it to be valid across reloading code
    // also, try to give it a nice name for gdb, for easy identification
    if (!imaging_mode) {
        // TODO: this is an optimization, but is it useful or premature
        // (it'll block any attempt to cache these, but can be simply deleted)
        Module *M = jl_Module;
        GlobalVariable *gv = new GlobalVariable(
                *M, T_pjlvalue, true, GlobalVariable::PrivateLinkage,
                literal_static_pointer_val(p));
        gv->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
        return gv;
    }
    if (JuliaVariable *gv = julia_const_gv(p)) {
        // if this is a known special object, use the existing GlobalValue
        return prepare_global_in(jl_Module, gv);
    }
    if (jl_is_datatype(p)) {
        jl_datatype_t *addr = (jl_datatype_t*)p;
        // DataTypes are prefixed with a +
        return julia_pgv(ctx, "+", addr->name->name, addr->name->module, p);
    }
    if (jl_is_method(p)) {
        jl_method_t *m = (jl_method_t*)p;
        // functions are prefixed with a -
        return julia_pgv(ctx, "-", m->name, m->module, p);
    }
    if (jl_is_method_instance(p)) {
        jl_method_instance_t *linfo = (jl_method_instance_t*)p;
        // Type-inferred functions are also prefixed with a -
        if (jl_is_method(linfo->def.method))
            return julia_pgv(ctx, "-", linfo->def.method->name, linfo->def.method->module, p);
    }
    if (jl_is_symbol(p)) {
        jl_sym_t *addr = (jl_sym_t*)p;
        // Symbols are prefixed with jl_sym#
        return julia_pgv(ctx, "jl_sym#", addr, NULL, p);
    }
    // something else gets just a generic name
    return julia_pgv(ctx, "jl_global#", p);
}
// Number of bytes known to be dereferenceable behind a pointer to `jt`,
// or 0 when no lower bound is known.
static size_t dereferenceable_size(jl_value_t *jt)
{
    if (jl_is_array_type(jt))
        return sizeof(jl_array_t); // Array has at least this much data
    if (jl_is_datatype(jt) && jl_struct_try_layout((jl_datatype_t*)jt))
        return jl_datatype_size(jt);
    return 0;
}
// Return the min required / expected alignment of jltype (on the stack or heap)
static unsigned julia_alignment(jl_value_t *jt)
{
    if (jl_is_array_type(jt)) {
        // Array always has this alignment
        return JL_SMALL_BYTE_ALIGNMENT;
    }
    if (jt == (jl_value_t*)jl_datatype_type) {
        // types are never allocated in julia code/on the stack
        // and this is the guarantee we have for the GC bits
        return 16;
    }
    assert(jl_is_datatype(jt) && jl_struct_try_layout((jl_datatype_t*)jt));
    unsigned al = jl_datatype_align(jt);
    // cap at what the heap allocator actually guarantees
    return al > JL_HEAP_ALIGNMENT ? JL_HEAP_ALIGNMENT : al;
}
// Attach nonnull (and, when the size is known, dereferenceable + align)
// attributes to a boxed function argument of Julia type `jt`.
static inline void maybe_mark_argument_dereferenceable(Argument *A, jl_value_t *jt)
{
    AttrBuilder B;
    B.addAttribute(Attribute::NonNull);
    // The `dereferencable` below does not imply `nonnull` for non addrspace(0) pointers.
    size_t size = dereferenceable_size(jt);
    if (size) {
        B.addDereferenceableAttr(size);
        B.addAlignmentAttr(julia_alignment(jt));
    }
    A->addAttrs(B);
}
// Attach nonnull/dereferenceable/align metadata to a load of a pointer,
// given explicit byte-size and alignment bounds.  Non-pointer loads are
// returned unchanged.
static inline Instruction *maybe_mark_load_dereferenceable(Instruction *LI, bool can_be_null,
                                                           size_t size, size_t align)
{
    if (isa<PointerType>(LI->getType())) {
        if (!can_be_null)
            // The `dereferencable` below does not imply `nonnull` for non addrspace(0) pointers.
            LI->setMetadata(LLVMContext::MD_nonnull, MDNode::get(jl_LLVMContext, None));
        if (size) {
            Metadata *OP = ConstantAsMetadata::get(ConstantInt::get(T_int64, size));
            // nullable pointers get the weaker dereferenceable_or_null form
            LI->setMetadata(can_be_null ? LLVMContext::MD_dereferenceable_or_null : LLVMContext::MD_dereferenceable,
                            MDNode::get(jl_LLVMContext, { OP }));
            if (align >= 1) {
                Metadata *OP = ConstantAsMetadata::get(ConstantInt::get(T_int64, align));
                LI->setMetadata(LLVMContext::MD_align, MDNode::get(jl_LLVMContext, { OP }));
            }
        }
    }
    return LI;
}
// Convenience overload: derive the size/alignment bounds from the Julia type.
static inline Instruction *maybe_mark_load_dereferenceable(Instruction *LI, bool can_be_null, jl_value_t *jt)
{
    size_t nbytes = dereferenceable_size(jt);
    unsigned align = (nbytes > 0) ? julia_alignment(jt) : 1;
    return maybe_mark_load_dereferenceable(LI, can_be_null, nbytes, align);
}
// Returns T_pjlvalue
// Emit a pointer to the Julia value `p`: a bare constant outside imaging
// mode, else a load from a relocatable slot (see literal_pointer_val_slot).
static Value *literal_pointer_val(jl_codectx_t &ctx, jl_value_t *p)
{
    if (p == NULL)
        return V_null;
    if (!imaging_mode)
        return literal_static_pointer_val(p);
    Value *pgv = literal_pointer_val_slot(ctx, p);
    return tbaa_decorate(tbaa_const, maybe_mark_load_dereferenceable(
            ctx.builder.CreateAlignedLoad(T_pjlvalue, pgv, Align(sizeof(void*))),
            false, jl_typeof(p)));
}
// Returns T_pjlvalue
// Binding overload of literal_pointer_val: the slot is named jl_bnd#<name>
// and the load is annotated with jl_binding_t size/alignment bounds.
static Value *literal_pointer_val(jl_codectx_t &ctx, jl_binding_t *p)
{
    // emit a pointer to any jl_value_t which will be valid across reloading code
    if (p == NULL)
        return V_null;
    if (!imaging_mode)
        return literal_static_pointer_val(p);
    // bindings are prefixed with jl_bnd#
    Value *pgv = julia_pgv(ctx, "jl_bnd#", p->name, p->owner, p);
    return tbaa_decorate(tbaa_const, maybe_mark_load_dereferenceable(
            ctx.builder.CreateAlignedLoad(T_pjlvalue, pgv, Align(sizeof(void*))),
            false, sizeof(jl_binding_t), alignof(jl_binding_t)));
}
// bitcast a value, but preserve its address space when dealing with pointer types
static Value *emit_bitcast(jl_codectx_t &ctx, Value *v, Type *jl_value)
{
    bool as_mismatch = isa<PointerType>(jl_value) &&
        v->getType()->getPointerAddressSpace() != jl_value->getPointerAddressSpace();
    if (!as_mismatch)
        return ctx.builder.CreateBitCast(v, jl_value);
    // Rebuild the requested pointer type in the source value's address space,
    // since a plain bitcast may not change address spaces.
    Type *fixed = PointerType::get(cast<PointerType>(jl_value)->getElementType(),
                                   v->getType()->getPointerAddressSpace());
    return ctx.builder.CreateBitCast(v, fixed);
}
// Identity when `V` already has type `to`; otherwise an
// address-space-preserving bitcast.
static Value *maybe_bitcast(jl_codectx_t &ctx, Value *V, Type *to) {
    return (to == V->getType()) ? V : emit_bitcast(ctx, V, to);
}
// Given a pointer to a jl_binding_t, GEP to the address of its `value` field.
static Value *julia_binding_gv(jl_codectx_t &ctx, Value *bv)
{
    // offset is in prjlvalue-sized units since bv is indexed as T_prjlvalue*
    Value *offset = ConstantInt::get(T_size, offsetof(jl_binding_t, value) / sizeof(size_t));
    return ctx.builder.CreateInBoundsGEP(T_prjlvalue, bv, offset);
}
// Emit the address of `b->value` for a known binding, going through a
// relocatable "*"-prefixed slot in imaging mode.
static Value *julia_binding_gv(jl_codectx_t &ctx, jl_binding_t *b)
{
    // emit a literal_pointer_val to the value field of a jl_binding_t
    // binding->value are prefixed with *
    Value *bv;
    if (imaging_mode)
        bv = emit_bitcast(ctx,
                tbaa_decorate(tbaa_const,
                    ctx.builder.CreateAlignedLoad(T_pjlvalue, julia_pgv(ctx, "*", b->name, b->owner, b), Align(sizeof(void*)))),
                T_pprjlvalue);
    else
        bv = ConstantExpr::getBitCast(literal_static_pointer_val(b), T_pprjlvalue);
    return julia_binding_gv(ctx, bv);
}
// --- mapping between julia and llvm types ---
// True when instances of `typ` are permanently allocated (never freed by the
// GC), so references to them never need rooting.
static bool type_is_permalloc(jl_value_t *typ)
{
    // Singleton should almost always be handled by the later optimization passes.
    // Also do it here since it is cheap and save some effort in LLVM passes.
    if (jl_is_datatype(typ) && jl_is_datatype_singleton((jl_datatype_t*)typ))
        return true;
    if (typ == (jl_value_t*)jl_symbol_type)
        return true;
    if (typ == (jl_value_t*)jl_int8_type)
        return true;
    return typ == (jl_value_t*)jl_uint8_type;
}
// Convert a byte offset within LLVM struct type `lty` to the corresponding
// element index; the offset must land exactly on an element boundary.
static unsigned convert_struct_offset(Type *lty, unsigned byte_offset)
{
    const DataLayout &DL = jl_data_layout;
    const StructLayout *SL = DL.getStructLayout(cast<StructType>(lty));
    unsigned idx = SL->getElementContainingOffset(byte_offset);
    assert(SL->getElementOffset(idx) == byte_offset);
    return idx;
}
// Context-taking overload; the context is currently unused but keeps call
// sites uniform with the other emit helpers.
static unsigned convert_struct_offset(jl_codectx_t &ctx, Type *lty, unsigned byte_offset)
{
    (void)ctx;
    return convert_struct_offset(lty, byte_offset);
}
// GEP to the struct element of `lty` located at `byte_offset` within `base`.
static Value *emit_struct_gep(jl_codectx_t &ctx, Type *lty, Value *base, unsigned byte_offset)
{
    unsigned idx = convert_struct_offset(ctx, lty, byte_offset);
    return ctx.builder.CreateConstInBoundsGEP2_32(lty, base, 0, idx);
}
static Type *_julia_struct_to_llvm(jl_codegen_params_t *ctx, jl_value_t *jt, bool *isboxed, bool llvmcall=false);
// Convert a Julia type to the LLVM type used to represent its values,
// preserving Julia semantics: concrete immutables become unboxed LLVM
// types, everything else is a boxed T_prjlvalue.  `*isboxed` (if non-NULL)
// reports which case was taken.
static Type *_julia_type_to_llvm(jl_codegen_params_t *ctx, jl_value_t *jt, bool *isboxed)
{
    // this function converts a Julia Type into the equivalent LLVM type
    if (isboxed) *isboxed = false;
    if (jt == (jl_value_t*)jl_bottom_type)
        return T_void;
    if (jl_is_concrete_immutable(jt)) {
        // zero-size immutables (ghosts) have no runtime representation
        if (jl_datatype_nbits(jt) == 0)
            return T_void;
        Type *t = _julia_struct_to_llvm(ctx, jt, isboxed);
        assert(t != NULL);
        return t;
    }
    if (isboxed) *isboxed = true;
    return T_prjlvalue;
}
// Context wrapper over _julia_type_to_llvm.
static Type *julia_type_to_llvm(jl_codectx_t &ctx, jl_value_t *jt, bool *isboxed)
{
    jl_codegen_params_t *params = &ctx.emission_context;
    return _julia_type_to_llvm(params, jt, isboxed);
}
// Exported C entry point for external consumers; runs without a codegen
// context (so no type-cache memoization).
extern "C" JL_DLLEXPORT
Type *jl_type_to_llvm_impl(jl_value_t *jt, bool *isboxed)
{
    return _julia_type_to_llvm(NULL, jt, isboxed);
}
// converts a julia bitstype into the equivalent LLVM bitstype
// Well-known primitives map to dedicated LLVM types (note Bool -> i8, not i1);
// Core.LLVMPtr carries its address space; anything else becomes iN of the
// declared size.
static Type *bitstype_to_llvm(jl_value_t *bt, bool llvmcall = false)
{
    assert(jl_is_primitivetype(bt));
    if (bt == (jl_value_t*)jl_bool_type)
        return T_int8;
    if (bt == (jl_value_t*)jl_int32_type)
        return T_int32;
    if (bt == (jl_value_t*)jl_int64_type)
        return T_int64;
    if (bt == (jl_value_t*)jl_float16_type)
        return T_float16;
    if (bt == (jl_value_t*)jl_float32_type)
        return T_float32;
    if (bt == (jl_value_t*)jl_float64_type)
        return T_float64;
    if (jl_is_llvmpointer_type(bt)) {
        // second type parameter is the target address space
        jl_value_t *as_param = jl_tparam1(bt);
        int as;
        if (jl_is_int32(as_param))
            as = jl_unbox_int32(as_param);
        else if (jl_is_int64(as_param))
            as = jl_unbox_int64(as_param);
        else
            jl_error("invalid pointer address space");
        return PointerType::get(T_int8, as);
    }
    // generic case: an integer of the same bit width
    int nb = jl_datatype_size(bt);
    return Type::getIntNTy(jl_LLVMContext, nb * 8);
}
// True when `typ` is a datatype whose inline layout contains GC-visible
// pointers.  Assumes jl_stored_inline(typ), so ->layout is defined.
static bool jl_type_hasptr(jl_value_t* typ)
{
    if (!jl_is_datatype(typ))
        return false;
    return ((jl_datatype_t*)typ)->layout->npointers > 0;
}
// Alignment guaranteed for field `i` of `dt`: the largest power of two
// dividing the field's byte offset (capped at 16 by the `|= 16`), further
// limited by the datatype's own alignment and the heap guarantee.
static unsigned jl_field_align(jl_datatype_t *dt, size_t i)
{
    unsigned al = jl_field_offset(dt, i);
    al |= 16;
    al &= -al; // isolate the lowest set bit => alignment implied by offset
    return std::min({al, (unsigned)jl_datatype_align(dt), (unsigned)JL_HEAP_ALIGNMENT});
}
// Convert a Julia type to a C-compatible (unboxed) LLVM struct layout.
// Returns T_void for ghosts, NULL when the type has no computable layout
// (caller should have checked mappability), and a boxed T_prjlvalue for
// anything that cannot be represented inline.  Results are cached in
// ctx->llvmtypes except under llvmcall, where e.g. f16 may lower differently.
static Type *_julia_struct_to_llvm(jl_codegen_params_t *ctx, jl_value_t *jt, bool *isboxed, bool llvmcall)
{
    // this function converts a Julia Type into the equivalent LLVM struct
    // use this where C-compatible (unboxed) structs are desired
    // use julia_type_to_llvm directly when you want to preserve Julia's type semantics
    if (isboxed) *isboxed = false;
    if (jt == (jl_value_t*)jl_bottom_type)
        return T_void;
    if (jl_is_primitivetype(jt))
        return bitstype_to_llvm(jt, llvmcall);
    jl_datatype_t *jst = (jl_datatype_t*)jt;
    if (jl_is_structtype(jt) && !(jst->layout && jl_is_layout_opaque(jst->layout))) {
        bool isTuple = jl_is_tuple_type(jt);
        jl_svec_t *ftypes = jl_get_fieldtypes(jst);
        size_t i, ntypes = jl_svec_len(ftypes);
        if (!jl_struct_try_layout(jst))
            return NULL; // caller should have checked jl_type_mappable_to_c already, but we'll be nice
        if (ntypes == 0 || jl_datatype_nbits(jst) == 0)
            return T_void;
        Type *_struct_decl = NULL;
        // TODO: we should probably make a temporary root for `jst` somewhere
        // don't use pre-filled struct_decl for llvmcall (f16, etc. may be different)
        Type *&struct_decl = (ctx && !llvmcall ? ctx->llvmtypes[jst] : _struct_decl);
        if (struct_decl)
            return struct_decl;
        std::vector<Type*> latypes(0);
        // track whether all fields share one type (array) / one Julia type (vector)
        bool isarray = true;
        bool isvector = true;
        jl_value_t *jlasttype = NULL;
        Type *lasttype = NULL;
        bool allghost = true;
        for (i = 0; i < ntypes; i++) {
            jl_value_t *ty = jl_svecref(ftypes, i);
            if (jlasttype != NULL && ty != jlasttype)
                isvector = false;
            jlasttype = ty;
            if (jl_field_isatomic(jst, i)) {
                // TODO: eventually support this?
                // though it's a bit unclear how the implicit load should be interpreted
                return NULL;
            }
            Type *lty;
            if (jl_field_isptr(jst, i)) {
                lty = T_prjlvalue;
                isvector = false;
            }
            else if (ty == (jl_value_t*)jl_bool_type) {
                lty = T_int8;
            }
            else if (jl_is_uniontype(ty)) {
                // pick an Integer type size such that alignment will generally be correct,
                // and always end with an Int8 (selector byte).
                // We may need to insert padding first to get to the right offset
                size_t fsz = 0, al = 0;
                bool isptr = !jl_islayout_inline(ty, &fsz, &al);
                assert(!isptr && fsz == jl_field_size(jst, i) - 1); (void)isptr;
                if (fsz > 0) {
                    if (al > MAX_ALIGN) {
                        // zero-length array of a highly-aligned vector forces the offset
                        Type *AlignmentType;
                        AlignmentType = ArrayType::get(FixedVectorType::get(T_int8, al), 0);
                        latypes.push_back(AlignmentType);
                        al = MAX_ALIGN;
                    }
                    // fill the payload with al-sized ints, then byte padding
                    Type *AlignmentType = IntegerType::get(jl_LLVMContext, 8 * al);
                    unsigned NumATy = fsz / al;
                    unsigned remainder = fsz % al;
                    assert(al == 1 || NumATy > 0);
                    while (NumATy--)
                        latypes.push_back(AlignmentType);
                    while (remainder--)
                        latypes.push_back(T_int8);
                }
                latypes.push_back(T_int8); // trailing selector byte
                isarray = false;
                allghost = false;
                continue;
            }
            else {
                bool isptr;
                lty = _julia_struct_to_llvm(ctx, ty, &isptr, llvmcall);
                assert(lty && !isptr);
            }
            if (lasttype != NULL && lasttype != lty)
                isarray = false;
            lasttype = lty;
            if (!type_is_ghost(lty)) {
                allghost = false;
                latypes.push_back(lty);
            }
        }
        if (allghost) {
            assert(jst->layout == NULL); // otherwise should have been caught above
            struct_decl = T_void;
        }
        else if (jl_is_vecelement_type(jt) && !jl_is_uniontype(jl_svecref(ftypes, 0))) {
            // VecElement type is unwrapped in LLVM (when possible)
            struct_decl = latypes[0];
        }
        else if (isarray && !type_is_ghost(lasttype)) {
            if (isTuple && isvector && jl_special_vector_alignment(ntypes, jlasttype) != 0)
                struct_decl = FixedVectorType::get(lasttype, ntypes);
            else if (isTuple || !llvmcall)
                struct_decl = ArrayType::get(lasttype, ntypes);
            else
                struct_decl = StructType::get(jl_LLVMContext, latypes);
        }
        else {
#if 0 // stress-test code that tries to assume julia-index == llvm-index
      // (also requires change to emit_new_struct to not assume 0 == 0)
            if (!isTuple && latypes.size() > 1) {
                Type *NoopType = ArrayType::get(T_int1, 0);
                latypes.insert(latypes.begin(), NoopType);
            }
#endif
            struct_decl = StructType::get(jl_LLVMContext, latypes);
        }
        return struct_decl;
    }
    // TODO: enable this (with tests) to change ccall calling convention for Union:
    //         if (jl_is_uniontype(ty)) {
    //                // pick an Integer type size such that alignment will be correct
    //                // and always end with an Int8 (selector byte)
    //                lty = ArrayType::get(IntegerType::get(jl_LLVMContext, 8 * al), fsz / al);
    //                std::vector<Type*> Elements(2);
    //                Elements[0] = lty;
    //                Elements[1] = T_int8;
    //                unsigned remainder = fsz % al;
    //                while (remainder--)
    //                    Elements.push_back(T_int8);
    //                lty = StructType::get(jl_LLVMContext, makeArrayRef(Elements));
    //            }
    if (isboxed) *isboxed = true;
    return T_prjlvalue;
}
// Context wrapper over _julia_struct_to_llvm (non-llvmcall path).
static Type *julia_struct_to_llvm(jl_codectx_t &ctx, jl_value_t *jt, bool *isboxed)
{
    jl_codegen_params_t *params = &ctx.emission_context;
    return _julia_struct_to_llvm(params, jt, isboxed);
}
// True when every field of `dt` is stored as a boxed pointer.
static bool is_datatype_all_pointers(jl_datatype_t *dt)
{
    size_t nf = jl_datatype_nfields(dt);
    for (size_t i = 0; i < nf; i++)
        if (!jl_field_isptr(dt, i))
            return false;
    return true;
}
// True when all element types in `t` are the same concrete type.  With
// allow_va, a trailing Vararg whose element type matches (or, for a lone
// Vararg, is concrete) is also accepted.  Empty tuples are homogeneous.
static bool is_tupletype_homogeneous(jl_svec_t *t, bool allow_va = false)
{
    size_t i, l = jl_svec_len(t);
    if (l > 0) {
        jl_value_t *t0 = jl_svecref(t, 0);
        if (!jl_is_concrete_type(t0)) {
            // non-concrete first element: only OK if it is a concrete Vararg
            if (allow_va && jl_is_vararg(t0) &&
                  jl_is_concrete_type(jl_unwrap_vararg(t0)))
                return true;
            return false;
        }
        for (i = 1; i < l; i++) {
            if (allow_va && i == l - 1 && jl_is_vararg(jl_svecref(t, i))) {
                // trailing Vararg must repeat the same element type
                if (t0 != jl_unwrap_vararg(jl_svecref(t, i)))
                    return false;
                continue;
            }
            if (t0 != jl_svecref(t, i))
                return false;
        }
    }
    return true;
}
// Invoke `f(tindex, leaftype)` for each pointer-free leaf of the union `ty`,
// assigning 1-based tindex values in traversal order via `counter`.
// Returns true only if every leaf was pointer-free (i.e. fully unboxable);
// bails out (false) once more than 127 leaves are seen, since tindex values
// must fit in the low 7 bits of the selector byte.
static bool for_each_uniontype_small(
        std::function<void(unsigned, jl_datatype_t*)> f,
        jl_value_t *ty,
        unsigned &counter)
{
    if (counter > 127)
        return false;
    if (jl_is_uniontype(ty)) {
        // recurse into both branches; all must be unboxable
        bool allunbox = for_each_uniontype_small(f, ((jl_uniontype_t*)ty)->a, counter);
        allunbox &= for_each_uniontype_small(f, ((jl_uniontype_t*)ty)->b, counter);
        return allunbox;
    }
    else if (jl_is_pointerfree(ty)) {
        f(++counter, (jl_datatype_t*)ty);
        return true;
    }
    return false;
}
// True when every leaf of the (small) union `typ` can be stored unboxed.
static bool is_uniontype_allunboxed(jl_value_t *typ)
{
    unsigned count = 0;
    auto ignore = [](unsigned, jl_datatype_t*) {};
    return for_each_uniontype_small(ignore, typ, count);
}
static Value *emit_typeof_boxed(jl_codectx_t &ctx, const jl_cgval_t &p);
// Look up the 1-based tindex of leaf type `jt` inside union `ut`;
// returns 0 when `jt` does not occur as an unboxable leaf.
static unsigned get_box_tindex(jl_datatype_t *jt, jl_value_t *ut)
{
    unsigned found = 0;
    unsigned counter = 0;
    for_each_uniontype_small(
        // find the corresponding index in the new union-type
        [&found, jt](unsigned idx, jl_datatype_t *candidate) {
            if (candidate == jt)
                found = idx;
        },
        ut, counter);
    return found;
}
// --- generating various field accessors ---
// Address of the n'th pointer-sized slot of object `v` (constant index).
static Value *emit_nthptr_addr(jl_codectx_t &ctx, Value *v, ssize_t n, bool gctracked = true)
{
    return ctx.builder.CreateInBoundsGEP(
            T_prjlvalue,
            emit_bitcast(ctx, maybe_decay_tracked(ctx, v), T_pprjlvalue),
            ConstantInt::get(T_size, n));
}
// Address of a pointer-sized slot of object `v` at a dynamic index.
static Value *emit_nthptr_addr(jl_codectx_t &ctx, Value *v, Value *idx)
{
    return ctx.builder.CreateInBoundsGEP(
            T_prjlvalue,
            emit_bitcast(ctx, maybe_decay_tracked(ctx, v), T_pprjlvalue),
            idx);
}
// Load the slot at dynamic index `idx` of `v`, reinterpreted as `ptype`.
static LoadInst *emit_nthptr_recast(jl_codectx_t &ctx, Value *v, Value *idx, MDNode *tbaa, Type *ptype)
{
    // p = (jl_value_t**)v; *(ptype)&p[n]
    Value *vptr = emit_nthptr_addr(ctx, v, idx);
    return cast<LoadInst>(tbaa_decorate(tbaa, ctx.builder.CreateLoad(emit_bitcast(ctx, vptr, ptype))));
}
// Load the slot at constant index `n` of `v`, reinterpreted as `ptype`.
static LoadInst *emit_nthptr_recast(jl_codectx_t &ctx, Value *v, ssize_t n, MDNode *tbaa, Type *ptype)
{
    // p = (jl_value_t**)v; *(ptype)&p[n]
    Value *vptr = emit_nthptr_addr(ctx, v, n);
    return cast<LoadInst>(tbaa_decorate(tbaa, ctx.builder.CreateLoad(emit_bitcast(ctx, vptr, ptype))));
}
static Value *boxed(jl_codectx_t &ctx, const jl_cgval_t &v);
// Returns T_prjlvalue
// Emit a call to the jl_typeof intrinsic on a boxed value.
static Value *emit_typeof(jl_codectx_t &ctx, Value *tt)
{
    assert(tt != NULL && !isa<AllocaInst>(tt) && "expected a conditionally boxed value");
    return ctx.builder.CreateCall(prepare_call(jl_typeof_func), {tt});
}
// Compute typeof(p) for a codegen value.  Constants and concretely-typed
// values fold to compile-time constants; boxed values call the typeof
// intrinsic; split-union values select their leaf type by TIndex, with a
// runtime branch to the boxed path when the union also has boxed members.
static jl_cgval_t emit_typeof(jl_codectx_t &ctx, const jl_cgval_t &p)
{
    // given p, compute its type
    if (p.constant)
        return mark_julia_const(jl_typeof(p.constant));
    if (p.isboxed && !jl_is_concrete_type(p.typ)) {
        if (jl_is_type_type(p.typ)) {
            jl_value_t *tp = jl_tparam0(p.typ);
            if (!jl_is_type(tp) || jl_is_concrete_type(tp)) {
                // convert 1::Type{1} ==> typeof(1) ==> Int
                return mark_julia_const(jl_typeof(tp));
            }
        }
        return mark_julia_type(ctx, emit_typeof(ctx, p.V), true, jl_datatype_type);
    }
    if (p.TIndex) {
        // mask off the top bit (the "boxed" flag) to get the union selector
        Value *tindex = ctx.builder.CreateAnd(p.TIndex, ConstantInt::get(T_int8, 0x7f));
        bool allunboxed = is_uniontype_allunboxed(p.typ);
        // chain of selects mapping each tindex to its leaf type; null means
        // "not an unboxed leaf" and routes to the boxed path below
        Value *datatype_or_p = imaging_mode ? Constant::getNullValue(T_ppjlvalue) : V_rnull;
        unsigned counter = 0;
        for_each_uniontype_small(
            [&](unsigned idx, jl_datatype_t *jt) {
                Value *cmp = ctx.builder.CreateICmpEQ(tindex, ConstantInt::get(T_int8, idx));
                Value *ptr;
                if (imaging_mode) {
                    // relocatable slot holding the type pointer
                    ptr = literal_pointer_val_slot(ctx, (jl_value_t*)jt);
                }
                else {
                    ptr = track_pjlvalue(ctx, literal_pointer_val(ctx, (jl_value_t*)jt));
                }
                datatype_or_p = ctx.builder.CreateSelect(cmp, ptr, datatype_or_p);
            },
            p.typ,
            counter);
        auto emit_unboxty = [&] () -> Value* {
            if (imaging_mode)
                // in imaging mode the select chain produced slot addresses; load through
                return track_pjlvalue(
                    ctx, tbaa_decorate(tbaa_const, ctx.builder.CreateAlignedLoad(T_pjlvalue, datatype_or_p, Align(sizeof(void*)))));
            return datatype_or_p;
        };
        Value *res;
        if (!allunboxed) {
            // null selector result means the value is boxed: branch and
            // ask the boxed representation for its type
            Value *isnull = ctx.builder.CreateIsNull(datatype_or_p);
            BasicBlock *boxBB = BasicBlock::Create(jl_LLVMContext, "boxed", ctx.f);
            BasicBlock *unboxBB = BasicBlock::Create(jl_LLVMContext, "unboxed", ctx.f);
            BasicBlock *mergeBB = BasicBlock::Create(jl_LLVMContext, "merge", ctx.f);
            ctx.builder.CreateCondBr(isnull, boxBB, unboxBB);
            ctx.builder.SetInsertPoint(boxBB);
            auto boxTy = emit_typeof(ctx, p.Vboxed);
            ctx.builder.CreateBr(mergeBB);
            boxBB = ctx.builder.GetInsertBlock(); // could have changed
            ctx.builder.SetInsertPoint(unboxBB);
            auto unboxTy = emit_unboxty();
            ctx.builder.CreateBr(mergeBB);
            unboxBB = ctx.builder.GetInsertBlock(); // could have changed
            ctx.builder.SetInsertPoint(mergeBB);
            auto phi = ctx.builder.CreatePHI(T_prjlvalue, 2);
            phi->addIncoming(boxTy, boxBB);
            phi->addIncoming(unboxTy, unboxBB);
            res = phi;
        }
        else {
            res = emit_unboxty();
        }
        return mark_julia_type(ctx, res, true, jl_datatype_type);
    }
    // concretely typed: the type is statically known
    return mark_julia_const(p.typ);
}
// Compute typeof(p) and box the result; the returned Value has LLVM type
// T_prjlvalue.
static Value *emit_typeof_boxed(jl_codectx_t &ctx, const jl_cgval_t &p)
{
    jl_cgval_t typ = emit_typeof(ctx, p);
    return boxed(ctx, typ);
}
// Load the `types` svec field out of the jl_datatype_t pointed to by `dt`.
static Value *emit_datatype_types(jl_codectx_t &ctx, Value *dt)
{
    // index of the `types` field, measured in pointer-sized slots
    const size_t slot = offsetof(jl_datatype_t, types) / sizeof(void*);
    Value *base = emit_bitcast(ctx, decay_derived(ctx, dt), T_ppjlvalue);
    Value *addr = ctx.builder.CreateInBoundsGEP(
            T_pjlvalue, base, ConstantInt::get(T_size, slot));
    return tbaa_decorate(tbaa_const,
            ctx.builder.CreateAlignedLoad(T_pjlvalue, addr, Align(sizeof(void*))));
}
// Number of fields of the datatype `dt`, read from the length word at the
// head of its `types` svec.
static Value *emit_datatype_nfields(jl_codectx_t &ctx, Value *dt)
{
    Value *types_svec = emit_datatype_types(ctx, dt);
    Value *len_addr = emit_bitcast(ctx, types_svec, T_psize);
    return tbaa_decorate(tbaa_const,
            ctx.builder.CreateAlignedLoad(T_size, len_addr, Align(sizeof(void*))));
}
// Load the 32-bit `size` field of the jl_datatype_t pointed to by `dt`.
static Value *emit_datatype_size(jl_codectx_t &ctx, Value *dt)
{
    // index of the `size` field, measured in int-sized slots
    const size_t slot = offsetof(jl_datatype_t, size) / sizeof(int);
    Value *base = emit_bitcast(ctx, decay_derived(ctx, dt), T_pint32);
    Value *addr = ctx.builder.CreateInBoundsGEP(
            T_int32, base, ConstantInt::get(T_size, slot));
    return tbaa_decorate(tbaa_const,
            ctx.builder.CreateAlignedLoad(T_int32, addr, Align(sizeof(int32_t))));
}
/* this is valid code, it's simply unused
   NOTE(review): the call `emit_typeof(p.V)` below omits the ctx argument that
   the live call sites pass (cf. emit_typeof(ctx, p.V) above) — re-verify this
   compiles before resurrecting it.
static Value *emit_sizeof(jl_codectx_t &ctx, const jl_cgval_t &p)
{
if (p.TIndex) {
Value *tindex = ctx.builder.CreateAnd(p.TIndex, ConstantInt::get(T_int8, 0x7f));
Value *size = ConstantInt::get(T_int32, -1);
unsigned counter = 0;
bool allunboxed = for_each_uniontype_small(
[&](unsigned idx, jl_datatype_t *jt) {
Value *cmp = ctx.builder.CreateICmpEQ(tindex, ConstantInt::get(T_int8, idx));
size = ctx.builder.CreateSelect(cmp, ConstantInt::get(T_int32, jl_datatype_size(jt)), size);
},
p.typ,
counter);
if (!allunboxed && p.ispointer() && p.V && !isa<AllocaInst>(p.V)) {
BasicBlock *currBB = ctx.builder.GetInsertBlock();
BasicBlock *dynloadBB = BasicBlock::Create(jl_LLVMContext, "dyn_sizeof", ctx.f);
BasicBlock *postBB = BasicBlock::Create(jl_LLVMContext, "post_sizeof", ctx.f);
Value *isboxed = ctx.builder.CreateICmpNE(
ctx.builder.CreateAnd(p.TIndex, ConstantInt::get(T_int8, 0x80)),
ConstantInt::get(T_int8, 0));
ctx.builder.CreateCondBr(isboxed, dynloadBB, postBB);
ctx.builder.SetInsertPoint(dynloadBB);
Value *datatype = emit_typeof(p.V);
Value *dyn_size = emit_datatype_size(ctx, datatype);
ctx.builder.CreateBr(postBB);
dynloadBB = ctx.builder.GetInsertBlock(); // could have changed
ctx.builder.SetInsertPoint(postBB);
PHINode *sizeof_merge = ctx.builder.CreatePHI(T_int32, 2);
sizeof_merge->addIncoming(dyn_size, dynloadBB);
sizeof_merge->addIncoming(size, currBB);
size = sizeof_merge;
}
#ifndef NDEBUG
// try to catch codegen errors early, before it uses this to memcpy over the entire stack
CreateConditionalAbort(ctx.builder, ctx.builder.CreateICmpEQ(size, ConstantInt::get(T_int32, -1)));
#endif
return size;
}
else if (jl_is_concrete_type(p.typ)) {
return ConstantInt::get(T_int32, jl_datatype_size(p.typ));
}
else {
Value *datatype = emit_typeof_boxed(ctx, p);
Value *dyn_size = emit_datatype_size(ctx, datatype);
return dyn_size;
}
}
*/
// Load the `mutabl` flag of the datatype `dt` as an i1.
// The flag lives in the packed flags byte of dt->name (a jl_typename_t*),
// located immediately after its `n_uninitialized` field.
static Value *emit_datatype_mutabl(jl_codectx_t &ctx, Value *dt)
{
    Value *Ptr = emit_bitcast(ctx, decay_derived(ctx, dt), T_ppint8);
    // NOTE(review): Idx is a byte offset, but the GEP below steps in units of
    // i8* (pointer-sized); this only addresses dt->name correctly if
    // offsetof(jl_datatype_t, name) == 0 — confirm `name` is the first field.
    Value *Idx = ConstantInt::get(T_size, offsetof(jl_datatype_t, name));
    // load dt->name as a raw byte pointer so we can index into it below
    Value *Nam = tbaa_decorate(tbaa_const,
            ctx.builder.CreateAlignedLoad(T_pint8, ctx.builder.CreateInBoundsGEP(T_pint8, Ptr, Idx), Align(sizeof(int8_t*))));
    // the flags byte sits directly after n_uninitialized in jl_typename_t
    Value *Idx2 = ConstantInt::get(T_size, offsetof(jl_typename_t, n_uninitialized) + sizeof(((jl_typename_t*)nullptr)->n_uninitialized));
    Value *mutabl = tbaa_decorate(tbaa_const,
            ctx.builder.CreateAlignedLoad(T_int8, ctx.builder.CreateInBoundsGEP(T_int8, Nam, Idx2), Align(1)));
    // the mutabl flag is bit 1 of that byte: shift it into bit 0, then
    // truncate to i1
    mutabl = ctx.builder.CreateLShr(mutabl, 1);
    return ctx.builder.CreateTrunc(mutabl, T_int1);
}
// isprimitivetype(dt): the type is immutable, has zero fields, and has a
// strictly positive size.
static Value *emit_datatype_isprimitivetype(jl_codectx_t &ctx, Value *dt)
{
    Value *is_immutable = ctx.builder.CreateNot(emit_datatype_mutabl(ctx, dt));
    Value *has_no_fields = ctx.builder.CreateICmpEQ(emit_datatype_nfields(ctx, dt), V_size0);
    Value *has_size = ctx.builder.CreateICmpSGT(emit_datatype_size(ctx, dt), ConstantInt::get(T_int32, 0));
    Value *fields_and_size = ctx.builder.CreateAnd(has_no_fields, has_size);
    return ctx.builder.CreateAnd(is_immutable, fields_and_size);
}
// Load dt->name as a tracked T_prjlvalue.
static Value *emit_datatype_name(jl_codectx_t &ctx, Value *dt)
{
    // index of the `name` field, measured in pointer-sized slots
    const ssize_t slot = (ssize_t)(offsetof(jl_datatype_t, name) / sizeof(char*));
    Value *name_addr = emit_nthptr_addr(ctx, dt, slot);
    return tbaa_decorate(tbaa_const,
            ctx.builder.CreateAlignedLoad(T_prjlvalue, name_addr, Align(sizeof(void*))));
}
// --- generating various error checks ---
// Do not emit a conditional throw for cases where type inference already
// knows the error is always thrown: doing so can leave a use of an SSA
// value that is not dominated by its definition, which the verifier rejects.
static void just_emit_error(jl_codectx_t &ctx, Function *F, const std::string &txt)
{