/* AddressSanitizer, a fast memory error detector.
Copyright (C) 2012-2017 Free Software Foundation, Inc.
Contributed by Kostya Serebryany <[email protected]>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "memmodel.h"
#include "tm_p.h"
#include "ssa.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "varasm.h"
#include "stor-layout.h"
#include "tree-iterator.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "gimple-builder.h"
#include "gimple-fold.h"
#include "ubsan.h"
#include "params.h"
#include "builtins.h"
#include "fnmatch.h"
#include "tree-inline.h"
/* AddressSanitizer finds out-of-bounds and use-after-free bugs
with <2x slowdown on average.
The tool consists of two parts:
instrumentation module (this file) and a run-time library.
The instrumentation module adds a run-time check before every memory insn.
For an 8- or 16-byte load accessing address X:
ShadowAddr = (X >> 3) + Offset
ShadowValue = *(char*)ShadowAddr; // *(short*) for 16-byte access.
if (ShadowValue)
__asan_report_load8(X);
For a load of N bytes (N=1, 2 or 4) from address X:
ShadowAddr = (X >> 3) + Offset
ShadowValue = *(char*)ShadowAddr;
if (ShadowValue)
if ((X & 7) + N - 1 >= ShadowValue)
__asan_report_loadN(X);
Stores are instrumented similarly, but using __asan_report_storeN functions.
(A standalone C sketch of this check follows this comment block.)
A call to __asan_init_vN() is inserted into the list of module CTORs.
N is the version number of the AddressSanitizer API. The changes between the
API versions are listed in libsanitizer/asan/asan_interface_internal.h.
The run-time library redefines malloc (so that redzones are inserted around
the allocated memory) and free (so that reuse of freed memory is delayed),
provides __asan_report* and __asan_init_vN functions.
Read more:
http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
The current implementation supports detection of out-of-bounds and
use-after-free in the heap, on the stack and for global variables.
[Protection of stack variables]
To understand how detection of out-of-bounds and use-after-free works
for stack variables, let's look at this example on x86_64 where the
stack grows downward:
int
foo ()
{
char a[23] = {0};
int b[2] = {0};
a[5] = 1;
b[1] = 2;
return a[5] + b[1];
}
For this function, the stack protected by asan will be organized as
follows, from the top of the stack to the bottom:
Slot 1/ [red zone of 32 bytes called 'RIGHT RedZone']
Slot 2/ [8 bytes of red zone, added to the space of 'a' to make
the next slot 32-byte aligned; this one is called the Partial
Redzone; this 32-byte alignment is an asan constraint]
Slot 3/ [24 bytes for variable 'a']
Slot 4/ [red zone of 32 bytes called 'Middle RedZone']
Slot 5/ [24 bytes of Partial Red Zone (similar to slot 2)]
Slot 6/ [8 bytes for variable 'b']
Slot 7/ [32 bytes of Red Zone at the bottom of the stack, called
'LEFT RedZone']
The 32 bytes of LEFT red zone at the bottom of the stack can be
decomposed as such:
1/ The first 8 bytes contain a magical asan number that is always
0x41B58AB3.
2/ The following 8 bytes contain a pointer to a string (to be
parsed at runtime by the asan runtime library), whose format is
the following:
"<function-name> <space> <num-of-variables-on-the-stack>
(<32-bytes-aligned-offset-in-bytes-of-variable> <space>
<length-of-var-in-bytes> ){n} "
where '(...){n}' means the content inside the parentheses occurs 'n'
times, with 'n' being the number of variables on the stack.
3/ The following 8 bytes contain the PC of the current function which
will be used by the run-time library to print an error message.
4/ The following 8 bytes are reserved for internal use by the run-time.
The shadow memory for that stack layout is going to look like this:
- content of shadow memory 8 bytes for slot 7: 0xF1F1F1F1.
The F1 byte pattern is a magic number called
ASAN_STACK_MAGIC_LEFT and is a way for the runtime to know that
the memory for that shadow byte is part of the LEFT red zone
intended to sit at the bottom of the variables on the stack.
- content of shadow memory 8 bytes for slots 6 and 5:
0xF4F4F400. The F4 byte pattern is a magic number
called ASAN_STACK_MAGIC_PARTIAL. It flags the fact that the
memory region for this shadow byte is a PARTIAL red zone
intended to pad a variable A, so that the slot following
{A,padding} is 32 bytes aligned.
Note that the least significant byte of this shadow memory
content being 00 means that 8 bytes of the corresponding memory
(the memory of variable 'b') are addressable.
- content of shadow memory 8 bytes for slot 4: 0xF2F2F2F2.
The F2 byte pattern is a magic number called
ASAN_STACK_MAGIC_MIDDLE. It flags the fact that the memory
region for this shadow byte is a MIDDLE red zone intended to
sit between two 32-byte aligned slots of {variable,padding}.
- content of shadow memory 8 bytes for slots 3 and 2:
0xF4000000. This represents the concatenation of
variable 'a' and the partial red zone following it, like what we
had for variable 'b'. The least significant 3 bytes being 00
means that the 24 bytes of variable 'a' are addressable.
- content of shadow memory 8 bytes for slot 1: 0xF3F3F3F3.
The F3 byte pattern is a magic number called
ASAN_STACK_MAGIC_RIGHT. It flags the fact that the memory
region for this shadow byte is a RIGHT red zone intended to sit
at the top of the variables on the stack.
Note that the real variable layout is done in expand_used_vars in
cfgexpand.c. As far as Address Sanitizer is concerned, it lays out
stack variables as well as the different red zones, emits some
prologue code to populate the shadow memory so as to poison (mark as
non-accessible) the regions of the red zones and mark the regions of
stack variables as accessible, and emits some epilogue code to
un-poison (mark as accessible) the regions of red zones right before
the function exits.
[Protection of global variables]
The basic idea is to insert a red zone between two global variables
and install a constructor function that calls the asan runtime to do
the populating of the relevant shadow memory regions at load time.
So the global variables are laid out so as to insert a red zone between
them. The size of the red zones is chosen so that each variable starts on a
32-byte boundary.
Then a constructor function is installed so that, for each global
variable, it calls the runtime asan library function
__asan_register_globals with an instance of this type:
struct __asan_global
{
// Address of the beginning of the global variable.
const void *__beg;
// Initial size of the global variable.
uptr __size;
// Size of the global variable + size of the red zone. This
// size is 32 bytes aligned.
uptr __size_with_redzone;
// Name of the global variable.
const void *__name;
// Name of the module where the global variable is declared.
const void *__module_name;
// 1 if it has dynamic initialization, 0 otherwise.
uptr __has_dynamic_init;
// A pointer to struct that contains source location, could be NULL.
__asan_global_source_location *__location;
}
A destructor function that calls the runtime asan library function
__asan_unregister_globals is also installed. */
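/* The function below is a standalone, illustrative sketch (it is not used by
   the pass) of the shadow-memory check described in the comment above,
   written in plain C for an N-byte access with N in {1, 2, 4}.  The
   0x7fff8000 shadow offset is an assumed x86_64 value; the real offset comes
   from targetm.asan_shadow_offset (), and real instrumentation reports the
   error through the libasan __asan_report_* functions instead of returning a
   flag.  */

static int
asan_check_sketch (unsigned long x, unsigned n)
{
  const unsigned long shadow_offset = 0x7fff8000; /* Assumed x86_64 offset.  */
  signed char shadow_value = *(signed char *) ((x >> 3) + shadow_offset);
  if (shadow_value != 0
      /* The last byte touched by the access lies at offset (x & 7) + n - 1
	 within its 8-byte granule; it is in bounds only when that offset is
	 strictly below the (positive) shadow value.  Redzone shadow bytes are
	 negative as signed chars, so they always trip this check.  */
      && (signed char) ((x & 7) + n - 1) >= shadow_value)
    return 1; /* Real instrumentation would call __asan_report_loadN (x).  */
  return 0;
}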
static unsigned HOST_WIDE_INT asan_shadow_offset_value;
static bool asan_shadow_offset_computed;
static vec<char *> sanitized_sections;
static tree last_alloca_addr;
/* Set of variable declarations that are going to be guarded by
use-after-scope sanitizer. */
static hash_set<tree> *asan_handled_variables = NULL;
hash_set <tree> *asan_used_labels = NULL;
/* Sets shadow offset to value in string VAL. */
bool
set_asan_shadow_offset (const char *val)
{
char *endp;
errno = 0;
#ifdef HAVE_LONG_LONG
asan_shadow_offset_value = strtoull (val, &endp, 0);
#else
asan_shadow_offset_value = strtoul (val, &endp, 0);
#endif
if (!(*val != '\0' && *endp == '\0' && errno == 0))
return false;
asan_shadow_offset_computed = true;
return true;
}
/* Set list of user-defined sections that need to be sanitized. */
void
set_sanitized_sections (const char *sections)
{
char *pat;
unsigned i;
FOR_EACH_VEC_ELT (sanitized_sections, i, pat)
free (pat);
sanitized_sections.truncate (0);
for (const char *s = sections; *s; )
{
const char *end;
for (end = s; *end && *end != ','; ++end);
size_t len = end - s;
sanitized_sections.safe_push (xstrndup (s, len));
s = *end ? end + 1 : end;
}
}
bool
asan_mark_p (gimple *stmt, enum asan_mark_flags flag)
{
return (gimple_call_internal_p (stmt, IFN_ASAN_MARK)
&& tree_to_uhwi (gimple_call_arg (stmt, 0)) == flag);
}
bool
asan_sanitize_stack_p (void)
{
return (sanitize_flags_p (SANITIZE_ADDRESS) && ASAN_STACK);
}
bool
asan_sanitize_allocas_p (void)
{
return (asan_sanitize_stack_p () && ASAN_PROTECT_ALLOCAS);
}
/* Checks whether section SEC should be sanitized. */
static bool
section_sanitized_p (const char *sec)
{
char *pat;
unsigned i;
FOR_EACH_VEC_ELT (sanitized_sections, i, pat)
if (fnmatch (pat, sec, FNM_PERIOD) == 0)
return true;
return false;
}
/* Returns Asan shadow offset. */
static unsigned HOST_WIDE_INT
asan_shadow_offset ()
{
if (!asan_shadow_offset_computed)
{
asan_shadow_offset_computed = true;
asan_shadow_offset_value = targetm.asan_shadow_offset ();
}
return asan_shadow_offset_value;
}
alias_set_type asan_shadow_set = -1;
/* Pointer types to 1, 2 or 4 byte integers in shadow memory. A separate
alias set is used for all shadow memory accesses. */
static GTY(()) tree shadow_ptr_types[3];
/* Decl for __asan_option_detect_stack_use_after_return. */
static GTY(()) tree asan_detect_stack_use_after_return;
/* Hashtable support for memory references used by gimple
statements. */
/* This type represents a reference to a memory region. */
struct asan_mem_ref
{
/* The expression of the beginning of the memory region. */
tree start;
/* The size of the access. */
HOST_WIDE_INT access_size;
};
object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref");
/* Initializes an instance of asan_mem_ref. */
static void
asan_mem_ref_init (asan_mem_ref *ref, tree start, HOST_WIDE_INT access_size)
{
ref->start = start;
ref->access_size = access_size;
}
/* Allocates memory for an instance of asan_mem_ref into the memory
pool returned by asan_mem_ref_get_alloc_pool and initializes it.
START is the address of (or the expression pointing to) the
beginning of memory reference. ACCESS_SIZE is the size of the
access to the referenced memory. */
static asan_mem_ref*
asan_mem_ref_new (tree start, HOST_WIDE_INT access_size)
{
asan_mem_ref *ref = asan_mem_ref_pool.allocate ();
asan_mem_ref_init (ref, start, access_size);
return ref;
}
/* This builds and returns a pointer to the end of the memory region
that starts at START and has length LEN. */
tree
asan_mem_ref_get_end (tree start, tree len)
{
if (len == NULL_TREE || integer_zerop (len))
return start;
if (!ptrofftype_p (len))
len = convert_to_ptrofftype (len);
return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (start), start, len);
}
/* Return a tree expression that represents the end of the referenced
memory region. Beware that this function can actually build a new
tree expression. */
tree
asan_mem_ref_get_end (const asan_mem_ref *ref, tree len)
{
return asan_mem_ref_get_end (ref->start, len);
}
struct asan_mem_ref_hasher : nofree_ptr_hash <asan_mem_ref>
{
static inline hashval_t hash (const asan_mem_ref *);
static inline bool equal (const asan_mem_ref *, const asan_mem_ref *);
};
/* Hash a memory reference. */
inline hashval_t
asan_mem_ref_hasher::hash (const asan_mem_ref *mem_ref)
{
return iterative_hash_expr (mem_ref->start, 0);
}
/* Compare two memory references. We accept the length of either
memory references to be NULL_TREE. */
inline bool
asan_mem_ref_hasher::equal (const asan_mem_ref *m1,
const asan_mem_ref *m2)
{
return operand_equal_p (m1->start, m2->start, 0);
}
static hash_table<asan_mem_ref_hasher> *asan_mem_ref_ht;
/* Returns a reference to the hash table containing memory references.
This function ensures that the hash table is created. Note that
this hash table is updated by the function
update_mem_ref_hash_table. */
static hash_table<asan_mem_ref_hasher> *
get_mem_ref_hash_table ()
{
if (!asan_mem_ref_ht)
asan_mem_ref_ht = new hash_table<asan_mem_ref_hasher> (10);
return asan_mem_ref_ht;
}
/* Clear all entries from the memory references hash table. */
static void
empty_mem_ref_hash_table ()
{
if (asan_mem_ref_ht)
asan_mem_ref_ht->empty ();
}
/* Free the memory references hash table. */
static void
free_mem_ref_resources ()
{
delete asan_mem_ref_ht;
asan_mem_ref_ht = NULL;
asan_mem_ref_pool.release ();
}
/* Return true iff the memory reference REF has been instrumented. */
static bool
has_mem_ref_been_instrumented (tree ref, HOST_WIDE_INT access_size)
{
asan_mem_ref r;
asan_mem_ref_init (&r, ref, access_size);
asan_mem_ref *saved_ref = get_mem_ref_hash_table ()->find (&r);
return saved_ref && saved_ref->access_size >= access_size;
}
/* Return true iff the memory reference REF has been instrumented. */
static bool
has_mem_ref_been_instrumented (const asan_mem_ref *ref)
{
return has_mem_ref_been_instrumented (ref->start, ref->access_size);
}
/* Return true iff access to memory region starting at REF and of
length LEN has been instrumented. */
static bool
has_mem_ref_been_instrumented (const asan_mem_ref *ref, tree len)
{
HOST_WIDE_INT size_in_bytes
= tree_fits_shwi_p (len) ? tree_to_shwi (len) : -1;
return size_in_bytes != -1
&& has_mem_ref_been_instrumented (ref->start, size_in_bytes);
}
/* Set REF to the memory reference present in a gimple assignment
ASSIGNMENT. Return true upon successful completion, false
otherwise. */
static bool
get_mem_ref_of_assignment (const gassign *assignment,
asan_mem_ref *ref,
bool *ref_is_store)
{
gcc_assert (gimple_assign_single_p (assignment));
if (gimple_store_p (assignment)
&& !gimple_clobber_p (assignment))
{
ref->start = gimple_assign_lhs (assignment);
*ref_is_store = true;
}
else if (gimple_assign_load_p (assignment))
{
ref->start = gimple_assign_rhs1 (assignment);
*ref_is_store = false;
}
else
return false;
ref->access_size = int_size_in_bytes (TREE_TYPE (ref->start));
return true;
}
/* Return address of last allocated dynamic alloca. */
static tree
get_last_alloca_addr ()
{
if (last_alloca_addr)
return last_alloca_addr;
last_alloca_addr = create_tmp_reg (ptr_type_node, "last_alloca_addr");
gassign *g = gimple_build_assign (last_alloca_addr, null_pointer_node);
edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
gsi_insert_on_edge_immediate (e, g);
return last_alloca_addr;
}
/* Insert __asan_allocas_unpoison (top, bottom) call after
__builtin_stack_restore (new_sp) call.
The pseudocode of this routine should look like this:
__builtin_stack_restore (new_sp);
top = last_alloca_addr;
bot = new_sp;
__asan_allocas_unpoison (top, bot);
last_alloca_addr = new_sp;
In general, we can't use new_sp as the bot parameter because on some
architectures SP has a non-zero offset from the dynamic stack area. Moreover,
on some architectures this offset (STACK_DYNAMIC_OFFSET) becomes known for
each particular function only after all callees have been expanded to rtl.
The most noticeable example is PowerPC{,64}, see
http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.html#DYNAM-STACK.
To overcome the issue we use the following trick: pass new_sp as a second
parameter to __asan_allocas_unpoison and rewrite it during expansion with
virtual_dynamic_stack_rtx later in the expand_asan_emit_allocas_unpoison
function. (A standalone sketch of what the unpoisoning conceptually does at
run time follows this function.)
*/
static void
handle_builtin_stack_restore (gcall *call, gimple_stmt_iterator *iter)
{
if (!iter || !asan_sanitize_allocas_p ())
return;
tree last_alloca = get_last_alloca_addr ();
tree restored_stack = gimple_call_arg (call, 0);
tree fn = builtin_decl_implicit (BUILT_IN_ASAN_ALLOCAS_UNPOISON);
gimple *g = gimple_build_call (fn, 2, last_alloca, restored_stack);
gsi_insert_after (iter, g, GSI_NEW_STMT);
g = gimple_build_assign (last_alloca, restored_stack);
gsi_insert_after (iter, g, GSI_NEW_STMT);
}
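/* A self-contained sketch (not used by the pass) of what the
   __asan_allocas_unpoison (top, bot) call emitted above conceptually does at
   run time: clear the shadow of the [top, bot) range, with TOP being the
   lowest alloca'd address recorded in last_alloca_addr and BOT the restored
   stack pointer, so that the stack memory released by
   __builtin_stack_restore becomes addressable again.  The real
   implementation lives in libsanitizer; the 0x7fff8000 shadow offset is an
   assumed x86_64 value.  */

static void
asan_allocas_unpoison_sketch (unsigned long top, unsigned long bot)
{
  const unsigned long shadow_offset = 0x7fff8000; /* Assumed x86_64 offset.  */
  if (top == 0 || top > bot)
    return;
  /* One shadow byte covers 8 bytes of application memory.  */
  for (unsigned long a = top; a < bot; a += 8)
    *(signed char *) ((a >> 3) + shadow_offset) = 0;
}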
/* Deploy and poison redzones around a __builtin_alloca call. To do this, we
replace the call with another one with changed parameters and
replace all its uses with the new address, so
addr = __builtin_alloca (old_size, align);
is replaced by
left_redzone_size = max (align, ASAN_RED_ZONE_SIZE);
The following two statements are optimized out if we know that
old_size & (ASAN_RED_ZONE_SIZE - 1) == 0, i.e. the alloca doesn't need a
partial redzone.
misalign = old_size & (ASAN_RED_ZONE_SIZE - 1);
partial_redzone_size = ASAN_RED_ZONE_SIZE - misalign;
right_redzone_size = ASAN_RED_ZONE_SIZE;
additional_size = left_redzone_size + partial_redzone_size +
right_redzone_size;
new_size = old_size + additional_size;
new_alloca = __builtin_alloca (new_size, max (align, 32))
__asan_alloca_poison (new_alloca, old_size)
addr = new_alloca + max (align, ASAN_RED_ZONE_SIZE);
last_alloca_addr = new_alloca;
ADDITIONAL_SIZE is added to make the new memory allocation contain not only
the requested memory, but also the left, partial and right redzones, as well
as some additional space required by alignment. (A standalone sketch of this
size arithmetic follows the function below.) */
static void
handle_builtin_alloca (gcall *call, gimple_stmt_iterator *iter)
{
if (!iter || !asan_sanitize_allocas_p ())
return;
gassign *g;
gcall *gg;
const HOST_WIDE_INT redzone_mask = ASAN_RED_ZONE_SIZE - 1;
tree last_alloca = get_last_alloca_addr ();
tree callee = gimple_call_fndecl (call);
tree old_size = gimple_call_arg (call, 0);
tree ptr_type = gimple_call_lhs (call) ? TREE_TYPE (gimple_call_lhs (call))
: ptr_type_node;
tree partial_size = NULL_TREE;
unsigned int align
= DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
? 0 : tree_to_uhwi (gimple_call_arg (call, 1));
/* If ALIGN > ASAN_RED_ZONE_SIZE, we embed left redzone into first ALIGN
bytes of allocated space. Otherwise, align alloca to ASAN_RED_ZONE_SIZE
manually. */
align = MAX (align, ASAN_RED_ZONE_SIZE * BITS_PER_UNIT);
tree alloca_rz_mask = build_int_cst (size_type_node, redzone_mask);
tree redzone_size = build_int_cst (size_type_node, ASAN_RED_ZONE_SIZE);
/* Extract lower bits from old_size. */
wide_int size_nonzero_bits = get_nonzero_bits (old_size);
wide_int rz_mask
= wi::uhwi (redzone_mask, wi::get_precision (size_nonzero_bits));
wide_int old_size_lower_bits = wi::bit_and (size_nonzero_bits, rz_mask);
/* If alloca size is aligned to ASAN_RED_ZONE_SIZE, we don't need partial
redzone. Otherwise, compute its size here. */
if (wi::ne_p (old_size_lower_bits, 0))
{
/* misalign = size & (ASAN_RED_ZONE_SIZE - 1)
partial_size = ASAN_RED_ZONE_SIZE - misalign. */
g = gimple_build_assign (make_ssa_name (size_type_node, NULL),
BIT_AND_EXPR, old_size, alloca_rz_mask);
gsi_insert_before (iter, g, GSI_SAME_STMT);
tree misalign = gimple_assign_lhs (g);
g = gimple_build_assign (make_ssa_name (size_type_node, NULL), MINUS_EXPR,
redzone_size, misalign);
gsi_insert_before (iter, g, GSI_SAME_STMT);
partial_size = gimple_assign_lhs (g);
}
/* additional_size = align + ASAN_RED_ZONE_SIZE. */
tree additional_size = build_int_cst (size_type_node, align / BITS_PER_UNIT
+ ASAN_RED_ZONE_SIZE);
/* If the alloca has a partial redzone, include it in additional_size too. */
if (partial_size)
{
/* additional_size += partial_size. */
g = gimple_build_assign (make_ssa_name (size_type_node), PLUS_EXPR,
partial_size, additional_size);
gsi_insert_before (iter, g, GSI_SAME_STMT);
additional_size = gimple_assign_lhs (g);
}
/* new_size = old_size + additional_size. */
g = gimple_build_assign (make_ssa_name (size_type_node), PLUS_EXPR, old_size,
additional_size);
gsi_insert_before (iter, g, GSI_SAME_STMT);
tree new_size = gimple_assign_lhs (g);
/* Build new __builtin_alloca call:
new_alloca_with_rz = __builtin_alloca (new_size, align). */
tree fn = builtin_decl_implicit (BUILT_IN_ALLOCA_WITH_ALIGN);
gg = gimple_build_call (fn, 2, new_size,
build_int_cst (size_type_node, align));
tree new_alloca_with_rz = make_ssa_name (ptr_type, gg);
gimple_call_set_lhs (gg, new_alloca_with_rz);
gsi_insert_before (iter, gg, GSI_SAME_STMT);
/* new_alloca = new_alloca_with_rz + align. */
g = gimple_build_assign (make_ssa_name (ptr_type), POINTER_PLUS_EXPR,
new_alloca_with_rz,
build_int_cst (size_type_node,
align / BITS_PER_UNIT));
gsi_insert_before (iter, g, GSI_SAME_STMT);
tree new_alloca = gimple_assign_lhs (g);
/* Poison newly created alloca redzones:
__asan_alloca_poison (new_alloca, old_size). */
fn = builtin_decl_implicit (BUILT_IN_ASAN_ALLOCA_POISON);
gg = gimple_build_call (fn, 2, new_alloca, old_size);
gsi_insert_before (iter, gg, GSI_SAME_STMT);
/* Save new_alloca_with_rz value into last_alloca to use it during
allocas unpoisoning. */
g = gimple_build_assign (last_alloca, new_alloca_with_rz);
gsi_insert_before (iter, g, GSI_SAME_STMT);
/* Finally, replace old alloca ptr with NEW_ALLOCA. */
replace_call_with_value (iter, new_alloca);
}
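/* A standalone sketch (not used by the pass) of the size arithmetic that
   handle_builtin_alloca above builds in GIMPLE, assuming
   ASAN_RED_ZONE_SIZE == 32 and BITS_PER_UNIT == 8.  Given the user-requested
   size and alignment (in bits), it returns the size passed to the rewritten
   __builtin_alloca call.  */

static unsigned long
asan_alloca_new_size_sketch (unsigned long old_size, unsigned align_bits)
{
  const unsigned long rz = 32;		/* Assumed ASAN_RED_ZONE_SIZE.  */
  unsigned long align = align_bits / 8;	/* Assumed BITS_PER_UNIT == 8.  */
  if (align < rz)
    align = rz;		/* The left redzone occupies the first ALIGN bytes.  */
  unsigned long misalign = old_size & (rz - 1);
  /* The partial redzone pads OLD_SIZE up to a multiple of ASAN_RED_ZONE_SIZE;
     it disappears when OLD_SIZE is already aligned.  */
  unsigned long partial = misalign ? rz - misalign : 0;
  /* new_size = old_size + left + partial + right redzones.  */
  return old_size + align + partial + rz;
}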
/* Return the memory references contained in a gimple statement
representing a builtin call that has to do with memory access. */
static bool
get_mem_refs_of_builtin_call (gcall *call,
asan_mem_ref *src0,
tree *src0_len,
bool *src0_is_store,
asan_mem_ref *src1,
tree *src1_len,
bool *src1_is_store,
asan_mem_ref *dst,
tree *dst_len,
bool *dst_is_store,
bool *dest_is_deref,
bool *intercepted_p,
gimple_stmt_iterator *iter = NULL)
{
gcc_checking_assert (gimple_call_builtin_p (call, BUILT_IN_NORMAL));
tree callee = gimple_call_fndecl (call);
tree source0 = NULL_TREE, source1 = NULL_TREE,
dest = NULL_TREE, len = NULL_TREE;
bool is_store = true, got_reference_p = false;
HOST_WIDE_INT access_size = 1;
*intercepted_p = asan_intercepted_p ((DECL_FUNCTION_CODE (callee)));
switch (DECL_FUNCTION_CODE (callee))
{
/* (s, s, n) style memops. */
case BUILT_IN_BCMP:
case BUILT_IN_MEMCMP:
source0 = gimple_call_arg (call, 0);
source1 = gimple_call_arg (call, 1);
len = gimple_call_arg (call, 2);
break;
/* (src, dest, n) style memops. */
case BUILT_IN_BCOPY:
source0 = gimple_call_arg (call, 0);
dest = gimple_call_arg (call, 1);
len = gimple_call_arg (call, 2);
break;
/* (dest, src, n) style memops. */
case BUILT_IN_MEMCPY:
case BUILT_IN_MEMCPY_CHK:
case BUILT_IN_MEMMOVE:
case BUILT_IN_MEMMOVE_CHK:
case BUILT_IN_MEMPCPY:
case BUILT_IN_MEMPCPY_CHK:
dest = gimple_call_arg (call, 0);
source0 = gimple_call_arg (call, 1);
len = gimple_call_arg (call, 2);
break;
/* (dest, n) style memops. */
case BUILT_IN_BZERO:
dest = gimple_call_arg (call, 0);
len = gimple_call_arg (call, 1);
break;
/* (dest, x, n) style memops. */
case BUILT_IN_MEMSET:
case BUILT_IN_MEMSET_CHK:
dest = gimple_call_arg (call, 0);
len = gimple_call_arg (call, 2);
break;
case BUILT_IN_STRLEN:
source0 = gimple_call_arg (call, 0);
len = gimple_call_lhs (call);
break;
case BUILT_IN_STACK_RESTORE:
handle_builtin_stack_restore (call, iter);
break;
CASE_BUILT_IN_ALLOCA:
handle_builtin_alloca (call, iter);
break;
/* And now the __atomic* and __sync builtins.
These are handled differently from the classical memory
access builtins above. */
case BUILT_IN_ATOMIC_LOAD_1:
is_store = false;
/* FALLTHRU */
case BUILT_IN_SYNC_FETCH_AND_ADD_1:
case BUILT_IN_SYNC_FETCH_AND_SUB_1:
case BUILT_IN_SYNC_FETCH_AND_OR_1:
case BUILT_IN_SYNC_FETCH_AND_AND_1:
case BUILT_IN_SYNC_FETCH_AND_XOR_1:
case BUILT_IN_SYNC_FETCH_AND_NAND_1:
case BUILT_IN_SYNC_ADD_AND_FETCH_1:
case BUILT_IN_SYNC_SUB_AND_FETCH_1:
case BUILT_IN_SYNC_OR_AND_FETCH_1:
case BUILT_IN_SYNC_AND_AND_FETCH_1:
case BUILT_IN_SYNC_XOR_AND_FETCH_1:
case BUILT_IN_SYNC_NAND_AND_FETCH_1:
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1:
case BUILT_IN_SYNC_LOCK_TEST_AND_SET_1:
case BUILT_IN_SYNC_LOCK_RELEASE_1:
case BUILT_IN_ATOMIC_EXCHANGE_1:
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
case BUILT_IN_ATOMIC_STORE_1:
case BUILT_IN_ATOMIC_ADD_FETCH_1:
case BUILT_IN_ATOMIC_SUB_FETCH_1:
case BUILT_IN_ATOMIC_AND_FETCH_1:
case BUILT_IN_ATOMIC_NAND_FETCH_1:
case BUILT_IN_ATOMIC_XOR_FETCH_1:
case BUILT_IN_ATOMIC_OR_FETCH_1:
case BUILT_IN_ATOMIC_FETCH_ADD_1:
case BUILT_IN_ATOMIC_FETCH_SUB_1:
case BUILT_IN_ATOMIC_FETCH_AND_1:
case BUILT_IN_ATOMIC_FETCH_NAND_1:
case BUILT_IN_ATOMIC_FETCH_XOR_1:
case BUILT_IN_ATOMIC_FETCH_OR_1:
access_size = 1;
goto do_atomic;
case BUILT_IN_ATOMIC_LOAD_2:
is_store = false;
/* FALLTHRU */
case BUILT_IN_SYNC_FETCH_AND_ADD_2:
case BUILT_IN_SYNC_FETCH_AND_SUB_2:
case BUILT_IN_SYNC_FETCH_AND_OR_2:
case BUILT_IN_SYNC_FETCH_AND_AND_2:
case BUILT_IN_SYNC_FETCH_AND_XOR_2:
case BUILT_IN_SYNC_FETCH_AND_NAND_2:
case BUILT_IN_SYNC_ADD_AND_FETCH_2:
case BUILT_IN_SYNC_SUB_AND_FETCH_2:
case BUILT_IN_SYNC_OR_AND_FETCH_2:
case BUILT_IN_SYNC_AND_AND_FETCH_2:
case BUILT_IN_SYNC_XOR_AND_FETCH_2:
case BUILT_IN_SYNC_NAND_AND_FETCH_2:
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2:
case BUILT_IN_SYNC_LOCK_TEST_AND_SET_2:
case BUILT_IN_SYNC_LOCK_RELEASE_2:
case BUILT_IN_ATOMIC_EXCHANGE_2:
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
case BUILT_IN_ATOMIC_STORE_2:
case BUILT_IN_ATOMIC_ADD_FETCH_2:
case BUILT_IN_ATOMIC_SUB_FETCH_2:
case BUILT_IN_ATOMIC_AND_FETCH_2:
case BUILT_IN_ATOMIC_NAND_FETCH_2:
case BUILT_IN_ATOMIC_XOR_FETCH_2:
case BUILT_IN_ATOMIC_OR_FETCH_2:
case BUILT_IN_ATOMIC_FETCH_ADD_2:
case BUILT_IN_ATOMIC_FETCH_SUB_2:
case BUILT_IN_ATOMIC_FETCH_AND_2:
case BUILT_IN_ATOMIC_FETCH_NAND_2:
case BUILT_IN_ATOMIC_FETCH_XOR_2:
case BUILT_IN_ATOMIC_FETCH_OR_2:
access_size = 2;
goto do_atomic;
case BUILT_IN_ATOMIC_LOAD_4:
is_store = false;
/* FALLTHRU */
case BUILT_IN_SYNC_FETCH_AND_ADD_4:
case BUILT_IN_SYNC_FETCH_AND_SUB_4:
case BUILT_IN_SYNC_FETCH_AND_OR_4:
case BUILT_IN_SYNC_FETCH_AND_AND_4:
case BUILT_IN_SYNC_FETCH_AND_XOR_4:
case BUILT_IN_SYNC_FETCH_AND_NAND_4:
case BUILT_IN_SYNC_ADD_AND_FETCH_4:
case BUILT_IN_SYNC_SUB_AND_FETCH_4:
case BUILT_IN_SYNC_OR_AND_FETCH_4:
case BUILT_IN_SYNC_AND_AND_FETCH_4:
case BUILT_IN_SYNC_XOR_AND_FETCH_4:
case BUILT_IN_SYNC_NAND_AND_FETCH_4:
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4:
case BUILT_IN_SYNC_LOCK_TEST_AND_SET_4:
case BUILT_IN_SYNC_LOCK_RELEASE_4:
case BUILT_IN_ATOMIC_EXCHANGE_4:
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
case BUILT_IN_ATOMIC_STORE_4:
case BUILT_IN_ATOMIC_ADD_FETCH_4:
case BUILT_IN_ATOMIC_SUB_FETCH_4:
case BUILT_IN_ATOMIC_AND_FETCH_4:
case BUILT_IN_ATOMIC_NAND_FETCH_4:
case BUILT_IN_ATOMIC_XOR_FETCH_4:
case BUILT_IN_ATOMIC_OR_FETCH_4:
case BUILT_IN_ATOMIC_FETCH_ADD_4:
case BUILT_IN_ATOMIC_FETCH_SUB_4:
case BUILT_IN_ATOMIC_FETCH_AND_4:
case BUILT_IN_ATOMIC_FETCH_NAND_4:
case BUILT_IN_ATOMIC_FETCH_XOR_4:
case BUILT_IN_ATOMIC_FETCH_OR_4:
access_size = 4;
goto do_atomic;
case BUILT_IN_ATOMIC_LOAD_8:
is_store = false;
/* FALLTHRU */
case BUILT_IN_SYNC_FETCH_AND_ADD_8:
case BUILT_IN_SYNC_FETCH_AND_SUB_8:
case BUILT_IN_SYNC_FETCH_AND_OR_8:
case BUILT_IN_SYNC_FETCH_AND_AND_8:
case BUILT_IN_SYNC_FETCH_AND_XOR_8:
case BUILT_IN_SYNC_FETCH_AND_NAND_8:
case BUILT_IN_SYNC_ADD_AND_FETCH_8:
case BUILT_IN_SYNC_SUB_AND_FETCH_8:
case BUILT_IN_SYNC_OR_AND_FETCH_8:
case BUILT_IN_SYNC_AND_AND_FETCH_8:
case BUILT_IN_SYNC_XOR_AND_FETCH_8:
case BUILT_IN_SYNC_NAND_AND_FETCH_8:
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8:
case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8:
case BUILT_IN_SYNC_LOCK_RELEASE_8:
case BUILT_IN_ATOMIC_EXCHANGE_8:
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
case BUILT_IN_ATOMIC_STORE_8:
case BUILT_IN_ATOMIC_ADD_FETCH_8:
case BUILT_IN_ATOMIC_SUB_FETCH_8:
case BUILT_IN_ATOMIC_AND_FETCH_8:
case BUILT_IN_ATOMIC_NAND_FETCH_8:
case BUILT_IN_ATOMIC_XOR_FETCH_8:
case BUILT_IN_ATOMIC_OR_FETCH_8:
case BUILT_IN_ATOMIC_FETCH_ADD_8:
case BUILT_IN_ATOMIC_FETCH_SUB_8:
case BUILT_IN_ATOMIC_FETCH_AND_8:
case BUILT_IN_ATOMIC_FETCH_NAND_8:
case BUILT_IN_ATOMIC_FETCH_XOR_8:
case BUILT_IN_ATOMIC_FETCH_OR_8:
access_size = 8;
goto do_atomic;
case BUILT_IN_ATOMIC_LOAD_16:
is_store = false;
/* FALLTHRU */
case BUILT_IN_SYNC_FETCH_AND_ADD_16:
case BUILT_IN_SYNC_FETCH_AND_SUB_16:
case BUILT_IN_SYNC_FETCH_AND_OR_16:
case BUILT_IN_SYNC_FETCH_AND_AND_16:
case BUILT_IN_SYNC_FETCH_AND_XOR_16:
case BUILT_IN_SYNC_FETCH_AND_NAND_16:
case BUILT_IN_SYNC_ADD_AND_FETCH_16:
case BUILT_IN_SYNC_SUB_AND_FETCH_16:
case BUILT_IN_SYNC_OR_AND_FETCH_16:
case BUILT_IN_SYNC_AND_AND_FETCH_16:
case BUILT_IN_SYNC_XOR_AND_FETCH_16:
case BUILT_IN_SYNC_NAND_AND_FETCH_16:
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16:
case BUILT_IN_SYNC_LOCK_TEST_AND_SET_16:
case BUILT_IN_SYNC_LOCK_RELEASE_16:
case BUILT_IN_ATOMIC_EXCHANGE_16:
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
case BUILT_IN_ATOMIC_STORE_16:
case BUILT_IN_ATOMIC_ADD_FETCH_16:
case BUILT_IN_ATOMIC_SUB_FETCH_16:
case BUILT_IN_ATOMIC_AND_FETCH_16:
case BUILT_IN_ATOMIC_NAND_FETCH_16:
case BUILT_IN_ATOMIC_XOR_FETCH_16:
case BUILT_IN_ATOMIC_OR_FETCH_16:
case BUILT_IN_ATOMIC_FETCH_ADD_16:
case BUILT_IN_ATOMIC_FETCH_SUB_16:
case BUILT_IN_ATOMIC_FETCH_AND_16:
case BUILT_IN_ATOMIC_FETCH_NAND_16:
case BUILT_IN_ATOMIC_FETCH_XOR_16:
case BUILT_IN_ATOMIC_FETCH_OR_16:
access_size = 16;
/* FALLTHRU */
do_atomic:
{
dest = gimple_call_arg (call, 0);
/* DEST represents the address of a memory location.
instrument_derefs wants the memory location, so let's
dereference the address DEST before handing it to
instrument_derefs. */
tree type = build_nonstandard_integer_type (access_size
* BITS_PER_UNIT, 1);
dest = build2 (MEM_REF, type, dest,
build_int_cst (build_pointer_type (char_type_node), 0));
break;
}
default: