// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
* Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
*/
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include "disasm.h"
static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
/* bpf_check() is a static code analyzer that walks eBPF program
* instruction by instruction and updates register/stack state.
* All paths of conditional branches are analyzed until 'bpf_exit' insn.
*
* The first pass is depth-first-search to check that the program is a DAG.
* It rejects the following programs:
* - larger than BPF_MAXINSNS insns
* - if loop is present (detected via back-edge)
* - unreachable insns exist (shouldn't be a forest. program = one function)
* - out of bounds or malformed jumps
* The second pass is all possible path descent from the 1st insn.
* Since it's analyzing all paths through the program, the length of the
* analysis is limited to 64k insn, which may be hit even if the total number of
* insn is less than 4K, but there are too many branches that change stack/regs.
* Number of 'branches to be analyzed' is limited to 1k
*
* On entry to each instruction, each register has a type, and the instruction
* changes the types of the registers depending on instruction semantics.
* If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
* copied to R1.
*
* All registers are 64-bit.
* R0 - return register
* R1-R5 argument passing registers
* R6-R9 callee saved registers
* R10 - frame pointer read-only
*
* At the start of BPF program the register R1 contains a pointer to bpf_context
* and has type PTR_TO_CTX.
*
* Verifier tracks arithmetic operations on pointers in case:
* BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
* 1st insn copies R10 (which has FRAME_PTR) type into R1
* and 2nd arithmetic instruction is pattern matched to recognize
* that it wants to construct a pointer to some element within stack.
* So after 2nd insn, the register R1 has type PTR_TO_STACK
* (and -20 constant is saved for further stack bounds checking).
* Meaning that this reg is a pointer to stack plus known immediate constant.
*
* Most of the time the registers have SCALAR_VALUE type, which
* means the register has some value, but it's not a valid pointer.
* (like pointer plus pointer becomes SCALAR_VALUE type)
*
* When verifier sees load or store instructions the type of base register
* can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
* the four pointer types recognized by the check_mem_access() function.
*
* PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
* and the range of [ptr, ptr + map's value_size) is accessible.
*
* Registers used to pass values to function calls are checked against
* function argument constraints.
*
* ARG_PTR_TO_MAP_KEY is one of such argument constraints.
* It means that the register type passed to this function must be
* PTR_TO_STACK and it will be used inside the function as
* 'pointer to map element key'
*
* For example the argument constraints for bpf_map_lookup_elem():
* .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
* .arg1_type = ARG_CONST_MAP_PTR,
* .arg2_type = ARG_PTR_TO_MAP_KEY,
*
* ret_type says that this function returns 'pointer to map elem value or null',
* the function expects the 1st argument to be a const pointer to 'struct bpf_map' and
* the 2nd argument to be a pointer to the stack, which will be used inside
* the helper function as a pointer to the map element key.
*
* On the kernel side the helper function looks like:
* u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
* {
* struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
* void *key = (void *) (unsigned long) r2;
* void *value;
*
* here kernel can access 'key' and 'map' pointers safely, knowing that
* [key, key + map->key_size) bytes are valid and were initialized on
* the stack of eBPF program.
* }
*
* Corresponding eBPF program may look like:
* BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
* BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
* BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
* here verifier looks at prototype of map_lookup_elem() and sees:
* .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
* Now verifier knows that this map has key of R1->map_ptr->key_size bytes
*
* Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
* Now verifier checks that [R2, R2 + map's key_size) are within stack limits
* and were initialized prior to this call.
* If it's ok, then verifier allows this BPF_CALL insn and looks at
* .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
* R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
* returns either a pointer to map value or NULL.
*
* When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
* insn, the register holding that pointer in the true branch changes state to
* PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
* branch. See check_cond_jmp_op().
*
* After the call R0 is set to return type of the function and registers R1-R5
* are set to NOT_INIT to indicate that they are no longer readable.
*
* The following reference types represent a potential reference to a kernel
* resource which, after first being allocated, must be checked and freed by
* the BPF program:
* - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
*
* When the verifier sees a helper call return a reference type, it allocates a
* pointer id for the reference and stores it in the current function state.
* Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
* PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
* passes through a NULL-check conditional. For the branch wherein the state is
* changed to CONST_IMM, the verifier releases the reference.
*
* For each helper function that allocates a reference, such as
* bpf_sk_lookup_tcp(), there is a corresponding release function, such as
* bpf_sk_release(). When a reference type passes into the release function,
* the verifier also releases the reference. If any unchecked or unreleased
* reference remains at the end of the program, the verifier rejects it.
*/
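/* For illustration only (a sketch; the fp-48 tuple location, the program
 * type, and R1 still holding the context from program entry are
 * assumptions): a minimal sequence exercising the reference tracking
 * described above, assuming a struct bpf_sock_tuple (36 bytes) was
 * initialized at fp-48 beforehand:
 *
 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),    // R2 type is FRAME_PTR
 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),  // R2 type is PTR_TO_STACK
 * BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
 * BPF_MOV64_IMM(BPF_REG_4, BPF_F_CURRENT_NETNS),
 * BPF_MOV64_IMM(BPF_REG_5, 0),
 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 * now R0 is PTR_TO_SOCKET_OR_NULL with a freshly allocated ref_obj_id
 * BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),   // NULL branch: verifier drops the ref
 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),     // non-NULL branch: R1 is PTR_TO_SOCKET
 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 * BPF_MOV64_IMM(BPF_REG_0, 0),
 * BPF_EXIT_INSN(),
 * Omitting the bpf_sk_release() call on the non-NULL path would leave an
 * unreleased reference and the program would be rejected.
 */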
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
/* verifier state is 'st'
* before processing instruction 'insn_idx'
* and after processing instruction 'prev_insn_idx'
*/
struct bpf_verifier_state st;
int insn_idx;
int prev_insn_idx;
struct bpf_verifier_stack_elem *next;
};
#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
#define BPF_COMPLEXITY_LIMIT_STATES 64
#define BPF_MAP_KEY_POISON (1ULL << 63)
#define BPF_MAP_KEY_SEEN (1ULL << 62)
#define BPF_MAP_PTR_UNPRIV 1UL
#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
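/* map_ptr_state packs a struct bpf_map pointer together with a flag in
 * bit 0 (BPF_MAP_PTR_UNPRIV); pointer alignment keeps that bit otherwise
 * unused. BPF_MAP_PTR_POISON is a sentinel stored when no single valid
 * map pointer can be recorded for the instruction. The helpers below read
 * and update this packed value.
 */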
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}
static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}
static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
const struct bpf_map *map, bool unpriv)
{
BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
unpriv |= bpf_map_ptr_unpriv(aux);
aux->map_ptr_state = (unsigned long)map |
(unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}
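/* map_key_state is packed similarly: the two topmost bits are flags
 * (BPF_MAP_KEY_SEEN - a key has been recorded for this instruction at
 * least once, BPF_MAP_KEY_POISON - no single constant key applies) and
 * the remaining bits hold the constant key value returned by
 * bpf_map_key_immediate().
 */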
static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
return aux->map_key_state & BPF_MAP_KEY_POISON;
}
static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}
static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}
static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
bool poisoned = bpf_map_key_poisoned(aux);
aux->map_key_state = state | BPF_MAP_KEY_SEEN |
(poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
bool pkt_access;
int regno;
int access_size;
s64 msize_smax_value;
u64 msize_umax_value;
int ref_obj_id;
int func_id;
u32 btf_id;
};
struct btf *btf_vmlinux;
static DEFINE_MUTEX(bpf_verifier_lock);
static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
const struct bpf_line_info *linfo;
const struct bpf_prog *prog;
u32 i, nr_linfo;
prog = env->prog;
nr_linfo = prog->aux->nr_linfo;
if (!nr_linfo || insn_off >= prog->len)
return NULL;
linfo = prog->aux->linfo;
for (i = 1; i < nr_linfo; i++)
if (insn_off < linfo[i].insn_off)
break;
return &linfo[i - 1];
}
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
va_list args)
{
unsigned int n;
n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
"verifier log line truncated - local buffer too short\n");
n = min(log->len_total - log->len_used - 1, n);
log->kbuf[n] = '\0';
if (log->level == BPF_LOG_KERNEL) {
pr_err("BPF:%s\n", log->kbuf);
return;
}
if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
log->len_used += n;
else
log->ubuf = NULL;
}
/* log_level controls verbosity level of eBPF verifier.
* bpf_verifier_log_write() is used to dump the verification trace to the log,
* so the user can figure out what's wrong with the program
*/
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...)
{
va_list args;
if (!bpf_verifier_log_needed(&env->log))
return;
va_start(args, fmt);
bpf_verifier_vlog(&env->log, fmt, args);
va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
struct bpf_verifier_env *env = private_data;
va_list args;
if (!bpf_verifier_log_needed(&env->log))
return;
va_start(args, fmt);
bpf_verifier_vlog(&env->log, fmt, args);
va_end(args);
}
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
const char *fmt, ...)
{
va_list args;
if (!bpf_verifier_log_needed(log))
return;
va_start(args, fmt);
bpf_verifier_vlog(log, fmt, args);
va_end(args);
}
static const char *ltrim(const char *s)
{
while (isspace(*s))
s++;
return s;
}
__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...)
{
const struct bpf_line_info *linfo;
if (!bpf_verifier_log_needed(&env->log))
return;
linfo = find_linfo(env, insn_off);
if (!linfo || linfo == env->prev_linfo)
return;
if (prefix_fmt) {
va_list args;
va_start(args, prefix_fmt);
bpf_verifier_vlog(&env->log, prefix_fmt, args);
va_end(args);
}
verbose(env, "%s\n",
ltrim(btf_name_by_offset(env->prog->aux->btf,
linfo->line_off)));
env->prev_linfo = linfo;
}
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_PACKET ||
type == PTR_TO_PACKET_META;
}
static bool type_is_sk_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET ||
type == PTR_TO_SOCK_COMMON ||
type == PTR_TO_TCP_SOCK ||
type == PTR_TO_XDP_SOCK;
}
static bool reg_type_may_be_null(enum bpf_reg_type type)
{
return type == PTR_TO_MAP_VALUE_OR_NULL ||
type == PTR_TO_SOCKET_OR_NULL ||
type == PTR_TO_SOCK_COMMON_OR_NULL ||
type == PTR_TO_TCP_SOCK_OR_NULL;
}
static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
return reg->type == PTR_TO_MAP_VALUE &&
map_value_has_spin_lock(reg->map_ptr);
}
static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET ||
type == PTR_TO_SOCKET_OR_NULL ||
type == PTR_TO_TCP_SOCK ||
type == PTR_TO_TCP_SOCK_OR_NULL;
}
static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_SOCK_COMMON;
}
/* Determine whether the function releases some resources allocated by another
* function call. The first reference type argument will be assumed to be
* released by release_reference().
*/
static bool is_release_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_release;
}
static bool is_acquire_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_lookup_tcp ||
func_id == BPF_FUNC_sk_lookup_udp ||
func_id == BPF_FUNC_skc_lookup_tcp;
}
static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_tcp_sock ||
func_id == BPF_FUNC_sk_fullsock;
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
[SCALAR_VALUE] = "inv",
[PTR_TO_CTX] = "ctx",
[CONST_PTR_TO_MAP] = "map_ptr",
[PTR_TO_MAP_VALUE] = "map_value",
[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
[PTR_TO_STACK] = "fp",
[PTR_TO_PACKET] = "pkt",
[PTR_TO_PACKET_META] = "pkt_meta",
[PTR_TO_PACKET_END] = "pkt_end",
[PTR_TO_FLOW_KEYS] = "flow_keys",
[PTR_TO_SOCKET] = "sock",
[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
[PTR_TO_SOCK_COMMON] = "sock_common",
[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
[PTR_TO_TCP_SOCK] = "tcp_sock",
[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
[PTR_TO_TP_BUFFER] = "tp_buffer",
[PTR_TO_XDP_SOCK] = "xdp_sock",
[PTR_TO_BTF_ID] = "ptr_",
};
static char slot_type_char[] = {
[STACK_INVALID] = '?',
[STACK_SPILL] = 'r',
[STACK_MISC] = 'm',
[STACK_ZERO] = '0',
};
static void print_liveness(struct bpf_verifier_env *env,
enum bpf_reg_liveness live)
{
if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
verbose(env, "_");
if (live & REG_LIVE_READ)
verbose(env, "r");
if (live & REG_LIVE_WRITTEN)
verbose(env, "w");
if (live & REG_LIVE_DONE)
verbose(env, "D");
}
static struct bpf_func_state *func(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg)
{
struct bpf_verifier_state *cur = env->cur_state;
return cur->frame[reg->frameno];
}
const char *kernel_type_name(u32 id)
{
return btf_name_by_offset(btf_vmlinux,
btf_type_by_id(btf_vmlinux, id)->name_off);
}
static void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state)
{
const struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
if (state->frameno)
verbose(env, " frame%d:", state->frameno);
for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
t = reg->type;
if (t == NOT_INIT)
continue;
verbose(env, " R%d", i);
print_liveness(env, reg->live);
verbose(env, "=%s", reg_type_str[t]);
if (t == SCALAR_VALUE && reg->precise)
verbose(env, "P");
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
} else {
if (t == PTR_TO_BTF_ID)
verbose(env, "%s", kernel_type_name(reg->btf_id));
verbose(env, "(id=%d", reg->id);
if (reg_type_may_be_refcounted_or_null(t))
verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
if (t != SCALAR_VALUE)
verbose(env, ",off=%d", reg->off);
if (type_is_pkt_pointer(t))
verbose(env, ",r=%d", reg->range);
else if (t == CONST_PTR_TO_MAP ||
t == PTR_TO_MAP_VALUE ||
t == PTR_TO_MAP_VALUE_OR_NULL)
verbose(env, ",ks=%d,vs=%d",
reg->map_ptr->key_size,
reg->map_ptr->value_size);
if (tnum_is_const(reg->var_off)) {
/* Typically an immediate SCALAR_VALUE, but
* could be a pointer whose offset is too big
* for reg->off
*/
verbose(env, ",imm=%llx", reg->var_off.value);
} else {
if (reg->smin_value != reg->umin_value &&
reg->smin_value != S64_MIN)
verbose(env, ",smin_value=%lld",
(long long)reg->smin_value);
if (reg->smax_value != reg->umax_value &&
reg->smax_value != S64_MAX)
verbose(env, ",smax_value=%lld",
(long long)reg->smax_value);
if (reg->umin_value != 0)
verbose(env, ",umin_value=%llu",
(unsigned long long)reg->umin_value);
if (reg->umax_value != U64_MAX)
verbose(env, ",umax_value=%llu",
(unsigned long long)reg->umax_value);
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, ",var_off=%s", tn_buf);
}
}
verbose(env, ")");
}
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
char types_buf[BPF_REG_SIZE + 1];
bool valid = false;
int j;
for (j = 0; j < BPF_REG_SIZE; j++) {
if (state->stack[i].slot_type[j] != STACK_INVALID)
valid = true;
types_buf[j] = slot_type_char[
state->stack[i].slot_type[j]];
}
types_buf[BPF_REG_SIZE] = 0;
if (!valid)
continue;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, state->stack[i].spilled_ptr.live);
if (state->stack[i].slot_type[0] == STACK_SPILL) {
reg = &state->stack[i].spilled_ptr;
t = reg->type;
verbose(env, "=%s", reg_type_str[t]);
if (t == SCALAR_VALUE && reg->precise)
verbose(env, "P");
if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
verbose(env, "%lld", reg->var_off.value + reg->off);
} else {
verbose(env, "=%s", types_buf);
}
}
if (state->acquired_refs && state->refs[0].id) {
verbose(env, " refs=%d", state->refs[0].id);
for (i = 1; i < state->acquired_refs; i++)
if (state->refs[i].id)
verbose(env, ",%d", state->refs[i].id);
}
verbose(env, "\n");
}
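/* For reference, a line printed by print_verifier_state() for a simple
 * state might look like (illustrative, not an exhaustive format):
 *
 *  R0_w=inv0 R1=ctx(id=0,off=0,imm=0) R6_w=map_value(id=0,off=0,ks=4,vs=8,imm=0) R10=fp0 fp-8=mmmmmmmm
 *
 * i.e. register number, liveness marks from print_liveness(), the type
 * string from reg_type_str[], then either a constant value or the
 * parenthesized (id, off, range/key size, bounds, var_off) details,
 * followed by one slot_type_char per byte for each in-use stack slot.
 */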
#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
static int copy_##NAME##_state(struct bpf_func_state *dst, \
const struct bpf_func_state *src) \
{ \
if (!src->FIELD) \
return 0; \
if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
/* internal bug, make state invalid to reject the program */ \
memset(dst, 0, sizeof(*dst)); \
return -EFAULT; \
} \
memcpy(dst->FIELD, src->FIELD, \
sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
return 0; \
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN
#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
bool copy_old) \
{ \
u32 old_size = state->COUNT; \
struct bpf_##NAME##_state *new_##FIELD; \
int slot = size / SIZE; \
\
if (size <= old_size || !size) { \
if (copy_old) \
return 0; \
state->COUNT = slot * SIZE; \
if (!size && old_size) { \
kfree(state->FIELD); \
state->FIELD = NULL; \
} \
return 0; \
} \
new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
GFP_KERNEL); \
if (!new_##FIELD) \
return -ENOMEM; \
if (copy_old) { \
if (state->FIELD) \
memcpy(new_##FIELD, state->FIELD, \
sizeof(*new_##FIELD) * (old_size / SIZE)); \
memset(new_##FIELD + old_size / SIZE, 0, \
sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
} \
state->COUNT = slot * SIZE; \
kfree(state->FIELD); \
state->FIELD = new_##FIELD; \
return 0; \
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN
/* do_check() starts with a zero-sized stack in struct bpf_verifier_state to
* make it consume a minimal amount of memory. A stack write from the program,
* handled by check_stack_write(), calls into realloc_func_state() to grow the
* stack size.
* Note there is a non-zero 'parent' pointer inside bpf_verifier_state
* which realloc_stack_state() copies over. It points to the previous
* bpf_verifier_state, which is never reallocated.
*/
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
int refs_size, bool copy_old)
{
int err = realloc_reference_state(state, refs_size, copy_old);
if (err)
return err;
return realloc_stack_state(state, stack_size, copy_old);
}
/* Acquire a pointer id from the env and update the state->refs to include
* this new pointer reference.
* On success, returns a valid pointer id to associate with the register.
* On failure, returns a negative errno.
*/
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_func_state *state = cur_func(env);
int new_ofs = state->acquired_refs;
int id, err;
err = realloc_reference_state(state, state->acquired_refs + 1, true);
if (err)
return err;
id = ++env->id_gen;
state->refs[new_ofs].id = id;
state->refs[new_ofs].insn_idx = insn_idx;
return id;
}
/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
int i, last_idx;
last_idx = state->acquired_refs - 1;
for (i = 0; i < state->acquired_refs; i++) {
if (state->refs[i].id == ptr_id) {
if (last_idx && i != last_idx)
memcpy(&state->refs[i], &state->refs[last_idx],
sizeof(*state->refs));
memset(&state->refs[last_idx], 0, sizeof(*state->refs));
state->acquired_refs--;
return 0;
}
}
return -EINVAL;
}
static int transfer_reference_state(struct bpf_func_state *dst,
struct bpf_func_state *src)
{
int err = realloc_reference_state(dst, src->acquired_refs, false);
if (err)
return err;
err = copy_reference_state(dst, src);
if (err)
return err;
return 0;
}
static void free_func_state(struct bpf_func_state *state)
{
if (!state)
return;
kfree(state->refs);
kfree(state->stack);
kfree(state);
}
static void clear_jmp_history(struct bpf_verifier_state *state)
{
kfree(state->jmp_history);
state->jmp_history = NULL;
state->jmp_history_cnt = 0;
}
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{
int i;
for (i = 0; i <= state->curframe; i++) {
free_func_state(state->frame[i]);
state->frame[i] = NULL;
}
clear_jmp_history(state);
if (free_self)
kfree(state);
}
/* copy verifier state from src to dst growing dst stack space
* when necessary to accommodate larger src stack
*/
static int copy_func_state(struct bpf_func_state *dst,
const struct bpf_func_state *src)
{
int err;
err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
false);
if (err)
return err;
memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
err = copy_reference_state(dst, src);
if (err)
return err;
return copy_stack_state(dst, src);
}
static int copy_verifier_state(struct bpf_verifier_state *dst_state,
const struct bpf_verifier_state *src)
{
struct bpf_func_state *dst;
u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
int i, err;
if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
kfree(dst_state->jmp_history);
dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
if (!dst_state->jmp_history)
return -ENOMEM;
}
memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
dst_state->jmp_history_cnt = src->jmp_history_cnt;
/* if dst has more stack frames than src, free them */
for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
free_func_state(dst_state->frame[i]);
dst_state->frame[i] = NULL;
}
dst_state->speculative = src->speculative;
dst_state->curframe = src->curframe;
dst_state->active_spin_lock = src->active_spin_lock;
dst_state->branches = src->branches;
dst_state->parent = src->parent;
dst_state->first_insn_idx = src->first_insn_idx;
dst_state->last_insn_idx = src->last_insn_idx;
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
if (!dst) {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst)
return -ENOMEM;
dst_state->frame[i] = dst;
}
err = copy_func_state(dst, src->frame[i]);
if (err)
return err;
}
return 0;
}
static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
while (st) {
u32 br = --st->branches;
/* WARN_ON(br > 1) technically makes sense here,
* but see comment in push_stack(), hence:
*/
WARN_ONCE((int)br < 0,
"BUG update_branch_counts:branches_to_explore=%d\n",
br);
if (br)
break;
st = st->parent;
}
}
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem, *head = env->head;
int err;
if (env->head == NULL)
return -ENOENT;
if (cur) {
err = copy_verifier_state(cur, &head->st);
if (err)
return err;
}
if (insn_idx)
*insn_idx = head->insn_idx;
if (prev_insn_idx)
*prev_insn_idx = head->prev_insn_idx;
elem = head->next;
free_verifier_state(&head->st, false);
kfree(head);
env->head = elem;
env->stack_size--;
return 0;
}
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx,
bool speculative)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem;
int err;
elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
if (!elem)
goto err;
elem->insn_idx = insn_idx;
elem->prev_insn_idx = prev_insn_idx;
elem->next = env->head;
env->head = elem;
env->stack_size++;
err = copy_verifier_state(&elem->st, cur);
if (err)
goto err;
elem->st.speculative |= speculative;
if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
verbose(env, "The sequence of %d jumps is too complex.\n",
env->stack_size);
goto err;
}
if (elem->st.parent) {
++elem->st.parent->branches;
/* WARN_ON(branches > 2) technically makes sense here,
* but
* 1. speculative states will bump 'branches' for non-branch
* instructions
* 2. is_state_visited() heuristics may decide not to create
* a new state for a sequence of branches and all such current
* and cloned states will be pointing to a single parent state
* which might have large 'branches' count.
*/
}
return &elem->st;
err:
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
/* pop all elements and return */
while (!pop_stack(env, NULL, NULL));
return NULL;
}
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);
/* Mark the unknown part of a register (variable offset or scalar value) as
* known to have the value @imm.
*/
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
/* Clear id, off, and union(map_ptr, range) */
memset(((u8 *)reg) + sizeof(reg->type), 0,
offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
reg->var_off = tnum_const(imm);
reg->smin_value = (s64)imm;
reg->smax_value = (s64)imm;
reg->umin_value = imm;
reg->umax_value = imm;
}
/* Mark the 'variable offset' part of a register as zero. This should be
* used only on registers holding a pointer type.
*/
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
}
static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
reg->type = SCALAR_VALUE;
}
static void mark_reg_known_zero(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs */
for (regno = 0; regno < MAX_BPF_REG; regno++)
__mark_reg_not_init(env, regs + regno);
return;
}
__mark_reg_known_zero(regs + regno);
}
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
return type_is_pkt_pointer(reg->type);
}
static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
return reg_is_pkt_pointer(reg) ||
reg->type == PTR_TO_PACKET_END;
}
/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
enum bpf_reg_type which)
{
/* The register can already have a range from prior markings.
* This is fine as long as it hasn't been advanced from its
* origin.
*/
return reg->type == which &&
reg->id == 0 &&
reg->off == 0 &&
tnum_equals_const(reg->var_off, 0);
}
/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
/* min signed is max(sign bit) | min(other bits) */
reg->smin_value = max_t(s64, reg->smin_value,
reg->var_off.value | (reg->var_off.mask & S64_MIN));
/* max signed is min(sign bit) | max(other bits) */
reg->smax_value = min_t(s64, reg->smax_value,
reg->var_off.value | (reg->var_off.mask & S64_MAX));
reg->umin_value = max(reg->umin_value, reg->var_off.value);
reg->umax_value = min(reg->umax_value,
reg->var_off.value | reg->var_off.mask);
}
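/* Worked example: var_off = (value = 0x4, mask = 0x3), i.e. bit 2 is known
 * to be set and bits 0-1 are unknown. The known bits alone imply
 * umin_value >= 0x4 (all unknown bits clear) and umax_value <= 0x7
 * (all unknown bits set); since the sign bit is known clear, the signed
 * bounds are tightened to the same [0x4, 0x7] range.
 */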
/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
/* Learn sign from signed bounds.
* If we cannot cross the sign boundary, then signed and unsigned bounds
* are the same, so combine. This works even in the negative case, e.g.
* -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.