// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Routines having to do with the 'struct sk_buff' memory handlers.
*
* Authors: Alan Cox <[email protected]>
* Florian La Roche <[email protected]>
*
* Fixes:
* Alan Cox : Fixed the worst of the load
* balancer bugs.
* Dave Platt : Interrupt stacking fix.
* Richard Kooijman : Timestamp fixes.
* Alan Cox : Changed buffer format.
* Alan Cox : destructor hook for AF_UNIX etc.
* Linus Torvalds : Better skb_clone.
* Alan Cox : Added skb_copy.
* Alan Cox : Added all the changed routines Linus
* only put in the headers
* Ray VanTassle : Fixed --skb->lock in free
* Alan Cox : skb_copy copy arp field
* Andi Kleen : slabified it.
* Robert Olsson : Removed skb_head_pool
*
* NOTE:
* The __skb_ routines should be called with interrupts
* disabled, or you better be *real* sure that the operation is atomic
* with respect to whatever list is being frobbed (e.g. via lock_sock()
* or via disabling bottom half handlers, etc).
*/
/*
* The functions in this file will not compile correctly with gcc 2.4.x
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/dropreason.h>
#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>
#include "dev.h"
#include "sock_destructor.h"
struct kmem_cache *skbuff_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
static struct kmem_cache *skb_small_head_cache __ro_after_init;
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
* This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
* size, and we can differentiate heads from skb_small_head_cache
* vs system slabs by looking at their size (skb_end_offset()).
*/
#define SKB_SMALL_HEAD_CACHE_SIZE \
(is_power_of_2(SKB_SMALL_HEAD_SIZE) ? \
(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) : \
SKB_SMALL_HEAD_SIZE)
#define SKB_SMALL_HEAD_HEADROOM \
SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
[SKB_CONSUMED] = "CONSUMED",
DEFINE_DROP_REASON(FN, FN)
};
static const struct drop_reason_list drop_reasons_core = {
.reasons = drop_reasons,
.n_reasons = ARRAY_SIZE(drop_reasons),
};
const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);
/**
* drop_reasons_register_subsys - register another drop reason subsystem
* @subsys: the subsystem to register, must not be the core
* @list: the list of drop reasons within the subsystem, must point to
* a statically initialized list
*/
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
const struct drop_reason_list *list)
{
if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
"invalid subsystem %d\n", subsys))
return;
/* must point to statically allocated memory, so INIT is OK */
RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);
/**
* drop_reasons_unregister_subsys - unregister a drop reason subsystem
* @subsys: the subsystem to remove, must not be the core
*
* Note: This will synchronize_rcu() to ensure no users when it returns.
*/
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
"invalid subsystem %d\n", subsys))
return;
RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
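/* Illustrative sketch, not part of the original file: how another subsystem
 * might plug its private drop reasons into the table above. The subsystem
 * value SKB_DROP_REASON_SUBSYS_EXAMPLE and the reason strings are
 * hypothetical; a real user supplies its own enum skb_drop_reason_subsys
 * entry and a statically allocated list, as the kernel-doc above requires.
 *
 *	static const char * const example_drop_reasons[] = {
 *		"EXAMPLE_BAD_HEADER",
 *		"EXAMPLE_QUEUE_FULL",
 *	};
 *
 *	static const struct drop_reason_list example_drop_reason_list = {
 *		.reasons = example_drop_reasons,
 *		.n_reasons = ARRAY_SIZE(example_drop_reasons),
 *	};
 *
 *	// on module init:
 *	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_EXAMPLE,
 *				     &example_drop_reason_list);
 *	// on module exit (synchronizes RCU before returning):
 *	drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_EXAMPLE);
 */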
/**
* skb_panic - private function for out-of-line support
* @skb: buffer
* @sz: size
* @addr: address
* @msg: skb_over_panic or skb_under_panic
*
* Out-of-line support for skb_put() and skb_push().
* Called via the wrapper skb_over_panic() or skb_under_panic().
* Keep out of line to prevent kernel bloat.
* __builtin_return_address is not used because it is not always reliable.
*/
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
const char msg[])
{
pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
msg, addr, skb->len, sz, skb->head, skb->data,
(unsigned long)skb->tail, (unsigned long)skb->end,
skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
skb_panic(skb, sz, addr, __func__);
}
static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
skb_panic(skb, sz, addr, __func__);
}
#define NAPI_SKB_CACHE_SIZE 64
#define NAPI_SKB_CACHE_BULK 16
#define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2)
#if PAGE_SIZE == SZ_4K
#define NAPI_HAS_SMALL_PAGE_FRAG 1
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc) ((nc).pfmemalloc)
/* specialized page frag allocator using a single order 0 page
* and slicing it into 1K sized fragments. Constrained to systems
* with a very limited number of 1K fragments fitting a single
* page - to avoid excessive truesize underestimation
*/
struct page_frag_1k {
void *va;
u16 offset;
bool pfmemalloc;
};
static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
{
struct page *page;
int offset;
offset = nc->offset - SZ_1K;
if (likely(offset >= 0))
goto use_frag;
page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
if (!page)
return NULL;
nc->va = page_address(page);
nc->pfmemalloc = page_is_pfmemalloc(page);
offset = PAGE_SIZE - SZ_1K;
page_ref_add(page, offset / SZ_1K);
use_frag:
nc->offset = offset;
return nc->va + offset;
}
#else
/* the small page is actually unused in this build; add dummy helpers
* to please the compiler and avoid preprocessor conditionals later on
*/
#define NAPI_HAS_SMALL_PAGE_FRAG 0
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc) false
struct page_frag_1k {
};
static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
{
return NULL;
}
#endif
struct napi_alloc_cache {
struct page_frag_cache page;
struct page_frag_1k page_small;
unsigned int skb_count;
void *skb_cache[NAPI_SKB_CACHE_SIZE];
};
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
/* Double check that napi_get_frags() allocates skbs with
* skb->head being backed by slab, not a page fragment.
* This is to make sure bug fixed in 3226b158e67c
* ("net: avoid 32 x truesize under-estimation for tiny skbs")
* does not accidentally come back.
*/
void napi_get_frags_check(struct napi_struct *napi)
{
struct sk_buff *skb;
local_bh_disable();
skb = napi_get_frags(napi);
WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
napi_free_frags(napi);
local_bh_enable();
}
void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
fragsz = SKB_DATA_ALIGN(fragsz);
return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
void *data;
fragsz = SKB_DATA_ALIGN(fragsz);
if (in_hardirq() || irqs_disabled()) {
struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
} else {
struct napi_alloc_cache *nc;
local_bh_disable();
nc = this_cpu_ptr(&napi_alloc_cache);
data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
local_bh_enable();
}
return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);
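/* Illustrative sketch, not part of the original file: the frag caches above
 * are normally reached through the netdev_alloc_frag() / napi_alloc_frag()
 * wrappers in skbuff.h, and a fragment that never gets attached to an skb
 * is released with skb_free_frag(). The helper name is hypothetical.
 */
static inline void *example_get_rx_buffer(unsigned int size)
{
        /* round up so a later build_skb() finds room for skb_shared_info */
        size = SKB_DATA_ALIGN(size) +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        return netdev_alloc_frag(size);	/* NULL on failure */
}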
static struct sk_buff *napi_skb_cache_get(void)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
struct sk_buff *skb;
if (unlikely(!nc->skb_count)) {
nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
GFP_ATOMIC,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
if (unlikely(!nc->skb_count))
return NULL;
}
skb = nc->skb_cache[--nc->skb_count];
kasan_unpoison_object_data(skbuff_cache, skb);
return skb;
}
static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
unsigned int size)
{
struct skb_shared_info *shinfo;
size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Assumes caller memset cleared SKB */
skb->truesize = SKB_TRUESIZE(size);
refcount_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
skb_set_end_offset(skb, size);
skb->mac_header = (typeof(skb->mac_header))~0U;
skb->transport_header = (typeof(skb->transport_header))~0U;
skb->alloc_cpu = raw_smp_processor_id();
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
skb_set_kcov_handle(skb, kcov_common_handle());
}
static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
unsigned int *size)
{
void *resized;
/* Must find the allocation size (and grow it to match). */
*size = ksize(data);
/* krealloc() will immediately return "data" when
* "ksize(data)" is requested: it is the existing upper
* bounds. As a result, GFP_ATOMIC will be ignored. Note
* that this "new" pointer needs to be passed back to the
* caller for use so the __alloc_size hinting will be
* tracked correctly.
*/
resized = krealloc(data, *size, GFP_ATOMIC);
WARN_ON_ONCE(resized != data);
return resized;
}
/* build_skb() variant which can operate on slab buffers.
* Note that this should be used sparingly as slab buffers
* cannot be combined efficiently by GRO!
*/
struct sk_buff *slab_build_skb(void *data)
{
struct sk_buff *skb;
unsigned int size;
skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
memset(skb, 0, offsetof(struct sk_buff, tail));
data = __slab_build_skb(skb, data, &size);
__finalize_skb_around(skb, data, size);
return skb;
}
EXPORT_SYMBOL(slab_build_skb);
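/* Illustrative sketch, not part of the original file: building an skb
 * around a slab buffer as described above. The helper name and sizing are
 * hypothetical; the point is that the kmalloc() area must also hold the
 * skb_shared_info tail, and that the buffer is not freed when
 * slab_build_skb() fails.
 */
static inline struct sk_buff *example_slab_skb(unsigned int len, gfp_t gfp)
{
        void *buf = kmalloc(len + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
                            gfp);
        struct sk_buff *skb;

        if (!buf)
                return NULL;
        skb = slab_build_skb(buf);
        if (unlikely(!skb)) {
                kfree(buf);	/* caller still owns the buffer */
                return NULL;
        }
        return skb;
}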
/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
unsigned int frag_size)
{
unsigned int size = frag_size;
/* frag_size == 0 is considered deprecated now. Callers
* using slab buffer should use slab_build_skb() instead.
*/
if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
data = __slab_build_skb(skb, data, &size);
__finalize_skb_around(skb, data, size);
}
/**
* __build_skb - build a network buffer
* @data: data buffer provided by caller
* @frag_size: size of data (must not be 0)
*
* Allocate a new &sk_buff. Caller provides space holding head and
* skb_shared_info. @data must have been allocated from the page
* allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
* allocation is deprecated, and callers should use slab_build_skb()
* instead.)
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
* Notes :
* Before IO, the driver allocates only the data buffer where the NIC will
* place the incoming frame. The driver should add room at head (NET_SKB_PAD)
* and MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
* After IO, the driver calls build_skb() to allocate the sk_buff and
* populate it before giving the packet to the stack.
* RX rings contain only data buffers, not full skbs.
*/
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
memset(skb, 0, offsetof(struct sk_buff, tail));
__build_skb_around(skb, data, frag_size);
return skb;
}
/* build_skb() is wrapper over __build_skb(), that specifically
* takes care of skb->head and skb->pfmemalloc
*/
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb = __build_skb(data, frag_size);
if (likely(skb && frag_size)) {
skb->head_frag = 1;
skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
}
return skb;
}
EXPORT_SYMBOL(build_skb);
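/* Illustrative sketch, not part of the original file: wrapping a receive
 * buffer in an skb along the lines of the notes above. The helper name and
 * parameters are hypothetical; @data is assumed to come from the page
 * allocator or a page frag, with NET_SKB_PAD headroom and the
 * skb_shared_info tail room already accounted for in @buf_size.
 */
static inline struct sk_buff *example_build_rx_skb(void *data,
                                                   unsigned int buf_size,
                                                   unsigned int pkt_len)
{
        struct sk_buff *skb = build_skb(data, buf_size);

        if (unlikely(!skb))
                return NULL;		/* @data is not freed on failure */
        skb_reserve(skb, NET_SKB_PAD);	/* skip the headroom the driver left */
        skb_put(skb, pkt_len);		/* frame length written by the NIC */
        return skb;
}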
/**
* build_skb_around - build a network buffer around provided skb
* @skb: sk_buff provide by caller, must be memset cleared
* @data: data buffer provided by caller
* @frag_size: size of data
*/
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size)
{
if (unlikely(!skb))
return NULL;
__build_skb_around(skb, data, frag_size);
if (frag_size) {
skb->head_frag = 1;
skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
}
return skb;
}
EXPORT_SYMBOL(build_skb_around);
/**
* __napi_build_skb - build a network buffer
* @data: data buffer provided by caller
* @frag_size: size of data
*
* Version of __build_skb() that uses NAPI percpu caches to obtain
* skbuff_head instead of inplace allocation.
*
* Returns a new &sk_buff on success, %NULL on allocation failure.
*/
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
skb = napi_skb_cache_get();
if (unlikely(!skb))
return NULL;
memset(skb, 0, offsetof(struct sk_buff, tail));
__build_skb_around(skb, data, frag_size);
return skb;
}
/**
* napi_build_skb - build a network buffer
* @data: data buffer provided by caller
* @frag_size: size of data
*
* Version of __napi_build_skb() that takes care of skb->head_frag
* and skb->pfmemalloc when the data is a page or page fragment.
*
* Returns a new &sk_buff on success, %NULL on allocation failure.
*/
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb = __napi_build_skb(data, frag_size);
if (likely(skb) && frag_size) {
skb->head_frag = 1;
skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
}
return skb;
}
EXPORT_SYMBOL(napi_build_skb);
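/* Illustrative sketch, not part of the original file: allocating a page
 * fragment and building an skb around it from NAPI (softirq) context.
 * The helper name is hypothetical; the sizing mirrors what drivers
 * typically do - headroom plus the skb_shared_info tail.
 */
static inline struct sk_buff *example_napi_build(unsigned int pkt_len)
{
        unsigned int buf_size = SKB_DATA_ALIGN(NET_SKB_PAD + pkt_len) +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct sk_buff *skb;
        void *data;

        data = napi_alloc_frag(buf_size);	/* BH context only */
        if (unlikely(!data))
                return NULL;
        skb = napi_build_skb(data, buf_size);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }
        skb_reserve(skb, NET_SKB_PAD);
        skb_put(skb, pkt_len);
        return skb;
}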
/*
* kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
* the caller if emergency pfmemalloc reserves are being used. If it is and
* the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
* may be used. Otherwise, the packet data may be discarded until enough
* memory is free
*/
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
bool *pfmemalloc)
{
bool ret_pfmemalloc = false;
size_t obj_size;
void *obj;
obj_size = SKB_HEAD_ALIGN(*size);
if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
!(flags & KMALLOC_NOT_NORMAL_BITS)) {
obj = kmem_cache_alloc_node(skb_small_head_cache,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
*size = SKB_SMALL_HEAD_CACHE_SIZE;
if (obj || !(gfp_pfmemalloc_allowed(flags)))
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
goto out;
}
obj_size = kmalloc_size_roundup(obj_size);
/* The following cast might truncate high-order bits of obj_size, this
* is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
*/
*size = (unsigned int)obj_size;
/*
* Try a regular allocation, when that fails and we're not entitled
* to the reserves, fail.
*/
obj = kmalloc_node_track_caller(obj_size,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
if (obj || !(gfp_pfmemalloc_allowed(flags)))
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
obj = kmalloc_node_track_caller(obj_size, flags, node);
out:
if (pfmemalloc)
*pfmemalloc = ret_pfmemalloc;
return obj;
}
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
* [BEEP] leaks.
*
*/
/**
* __alloc_skb - allocate a network buffer
* @size: size to allocate
* @gfp_mask: allocation mask
* @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
* instead of head cache and allocate a cloned (child) skb.
* If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
* allocations in case the data is required for writeback
* @node: numa node to allocate memory on
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
* tail room of at least size bytes. The object has a reference count
* of one. The return is the buffer. On a failure the return is %NULL.
*
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
*/
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
int flags, int node)
{
struct kmem_cache *cache;
struct sk_buff *skb;
bool pfmemalloc;
u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
/* Get the HEAD */
if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
likely(node == NUMA_NO_NODE || node == numa_mem_id()))
skb = napi_skb_cache_get();
else
skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
if (unlikely(!skb))
return NULL;
prefetchw(skb);
/* We do our best to align skb_shared_info on a separate cache
* line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
* Both skb->head and skb_shared_info are cache line aligned.
*/
data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
if (unlikely(!data))
goto nodata;
/* kmalloc_size_roundup() might give us more room than requested.
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
prefetchw(data + SKB_WITH_OVERHEAD(size));
/*
* Only clear those fields we need to clear, not those that we will
* actually initialise below. Hence, don't put any more fields after
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
__build_skb_around(skb, data, size);
skb->pfmemalloc = pfmemalloc;
if (flags & SKB_ALLOC_FCLONE) {
struct sk_buff_fclones *fclones;
fclones = container_of(skb, struct sk_buff_fclones, skb1);
skb->fclone = SKB_FCLONE_ORIG;
refcount_set(&fclones->fclone_ref, 1);
}
return skb;
nodata:
kmem_cache_free(cache, skb);
return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
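/* Illustrative sketch, not part of the original file: the common
 * alloc_skb() + skb_reserve() + skb_put() pattern for an outgoing packet.
 * alloc_skb() is the skbuff.h wrapper around __alloc_skb(); the helper
 * name and the GFP_ATOMIC choice are hypothetical.
 */
static inline struct sk_buff *example_alloc_tx_skb(unsigned int headroom,
                                                   unsigned int payload_len)
{
        struct sk_buff *skb = alloc_skb(headroom + payload_len, GFP_ATOMIC);

        if (unlikely(!skb))
                return NULL;
        skb_reserve(skb, headroom);	/* room for headers pushed later */
        skb_put(skb, payload_len);	/* payload filled in by the caller */
        return skb;
}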
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @len: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has NET_SKB_PAD headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory.
*/
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
gfp_t gfp_mask)
{
struct page_frag_cache *nc;
struct sk_buff *skb;
bool pfmemalloc;
void *data;
len += NET_SKB_PAD;
/* If requested length is either too small or too big,
* we use kmalloc() for skb->head allocation.
*/
if (len <= SKB_WITH_OVERHEAD(1024) ||
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
goto skb_fail;
goto skb_success;
}
len = SKB_HEAD_ALIGN(len);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
if (in_hardirq() || irqs_disabled()) {
nc = this_cpu_ptr(&netdev_alloc_cache);
data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = nc->pfmemalloc;
} else {
local_bh_disable();
nc = this_cpu_ptr(&napi_alloc_cache.page);
data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = nc->pfmemalloc;
local_bh_enable();
}
if (unlikely(!data))
return NULL;
skb = __build_skb(data, len);
if (unlikely(!skb)) {
skb_free_frag(data);
return NULL;
}
if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
skb_success:
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
skb_fail:
return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
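/* Illustrative sketch, not part of the original file: a copy-based receive
 * path using netdev_alloc_skb() (the GFP_ATOMIC wrapper in skbuff.h).
 * The helper name is hypothetical; headroom beyond the built-in
 * NET_SKB_PAD is the caller's business, as the kernel-doc above notes.
 */
static inline struct sk_buff *example_netdev_rx_skb(struct net_device *dev,
                                                    const void *frame,
                                                    unsigned int frame_len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, frame_len);

        if (unlikely(!skb))
                return NULL;
        skb_put_data(skb, frame, frame_len);	/* copy the received frame */
        /* a real driver would now set skb->protocol and hand the skb to
         * netif_rx() or napi_gro_receive()
         */
        return skb;
}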
/**
* __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
* @napi: napi instance this buffer was allocated for
* @len: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
*
* Allocate a new sk_buff for use in NAPI receive. This buffer will
* attempt to allocate the head from a special reserved region used
* only for NAPI Rx allocation. By doing this we can save several
* CPU cycles by avoiding having to disable and re-enable IRQs.
*
* %NULL is returned if there is no free memory.
*/
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
struct napi_alloc_cache *nc;
struct sk_buff *skb;
bool pfmemalloc;
void *data;
DEBUG_NET_WARN_ON_ONCE(!in_softirq());
len += NET_SKB_PAD + NET_IP_ALIGN;
/* If requested length is either too small or too big,
* we use kmalloc() for skb->head allocation.
* When the small frag allocator is available, prefer it over kmalloc
* for small fragments
*/
if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
NUMA_NO_NODE);
if (!skb)
goto skb_fail;
goto skb_success;
}
nc = this_cpu_ptr(&napi_alloc_cache);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
/* we are artificially inflating the allocation size, but
* that is not as bad as it may look, because:
* - 'len' less than GRO_MAX_HEAD makes little sense
* - On most systems, larger 'len' values lead to fragment
* size above 512 bytes
* - kmalloc would use the kmalloc-1k slab for such values
* - Builds with smaller GRO_MAX_HEAD will very likely do
* little networking, as that implies no WiFi and no
* tunnels support, and 32 bits arches.
*/
len = SZ_1K;
data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
} else {
len = SKB_HEAD_ALIGN(len);
data = page_frag_alloc(&nc->page, len, gfp_mask);
pfmemalloc = nc->page.pfmemalloc;
}
if (unlikely(!data))
return NULL;
skb = __napi_build_skb(data, len);
if (unlikely(!skb)) {
skb_free_frag(data);
return NULL;
}
if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
skb_success:
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
skb->dev = napi->dev;
skb_fail:
return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
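/* Illustrative sketch, not part of the original file: the NAPI counterpart
 * of the previous example, using napi_alloc_skb() from a driver's poll
 * routine. The helper name is hypothetical.
 */
static inline struct sk_buff *example_napi_rx_skb(struct napi_struct *napi,
                                                  const void *frame,
                                                  unsigned int frame_len)
{
        struct sk_buff *skb = napi_alloc_skb(napi, frame_len);

        if (unlikely(!skb))
                return NULL;
        skb_put_data(skb, frame, frame_len);	/* copy the received frame */
        /* followed by eth_type_trans() / napi_gro_receive() in a real driver */
        return skb;
}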
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize)
{
skb_fill_page_desc(skb, i, page, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
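/* Illustrative sketch, not part of the original file: appending a page
 * fragment to an skb, e.g. the payload half of a header-split receive.
 * The helper name is hypothetical; truesize is the full memory cost the
 * fragment accounts for (here a whole page).
 */
static inline void example_append_frag(struct sk_buff *skb, struct page *page,
                                       unsigned int offset, unsigned int len)
{
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
                        PAGE_SIZE);
}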
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb_frag_size_add(frag, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
static void skb_drop_list(struct sk_buff **listp)
{
kfree_skb_list(*listp);
*listp = NULL;
}
static inline void skb_drop_fraglist(struct sk_buff *skb)
{
skb_drop_list(&skb_shinfo(skb)->frag_list);
}
static void skb_clone_fraglist(struct sk_buff *skb)
{
struct sk_buff *list;
skb_walk_frags(skb, list)
skb_get(list);
}
#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(struct page *page, bool napi_safe)
{
bool allow_direct = false;
struct page_pool *pp;
page = compound_head(page);
/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
* in order to preserve any existing bits, such as bit 0 for the
* head page of compound page and bit 1 for pfmemalloc page, so
* mask those bits for freeing side when doing below checking,
* and page_is_pfmemalloc() is checked in __page_pool_put_page()
* to avoid recycling the pfmemalloc page.
*/
if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
return false;
pp = page->pp;
/* Allow direct recycle if we have reason to believe that we are
* in the same context in which the consumer would run, so there's
* no possible race.
* __page_pool_put_page() makes sure we're not in hardirq context
* and interrupts are enabled prior to accessing the cache.
*/
if (napi_safe || in_softirq()) {
const struct napi_struct *napi = READ_ONCE(pp->p.napi);
allow_direct = napi &&
READ_ONCE(napi->list_owner) == smp_processor_id();
}
/* Driver set this to memory recycling info. Reset it on recycle.
* This will *not* work for NIC using a split-page memory model.
* The page will be returned to the pool here regardless of the
* 'flipped' fragment being in use or not.
*/
page_pool_put_full_page(pp, page, allow_direct);
return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif
static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
{
if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
return false;
return napi_pp_put_page(virt_to_page(data), napi_safe);
}
static void skb_kfree_head(void *head, unsigned int end_offset)
{
if (end_offset == SKB_SMALL_HEAD_HEADROOM)
kmem_cache_free(skb_small_head_cache, head);
else
kfree(head);
}
static void skb_free_head(struct sk_buff *skb, bool napi_safe)
{
unsigned char *head = skb->head;
if (skb->head_frag) {
if (skb_pp_recycle(skb, head, napi_safe))
return;
skb_free_frag(head);
} else {
skb_kfree_head(head, skb_end_offset(skb));
}
}
static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
bool napi_safe)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
if (skb->cloned &&
atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
&shinfo->dataref))
goto exit;
if (skb_zcopy(skb)) {
bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;
skb_zcopy_clear(skb, true);
if (skip_unref)
goto free_head;
}
for (i = 0; i < shinfo->nr_frags; i++)
napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe);
free_head:
if (shinfo->frag_list)
kfree_skb_list_reason(shinfo->frag_list, reason);
skb_free_head(skb, napi_safe);
exit:
/* When we clone an SKB we copy the recycling bit. The pp_recycle
* bit is only set on the head though, so in order to avoid races
* while trying to recycle fragments on __skb_frag_unref() we need
* to make one SKB responsible for triggering the recycle path.
* So disable the recycling bit if an SKB is cloned and we have
* additional references to the fragmented part of the SKB.
* Eventually the last SKB will have the recycling bit set and its
* dataref set to 0, which will trigger the recycling.
*/
skb->pp_recycle = 0;
}