/*
* Copyright (c) 2009-2014, 2016-2018 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <config.h>
#include "dpif-netdev.h"
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <net/if.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <unistd.h>
#include "bitmap.h"
#include "cmap.h"
#include "conntrack.h"
#include "coverage.h"
#include "ct-dpif.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "dpif-netdev-perf.h"
#include "dpif-provider.h"
#include "dummy.h"
#include "fat-rwlock.h"
#include "flow.h"
#include "hmapx.h"
#include "id-pool.h"
#include "ipf.h"
#include "latch.h"
#include "netdev.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "odp-execute.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "ovs-numa.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "openvswitch/poll-loop.h"
#include "pvector.h"
#include "random.h"
#include "seq.h"
#include "smap.h"
#include "sset.h"
#include "timeval.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "unixctl.h"
#include "util.h"
#include "uuid.h"
VLOG_DEFINE_THIS_MODULE(dpif_netdev);
/* Auto Load Balancing Defaults */
#define ALB_ACCEPTABLE_IMPROVEMENT 25
#define ALB_PMD_LOAD_THRESHOLD 95
#define ALB_PMD_REBALANCE_POLL_INTERVAL 1 /* 1 Min */
#define MIN_TO_MSEC 60000
#define FLOW_DUMP_MAX_BATCH 50
/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 6
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)
/* Use instant packet send by default. */
#define DEFAULT_TX_FLUSH_INTERVAL 0
/* Configuration parameters. */
enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */
enum { MAX_METERS = 65536 }; /* Maximum number of meters. */
enum { MAX_BANDS = 8 }; /* Maximum number of bands / meter. */
enum { N_METER_LOCKS = 64 }; /* Number of meter locks; meters are striped
                              * across them. */
/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
= SHASH_INITIALIZER(&dp_netdevs);
static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);
#define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
| CS_INVALID | CS_REPLY_DIR | CS_TRACKED \
| CS_SRC_NAT | CS_DST_NAT)
#define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)
static struct odp_support dp_netdev_support = {
.max_vlan_headers = SIZE_MAX,
.max_mpls_depth = SIZE_MAX,
.recirc = true,
.ct_state = true,
.ct_zone = true,
.ct_mark = true,
.ct_label = true,
.ct_state_nat = true,
.ct_orig_tuple = true,
.ct_orig_tuple6 = true,
};
/* Stores a miniflow with inline values */
struct netdev_flow_key {
uint32_t hash; /* Hash function differs for different users. */
uint32_t len; /* Length of the following miniflow (incl. map). */
struct miniflow mf;
uint64_t buf[FLOW_MAX_PACKET_U64S];
};
/* EMC cache and SMC cache compose the datapath flow cache (DFC)
*
* Exact match cache for frequently used flows
*
* The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
* search its entries for a miniflow that matches exactly the miniflow of the
* packet. It stores the 'dpcls_rule' (rule) that matches the miniflow.
*
* A cache entry holds a reference to its 'dp_netdev_flow'.
*
* A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
* entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
* them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each
* value is the index of a cache entry where the miniflow could be.
*
*
* Signature match cache (SMC)
*
* This cache stores a 16-bit signature for each flow without storing keys, and
* stores the corresponding 16-bit flow_table index to the 'dp_netdev_flow'.
* Each flow thus occupies only 32 bits, which is much more memory efficient
* than the EMC.
* SMC uses a set-associative design in which each bucket contains
* SMC_ENTRY_PER_BUCKET entries.
* Since a 16-bit flow_table index is used, flows that cannot be indexed by a
* 16-bit value (i.e. beyond the first 2^16 dp_netdev_flow entries) always
* miss in the SMC.
*
*
* Thread-safety
* =============
*
* Each pmd_thread has its own private exact match cache.
* If dp_netdev_input is not called from a pmd thread, a mutex is used.
*/
#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2
/* SMC uses a set-associative design: a bucket contains a set of entries that
* a flow item can occupy. For now, it uses a single hash function, rather
* than the two used by the EMC design. */
#define SMC_ENTRY_PER_BUCKET 4
#define SMC_ENTRIES (1u << 20)
#define SMC_BUCKET_CNT (SMC_ENTRIES / SMC_ENTRY_PER_BUCKET)
#define SMC_MASK (SMC_BUCKET_CNT - 1)
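/* Illustrative sketch only: the helpers below are hypothetical and are not
 * used by the datapath.  They show one plausible way to derive the bucket
 * index and the 16-bit signature from a packet hash under the layout above
 * (the low bits select the bucket; e.g. the upper 16 bits can serve as the
 * signature). */
static inline uint32_t
smc_sketch_bucket_idx(uint32_t hash)
{
    return hash & SMC_MASK;
}

static inline uint16_t
smc_sketch_signature(uint32_t hash)
{
    return (uint16_t) (hash >> 16);
}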
/* Default EMC insert probability is 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB */
#define DEFAULT_EM_FLOW_INSERT_INV_PROB 100
#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX / \
DEFAULT_EM_FLOW_INSERT_INV_PROB)
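/* Illustrative sketch only (hypothetical helper, not used by the datapath):
 * probabilistic EMC insertion compares a fresh 32-bit random value against
 * the configured 'emc_insert_min'.  With the default of UINT32_MAX / 100,
 * roughly 1 in 100 eligible packets results in an EMC insertion. */
static inline bool
emc_sketch_should_insert(uint32_t emc_insert_min)
{
    return emc_insert_min && random_uint32() <= emc_insert_min;
}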
struct emc_entry {
struct dp_netdev_flow *flow;
struct netdev_flow_key key; /* key.hash used for emc hash value. */
};
struct emc_cache {
struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
int sweep_idx; /* For emc_cache_slow_sweep(). */
};
struct smc_bucket {
uint16_t sig[SMC_ENTRY_PER_BUCKET];
uint16_t flow_idx[SMC_ENTRY_PER_BUCKET];
};
/* Signature match cache, as distinct from the EMC. */
struct smc_cache {
struct smc_bucket buckets[SMC_BUCKET_CNT];
};
struct dfc_cache {
struct emc_cache emc_cache;
struct smc_cache smc_cache;
};
/* Iterate in the exact match cache through every entry that might contain a
* miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH) \
for (uint32_t i__ = 0, srch_hash__ = (HASH); \
(CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
i__ < EM_FLOW_HASH_SEGS; \
i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
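/* Minimal usage sketch of the iterator above (hypothetical helper, not used
 * by the datapath): probe each of the EM_FLOW_HASH_SEGS candidate entries
 * for a matching hash.  The real lookup additionally checks that the entry
 * is alive and that the stored miniflow matches the packet's miniflow. */
static inline struct emc_entry *
emc_sketch_probe(struct emc_cache *cache, uint32_t hash)
{
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, hash) {
        if (current_entry->key.hash == hash) {
            return current_entry;
        }
    }
    return NULL;
}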
/* Simple non-wildcarding single-priority classifier. */
/* Time in microseconds between successive optimizations of the dpcls
* subtable vector */
#define DPCLS_OPTIMIZATION_INTERVAL 1000000LL
/* Time in microseconds of the interval over which the rxq processing cycles
* used for rxq to pmd assignment are measured and stored. */
#define PMD_RXQ_INTERVAL_LEN 10000000LL
/* Number of intervals for which cycles are stored
* and used during rxq to pmd assignment. */
#define PMD_RXQ_INTERVAL_MAX 6
struct dpcls {
struct cmap_node node; /* Within dp_netdev_pmd_thread.classifiers */
odp_port_t in_port;
struct cmap subtables_map;
struct pvector subtables;
};
/* A rule to be inserted to the classifier. */
struct dpcls_rule {
struct cmap_node cmap_node; /* Within struct dpcls_subtable 'rules'. */
struct netdev_flow_key *mask; /* Subtable's mask. */
struct netdev_flow_key flow; /* Matching key. */
/* 'flow' must be the last field, additional space is allocated here. */
};
/* Data structure to keep packet order till fastpath processing. */
struct dp_packet_flow_map {
struct dp_packet *packet;
struct dp_netdev_flow *flow;
uint16_t tcp_flags;
};
static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_sort_subtable_vector(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(struct dpcls *cls,
const struct netdev_flow_key *keys[],
struct dpcls_rule **rules, size_t cnt,
int *num_lookups_p);
static bool dpcls_rule_matches_key(const struct dpcls_rule *rule,
const struct netdev_flow_key *target);
/* Set of supported meter flags */
#define DP_SUPPORTED_METER_FLAGS_MASK \
(OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)
/* Set of supported meter band types */
#define DP_SUPPORTED_METER_BAND_TYPES \
( 1 << OFPMBT13_DROP )
struct dp_meter_band {
struct ofputil_meter_band up; /* type, prec_level, pad, rate, burst_size */
uint32_t bucket; /* In 1/1000 packets (for PKTPS), or in bits (for KBPS) */
uint64_t packet_count;
uint64_t byte_count;
};
struct dp_meter {
uint16_t flags;
uint16_t n_bands;
uint32_t max_delta_t;
uint64_t used;
uint64_t packet_count;
uint64_t byte_count;
struct dp_meter_band bands[];
};
struct pmd_auto_lb {
bool auto_lb_requested; /* Auto load balancing requested by user. */
bool is_enabled; /* Current status of Auto load balancing. */
uint64_t rebalance_intvl;
uint64_t rebalance_poll_timer;
};
/* Datapath based on the network device interface from netdev.h.
*
*
* Thread-safety
* =============
*
* Some members, marked 'const', are immutable. Accessing other members
* requires synchronization, as noted in more detail below.
*
* Acquisition order is, from outermost to innermost:
*
* dp_netdev_mutex (global)
* port_mutex
* non_pmd_mutex
*/
struct dp_netdev {
const struct dpif_class *const class;
const char *const name;
struct dpif *dpif;
struct ovs_refcount ref_cnt;
atomic_flag destroyed;
/* Ports.
*
* Any lookup into 'ports' or any access to the dp_netdev_ports found
* through 'ports' requires taking 'port_mutex'. */
struct ovs_mutex port_mutex;
struct hmap ports;
struct seq *port_seq; /* Incremented whenever a port changes. */
/* The time that a packet can wait in the output batch before being sent. */
atomic_uint32_t tx_flush_interval;
/* Meters. */
struct ovs_mutex meter_locks[N_METER_LOCKS];
struct dp_meter *meters[MAX_METERS]; /* Meter bands. */
/* Probability of EMC insertion is determined by 'emc_insert_min'. */
OVS_ALIGNED_VAR(CACHE_LINE_SIZE) atomic_uint32_t emc_insert_min;
/* Enable collection of PMD performance metrics. */
atomic_bool pmd_perf_metrics;
/* Enable the SMC cache from ovsdb config */
atomic_bool smc_enable_db;
/* Protects access to ofproto-dpif-upcall interface during revalidator
* thread synchronization. */
struct fat_rwlock upcall_rwlock;
upcall_callback *upcall_cb; /* Callback function for executing upcalls. */
void *upcall_aux;
/* Callback function for notifying about the purging of dp flows (during
* pmd thread reset or deletion). */
dp_purge_callback *dp_purge_cb;
void *dp_purge_aux;
/* Stores all 'struct dp_netdev_pmd_thread's. */
struct cmap poll_threads;
/* ID pool for per-thread 'static_tx_qid'. */
struct id_pool *tx_qid_pool;
struct ovs_mutex tx_qid_pool_mutex;
/* Use measured cycles for rxq to pmd assignment. */
bool pmd_rxq_assign_cyc;
/* Protects access to the 'struct dp_netdev_pmd_thread' instance for the
* non-pmd thread. */
struct ovs_mutex non_pmd_mutex;
/* Each pmd thread will store its pointer to
* 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
ovsthread_key_t per_pmd_key;
struct seq *reconfigure_seq;
uint64_t last_reconfigure_seq;
/* CPU mask for pinning pmd threads. */
char *pmd_cmask;
uint64_t last_tnl_conf_seq;
struct conntrack conntrack;
struct pmd_auto_lb pmd_alb;
};
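/* Illustrative sketch only (hypothetical helper, not used by the datapath):
 * the documented acquisition order is followed by taking the outermost lock
 * first, i.e. the global 'dp_netdev_mutex' before a datapath's 'port_mutex'. */
static inline void
dp_netdev_sketch_lock_order(struct dp_netdev *dp)
{
    ovs_mutex_lock(&dp_netdev_mutex);
    ovs_mutex_lock(&dp->port_mutex);
    /* ... 'dp->ports' may be safely examined here ... */
    ovs_mutex_unlock(&dp->port_mutex);
    ovs_mutex_unlock(&dp_netdev_mutex);
}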
static void meter_lock(const struct dp_netdev *dp, uint32_t meter_id)
OVS_ACQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
ovs_mutex_lock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}
static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
OVS_RELEASES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
ovs_mutex_unlock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}
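/* Illustrative sketch only (hypothetical helper, not used by the datapath):
 * the meter array is protected by the striped locks above, so any access to
 * dp->meters[meter_id] is bracketed by meter_lock()/meter_unlock() on the
 * same meter id.  The caller must ensure meter_id < MAX_METERS. */
static inline uint64_t
meter_sketch_packet_count(const struct dp_netdev *dp, uint32_t meter_id)
{
    uint64_t count = 0;

    meter_lock(dp, meter_id);
    if (dp->meters[meter_id]) {
        count = dp->meters[meter_id]->packet_count;
    }
    meter_unlock(dp, meter_id);
    return count;
}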
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
odp_port_t)
OVS_REQUIRES(dp->port_mutex);
enum rxq_cycles_counter_type {
RXQ_CYCLES_PROC_CURR, /* Cycles spent successfully polling and
processing packets during the current
interval. */
RXQ_CYCLES_PROC_HIST, /* Total cycles of all intervals that are used
during rxq to pmd assignment. */
RXQ_N_CYCLES
};
enum {
DP_NETDEV_FLOW_OFFLOAD_OP_ADD,
DP_NETDEV_FLOW_OFFLOAD_OP_MOD,
DP_NETDEV_FLOW_OFFLOAD_OP_DEL,
};
struct dp_flow_offload_item {
struct dp_netdev_pmd_thread *pmd;
struct dp_netdev_flow *flow;
int op;
struct match match;
struct nlattr *actions;
size_t actions_len;
struct ovs_list node;
};
struct dp_flow_offload {
struct ovs_mutex mutex;
struct ovs_list list;
pthread_cond_t cond;
};
static struct dp_flow_offload dp_flow_offload = {
.mutex = OVS_MUTEX_INITIALIZER,
.list = OVS_LIST_INITIALIZER(&dp_flow_offload.list),
};
static struct ovsthread_once offload_thread_once
= OVSTHREAD_ONCE_INITIALIZER;
#define XPS_TIMEOUT 500000LL /* In microseconds. */
/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
struct dp_netdev_port *port;
struct netdev_rxq *rx;
unsigned core_id; /* Core to which this queue should be
pinned. OVS_CORE_UNSPEC if the
queue doesn't need to be pinned to a
particular core. */
unsigned intrvl_idx; /* Write index for 'cycles_intrvl'. */
struct dp_netdev_pmd_thread *pmd; /* pmd thread that polls this queue. */
bool is_vhost; /* Is rxq of a vhost port. */
/* Counters of cycles spent successfully polling and processing pkts. */
atomic_ullong cycles[RXQ_N_CYCLES];
/* We store PMD_RXQ_INTERVAL_MAX intervals of data for an rxq and then
sum them to yield the cycles used for an rxq. */
atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
};
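/* Illustrative sketch only (hypothetical helper; the real code goes through
 * the dp_netdev_rxq_get_intrvl_cycles() accessor declared further below):
 * the per-interval counters are simply summed over the stored window to get
 * the cycles recently used by an rxq. */
static inline uint64_t
dp_netdev_rxq_sketch_sum_cycles(struct dp_netdev_rxq *rxq)
{
    uint64_t total = 0;
    int i;

    for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
        unsigned long long cycles;

        atomic_read_relaxed(&rxq->cycles_intrvl[i], &cycles);
        total += cycles;
    }
    return total;
}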
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
odp_port_t port_no;
bool dynamic_txqs; /* If true XPS will be used. */
bool need_reconfigure; /* True if we should reconfigure netdev. */
struct netdev *netdev;
struct hmap_node node; /* Node in dp_netdev's 'ports'. */
struct netdev_saved_flags *sf;
struct dp_netdev_rxq *rxqs;
unsigned n_rxq; /* Number of elements in 'rxqs' */
unsigned *txq_used; /* Number of threads that use each tx queue. */
struct ovs_mutex txq_used_mutex;
bool emc_enabled; /* If true EMC will be used. */
char *type; /* Port type as requested by user. */
char *rxq_affinity_list; /* Requested affinity of rx queues. */
};
/* Contained by struct dp_netdev_flow's 'stats' member. */
struct dp_netdev_flow_stats {
atomic_llong used; /* Last used time, in monotonic msecs. */
atomic_ullong packet_count; /* Number of packets matched. */
atomic_ullong byte_count; /* Number of bytes matched. */
atomic_uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */
};
/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
*
*
* Thread-safety
* =============
*
* Except near the beginning or ending of its lifespan, rule 'rule' belongs to
* its pmd thread's classifier. The text below calls this classifier 'cls'.
*
* Motivation
* ----------
*
* The thread safety rules described here for "struct dp_netdev_flow" are
* motivated by two goals:
*
* - Prevent threads that read members of "struct dp_netdev_flow" from
* reading bad data due to changes by some thread concurrently modifying
* those members.
*
* - Prevent two threads making changes to members of a given "struct
* dp_netdev_flow" from interfering with each other.
*
*
* Rules
* -----
*
* A flow 'flow' may be accessed without a risk of being freed during an RCU
* grace period. Code that needs to hold onto a flow for a while
* should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
*
* 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
* flow from being deleted from 'cls' and it doesn't protect members of 'flow'
* from modification.
*
* Some members, marked 'const', are immutable. Accessing other members
* requires synchronization, as noted in more detail below.
*/
struct dp_netdev_flow {
const struct flow flow; /* Unmasked flow that created this entry. */
/* Hash table index by unmasked flow. */
const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
/* 'flow_table'. */
const struct cmap_node mark_node; /* In owning flow_mark's mark_to_flow */
const ovs_u128 ufid; /* Unique flow identifier. */
const ovs_u128 mega_ufid; /* Unique mega flow identifier. */
const unsigned pmd_id; /* The 'core_id' of pmd thread owning this */
/* flow. */
/* Number of references.
* The classifier owns one reference.
* Any thread trying to keep a rule from being freed should hold its own
* reference. */
struct ovs_refcount ref_cnt;
bool dead;
uint32_t mark; /* Unique flow mark assigned to a flow */
/* Statistics. */
struct dp_netdev_flow_stats stats;
/* Actions. */
OVSRCU_TYPE(struct dp_netdev_actions *) actions;
/* While processing a group of input packets, the datapath uses the next
* member to store a pointer to the output batch for the flow. It is
* reset after the batch has been sent out (See dp_netdev_queue_batches(),
* packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
struct packet_batch_per_flow *batch;
/* Packet classification. */
struct dpcls_rule cr; /* In owning dp_netdev's 'cls'. */
/* 'cr' must be the last member. */
};
static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
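/* Illustrative sketch only (hypothetical helper, not used by the datapath):
 * per the thread-safety rules above, code that wants to keep using a flow
 * beyond the current RCU grace period takes its own reference first and
 * releases it when done. */
static inline void
dp_netdev_flow_sketch_hold(struct dp_netdev_flow *flow)
{
    if (dp_netdev_flow_ref(flow)) {
        /* ... 'flow' cannot be freed until this reference is dropped ... */
        dp_netdev_flow_unref(flow);
    }
}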
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
struct flow *, bool);
/* A set of datapath actions within a "struct dp_netdev_flow".
*
*
* Thread-safety
* =============
*
* A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
/* These members are immutable: they do not change during the struct's
* lifetime. */
unsigned int size; /* Size of 'actions', in bytes. */
struct nlattr actions[]; /* Sequence of OVS_ACTION_ATTR_* attributes. */
};
struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);
struct polled_queue {
struct dp_netdev_rxq *rxq;
odp_port_t port_no;
bool emc_enabled;
};
/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
struct dp_netdev_rxq *rxq;
struct hmap_node node;
};
/* Contained by struct dp_netdev_pmd_thread's 'send_port_cache',
* 'tnl_port_cache' or 'tx_ports'. */
struct tx_port {
struct dp_netdev_port *port;
int qid;
long long last_used;
struct hmap_node node;
long long flush_time;
struct dp_packet_batch output_pkts;
struct dp_netdev_rxq *output_pkts_rxqs[NETDEV_MAX_BURST];
};
/* A set of properties for the current processing loop that is not directly
* associated with the pmd thread itself, but with the packets being
* processed or the short-term system configuration (for example, time).
* Contained by struct dp_netdev_pmd_thread's 'ctx' member. */
struct dp_netdev_pmd_thread_ctx {
/* Latest measured time. See 'pmd_thread_ctx_time_update()'. */
long long now;
/* RX queue from which last packet was received. */
struct dp_netdev_rxq *last_rxq;
/* EMC insertion probability context for the current processing cycle. */
uint32_t emc_insert_min;
};
/* PMD: Poll mode drivers. A PMD accesses devices by polling, eliminating
* the performance overhead of interrupt processing. Therefore netdev cannot
* implement rx-wait for these devices; dpif-netdev needs to poll these
* devices to check their receive buffers, and each pmd thread polls the
* devices assigned to it.
*
* DPDK uses PMDs to access NICs.
*
* Note: the instance with cpu core id NON_PMD_CORE_ID is reserved for
* I/O of all non-pmd threads. No actual thread is created for that
* instance.
*
* Each struct has its own flow cache and one classifier per managed ingress
* port. For packets received on an ingress port, a lookup is done in the
* corresponding PMD thread's flow cache and, in case of a miss, in the
* port's classifier. In either case the packets are processed with the
* actions found.
*/
struct dp_netdev_pmd_thread {
struct dp_netdev *dp;
struct ovs_refcount ref_cnt; /* Every reference must be refcount'ed. */
struct cmap_node node; /* In 'dp->poll_threads'. */
pthread_cond_t cond; /* For synchronizing pmd thread reload. */
struct ovs_mutex cond_mutex; /* Mutex for condition variable. */
/* Per thread exact-match cache. Note: the instance for cpu core
* NON_PMD_CORE_ID can be accessed by multiple threads, and thus needs
* to be protected by 'non_pmd_mutex'. Every other instance will only
* be accessed by its own pmd thread. */
OVS_ALIGNED_VAR(CACHE_LINE_SIZE) struct dfc_cache flow_cache;
/* Flow-Table and classifiers
*
* Writers of 'flow_table' must take the 'flow_mutex'. Corresponding
* changes to 'classifiers' must be made while still holding the
* 'flow_mutex'.
*/
struct ovs_mutex flow_mutex;
struct cmap flow_table OVS_GUARDED; /* Flow table. */
/* One classifier per in_port polled by the pmd */
struct cmap classifiers;
/* Periodically sort subtable vectors according to hit frequencies */
long long int next_optimization;
/* End of the next time interval for which processing cycles
are stored for each polled rxq. */
long long int rxq_next_cycle_store;
/* Last interval timestamp. */
uint64_t intrvl_tsc_prev;
/* Last interval cycles. */
atomic_ullong intrvl_cycles;
/* Current context of the PMD thread. */
struct dp_netdev_pmd_thread_ctx ctx;
struct latch exit_latch; /* For terminating the pmd thread. */
struct seq *reload_seq;
uint64_t last_reload_seq;
atomic_bool reload; /* Do we need to reload ports? */
pthread_t thread;
unsigned core_id; /* CPU core id of this pmd thread. */
int numa_id; /* numa node id of this pmd thread. */
bool isolated;
/* Queue id used by this pmd thread to send packets on all netdevs if
* XPS is disabled for the netdev. All static_tx_qid's are unique and less
* than 'cmap_count(dp->poll_threads)'. */
uint32_t static_tx_qid;
/* Number of filled output batches. */
int n_output_batches;
struct ovs_mutex port_mutex; /* Mutex for 'poll_list' and 'tx_ports'. */
/* List of rx queues to poll. */
struct hmap poll_list OVS_GUARDED;
/* Map of 'tx_port's used for transmission. Written by the main thread,
* read by the pmd thread. */
struct hmap tx_ports OVS_GUARDED;
/* These are thread-local copies of 'tx_ports'. One contains only tunnel
* ports (that support push_tunnel/pop_tunnel), the other contains ports
* with at least one txq (that support send). A port can be in both.
*
* There are two separate maps to make sure that we don't try to execute
* OUTPUT on a device which has 0 txqs or PUSH/POP on a non-tunnel device.
*
* The instances for cpu core NON_PMD_CORE_ID can be accessed by multiple
* threads, and thus need to be protected by 'non_pmd_mutex'. Every
* other instance will only be accessed by its own pmd thread. */
struct hmap tnl_port_cache;
struct hmap send_port_cache;
/* Keep track of detailed PMD performance statistics. */
struct pmd_perf_stats perf_stats;
/* Stats from previous iteration used by automatic pmd
* load balance logic. */
uint64_t prev_stats[PMD_N_STATS];
atomic_count pmd_overloaded;
/* Set to true if the pmd thread needs to be reloaded. */
bool need_reload;
};
/* Interface to netdev-based datapath. */
struct dpif_netdev {
struct dpif dpif;
struct dp_netdev *dp;
uint64_t last_port_seq;
};
static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
struct dp_netdev_port **portp)
OVS_REQUIRES(dp->port_mutex);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
struct dp_netdev_port **portp)
OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_free(struct dp_netdev *)
OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
const char *type, odp_port_t port_no)
OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
struct dp_packet_batch *,
bool should_steal,
const struct flow *flow,
const struct nlattr *actions,
size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
struct dp_packet_batch *);
static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev *dp, unsigned core_id,
int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
OVS_REQUIRES(dp->port_mutex);
static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_del_pmd(struct dp_netdev *dp,
struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_port *port)
OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
struct tx_port *tx)
OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_rxq *rxq)
OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
struct rxq_poll *poll)
OVS_REQUIRES(pmd->port_mutex);
static int
dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread *pmd,
bool force);
static void reconfigure_datapath(struct dp_netdev *dp)
OVS_REQUIRES(dp->port_mutex);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
OVS_REQUIRES(pmd->port_mutex);
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
struct polled_queue *poll_list, int poll_cnt);
static void
dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
enum rxq_cycles_counter_type type,
unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
enum rxq_cycles_counter_type type);
static void
dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx);
static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
bool purge);
static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
struct tx_port *tx);
static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);
static void smc_clear_entry(struct smc_bucket *b, int idx);
static void dp_netdev_request_reconfigure(struct dp_netdev *dp);
static inline bool
pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd);
static void queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_flow *flow);
static void
emc_cache_init(struct emc_cache *flow_cache)
{
int i;
flow_cache->sweep_idx = 0;
for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
flow_cache->entries[i].flow = NULL;
flow_cache->entries[i].key.hash = 0;
flow_cache->entries[i].key.len = sizeof(struct miniflow);
flowmap_init(&flow_cache->entries[i].key.mf.map);
}
}
static void
smc_cache_init(struct smc_cache *smc_cache)
{
int i, j;
for (i = 0; i < SMC_BUCKET_CNT; i++) {
for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
smc_cache->buckets[i].flow_idx[j] = UINT16_MAX;
}
}
}
static void
dfc_cache_init(struct dfc_cache *flow_cache)
{
emc_cache_init(&flow_cache->emc_cache);
smc_cache_init(&flow_cache->smc_cache);
}
static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
int i;
for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
emc_clear_entry(&flow_cache->entries[i]);
}
}
static void
smc_cache_uninit(struct smc_cache *smc)
{
int i, j;
for (i = 0; i < SMC_BUCKET_CNT; i++) {
for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
smc_clear_entry(&(smc->buckets[i]), j);
}
}
}
static void
dfc_cache_uninit(struct dfc_cache *flow_cache)
{
smc_cache_uninit(&flow_cache->smc_cache);
emc_cache_uninit(&flow_cache->emc_cache);
}
/* Check and clear dead flow references slowly (one entry at each
* invocation). */
static void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];
if (!emc_entry_alive(entry)) {
emc_clear_entry(entry);
}
flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
/* Updates the time in the PMD thread's context and should be called in three cases:
*
* 1. PMD structure initialization:
* - dp_netdev_configure_pmd()
*
* 2. Before processing of the new packet batch:
* - dpif_netdev_execute()
* - dp_netdev_process_rxq_port()
*
* 3. At least once per polling iteration in the main polling threads if no
* packets were received in the current iteration:
* - dpif_netdev_run()
* - pmd_thread_main()
*
* 'pmd->ctx.now' should be used without update in all other cases if possible.
*/
static inline void
pmd_thread_ctx_time_update(struct dp_netdev_pmd_thread *pmd)
{
pmd->ctx.now = time_usec();
}
/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
bool
dpif_is_netdev(const struct dpif *dpif)
{
return dpif->dpif_class->open == dpif_netdev_open;
}
static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
ovs_assert(dpif_is_netdev(dpif));
return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}
static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
return dpif_netdev_cast(dpif)->dp;
}
enum pmd_info_type {
PMD_INFO_SHOW_STATS, /* Show how cpu cycles are spent. */
PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */
PMD_INFO_SHOW_RXQ, /* Show poll lists of pmd threads. */
PMD_INFO_PERF_SHOW, /* Show pmd performance details. */
};
static void
format_pmd_thread(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID)
? "main thread" : "pmd thread");
if (pmd->numa_id != OVS_NUMA_UNSPEC) {
ds_put_format(reply, " numa_id %d", pmd->numa_id);
}
if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) {
ds_put_format(reply, " core_id %u", pmd->core_id);
}
ds_put_cstr(reply, ":\n");
}
static void
pmd_info_show_stats(struct ds *reply,
struct dp_netdev_pmd_thread *pmd)
{
uint64_t stats[PMD_N_STATS];
uint64_t total_cycles, total_packets;
double passes_per_pkt = 0;
double lookups_per_hit = 0;
double packets_per_batch = 0;
pmd_perf_read_counters(&pmd->perf_stats, stats);
total_cycles = stats[PMD_CYCLES_ITER_IDLE]
+ stats[PMD_CYCLES_ITER_BUSY];
total_packets = stats[PMD_STAT_RECV];
format_pmd_thread(reply, pmd);
if (total_packets > 0) {
passes_per_pkt = (total_packets + stats[PMD_STAT_RECIRC])
/ (double) total_packets;
}
if (stats[PMD_STAT_MASKED_HIT] > 0) {
lookups_per_hit = stats[PMD_STAT_MASKED_LOOKUP]
/ (double) stats[PMD_STAT_MASKED_HIT];
}
if (stats[PMD_STAT_SENT_BATCHES] > 0) {