/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. */
#include <config.h>
#include "ofproto/ofproto-dpif-xlate.h"
#include <errno.h>
#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "list.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "tunnel.h"
#include "vlog.h"
COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_mpls_overflow);
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
/* Maximum depth of flow table recursion (due to resubmit actions) in a
* flow translation. */
#define MAX_RESUBMIT_RECURSION 64
/* Maximum number of resubmit actions in a flow translation, whether they are
* recursive or not. */
#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)
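/* Illustrative sketch (added commentary, not from the original source):
 * translation code is expected to check both of these limits before
 * following a resubmit, roughly along the lines below. The exact guard
 * lives in the resubmit path (xlate_table_action() and friends); the
 * shape shown here is a hypothetical simplification.
 *
 *     if (ctx->recurse >= MAX_RESUBMIT_RECURSION
 *         || ctx->resubmits >= MAX_RESUBMITS) {
 *         xlate_report(ctx, "over resubmit limit, dropping");
 *         return;
 *     }
 *     ctx->recurse++;
 *     ctx->resubmits++;
 */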
struct ovs_rwlock xlate_rwlock = OVS_RWLOCK_INITIALIZER;
struct xbridge {
struct hmap_node hmap_node; /* Node in global 'xbridges' map. */
struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */
struct list xbundles; /* Owned xbundles. */
struct hmap xports; /* Indexed by ofp_port. */
char *name; /* Name used in log messages. */
struct dpif *dpif; /* Datapath interface. */
struct mac_learning *ml; /* MAC learning handle. */
struct mbridge *mbridge; /* Mirroring. */
struct dpif_sflow *sflow; /* sFlow handle, or null. */
struct dpif_ipfix *ipfix; /* IPFIX handle, or null. */
struct netflow *netflow; /* NetFlow handle, or null. */
struct stp *stp; /* STP or null if disabled. */
/* Special rules installed by ofproto-dpif. */
struct rule_dpif *miss_rule;
struct rule_dpif *no_packet_in_rule;
enum ofp_config_flags frag; /* Fragmentation handling. */
bool has_in_band; /* Bridge has in band control? */
bool forward_bpdu; /* Bridge forwards STP BPDUs? */
/* True if the datapath supports variable-length
* OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
* False if the datapath supports only 8-byte (or shorter) userdata. */
bool variable_length_userdata;
/* Number of MPLS label stack entries that the datapath supports
* in matches. */
size_t max_mpls_depth;
};
struct xbundle {
struct hmap_node hmap_node; /* In global 'xbundles' map. */
struct ofbundle *ofbundle; /* Key in global 'xbundles' map. */
struct list list_node; /* In parent 'xbridges' list. */
struct xbridge *xbridge; /* Parent xbridge. */
struct list xports; /* Contains "struct xport"s. */
char *name; /* Name used in log messages. */
struct bond *bond; /* Nonnull iff more than one port. */
struct lacp *lacp; /* LACP handle or null. */
enum port_vlan_mode vlan_mode; /* VLAN mode. */
int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
* NULL if all VLANs are trunked. */
bool use_priority_tags; /* Use 802.1p tag for frames in VLAN 0? */
bool floodable; /* No port has OFPUTIL_PC_NO_FLOOD set? */
};
struct xport {
struct hmap_node hmap_node; /* Node in global 'xports' map. */
struct ofport_dpif *ofport; /* Key in global 'xports' map. */
struct hmap_node ofp_node; /* Node in parent xbridge 'xports' map. */
ofp_port_t ofp_port; /* Key in parent xbridge 'xports' map. */
odp_port_t odp_port; /* Datapath port number or ODPP_NONE. */
struct list bundle_node; /* In parent xbundle (if it exists). */
struct xbundle *xbundle; /* Parent xbundle or null. */
struct netdev *netdev; /* 'ofport''s netdev. */
struct xbridge *xbridge; /* Parent bridge. */
struct xport *peer; /* Patch port peer or null. */
enum ofputil_port_config config; /* OpenFlow port configuration. */
enum ofputil_port_state state; /* OpenFlow port state. */
int stp_port_no; /* STP port number or -1 if not in use. */
struct hmap skb_priorities; /* Map of 'skb_priority_to_dscp's. */
bool may_enable; /* May be enabled in bonds. */
bool is_tunnel; /* Is a tunnel port. */
struct cfm *cfm; /* CFM handle or null. */
struct bfd *bfd; /* BFD handle or null. */
};
struct xlate_ctx {
struct xlate_in *xin;
struct xlate_out *xout;
const struct xbridge *xbridge;
/* Flow at the last commit. */
struct flow base_flow;
/* Tunnel IP destination address as received. This is stored separately
* as the base_flow.tunnel is cleared on init to reflect the datapath
* behavior. Used to make sure not to send tunneled output to ourselves,
* which might lead to an infinite loop. This could happen easily
* if a tunnel is marked as 'remote_ip=flow', and the flow does not
* actually set the tun_dst field. */
ovs_be32 orig_tunnel_ip_dst;
/* Stack for the push and pop actions. Each stack element is of type
* "union mf_subvalue". */
union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
struct ofpbuf stack;
/* The rule that we are currently translating, or NULL. */
struct rule_dpif *rule;
/* Resubmit statistics, via xlate_table_action(). */
int recurse; /* Current resubmit nesting depth. */
int resubmits; /* Total number of resubmits. */
bool in_group; /* Currently translating ofgroup, if true. */
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
uint32_t sflow_n_outputs; /* Number of output ports. */
odp_port_t sflow_odp_port; /* Output port for composing sFlow action. */
uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
/* OpenFlow 1.1+ action set.
*
* 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
* When translation is otherwise complete, ofpacts_execute_action_set()
* converts it to a set of "struct ofpact"s that can be translated into
* datapath actions. */
struct ofpbuf action_set; /* Action set. */
uint64_t action_set_stub[1024 / 8];
};
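/* Usage sketch (added commentary; a hypothetical simplification): the
 * *_stub arrays above give 'stack' and 'action_set' inline storage, so
 * initializing a context typically wires them up with ofpbuf_use_stub()
 * before translation begins:
 *
 *     ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
 *     ofpbuf_use_stub(&ctx.action_set, ctx.action_set_stub,
 *                     sizeof ctx.action_set_stub);
 *
 * This avoids heap allocation unless a translation overflows the stubs. */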
/* A controller may use OFPP_NONE as the ingress port to indicate that
 * a packet did not arrive on a "real" port. 'ofpp_none_bundle' exists
 * for when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing). It is not connected to an 'ofproto', nor
 * does it have any 'port' structs, so care must be taken when dealing
 * with it. */
static struct xbundle ofpp_none_bundle = {
.name = "OFPP_NONE",
.vlan_mode = PORT_VLAN_TRUNK
};
/* Node in 'xport''s 'skb_priorities' map. Used to maintain a map from
* 'priority' (the datapath's term for QoS queue) to the dscp bits which all
* traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
struct hmap_node hmap_node; /* Node in 'xport''s 'skb_priorities' map. */
uint32_t skb_priority; /* Priority of this queue (see struct flow). */
uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
};
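/* Worked example (added commentary): DSCP occupies the upper six bits of
 * the IP ToS byte, so a configured DSCP of 46 (EF) is stored here as
 * (46 << 2) & IP_DSCP_MASK == 0xb8, the value ultimately written into
 * nw_tos for traffic egressing on the matching queue. */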
static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
static struct hmap xports = HMAP_INITIALIZER(&xports);
static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct xlate_ctx *);
static void xlate_actions__(struct xlate_in *, struct xlate_out *)
OVS_REQ_RDLOCK(xlate_rwlock);
static void xlate_normal(struct xlate_ctx *);
static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
uint8_t table_id, bool may_packet_in,
bool honor_table_miss);
static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
uint16_t vlan);
static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port);
static struct xbridge *xbridge_lookup(const struct ofproto_dpif *);
static struct xbundle *xbundle_lookup(const struct ofbundle *);
static struct xport *xport_lookup(const struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
uint8_t *dscp);
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
struct dpif *dpif, struct rule_dpif *miss_rule,
struct rule_dpif *no_packet_in_rule,
const struct mac_learning *ml, struct stp *stp,
const struct mbridge *mbridge,
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
const struct netflow *netflow, enum ofp_config_flags frag,
bool forward_bpdu, bool has_in_band,
bool variable_length_userdata,
size_t max_mpls_depth)
{
struct xbridge *xbridge = xbridge_lookup(ofproto);
if (!xbridge) {
xbridge = xzalloc(sizeof *xbridge);
xbridge->ofproto = ofproto;
hmap_insert(&xbridges, &xbridge->hmap_node, hash_pointer(ofproto, 0));
hmap_init(&xbridge->xports);
list_init(&xbridge->xbundles);
}
if (xbridge->ml != ml) {
mac_learning_unref(xbridge->ml);
xbridge->ml = mac_learning_ref(ml);
}
if (xbridge->mbridge != mbridge) {
mbridge_unref(xbridge->mbridge);
xbridge->mbridge = mbridge_ref(mbridge);
}
if (xbridge->sflow != sflow) {
dpif_sflow_unref(xbridge->sflow);
xbridge->sflow = dpif_sflow_ref(sflow);
}
if (xbridge->ipfix != ipfix) {
dpif_ipfix_unref(xbridge->ipfix);
xbridge->ipfix = dpif_ipfix_ref(ipfix);
}
if (xbridge->stp != stp) {
stp_unref(xbridge->stp);
xbridge->stp = stp_ref(stp);
}
if (xbridge->netflow != netflow) {
netflow_unref(xbridge->netflow);
xbridge->netflow = netflow_ref(netflow);
}
free(xbridge->name);
xbridge->name = xstrdup(name);
xbridge->dpif = dpif;
xbridge->forward_bpdu = forward_bpdu;
xbridge->has_in_band = has_in_band;
xbridge->frag = frag;
xbridge->miss_rule = miss_rule;
xbridge->no_packet_in_rule = no_packet_in_rule;
xbridge->variable_length_userdata = variable_length_userdata;
xbridge->max_mpls_depth = max_mpls_depth;
}
void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
struct xbridge *xbridge = xbridge_lookup(ofproto);
struct xbundle *xbundle, *next_xbundle;
struct xport *xport, *next_xport;
if (!xbridge) {
return;
}
HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
xlate_ofport_remove(xport->ofport);
}
LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
xlate_bundle_remove(xbundle->ofbundle);
}
hmap_remove(&xbridges, &xbridge->hmap_node);
mac_learning_unref(xbridge->ml);
mbridge_unref(xbridge->mbridge);
dpif_sflow_unref(xbridge->sflow);
dpif_ipfix_unref(xbridge->ipfix);
stp_unref(xbridge->stp);
hmap_destroy(&xbridge->xports);
free(xbridge->name);
free(xbridge);
}
void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
const char *name, enum port_vlan_mode vlan_mode, int vlan,
unsigned long *trunks, bool use_priority_tags,
const struct bond *bond, const struct lacp *lacp,
bool floodable)
{
struct xbundle *xbundle = xbundle_lookup(ofbundle);
if (!xbundle) {
xbundle = xzalloc(sizeof *xbundle);
xbundle->ofbundle = ofbundle;
xbundle->xbridge = xbridge_lookup(ofproto);
hmap_insert(&xbundles, &xbundle->hmap_node, hash_pointer(ofbundle, 0));
list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
list_init(&xbundle->xports);
}
ovs_assert(xbundle->xbridge);
free(xbundle->name);
xbundle->name = xstrdup(name);
xbundle->vlan_mode = vlan_mode;
xbundle->vlan = vlan;
xbundle->trunks = trunks;
xbundle->use_priority_tags = use_priority_tags;
xbundle->floodable = floodable;
if (xbundle->bond != bond) {
bond_unref(xbundle->bond);
xbundle->bond = bond_ref(bond);
}
if (xbundle->lacp != lacp) {
lacp_unref(xbundle->lacp);
xbundle->lacp = lacp_ref(lacp);
}
}
void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
struct xbundle *xbundle = xbundle_lookup(ofbundle);
struct xport *xport, *next;
if (!xbundle) {
return;
}
LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
list_remove(&xport->bundle_node);
xport->xbundle = NULL;
}
hmap_remove(&xbundles, &xbundle->hmap_node);
list_remove(&xbundle->list_node);
bond_unref(xbundle->bond);
lacp_unref(xbundle->lacp);
free(xbundle->name);
free(xbundle);
}
void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
struct ofport_dpif *ofport, ofp_port_t ofp_port,
odp_port_t odp_port, const struct netdev *netdev,
const struct cfm *cfm, const struct bfd *bfd,
struct ofport_dpif *peer, int stp_port_no,
const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
enum ofputil_port_config config,
enum ofputil_port_state state, bool is_tunnel,
bool may_enable)
{
struct xport *xport = xport_lookup(ofport);
size_t i;
if (!xport) {
xport = xzalloc(sizeof *xport);
xport->ofport = ofport;
xport->xbridge = xbridge_lookup(ofproto);
xport->ofp_port = ofp_port;
hmap_init(&xport->skb_priorities);
hmap_insert(&xports, &xport->hmap_node, hash_pointer(ofport, 0));
hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
hash_ofp_port(xport->ofp_port));
}
ovs_assert(xport->ofp_port == ofp_port);
xport->config = config;
xport->state = state;
xport->stp_port_no = stp_port_no;
xport->is_tunnel = is_tunnel;
xport->may_enable = may_enable;
xport->odp_port = odp_port;
if (xport->netdev != netdev) {
netdev_close(xport->netdev);
xport->netdev = netdev_ref(netdev);
}
if (xport->cfm != cfm) {
cfm_unref(xport->cfm);
xport->cfm = cfm_ref(cfm);
}
if (xport->bfd != bfd) {
bfd_unref(xport->bfd);
xport->bfd = bfd_ref(bfd);
}
if (xport->peer) {
xport->peer->peer = NULL;
}
xport->peer = xport_lookup(peer);
if (xport->peer) {
xport->peer->peer = xport;
}
if (xport->xbundle) {
list_remove(&xport->bundle_node);
}
xport->xbundle = xbundle_lookup(ofbundle);
if (xport->xbundle) {
list_insert(&xport->xbundle->xports, &xport->bundle_node);
}
clear_skb_priorities(xport);
for (i = 0; i < n_qdscp; i++) {
struct skb_priority_to_dscp *pdscp;
uint32_t skb_priority;
if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
&skb_priority)) {
continue;
}
pdscp = xmalloc(sizeof *pdscp);
pdscp->skb_priority = skb_priority;
pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
hash_int(pdscp->skb_priority, 0));
}
}
void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
struct xport *xport = xport_lookup(ofport);
if (!xport) {
return;
}
if (xport->peer) {
xport->peer->peer = NULL;
xport->peer = NULL;
}
if (xport->xbundle) {
list_remove(&xport->bundle_node);
}
clear_skb_priorities(xport);
hmap_destroy(&xport->skb_priorities);
hmap_remove(&xports, &xport->hmap_node);
hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
netdev_close(xport->netdev);
cfm_unref(xport->cfm);
bfd_unref(xport->bfd);
free(xport);
}
/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
 * Optionally populates 'ofproto' with the ofproto_dpif that 'packet'
 * ingressed, 'odp_in_port' with the datapath in_port on which it arrived,
 * and 'ipfix', 'sflow', and 'netflow' with the appropriate handles for those
 * protocols if they're enabled. The caller is responsible for unrefing them.
*
* If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets
* 'flow''s in_port to OFPP_NONE.
*
* This function does post-processing on data returned from
* odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
* of the upcall processing logic. In particular, if the extracted in_port is
* a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
* flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
* a VLAN header onto 'packet' (if it is nonnull).
*
* Similarly, this function also includes some logic to help with tunnels. It
* may modify 'flow' as necessary to make the tunneling implementation
* transparent to the upcall processing logic.
*
* Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
* or some other positive errno if there are other problems. */
int
xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
const struct nlattr *key, size_t key_len, struct flow *flow,
struct ofproto_dpif **ofproto, struct dpif_ipfix **ipfix,
struct dpif_sflow **sflow, struct netflow **netflow,
odp_port_t *odp_in_port)
{
const struct xport *xport;
int error = ENODEV;
ovs_rwlock_rdlock(&xlate_rwlock);
if (odp_flow_key_to_flow(key, key_len, flow) == ODP_FIT_ERROR) {
error = EINVAL;
goto exit;
}
if (odp_in_port) {
*odp_in_port = flow->in_port.odp_port;
}
xport = xport_lookup(tnl_port_should_receive(flow)
? tnl_port_receive(flow)
: odp_port_to_ofport(backer, flow->in_port.odp_port));
flow->in_port.ofp_port = xport ? xport->ofp_port : OFPP_NONE;
if (!xport) {
goto exit;
}
if (vsp_adjust_flow(xport->xbridge->ofproto, flow)) {
if (packet) {
/* Make the packet resemble the flow, so that it gets sent to
* an OpenFlow controller properly, so that it looks correct
* for sFlow, and so that flow_extract() will get the correct
* vlan_tci if it is called on 'packet'. */
eth_push_vlan(packet, htons(ETH_TYPE_VLAN), flow->vlan_tci);
}
}
error = 0;
if (ofproto) {
*ofproto = xport->xbridge->ofproto;
}
if (ipfix) {
*ipfix = dpif_ipfix_ref(xport->xbridge->ipfix);
}
if (sflow) {
*sflow = dpif_sflow_ref(xport->xbridge->sflow);
}
if (netflow) {
*netflow = netflow_ref(xport->xbridge->netflow);
}
exit:
ovs_rwlock_unlock(&xlate_rwlock);
return error;
}
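/* Caller sketch (added commentary; hypothetical, following the contract
 * documented above): an upcall handler might invoke xlate_receive() as
 * below, passing NULL for handles it does not need and unreffing the
 * ones it asked for.
 *
 *     struct ofproto_dpif *ofproto;
 *     struct netflow *netflow;
 *     struct flow flow;
 *
 *     int error = xlate_receive(backer, packet, key, key_len, &flow,
 *                               &ofproto, NULL, NULL, &netflow, NULL);
 *     if (!error) {
 *         ...translate 'flow' against 'ofproto', then...
 *         netflow_unref(netflow);
 *     }
 */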
static struct xbridge *
xbridge_lookup(const struct ofproto_dpif *ofproto)
{
struct xbridge *xbridge;
if (!ofproto) {
return NULL;
}
HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
&xbridges) {
if (xbridge->ofproto == ofproto) {
return xbridge;
}
}
return NULL;
}
static struct xbundle *
xbundle_lookup(const struct ofbundle *ofbundle)
{
struct xbundle *xbundle;
if (!ofbundle) {
return NULL;
}
HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
&xbundles) {
if (xbundle->ofbundle == ofbundle) {
return xbundle;
}
}
return NULL;
}
static struct xport *
xport_lookup(const struct ofport_dpif *ofport)
{
struct xport *xport;
if (!ofport) {
return NULL;
}
HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
&xports) {
if (xport->ofport == ofport) {
return xport;
}
}
return NULL;
}
static struct stp_port *
xport_get_stp_port(const struct xport *xport)
{
return xport->xbridge->stp && xport->stp_port_no != -1
? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
: NULL;
}
static bool
xport_stp_learn_state(const struct xport *xport)
{
struct stp_port *sp = xport_get_stp_port(xport);
return stp_learn_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
}
static bool
xport_stp_forward_state(const struct xport *xport)
{
struct stp_port *sp = xport_get_stp_port(xport);
return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
}
static bool
xport_stp_listen_state(const struct xport *xport)
{
struct stp_port *sp = xport_get_stp_port(xport);
return stp_listen_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
}
/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
* were used to make the determination. */
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}
static void
stp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
{
struct stp_port *sp = xport_get_stp_port(xport);
struct ofpbuf payload = *packet;
struct eth_header *eth = ofpbuf_data(&payload);
/* Sink packets on ports that have STP disabled when the bridge has
* STP enabled. */
if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
return;
}
/* Trim off padding on payload. */
if (ofpbuf_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
ofpbuf_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
}
if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
stp_received_bpdu(sp, ofpbuf_data(&payload), ofpbuf_size(&payload));
}
}
static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
struct xport *xport;
HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
&xbridge->xports) {
if (xport->ofp_port == ofp_port) {
return xport;
}
}
return NULL;
}
static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
const struct xport *xport = get_ofp_port(xbridge, ofp_port);
return xport ? xport->odp_port : ODPP_NONE;
}
static bool
odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
struct xport *xport;
xport = get_ofp_port(ctx->xbridge, ofp_port);
if (!xport || xport->config & OFPUTIL_PC_PORT_DOWN ||
xport->state & OFPUTIL_PS_LINK_DOWN) {
return false;
}
return true;
}
static const struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
int depth);
static bool
group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
{
struct group_dpif *group;
bool hit;
hit = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
if (!hit) {
return false;
}
hit = group_first_live_bucket(ctx, group, depth) != NULL;
group_dpif_release(group);
return hit;
}
#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
static bool
bucket_is_alive(const struct xlate_ctx *ctx,
const struct ofputil_bucket *bucket, int depth)
{
if (depth >= MAX_LIVENESS_RECURSION) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
VLOG_WARN_RL(&rl, "bucket chaining exceeded %d links",
MAX_LIVENESS_RECURSION);
return false;
}
return !ofputil_bucket_has_liveness(bucket) ||
(bucket->watch_port != OFPP_ANY &&
odp_port_is_alive(ctx, bucket->watch_port)) ||
(bucket->watch_group != OFPG_ANY &&
group_is_alive(ctx, bucket->watch_group, depth + 1));
}
}
static const struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *ctx,
const struct group_dpif *group, int depth)
{
struct ofputil_bucket *bucket;
const struct list *buckets;
group_dpif_get_buckets(group, &buckets);
LIST_FOR_EACH (bucket, list_node, buckets) {
if (bucket_is_alive(ctx, bucket, depth)) {
return bucket;
}
}
return NULL;
}
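/* Worked example (added commentary): for an OpenFlow fast-failover group
 * such as
 *
 *     group_id=1,type=ff,bucket=watch_port:1,output:1,
 *                        bucket=watch_port:2,output:2
 *
 * group_first_live_bucket() above returns the first bucket whose watched
 * port (or group) is alive, so traffic shifts from port 1 to port 2 when
 * port 1's link goes down, without controller involvement. */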
static const struct ofputil_bucket *
group_best_live_bucket(const struct xlate_ctx *ctx,
const struct group_dpif *group,
uint32_t basis)
{
const struct ofputil_bucket *best_bucket = NULL;
uint32_t best_score = 0;
int i = 0;
const struct ofputil_bucket *bucket;
const struct list *buckets;
group_dpif_get_buckets(group, &buckets);
LIST_FOR_EACH (bucket, list_node, buckets) {
if (bucket_is_alive(ctx, bucket, 0)) {
uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
if (score >= best_score) {
best_bucket = bucket;
best_score = score;
}
}
i++;
}
return best_bucket;
}
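/* Worked example (added commentary): with two live buckets of weights 1
 * and 2, each bucket i scores (hash_int(i, basis) & 0xffff) * weight and
 * the highest score wins. Heavier buckets therefore win more often
 * across flows (though not in exact proportion to their weights), while
 * a fixed 'basis' always selects the same bucket, keeping select-group
 * bucket choice stable for a given flow. */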
static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
return (bundle->vlan_mode != PORT_VLAN_ACCESS
&& (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}
static bool
xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
{
return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
}
static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
return xbundle != &ofpp_none_bundle
? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
: 0;
}
static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
return xbundle != &ofpp_none_bundle
? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
: 0;
}
static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
return xbundle != &ofpp_none_bundle
? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
: 0;
}
static struct xbundle *
lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
bool warn, struct xport **in_xportp)
{
struct xport *xport;
/* Find the port and bundle for the received packet. */
xport = get_ofp_port(xbridge, in_port);
if (in_xportp) {
*in_xportp = xport;
}
if (xport && xport->xbundle) {
return xport->xbundle;
}
/* Special-case OFPP_NONE, which a controller may use as the ingress
* port for traffic that it is sourcing. */
if (in_port == OFPP_NONE) {
return &ofpp_none_bundle;
}
/* Odd. A few possible reasons here:
*
* - We deleted a port but there are still a few packets queued up
* from it.
*
* - Someone externally added a port (e.g. "ovs-dpctl add-if") that
* we don't know about.
*
* - The ofproto client didn't configure the port as part of a bundle.
* This is particularly likely to happen if a packet was received on the
* port after it was created, but before the client had a chance to
* configure its bundle.
*/
if (warn) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
"port %"PRIu16, xbridge->name, in_port);
}
return NULL;
}
static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
const struct xbridge *xbridge = ctx->xbridge;
mirror_mask_t mirrors;
struct xbundle *in_xbundle;
uint16_t vlan;
uint16_t vid;
mirrors = ctx->xout->mirrors;
ctx->xout->mirrors = 0;
in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
ctx->xin->packet != NULL, NULL);
if (!in_xbundle) {
return;
}
mirrors |= xbundle_mirror_src(xbridge, in_xbundle);
/* Drop frames on bundles reserved for mirroring. */
if (xbundle_mirror_out(xbridge, in_xbundle)) {
if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
"%s, which is reserved exclusively for mirroring",
ctx->xbridge->name, in_xbundle->name);
}
ofpbuf_clear(&ctx->xout->odp_actions);
return;
}
/* Check VLAN. */
vid = vlan_tci_to_vid(orig_flow->vlan_tci);
if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
return;
}
vlan = input_vid_to_vlan(in_xbundle, vid);
if (!mirrors) {
return;
}
/* Restore the original packet before adding the mirror actions. */
ctx->xin->flow = *orig_flow;
while (mirrors) {
mirror_mask_t dup_mirrors;
struct ofbundle *out;
unsigned long *vlans;
bool vlan_mirrored;
bool has_mirror;
int out_vlan;
has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
&vlans, &dup_mirrors, &out, &out_vlan);
ovs_assert(has_mirror);
if (vlans) {
ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
}
vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
free(vlans);
if (!vlan_mirrored) {
mirrors = zero_rightmost_1bit(mirrors);
continue;
}
mirrors &= ~dup_mirrors;
ctx->xout->mirrors |= dup_mirrors;
if (out) {
struct xbundle *out_xbundle = xbundle_lookup(out);
if (out_xbundle) {
output_normal(ctx, out_xbundle, vlan);
}
} else if (vlan != out_vlan