/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct ovs_list xbundles;     /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mcast_snooping *ms;    /* Multicast Snooping handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct netflow *netflow;      /* Netflow handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */
    struct rstp *rstp;            /* RSTP or null if disabled. */

    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */

    /* Datapath feature support. */
    struct dpif_backer_support support;
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct ovs_list list_node;     /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct ovs_list xports;        /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    int recurse;                /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */
    bool in_group;              /* Currently translating ofgroup, if true. */
    bool in_action_set;         /* Currently translating action_set, if true. */

    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    ovs_be64 rule_cookie;       /* Cookie of the rule being translated. */

    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
    /* These are used for non-bond recirculation.  The recirculation IDs are
     * stored in xout and must be associated with a datapath flow (ukey),
     * otherwise they will be freed when the xout is uninitialized.
     *
     *
     * Steps in Recirculation Translation
     * ==================================
     *
     * At some point during translation, the code recognizes the need for
     * recirculation.  For example, recirculation is necessary when, after
     * popping the last MPLS label, an action or a match tries to examine or
     * modify a field that has been newly revealed following the MPLS label.
     *
     * The simplest part of the work to be done is to commit existing changes to
     * the packet, which produces datapath actions corresponding to the changes,
     * and after this, add an OVS_ACTION_ATTR_RECIRC datapath action.
     *
     * The main problem here is preserving state.  When the datapath executes
     * OVS_ACTION_ATTR_RECIRC, it will upcall to userspace to get a translation
     * for the post-recirculation actions.  At this point userspace has to
     * resume the translation where it left off, which means that it has to
     * execute the following:
     *
     *     - The action that prompted recirculation, and any actions following
     *       it within the same flow.
     *
     *     - If the action that prompted recirculation was invoked within a
     *       NXAST_RESUBMIT, then any actions following the resubmit.  These
     *       "resubmit"s can be nested, so this has to go all the way up the
     *       control stack.
     *
     *     - The OpenFlow 1.1+ action set.
     *
     * State that actions and flow table lookups can depend on, such as the
     * following, must also be preserved:
     *
     *     - Metadata fields (input port, registers, OF1.1+ metadata, ...).
     *
     *     - Action set, stack.
     *
     *     - The table ID and cookie of the flow being translated at each level
     *       of the control stack (since OFPAT_CONTROLLER actions send these to
     *       the controller).
     *
     * Translation allows for the control of this state preservation via these
     * members.  When a need for recirculation is identified, the translation
     * process:
     *
     * 1. Sets 'recirc_action_offset' to the current size of 'action_set'.  The
     *    action set is part of what needs to be preserved, so this allows the
     *    action set and the additional state to share the 'action_set' buffer.
     *    Later steps can tell that setup for recirculation is in progress from
     *    the nonnegative value of 'recirc_action_offset'.
     *
     * 2. Sets 'exit' to true to tell later steps that we're exiting from the
     *    translation process.
     *
     * 3. Adds an OFPACT_UNROLL_XLATE action to 'action_set'.  This action
     *    holds the current table ID and cookie so that they can be restored
     *    during a post-recirculation upcall translation.
     *
     * 4. Adds the action that prompted recirculation and any actions following
     *    it within the same flow to 'action_set', so that they can be executed
     *    during a post-recirculation upcall translation.
     *
     * 5. Returns.
     *
     * 6. The action that prompted recirculation might be nested in a stack of
     *    nested "resubmit"s that have actions remaining.  Each of these notices
     *    that we're exiting (from 'exit') and that recirculation setup is in
     *    progress (from 'recirc_action_offset') and responds by adding more
     *    OFPACT_UNROLL_XLATE actions to 'action_set', as necessary, and any
     *    actions that were yet unprocessed.
     *
     * The caller stores all the state produced by this process associated with
     * the recirculation ID.  For post-recirculation upcall translation, the
     * caller passes it back in for the new translation to execute.  The
     * process yielded a set of ofpacts that can be translated directly, so it
     * is not much of a special case at that point.
     */
    int recirc_action_offset;   /* Offset in 'action_set' to actions to be
                                 * executed after recirculation, or -1. */
    int last_unroll_offset;     /* Offset in 'action_set' to the latest unroll
                                 * action. */
    /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
     * This is a trigger for recirculation in cases where translating an action
     * or looking up a flow requires access to the fields of the packet after
     * the MPLS label stack that was originally present. */
    bool was_mpls;

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    bool action_set_has_group;  /* Action set contains OFPACT_GROUP? */
    struct ofpbuf action_set;   /* Action set. */
    uint64_t action_set_stub[1024 / 8];
static void xlate_action_set(struct xlate_ctx *ctx);

static void
ctx_trigger_recirculation(struct xlate_ctx *ctx)
{
    ctx->exit = true;
    ctx->recirc_action_offset = ctx->action_set.size;
}

static bool
ctx_first_recirculation_action(const struct xlate_ctx *ctx)
{
    return ctx->recirc_action_offset == ctx->action_set.size;
}

static bool
exit_recirculates(const struct xlate_ctx *ctx)
{
    /* When recirculating the 'recirc_action_offset' has a non-negative
     * value. */
    return ctx->recirc_action_offset >= 0;
}

static void compose_recirculate_action(struct xlate_ctx *ctx);
/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and has no
 * 'port' structs, so care must be taken when dealing with it. */
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          uint16_t vlan);

/* Optional bond recirculation parameter to compose_output_action(). */
struct xlate_bond_recirc {
    uint32_t recirc_id;  /* !0 Use recirculation instead of output. */
    uint8_t  hash_alg;   /* !0 Compute hash for recirc before. */
    uint32_t hash_basis; /* Compute hash for recirc before. */
};

static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
                                  const struct xlate_bond_recirc *xr);

static struct xbridge *xbridge_lookup(struct xlate_cfg *,
                                      const struct ofproto_dpif *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
                                      const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
                                  const struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
                                              enum xc_type type);
static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
                              const struct mac_learning *, struct stp *,
                              struct rstp *, const struct mcast_snooping *,
                              const struct mbridge *,
                              const struct dpif_sflow *,
                              const struct dpif_ipfix *,
                              const struct netflow *,
                              bool forward_bpdu, bool has_in_band,
                              const struct dpif_backer_support *);
static void xlate_xbundle_set(struct xbundle *xbundle,
                              enum port_vlan_mode vlan_mode, int vlan,
                              unsigned long *trunks, bool use_priority_tags,
                              const struct bond *bond, const struct lacp *lacp,
                              bool floodable);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                            const struct netdev *netdev, const struct cfm *cfm,
                            const struct bfd *bfd, const struct lldp *lldp,
                            int stp_port_no, const struct rstp_port *rstp_port,
                            enum ofputil_port_config config,
                            enum ofputil_port_state state, bool is_tunnel,
                            bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
                             struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);
static void
xlate_report(struct xlate_ctx *ctx, const char *s)
{
    if (OVS_UNLIKELY(ctx->xin->report_hook)) {
        ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
    }
}
static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    list_init(&xbridge->xbundles);
    hmap_init(&xbridge->xports);
    hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
                hash_pointer(xbridge->ofproto, 0));
}

static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    list_init(&xbundle->xports);
    list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
    hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
                hash_pointer(xbundle->ofbundle, 0));
}

static void
xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
{
    hmap_init(&xport->skb_priorities);
    hmap_insert(&xcfg->xports, &xport->hmap_node,
                hash_pointer(xport->ofport, 0));
    hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                hash_ofp_port(xport->ofp_port));
}
static void
xlate_xbridge_set(struct xbridge *xbridge,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  const struct dpif_backer_support *support)
{
    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->ms != ms) {
        mcast_snooping_unref(xbridge->ms);
        xbridge->ms = mcast_snooping_ref(ms);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    if (xbridge->rstp != rstp) {
        rstp_unref(xbridge->rstp);
        xbridge->rstp = rstp_ref(rstp);
    }

    if (xbridge->netflow != netflow) {
        netflow_unref(xbridge->netflow);
        xbridge->netflow = netflow_ref(netflow);
    }

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->support = *support;
}
static void
xlate_xbundle_set(struct xbundle *xbundle,
                  enum port_vlan_mode vlan_mode, int vlan,
                  unsigned long *trunks, bool use_priority_tags,
                  const struct bond *bond, const struct lacp *lacp,
                  bool floodable)
{
    ovs_assert(xbundle->xbridge);

    xbundle->vlan_mode = vlan_mode;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}
static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                const struct netdev *netdev, const struct cfm *cfm,
                const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
                const struct rstp_port *rstp_port,
                enum ofputil_port_config config, enum ofputil_port_state state,
                bool is_tunnel, bool may_enable)
{
    xport->config = config;
    xport->state = state;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->rstp_port != rstp_port) {
        rstp_port_unref(xport->rstp_port);
        xport->rstp_port = rstp_port_ref(rstp_port);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->lldp != lldp) {
        lldp_unref(xport->lldp);
        xport->lldp = lldp_ref(lldp);
    }

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }
}
static void
xlate_xbridge_copy(struct xbridge *xbridge)
{
    struct xbundle *xbundle;
    struct xport *xport;
    struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
    new_xbridge->ofproto = xbridge->ofproto;
    new_xbridge->name = xstrdup(xbridge->name);
    xlate_xbridge_init(new_xcfg, new_xbridge);

    xlate_xbridge_set(new_xbridge,
                      xbridge->dpif, xbridge->ml, xbridge->stp,
                      xbridge->rstp, xbridge->ms, xbridge->mbridge,
                      xbridge->sflow, xbridge->ipfix, xbridge->netflow,
                      xbridge->forward_bpdu, xbridge->has_in_band,
                      &xbridge->support);

    LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_copy(new_xbridge, xbundle);
    }

    /* Copy xports which are not part of an xbundle. */
    HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
        if (!xport->xbundle) {
            xlate_xport_copy(new_xbridge, NULL, xport);
        }
    }
}
static void
xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
{
    struct xport *xport;
    struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
    new_xbundle->ofbundle = xbundle->ofbundle;
    new_xbundle->xbridge = xbridge;
    new_xbundle->name = xstrdup(xbundle->name);
    xlate_xbundle_init(new_xcfg, new_xbundle);

    xlate_xbundle_set(new_xbundle, xbundle->vlan_mode,
                      xbundle->vlan, xbundle->trunks,
                      xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
                      xbundle->floodable);

    LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
        xlate_xport_copy(xbridge, new_xbundle, xport);
    }
}
static void
xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
                 struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *new_pdscp;
    struct xport *new_xport = xzalloc(sizeof *xport);
    new_xport->ofport = xport->ofport;
    new_xport->ofp_port = xport->ofp_port;
    new_xport->xbridge = xbridge;
    xlate_xport_init(new_xcfg, new_xport);

    xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
                    xport->bfd, xport->lldp, xport->stp_port_no,
                    xport->rstp_port, xport->config, xport->state,
                    xport->is_tunnel, xport->may_enable);

    if (xport->peer) {
        struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
        if (peer) {
            new_xport->peer = peer;
            new_xport->peer->peer = new_xport;
        }
    }

    if (xbundle) {
        new_xport->xbundle = xbundle;
        list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
    }

    HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
        new_pdscp = xmalloc(sizeof *pdscp);
        new_pdscp->skb_priority = pdscp->skb_priority;
        new_pdscp->dscp = pdscp->dscp;
        hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
                    hash_int(new_pdscp->skb_priority, 0));
    }
}
/* Sets the current xlate configuration to new_xcfg and frees the old xlate
 * configuration in xcfgp.
 *
 * This needs to be called after editing the xlate configuration.
 *
 * Functions that edit the new xlate configuration are
 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
 *
 * A sample workflow:
 *
 * xlate_txn_start();
 * ...
 * edit_xlate_configuration();
 * ...
 * xlate_txn_commit(); */
void
xlate_txn_commit(void)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    ovsrcu_set(&xcfgp, new_xcfg);
    ovsrcu_synchronize();
    xlate_xcfg_free(xcfg);

    new_xcfg = NULL;
}
/* Copies the current xlate configuration in xcfgp to new_xcfg.
 *
 * This needs to be called prior to editing the xlate configuration. */
void
xlate_txn_start(void)
{
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;

    ovs_assert(!new_xcfg);

    new_xcfg = xmalloc(sizeof *new_xcfg);
    hmap_init(&new_xcfg->xbridges);
    hmap_init(&new_xcfg->xbundles);
    hmap_init(&new_xcfg->xports);

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_copy(xbridge);
    }
}
static void
xlate_xcfg_free(struct xlate_cfg *xcfg)
{
    struct xbridge *xbridge, *next_xbridge;

    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_remove(xcfg, xbridge);
    }

    hmap_destroy(&xcfg->xbridges);
    hmap_destroy(&xcfg->xbundles);
    hmap_destroy(&xcfg->xports);
    free(xcfg);
}
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  const struct dpif_backer_support *support)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        xlate_xbridge_init(new_xcfg, xbridge);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
                      netflow, forward_bpdu, has_in_band, support);
}
static void
xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_xport_remove(xcfg, xport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_remove(xcfg, xbundle);
    }

    hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
    mac_learning_unref(xbridge->ml);
    mcast_snooping_unref(xbridge->ms);
    mbridge_unref(xbridge->mbridge);
    dpif_sflow_unref(xbridge->sflow);
    dpif_ipfix_unref(xbridge->ipfix);
    stp_unref(xbridge->stp);
    rstp_unref(xbridge->rstp);
    hmap_destroy(&xbridge->xports);
    free(xbridge->name);
    free(xbridge);
}
void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    xlate_xbridge_remove(new_xcfg, xbridge);
}
void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode, int vlan,
                 unsigned long *trunks, bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);

        xlate_xbundle_init(new_xcfg, xbundle);
    }

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xlate_xbundle_set(xbundle, vlan_mode, vlan, trunks,
                      use_priority_tags, bond, lacp, floodable);
}
static void
xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    struct xport *xport;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
        xport->xbundle = NULL;
    }

    hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
    list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}
void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    xlate_xbundle_remove(new_xcfg, xbundle);
}
void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 const struct lldp *lldp, struct ofport_dpif *peer,
                 int stp_port_no, const struct rstp_port *rstp_port,
                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
                 enum ofputil_port_config config,
                 enum ofputil_port_state state, bool is_tunnel,
                 bool may_enable)
{
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
        xport->ofp_port = ofp_port;

        xlate_xport_init(new_xcfg, xport);
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
                    stp_port_no, rstp_port, config, state, is_tunnel,
                    may_enable);

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = xport_lookup(new_xcfg, peer);
    if (xport->peer) {
        xport->peer->peer = xport;
    }
static void
xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
{
    if (!xport) {
        return;
    }

    clear_skb_priorities(xport);
    hmap_destroy(&xport->skb_priorities);

    hmap_remove(&xcfg->xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    rstp_port_unref(xport->rstp_port);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    lldp_unref(xport->lldp);
    free(xport);
}
void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    xlate_xport_remove(new_xcfg, xport);
}
static struct ofproto_dpif *
xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
                      ofp_port_t *ofp_in_port, const struct xport **xportp)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    const struct xport *xport;

    xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
                         ? tnl_port_receive(flow)
                         : odp_port_to_ofport(backer, flow->in_port.odp_port));
    if (OVS_UNLIKELY(!xport)) {
        return NULL;
    }
    *xportp = xport;
    if (ofp_in_port) {
        *ofp_in_port = xport->ofp_port;
    }
    return xport->xbridge->ofproto;
}
/* Given a datapath and flow metadata ('backer', and 'flow' respectively)
 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
struct ofproto_dpif *
xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
                     ofp_port_t *ofp_in_port)
{
    const struct xport *xport;

    return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
}
/* Given a datapath and flow metadata ('backer', and 'flow' respectively),
 * optionally populates 'ofproto' with the ofproto_dpif, 'ofp_in_port' with the
 * openflow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
 * handles for those protocols if they're enabled.  Caller may use the returned
 * pointers until quiescing, for longer term use additional references must
 * be taken.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofproto.
 */
int
xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
             struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
             struct dpif_sflow **sflow, struct netflow **netflow,
             ofp_port_t *ofp_in_port)
{
    struct ofproto_dpif *ofproto;
    const struct xport *xport;

    ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
    if (!ofproto) {
        return ENODEV;
    }

    if (ofprotop) {
        *ofprotop = ofproto;
    }

    if (ipfix) {
        *ipfix = xport ? xport->xbridge->ipfix : NULL;
    }

    if (sflow) {
        *sflow = xport ? xport->xbridge->sflow : NULL;
    }

    if (netflow) {
        *netflow = xport ? xport->xbridge->netflow : NULL;
    }

    return 0;
}
static struct xbridge *
xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
{
    struct hmap *xbridges;
    struct xbridge *xbridge;

    if (!ofproto || !xcfg) {
        return NULL;
    }

    xbridges = &xcfg->xbridges;

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}
/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the xport that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case. */
static bool
is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
              uint16_t vlan)
{
    struct xbundle *in_xbundle = in_port->xbundle;
    const struct xbridge *xbridge = ctx->xbridge;
    struct flow *flow = &ctx->xin->flow;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_xbundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
                                         flow->dl_dst)) {
        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            ovs_rwlock_rdlock(&xbridge->ml->rwlock);
            mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
            if (mac
                && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
                && (!is_gratuitous_arp(flow, &ctx->xout->wc)
                    || mac_entry_is_grat_arp_locked(mac))) {
                ovs_rwlock_unlock(&xbridge->ml->rwlock);
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            ovs_rwlock_unlock(&xbridge->ml->rwlock);
            break;
        }
    }

    return true;
}
/* Checks whether a MAC learning update is necessary for MAC learning table
 * 'ml' given that a packet matching 'flow' was received on 'in_xbundle' in
 * 'vlan'. */
/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
static void
update_mcast_snooping_table__(const struct xbridge *xbridge,
                              const struct flow *flow,
                              struct mcast_snooping *ms,
                              ovs_be32 ip4, int vlan,
                              struct xbundle *in_xbundle,
                              const struct dp_packet *packet)
    OVS_REQ_WRLOCK(ms->rwlock)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 30);
    int count;

    switch (ntohs(flow->tp_src)) {
    case IGMP_HOST_MEMBERSHIP_REPORT:
    case IGMPV2_HOST_MEMBERSHIP_REPORT:
        if (mcast_snooping_add_group(ms, ip4, vlan, in_xbundle->ofbundle)) {
            VLOG_DBG_RL(&rl, "bridge %s: multicast snooping learned that "
                        IP_FMT" is on port %s in VLAN %d",
                        xbridge->name, IP_ARGS(ip4), in_xbundle->name, vlan);
        }
        break;
    case IGMP_HOST_LEAVE_MESSAGE:
        if (mcast_snooping_leave_group(ms, ip4, vlan, in_xbundle->ofbundle)) {
            VLOG_DBG_RL(&rl, "bridge %s: multicast snooping leaving "
                        IP_FMT" is on port %s in VLAN %d",
                        xbridge->name, IP_ARGS(ip4), in_xbundle->name, vlan);
        }
        break;
    case IGMP_HOST_MEMBERSHIP_QUERY:
        if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
                                                       in_xbundle->ofbundle)) {
            VLOG_DBG_RL(&rl, "bridge %s: multicast snooping query from "
                        IP_FMT" is on port %s in VLAN %d",
                        xbridge->name, IP_ARGS(flow->nw_src),
                        in_xbundle->name, vlan);
        }
        break;
    case IGMPV3_HOST_MEMBERSHIP_REPORT:
        if ((count = mcast_snooping_add_report(ms, packet, vlan,
                                               in_xbundle->ofbundle))) {
            VLOG_DBG_RL(&rl, "bridge %s: multicast snooping processed %d "
                        "addresses on port %s in VLAN %d",
                        xbridge->name, count, in_xbundle->name, vlan);
        }
        break;
    }
}
/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
 * was received on 'in_xbundle' in 'vlan'. */
static void
update_mcast_snooping_table(const struct xbridge *xbridge,
                            const struct flow *flow, int vlan,
                            struct xbundle *in_xbundle,
                            const struct dp_packet *packet)
{
    struct mcast_snooping *ms = xbridge->ms;
    struct xlate_cfg *xcfg;
    struct xbundle *mcast_xbundle;
    struct mcast_port_bundle *fport;

    /* Don't learn the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return;
    }

    /* Don't learn from flood ports */
    mcast_xbundle = NULL;
    ovs_rwlock_wrlock(&ms->rwlock);
    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(fport, node, &ms->fport_list) {
        mcast_xbundle = xbundle_lookup(xcfg, fport->port);
        if (mcast_xbundle == in_xbundle) {
            break;
        }
    }

    if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
        update_mcast_snooping_table__(xbridge, flow, ms, flow->igmp_group_ip4,
                                      vlan, in_xbundle, packet);
    }
    ovs_rwlock_unlock(&ms->rwlock);
}
/* send the packet to ports having the multicast group learned */
static void
xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
                              struct mcast_snooping *ms OVS_UNUSED,
                              struct mcast_group *grp,
                              struct xbundle *in_xbundle, uint16_t vlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_group_bundle *b;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
        mcast_xbundle = xbundle_lookup(xcfg, b->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to mcast group port");
            output_normal(ctx, mcast_xbundle, vlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, "mcast group port is unknown, dropping");
        } else {
            xlate_report(ctx, "mcast group port is input port, dropping");
        }
    }
}
/* send the packet to ports connected to multicast routers */
static void
xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
                                 struct mcast_snooping *ms,
                                 struct xbundle *in_xbundle, uint16_t vlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_mrouter_bundle *mrouter;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
        mcast_xbundle = xbundle_lookup(xcfg, mrouter->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to mcast router port");
            output_normal(ctx, mcast_xbundle, vlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, "mcast router port is unknown, dropping");
        } else {
            xlate_report(ctx, "mcast router port is input port, dropping");
        }
    }
}
/* send the packet to ports flagged to be flooded */
static void
xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
                               struct mcast_snooping *ms,
                               struct xbundle *in_xbundle, uint16_t vlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_port_bundle *fport;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(fport, node, &ms->fport_list) {
        mcast_xbundle = xbundle_lookup(xcfg, fport->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to mcast flood port");
            output_normal(ctx, mcast_xbundle, vlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, "mcast flood port is unknown, dropping");
        } else {
            xlate_report(ctx, "mcast flood port is input port, dropping");
        }
    }
}
/* forward the Reports to configured ports */
static void
xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
                               struct mcast_snooping *ms,
                               struct xbundle *in_xbundle, uint16_t vlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_port_bundle *rport;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(rport, node, &ms->rport_list) {
        mcast_xbundle = xbundle_lookup(xcfg, rport->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding Report to mcast flagged port");
            output_normal(ctx, mcast_xbundle, vlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, "mcast port is unknown, dropping the Report");
        } else {
            xlate_report(ctx, "mcast port is input port, dropping the Report");
        }
    }
}
static void
xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
                   uint16_t vlan)
{
    struct xbundle *xbundle;

    LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
        if (xbundle != in_xbundle
            && xbundle_includes_vlan(xbundle, vlan)
            && xbundle->floodable
            && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
            output_normal(ctx, xbundle, vlan);
        }
    }

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
    /* Determine output bundle. */
    if (mcast_snooping_enabled(ctx->xbridge->ms)
        && !eth_addr_is_broadcast(flow->dl_dst)
        && eth_addr_is_multicast(flow->dl_dst)
        && flow->dl_type == htons(ETH_TYPE_IP)) {
        struct mcast_snooping *ms = ctx->xbridge->ms;
        struct mcast_group *grp;

        if (flow->nw_proto == IPPROTO_IGMP) {
            if (mcast_snooping_is_membership(flow->tp_src) ||
                mcast_snooping_is_query(flow->tp_src)) {
                if (ctx->xin->may_learn) {
                    update_mcast_snooping_table(ctx->xbridge, flow, vlan,
                                                in_xbundle, ctx->xin->packet);
                }
                /*
                 * IGMP packets need to take the slow path, in order to be
                 * processed for mdb updates.  That will prevent expires
                 * firing off even after hosts have sent reports.
                 */
                ctx->xout->slow |= SLOW_ACTION;
            }

            if (mcast_snooping_is_membership(flow->tp_src)) {
                ovs_rwlock_rdlock(&ms->rwlock);
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
                /* RFC4541: section 2.1.1, item 1: A snooping switch should
                 * forward IGMP Membership Reports only to those ports where
                 * multicast routers are attached.  Alternatively stated: a
                 * snooping switch should not forward IGMP Membership Reports
                 * to ports on which only hosts are attached.
                 * An administrative control may be provided to override this
                 * restriction, allowing the report messages to be flooded to
                 * other ports. */
                xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, vlan);
                ovs_rwlock_unlock(&ms->rwlock);
            } else {
                xlate_report(ctx, "multicast traffic, flooding");
                xlate_normal_flood(ctx, in_xbundle, vlan);
            }
            return;
        } else {
            if (ip_is_local_multicast(flow->nw_dst)) {
                /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
                 * address in the 224.0.0.x range which are not IGMP must
                 * be forwarded on all ports */
                xlate_report(ctx, "RFC4541: section 2.1.2, item 2, flooding");
                xlate_normal_flood(ctx, in_xbundle, vlan);
                return;
            }
        }

        /* forwarding to group base ports */
        ovs_rwlock_rdlock(&ms->rwlock);
        grp = mcast_snooping_lookup(ms, flow->nw_dst, vlan);
        if (grp) {
            xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, vlan);
            xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
            xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
        } else {
            if (mcast_snooping_flood_unreg(ms)) {
                xlate_report(ctx, "unregistered multicast, flooding");
                xlate_normal_flood(ctx, in_xbundle, vlan);
            } else {
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
                xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
            }
        }
        ovs_rwlock_unlock(&ms->rwlock);
    } else {
        ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
        mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
        mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
        ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);

        if (mac_port) {
            struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
            struct xbundle *mac_xbundle = xbundle_lookup(xcfg, mac_port);
            if (mac_xbundle && mac_xbundle != in_xbundle) {
                xlate_report(ctx, "forwarding to learned port");
                output_normal(ctx, mac_xbundle, vlan);
            } else if (!mac_xbundle) {
                xlate_report(ctx, "learned port is unknown, dropping");
            } else {
                xlate_report(ctx, "learned port is input port, dropping");
            }
        } else {
            xlate_report(ctx, "no learned MAC for destination, flooding");
            xlate_normal_flood(ctx, in_xbundle, vlan);
        }
    }
            lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
        }
        return SLOW_LACP;
    } else if ((xbridge->stp || xbridge->rstp) &&
               stp_should_process_flow(flow, wc)) {
        if (packet) {
            xbridge->stp
                ? stp_process_packet(xport, packet)
                : rstp_process_packet(xport, packet);
        }
        return SLOW_STP;
    } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
        if (packet) {
            lldp_process_packet(xport->lldp, packet);
        }
        return SLOW_LLDP;
    }
static int
tnl_route_lookup_flow(const struct flow *oflow,
                      ovs_be32 *ip, struct xport **out_port)
{
    char out_dev[IFNAMSIZ];
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;
    ovs_be32 gw;

    if (!ovs_router_lookup(oflow->tunnel.ip_dst, out_dev, &gw)) {
        return -ENOENT;
    }

    if (gw) {
        *ip = gw;
    } else {
        *ip = oflow->tunnel.ip_dst;
    }

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
            struct xport *port;

            HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
                if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
                    *out_port = port;
                    return 0;
                }
            }
        }
    }
    return -ENOENT;
}
static int
xlate_flood_packet(struct xbridge *xbridge, struct dp_packet *packet)
{
    struct ofpact_output output;
    struct flow flow;

    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
    /* Use OFPP_NONE as the in_port to avoid special packet processing. */
    flow_extract(packet, &flow);
    flow.in_port.ofp_port = OFPP_NONE;
    output.port = OFPP_FLOOD;
    output.max_len = 0;

    return ofproto_dpif_execute_actions(xbridge->ofproto, &flow, NULL,
                                        &output.ofpact, sizeof output,
                                        packet);
}
static void
tnl_send_arp_request(const struct xport *out_dev, const uint8_t eth_src[ETH_ADDR_LEN],
                     ovs_be32 ip_src, ovs_be32 ip_dst)
{
    struct xbridge *xbridge = out_dev->xbridge;
    struct dp_packet packet;

    dp_packet_init(&packet, 0);
    compose_arp(&packet, eth_src, ip_src, ip_dst);

    xlate_flood_packet(xbridge, &packet);
    dp_packet_uninit(&packet);
}
static int
build_tunnel_send(const struct xlate_ctx *ctx, const struct xport *xport,
                  const struct flow *flow, odp_port_t tunnel_odp_port)
{
    struct ovs_action_push_tnl tnl_push_data;
    struct xport *out_dev = NULL;
    ovs_be32 s_ip, d_ip = 0;
    uint8_t smac[ETH_ADDR_LEN];
    uint8_t dmac[ETH_ADDR_LEN];
    int err;

    err = tnl_route_lookup_flow(flow, &d_ip, &out_dev);
    if (err) {
        return err;
    }

    /* Use mac addr of bridge port of the peer. */
    err = netdev_get_etheraddr(out_dev->netdev, smac);
    if (err) {
        return err;
    }

    err = netdev_get_in4(out_dev->netdev, (struct in_addr *) &s_ip, NULL);
    if (err) {
        return err;
    }

    err = tnl_arp_lookup(out_dev->xbridge->name, d_ip, dmac);
    if (err) {
        tnl_send_arp_request(out_dev, smac, s_ip, d_ip);
        return err;
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_ARP);
        ovs_strlcpy(entry->u.tnl_arp_cache.br_name, out_dev->xbridge->name,
                    sizeof entry->u.tnl_arp_cache.br_name);
        entry->u.tnl_arp_cache.d_ip = d_ip;
    }

    err = tnl_port_build_header(xport->ofport, flow,
                                dmac, smac, s_ip, &tnl_push_data);
    if (err) {
        return err;
    }
    tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port);
    tnl_push_data.out_port = odp_to_u32(out_dev->odp_port);
    odp_put_tnl_push_action(ctx->xout->odp_actions, &tnl_push_data);
    return 0;
}
static void
compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                        const struct xlate_bond_recirc *xr, bool check_stp)
{
    const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    struct flow_tnl flow_tnl;
    ovs_be16 flow_vlan_tci;
    uint32_t flow_pkt_mark;
    uint8_t flow_nw_tos;
    odp_port_t out_port, odp_port;
    bool tnl_push_pop_send = false;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
    memset(&flow_tnl, 0, sizeof flow_tnl);

    if (!xport) {
        xlate_report(ctx, "Nonexistent output port");
        return;
    }
    if (xport->peer) {
        const struct xport *peer = xport->peer;
        struct flow old_flow = ctx->xin->flow;
        bool old_was_mpls = ctx->was_mpls;
        cls_version_t old_version = ctx->tables_version;
        enum slow_path_reason special;
        struct ofpbuf old_stack = ctx->stack;
        union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
        struct ofpbuf old_action_set = ctx->action_set;
        uint64_t actset_stub[1024 / 8];

        ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
        ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
        ctx->xbridge = peer->xbridge;
        flow->in_port.ofp_port = peer->ofp_port;
        flow->metadata = htonll(0);
        memset(&flow->tunnel, 0, sizeof flow->tunnel);
        memset(flow->regs, 0, sizeof flow->regs);
        flow->actset_output = OFPP_UNSET;

        /* The bridge is now known so obtain its table version. */
        ctx->tables_version
            = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);

        special = process_special(ctx, &ctx->xin->flow, peer,
                                  ctx->xin->packet);
        if (special) {
            ctx->xout->slow |= special;
        } else if (may_receive(peer, ctx)) {
            if (xport_stp_forward_state(peer) && xport_rstp_forward_state(peer)) {
                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
                if (ctx->action_set.size) {
                    /* Translate action set only if not dropping the packet and
                     * not recirculating. */
                    if (!exit_recirculates(ctx)) {
                        xlate_action_set(ctx);
                    }
                }
                /* Check if need to recirculate. */
                if (exit_recirculates(ctx)) {
                    compose_recirculate_action(ctx);
                }
            } else {
                /* Forwarding is disabled by STP and RSTP.  Let OFPP_NORMAL and
                 * the learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->xout->odp_actions->size;
                mirror_mask_t old_mirrors = ctx->xout->mirrors;

                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
                ctx->xout->mirrors = old_mirrors;
                ctx->base_flow = old_base_flow;
                ctx->xout->odp_actions->size = old_size;

                /* Undo changes that may have been done for recirculation. */
                if (exit_recirculates(ctx)) {
                    ctx->action_set.size = ctx->recirc_action_offset;
                    ctx->recirc_action_offset = -1;
                    ctx->last_unroll_offset = -1;
                }
            }
        }

        ctx->xin->flow = old_flow;
        ctx->xbridge = xport->xbridge;
        ofpbuf_uninit(&ctx->action_set);
        ctx->action_set = old_action_set;
        ofpbuf_uninit(&ctx->stack);
        ctx->stack = old_stack;

        /* Restore calling bridge's lookup version. */
        ctx->tables_version = old_version;

        /* The peer bridge popping MPLS should have no effect on the original
         * bridge. */
        ctx->was_mpls = old_was_mpls;

        /* The fact that the peer bridge exits (for any reason) does not mean
         * that the original bridge should exit.  Specifically, if the peer
         * bridge recirculates (which typically modifies the packet), the
         * original bridge must continue processing with the original, not the
         * recirculated packet! */
        ctx->exit = false;

        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
@@ -2075 +3069 @@
 xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
                    bool may_packet_in, bool honor_table_miss)
+    /* Check if we need to recirculate before matching in a table. */
+    if (ctx->was_mpls) {
+        ctx_trigger_recirculation(ctx);
     if (xlate_resubmit_resource_check(ctx)) {
-        ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
-        bool skip_wildcards = ctx->xin->skip_wildcards;
+        struct flow_wildcards *wc;
         uint8_t old_table_id = ctx->table_id;
         struct rule_dpif *rule;
-        enum rule_dpif_lookup_verdict verdict;
-        enum ofputil_port_config config = 0;
         ctx->table_id = table_id;
-        /* Look up a flow with 'in_port' as the input port.  Then restore the
-         * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
-         * have surprising behavior). */
-        ctx->xin->flow.in_port.ofp_port = in_port;
-        verdict = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
-                                              ? &ctx->xout->wc : NULL,
-                                              &ctx->table_id, &rule,
-                                              ctx->xin->xcache != NULL);
-        ctx->xin->flow.in_port.ofp_port = old_in_port;
-        if (ctx->xin->resubmit_hook) {
-            ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
-        case RULE_DPIF_LOOKUP_VERDICT_MATCH:
-        case RULE_DPIF_LOOKUP_VERDICT_CONTROLLER:
-            if (may_packet_in) {
-                struct xport *xport;
-                xport = get_ofp_port(ctx->xbridge,
-                                     ctx->xin->flow.in_port.ofp_port);
-                config = xport ? xport->config : 0;
-            /* Fall through to drop */
-        case RULE_DPIF_LOOKUP_VERDICT_DROP:
-            config = OFPUTIL_PC_NO_PACKET_IN;
-        case RULE_DPIF_LOOKUP_VERDICT_DEFAULT:
-            if (!ofproto_dpif_wants_packet_in_on_miss(ctx->xbridge->ofproto)) {
-                config = OFPUTIL_PC_NO_PACKET_IN;
-        choose_miss_rule(config, ctx->xbridge->miss_rule,
-                         ctx->xbridge->no_packet_in_rule, &rule,
-                         ctx->xin->xcache != NULL);
+        wc = (ctx->xin->skip_wildcards) ? NULL : &ctx->xout->wc;
+        rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
+                                           ctx->tables_version,
+                                           &ctx->xin->flow, wc,
+                                           ctx->xin->xcache != NULL,
+                                           ctx->xin->resubmit_stats,
+                                           &ctx->table_id, in_port,
+                                           may_packet_in, honor_table_miss);
+        if (OVS_UNLIKELY(ctx->xin->resubmit_hook)) {
+            ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse + 1);
         /* Fill in the cache entry here instead of xlate_recursively
          * to make the reference counting more explicit.  We take a
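The comment in the removed code spells out an override idiom worth noting: do the table lookup with the resubmitted in_port substituted into the flow, then restore the original value so later actions such as OFPP_IN_PORT and OFPP_NORMAL still see the real ingress port. A standalone sketch of the same idiom, with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_flow { uint16_t in_port; };

    /* A pretend rule lookup keyed on the in_port field. */
    static int lookup(const struct toy_flow *flow) { return flow->in_port * 10; }

    int
    main(void)
    {
        struct toy_flow flow = { .in_port = 1 };
        uint16_t resubmit_port = 5;

        uint16_t old_in_port = flow.in_port;  /* Save. */
        flow.in_port = resubmit_port;         /* Override for the lookup only. */
        int rule = lookup(&flow);
        flow.in_port = old_in_port;           /* Restore to avoid surprises. */

        printf("rule=%d in_port=%u\n", rule, (unsigned) flow.in_port);
        return 0;
    }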
@@ -2195 +3187 @@
 xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
-    const struct ofputil_bucket *bucket;
-    const struct list *buckets;
+    struct ofputil_bucket *bucket;
+    const struct ovs_list *buckets;
     group_dpif_get_buckets(group, &buckets);
     LIST_FOR_EACH (bucket, list_node, buckets) {
         xlate_group_bucket(ctx, bucket);
+    xlate_group_stats(ctx, group, NULL);
 xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
-    const struct ofputil_bucket *bucket;
+    struct ofputil_bucket *bucket;
     bucket = group_first_live_bucket(ctx, group, 0);
     xlate_group_bucket(ctx, bucket);
+    xlate_group_stats(ctx, group, bucket);
+xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
+    struct flow_wildcards *wc = &ctx->xout->wc;
+    struct ofputil_bucket *bucket;
+    basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
+    flow_mask_hash_fields(&ctx->xin->flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
+    bucket = group_best_live_bucket(ctx, group, basis);
+    xlate_group_bucket(ctx, bucket);
+    xlate_group_stats(ctx, group, bucket);
+xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
+    struct mf_bitmap hash_fields = MF_BITMAP_INITIALIZER;
+    struct flow_wildcards *wc = &ctx->xout->wc;
+    const struct field_array *fields;
+    struct ofputil_bucket *bucket;
+    fields = group_dpif_get_fields(group);
+    basis = hash_uint64(group_dpif_get_selection_method_param(group));
+    /* Determine which fields to hash */
+    for (i = 0; i < MFF_N_IDS; i++) {
+        if (bitmap_is_set(fields->used.bm, i)) {
+            const struct mf_field *mf;
+            /* If the field is already present in 'hash_fields' then
+             * this loop has already checked that it and its pre-requisites
+             * are present in the flow and its pre-requisites have
+             * already been added to 'hash_fields'.  There is nothing more
+             * to do here and as an optimisation the loop can continue. */
+            if (bitmap_is_set(hash_fields.bm, i)) {
+            /* Only hash a field if it and its pre-requisites are present
+             * in the flow. */
+            if (!mf_are_prereqs_ok(mf, &ctx->xin->flow)) {
+            /* Hash both the field and its pre-requisites */
+            mf_bitmap_set_field_and_prereqs(mf, &hash_fields);
+    /* Hash the fields */
+    for (i = 0; i < MFF_N_IDS; i++) {
+        if (bitmap_is_set(hash_fields.bm, i)) {
+            const struct mf_field *mf = mf_from_id(i);
+            union mf_value value;
+            mf_get_value(mf, &ctx->xin->flow, &value);
+            /* This seems inefficient but so does apply_mask() */
+            for (j = 0; j < mf->n_bytes; j++) {
+                ((uint8_t *) &value)[j] &= ((uint8_t *) &fields->value[i])[j];
+            basis = hash_bytes(&value, mf->n_bytes, basis);
+            mf_mask_field(mf, &wc->masks);
+    bucket = group_best_live_bucket(ctx, group, basis);
+    xlate_group_bucket(ctx, bucket);
+    xlate_group_stats(ctx, group, bucket);
@@ -2219 +3294 @@
 xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
-    struct flow_wildcards *wc = &ctx->xout->wc;
-    const struct ofputil_bucket *bucket;
+    const char *selection_method = group_dpif_get_selection_method(group);
-    basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
-    bucket = group_best_live_bucket(ctx, group, basis);
-    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
-    xlate_group_bucket(ctx, bucket);
+    if (selection_method[0] == '\0') {
+        xlate_default_select_group(ctx, group);
+    } else if (!strcasecmp("hash", selection_method)) {
+        xlate_hash_fields_select_group(ctx, group);
+    /* Parsing of groups should ensure this never happens */
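Both select-group paths end the same way: hash some flow fields into a basis, then let group_best_live_bucket() map that basis to a bucket so a given flow always lands on the same bucket while different flows spread out. A minimal standalone sketch of that selection step, under the assumption that the score is a weighted hash of the basis and bucket index (all names here are toy stand-ins, not the OVS functions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_bucket {
        uint16_t weight;
        bool live;
    };

    /* Any decent mixing function works for illustration. */
    static uint32_t
    mix(uint32_t basis, uint32_t bucket_id)
    {
        uint32_t h = basis ^ (bucket_id * 0x9e3779b9u);
        h ^= h >> 16;
        h *= 0x85ebca6bu;
        h ^= h >> 13;
        return h;
    }

    static int
    best_live_bucket(const struct toy_bucket *buckets, int n, uint32_t basis)
    {
        uint32_t best_score = 0;
        int best = -1;

        for (int i = 0; i < n; i++) {
            if (buckets[i].live) {
                /* Weighted score: heavier buckets win more often. */
                uint32_t score = (mix(basis, i) & 0xffff) * buckets[i].weight;
                if (best < 0 || score > best_score) {
                    best_score = score;
                    best = i;
                }
            }
        }
        return best;
    }

    int
    main(void)
    {
        struct toy_bucket buckets[] = {
            { .weight = 1, .live = true },
            { .weight = 2, .live = false },  /* Dead buckets are skipped. */
            { .weight = 2, .live = true },
        };
        uint32_t basis = 0xdeadbeef;  /* Would come from hashing flow fields. */

        printf("chosen bucket: %d\n", best_live_bucket(buckets, 3, basis));
        return 0;
    }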
@@ -2402 +3476 @@
     ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, pin);
-    ofpbuf_delete(packet);
+    dp_packet_delete(packet);
+/* Called only when ctx->recirc_action_offset is set. */
+compose_recirculate_action(struct xlate_ctx *ctx)
+    struct recirc_metadata md;
+    use_masked = ctx->xbridge->support.masked_set_action;
+    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+                                          ctx->xout->odp_actions,
+                                          &ctx->xout->wc, use_masked);
+    recirc_metadata_from_flow(&md, &ctx->xin->flow);
+    ovs_assert(ctx->recirc_action_offset >= 0);
+    /* Only allocate recirculation ID if we have a packet. */
+    if (ctx->xin->packet) {
+        /* Allocate a unique recirc id for the given metadata state in the
+         * flow.  The life-cycle of this recirc id is managed by associating it
+         * with the udpif key ('ukey') created for each new datapath flow. */
+        id = recirc_alloc_id_ctx(ctx->xbridge->ofproto, 0, &md, &ctx->stack,
+                                 ctx->recirc_action_offset,
+                                 ctx->action_set.size, ctx->action_set.data);
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+            VLOG_ERR_RL(&rl, "Failed to allocate recirculation id");
+        xlate_out_add_recirc(ctx->xout, id);
+        /* Look up an existing recirc id for the given metadata state in the
+         * flow.  No new reference is taken, as the ID is RCU protected and is
+         * only required temporarily for verification. */
+        id = recirc_find_id(ctx->xbridge->ofproto, 0, &md, &ctx->stack,
+                            ctx->recirc_action_offset,
+                            ctx->action_set.size, ctx->action_set.data);
+        /* We let a zero 'id' be used in the RECIRC action below, which will
+         * fail all revalidations as zero is not a valid recirculation ID. */
+    nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
+    /* Undo changes done by recirculation. */
+    ctx->action_set.size = ctx->recirc_action_offset;
+    ctx->recirc_action_offset = -1;
+    ctx->last_unroll_offset = -1;
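The alloc/find pair above shares one idea: recirculation IDs are keyed by the saved translation state (metadata, stack, action set), so identical states share an ID, ID 0 is reserved as "invalid", and a missing entry simply yields 0 and fails later revalidation. A minimal sketch of that keyed-ID table; the toy fixed array stands in for the RCU-protected hash map OVS actually uses:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_state {          /* Stand-in for metadata + stack + action set. */
        uint32_t metadata;
        uint8_t table_id;
    };

    #define TOY_MAX_IDS 16
    static struct toy_state saved[TOY_MAX_IDS];
    static int n_ids;           /* IDs 1..n_ids are in use; 0 means invalid. */

    static uint32_t
    toy_find_id(const struct toy_state *state)
    {
        for (int i = 0; i < n_ids; i++) {
            if (saved[i].metadata == state->metadata
                && saved[i].table_id == state->table_id) {
                return i + 1;
            }
        }
        return 0;               /* Not found: 0 fails later validation. */
    }

    static uint32_t
    toy_alloc_id(const struct toy_state *state)
    {
        uint32_t id = toy_find_id(state);
        if (!id && n_ids < TOY_MAX_IDS) {
            saved[n_ids] = *state;
            id = ++n_ids;
        }
        return id;              /* 0 on exhaustion, like an allocation failure. */
    }

    int
    main(void)
    {
        struct toy_state s = { .metadata = 0xabcd, .table_id = 4 };

        printf("find before alloc: %u\n", toy_find_id(&s)); /* 0 */
        printf("alloc: %u\n", toy_alloc_id(&s));            /* 1 */
        printf("find after alloc: %u\n", toy_find_id(&s));  /* 1 */
        return 0;
    }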
@@ -2756 +3886 @@
 xlate_sample_action(struct xlate_ctx *ctx,
                     const struct ofpact_sample *os)
-    union user_action_cookie cookie;
-    /* Scale the probability from 16-bit to 32-bit while representing
-     * the same percentage. */
-    uint32_t probability = (os->probability << 16) | os->probability;
-    if (!ctx->xbridge->variable_length_userdata) {
-        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-        VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
-                    "lacks support (needs Linux 3.10+ or kernel module from "
-    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
-                                          &ctx->xout->odp_actions,
-    compose_flow_sample_cookie(os->probability, os->collector_set_id,
-                               os->obs_domain_id, os->obs_point_id, &cookie);
-    compose_sample_action(ctx->xbridge, &ctx->xout->odp_actions, &ctx->xin->flow,
-                          probability, &cookie, sizeof cookie.flow_sample);
+    union user_action_cookie cookie;
+    /* Scale the probability from 16-bit to 32-bit while representing
+     * the same percentage. */
+    uint32_t probability = (os->probability << 16) | os->probability;
+    if (!ctx->xbridge->support.variable_length_userdata) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+        VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
+                    "lacks support (needs Linux 3.10+ or kernel module from "
+    use_masked = ctx->xbridge->support.masked_set_action;
+    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+                                          ctx->xout->odp_actions,
+                                          &ctx->xout->wc, use_masked);
+    compose_flow_sample_cookie(os->probability, os->collector_set_id,
+                               os->obs_domain_id, os->obs_point_id, &cookie);
+    compose_sample_action(ctx->xbridge, ctx->xout->odp_actions,
+                          &ctx->xin->flow, probability, &cookie,
+                          sizeof cookie.flow_sample, ODPP_NONE);
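The probability scaling kept on both sides of this hunk is worth unpacking: replicating a 16-bit value into both halves of a 32-bit word is the same as multiplying by 0x10001, so UINT16_MAX maps exactly to UINT32_MAX and any fraction of the 16-bit range maps to the same fraction of the 32-bit range. A standalone check:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint16_t samples[] = { 0, 1, 0x8000, UINT16_MAX };

        for (int i = 0; i < 4; i++) {
            uint16_t p16 = samples[i];
            uint32_t p32 = ((uint32_t) p16 << 16) | p16;

            /* Both columns should print (nearly) the same percentage. */
            printf("p16=%5" PRIu16 " (%6.2f%%)  p32=%10" PRIu32 " (%6.2f%%)\n",
                   p16, 100.0 * p16 / UINT16_MAX,
                   p32, 100.0 * p32 / UINT32_MAX);
        }
        return 0;
    }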
@@ -2814 +3972 @@
     uint64_t action_list_stub[1024 / 64];
     struct ofpbuf action_list;
+    ctx->in_action_set = true;
     ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
     ofpacts_execute_action_set(&action_list, &ctx->action_set);
-    do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
+    /* Clear the action set, as it is not needed any more. */
+    ofpbuf_clear(&ctx->action_set);
+    do_xlate_actions(action_list.data, action_list.size, ctx);
+    ctx->in_action_set = false;
     ofpbuf_uninit(&action_list);
+recirc_put_unroll_xlate(struct xlate_ctx *ctx)
+    struct ofpact_unroll_xlate *unroll;
+    unroll = ctx->last_unroll_offset < 0
+        ? NULL
+        : ALIGNED_CAST(struct ofpact_unroll_xlate *,
+                       (char *)ctx->action_set.data + ctx->last_unroll_offset);
+    /* Restore the table_id and rule cookie for a potential PACKET
+     * IN action. */
+    if (!unroll ||
+        (ctx->table_id != unroll->rule_table_id
+         || ctx->rule_cookie != unroll->rule_cookie)) {
+        ctx->last_unroll_offset = ctx->action_set.size;
+        unroll = ofpact_put_UNROLL_XLATE(&ctx->action_set);
+        unroll->rule_table_id = ctx->table_id;
+        unroll->rule_cookie = ctx->rule_cookie;
+/* Copy remaining actions to the action_set to be executed after recirculation.
+ * UNROLL_XLATE action is inserted, if not already done so, before actions that
+ * may generate PACKET_INs from the current table and without matching another
+ * rule. */
+recirc_unroll_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
+                      struct xlate_ctx *ctx)
+    const struct ofpact *a;
+    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
+        /* May generate PACKET INs. */
+        case OFPACT_OUTPUT_REG:
+        case OFPACT_CONTROLLER:
+        case OFPACT_DEC_MPLS_TTL:
+        case OFPACT_DEC_TTL:
+            recirc_put_unroll_xlate(ctx);
+        /* These may not generate PACKET INs. */
+        case OFPACT_SET_TUNNEL:
+        case OFPACT_REG_MOVE:
+        case OFPACT_SET_FIELD:
+        case OFPACT_STACK_PUSH:
+        case OFPACT_STACK_POP:
+        case OFPACT_WRITE_METADATA:
+        case OFPACT_RESUBMIT:        /* May indirectly generate PACKET INs, */
+        case OFPACT_GOTO_TABLE:      /* but from a different table and rule. */
+        case OFPACT_ENQUEUE:
+        case OFPACT_SET_VLAN_VID:
+        case OFPACT_SET_VLAN_PCP:
+        case OFPACT_STRIP_VLAN:
+        case OFPACT_PUSH_VLAN:
+        case OFPACT_SET_ETH_SRC:
+        case OFPACT_SET_ETH_DST:
+        case OFPACT_SET_IPV4_SRC:
+        case OFPACT_SET_IPV4_DST:
+        case OFPACT_SET_IP_DSCP:
+        case OFPACT_SET_IP_ECN:
+        case OFPACT_SET_IP_TTL:
+        case OFPACT_SET_L4_SRC_PORT:
+        case OFPACT_SET_L4_DST_PORT:
+        case OFPACT_SET_QUEUE:
+        case OFPACT_POP_QUEUE:
+        case OFPACT_PUSH_MPLS:
+        case OFPACT_POP_MPLS:
+        case OFPACT_SET_MPLS_LABEL:
+        case OFPACT_SET_MPLS_TC:
+        case OFPACT_SET_MPLS_TTL:
+        case OFPACT_MULTIPATH:
+        case OFPACT_UNROLL_XLATE:
+        case OFPACT_FIN_TIMEOUT:
+        case OFPACT_CLEAR_ACTIONS:
+        case OFPACT_WRITE_ACTIONS:
+        /* These need not be copied for restoration. */
+        case OFPACT_CONJUNCTION:
+        /* Copy the action over. */
+        ofpbuf_put(&ctx->action_set, a, OFPACT_ALIGN(a->len));
+#define CHECK_MPLS_RECIRCULATION()      \
+    if (ctx->was_mpls) {                \
+        ctx_trigger_recirculation(ctx); \
+#define CHECK_MPLS_RECIRCULATION_IF(COND) \
+        CHECK_MPLS_RECIRCULATION();       \
 do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                  struct xlate_ctx *ctx)
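The ofpacts_execute_action_set() call near the top of this hunk converts the OpenFlow 1.1+ action set into an ordinary action list. The key property of an action set is that it holds at most one action of each kind and executes them in the order the spec fixes, not the order the controller wrote them. A rough standalone sketch of that reordering; the kinds and their order here are abbreviated stand-ins, not the full OpenFlow table:

    #include <stdio.h>

    /* Enumerators listed in execution order. */
    enum toy_kind { TOY_POP_VLAN, TOY_PUSH_VLAN, TOY_SET_FIELD, TOY_OUTPUT,
                    TOY_N_KINDS };

    static const char *kind_names[TOY_N_KINDS] = {
        "pop_vlan", "push_vlan", "set_field", "output",
    };

    int
    main(void)
    {
        int action_set[TOY_N_KINDS] = { 0 };  /* At most one slot per kind. */

        /* Written in "wrong" order; a later write of a kind replaces the
         * earlier one instead of appending. */
        action_set[TOY_OUTPUT] = 1;
        action_set[TOY_SET_FIELD] = 1;
        action_set[TOY_SET_FIELD] = 1;

        /* Execution walks the kinds in specification order. */
        for (int k = 0; k < TOY_N_KINDS; k++) {
            if (action_set[k]) {
                printf("execute %s\n", kind_names[k]);
            }
        }
        return 0;
    }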
@@ -2997 +4293 @@
             && !eth_type_mpls(flow->dl_type)) {
+        /* A flow may wildcard nw_frag.  Do nothing if setting a transport
+         * header field on a packet that does not have them. */
         mf_mask_field_and_prereqs(mf, &wc->masks);
-        mf_set_flow_value(mf, &set_field->value, flow);
+        if (mf_are_prereqs_ok(mf, flow)) {
+            mf_set_flow_value_masked(mf, &set_field->value,
+                                     &set_field->mask, flow);
     case OFPACT_STACK_PUSH:
+        CHECK_MPLS_RECIRCULATION_IF(
+            mf_is_l3_or_higher(ofpact_get_STACK_PUSH(a)->subfield.field));
         nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
     case OFPACT_STACK_POP:
+        CHECK_MPLS_RECIRCULATION_IF(
+            mf_is_l3_or_higher(ofpact_get_STACK_POP(a)->subfield.field));
         nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, wc,
     case OFPACT_PUSH_MPLS:
+        /* Recirculate if it is an IP packet with a zero ttl.  This may
+         * indicate that the packet was previously MPLS and an MPLS pop
+         * action converted it to IP.  In this case recirculating should
+         * reveal the IP TTL which is used as the basis for a new MPLS
+         * LSE. */
+        CHECK_MPLS_RECIRCULATION_IF(
+            !flow_count_mpls_labels(flow, wc)
+            && flow->nw_ttl == 0
+            && is_ip_any(flow));
         compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
     case OFPACT_POP_MPLS:
+        CHECK_MPLS_RECIRCULATION();
         compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
     case OFPACT_SET_MPLS_LABEL:
+        CHECK_MPLS_RECIRCULATION();
         compose_set_mpls_label_action(
             ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
     case OFPACT_SET_MPLS_TC:
+        CHECK_MPLS_RECIRCULATION();
         compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
     case OFPACT_SET_MPLS_TTL:
+        CHECK_MPLS_RECIRCULATION();
         compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
     case OFPACT_DEC_MPLS_TTL:
+        CHECK_MPLS_RECIRCULATION();
         if (compose_dec_mpls_ttl_action(ctx)) {
     case OFPACT_DEC_TTL:
+        CHECK_MPLS_RECIRCULATION();
         wc->masks.nw_ttl = 0xff;
         if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
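Every MPLS-sensitive case above opens with the same guard macro. The pattern it implements is: inside the big switch over actions, mark the flow for recirculation and break out of the current case, so each action gets the guard in one or two lines. A minimal standalone sketch of that guard-macro pattern, with toy names:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_ctx {
        bool was_mpls;       /* Set once an MPLS action changed the packet. */
        bool recirc_needed;  /* Set when processing must restart. */
    };

    #define TOY_CHECK_RECIRC(ctx)        \
        if ((ctx)->was_mpls) {           \
            (ctx)->recirc_needed = true; \
            break;                       \
        }

    int
    main(void)
    {
        struct toy_ctx ctx = { .was_mpls = true };
        int action = 0;  /* Pretend this came from an action list. */

        switch (action) {
        case 0: /* An action that cannot see through MPLS changes. */
            TOY_CHECK_RECIRC(&ctx);
            printf("executed action\n");  /* Skipped: the guard broke out. */
            break;
        }

        printf("recirc_needed=%d\n", ctx.recirc_needed);
        return 0;
    }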
@@ -3361 +4719 @@
     ctx.xout->has_fin_timeout = false;
     ctx.xout->nf_output_iface = NF_OUT_DROP;
     ctx.xout->mirrors = 0;
-    ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
-                    sizeof ctx.xout->odp_actions_stub);
-    ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);
-    ctx.xbridge = xbridge_lookup(xin->ofproto);
+    ctx.xout->n_recircs = 0;
+    xout->odp_actions = xin->odp_actions;
+    if (!xout->odp_actions) {
+        xout->odp_actions = &xout->odp_actions_buf;
+        ofpbuf_use_stub(xout->odp_actions, xout->odp_actions_stub,
+                        sizeof xout->odp_actions_stub);
+    ofpbuf_reserve(xout->odp_actions, NL_A_U32_SIZE);
+    xbridge = xbridge_lookup(xcfg, xin->ofproto);
+    /* 'ctx.xbridge' may be changed by action processing, whereas 'xbridge'
+     * will remain set on the original input bridge. */
+    ctx.xbridge = xbridge;
     ctx.rule = xin->rule;
     ctx.base_flow = *flow;
     memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
     ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;
-    flow_wildcards_init_catchall(wc);
-    memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
-    memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
-    memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
-    if (is_ip_any(flow)) {
-        wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+    if (!xin->skip_wildcards) {
+        flow_wildcards_init_catchall(wc);
+        memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
+        memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
+        if (is_ip_any(flow)) {
+            wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+        if (xbridge->support.recirc) {
+            /* Always exactly match recirc_id when datapath supports
+             * recirculation. */
+            wc->masks.recirc_id = UINT32_MAX;
+        if (xbridge->netflow) {
+            netflow_mask_wc(flow, wc);
     is_icmp = is_icmpv4(flow) || is_icmpv6(flow);
     tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
-    if (ctx.xbridge->netflow) {
-        netflow_mask_wc(flow, wc);
     ctx.recurse = 0;
     ctx.resubmits = 0;
     ctx.in_group = false;
+    ctx.in_action_set = false;
     ctx.orig_skb_priority = flow->skb_priority;
     ctx.table_id = 0;
+    ctx.rule_cookie = OVS_BE64_MAX;
     ctx.exit = false;
-    ctx.use_recirc = false;
+    ctx.was_mpls = false;
+    ctx.recirc_action_offset = -1;
+    ctx.last_unroll_offset = -1;
+    ctx.action_set_has_group = false;
+    ofpbuf_use_stub(&ctx.action_set,
+                    ctx.action_set_stub, sizeof ctx.action_set_stub);
+    ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
+    /* The in_port of the original packet before recirculation. */
+    in_port = get_ofp_port(xbridge, flow->in_port.ofp_port);
+        const struct recirc_id_node *recirc = xin->recirc;
+        if (xin->ofpacts_len > 0 || ctx.rule) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+            VLOG_WARN_RL(&rl, "Recirculation conflict (%s)!",
+                         xin->ofpacts_len > 0
+        /* Set the bridge for post-recirculation processing if needed. */
+        if (ctx.xbridge->ofproto != recirc->ofproto) {
+            struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
+            const struct xbridge *new_bridge = xbridge_lookup(xcfg,
+            if (OVS_UNLIKELY(!new_bridge)) {
+                /* Drop the packet if the bridge cannot be found. */
+                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+                VLOG_WARN_RL(&rl, "Recirculation bridge no longer exists.");
+            ctx.xbridge = new_bridge;
+        /* Set the post-recirculation table id.  Note: A table lookup is done
+         * only if there are no post-recirculation actions. */
+        ctx.table_id = recirc->table_id;
+        /* Restore pipeline metadata.  May change flow's in_port and other
+         * metadata to the values that existed when recirculation was
+         * triggered. */
+        recirc_metadata_to_flow(&recirc->metadata, flow);
+        /* Restore stack, if any. */
+        if (recirc->stack) {
+            ofpbuf_put(&ctx.stack, recirc->stack->data, recirc->stack->size);
+        /* Restore action set, if any. */
+        if (recirc->action_set_len) {
+            const struct ofpact *a;
+            ofpbuf_put(&ctx.action_set, recirc->ofpacts,
+                       recirc->action_set_len);
+            OFPACT_FOR_EACH(a, recirc->ofpacts, recirc->action_set_len) {
+                if (a->type == OFPACT_GROUP) {
+                    ctx.action_set_has_group = true;
+        /* Restore recirculation actions.  If there are no actions, processing
+         * will start with a lookup in the table set above. */
+        if (recirc->ofpacts_len > recirc->action_set_len) {
+            xin->ofpacts_len = recirc->ofpacts_len - recirc->action_set_len;
+            xin->ofpacts = recirc->ofpacts +
+                recirc->action_set_len / sizeof *recirc->ofpacts;
+    } else if (OVS_UNLIKELY(flow->recirc_id)) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+        VLOG_WARN_RL(&rl, "Recirculation context not found for ID %"PRIx32,
+    /* The bridge is now known so obtain its table version. */
+    ctx.tables_version = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
     if (!xin->ofpacts && !ctx.rule) {
-        ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
-                                        !xin->skip_wildcards ? wc : NULL,
-                                        &rule, ctx.xin->xcache != NULL);
+        rule = rule_dpif_lookup_from_table(ctx.xbridge->ofproto,
+                                           ctx.tables_version, flow, wc,
+                                           ctx.xin->xcache != NULL,
+                                           ctx.xin->resubmit_stats,
+                                           flow->in_port.ofp_port, true, true);
         if (ctx.xin->resubmit_stats) {
             rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
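The new odp_actions setup at the top of this hunk is a common C pattern: use a caller-provided buffer when one is given, otherwise fall back to an internal buffer backed by fixed stub storage so small results avoid heap allocation. A standalone sketch of the pattern, with hypothetical names rather than the ofpbuf API:

    #include <stdio.h>
    #include <string.h>

    struct toy_buf {
        char *data;
        size_t capacity;
    };

    struct toy_out {
        struct toy_buf *actions;  /* Points at the caller's buf or at 'buf'. */
        struct toy_buf buf;       /* Fallback... */
        char stub[64];            /* ...backed by fixed storage. */
    };

    static void
    toy_init(struct toy_out *out, struct toy_buf *caller_buf)
    {
        out->actions = caller_buf;
        if (!out->actions) {
            out->buf.data = out->stub;
            out->buf.capacity = sizeof out->stub;
            out->actions = &out->buf;
        }
    }

    int
    main(void)
    {
        struct toy_out out;

        toy_init(&out, NULL);  /* No caller buffer: stub-backed fallback. */
        strcpy(out.actions->data, "stub-backed");
        printf("%s (capacity %zu)\n", out.actions->data, out.actions->capacity);
        return 0;
    }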
@@ -3479 +4922 @@
-    special = process_special(&ctx, flow, in_port, ctx.xin->packet);
+    /* Do not perform special processing on recirculated packets,
+     * as recirculated packets are not really received by the bridge. */
+    if (!xin->recirc &&
+        (special = process_special(&ctx, flow, in_port, ctx.xin->packet))) {
         ctx.xout->slow |= special;
         size_t sample_actions_len;
         if (flow->in_port.ofp_port
-            != vsp_realdev_to_vlandev(ctx.xbridge->ofproto,
+            != vsp_realdev_to_vlandev(xbridge->ofproto,
                                       flow->in_port.ofp_port,
                                       flow->vlan_tci)) {
             ctx.base_flow.vlan_tci = 0;
-        add_sflow_action(&ctx);
-        add_ipfix_action(&ctx);
-        sample_actions_len = ofpbuf_size(&ctx.xout->odp_actions);
+        /* Sampling is done only for packets really received by the bridge. */
+        if (!xin->recirc) {
+            add_sflow_action(&ctx);
+            add_ipfix_action(&ctx);
+            sample_actions_len = ctx.xout->odp_actions->size;
+        } else {
+            sample_actions_len = 0;
         if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
             do_xlate_actions(ofpacts, ofpacts_len, &ctx);
             /* We've let OFPP_NORMAL and the learning action look at the
              * packet, so drop it now if forwarding is disabled. */
-            if (in_port && !xport_stp_forward_state(in_port)) {
-                ofpbuf_set_size(&ctx.xout->odp_actions, sample_actions_len);
-            if (ofpbuf_size(&ctx.action_set)) {
-                xlate_action_set(&ctx);
-            if (ctx.xbridge->has_in_band
+            if (in_port && (!xport_stp_forward_state(in_port) ||
+                            !xport_rstp_forward_state(in_port))) {
+                /* Drop all actions added by do_xlate_actions() above. */
+                ctx.xout->odp_actions->size = sample_actions_len;
+                /* Undo changes that may have been done for recirculation. */
+                if (exit_recirculates(&ctx)) {
+                    ctx.action_set.size = ctx.recirc_action_offset;
+                    ctx.recirc_action_offset = -1;
+                    ctx.last_unroll_offset = -1;
+            } else if (ctx.action_set.size) {
+                /* Translate action set only if not dropping the packet and
+                 * not recirculating. */
+                if (!exit_recirculates(&ctx)) {
+                    xlate_action_set(&ctx);
+            /* Check if need to recirculate. */
+            if (exit_recirculates(&ctx)) {
+                compose_recirculate_action(&ctx);
+            /* Output only fully processed packets. */
+            if (!exit_recirculates(&ctx)
+                && xbridge->has_in_band
                 && in_band_must_output_to_local_port(flow)
                 && !actions_output_to_local_port(&ctx)) {
-                compose_output_action(&ctx, OFPP_LOCAL);
-    fix_sflow_action(&ctx);
-    if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+                compose_output_action(&ctx, OFPP_LOCAL, NULL);
+    fix_sflow_action(&ctx);
+    /* Only mirror fully processed packets. */
+    if (!exit_recirculates(&ctx)
+        && mbridge_has_mirrors(xbridge->mbridge)) {
         add_mirror_actions(&ctx, &orig_flow);
-    if (nl_attr_oversized(ofpbuf_size(&ctx.xout->odp_actions))) {
+    if (nl_attr_oversized(ctx.xout->odp_actions->size)) {
         /* These datapath actions are too big for a Netlink attribute, so we
          * can't hand them to the kernel directly.  dpif_execute() can execute
          * them one by one with help, so just mark the result as SLOW_ACTION to
          * prevent the flow from being installed. */
         COVERAGE_INC(xlate_actions_oversize);
         ctx.xout->slow |= SLOW_ACTION;
-    } else if (too_many_output_actions(&ctx.xout->odp_actions)) {
+    } else if (too_many_output_actions(ctx.xout->odp_actions)) {
         COVERAGE_INC(xlate_actions_too_many_output);
         ctx.xout->slow |= SLOW_ACTION;
-    if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+    /* Update mirror stats only for packets really received by the bridge. */
+    if (!xin->recirc && mbridge_has_mirrors(xbridge->mbridge)) {
         if (ctx.xin->resubmit_stats) {
-            mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
+            mirror_update_stats(xbridge->mbridge, xout->mirrors,
                                 ctx.xin->resubmit_stats->n_packets,
                                 ctx.xin->resubmit_stats->n_bytes);
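The nl_attr_oversized() check above guards a real Netlink constraint: an attribute's length field is 16 bits and counts the attribute header, so the payload must stay below UINT16_MAX minus the header size. A hedged standalone version of such a check (the real OVS helper may differ in detail):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_NLA_HDRLEN 4  /* sizeof(struct nlattr), padded. */

    static bool
    toy_nl_attr_oversized(size_t payload_size)
    {
        return payload_size > UINT16_MAX - TOY_NLA_HDRLEN;
    }

    int
    main(void)
    {
        printf("%d\n", toy_nl_attr_oversized(1024));        /* 0: fits. */
        printf("%d\n", toy_nl_attr_oversized(UINT16_MAX));  /* 1: too big. */
        return 0;
    }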
@@ -3574 +5046 @@
     ofpbuf_uninit(&ctx.stack);
     ofpbuf_uninit(&ctx.action_set);
-    /* Clear the metadata and register wildcard masks, because we won't
-     * use non-header fields as part of the cache. */
-    flow_wildcards_clear_non_packet_fields(wc);
-    /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields.  struct flow uses
-     * the low 8 bits of the 16-bit tp_src and tp_dst members to represent
-     * these fields.  The datapath interface, on the other hand, represents
-     * them with just 8 bits each.  This means that if the high 8 bits of the
-     * masks for these fields somehow become set, then they will get chopped
-     * off by a round trip through the datapath, and revalidation will spot
-     * that as an inconsistency and delete the flow.  Avoid the problem here by
-     * making sure that only the low 8 bits of either field can be unwildcarded
-     * for ICMP. */
-    wc->masks.tp_src &= htons(UINT8_MAX);
-    wc->masks.tp_dst &= htons(UINT8_MAX);
-    /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
-    if (wc->masks.vlan_tci) {
-        wc->masks.vlan_tci |= htons(VLAN_CFI);
+    /* Clear the metadata and register wildcard masks, because we won't
+     * use non-header fields as part of the cache. */
+    flow_wildcards_clear_non_packet_fields(wc);
+    /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields.  struct flow
+     * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
+     * represent these fields.  The datapath interface, on the other hand,
+     * represents them with just 8 bits each.  This means that if the high
+     * 8 bits of the masks for these fields somehow become set, then they
+     * will get chopped off by a round trip through the datapath, and
+     * revalidation will spot that as an inconsistency and delete the flow.
+     * Avoid the problem here by making sure that only the low 8 bits of
+     * either field can be unwildcarded for ICMP.
+     */
+    wc->masks.tp_src &= htons(UINT8_MAX);
+    wc->masks.tp_dst &= htons(UINT8_MAX);
+    /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
+    if (wc->masks.vlan_tci) {
+        wc->masks.vlan_tci |= htons(VLAN_CFI);
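A standalone check of the mask-narrowing trick in this final hunk: in network byte order, htons(UINT8_MAX) keeps exactly the low 8 bits of the 16-bit field, which is where struct flow stores the ICMP type and code:

    #include <arpa/inet.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint16_t tp_src_mask = htons(0xffff);  /* Fully unwildcarded. */

        tp_src_mask &= htons(UINT8_MAX);       /* Narrow to the ICMP bits. */

        /* Prints 0x00ff: only the 8 bits the datapath can round-trip. */
        printf("mask = 0x%04" PRIx16 "\n", ntohs(tp_src_mask));
        return 0;
    }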