
nfp: extend flower matching capabilities

Extends matching capabilities for flower offloads to include vlan,
layer 2, layer 3 and layer 4 type matches. This includes both exact
and wildcard matching.

Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Signed-off-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Pieter Jansen van Vuuren on 2017-06-29 22:08:15 +02:00; committed by David S. Miller
parent af9d842c13
commit 5571e8c9f2
6 changed files with 316 additions and 1 deletion
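
Editor's note: the commit message above mentions both exact and wildcard matching. For context, the offload always describes a flow as an exact-value buffer plus a same-sized mask buffer; a wildcarded field simply carries a zero mask, and nfp_flower_compile_flow_match() below builds that pair by running each compile helper twice (mask_version false, then true). The user-space sketch below is illustrative only, not driver code: the macro names and the pack_tci() helper are invented for this note, but the packing mirrors what nfp_flower_compile_meta_tci() does with FIELD_PREP() and the NFP_FLOWER_MASK_VLAN_* fields added in cmsg.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's GENMASK()/FIELD_PREP() usage;
     * values mirror the NFP_FLOWER_MASK_VLAN_* layout (PCP 15:13, CFI 12,
     * VID 11:0).
     */
    #define VLAN_PRIO_SHIFT 13
    #define VLAN_CFI_BIT    (1u << 12)
    #define VLAN_VID_MASK   0x0fffu

    /* Pack PCP/CFI/VID into a host-order TCI word (the "exact" half). */
    static uint16_t pack_tci(uint8_t prio, uint16_t vid)
    {
            return (uint16_t)(((uint16_t)prio << VLAN_PRIO_SHIFT) |
                              VLAN_CFI_BIT | (vid & VLAN_VID_MASK));
    }

    int main(void)
    {
            uint16_t tci_key  = pack_tci(3, 100); /* PCP 3, VLAN 100        */
            uint16_t tci_mask = 0xffff;           /* all-ones => exact match */
            uint16_t any_mask = 0x0000;           /* all-zeros => wildcarded */

            printf("exact:    key=0x%04x mask=0x%04x\n", tci_key, tci_mask);
            printf("wildcard: key=0x%04x mask=0x%04x\n", tci_key, any_mask);
            return 0;
    }

In other words, a tc flower rule that names a field produces a non-zero mask for it, while a field left out of the rule arrives with an all-zero mask and is ignored by the hardware match.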

drivers/net/ethernet/netronome/nfp/Makefile

@@ -33,6 +33,7 @@ ifeq ($(CONFIG_NFP_APP_FLOWER),y)
 nfp-objs += \
 	    flower/cmsg.o \
 	    flower/main.o \
+	    flower/match.o \
 	    flower/offload.o
 endif

drivers/net/ethernet/netronome/nfp/flower/cmsg.h

@@ -52,6 +52,10 @@
 #define NFP_FLOWER_LAYER_ETHER		BIT(3)
 #define NFP_FLOWER_LAYER_ARP		BIT(4)
 
+#define NFP_FLOWER_MASK_VLAN_PRIO	GENMASK(15, 13)
+#define NFP_FLOWER_MASK_VLAN_CFI	BIT(12)
+#define NFP_FLOWER_MASK_VLAN_VID	GENMASK(11, 0)
+
 /* Metadata without L2 (1W/4B)
  * ----------------------------------------------------------------
  *    3                   2                   1

drivers/net/ethernet/netronome/nfp/flower/main.h

@@ -66,4 +66,9 @@ struct nfp_fl_payload {
 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 			u32 handle, __be16 proto, struct tc_to_netdev *tc);
+
+int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+				  struct nfp_fl_key_ls *key_ls,
+				  struct net_device *netdev,
+				  struct nfp_fl_payload *nfp_flow);
 
 #endif

drivers/net/ethernet/netronome/nfp/flower/match.c (new file)

@@ -0,0 +1,292 @@
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
                            struct tc_cls_flower_offload *flow, u8 key_type,
                            bool mask_version)
{
        struct flow_dissector_key_vlan *flow_vlan;
        u16 tmp_tci;

        /* Populate the metadata frame. */
        frame->nfp_flow_key_layer = key_type;
        frame->mask_id = ~0;

        if (mask_version) {
                frame->tci = cpu_to_be16(~0);
                return;
        }

        flow_vlan = skb_flow_dissector_target(flow->dissector,
                                              FLOW_DISSECTOR_KEY_VLAN,
                                              flow->key);

        /* Populate the tci field. */
        if (!flow_vlan->vlan_id) {
                tmp_tci = 0;
        } else {
                tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                     flow_vlan->vlan_priority) |
                          FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                     flow_vlan->vlan_id) |
                          NFP_FLOWER_MASK_VLAN_CFI;
        }
        frame->tci = cpu_to_be16(tmp_tci);
}

static void
nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
{
        frame->nfp_flow_key_layer = key_type;
        frame->mask_id = 0;
        frame->reserved = 0;
}

static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
                        bool mask_version)
{
        if (mask_version) {
                frame->in_port = cpu_to_be32(~0);
                return 0;
        }

        frame->in_port = cpu_to_be32(cmsg_port);

        return 0;
}

static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                       struct tc_cls_flower_offload *flow,
                       bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_eth_addrs *flow_mac;

        flow_mac = skb_flow_dissector_target(flow->dissector,
                                             FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                             target);

        memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));

        /* Populate mac frame. */
        ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]);
        ether_addr_copy(frame->mac_src, &flow_mac->src[0]);

        if (mask_version)
                frame->mpls_lse = cpu_to_be32(~0);
}

static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
                         struct tc_cls_flower_offload *flow,
                         bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ports *flow_tp;

        flow_tp = skb_flow_dissector_target(flow->dissector,
                                            FLOW_DISSECTOR_KEY_PORTS,
                                            target);

        frame->port_src = flow_tp->src;
        frame->port_dst = flow_tp->dst;
}

static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
                        struct tc_cls_flower_offload *flow,
                        bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv4_addrs *flow_ipv4;
        struct flow_dissector_key_basic *flow_basic;

        flow_ipv4 = skb_flow_dissector_target(flow->dissector,
                                              FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                              target);

        flow_basic = skb_flow_dissector_target(flow->dissector,
                                               FLOW_DISSECTOR_KEY_BASIC,
                                               target);

        /* Populate IPv4 frame. */
        frame->reserved = 0;
        frame->ipv4_src = flow_ipv4->src;
        frame->ipv4_dst = flow_ipv4->dst;
        frame->proto = flow_basic->ip_proto;
        /* Wildcard TOS/TTL for now. */
        frame->tos = 0;
        frame->ttl = 0;
}

static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
                        struct tc_cls_flower_offload *flow,
                        bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv6_addrs *flow_ipv6;
        struct flow_dissector_key_basic *flow_basic;

        flow_ipv6 = skb_flow_dissector_target(flow->dissector,
                                              FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                              target);

        flow_basic = skb_flow_dissector_target(flow->dissector,
                                               FLOW_DISSECTOR_KEY_BASIC,
                                               target);

        /* Populate IPv6 frame. */
        frame->reserved = 0;
        frame->ipv6_src = flow_ipv6->src;
        frame->ipv6_dst = flow_ipv6->dst;
        frame->proto = flow_basic->ip_proto;
        /* Wildcard LABEL/TOS/TTL for now. */
        frame->ipv6_flow_label_exthdr = 0;
        frame->tos = 0;
        frame->ttl = 0;
}

int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
                                  struct nfp_fl_key_ls *key_ls,
                                  struct net_device *netdev,
                                  struct nfp_fl_payload *nfp_flow)
{
        int err;
        u8 *ext;
        u8 *msk;

        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);

        ext = nfp_flow->unmasked_data;
        msk = nfp_flow->mask_data;

        if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
                /* Populate Exact Metadata. */
                nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
                                            flow, key_ls->key_layer, false);
                /* Populate Mask Metadata. */
                nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
                                            flow, key_ls->key_layer, true);
                ext += sizeof(struct nfp_flower_meta_two);
                msk += sizeof(struct nfp_flower_meta_two);

                /* Populate Exact Port data. */
                err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
                                              nfp_repr_get_port_id(netdev),
                                              false);
                if (err)
                        return err;

                /* Populate Mask Port Data. */
                err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
                                              nfp_repr_get_port_id(netdev),
                                              true);
                if (err)
                        return err;

                ext += sizeof(struct nfp_flower_in_port);
                msk += sizeof(struct nfp_flower_in_port);
        } else {
                /* Populate Exact Metadata. */
                nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
                                        key_ls->key_layer);
                /* Populate Mask Metadata. */
                nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
                                        key_ls->key_layer);
                ext += sizeof(struct nfp_flower_meta_one);
                msk += sizeof(struct nfp_flower_meta_one);
        }

        if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
                /* Additional Metadata Fields.
                 * Currently unsupported.
                 */
                return -EOPNOTSUPP;
        }

        if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
                /* Populate Exact MAC Data. */
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
                                       flow, false);
                /* Populate Mask MAC Data. */
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
                                       flow, true);
                ext += sizeof(struct nfp_flower_mac_mpls);
                msk += sizeof(struct nfp_flower_mac_mpls);
        }

        if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
                /* Populate Exact TP Data. */
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
                                         flow, false);
                /* Populate Mask TP Data. */
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
                                         flow, true);
                ext += sizeof(struct nfp_flower_tp_ports);
                msk += sizeof(struct nfp_flower_tp_ports);
        }

        if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
                /* Populate Exact IPv4 Data. */
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
                                        flow, false);
                /* Populate Mask IPv4 Data. */
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
                                        flow, true);
                ext += sizeof(struct nfp_flower_ipv4);
                msk += sizeof(struct nfp_flower_ipv4);
        }

        if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
                /* Populate Exact IPv6 Data. */
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
                                        flow, false);
                /* Populate Mask IPv6 Data. */
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
                                        flow, true);
                ext += sizeof(struct nfp_flower_ipv6);
                msk += sizeof(struct nfp_flower_ipv6);
        }

        return 0;
}

drivers/net/ethernet/netronome/nfp/flower/offload.c

@@ -208,13 +208,17 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 		goto err_free_key_ls;
 	}
 
+	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+	if (err)
+		goto err_destroy_flow;
+
 	/* TODO: Complete flower_add_offload. */
 	err = -EOPNOTSUPP;
+err_destroy_flow:
 	kfree(flow_pay->mask_data);
 	kfree(flow_pay->unmasked_data);
 	kfree(flow_pay);
 err_free_key_ls:
 	kfree(key_layer);
 	return err;

drivers/net/ethernet/netronome/nfp/nfp_net_repr.h

@@ -38,6 +38,8 @@ struct metadata_dst;
 struct nfp_net;
 struct nfp_port;
 
+#include <net/dst_metadata.h>
+
 /**
  * struct nfp_reprs - container for representor netdevs
  * @num_reprs:   Number of elements in reprs array
@@ -104,6 +106,13 @@ static inline bool nfp_netdev_is_nfp_repr(struct net_device *netdev)
 	return netdev->netdev_ops == &nfp_repr_netdev_ops;
 }
 
+static inline int nfp_repr_get_port_id(struct net_device *netdev)
+{
+	struct nfp_repr *priv = netdev_priv(netdev);
+
+	return priv->dst->u.port_info.port_id;
+}
+
 void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
 int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
 		  u32 cmsg_port_id, struct nfp_port *port,