Merge branch 'cxgb4-more-flower-offloads'

Rahul Lakkireddy says:

====================
cxgb4: enable more tc flower offload matches and actions

This patch series enables more matches and actions for TC Flower
offload support on Chelsio adapters.

Patch 1 enables matching on IP TOS.

Patch 2 enables matching on VLAN TCI.

Patch 3 adds support for action PASS.

Patch 4 adds support for ETH-DMAC rewrite via TC-PEDIT action. It also
adds a check to ensure that VLAN/ETH-DMAC rewrite actions are valid
only in combination with the egress redirect action.

Patch 5 introduces SMT ops for adding/removing entries from the SMAC
table in hardware, in preparation for patch 6.

Patch 6 adds support for ETH-SMAC rewrite via TC-PEDIT action.

Patch 7 introduces fw_filter2_wr to support L3/L4 header rewrites
in preparation for patch 8.

Patch 8 adds support for rewriting L3/L4 header fields via TC-PEDIT
action. Supported fields for rewrite are:
IPv4 src/dst address, IPv6 src/dst address, and TCP/UDP sport/dport.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit: 322d95f04a
Committer: David S. Miller <davem@davemloft.net>
Date: 2017-10-20 13:06:53 +01:00
11 changed files with 1068 additions and 27 deletions

drivers/net/ethernet/chelsio/cxgb4/Makefile

@@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
cudbg_common.o cudbg_lib.o

drivers/net/ethernet/chelsio/cxgb4/cxgb4.h

@@ -367,6 +367,7 @@ struct adapter_params {
unsigned int max_ird_adapter; /* Max read depth per adapter */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
u8 fw_caps_support; /* 32-bit Port Capabilities */
bool filter2_wr_support; /* FW support for FILTER2_WR */
/* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is
* used by the Port
@@ -858,6 +859,7 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
struct smt_data *smt;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -1063,10 +1065,19 @@ struct ch_filter_specification {
uint32_t newdmac:1; /* rewrite destination MAC address */
uint32_t newsmac:1; /* rewrite source MAC address */
uint32_t newvlan:2; /* rewrite VLAN Tag */
uint32_t nat_mode:3; /* specify NAT operation mode */
uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
uint8_t smac[ETH_ALEN]; /* new source MAC address */
uint16_t vlan; /* VLAN Tag to insert */
u8 nat_lip[16]; /* local IP to use after NAT'ing */
u8 nat_fip[16]; /* foreign IP to use after NAT'ing */
u16 nat_lport; /* local port to use after NAT'ing */
u16 nat_fport; /* foreign port to use after NAT'ing */
/* reservation for future additions */
u8 rsvd[24];
/* Filter rule value/mask pairs.
*/
struct ch_filter_tuple val;
@@ -1086,6 +1097,10 @@ enum {
VLAN_REWRITE
};
enum {
NAT_MODE_ALL = 7, /* NAT on entire 4-tuple */
};
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware or the
* firmware command. The use of bit-field structure elements is purely to
@@ -1098,9 +1113,9 @@ struct filter_entry {
u32 locked:1; /* filter is administratively locked */
u32 pending:1; /* filter action is pending firmware reply */
u32 smtidx:8; /* Source MAC Table index for smac */
struct filter_ctx *ctx; /* Caller's completion hook */
struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
struct smt_entry *smt; /* Source Mac Table entry for smac */
struct net_device *dev; /* Associated net device */
u32 tid; /* This will store the actual tid */
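
As a hedged illustration of how the new fields fit together, the sketch below (a hypothetical helper, not part of this series) fills in the NAT rewrite state for a full 4-tuple rewrite on an IPv4 rule; set_filter_wr() later copies these fields into the work request.

/* Hypothetical helper (illustration only): request a full 4-tuple
 * rewrite on an IPv4 filter rule.
 */
static void example_set_nat_rewrite(struct ch_filter_specification *fs,
				    __be32 new_lip, __be32 new_fip,
				    u16 new_lport, u16 new_fport)
{
	fs->nat_mode = NAT_MODE_ALL;      /* rewrite the entire 4-tuple */
	memcpy(fs->nat_lip, &new_lip, 4); /* post-NAT local IP */
	memcpy(fs->nat_fip, &new_fip, 4); /* post-NAT foreign IP */
	fs->nat_lport = new_lport;        /* post-NAT local port */
	fs->nat_fport = new_fport;        /* post-NAT foreign port */
}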

drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c

@@ -34,7 +34,9 @@
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_tcb.h"
#include "l2t.h"
#include "smt.h"
#include "t4fw_api.h"
#include "cxgb4_filter.h"
@@ -311,7 +313,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
int set_filter_wr(struct adapter *adapter, int fidx)
{
struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct fw_filter_wr *fwr;
struct fw_filter2_wr *fwr;
struct sk_buff *skb;
skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
@@ -332,6 +334,21 @@ int set_filter_wr(struct adapter *adapter, int fidx)
}
}
/* If the new filter requires loopback Source MAC rewriting then
* we need to allocate an SMT entry for the filter.
*/
if (f->fs.newsmac) {
f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
if (!f->smt) {
if (f->l2t) {
cxgb4_l2t_release(f->l2t);
f->l2t = NULL;
}
kfree_skb(skb);
return -ENOMEM;
}
}
fwr = __skb_put_zero(skb, sizeof(*fwr));
/* It would be nice to put most of the following in t4_hw.c but most
@@ -342,7 +359,10 @@ int set_filter_wr(struct adapter *adapter, int fidx)
* filter specification structure but for now it's easiest to simply
* put this fairly direct code in line ...
*/
fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
if (adapter->params.filter2_wr_support)
fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
else
fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
fwr->tid_to_iq =
htonl(FW_FILTER_WR_TID_V(f->tid) |
@@ -357,7 +377,6 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
@@ -404,8 +423,18 @@ int set_filter_wr(struct adapter *adapter, int fidx)
fwr->lpm = htons(f->fs.mask.lport);
fwr->fp = htons(f->fs.val.fport);
fwr->fpm = htons(f->fs.mask.fport);
if (f->fs.newsmac)
memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
if (adapter->params.filter2_wr_support) {
fwr->natmode_to_ulp_type =
FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
ULP_MODE_TCPDDP :
ULP_MODE_NONE) |
FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
fwr->newlport = htons(f->fs.nat_lport);
fwr->newfport = htons(f->fs.nat_fport);
}
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
@@ -463,6 +492,9 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
if (f->l2t)
cxgb4_l2t_release(f->l2t);
if (f->smt)
cxgb4_smt_release(f->smt);
/* The zeroing of the filter rule below clears the filter valid,
* pending, locked flags, l2t pointer, etc. so it's all we need for
* this operation.
@@ -757,6 +789,62 @@ out:
return ret;
}
static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
unsigned int ftid, u16 word, u64 mask, u64 val,
int no_reply)
{
struct cpl_set_tcb_field *req;
struct sk_buff *skb;
skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
req->reply_ctrl = htons(REPLY_CHAN_V(0) |
QUEUENO_V(adap->sge.fw_evtq.abs_id) |
NO_REPLY_V(no_reply));
req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
req->mask = cpu_to_be64(mask);
req->val = cpu_to_be64(val);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
t4_ofld_send(adap, skb);
return 0;
}
/* Set one of the t_flags bits in the TCB.
*/
static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
unsigned int ftid, unsigned int bit_pos,
unsigned int val, int no_reply)
{
return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
(unsigned long long)val << bit_pos, no_reply);
}
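/* Point an installed filter's TCB at its SMT entry: set the CWR flag
 * in the TCB t_flags, then program TCB_SMAC_SEL with the index of the
 * SMT entry allocated in set_filter_wr().
 */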
static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
{
int err;
/* do a set-tcb for smac-sel and CWR bit.. */
err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
if (err)
goto smac_err;
err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
TCB_SMAC_SEL_V(f->smt->idx), 1);
if (!err)
return 0;
smac_err:
dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n",
f->tid, err);
return err;
}
/* Handle a filter write/deletion reply. */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
@@ -795,19 +883,23 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
clear_filter(adap, f);
if (ctx)
ctx->result = 0;
} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
idx);
clear_filter(adap, f);
if (ctx)
ctx->result = -ENOMEM;
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
f->pending = 0; /* asynchronous setup completed */
f->valid = 1;
if (ctx) {
ctx->result = 0;
ctx->tid = idx;
int err = 0;
if (f->fs.newsmac)
err = configure_filter_smac(adap, f);
if (!err) {
f->pending = 0; /* async setup completed */
f->valid = 1;
if (ctx) {
ctx->result = 0;
ctx->tid = idx;
}
} else {
clear_filter(adap, f);
if (ctx)
ctx->result = err;
}
} else {
/* Something went wrong. Issue a warning about the

drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c

@@ -77,6 +77,7 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
@@ -563,6 +564,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_l2t_write_rpl *p = (void *)rsp;
do_l2t_write_rpl(q->adap, p);
} else if (opcode == CPL_SMT_WRITE_RPL) {
const struct cpl_smt_write_rpl *p = (void *)rsp;
do_smt_write_rpl(q->adap, p);
} else if (opcode == CPL_SET_TCB_RPL) {
const struct cpl_set_tcb_rpl *p = (void *)rsp;
@@ -3905,6 +3910,16 @@ static int adap_init0(struct adapter *adap)
1, params, val);
adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
/* See if FW supports FW_FILTER2 work request */
if (is_t4(adap->params.chip)) {
adap->params.filter2_wr_support = 0;
} else {
params[0] = FW_PARAM_DEV(FILTER2_WR);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1, params, val);
adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
}
/*
* Get device capabilities so we can determine what resources we need
* to manage.
@@ -4641,6 +4656,7 @@ static void free_some_resources(struct adapter *adapter)
{
unsigned int i;
kvfree(adapter->smt);
kvfree(adapter->l2t);
t4_cleanup_sched(adapter);
kvfree(adapter->tids.tid_tab);
@@ -5067,6 +5083,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
cfg_queues(adapter);
adapter->smt = t4_init_smt();
if (!adapter->smt) {
/* We tolerate a lack of SMT, giving up some functionality */
dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
}
adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
if (!adapter->l2t) {
/* We tolerate a lack of L2T, giving up some functionality */

drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c

@@ -32,8 +32,9 @@
* SOFTWARE.
*/
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>
#include "cxgb4.h"
@@ -41,6 +42,27 @@
#define STATS_CHECK_PERIOD (HZ / 2)
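/* Table mapping each supported TC pedit field to the size and byte
 * offset of the corresponding rewrite value inside struct
 * ch_filter_specification (triples built by PEDIT_FIELDS() in
 * cxgb4_tc_flower.h).
 */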
struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
@@ -113,6 +135,11 @@ static void cxgb4_process_flow_match(struct net_device *dev,
memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));
/* also initialize nat_lip/fip to same values */
memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -130,6 +157,10 @@
memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));
/* also initialize nat_lip/fip to same values */
memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
}
if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
@@ -145,6 +176,57 @@
fs->mask.lport = cpu_to_be16(mask->dst);
fs->val.fport = cpu_to_be16(key->src);
fs->mask.fport = cpu_to_be16(mask->src);
/* also initialize nat_lport/fport to same values */
fs->nat_lport = cpu_to_be16(key->dst);
fs->nat_fport = cpu_to_be16(key->src);
}
if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
struct flow_dissector_key_ip *key, *mask;
key = skb_flow_dissector_target(cls->dissector,
FLOW_DISSECTOR_KEY_IP,
cls->key);
mask = skb_flow_dissector_target(cls->dissector,
FLOW_DISSECTOR_KEY_IP,
cls->mask);
fs->val.tos = key->tos;
fs->mask.tos = mask->tos;
}
if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_dissector_key_vlan *key, *mask;
u16 vlan_tci, vlan_tci_mask;
key = skb_flow_dissector_target(cls->dissector,
FLOW_DISSECTOR_KEY_VLAN,
cls->key);
mask = skb_flow_dissector_target(cls->dissector,
FLOW_DISSECTOR_KEY_VLAN,
cls->mask);
vlan_tci = key->vlan_id | (key->vlan_priority <<
VLAN_PRIO_SHIFT);
vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
VLAN_PRIO_SHIFT);
fs->val.ivlan = cpu_to_be16(vlan_tci);
fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
/* Chelsio adapters use the ivlan_vld bit to match VLAN packets
 * as 802.1Q. Also, when a VLAN tag is present in packets, the
 * ethtype match is then used to match on the ethtype of the
 * inner header, i.e. the header following the VLAN header.
 * So, for 802.1Q packets, set ivlan_vld based on the ethtype
 * info supplied by TC and then reset the ethtype value;
 * otherwise, hw will try to match the supplied ethtype value
 * against the ethtype of the inner header.
 */
if (fs->val.ethtype == ETH_P_8021Q) {
fs->val.ivlan_vld = 1;
fs->mask.ivlan_vld = 1;
fs->val.ethtype = 0;
fs->mask.ethtype = 0;
}
}
/* Match only packets coming from the ingress port where this
@@ -157,19 +239,162 @@ static void cxgb4_process_flow_match(struct net_device *dev,
static int cxgb4_validate_flow_match(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
u16 ethtype_mask = 0;
u16 ethtype_key = 0;
if (cls->dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IP))) {
netdev_warn(dev, "Unsupported key used: 0x%x\n",
cls->dissector->used_keys);
return -EOPNOTSUPP;
}
if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
skb_flow_dissector_target(cls->dissector,
FLOW_DISSECTOR_KEY_BASIC,
cls->key);
struct flow_dissector_key_basic *mask =
skb_flow_dissector_target(cls->dissector,
FLOW_DISSECTOR_KEY_BASIC,
cls->mask);
ethtype_key = ntohs(key->n_proto);
ethtype_mask = ntohs(mask->n_proto);
}
if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
u16 eth_ip_type = ethtype_key & ethtype_mask;
struct flow_dissector_key_ip *mask;
if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
netdev_err(dev, "IP Key supported only with IPv4/v6");
return -EINVAL;
}
mask = skb_flow_dissector_target(cls->dissector,
FLOW_DISSECTOR_KEY_IP,
cls->mask);
if (mask->ttl) {
netdev_warn(dev, "ttl match unsupported for offload");
return -EOPNOTSUPP;
}
}
return 0;
}
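/* Look up @field in the pedits[] table and copy the pedit "set" value
 * (val & ~mask) into the matching ch_filter_specification field.
 */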
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
u8 field)
{
u32 set_val = val & ~mask;
u32 offset = 0;
u8 size = 1;
int i;
for (i = 0; i < ARRAY_SIZE(pedits); i++) {
if (pedits[i].field == field) {
offset = pedits[i].offset;
size = pedits[i].size;
break;
}
}
memcpy((u8 *)fs + offset, &set_val, size);
}
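/* Translate one 32-bit pedit value/mask/offset key into filter rewrite
 * state. Note the packed Ethernet case: the 32-bit word at offset 4
 * spans DMAC[47:32] and SMAC[15:0], so the mask decides which half is
 * being rewritten.
 */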
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
u32 mask, u32 offset, u8 htype)
{
switch (htype) {
case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
switch (offset) {
case PEDIT_ETH_DMAC_31_0:
fs->newdmac = 1;
offload_pedit(fs, val, mask, ETH_DMAC_31_0);
break;
case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
if (~mask & PEDIT_ETH_DMAC_MASK)
offload_pedit(fs, val, mask, ETH_DMAC_47_32);
else
offload_pedit(fs, val >> 16, mask >> 16,
ETH_SMAC_15_0);
break;
case PEDIT_ETH_SMAC_47_16:
fs->newsmac = 1;
offload_pedit(fs, val, mask, ETH_SMAC_47_16);
}
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
switch (offset) {
case PEDIT_IP4_SRC:
offload_pedit(fs, val, mask, IP4_SRC);
break;
case PEDIT_IP4_DST:
offload_pedit(fs, val, mask, IP4_DST);
}
fs->nat_mode = NAT_MODE_ALL;
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
switch (offset) {
case PEDIT_IP6_SRC_31_0:
offload_pedit(fs, val, mask, IP6_SRC_31_0);
break;
case PEDIT_IP6_SRC_63_32:
offload_pedit(fs, val, mask, IP6_SRC_63_32);
break;
case PEDIT_IP6_SRC_95_64:
offload_pedit(fs, val, mask, IP6_SRC_95_64);
break;
case PEDIT_IP6_SRC_127_96:
offload_pedit(fs, val, mask, IP6_SRC_127_96);
break;
case PEDIT_IP6_DST_31_0:
offload_pedit(fs, val, mask, IP6_DST_31_0);
break;
case PEDIT_IP6_DST_63_32:
offload_pedit(fs, val, mask, IP6_DST_63_32);
break;
case PEDIT_IP6_DST_95_64:
offload_pedit(fs, val, mask, IP6_DST_95_64);
break;
case PEDIT_IP6_DST_127_96:
offload_pedit(fs, val, mask, IP6_DST_127_96);
}
fs->nat_mode = NAT_MODE_ALL;
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
offload_pedit(fs, cpu_to_be32(val) >> 16,
cpu_to_be32(mask) >> 16,
TCP_SPORT);
else
offload_pedit(fs, cpu_to_be32(val),
cpu_to_be32(mask), TCP_DPORT);
}
fs->nat_mode = NAT_MODE_ALL;
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
offload_pedit(fs, cpu_to_be32(val) >> 16,
cpu_to_be32(mask) >> 16,
UDP_SPORT);
else
offload_pedit(fs, cpu_to_be32(val),
cpu_to_be32(mask), UDP_DPORT);
}
fs->nat_mode = NAT_MODE_ALL;
}
}
static void cxgb4_process_flow_actions(struct net_device *in,
struct tc_cls_flower_offload *cls,
struct ch_filter_specification *fs)
@@ -179,7 +404,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
tcf_exts_to_list(cls->exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
if (is_tcf_gact_ok(a)) {
fs->action = FILTER_PASS;
} else if (is_tcf_gact_shot(a)) {
fs->action = FILTER_DROP;
} else if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
@@ -210,19 +437,152 @@ static void cxgb4_process_flow_actions(struct net_device *in,
default:
break;
}
} else if (is_tcf_pedit(a)) {
u32 mask, val, offset;
int nkeys, i;
u8 htype;
nkeys = tcf_pedit_nkeys(a);
for (i = 0; i < nkeys; i++) {
htype = tcf_pedit_htype(a, i);
mask = tcf_pedit_mask(a, i);
val = tcf_pedit_val(a, i);
offset = tcf_pedit_offset(a, i);
process_pedit_field(fs, val, mask, offset,
htype);
}
}
}
}
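/* For example, valid_l4_mask(0xffff0000) and valid_l4_mask(0x0000ffff)
 * return true (only one port rewritten), while valid_l4_mask(0xffffffff)
 * returns false (both halves of the 32-bit pedit word rewritten).
 */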
static bool valid_l4_mask(u32 mask)
{
u16 hi, lo;
/* Either the upper 16-bits (SPORT) OR the lower
* 16-bits (DPORT) can be set, but NOT BOTH.
*/
hi = (mask >> 16) & 0xFFFF;
lo = mask & 0xFFFF;
return hi && lo ? false : true;
}
static bool valid_pedit_action(struct net_device *dev,
const struct tc_action *a)
{
u32 mask, offset;
u8 cmd, htype;
int nkeys, i;
nkeys = tcf_pedit_nkeys(a);
for (i = 0; i < nkeys; i++) {
htype = tcf_pedit_htype(a, i);
cmd = tcf_pedit_cmd(a, i);
mask = tcf_pedit_mask(a, i);
offset = tcf_pedit_offset(a, i);
if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
netdev_err(dev, "%s: Unsupported pedit cmd\n",
__func__);
return false;
}
switch (htype) {
case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
switch (offset) {
case PEDIT_ETH_DMAC_31_0:
case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
case PEDIT_ETH_SMAC_47_16:
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
switch (offset) {
case PEDIT_IP4_SRC:
case PEDIT_IP4_DST:
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
switch (offset) {
case PEDIT_IP6_SRC_31_0:
case PEDIT_IP6_SRC_63_32:
case PEDIT_IP6_SRC_95_64:
case PEDIT_IP6_SRC_127_96:
case PEDIT_IP6_DST_31_0:
case PEDIT_IP6_DST_63_32:
case PEDIT_IP6_DST_95_64:
case PEDIT_IP6_DST_127_96:
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
__func__);
return false;
}
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
__func__);
return false;
}
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
break;
default:
netdev_err(dev, "%s: Unsupported pedit type\n",
__func__);
return false;
}
}
return true;
}
static int cxgb4_validate_flow_actions(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
const struct tc_action *a;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
LIST_HEAD(actions);
tcf_exts_to_list(cls->exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
if (is_tcf_gact_ok(a)) {
/* Do nothing */
} else if (is_tcf_gact_shot(a)) {
/* Do nothing */
} else if (is_tcf_mirred_egress_redirect(a)) {
struct adapter *adap = netdev2adap(dev);
@@ -247,6 +607,7 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
__func__);
return -EINVAL;
}
act_redir = true;
} else if (is_tcf_vlan(a)) {
u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
u32 vlan_action = tcf_vlan_action(a);
@@ -267,11 +628,25 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
__func__);
return -EOPNOTSUPP;
}
act_vlan = true;
} else if (is_tcf_pedit(a)) {
bool pedit_valid = valid_pedit_action(dev, a);
if (!pedit_valid)
return -EOPNOTSUPP;
act_pedit = true;
} else {
netdev_err(dev, "%s: Unsupported action\n", __func__);
return -EOPNOTSUPP;
}
}
if ((act_pedit || act_vlan) && !act_redir) {
netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
__func__);
return -EINVAL;
}
return 0;
}
@@ -299,8 +674,8 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
fs = &ch_flower->fs;
fs->hitcnts = 1;
cxgb4_process_flow_actions(dev, cls, fs);
cxgb4_process_flow_match(dev, cls, fs);
cxgb4_process_flow_actions(dev, cls, fs);
fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
if (fidx < 0) {

drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h

@@ -54,6 +54,60 @@ struct ch_tc_flower_entry {
u32 filter_id;
};
enum {
ETH_DMAC_31_0, /* dmac bits 0.. 31 */
ETH_DMAC_47_32, /* dmac bits 32..47 */
ETH_SMAC_15_0, /* smac bits 0.. 15 */
ETH_SMAC_47_16, /* smac bits 16..47 */
IP4_SRC, /* 32-bit IPv4 src */
IP4_DST, /* 32-bit IPv4 dst */
IP6_SRC_31_0, /* src bits 0.. 31 */
IP6_SRC_63_32, /* src bits 63.. 32 */
IP6_SRC_95_64, /* src bits 95.. 64 */
IP6_SRC_127_96, /* src bits 127..96 */
IP6_DST_31_0, /* dst bits 0.. 31 */
IP6_DST_63_32, /* dst bits 63.. 32 */
IP6_DST_95_64, /* dst bits 95.. 64 */
IP6_DST_127_96, /* dst bits 127..96 */
TCP_SPORT, /* 16-bit TCP sport */
TCP_DPORT, /* 16-bit TCP dport */
UDP_SPORT, /* 16-bit UDP sport */
UDP_DPORT, /* 16-bit UDP dport */
};
struct ch_tc_pedit_fields {
u8 field;
u8 size;
u32 offset;
};
#define PEDIT_FIELDS(type, field, size, fs_field, offset) \
{ type## field, size, \
offsetof(struct ch_filter_specification, fs_field) + (offset) }
#define PEDIT_ETH_DMAC_MASK 0xffff
#define PEDIT_TCP_UDP_SPORT_MASK 0xffff
#define PEDIT_ETH_DMAC_31_0 0x0
#define PEDIT_ETH_DMAC_47_32_SMAC_15_0 0x4
#define PEDIT_ETH_SMAC_47_16 0x8
#define PEDIT_IP4_SRC 0xC
#define PEDIT_IP4_DST 0x10
#define PEDIT_IP6_SRC_31_0 0x8
#define PEDIT_IP6_SRC_63_32 0xC
#define PEDIT_IP6_SRC_95_64 0x10
#define PEDIT_IP6_SRC_127_96 0x14
#define PEDIT_IP6_DST_31_0 0x18
#define PEDIT_IP6_DST_63_32 0x1C
#define PEDIT_IP6_DST_95_64 0x20
#define PEDIT_IP6_DST_127_96 0x24
#define PEDIT_TCP_SPORT_DPORT 0x0
#define PEDIT_UDP_SPORT_DPORT 0x0
int cxgb4_tc_flower_replace(struct net_device *dev,
struct tc_cls_flower_offload *cls);
int cxgb4_tc_flower_destroy(struct net_device *dev,

drivers/net/ethernet/chelsio/cxgb4/smt.c (new file)

@@ -0,0 +1,247 @@
/*
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "cxgb4.h"
#include "smt.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"
struct smt_data *t4_init_smt(void)
{
unsigned int smt_size;
struct smt_data *s;
int i;
smt_size = SMT_SIZE;
s = kvzalloc(sizeof(*s) + smt_size * sizeof(struct smt_entry),
GFP_KERNEL);
if (!s)
return NULL;
s->smt_size = smt_size;
rwlock_init(&s->lock);
for (i = 0; i < s->smt_size; ++i) {
s->smtab[i].idx = i;
s->smtab[i].state = SMT_STATE_UNUSED;
memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
spin_lock_init(&s->smtab[i].lock);
atomic_set(&s->smtab[i].refcnt, 0);
}
return s;
}
static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
{
struct smt_entry *first_free = NULL;
struct smt_entry *e, *end;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
if (atomic_read(&e->refcnt) == 0) {
if (!first_free)
first_free = e;
} else {
if (e->state == SMT_STATE_SWITCHING) {
/* This entry is actually in use. See if we
 * can reuse it.
 */
if (memcmp(e->src_mac, smac, ETH_ALEN) == 0)
goto found_reuse;
}
}
}
if (first_free) {
e = first_free;
goto found;
}
return NULL;
found:
e->state = SMT_STATE_UNUSED;
found_reuse:
return e;
}
static void t4_smte_free(struct smt_entry *e)
{
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
e->state = SMT_STATE_UNUSED;
}
spin_unlock_bh(&e->lock);
}
/**
 * cxgb4_smt_release - release an SMT entry
 * @e: smt entry to release
 *
 * Releases the reference count and frees up an SMT entry from the SMT table.
 */
void cxgb4_smt_release(struct smt_entry *e)
{
if (atomic_dec_and_test(&e->refcnt))
t4_smte_free(e);
}
EXPORT_SYMBOL(cxgb4_smt_release);
void do_smt_write_rpl(struct adapter *adap, const struct cpl_smt_write_rpl *rpl)
{
unsigned int smtidx = TID_TID_G(GET_TID(rpl));
struct smt_data *s = adap->smt;
if (unlikely(rpl->status != CPL_ERR_NONE)) {
struct smt_entry *e = &s->smtab[smtidx];
dev_err(adap->pdev_dev,
"Unexpected SMT_WRITE_RPL status %u for entry %u\n",
rpl->status, smtidx);
spin_lock(&e->lock);
e->state = SMT_STATE_ERROR;
spin_unlock(&e->lock);
return;
}
}
static int write_smt_entry(struct adapter *adapter, struct smt_entry *e)
{
struct cpl_t6_smt_write_req *t6req;
struct smt_data *s = adapter->smt;
struct cpl_smt_write_req *req;
struct sk_buff *skb;
int size;
u8 row;
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
size = sizeof(*req);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
/* Source MAC Table (SMT) contains 256 SMAC entries
* organized in 128 rows of 2 entries each.
*/
req = (struct cpl_smt_write_req *)__skb_put(skb, size);
INIT_TP_WR(req, 0);
/* Each row contains an SMAC pair.
* LSB selects the SMAC entry within a row
*/
row = (e->idx >> 1);
if (e->idx & 1) {
req->pfvf1 = 0x0;
memcpy(req->src_mac1, e->src_mac, ETH_ALEN);
/* fill pfvf0/src_mac0 with entry
* at prev index from smt-tab.
*/
req->pfvf0 = 0x0;
memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
ETH_ALEN);
} else {
req->pfvf0 = 0x0;
memcpy(req->src_mac0, e->src_mac, ETH_ALEN);
/* fill pfvf1/src_mac1 with entry
* at next index from smt-tab
*/
req->pfvf1 = 0x0;
memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
ETH_ALEN);
}
} else {
size = sizeof(*t6req);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
/* Source MAC Table (SMT) contains 256 SMAC entries */
t6req = (struct cpl_t6_smt_write_req *)__skb_put(skb, size);
INIT_TP_WR(t6req, 0);
req = (struct cpl_smt_write_req *)t6req;
/* fill pfvf0/src_mac0 from smt-tab */
req->pfvf0 = 0x0;
memcpy(req->src_mac0, s->smtab[e->idx].src_mac, ETH_ALEN);
row = e->idx;
}
OPCODE_TID(req) =
htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, e->idx |
TID_QID_V(adapter->sge.fw_evtq.abs_id)));
req->params = htonl(SMTW_NORPL_V(0) |
SMTW_IDX_V(row) |
SMTW_OVLAN_IDX_V(0));
t4_mgmt_tx(adapter, skb);
return 0;
}
static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
u8 *smac)
{
struct smt_data *s = adap->smt;
struct smt_entry *e;
write_lock_bh(&s->lock);
e = find_or_alloc_smte(s, smac);
if (e) {
spin_lock(&e->lock);
if (!atomic_read(&e->refcnt)) {
atomic_set(&e->refcnt, 1);
e->state = SMT_STATE_SWITCHING;
e->pfvf = pfvf;
memcpy(e->src_mac, smac, ETH_ALEN);
write_smt_entry(adap, e);
} else {
atomic_inc(&e->refcnt);
}
spin_unlock(&e->lock);
}
write_unlock_bh(&s->lock);
return e;
}
/**
 * cxgb4_smt_alloc_switching - allocate an SMT entry for a switching rule
 * @dev: net_device pointer
 * @smac: MAC address to add to the SMT
 *
 * Allocates an SMT entry to be used by the switching rule of a filter.
 * Returns a pointer to the SMT entry created.
 */
struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac)
{
struct adapter *adap = netdev2adap(dev);
return t4_smt_alloc_switching(adap, 0x0, smac);
}
EXPORT_SYMBOL(cxgb4_smt_alloc_switching);

drivers/net/ethernet/chelsio/cxgb4/smt.h (new file)

@@ -0,0 +1,76 @@
/*
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXGB4_SMT_H
#define __CXGB4_SMT_H
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/atomic.h>
struct adapter;
struct cpl_smt_write_rpl;
/* SMT related handling. Heavily adapted based on l2t ops in l2t.h/l2t.c
*/
enum {
SMT_STATE_SWITCHING,
SMT_STATE_UNUSED,
SMT_STATE_ERROR
};
enum {
SMT_SIZE = 256
};
struct smt_entry {
u16 state;
u16 idx;
u16 pfvf;
u8 src_mac[ETH_ALEN];
atomic_t refcnt;
spinlock_t lock; /* protect smt entry add,removal */
};
struct smt_data {
unsigned int smt_size;
rwlock_t lock;
struct smt_entry smtab[0];
};
struct smt_data *t4_init_smt(void);
struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac);
void cxgb4_smt_release(struct smt_entry *e);
void do_smt_write_rpl(struct adapter *p, const struct cpl_smt_write_rpl *rpl);
#endif /* __CXGB4_SMT_H */
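
A minimal usage sketch of this API, mirroring how set_filter_wr() and clear_filter() drive it (the wrapper below is hypothetical, added here only for illustration):

/* Hypothetical wrapper: claim an SMT entry for a source-MAC rewrite.
 * The entry holds a reference until cxgb4_smt_release() is called.
 */
static int example_claim_smt(struct net_device *dev, u8 *smac,
			     struct smt_entry **out)
{
	struct smt_entry *e = cxgb4_smt_alloc_switching(dev, smac);

	if (!e)
		return -ENOMEM;	/* all SMT_SIZE entries in use */
	*out = e;		/* pair with cxgb4_smt_release(e) */
	return 0;
}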

drivers/net/ethernet/chelsio/cxgb4/t4_msg.h

@@ -50,6 +50,7 @@ enum {
CPL_RX_DATA_ACK = 0xD,
CPL_TX_PKT = 0xE,
CPL_L2T_WRITE_REQ = 0x12,
CPL_SMT_WRITE_REQ = 0x14,
CPL_TID_RELEASE = 0x1A,
CPL_TX_DATA_ISO = 0x1F,
@@ -60,6 +61,7 @@ enum {
CPL_PEER_CLOSE = 0x26,
CPL_ABORT_REQ_RSS = 0x2B,
CPL_ABORT_RPL_RSS = 0x2D,
CPL_SMT_WRITE_RPL = 0x2E,
CPL_RX_PHYS_ADDR = 0x30,
CPL_CLOSE_CON_RPL = 0x32,
@@ -681,8 +683,8 @@ struct cpl_set_tcb_field {
};
/* cpl_set_tcb_field.word_cookie fields */
#define TCB_WORD_S 0
#define TCB_WORD(x) ((x) << TCB_WORD_S)
#define TCB_WORD_S 0
#define TCB_WORD_V(x) ((x) << TCB_WORD_S)
#define TCB_COOKIE_S 5
#define TCB_COOKIE_M 0x7
@@ -1266,6 +1268,44 @@ struct cpl_l2t_write_rpl {
u8 rsvd[3];
};
struct cpl_smt_write_req {
WR_HDR;
union opcode_tid ot;
__be32 params;
__be16 pfvf1;
u8 src_mac1[6];
__be16 pfvf0;
u8 src_mac0[6];
};
struct cpl_t6_smt_write_req {
WR_HDR;
union opcode_tid ot;
__be32 params;
__be64 tag;
__be16 pfvf0;
u8 src_mac0[6];
__be32 local_ip;
__be32 rsvd;
};
struct cpl_smt_write_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
/* cpl_smt_{read,write}_req.params fields */
#define SMTW_OVLAN_IDX_S 16
#define SMTW_OVLAN_IDX_V(x) ((x) << SMTW_OVLAN_IDX_S)
#define SMTW_IDX_S 20
#define SMTW_IDX_V(x) ((x) << SMTW_IDX_S)
#define SMTW_NORPL_S 31
#define SMTW_NORPL_V(x) ((x) << SMTW_NORPL_S)
#define SMTW_NORPL_F SMTW_NORPL_V(1U)
struct cpl_rdma_terminate {
union opcode_tid ot;
__be16 rsvd;

drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h (new file)

@@ -0,0 +1,47 @@
/*
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4_TCB_H
#define __T4_TCB_H
#define TCB_SMAC_SEL_W 0
#define TCB_SMAC_SEL_S 24
#define TCB_SMAC_SEL_M 0xffULL
#define TCB_SMAC_SEL_V(x) ((x) << TCB_SMAC_SEL_S)
#define TCB_T_FLAGS_W 1
#define TF_CCTRL_CWR_S 61
#endif /* __T4_TCB_H */
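
For reference, these macros follow the usual cxgb4 _W/_S/_M/_V convention (TCB word index, bit shift, field mask, shifted value). A sketch of how configure_filter_smac() in cxgb4_filter.c composes them:

/* Arguments built for set_tcb_field() from the macros above:
 *   word  = TCB_SMAC_SEL_W                   -> 128-bit TCB word 0
 *   mask  = TCB_SMAC_SEL_V(TCB_SMAC_SEL_M)   -> 0xff << 24
 *   value = TCB_SMAC_SEL_V(smt_idx)          -> smt_idx << 24
 */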

drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h

@@ -105,7 +105,8 @@ enum fw_wr_opcodes {
FW_ISCSI_TX_DATA_WR = 0x45,
FW_PTP_TX_PKT_WR = 0x46,
FW_CRYPTO_LOOKASIDE_WR = 0X6d,
FW_LASTC2E_WR = 0x70
FW_LASTC2E_WR = 0x70,
FW_FILTER2_WR = 0x77
};
struct fw_wr_hdr {
@@ -201,6 +202,51 @@ struct fw_filter_wr {
__u8 sma[6];
};
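/* fw_filter2_wr extends fw_filter_wr with NAT rewrite state: the
 * leading fields mirror fw_filter_wr (which is why set_filter_wr() can
 * build one fw_filter2_wr and merely pick the opcode at submit time),
 * while the tail adds the NAT mode/ULP type and the post-NAT 4-tuple.
 */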
struct fw_filter2_wr {
__be32 op_pkd;
__be32 len16_pkd;
__be64 r3;
__be32 tid_to_iq;
__be32 del_filter_to_l2tix;
__be16 ethtype;
__be16 ethtypem;
__u8 frag_to_ovlan_vldm;
__u8 smac_sel;
__be16 rx_chan_rx_rpl_iq;
__be32 maci_to_matchtypem;
__u8 ptcl;
__u8 ptclm;
__u8 ttyp;
__u8 ttypm;
__be16 ivlan;
__be16 ivlanm;
__be16 ovlan;
__be16 ovlanm;
__u8 lip[16];
__u8 lipm[16];
__u8 fip[16];
__u8 fipm[16];
__be16 lp;
__be16 lpm;
__be16 fp;
__be16 fpm;
__be16 r7;
__u8 sma[6];
__be16 r8;
__u8 filter_type_swapmac;
__u8 natmode_to_ulp_type;
__be16 newlport;
__be16 newfport;
__u8 newlip[16];
__u8 newfip[16];
__be32 natseqcheck;
__be32 r9;
__be64 r10;
__be64 r11;
__be64 r12;
__be64 r13;
};
#define FW_FILTER_WR_TID_S 12
#define FW_FILTER_WR_TID_M 0xfffff
#define FW_FILTER_WR_TID_V(x) ((x) << FW_FILTER_WR_TID_S)
@@ -385,6 +431,32 @@ struct fw_filter_wr {
#define FW_FILTER_WR_RX_RPL_IQ_G(x) \
(((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M)
#define FW_FILTER2_WR_FILTER_TYPE_S 1
#define FW_FILTER2_WR_FILTER_TYPE_M 0x1
#define FW_FILTER2_WR_FILTER_TYPE_V(x) ((x) << FW_FILTER2_WR_FILTER_TYPE_S)
#define FW_FILTER2_WR_FILTER_TYPE_G(x) \
(((x) >> FW_FILTER2_WR_FILTER_TYPE_S) & FW_FILTER2_WR_FILTER_TYPE_M)
#define FW_FILTER2_WR_FILTER_TYPE_F FW_FILTER2_WR_FILTER_TYPE_V(1U)
#define FW_FILTER2_WR_NATMODE_S 5
#define FW_FILTER2_WR_NATMODE_M 0x7
#define FW_FILTER2_WR_NATMODE_V(x) ((x) << FW_FILTER2_WR_NATMODE_S)
#define FW_FILTER2_WR_NATMODE_G(x) \
(((x) >> FW_FILTER2_WR_NATMODE_S) & FW_FILTER2_WR_NATMODE_M)
#define FW_FILTER2_WR_NATFLAGCHECK_S 4
#define FW_FILTER2_WR_NATFLAGCHECK_M 0x1
#define FW_FILTER2_WR_NATFLAGCHECK_V(x) ((x) << FW_FILTER2_WR_NATFLAGCHECK_S)
#define FW_FILTER2_WR_NATFLAGCHECK_G(x) \
(((x) >> FW_FILTER2_WR_NATFLAGCHECK_S) & FW_FILTER2_WR_NATFLAGCHECK_M)
#define FW_FILTER2_WR_NATFLAGCHECK_F FW_FILTER2_WR_NATFLAGCHECK_V(1U)
#define FW_FILTER2_WR_ULP_TYPE_S 0
#define FW_FILTER2_WR_ULP_TYPE_M 0xf
#define FW_FILTER2_WR_ULP_TYPE_V(x) ((x) << FW_FILTER2_WR_ULP_TYPE_S)
#define FW_FILTER2_WR_ULP_TYPE_G(x) \
(((x) >> FW_FILTER2_WR_ULP_TYPE_S) & FW_FILTER2_WR_ULP_TYPE_M)
#define FW_FILTER_WR_MACI_S 23
#define FW_FILTER_WR_MACI_M 0x1ff
#define FW_FILTER_WR_MACI_V(x) ((x) << FW_FILTER_WR_MACI_S)
@@ -1127,6 +1199,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_SCFGREV = 0x1A,
FW_PARAMS_PARAM_DEV_VPDREV = 0x1B,
FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D,
FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E,
};