Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Buffered powersave frame test is reversed in cfg80211, fix from Felix
    Fietkau.

 2) Remove bogus WARN_ON in openvswitch, from Jarno Rajahalme.

 3) Fix some tg3 ethtool logic bugs, and one that would cause no
    interrupts to be generated when rx-coalescing is set to 0.  From
    Satish Baddipadige and Siva Reddy Kallam.

 4) QLCNIC mailbox corruption and napi budget handling fix from Manish
    Chopra.

 5) Fix fib_trie logic when walking the trie during /proc/net/route
    output that can access a stale node pointer.  From David Forster.

 6) Several sctp_diag fixes from Phil Sutter.

 7) PAUSE frame handling fixes in mlxsw driver from Ido Schimmel.

 8) Checksum fixup fixes in bpf from Daniel Borkmann.

 9) Memory leaks in nfnetlink, from Liping Zhang.

10) Use after free in rxrpc, from David Howells.

11) Use after free in new skb_array code of macvtap driver, from Jason
    Wang.

12) Calipso resource leak, from Colin Ian King.

13) mediatek bug fixes (missing stats sync init, etc.) from Sean Wang.

14) Fix bpf non-linear packet write helpers, from Daniel Borkmann.

15) Fix lockdep splats in macsec, from Sabrina Dubroca.

16) hv_netvsc bug fixes from Vitaly Kuznetsov, mostly to do with VF
    handling.

17) Various tc-action bug fixes, from Cong Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (116 commits)
  net_sched: allow flushing tc police actions
  net_sched: unify the init logic for act_police
  net_sched: convert tcf_exts from list to pointer array
  net_sched: move tc offload macros to pkt_cls.h
  net_sched: fix a typo in tc_for_each_action()
  net_sched: remove an unnecessary list_del()
  net_sched: remove the leftover cleanup_a()
  mlxsw: spectrum: Allow packets to be trapped from any PG
  mlxsw: spectrum: Unmap 802.1Q FID before destroying it
  mlxsw: spectrum: Add missing rollbacks in error path
  mlxsw: reg: Fix missing op field fill-up
  mlxsw: spectrum: Trap loop-backed packets
  mlxsw: spectrum: Add missing packet traps
  mlxsw: spectrum: Mark port as active before registering it
  mlxsw: spectrum: Create PVID vPort before registering netdevice
  mlxsw: spectrum: Remove redundant errors from the code
  mlxsw: spectrum: Don't return upon error in removal path
  i40e: check for and deal with non-contiguous TCs
  ixgbe: Re-enable ability to toggle VLAN filtering
  ixgbe: Force VLNCTRL.VFE to be set in all VMDq paths
  ...
Linus Torvalds 2016-08-17 17:26:58 -07:00
commit 184ca82348
116 changed files with 1489 additions and 1112 deletions


@ -790,13 +790,12 @@ The kernel interface functions are as follows:
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
data message has been used up, rxrpc_kernel_data_delivered() should be
called on it..
data message has been used up, rxrpc_kernel_data_consumed() should be
called on it.
Non-data messages should be handed to rxrpc_kernel_free_skb() to dispose
of. It is possible to get extra refs on all types of message for later
freeing, but this may pin the state of a call until the message is finally
freed.
Messages should be handed to rxrpc_kernel_free_skb() to dispose of. It
is possible to get extra refs on all types of message for later freeing,
but this may pin the state of a call until the message is finally freed.
(*) Accept an incoming call.
@ -821,12 +820,14 @@ The kernel interface functions are as follows:
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
(*) Record the delivery of a data message and free it.
(*) Record the delivery of a data message.
void rxrpc_kernel_data_delivered(struct sk_buff *skb);
void rxrpc_kernel_data_consumed(struct rxrpc_call *call,
struct sk_buff *skb);
This is used to record a data message as having been delivered and to
update the ACK state for the call. The socket buffer will be freed.
This is used to record a data message as having been consumed and to
update the ACK state for the call. The message must still be passed to
rxrpc_kernel_free_skb() for disposal by the caller.
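For reference, a minimal sketch of the receive-side pattern this documentation
change describes (consume data messages, then free every message); kernel
context is assumed, and everything outside the three documented
rxrpc_kernel_*() calls is illustrative:

	/* Sketch only: dispose of one received rxrpc message.  Whether
	 * the skb is a data message ("is_data") is assumed known to the
	 * caller; this helper is not part of the actual API.
	 */
	static void example_dispose(struct rxrpc_call *call,
				    struct sk_buff *skb, bool is_data)
	{
		if (is_data) {
			if (rxrpc_kernel_is_data_last(skb))
				pr_debug("last data message in sequence\n");

			/* Mark consumed so the call's ACK state advances. */
			rxrpc_kernel_data_consumed(call, skb);
		}

		/* Every message, data or not, is freed by the caller. */
		rxrpc_kernel_free_skb(skb);
	}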
(*) Free a message.


@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
"0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
"0 for stable (default), 1 for bandwidth, "
"2 for count");
module_param(min_links, int, 0);


@ -258,7 +258,7 @@
* BCM5325 and BCM5365 share most definitions below
*/
#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
#define ARLTBL_MAC_MASK 0xffffffffffff
#define ARLTBL_MAC_MASK 0xffffffffffffULL
#define ARLTBL_VID_S 48
#define ARLTBL_VID_MASK_25 0xff
#define ARLTBL_VID_MASK 0xfff


@ -3187,6 +3187,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
return err;
}
#ifdef CONFIG_NET_DSA_HWMON
static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
int reg)
{
@ -3212,6 +3213,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
return ret;
}
#endif
static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
{


@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
#else
return -ENODEV;
#endif
}


@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface)
priv->dev = dev;
priv->regs = devm_ioremap_resource(dev, &res_regs);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
if (IS_ERR(priv->regs)) {
err = PTR_ERR(priv->regs);
goto out_put_node;
}
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);


@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
info->data = TG3_RSS_MAX_NUM_QS;
}
/* The first interrupt vector only
* handles link interrupts.
*/
info->data -= 1;
return 0;
default:
@ -14014,6 +14010,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
}
if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
(!ec->rx_coalesce_usecs) ||
(ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
(ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
(ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||


@ -403,11 +403,11 @@
#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
#define MACB_CAPS_USRIO_DISABLED 0x00000010
#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
#define MACB_CAPS_JUMBO 0x00000010
/* Bit manipulation macros */
#define MACB_BIT(name) \


@ -1299,6 +1299,7 @@ static int
dm9000_open(struct net_device *dev)
{
struct board_info *db = netdev_priv(dev);
unsigned int irq_flags = irq_get_trigger_type(dev->irq);
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev)
/* If there is no IRQ type specified, tell the user that this is a
* problem
*/
if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
if (irq_flags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
irq_flags |= IRQF_SHARED;
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needs by DM9000B */
@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev)
/* Initialize DM9000 board */
dm9000_init_dm9000(dev);
if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
dev->name, dev))
if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
return -EAGAIN;
/* Now that we have an interrupt handler hooked up we can unmask
* our interrupts


@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
{"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
{"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
{"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
{"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
{"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
{"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
{"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
{"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},


@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
| FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX
| FLAG2_DMA_BURST,
| FLAG2_DMA_BURST
| FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX,
| FLAG2_NO_DISABLE_RX
| FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,


@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))


@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
| FLAG2_HAS_EEE
| FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 26,
.max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,


@ -4302,6 +4302,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
clear_bit(__E1000_RESETTING, &adapter->state);
}
/**
* e1000e_sanitize_systim - sanitize raw cycle counter reads
* @hw: pointer to the HW structure
* @systim: cycle_t value read, sanitized and returned
*
* Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
* check to see that the time is incrementing at a reasonable
* rate and is a multiple of incvalue.
**/
static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
{
u64 time_delta, rem, temp;
cycle_t systim_next;
u32 incvalue;
int i;
incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
/* latch SYSTIMH on read of SYSTIML */
systim_next = (cycle_t)er32(SYSTIML);
systim_next |= (cycle_t)er32(SYSTIMH) << 32;
time_delta = systim_next - systim;
temp = time_delta;
/* VMWare users have seen incvalue of zero, don't div / 0 */
rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
systim = systim_next;
if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
break;
}
return systim;
}
/**
* e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
* @cc: cyclecounter structure
@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
cc);
struct e1000_hw *hw = &adapter->hw;
u32 systimel, systimeh;
cycle_t systim, systim_next;
cycle_t systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
* we read SYSTIMH, the value of SYSTIMH has been incremented and we
@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
systim = (cycle_t)systimel;
systim |= (cycle_t)systimeh << 32;
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
u64 time_delta, rem, temp;
u32 incvalue;
int i;
if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
systim = e1000e_sanitize_systim(hw, systim);
/* errata for 82574/82583 possible bad bits read from SYSTIMH/L
* check to see that the time is incrementing at a reasonable
* rate and is a multiple of incvalue
*/
incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
/* latch SYSTIMH on read of SYSTIML */
systim_next = (cycle_t)er32(SYSTIML);
systim_next |= (cycle_t)er32(SYSTIMH) << 32;
time_delta = systim_next - systim;
temp = time_delta;
/* VMWare users have seen incvalue of zero, don't div / 0 */
rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
systim = systim_next;
if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
(rem == 0))
break;
}
}
return systim;
}
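The overflow hazard described in the comment above (SYSTIML wrapping between
the two halves of a split 64-bit read) is a generic latch problem. One
standard latch-safe pattern, shown here as a standalone sketch rather than
this driver's exact code, re-reads the high word until it is stable:

	#include <stdint.h>

	/* Sketch of a latch-safe 64-bit read from two 32-bit halves: if
	 * the high word changed across the low-word read, the low word
	 * wrapped in between, so retry with the fresh high word.
	 * rd_lo/rd_hi stand in for the SYSTIML/SYSTIMH register reads.
	 */
	static uint64_t read_split_counter(uint32_t (*rd_lo)(void),
					   uint32_t (*rd_hi)(void))
	{
		uint32_t hi = rd_hi(), lo, hi2;

		for (;;) {
			lo = rd_lo();
			hi2 = rd_hi();
			if (hi2 == hi)
				break;	/* no wrap between the two reads */
			hi = hi2;	/* wrapped; retry with new high word */
		}
		return ((uint64_t)hi << 32) | lo;
	}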


@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
**/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
int i, tc_unused = 0;
u8 num_tc = 0;
int i;
u8 ret = 0;
/* Scan the ETS Config Priority Table to find
* traffic class enabled for a given priority
* and use the traffic class index to get the
* number of traffic classes enabled
* and create a bitmask of enabled TCs
*/
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
if (dcbcfg->etscfg.prioritytable[i] > num_tc)
num_tc = dcbcfg->etscfg.prioritytable[i];
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
/* Now scan the bitmask to check for
* contiguous TCs starting with TC0
*/
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (num_tc & BIT(i)) {
if (!tc_unused) {
ret++;
} else {
pr_err("Non-contiguous TC - Disabling DCB\n");
return 1;
}
} else {
tc_unused = 1;
}
}
/* Traffic class index starts from zero so
* increment to return the actual count
*/
return num_tc + 1;
/* There is always at least TC0 */
if (!ret)
ret = 1;
return ret;
}
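Stated in isolation, the rule the new code enforces is: traffic classes must
be enabled contiguously starting at TC0, and a gap followed by another
enabled TC is rejected (the driver then falls back to a single TC). A
standalone, compilable sketch of the same check, with illustrative names:

	#include <stdbool.h>

	#define BIT(n) (1U << (n))

	/* Returns the number of contiguous TCs starting at TC0, or -1 if
	 * the bitmap has a hole (e.g. 0b101: TC2 set but TC1 clear).
	 */
	static int contiguous_tc_count(unsigned int tc_bitmap, int max_tc)
	{
		int i, count = 0;
		bool gap = false;

		for (i = 0; i < max_tc; i++) {
			if (tc_bitmap & BIT(i)) {
				if (gap)
					return -1;
				count++;
			} else {
				gap = true;
			}
		}
		return count ? count : 1;  /* there is always at least TC0 */
	}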
/**


@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
}
}
shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
struct igb_adapter *adapter = q_vector->adapter;
int adjust = 0;
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
* Field: Reserved Reserved SYSTIML SYSTIMH
*/
igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
if (adapter->hw.mac.type == e1000_i210) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_RX_LATENCY_10;
break;
case SPEED_100:
adjust = IGB_I210_RX_LATENCY_100;
break;
case SPEED_1000:
adjust = IGB_I210_RX_LATENCY_1000;
break;
}
}
skb_hwtstamps(skb)->hwtstamp =
ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
}
/**
@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
}
}
skb_hwtstamps(skb)->hwtstamp =
ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
/* Update the last_rx_timestamp timer in order to enable watchdog check
* for error case of latched timestamp on a dropped packet.


@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
/* fall through */
case ixgbe_mac_82598EB:
/* legacy case, we can just disable VLAN filtering */
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
vlnctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
/* Set flag so we don't redo unnecessary work */
adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
/* For VMDq and SR-IOV we must leave VLAN filtering enabled */
vlnctrl |= IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* Add PF to all active pools */
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
/* Set VLAN filtering to enabled */
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlnctrl |= IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
break;
/* fall through */
case ixgbe_mac_82598EB:
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
vlnctrl |= IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
struct tcf_exts *exts, u64 *action, u8 *queue)
{
const struct tc_action *a;
LIST_HEAD(actions);
int err;
if (tc_no_actions(exts))
return -EINVAL;
tc_for_each_action(a, exts) {
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
/* Drop action */
if (is_tcf_gact_shot(a)) {
@ -9517,6 +9525,7 @@ skip_sriov:
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXALL |


@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
case PHY_INTERFACE_MODE_MII:
ge_mode = 1;
break;
case PHY_INTERFACE_MODE_RMII:
case PHY_INTERFACE_MODE_REVMII:
ge_mode = 2;
break;
case PHY_INTERFACE_MODE_RMII:
if (!mac->id)
goto err_phy;
ge_mode = 3;
break;
default:
dev_err(eth->dev, "invalid phy_mode\n");
return -1;
goto err_phy;
}
/* put the gmac into the right mode */
@ -263,13 +267,25 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
if (of_phy_is_fixed_link(mac->of_node))
mac->phy_dev->supported |=
SUPPORTED_Pause | SUPPORTED_Asym_Pause;
mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
of_node_put(np);
return 0;
err_phy:
of_node_put(np);
dev_err(eth->dev, "invalid phy_mode\n");
return -EINVAL;
}
static int mtk_mdio_init(struct mtk_eth *eth)
@ -542,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
return &ring->buf[idx];
}
static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
dma_unmap_single(dev,
dma_unmap_single(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
dma_unmap_page(dev,
dma_unmap_page(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
@ -595,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (skb_vlan_tag_present(skb))
txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
mapped_addr = dma_map_single(&dev->dev, skb->data,
mapped_addr = dma_map_single(eth->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
return -ENOMEM;
WRITE_ONCE(itxd->txd1, mapped_addr);
@ -623,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
n_desc++;
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
frag_map_size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
goto err_dma;
if (i == nr_frags - 1 &&
@ -679,7 +695,7 @@ err_dma:
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
mtk_tx_unmap(&dev->dev, tx_buf);
mtk_tx_unmap(eth, tx_buf);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@ -836,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev->stats.rx_dropped++;
goto release_desc;
}
dma_addr = dma_map_single(&eth->netdev[mac]->dev,
dma_addr = dma_map_single(eth->dev,
new_data + NET_SKB_PAD,
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
@ -855,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
dma_unmap_single(&netdev->dev, trxd.rxd1,
dma_unmap_single(eth->dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
@ -937,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
done[mac]++;
budget--;
}
mtk_tx_unmap(eth->dev, tx_buf);
mtk_tx_unmap(eth, tx_buf);
ring->last_free = desc;
atomic_inc(&ring->free_count);
@ -1092,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
mtk_tx_unmap(eth->dev, &ring->buf[i]);
mtk_tx_unmap(eth, &ring->buf[i]);
kfree(ring->buf);
ring->buf = NULL;
}
@ -1751,6 +1767,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
goto free_netdev;
}
spin_lock_init(&mac->hw_stats->stats_lock);
u64_stats_init(&mac->hw_stats->syncp);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);


@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
u32 *action, u32 *flow_tag)
{
const struct tc_action *a;
LIST_HEAD(actions);
if (tc_no_actions(exts))
return -EINVAL;
@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
*action = 0;
tc_for_each_action(a, exts) {
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
if (*action)
return -EINVAL;
@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
u32 *action, u32 *dest_vport)
{
const struct tc_action *a;
LIST_HEAD(actions);
if (tc_no_actions(exts))
return -EINVAL;
*action = 0;
tc_for_each_action(a, exts) {
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
if (*action)
return -EINVAL;
@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
struct tc_action *a;
struct mlx5_fc *counter;
LIST_HEAD(actions);
u64 bytes;
u64 packets;
u64 lastuse;
@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
tc_for_each_action(a, f->exts)
tcf_exts_to_list(f->exts, &actions);
list_for_each_entry(a, &actions, list)
tcf_action_stats_update(a, bytes, packets, lastuse);
return 0;
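The same mechanical conversion recurs in the ixgbe, mlx5, and mlxsw hunks of
this merge. The before/after shape, as a sketch in kernel context with the
loop body elided:

	/* Before: iterate the actions embedded in the exts directly. */
	tc_for_each_action(a, exts) {
		/* ... */
	}

	/* After: snapshot the actions into a local list head, then
	 * walk that list.
	 */
	LIST_HEAD(actions);

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* ... */
	}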


@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
/* reg_ritr_lb_en
* Loop-back filter enable for unicast packets.
* If the flag is set then loop-back filter for unicast packets is
* implemented on the RIF. Multicast packets are always subject to
* loop-back filtering.
* Access: RW
*/
MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
/* reg_ritr_virtual_router
* Virtual router ID associated with the router interface.
* Access: RW
@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_op_set(payload, op);
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
mlxsw_reg_ritr_lb_en_set(payload, 1);
mlxsw_reg_ritr_mtu_set(payload, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
}
@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload,
{
MLXSW_REG_ZERO(ralue, payload);
mlxsw_reg_ralue_protocol_set(payload, protocol);
mlxsw_reg_ralue_op_set(payload, op);
mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
mlxsw_reg_ralue_entry_type_set(payload,


@ -942,8 +942,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
kfree(mlxsw_sp_vport);
}
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
u16 vid)
static int mlxsw_sp_port_add_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
@ -956,16 +956,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
if (!vid)
return 0;
if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
netdev_warn(dev, "VID=%d already configured\n", vid);
if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
return 0;
}
mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
if (!mlxsw_sp_vport) {
netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
if (!mlxsw_sp_vport)
return -ENOMEM;
}
/* When adding the first VLAN interface on a bridged port we need to
* transition all the active 802.1Q bridge VLANs to use explicit
@ -973,24 +969,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
*/
if (list_is_singular(&mlxsw_sp_port->vports_list)) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err) {
netdev_err(dev, "Failed to set to Virtual mode\n");
if (err)
goto err_port_vp_mode_trans;
}
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
if (err) {
netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
if (err)
goto err_port_vid_learning_set;
}
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
if (err) {
netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
vid);
if (err)
goto err_port_add_vid;
}
return 0;
@ -1010,7 +999,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
int err;
/* VLAN 0 is removed from HW filter when device goes down, but
* it is reserved in our case, so simply return.
@ -1019,23 +1007,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
if (!mlxsw_sp_vport) {
netdev_warn(dev, "VID=%d does not exist\n", vid);
if (WARN_ON(!mlxsw_sp_vport))
return 0;
}
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
if (err) {
netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
vid);
return err;
}
mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
if (err) {
netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
return err;
}
mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
/* Drop FID reference. If this was the last reference the
* resources will be freed.
@ -1048,13 +1025,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
* transition all active 802.1Q bridge VLANs to use VID to FID
* mappings and set port's mode to VLAN mode.
*/
if (list_is_singular(&mlxsw_sp_port->vports_list)) {
err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
if (err) {
netdev_err(dev, "Failed to set to VLAN mode\n");
return err;
}
}
if (list_is_singular(&mlxsw_sp_port->vports_list))
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
@ -1149,6 +1121,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
const struct tc_action *a;
LIST_HEAD(actions);
int err;
if (!tc_single_action(cls->exts)) {
@ -1156,7 +1129,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOTSUPP;
}
tc_for_each_action(a, cls->exts) {
tcf_exts_to_list(cls->exts, &actions);
list_for_each_entry(a, &actions, list) {
if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
return -ENOTSUPP;
@ -2076,6 +2050,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
mlxsw_sp_port->pvid = 1;
return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}
static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
@ -2191,7 +2177,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_dcb_init;
}
err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
mlxsw_sp_port->local_port);
goto err_port_pvid_vport_create;
}
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@ -2208,24 +2202,23 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_core_port_init;
}
err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
if (err)
goto err_port_vlan_init;
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
return 0;
err_port_vlan_init:
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
unregister_netdev(dev);
err_register_netdev:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
err_port_pvid_vport_create:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
@ -2245,12 +2238,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
if (!mlxsw_sp_port)
return;
mlxsw_sp->ports[local_port] = NULL;
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
@ -2659,6 +2652,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
.local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_ARPUC,
},
{
.func = mlxsw_sp_rx_listener_func,
.local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_MTUERROR,
},
{
.func = mlxsw_sp_rx_listener_func,
.local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_TTLERROR,
},
{
.func = mlxsw_sp_rx_listener_func,
.local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_LBERROR,
},
{
.func = mlxsw_sp_rx_listener_func,
.local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_OSPF,
},
{
.func = mlxsw_sp_rx_listener_func,
.local_port = MLXSW_PORT_DONT_CARE,


@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
u16 vid);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);


@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,


@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
char pfcc_pl[MLXSW_REG_PFCC_LEN];
mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
int err;
if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
pfc->pfc_en) {
if (pause_en && pfc->pfc_en) {
netdev_err(dev, "PAUSE frames already enabled on port\n");
return -EINVAL;
}
err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
mlxsw_sp_port->dcb.ets->prio_tc,
false, pfc);
pause_en, pfc);
if (err) {
netdev_err(dev, "Failed to configure port's headroom for PFC\n");
return err;
@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
err_port_pfc_set:
__mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
mlxsw_sp_port->dcb.ets->prio_tc, false,
mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
mlxsw_sp_port->dcb.pfc);
return err;
}


@ -1651,9 +1651,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
const struct mlxsw_sp_router_fib4_add_info *info = data;
struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
struct mlxsw_sp_vr *vr = fib_entry->vr;
mlxsw_sp_fib_entry_destroy(fib_entry);
mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
mlxsw_sp_vr_put(mlxsw_sp, vr);
kfree(info);
}


@ -450,6 +450,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
kfree(f);
mlxsw_sp_fid_map(mlxsw_sp, fid, false);
mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}
@ -997,13 +999,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
}
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end, bool init)
u16 vid_begin, u16 vid_end)
{
struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, pvid;
int err;
if (!init && !mlxsw_sp_port->bridged)
if (!mlxsw_sp_port->bridged)
return -EINVAL;
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@ -1014,9 +1016,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
if (init)
goto out;
pvid = mlxsw_sp_port->pvid;
if (pvid >= vid_begin && pvid <= vid_end) {
err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
@ -1028,7 +1027,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
out:
/* Changing activity bits only if HW operation succeded */
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
@ -1039,8 +1037,8 @@ out:
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan)
{
return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
vlan->vid_begin, vlan->vid_end, false);
return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
vlan->vid_end);
}
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
@ -1048,7 +1046,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
u16 vid;
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}
static int
@ -1546,32 +1544,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_fdb_fini(mlxsw_sp);
}
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
int err;
/* Allow only untagged packets to ingress and tag them internally
* with VID 1.
*/
mlxsw_sp_port->pvid = 1;
err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
true);
if (err) {
netdev_err(dev, "Unable to init VLANs\n");
return err;
}
/* Add implicit VLAN interface in the device, so that untagged
* packets will be classified to the default vFID.
*/
err = mlxsw_sp_port_add_vid(dev, 0, 1);
if (err)
netdev_err(dev, "Failed to configure default vFID\n");
return err;
}
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;


@ -56,6 +56,10 @@ enum {
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
MLXSW_TRAP_ID_OSPF = 0x55,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,


@ -52,40 +52,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
DCBX_APP_SF_ETHTYPE);
}
static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
{
u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
/* Old MFW */
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
return qed_dcbx_app_ethtype(app_info_bitmap);
return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
}
static bool qed_dcbx_app_port(u32 app_info_bitmap)
{
return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_PORT);
}
static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
{
return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
proto_id == QED_ETH_TYPE_DEFAULT);
u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
/* Old MFW */
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
return qed_dcbx_app_port(app_info_bitmap);
return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
}
static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
return !!(qed_dcbx_app_port(app_info_bitmap) &&
proto_id == QED_TCP_PORT_ISCSI);
bool ethtype;
if (ieee)
ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
else
ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
}
static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
proto_id == QED_ETH_TYPE_FCOE);
bool port;
if (ieee)
port = qed_dcbx_ieee_app_port(app_info_bitmap,
DCBX_APP_SF_IEEE_TCP_PORT);
else
port = qed_dcbx_app_port(app_info_bitmap);
return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
}
static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
proto_id == QED_ETH_TYPE_ROCE);
bool ethtype;
if (ieee)
ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
else
ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
}
static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
return !!(qed_dcbx_app_port(app_info_bitmap) &&
proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
bool ethtype;
if (ieee)
ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
else
ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
}
static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
bool port;
if (ieee)
port = qed_dcbx_ieee_app_port(app_info_bitmap,
DCBX_APP_SF_IEEE_UDP_PORT);
else
port = qed_dcbx_app_port(app_info_bitmap);
return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
}
static void
@ -164,17 +218,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
static bool
qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
u32 app_prio_bitmap,
u16 id, enum dcbx_protocol_type *type)
u16 id, enum dcbx_protocol_type *type, bool ieee)
{
if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_FCOE;
} else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
} else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ROCE;
} else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
} else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ISCSI;
} else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
} else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ETH;
} else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
} else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ROCE_V2;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
@ -194,17 +248,18 @@ static int
qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
struct qed_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, bool dcbx_enabled)
u32 pri_tc_tbl, int count, u8 dcbx_version)
{
u8 tc, priority_map;
enum dcbx_protocol_type type;
bool enable, ieee;
u16 protocol_id;
int priority;
bool enable;
int i;
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
/* Parse APP TLV */
for (i = 0; i < count; i++) {
protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
@ -219,7 +274,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
protocol_id, &type)) {
protocol_id, &type, ieee)) {
/* ETH always have the enable bit reset, as it gets
* vlan information per packet. For other protocols,
* should be set according to the dcbx_enabled
@ -275,15 +330,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
struct dcbx_ets_feature *p_ets;
struct qed_hw_info *p_info;
u32 pri_tc_tbl, flags;
bool dcbx_enabled;
u8 dcbx_version;
int num_entries;
int rc = 0;
/* If DCBx version is non zero, then negotiation was
* successfuly performed
*/
flags = p_hwfn->p_dcbx_info->operational.flags;
dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
p_app = &p_hwfn->p_dcbx_info->operational.features.app;
p_tbl = p_app->app_pri_tbl;
@ -295,13 +347,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_enabled);
num_entries, dcbx_version);
if (rc)
return rc;
p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = dcbx_enabled;
data.dcbx_enabled = !!dcbx_version;
qed_dcbx_dp_protocol(p_hwfn, &data);
@ -400,7 +452,7 @@ static void
qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
struct qed_dcbx_params *p_params)
struct qed_dcbx_params *p_params, bool ieee)
{
struct qed_app_entry *entry;
u8 pri_map;
@ -414,15 +466,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
DCBX_APP_NUM_ENTRIES);
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_params->app_entry[i];
entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_SF));
if (ieee) {
u8 sf_ieee;
u32 val;
sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_SF_IEEE);
switch (sf_ieee) {
case DCBX_APP_SF_IEEE_RESERVED:
/* Old MFW */
val = QED_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_SF);
entry->sf_ieee = val ?
QED_DCBX_SF_IEEE_TCP_UDP_PORT :
QED_DCBX_SF_IEEE_ETHTYPE;
break;
case DCBX_APP_SF_IEEE_ETHTYPE:
entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
break;
case DCBX_APP_SF_IEEE_TCP_PORT:
entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
break;
case DCBX_APP_SF_IEEE_UDP_PORT:
entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
break;
case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
break;
}
} else {
entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_SF));
}
pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
entry->prio = ffs(pri_map) - 1;
entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_PROTOCOL_ID);
qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
entry->proto_id,
&entry->proto_type);
&entry->proto_type, ieee);
}
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
@ -483,7 +566,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]);
pri_map = p_ets->pri_tc_tbl[0];
for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@ -500,9 +583,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
struct dcbx_ets_feature *p_ets,
u32 pfc, struct qed_dcbx_params *p_params)
u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
{
qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
}
@ -516,7 +599,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
p_feat->pfc, &params->local.params);
p_feat->pfc, &params->local.params, false);
params->local.valid = true;
}
@ -529,7 +612,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
p_feat = &p_hwfn->p_dcbx_info->remote.features;
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
p_feat->pfc, &params->remote.params);
p_feat->pfc, &params->remote.params, false);
params->remote.valid = true;
}
@ -574,7 +657,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
p_feat->pfc, &params->operational.params);
p_feat->pfc, &params->operational.params,
p_operational->ieee);
qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
p_operational->err = err;
@ -944,7 +1028,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
p_ets->pri_tc_tbl[0] |= val;
}
p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
for (i = 0; i < 2; i++) {
p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
@ -954,7 +1037,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct qed_dcbx_params *p_params)
struct qed_dcbx_params *p_params, bool ieee)
{
u32 *entry;
int i;
@ -975,12 +1058,36 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_app->app_pri_tbl[i].entry;
*entry &= ~DCBX_APP_SF_MASK;
if (p_params->app_entry[i].ethtype)
*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
DCBX_APP_SF_SHIFT);
else
*entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT);
if (ieee) {
*entry &= ~DCBX_APP_SF_IEEE_MASK;
switch (p_params->app_entry[i].sf_ieee) {
case QED_DCBX_SF_IEEE_ETHTYPE:
*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
DCBX_APP_SF_IEEE_SHIFT);
break;
case QED_DCBX_SF_IEEE_TCP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
break;
case QED_DCBX_SF_IEEE_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
break;
case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
break;
}
} else {
*entry &= ~DCBX_APP_SF_MASK;
if (p_params->app_entry[i].ethtype)
*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
DCBX_APP_SF_SHIFT);
else
*entry |= ((u32)DCBX_APP_SF_PORT <<
DCBX_APP_SF_SHIFT);
}
*entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
*entry |= ((u32)p_params->app_entry[i].proto_id <<
DCBX_APP_PROTOCOL_ID_SHIFT);
@ -995,15 +1102,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
struct dcbx_local_params *local_admin,
struct qed_dcbx_set *params)
{
bool ieee = false;
local_admin->flags = 0;
memcpy(&local_admin->features,
&p_hwfn->p_dcbx_info->operational.features,
sizeof(local_admin->features));
if (params->enabled)
if (params->enabled) {
local_admin->config = params->ver_num;
else
ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
} else {
local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
}
if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
@ -1015,7 +1126,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
&params->config.params);
&params->config.params, ieee);
}
int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
@ -1596,8 +1707,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev,
if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
break;
/* First empty slot */
if (!entry->proto_id)
if (!entry->proto_id) {
dcbx_set.config.params.num_app_entries++;
break;
}
}
if (i == QED_DCBX_MAX_APP_PROTOCOL) {
@ -2117,8 +2230,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
(entry->proto_id == app->protocol))
break;
/* First empty slot */
if (!entry->proto_id)
if (!entry->proto_id) {
dcbx_set.config.params.num_app_entries++;
break;
}
}
if (i == QED_DCBX_MAX_APP_PROTOCOL) {


@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_SF_SHIFT 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_SF_PORT 1
#define DCBX_APP_SF_IEEE_MASK 0x0000f000
#define DCBX_APP_SF_IEEE_SHIFT 12
#define DCBX_APP_SF_IEEE_RESERVED 0
#define DCBX_APP_SF_IEEE_ETHTYPE 1
#define DCBX_APP_SF_IEEE_TCP_PORT 2
#define DCBX_APP_SF_IEEE_UDP_PORT 3
#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
#define DCBX_APP_PROTOCOL_ID_SHIFT 16
};
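The qed hunks above read these fields through QED_MFW_GET_FIELD. Expanded by
hand, assuming the usual mask-and-shift composition (an illustrative,
userspace-compilable equivalent, not the driver macro itself), extracting the
IEEE selection field of an APP entry looks like:

	#include <stdint.h>

	#define DCBX_APP_SF_IEEE_MASK	0x0000f000
	#define DCBX_APP_SF_IEEE_SHIFT	12

	/* (entry & MASK) >> SHIFT, the shape QED_MFW_GET_FIELD expands to. */
	static uint8_t app_entry_sf_ieee(uint32_t entry)
	{
		return (entry & DCBX_APP_SF_IEEE_MASK) >> DCBX_APP_SF_IEEE_SHIFT;
	}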


@ -37,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
#define _QLCNIC_LINUX_SUBVERSION 64
#define QLCNIC_LINUX_VERSIONID "5.3.64"
#define _QLCNIC_LINUX_SUBVERSION 65
#define QLCNIC_LINUX_VERSIONID "5.3.65"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))


@ -102,7 +102,6 @@
#define QLCNIC_RESPONSE_DESC 0x05
#define QLCNIC_LRO_DESC 0x12
#define QLCNIC_TX_POLL_BUDGET 128
#define QLCNIC_TCP_HDR_SIZE 20
#define QLCNIC_TCP_TS_OPTION_SIZE 12
#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_adapter *adapter;
budget = QLCNIC_TX_POLL_BUDGET;
tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
adapter = tx_ring->adapter;
work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);


@ -156,10 +156,8 @@ struct qlcnic_vf_info {
spinlock_t vlan_list_lock; /* Lock for VLAN list */
};
struct qlcnic_async_work_list {
struct qlcnic_async_cmd {
struct list_head list;
struct work_struct work;
void *ptr;
struct qlcnic_cmd_args *cmd;
};
@ -168,7 +166,10 @@ struct qlcnic_back_channel {
struct workqueue_struct *bc_trans_wq;
struct workqueue_struct *bc_async_wq;
struct workqueue_struct *bc_flr_wq;
struct list_head async_list;
struct qlcnic_adapter *adapter;
struct list_head async_cmd_list;
struct work_struct vf_async_work;
spinlock_t queue_lock; /* async_cmd_list queue lock */
};
struct qlcnic_sriov {


@ -29,6 +29,7 @@
#define QLC_83XX_VF_RESET_FAIL_THRESH 8
#define QLC_BC_CMD_MAX_RETRY_CNT 5
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
}
bc->bc_async_wq = wq;
INIT_LIST_HEAD(&bc->async_list);
INIT_LIST_HEAD(&bc->async_cmd_list);
INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
spin_lock_init(&bc->queue_lock);
bc->adapter = adapter;
for (i = 0; i < num_vfs; i++) {
vf = &sriov->vf_info[i];
@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
struct list_head *head = &bc->async_list;
struct qlcnic_async_work_list *entry;
struct list_head *head = &bc->async_cmd_list;
struct qlcnic_async_cmd *entry;
flush_workqueue(bc->bc_async_wq);
cancel_work_sync(&bc->vf_async_work);
spin_lock(&bc->queue_lock);
while (!list_empty(head)) {
entry = list_entry(head->next, struct qlcnic_async_work_list,
entry = list_entry(head->next, struct qlcnic_async_cmd,
list);
cancel_work_sync(&entry->work);
list_del(&entry->list);
kfree(entry->cmd);
kfree(entry);
}
spin_unlock(&bc->queue_lock);
}
void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
struct qlcnic_async_work_list *entry;
struct qlcnic_adapter *adapter;
struct qlcnic_async_cmd *entry, *tmp;
struct qlcnic_back_channel *bc;
struct qlcnic_cmd_args *cmd;
struct list_head *head;
LIST_HEAD(del_list);
bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
head = &bc->async_cmd_list;
spin_lock(&bc->queue_lock);
list_splice_init(head, &del_list);
spin_unlock(&bc->queue_lock);
list_for_each_entry_safe(entry, tmp, &del_list, list) {
list_del(&entry->list);
cmd = entry->cmd;
__qlcnic_sriov_issue_cmd(bc->adapter, cmd);
kfree(entry);
}
if (!list_empty(head))
queue_work(bc->bc_async_wq, &bc->vf_async_work);
entry = container_of(work, struct qlcnic_async_work_list, work);
adapter = entry->ptr;
cmd = entry->cmd;
__qlcnic_sriov_issue_cmd(adapter, cmd);
return;
}
static struct qlcnic_async_work_list *
qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
static struct qlcnic_async_cmd *
qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
struct qlcnic_cmd_args *cmd)
{
struct list_head *node;
struct qlcnic_async_work_list *entry = NULL;
u8 empty = 0;
struct qlcnic_async_cmd *entry = NULL;
list_for_each(node, &bc->async_list) {
entry = list_entry(node, struct qlcnic_async_work_list, list);
if (!work_pending(&entry->work)) {
empty = 1;
break;
}
}
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return NULL;
if (!empty) {
entry = kzalloc(sizeof(struct qlcnic_async_work_list),
GFP_ATOMIC);
if (entry == NULL)
return NULL;
list_add_tail(&entry->list, &bc->async_list);
}
entry->cmd = cmd;
spin_lock(&bc->queue_lock);
list_add_tail(&entry->list, &bc->async_cmd_list);
spin_unlock(&bc->queue_lock);
return entry;
}
static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
work_func_t func, void *data,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_async_work_list *entry = NULL;
struct qlcnic_async_cmd *entry = NULL;
entry = qlcnic_sriov_get_free_node_async_work(bc);
if (!entry)
entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
if (!entry) {
qlcnic_free_mbx_args(cmd);
kfree(cmd);
return;
}
entry->ptr = data;
entry->cmd = cmd;
INIT_WORK(&entry->work, func);
queue_work(bc->bc_async_wq, &entry->work);
queue_work(bc->bc_async_wq, &bc->vf_async_work);
}
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
if (adapter->need_fw_reset)
return -EIO;
qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
adapter, cmd);
qlcnic_sriov_schedule_async_cmd(bc, cmd);
return 0;
}
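Design-wise, the qlcnic rework above replaces one work item per command with
a single worker and a spinlock-protected queue: producers append an entry and
kick the worker; the worker splices the whole queue out under the lock and
issues the commands without holding it. Schematically (a condensed sketch of
the shape already visible in the hunks, not additional driver code):

	/* Producer side: queue the command, then kick the one worker. */
	spin_lock(&bc->queue_lock);
	list_add_tail(&entry->list, &bc->async_cmd_list);
	spin_unlock(&bc->queue_lock);
	queue_work(bc->bc_async_wq, &bc->vf_async_work);

	/* Worker side: detach everything in one step, process unlocked. */
	spin_lock(&bc->queue_lock);
	list_splice_init(&bc->async_cmd_list, &del_list);
	spin_unlock(&bc->queue_lock);
	list_for_each_entry_safe(entry, tmp, &del_list, list) {
		list_del(&entry->list);
		__qlcnic_sriov_issue_cmd(bc->adapter, entry->cmd);
		kfree(entry);
	}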


@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
ndev->stats.rx_packets++;
kmemleak_not_leak(new_skb);
} else {
ndev->stats.rx_dropped++;
new_skb = skb;
@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
kfree_skb(skb);
goto err_cleanup;
}
kmemleak_not_leak(skb);
}
/* continue even if we didn't manage to submit all
* receive descs


@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = {
static void tsi108_timed_checker(unsigned long dev_ptr);
#ifdef DEBUG
static void dump_eth_one(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev)
TSI_READ(TSI108_EC_RXESTAT),
TSI_READ(TSI108_EC_RXERR), data->rxpending);
}
#endif
/* Synchronization is needed between the thread and up/down events.
* Note that the PHY is accessed through the same registers for both


@ -644,12 +644,6 @@ struct netvsc_reconfig {
u32 event;
};
struct garp_wrk {
struct work_struct dwrk;
struct net_device *netdev;
struct netvsc_device *netvsc_dev;
};
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
@ -667,7 +661,6 @@ struct net_device_context {
struct work_struct work;
u32 msg_enable; /* debug level */
struct garp_wrk gwrk;
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
@ -678,6 +671,15 @@ struct net_device_context {
/* the device is going away */
bool start_remove;
/* State to manage the associated VF interface. */
struct net_device *vf_netdev;
bool vf_inject;
atomic_t vf_use_cnt;
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
};
/* Per netvsc device */
@ -733,15 +735,7 @@ struct netvsc_device {
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
atomic_t open_cnt;
/* State to manage the associated VF interface. */
bool vf_inject;
struct net_device *vf_netdev;
atomic_t vf_use_cnt;
};
static inline struct netvsc_device *


@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
atomic_set(&net_device->open_cnt, 0);
atomic_set(&net_device->vf_use_cnt, 0);
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
net_device->vf_netdev = NULL;
net_device->vf_inject = false;
return net_device;
}
@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev,
nvscdev->send_table[i] = tab[i];
}
static void netvsc_send_vf(struct netvsc_device *nvdev,
static void netvsc_send_vf(struct net_device_context *net_device_ctx,
struct nvsp_message *nvmsg)
{
nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}
static inline void netvsc_receive_inband(struct hv_device *hdev,
struct netvsc_device *nvdev,
struct nvsp_message *nvmsg)
struct net_device_context *net_device_ctx,
struct nvsp_message *nvmsg)
{
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
netvsc_send_vf(nvdev, nvmsg);
netvsc_send_vf(net_device_ctx, nvmsg);
break;
}
}
@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
struct vmpacket_descriptor *desc)
{
struct nvsp_message *nvmsg;
struct net_device_context *net_device_ctx = netdev_priv(ndev);
nvmsg = (struct nvsp_message *)((unsigned long)
desc + (desc->offset8 << 3));
@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_INBAND:
netvsc_receive_inband(device, net_device, nvmsg);
netvsc_receive_inband(device, net_device_ctx, nvmsg);
break;
default:


@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct sk_buff *skb;
struct sk_buff *vf_skb;
struct netvsc_stats *rx_stats;
struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
u32 bytes_recvd = packet->total_data_buflen;
int ret = 0;
if (!net || net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
if (READ_ONCE(netvsc_dev->vf_inject)) {
atomic_inc(&netvsc_dev->vf_use_cnt);
if (!READ_ONCE(netvsc_dev->vf_inject)) {
if (READ_ONCE(net_device_ctx->vf_inject)) {
atomic_inc(&net_device_ctx->vf_use_cnt);
if (!READ_ONCE(net_device_ctx->vf_inject)) {
/*
* We raced; just move on.
*/
atomic_dec(&netvsc_dev->vf_use_cnt);
atomic_dec(&net_device_ctx->vf_use_cnt);
goto vf_injection_done;
}
@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* the host). Deliver these via the VF interface
* in the guest.
*/
vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
csum_info, *data, vlan_tci);
vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
packet, csum_info, *data,
vlan_tci);
if (vf_skb != NULL) {
++netvsc_dev->vf_netdev->stats.rx_packets;
netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
++net_device_ctx->vf_netdev->stats.rx_packets;
net_device_ctx->vf_netdev->stats.rx_bytes +=
bytes_recvd;
netif_receive_skb(vf_skb);
} else {
++net->stats.rx_dropped;
ret = NVSP_STAT_FAIL;
}
atomic_dec(&netvsc_dev->vf_use_cnt);
atomic_dec(&net_device_ctx->vf_use_cnt);
return ret;
}
@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
free_netdev(netdev);
}
static void netvsc_notify_peers(struct work_struct *wrk)
{
struct garp_wrk *gwrk;
gwrk = container_of(wrk, struct garp_wrk, dwrk);
netdev_notify_peers(gwrk->netdev);
atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
}
static struct net_device *get_netvsc_net_device(char *mac)
{
struct net_device *dev, *found = NULL;
@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
if (netvsc_dev == NULL)
if (!netvsc_dev || net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* Take a reference on the module.
*/
try_module_get(THIS_MODULE);
netvsc_dev->vf_netdev = vf_netdev;
net_device_ctx->vf_netdev = vf_netdev;
return NOTIFY_OK;
}
static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
{
net_device_ctx->vf_inject = true;
}
static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
{
net_device_ctx->vf_inject = false;
/* Wait for currently active users to drain out. */
while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
udelay(50);
}
static int netvsc_vf_up(struct net_device *vf_netdev)
{
@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
netvsc_dev->vf_inject = true;
netvsc_inject_enable(net_device_ctx);
/*
* Open the device before switching data path.
@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
netif_carrier_off(ndev);
/*
* Now notify peers. We are scheduling work to
* notify peers; take a reference to prevent
* the VF interface from vanishing.
*/
atomic_inc(&netvsc_dev->vf_use_cnt);
net_device_ctx->gwrk.netdev = vf_netdev;
net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
schedule_work(&net_device_ctx->gwrk.dwrk);
/* Now notify peers through VF device. */
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
return NOTIFY_OK;
}
@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
netvsc_dev->vf_inject = false;
/*
* Wait for currently active users to
* drain out.
*/
while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
udelay(50);
netvsc_inject_disable(net_device_ctx);
netvsc_switch_datapath(ndev, false);
netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
rndis_filter_close(netvsc_dev);
netif_carrier_on(ndev);
/*
* Notify peers.
*/
atomic_inc(&netvsc_dev->vf_use_cnt);
net_device_ctx->gwrk.netdev = ndev;
net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
schedule_work(&net_device_ctx->gwrk.dwrk);
/* Now notify peers through netvsc device. */
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
return NOTIFY_OK;
}
@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
if (netvsc_dev == NULL)
if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
netvsc_dev->vf_netdev = NULL;
netvsc_inject_disable(net_device_ctx);
net_device_ctx->vf_netdev = NULL;
module_put(THIS_MODULE);
return NOTIFY_OK;
}
@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev,
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
atomic_set(&net_device_ctx->vf_use_cnt, 0);
net_device_ctx->vf_netdev = NULL;
net_device_ctx->vf_inject = false;
net->netdev_ops = &device_ops;
net->hw_features = NETVSC_HW_FEATURES;
@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this,
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
/* Avoid Vlan, Bonding dev with same MAC registering as VF */
if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING))
/* Avoid Vlan dev with same MAC registering as VF */
if (event_dev->priv_flags & IFF_802_1Q_VLAN)
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
if (event_dev->priv_flags & IFF_BONDING &&
event_dev->flags & IFF_MASTER)
return NOTIFY_DONE;
switch (event) {
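Earlier in this file's changes, the VF injection path is gated by the vf_inject flag plus the vf_use_cnt reader count: disable flips the flag and busy-waits for readers to drain, while the receive path re-checks the flag after taking its reference to close the race. A toy, single-threaded userspace rendition of that pattern, assuming C11 atomics stand in for the kernel's READ_ONCE/atomic ops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool vf_inject = 1;
static atomic_int vf_use_cnt;

static int rx_try_inject(void)
{
    if (!atomic_load(&vf_inject))
        return 0;
    atomic_fetch_add(&vf_use_cnt, 1);
    if (!atomic_load(&vf_inject)) {      /* raced with disable */
        atomic_fetch_sub(&vf_use_cnt, 1);
        return 0;
    }
    /* ...deliver the packet via the VF netdev... */
    atomic_fetch_sub(&vf_use_cnt, 1);
    return 1;
}

static void inject_disable(void)
{
    atomic_store(&vf_inject, 0);
    while (atomic_load(&vf_use_cnt) != 0)
        ;                                /* the driver udelay(50)s here */
}

int main(void)
{
    printf("injected: %d\n", rx_try_inject());
    inject_disable();
    printf("after disable: %d\n", rx_try_inject());
    return 0;
}

The double-check after the increment mirrors the READ_ONCE()/atomic_dec dance in netvsc_recv_callback() above.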


@ -270,6 +270,7 @@ struct macsec_dev {
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
struct gro_cells gro_cells;
unsigned int nest_level;
};
/**
@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
static struct lock_class_key macsec_netdev_addr_lock_key;
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
@ -2910,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev)
return macsec_priv(dev)->real_dev->ifindex;
}
static int macsec_get_nest_level(struct net_device *dev)
{
return macsec_priv(dev)->nest_level;
}
static const struct net_device_ops macsec_netdev_ops = {
.ndo_init = macsec_dev_init,
.ndo_uninit = macsec_dev_uninit,
@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = {
.ndo_start_xmit = macsec_start_xmit,
.ndo_get_stats64 = macsec_get_stats64,
.ndo_get_iflink = macsec_get_iflink,
.ndo_get_lock_subclass = macsec_get_nest_level,
};
static const struct device_type macsec_type = {
@ -3047,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec)
}
}
static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
unregister_netdevice_queue(dev, head);
list_del_rcu(&macsec->secys);
macsec_del_dev(macsec);
netdev_upper_dev_unlink(real_dev, dev);
macsec_generation++;
}
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
macsec_generation++;
macsec_common_dellink(dev, head);
unregister_netdevice_queue(dev, head);
list_del_rcu(&macsec->secys);
if (list_empty(&rxd->secys)) {
netdev_rx_handler_unregister(real_dev);
kfree(rxd);
}
macsec_del_dev(macsec);
}
static int register_macsec_dev(struct net_device *real_dev,
@ -3181,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
dev_hold(real_dev);
macsec->nest_level = dev_get_nest_level(real_dev) + 1;
netdev_lockdep_set_classes(dev);
lockdep_set_class_and_subclass(&dev->addr_list_lock,
&macsec_netdev_addr_lock_key,
macsec_get_nest_level(dev));
err = netdev_upper_dev_link(real_dev, dev);
if (err < 0)
goto unregister;
/* need to be already registered so that ->init has run and
* the MAC addr is set
*/
@ -3193,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (rx_handler && sci_exists(real_dev, sci)) {
err = -EBUSY;
goto unregister;
goto unlink;
}
err = macsec_add_dev(dev, sci, icv_len);
if (err)
goto unregister;
goto unlink;
if (data)
macsec_changelink_common(dev, data);
@ -3213,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
del_dev:
macsec_del_dev(macsec);
unlink:
netdev_upper_dev_unlink(real_dev, dev);
unregister:
unregister_netdevice(dev);
return err;
@ -3382,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
rxd = macsec_data_rtnl(real_dev);
list_for_each_entry_safe(m, n, &rxd->secys, secys) {
macsec_dellink(m->secy.netdev, &head);
macsec_common_dellink(m->secy.netdev, &head);
}
netdev_rx_handler_unregister(real_dev);
kfree(rxd);
unregister_netdevice_many(&head);
break;
}


@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])


@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q)
rtnl_unlock();
synchronize_rcu();
skb_array_cleanup(&q->skb_array);
sock_put(&q->sk);
}
@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk)
static void macvtap_sock_destruct(struct sock *sk)
{
struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
struct sk_buff *skb;
while ((skb = skb_array_consume(&q->skb_array)) != NULL)
kfree_skb(skb);
skb_array_cleanup(&q->skb_array);
}
static int macvtap_open(struct inode *inode, struct file *file)


@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev,
data[i] = kszphy_get_stat(phydev, i);
}
static int kszphy_suspend(struct phy_device *phydev)
{
/* Disable PHY Interrupts */
if (phy_interrupt_is_valid(phydev)) {
phydev->interrupts = PHY_INTERRUPT_DISABLED;
if (phydev->drv->config_intr)
phydev->drv->config_intr(phydev);
}
return genphy_suspend(phydev);
}
static int kszphy_resume(struct phy_device *phydev)
{
int value;
genphy_resume(phydev);
mutex_lock(&phydev->lock);
value = phy_read(phydev, MII_BMCR);
phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
kszphy_config_intr(phydev);
mutex_unlock(&phydev->lock);
/* Enable PHY Interrupts */
if (phy_interrupt_is_valid(phydev)) {
phydev->interrupts = PHY_INTERRUPT_ENABLED;
if (phydev->drv->config_intr)
phydev->drv->config_intr(phydev);
}
return 0;
}
@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8061,


@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
fl4.flowi4_mark = skb->mark;
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = daddr;
fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
fl4.saddr = *saddr;
rt = ip_route_output_key(vxlan->net, &fl4);
if (!IS_ERR(rt)) {
@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
fl6.saddr = *saddr;
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct rtable *rt = NULL;
const struct iphdr *old_iph;
union vxlan_addr *dst;
union vxlan_addr remote_ip;
union vxlan_addr remote_ip, local_ip;
union vxlan_addr *src;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 src_port = 0, dst_port;
@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = rdst->remote_vni;
dst = &rdst->remote_ip;
src = &vxlan->cfg.saddr;
dst_cache = &rdst->dst_cache;
} else {
if (!info) {
@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
vni = vxlan_tun_id_to_vni(info->key.tun_id);
remote_ip.sa.sa_family = ip_tunnel_info_af(info);
if (remote_ip.sa.sa_family == AF_INET)
if (remote_ip.sa.sa_family == AF_INET) {
remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
else
local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
} else {
remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
}
dst = &remote_ip;
src = &local_ip;
dst_cache = &info->dst_cache;
}
@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (dst->sa.sa_family == AF_INET) {
__be32 saddr;
if (!vxlan->vn4_sock)
goto drop;
sk = vxlan->vn4_sock->sock->sk;
rt = vxlan_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
dst->sin.sin_addr.s_addr, &saddr,
dst->sin.sin_addr.s_addr,
&src->sin.sin_addr.s_addr,
dst_cache, info);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n",
@ -2017,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
/* Bypass encapsulation if the destination is local */
if (rt->rt_flags & RTCF_LOCAL &&
if (!info && rt->rt_flags & RTCF_LOCAL &&
!(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;
@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (err < 0)
goto xmit_tx_error;
udp_tunnel_xmit_skb(rt, sk, skb, saddr,
udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
dst->sin.sin_addr.s_addr, tos, ttl, df,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct dst_entry *ndst;
struct in6_addr saddr;
u32 rt6i_flags;
if (!vxlan->vn6_sock)
@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ndst = vxlan6_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
label, &dst->sin6.sin6_addr, &saddr,
label, &dst->sin6.sin6_addr,
&src->sin6.sin6_addr,
dst_cache, info);
if (IS_ERR(ndst)) {
netdev_dbg(dev, "no route to %pI6\n",
@ -2077,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
/* Bypass encapsulation if the destination is local */
rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
if (rt6i_flags & RTF_LOCAL &&
if (!info && rt6i_flags & RTF_LOCAL &&
!(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;
@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
return;
}
udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
&saddr, &dst->sin6.sin6_addr, tos, ttl,
&src->sin6.sin6_addr,
&dst->sin6.sin6_addr, tos, ttl,
label, src_port, dst_port, !udp_sum);
#endif
}


@ -5700,10 +5700,11 @@ out:
mutex_unlock(&wl->mutex);
}
static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta)
static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
struct wl1271 *wl = wl_sta->wl;
struct wl1271 *wl = hw->priv;
u8 hlid = wl_sta->hlid;
/* return in units of Kbps */


@ -189,11 +189,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
case 1:
_debug("extract FID count");
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("FID count: %u", call->count);
@ -210,11 +207,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
_debug("extract FID array");
ret = afs_extract_data(call, skb, last, call->buffer,
call->count * 3 * 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
_debug("unmarshall FID array");
call->request = kcalloc(call->count,
@ -239,11 +233,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
case 3:
_debug("extract CB count");
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
tmp = ntohl(call->tmp);
_debug("CB count: %u", tmp);
@ -258,11 +249,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
_debug("extract CB array");
ret = afs_extract_data(call, skb, last, call->request,
call->count * 3 * 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
_debug("unmarshall CB array");
cb = call->request;
@ -278,9 +266,9 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
call->unmarshall++;
case 5:
_debug("trailer");
if (skb->len != 0)
return -EBADMSG;
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
/* Record that the message was unmarshalled successfully so
* that the call destructor can know to do the callback breaking
@ -294,8 +282,6 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
break;
}
if (!last)
return 0;
call->state = AFS_CALL_REPLYING;
@ -335,13 +321,13 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
{
struct afs_server *server;
struct in_addr addr;
int ret;
_enter(",{%u},%d", skb->len, last);
if (skb->len > 0)
return -EBADMSG;
if (!last)
return 0;
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
/* no unmarshalling required */
call->state = AFS_CALL_REPLYING;
@ -371,8 +357,10 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call,
_enter(",{%u},%d", skb->len, last);
/* There are some arguments that we ignore */
afs_data_consumed(call, skb);
if (!last)
return 0;
return -EAGAIN;
/* no unmarshalling required */
call->state = AFS_CALL_REPLYING;
@ -408,12 +396,13 @@ static void SRXAFSCB_Probe(struct work_struct *work)
static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
bool last)
{
int ret;
_enter(",{%u},%d", skb->len, last);
if (skb->len > 0)
return -EBADMSG;
if (!last)
return 0;
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
/* no unmarshalling required */
call->state = AFS_CALL_REPLYING;
@ -460,10 +449,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
if (skb->len > 0)
return -EBADMSG;
if (!last)
return 0;
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
switch (call->unmarshall) {
case 0:
@ -509,8 +497,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
break;
}
if (!last)
return 0;
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
call->state = AFS_CALL_REPLYING;
@ -588,12 +577,13 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call,
struct sk_buff *skb, bool last)
{
int ret;
_enter(",{%u},%d", skb->len, last);
if (skb->len > 0)
return -EBADMSG;
if (!last)
return 0;
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
/* no unmarshalling required */
call->state = AFS_CALL_REPLYING;


@ -240,15 +240,13 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call,
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
_enter(",,%u", last);
afs_transfer_reply(call, skb);
if (!last)
return 0;
if (call->reply_size != call->reply_max)
return -EBADMSG;
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
@ -335,11 +333,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
case 1:
_debug("extract data length (MSW)");
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("DATA length MSW: %u", call->count);
@ -353,11 +348,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
case 2:
_debug("extract data length");
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("DATA length: %u", call->count);
@ -375,11 +367,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
ret = afs_extract_data(call, skb, last, buffer,
call->count);
kunmap_atomic(buffer);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
}
call->offset = 0;
@ -389,11 +378,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
case 4:
ret = afs_extract_data(call, skb, last, call->buffer,
(21 + 3 + 6) * 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
bp = call->buffer;
xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
@ -405,15 +391,12 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
call->unmarshall++;
case 5:
_debug("trailer");
if (skb->len != 0)
return -EBADMSG;
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
break;
}
if (!last)
return 0;
if (call->count < PAGE_SIZE) {
_debug("clear");
page = call->reply3;
@ -537,9 +520,8 @@ static int afs_deliver_fs_give_up_callbacks(struct afs_call *call,
{
_enter(",{%u},%d", skb->len, last);
if (skb->len > 0)
return -EBADMSG; /* shouldn't be any reply data */
return 0;
/* shouldn't be any reply data */
return afs_data_complete(call, skb, last);
}
/*
@ -622,15 +604,13 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call,
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
afs_transfer_reply(call, skb);
if (!last)
return 0;
if (call->reply_size != call->reply_max)
return -EBADMSG;
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
@ -721,15 +701,13 @@ static int afs_deliver_fs_remove(struct afs_call *call,
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
afs_transfer_reply(call, skb);
if (!last)
return 0;
if (call->reply_size != call->reply_max)
return -EBADMSG;
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
@ -804,15 +782,13 @@ static int afs_deliver_fs_link(struct afs_call *call,
{
struct afs_vnode *dvnode = call->reply, *vnode = call->reply2;
const __be32 *bp;
int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
afs_transfer_reply(call, skb);
if (!last)
return 0;
if (call->reply_size != call->reply_max)
return -EBADMSG;
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
@ -892,15 +868,13 @@ static int afs_deliver_fs_symlink(struct afs_call *call,
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
afs_transfer_reply(call, skb);
if (!last)
return 0;
if (call->reply_size != call->reply_max)
return -EBADMSG;
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
@ -999,15 +973,13 @@ static int afs_deliver_fs_rename(struct afs_call *call,
{
struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2;
const __be32 *bp;
int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
afs_transfer_reply(call, skb);
if (!last)
return 0;
if (call->reply_size != call->reply_max)
return -EBADMSG;
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
@ -1105,20 +1077,13 @@ static int afs_deliver_fs_store_data(struct afs_call *call,
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
_enter(",,%u", last);
afs_transfer_reply(call, skb);
if (!last) {
_leave(" = 0 [more]");
return 0;
}
if (call->reply_size != call->reply_max) {
_leave(" = -EBADMSG [%u != %u]",
call->reply_size, call->reply_max);
return -EBADMSG;
}
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
@ -1292,20 +1257,13 @@ static int afs_deliver_fs_store_status(struct afs_call *call,
afs_dataversion_t *store_version;
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
_enter(",,%u", last);
afs_transfer_reply(call, skb);
if (!last) {
_leave(" = 0 [more]");
return 0;
}
if (call->reply_size != call->reply_max) {
_leave(" = -EBADMSG [%u != %u]",
call->reply_size, call->reply_max);
return -EBADMSG;
}
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
store_version = NULL;
@ -1504,11 +1462,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
_debug("extract status");
ret = afs_extract_data(call, skb, last, call->buffer,
12 * 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
bp = call->buffer;
xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2);
@ -1518,11 +1473,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
/* extract the volume name length */
case 2:
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("volname length: %u", call->count);
@ -1537,11 +1489,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
if (call->count > 0) {
ret = afs_extract_data(call, skb, last, call->reply3,
call->count);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
}
p = call->reply3;
@ -1561,11 +1510,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
case 4:
ret = afs_extract_data(call, skb, last, call->buffer,
call->count);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
call->offset = 0;
call->unmarshall++;
@ -1574,11 +1520,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
/* extract the offline message length */
case 5:
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("offline msg length: %u", call->count);
@ -1593,11 +1536,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
if (call->count > 0) {
ret = afs_extract_data(call, skb, last, call->reply3,
call->count);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
}
p = call->reply3;
@ -1617,11 +1557,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
case 7:
ret = afs_extract_data(call, skb, last, call->buffer,
call->count);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
call->offset = 0;
call->unmarshall++;
@ -1630,11 +1567,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
/* extract the message of the day length */
case 8:
ret = afs_extract_data(call, skb, last, &call->tmp, 4);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("motd length: %u", call->count);
@ -1649,11 +1583,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
if (call->count > 0) {
ret = afs_extract_data(call, skb, last, call->reply3,
call->count);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
}
p = call->reply3;
@ -1673,26 +1604,20 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
case 10:
ret = afs_extract_data(call, skb, last, call->buffer,
call->count);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
if (ret < 0)
return ret;
call->offset = 0;
call->unmarshall++;
no_motd_padding:
case 11:
_debug("trailer %d", skb->len);
if (skb->len != 0)
return -EBADMSG;
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
break;
}
if (!last)
return 0;
_leave(" = 0 [done]");
return 0;
}
@ -1764,15 +1689,13 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call,
struct sk_buff *skb, bool last)
{
const __be32 *bp;
int ret;
_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
afs_transfer_reply(call, skb);
if (!last)
return 0;
if (call->reply_size != call->reply_max)
return -EBADMSG;
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;

View File

@ -609,17 +609,29 @@ extern void afs_proc_cell_remove(struct afs_cell *);
*/
extern int afs_open_socket(void);
extern void afs_close_socket(void);
extern void afs_data_consumed(struct afs_call *, struct sk_buff *);
extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t,
const struct afs_wait_mode *);
extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
size_t, size_t);
extern void afs_flat_call_destructor(struct afs_call *);
extern void afs_transfer_reply(struct afs_call *, struct sk_buff *);
extern int afs_transfer_reply(struct afs_call *, struct sk_buff *, bool);
extern void afs_send_empty_reply(struct afs_call *);
extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *,
size_t);
static inline int afs_data_complete(struct afs_call *call, struct sk_buff *skb,
bool last)
{
if (skb->len > 0)
return -EBADMSG;
afs_data_consumed(call, skb);
if (!last)
return -EAGAIN;
return 0;
}
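afs_data_complete() folds the trailing-data check, the skb consumption, and the not-last-yet case into one return-value contract that the delivery functions above now share. A userspace sketch of just that contract, with the skb reduced to a (len, last) pair of my own invention:

#include <errno.h>
#include <stdio.h>

struct fake_skb { unsigned int len; };   /* (len, last) is all we model */

static int data_complete(struct fake_skb *skb, int last)
{
    if (skb->len > 0)
        return -EBADMSG;   /* unexpected trailing bytes */
    /* the real helper consumes the skb here via afs_data_consumed() */
    if (!last)
        return -EAGAIN;    /* ask the caller to wait for more */
    return 0;
}

int main(void)
{
    struct fake_skb trailing = { 4 }, empty = { 0 };

    printf("trailing data: %d\n", data_complete(&trailing, 1)); /* -EBADMSG */
    printf("not last yet:  %d\n", data_complete(&empty, 0));    /* -EAGAIN */
    printf("clean finish:  %d\n", data_complete(&empty, 1));    /* 0 */
    return 0;
}

Collapsing the old per-caller switch statements into "if (ret < 0) return ret;" is what makes the many deliver-function hunks above so mechanical.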
/*
* security.c
*/


@ -150,10 +150,9 @@ void afs_close_socket(void)
}
/*
* note that the data in a socket buffer is now delivered and that the buffer
* should be freed
* Note that the data in a socket buffer is now consumed.
*/
static void afs_data_delivered(struct sk_buff *skb)
void afs_data_consumed(struct afs_call *call, struct sk_buff *skb)
{
if (!skb) {
_debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
@ -161,9 +160,7 @@ static void afs_data_delivered(struct sk_buff *skb)
} else {
_debug("DLVR %p{%u} [%d]",
skb, skb->mark, atomic_read(&afs_outstanding_skbs));
if (atomic_dec_return(&afs_outstanding_skbs) == -1)
BUG();
rxrpc_kernel_data_delivered(skb);
rxrpc_kernel_data_consumed(call->rxcall, skb);
}
}
@ -489,9 +486,15 @@ static void afs_deliver_to_call(struct afs_call *call)
last = rxrpc_kernel_is_data_last(skb);
ret = call->type->deliver(call, skb, last);
switch (ret) {
case -EAGAIN:
if (last) {
_debug("short data");
goto unmarshal_error;
}
break;
case 0:
if (last &&
call->state == AFS_CALL_AWAIT_REPLY)
ASSERT(last);
if (call->state == AFS_CALL_AWAIT_REPLY)
call->state = AFS_CALL_COMPLETE;
break;
case -ENOTCONN:
@ -501,6 +504,7 @@ static void afs_deliver_to_call(struct afs_call *call)
abort_code = RX_INVALID_OPERATION;
goto do_abort;
default:
unmarshal_error:
abort_code = RXGEN_CC_UNMARSHAL;
if (call->state != AFS_CALL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
@ -511,9 +515,7 @@ static void afs_deliver_to_call(struct afs_call *call)
call->state = AFS_CALL_ERROR;
break;
}
afs_data_delivered(skb);
skb = NULL;
continue;
break;
case RXRPC_SKB_MARK_FINAL_ACK:
_debug("Rcv ACK");
call->state = AFS_CALL_COMPLETE;
@ -685,15 +687,35 @@ static void afs_process_async_call(struct afs_call *call)
}
/*
* empty a socket buffer into a flat reply buffer
* Empty a socket buffer into a flat reply buffer.
*/
void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
int afs_transfer_reply(struct afs_call *call, struct sk_buff *skb, bool last)
{
size_t len = skb->len;
if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0)
BUG();
call->reply_size += len;
if (len > call->reply_max - call->reply_size) {
_leave(" = -EBADMSG [%zu > %u]",
len, call->reply_max - call->reply_size);
return -EBADMSG;
}
if (len > 0) {
if (skb_copy_bits(skb, 0, call->buffer + call->reply_size,
len) < 0)
BUG();
call->reply_size += len;
}
afs_data_consumed(call, skb);
if (!last)
return -EAGAIN;
if (call->reply_size != call->reply_max) {
_leave(" = -EBADMSG [%u != %u]",
call->reply_size, call->reply_max);
return -EBADMSG;
}
return 0;
}
/*
@ -745,7 +767,8 @@ static void afs_collect_incoming_call(struct work_struct *work)
}
/*
* grab the operation ID from an incoming cache manager call
* Grab the operation ID from an incoming cache manager call. The socket
* buffer is discarded on error or if we don't yet have sufficient data.
*/
static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
bool last)
@ -766,12 +789,9 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
call->offset += len;
if (call->offset < 4) {
if (last) {
_leave(" = -EBADMSG [op ID short]");
return -EBADMSG;
}
_leave(" = 0 [incomplete]");
return 0;
afs_data_consumed(call, skb);
_leave(" = -EAGAIN");
return -EAGAIN;
}
call->state = AFS_CALL_AWAIT_REQUEST;
@ -855,7 +875,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
}
/*
* extract a piece of data from the received data socket buffers
* Extract a piece of data from the received data socket buffers.
*/
int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
bool last, void *buf, size_t count)
@ -873,10 +893,7 @@ int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
call->offset += len;
if (call->offset < count) {
if (last) {
_leave(" = -EBADMSG [%d < %zu]", call->offset, count);
return -EBADMSG;
}
afs_data_consumed(call, skb);
_leave(" = -EAGAIN");
return -EAGAIN;
}


@ -64,16 +64,13 @@ static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call,
struct afs_cache_vlocation *entry;
__be32 *bp;
u32 tmp;
int loop;
int loop, ret;
_enter(",,%u", last);
afs_transfer_reply(call, skb);
if (!last)
return 0;
if (call->reply_size != call->reply_max)
return -EBADMSG;
ret = afs_transfer_reply(call, skb, last);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
entry = call->reply;


@ -3891,8 +3891,7 @@ void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
int dev_get_nest_level(struct net_device *dev,
bool (*type_check)(const struct net_device *dev));
int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);


@ -70,8 +70,16 @@ struct qed_dbcx_pfc_params {
u8 max_tc;
};
enum qed_dcbx_sf_ieee_type {
QED_DCBX_SF_IEEE_ETHTYPE,
QED_DCBX_SF_IEEE_TCP_PORT,
QED_DCBX_SF_IEEE_UDP_PORT,
QED_DCBX_SF_IEEE_TCP_UDP_PORT
};
struct qed_app_entry {
bool ethtype;
enum qed_dcbx_sf_ieee_type sf_ieee;
bool enabled;
u8 prio;
u16 proto_id;


@ -705,70 +705,6 @@ typedef struct sctp_auth_chunk {
sctp_authhdr_t auth_hdr;
} __packed sctp_auth_chunk_t;
struct sctp_info {
__u32 sctpi_tag;
__u32 sctpi_state;
__u32 sctpi_rwnd;
__u16 sctpi_unackdata;
__u16 sctpi_penddata;
__u16 sctpi_instrms;
__u16 sctpi_outstrms;
__u32 sctpi_fragmentation_point;
__u32 sctpi_inqueue;
__u32 sctpi_outqueue;
__u32 sctpi_overall_error;
__u32 sctpi_max_burst;
__u32 sctpi_maxseg;
__u32 sctpi_peer_rwnd;
__u32 sctpi_peer_tag;
__u8 sctpi_peer_capable;
__u8 sctpi_peer_sack;
__u16 __reserved1;
/* assoc status info */
__u64 sctpi_isacks;
__u64 sctpi_osacks;
__u64 sctpi_opackets;
__u64 sctpi_ipackets;
__u64 sctpi_rtxchunks;
__u64 sctpi_outofseqtsns;
__u64 sctpi_idupchunks;
__u64 sctpi_gapcnt;
__u64 sctpi_ouodchunks;
__u64 sctpi_iuodchunks;
__u64 sctpi_oodchunks;
__u64 sctpi_iodchunks;
__u64 sctpi_octrlchunks;
__u64 sctpi_ictrlchunks;
/* primary transport info */
struct sockaddr_storage sctpi_p_address;
__s32 sctpi_p_state;
__u32 sctpi_p_cwnd;
__u32 sctpi_p_srtt;
__u32 sctpi_p_rto;
__u32 sctpi_p_hbinterval;
__u32 sctpi_p_pathmaxrxt;
__u32 sctpi_p_sackdelay;
__u32 sctpi_p_sackfreq;
__u32 sctpi_p_ssthresh;
__u32 sctpi_p_partial_bytes_acked;
__u32 sctpi_p_flight_size;
__u16 sctpi_p_error;
__u16 __reserved2;
/* sctp sock info */
__u32 sctpi_s_autoclose;
__u32 sctpi_s_adaptation_ind;
__u32 sctpi_s_pd_point;
__u8 sctpi_s_nodelay;
__u8 sctpi_s_disable_fragments;
__u8 sctpi_s_v4mapped;
__u8 sctpi_s_frag_interleave;
__u32 sctpi_s_type;
__u32 __reserved3;
};
struct sctp_infox {
struct sctp_info *sctpinfo;
struct sctp_association *asoc;


@ -2847,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
__skb_linearize(skb) : 0;
}
static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
unsigned int off)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_block_sub(skb->csum,
csum_partial(start, len, 0), off);
else if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_start_offset(skb) < 0)
skb->ip_summed = CHECKSUM_NONE;
}
/**
* skb_postpull_rcsum - update checksum for received skb after pull
* @skb: buffer to update
@ -2857,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
* update the CHECKSUM_COMPLETE checksum, or set ip_summed to
* CHECKSUM_NONE so that it can be recomputed from scratch.
*/
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
else if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_start_offset(skb) < 0)
skb->ip_summed = CHECKSUM_NONE;
__skb_postpull_rcsum(skb, start, len, 0);
}
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
unsigned int off)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_block_add(skb->csum,
csum_partial(start, len, 0), off);
}
/**
* skb_postpush_rcsum - update checksum for received skb after push
* @skb: buffer to update
* @start: start of data after push
* @len: length of data pushed
*
* After doing a push on a received packet, you need to call this to
* update the CHECKSUM_COMPLETE checksum.
*/
static inline void skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
/* For performing the reverse operation to skb_postpull_rcsum(),
* we can instead of ...
*
* skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
*
* ... just use this equivalent version here to save a few
* instructions. Feeding csum of 0 in csum_partial() and later
* on adding skb->csum is equivalent to feed skb->csum in the
* first place.
*/
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_partial(start, len, skb->csum);
__skb_postpush_rcsum(skb, start, len, 0);
}
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
/**
* skb_push_rcsum - push skb and update receive checksum
* @skb: buffer to update


@ -176,8 +176,8 @@ int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
int tcf_action_destroy(struct list_head *actions, int bind);
int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
struct tcf_result *res);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct nlattr *nla,
struct nlattr *est, char *n, int ovr,
int bind, struct list_head *);
@ -189,30 +189,17 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
#define tc_no_actions(_exts) \
(list_empty(&(_exts)->actions))
#define tc_for_each_action(_a, _exts) \
list_for_each_entry(a, &(_exts)->actions, list)
#define tc_single_action(_exts) \
(list_is_singular(&(_exts)->actions))
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
if (!a->ops->stats_update)
return;
a->ops->stats_update(a, bytes, packets, lastuse);
#endif
}
#else /* CONFIG_NET_CLS_ACT */
#define tc_no_actions(_exts) true
#define tc_for_each_action(_a, _exts) while ((void)(_a), 0)
#define tc_single_action(_exts) false
#define tcf_action_stats_update(a, bytes, packets, lastuse)
#endif /* CONFIG_NET_CLS_ACT */
#endif


@ -40,12 +40,12 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
unsigned long,
gfp_t);
int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
void rxrpc_kernel_end_call(struct rxrpc_call *);
bool rxrpc_kernel_is_data_last(struct sk_buff *);
u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
int rxrpc_kernel_get_error_number(struct sk_buff *);
void rxrpc_kernel_data_delivered(struct sk_buff *);
void rxrpc_kernel_free_skb(struct sk_buff *);
struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
int rxrpc_kernel_reject_call(struct socket *);


@ -104,6 +104,7 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
skb_push(skb, hdr_len);
skb_set_inner_protocol(skb, proto);
skb_reset_transport_header(skb);
greh = (struct gre_base_hdr *)skb->data;
greh->flags = gre_tnl_flags_to_gre_flags(flags);


@ -128,7 +128,8 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
to = from | htonl(INET_ECN_CE << 20);
*(__be32 *)iph = to;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_add(csum_sub(skb->csum, from), to);
skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
(__force __wsum)to);
return 1;
}


@ -3620,7 +3620,8 @@ struct ieee80211_ops {
int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
u32 (*get_expected_throughput)(struct ieee80211_sta *sta);
u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta);
int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm);


@ -59,7 +59,8 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
struct list_head actions;
int nr_actions;
struct tc_action **actions;
#endif
/* Map to export classifier specific extension TLV types to the
* generic extensions API. Unsupported extensions must be set to 0.
@ -72,7 +73,10 @@ static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
exts->type = 0;
INIT_LIST_HEAD(&exts->actions);
exts->nr_actions = 0;
exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
GFP_KERNEL);
WARN_ON(!exts->actions); /* TODO: propagate the error to callers */
#endif
exts->action = action;
exts->police = police;
@ -89,7 +93,7 @@ static inline int
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
return !list_empty(&exts->actions);
return exts->nr_actions;
#else
return 0;
#endif
@ -108,6 +112,20 @@ tcf_exts_is_available(struct tcf_exts *exts)
return tcf_exts_is_predicative(exts);
}
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
int i;
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
list_add(&a->list, actions);
}
#endif
}
/**
* tcf_exts_exec - execute tc filter extensions
* @skb: socket buffer
@ -124,12 +142,25 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
if (!list_empty(&exts->actions))
return tcf_action_exec(skb, &exts->actions, res);
if (exts->nr_actions)
return tcf_action_exec(skb, exts->actions, exts->nr_actions,
res);
#endif
return 0;
}
#ifdef CONFIG_NET_CLS_ACT
#define tc_no_actions(_exts) ((_exts)->nr_actions == 0)
#define tc_single_action(_exts) ((_exts)->nr_actions == 1)
#else /* CONFIG_NET_CLS_ACT */
#define tc_no_actions(_exts) true
#define tc_single_action(_exts) false
#endif /* CONFIG_NET_CLS_ACT */
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
struct tcf_exts *exts, bool ovr);
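The tcf_exts conversion turns action execution into a bounded loop over a pointer array sized TCA_ACT_MAX_PRIO, with tcf_exts_to_list() bridging back to list-based callers. A reduced sketch of the array form, using toy types of mine rather than the kernel's tc_action:

#include <stdio.h>
#include <stdlib.h>

#define ACT_MAX_PRIO 32    /* stands in for TCA_ACT_MAX_PRIO */

struct act {
    const char *name;
    int (*exec)(struct act *a);
};

static int print_exec(struct act *a)
{
    printf("exec %s\n", a->name);
    return 0;
}

int main(void)
{
    struct act police = { "police", print_exec };
    struct act mirred = { "mirred", print_exec };
    struct act **actions = calloc(ACT_MAX_PRIO, sizeof(*actions));
    int nr_actions = 0, i;

    if (!actions)
        return 1;
    actions[nr_actions++] = &police;
    actions[nr_actions++] = &mirred;

    /* the shape of the new tcf_action_exec(skb, actions, nr_actions, res) */
    for (i = 0; i < nr_actions; i++)
        actions[i]->exec(actions[i]);

    free(actions);
    return 0;
}

An index loop over a dense array also makes tc_no_actions()/tc_single_action() trivial comparisons on nr_actions, as the macros above show.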


@ -339,7 +339,7 @@ enum bpf_func_id {
BPF_FUNC_skb_change_type,
/**
* bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
* bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb
* @skb: pointer to skb
* @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
* @index: index of the cgroup in the bpf_map
@ -348,7 +348,7 @@ enum bpf_func_id {
* == 1 skb succeeded the cgroup2 descendant test
* < 0 error
*/
BPF_FUNC_skb_in_cgroup,
BPF_FUNC_skb_under_cgroup,
/**
* bpf_get_hash_recalc(skb)


@ -24,7 +24,7 @@ enum nft_registers {
__NFT_REG_MAX,
NFT_REG32_00 = 8,
MFT_REG32_01,
NFT_REG32_01,
NFT_REG32_02,
NFT_REG32_03,
NFT_REG32_04,


@ -944,4 +944,68 @@ struct sctp_default_prinfo {
__u16 pr_policy;
};
struct sctp_info {
__u32 sctpi_tag;
__u32 sctpi_state;
__u32 sctpi_rwnd;
__u16 sctpi_unackdata;
__u16 sctpi_penddata;
__u16 sctpi_instrms;
__u16 sctpi_outstrms;
__u32 sctpi_fragmentation_point;
__u32 sctpi_inqueue;
__u32 sctpi_outqueue;
__u32 sctpi_overall_error;
__u32 sctpi_max_burst;
__u32 sctpi_maxseg;
__u32 sctpi_peer_rwnd;
__u32 sctpi_peer_tag;
__u8 sctpi_peer_capable;
__u8 sctpi_peer_sack;
__u16 __reserved1;
/* assoc status info */
__u64 sctpi_isacks;
__u64 sctpi_osacks;
__u64 sctpi_opackets;
__u64 sctpi_ipackets;
__u64 sctpi_rtxchunks;
__u64 sctpi_outofseqtsns;
__u64 sctpi_idupchunks;
__u64 sctpi_gapcnt;
__u64 sctpi_ouodchunks;
__u64 sctpi_iuodchunks;
__u64 sctpi_oodchunks;
__u64 sctpi_iodchunks;
__u64 sctpi_octrlchunks;
__u64 sctpi_ictrlchunks;
/* primary transport info */
struct sockaddr_storage sctpi_p_address;
__s32 sctpi_p_state;
__u32 sctpi_p_cwnd;
__u32 sctpi_p_srtt;
__u32 sctpi_p_rto;
__u32 sctpi_p_hbinterval;
__u32 sctpi_p_pathmaxrxt;
__u32 sctpi_p_sackdelay;
__u32 sctpi_p_sackfreq;
__u32 sctpi_p_ssthresh;
__u32 sctpi_p_partial_bytes_acked;
__u32 sctpi_p_flight_size;
__u16 sctpi_p_error;
__u16 __reserved2;
/* sctp sock info */
__u32 sctpi_s_autoclose;
__u32 sctpi_s_adaptation_ind;
__u32 sctpi_s_pd_point;
__u8 sctpi_s_nodelay;
__u8 sctpi_s_disable_fragments;
__u8 sctpi_s_v4mapped;
__u8 sctpi_s_frag_interleave;
__u32 sctpi_s_type;
__u32 __reserved3;
};
#endif /* _UAPI_SCTP_H */


@ -26,11 +26,18 @@ struct bpf_htab {
struct bucket *buckets;
void *elems;
struct pcpu_freelist freelist;
void __percpu *extra_elems;
atomic_t count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
};
enum extra_elem_state {
HTAB_NOT_AN_EXTRA_ELEM = 0,
HTAB_EXTRA_ELEM_FREE,
HTAB_EXTRA_ELEM_USED
};
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
union {
@ -38,7 +45,10 @@ struct htab_elem {
struct bpf_htab *htab;
struct pcpu_freelist_node fnode;
};
struct rcu_head rcu;
union {
struct rcu_head rcu;
enum extra_elem_state state;
};
u32 hash;
char key[0] __aligned(8);
};
@ -113,6 +123,23 @@ free_elems:
return err;
}
static int alloc_extra_elems(struct bpf_htab *htab)
{
void __percpu *pptr;
int cpu;
pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
if (!pptr)
return -ENOMEM;
for_each_possible_cpu(cpu) {
((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
HTAB_EXTRA_ELEM_FREE;
}
htab->extra_elems = pptr;
return 0;
}
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
@ -185,6 +212,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (percpu)
cost += (u64) round_up(htab->map.value_size, 8) *
num_possible_cpus() * htab->map.max_entries;
else
cost += (u64) htab->elem_size * num_possible_cpus();
if (cost >= U32_MAX - PAGE_SIZE)
/* make sure page count doesn't overflow */
@ -212,14 +241,22 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
raw_spin_lock_init(&htab->buckets[i].lock);
}
if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
err = prealloc_elems_and_freelist(htab);
if (!percpu) {
err = alloc_extra_elems(htab);
if (err)
goto free_buckets;
}
if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
err = prealloc_elems_and_freelist(htab);
if (err)
goto free_extra_elems;
}
return &htab->map;
free_extra_elems:
free_percpu(htab->extra_elems);
free_buckets:
kvfree(htab->buckets);
free_htab:
@ -349,7 +386,6 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
kfree(l);
}
static void htab_elem_free_rcu(struct rcu_head *head)
@ -370,6 +406,11 @@ static void htab_elem_free_rcu(struct rcu_head *head)
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
if (l->state == HTAB_EXTRA_ELEM_USED) {
l->state = HTAB_EXTRA_ELEM_FREE;
return;
}
if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
pcpu_freelist_push(&htab->freelist, &l->fnode);
} else {
@ -381,25 +422,44 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
void *value, u32 key_size, u32 hash,
bool percpu, bool onallcpus)
bool percpu, bool onallcpus,
bool old_elem_exists)
{
u32 size = htab->map.value_size;
bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
struct htab_elem *l_new;
void __percpu *pptr;
int err = 0;
if (prealloc) {
l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
if (!l_new)
return ERR_PTR(-E2BIG);
err = -E2BIG;
} else {
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
atomic_dec(&htab->count);
return ERR_PTR(-E2BIG);
err = -E2BIG;
} else {
l_new = kmalloc(htab->elem_size,
GFP_ATOMIC | __GFP_NOWARN);
if (!l_new)
return ERR_PTR(-ENOMEM);
}
l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
if (!l_new)
return ERR_PTR(-ENOMEM);
}
if (err) {
if (!old_elem_exists)
return ERR_PTR(err);
/* if we're updating the existing element and the hash table
* is full, use per-cpu extra elems
*/
l_new = this_cpu_ptr(htab->extra_elems);
if (l_new->state != HTAB_EXTRA_ELEM_FREE)
return ERR_PTR(-E2BIG);
l_new->state = HTAB_EXTRA_ELEM_USED;
} else {
l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
}
memcpy(l_new->key, key, key_size);
@ -489,7 +549,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
if (ret)
goto err;
l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false);
l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
!!l_old);
if (IS_ERR(l_new)) {
/* all pre-allocated elements are in use or memory exhausted */
ret = PTR_ERR(l_new);
@ -563,7 +624,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
}
} else {
l_new = alloc_htab_elem(htab, key, value, key_size,
hash, true, onallcpus);
hash, true, onallcpus, false);
if (IS_ERR(l_new)) {
ret = PTR_ERR(l_new);
goto err;
@ -652,6 +713,7 @@ static void htab_map_free(struct bpf_map *map)
htab_free_elems(htab);
pcpu_freelist_destroy(&htab->freelist);
}
free_percpu(htab->extra_elems);
kvfree(htab->buckets);
kfree(htab);
}
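The hashtab hunks above add a per-CPU pool of "extra" elements so that overwriting an existing key can still succeed once the preallocated freelist is exhausted; the new state field tells free_htab_elem() whether an element goes back to the per-CPU slot or to the freelist. Below is a minimal user-space C sketch of that state machine; the two-slot pool and the single spare are hypothetical stand-ins for the kernel's freelist and per-CPU extra_elems, not the real implementation.

#include <stdio.h>

enum elem_state { EXTRA_ELEM_FREE, EXTRA_ELEM_USED, NOT_AN_EXTRA_ELEM };

struct elem { enum elem_state state; };

static struct elem pool[2];     /* stand-in for the preallocated freelist */
static int pool_used;
static struct elem spare = { EXTRA_ELEM_FREE };  /* "per-cpu" extra elem */

/* Mirror alloc_htab_elem(): fall back to the spare only when the pool is
 * full *and* the caller is replacing an existing key. */
static struct elem *alloc_elem(int old_elem_exists)
{
	if (pool_used < 2) {
		pool[pool_used].state = NOT_AN_EXTRA_ELEM;
		return &pool[pool_used++];
	}
	if (!old_elem_exists || spare.state != EXTRA_ELEM_FREE)
		return NULL;                    /* -E2BIG in the kernel */
	spare.state = EXTRA_ELEM_USED;
	return &spare;
}

/* Mirror free_htab_elem(): a used spare is handed back, not freed. */
static void free_elem(struct elem *e)
{
	if (e->state == EXTRA_ELEM_USED) {
		e->state = EXTRA_ELEM_FREE;
		return;
	}
	pool_used--;                            /* freelist push, simplified */
}

int main(void)
{
	alloc_elem(0);
	alloc_elem(0);                          /* fill the pool */
	printf("insert new key:      %s\n", alloc_elem(0) ? "ok" : "E2BIG");
	struct elem *e = alloc_elem(1);         /* update of an existing key */
	printf("update existing key: %s\n", e ? "ok via extra elem" : "E2BIG");
	if (e)
		free_elem(e);
	return 0;
}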


@@ -194,6 +194,7 @@ struct verifier_env {
struct verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
u32 used_map_cnt; /* number of used maps */
u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
};
@@ -1052,7 +1053,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_ARRAY:
if (func_id != BPF_FUNC_skb_in_cgroup)
if (func_id != BPF_FUNC_skb_under_cgroup)
goto error;
break;
default:
@@ -1074,7 +1075,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
goto error;
break;
case BPF_FUNC_skb_in_cgroup:
case BPF_FUNC_skb_under_cgroup:
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
goto error;
break;
@@ -1301,7 +1302,7 @@ add_imm:
/* dst_reg stays as pkt_ptr type and since some positive
* integer value was added to the pointer, increment its 'id'
*/
dst_reg->id++;
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range and off to zero */
dst_reg->off = 0;
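The one-line verifier change above is subtle: two registers holding copies of the same packet pointer share an id, so bumping the copy's own id ("dst_reg->id++") can leave two independently adjusted pointers carrying the same id, wrongly letting a range check on one be applied to the other. Drawing every id from a per-verification monotonic counter cannot collide. A small C sketch of the difference, with a hypothetical harness:

#include <assert.h>
#include <stdio.h>

static unsigned int id_gen;                 /* mirrors env->id_gen */

static unsigned int next_id(void) { return ++id_gen; }

int main(void)
{
	unsigned int r1 = 1, r2 = r1;       /* two copies sharing id 1 */

	/* old scheme: each adjustment increments its own copy of the id */
	unsigned int a = r1 + 1, b = r2 + 1;
	printf("old: id %u vs %u (collision)\n", a, b);

	/* new scheme: each adjustment draws a fresh id from the generator */
	a = next_id();
	b = next_id();
	assert(a != b);
	printf("new: id %u vs %u\n", a, b);
	return 0;
}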


@@ -30,7 +30,7 @@
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4U
#define BUCKET_LOCKS_PER_CPU 128UL
#define BUCKET_LOCKS_PER_CPU 32UL
static u32 head_hashfn(struct rhashtable *ht,
const struct bucket_table *tbl,
@@ -70,7 +70,7 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
unsigned int nr_pcpus = num_possible_cpus();
#endif
nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
/* Never allocate more than 0.5 locks per bucket */
@@ -83,6 +83,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
tbl->locks = vmalloc(size * sizeof(spinlock_t));
else
#endif
if (gfp != GFP_KERNEL)
gfp |= __GFP_NOWARN | __GFP_NORETRY;
tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
gfp);
if (!tbl->locks)
@@ -321,12 +324,14 @@ static int rhashtable_expand(struct rhashtable *ht)
static int rhashtable_shrink(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
unsigned int size;
unsigned int nelems = atomic_read(&ht->nelems);
unsigned int size = 0;
int err;
ASSERT_RHT_MUTEX(ht);
size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
if (nelems)
size = roundup_pow_of_two(nelems * 3 / 2);
if (size < ht->p.min_size)
size = ht->p.min_size;
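The shrink fix above matters because the kernel's roundup_pow_of_two() is undefined for an argument of 0 (it is built on fls_long(n - 1), which underflows), so an empty table must skip the roundup and fall straight through to min_size. A user-space sketch of the corrected sizing, with a well-defined local roundup helper standing in for the kernel macro:

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;        /* well-defined for n == 0 here, unlike the kernel's */
}

static unsigned int shrink_size(unsigned int nelems, unsigned int min_size)
{
	unsigned int size = 0;

	if (nelems)                               /* the guard added above */
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < min_size)
		size = min_size;
	return size;
}

int main(void)
{
	printf("nelems=0   -> %u\n", shrink_size(0, 4));   /* 4, via min_size */
	printf("nelems=100 -> %u\n", shrink_size(100, 4)); /* 256 */
	return 0;
}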


@@ -38,7 +38,7 @@ MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
static int max_size = 0;
module_param(max_size, int, 0);
MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)");
MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)");
static bool shrinking = false;
module_param(shrinking, bool, 0);


@@ -169,7 +169,7 @@ int register_vlan_dev(struct net_device *dev)
if (err < 0)
goto out_uninit_mvrp;
vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
vlan->nest_level = dev_get_nest_level(real_dev) + 1;
err = register_netdevice(dev);
if (err < 0)
goto out_uninit_mvrp;


@@ -267,7 +267,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
/* If old entry was unassociated with any port, then delete it. */
f = __br_fdb_get(br, br->dev->dev_addr, 0);
if (f && f->is_local && !f->dst)
if (f && f->is_local && !f->dst && !f->added_by_user)
fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, 0);
@@ -282,7 +282,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
if (!br_vlan_should_use(v))
continue;
f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
if (f && f->is_local && !f->dst)
if (f && f->is_local && !f->dst && !f->added_by_user)
fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, v->vid);
}
@@ -764,20 +764,25 @@ out:
}
/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
__u16 state, __u16 flags, __u16 vid)
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
{
struct net_bridge *br = source->br;
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
bool modified = false;
/* If the port cannot learn allow only local and static entries */
if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
!(source->state == BR_STATE_LEARNING ||
source->state == BR_STATE_FORWARDING))
return -EPERM;
if (!source && !(state & NUD_PERMANENT)) {
pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
br->dev->name);
return -EINVAL;
}
fdb = fdb_find(head, addr, vid);
if (fdb == NULL) {
if (!(flags & NLM_F_CREATE))
@@ -832,22 +837,28 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
return 0;
}
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
const unsigned char *addr, u16 nlh_flags, u16 vid)
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
struct net_bridge_port *p, const unsigned char *addr,
u16 nlh_flags, u16 vid)
{
int err = 0;
if (ndm->ndm_flags & NTF_USE) {
if (!p) {
pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
br->dev->name);
return -EINVAL;
}
local_bh_disable();
rcu_read_lock();
br_fdb_update(p->br, p, addr, vid, true);
br_fdb_update(br, p, addr, vid, true);
rcu_read_unlock();
local_bh_enable();
} else {
spin_lock_bh(&p->br->hash_lock);
err = fdb_add_entry(p, addr, ndm->ndm_state,
spin_lock_bh(&br->hash_lock);
err = fdb_add_entry(br, p, addr, ndm->ndm_state,
nlh_flags, vid);
spin_unlock_bh(&p->br->hash_lock);
spin_unlock_bh(&br->hash_lock);
}
return err;
@@ -884,6 +895,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
dev->name);
return -EINVAL;
}
br = p->br;
vg = nbp_vlan_group(p);
}
@@ -895,15 +907,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
}
/* VID was specified, so use it. */
if (dev->priv_flags & IFF_EBRIDGE)
err = br_fdb_insert(br, NULL, addr, vid);
else
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
} else {
if (dev->priv_flags & IFF_EBRIDGE)
err = br_fdb_insert(br, NULL, addr, 0);
else
err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
if (err || !vg || !vg->num_vlans)
goto out;
@@ -914,11 +920,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
if (dev->priv_flags & IFF_EBRIDGE)
err = br_fdb_insert(br, NULL, addr, v->vid);
else
err = __br_fdb_add(ndm, p, addr, nlh_flags,
v->vid);
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
if (err)
goto out;
}
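The br_fdb changes above thread the bridge through explicitly so one entry point can serve both a bridge port and the bridge device itself (source == NULL), with the NULL case restricted to NUD_PERMANENT entries and NTF_USE rejected outright. A condensed C sketch of that dispatch; the flag values and two-field structs are hypothetical simplifications, and in the real code the NTF_USE check lives in __br_fdb_add():

#include <stdio.h>

#define NUD_PERMANENT 0x80
#define NTF_USE       0x01

struct bridge { const char *name; };
struct port   { struct bridge *br; };

static int fdb_add_entry(struct bridge *br, struct port *source,
			 unsigned short state, unsigned char flags)
{
	if ((flags & NTF_USE) && !source)
		return -1;           /* NTF_USE needs a real port: -EINVAL */
	if (!source && !(state & NUD_PERMANENT)) {
		printf("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
		       br->name);
		return -1;                                   /* -EINVAL */
	}
	return 0;                    /* create or replace the entry */
}

int main(void)
{
	struct bridge br = { "br0" };
	struct port p = { &br };

	printf("port entry:        %d\n", fdb_add_entry(&br, &p, 0, 0));
	printf("bridge, permanent: %d\n",
	       fdb_add_entry(&br, NULL, NUD_PERMANENT, 0));
	printf("bridge, non-perm:  %d\n", fdb_add_entry(&br, NULL, 0, 0));
	return 0;
}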


@@ -6045,8 +6045,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
EXPORT_SYMBOL(netdev_lower_dev_get_private);
int dev_get_nest_level(struct net_device *dev,
bool (*type_check)(const struct net_device *dev))
int dev_get_nest_level(struct net_device *dev)
{
struct net_device *lower = NULL;
struct list_head *iter;
@@ -6056,15 +6055,12 @@ int dev_get_nest_level(struct net_device *dev,
ASSERT_RTNL();
netdev_for_each_lower_dev(dev, lower, iter) {
nest = dev_get_nest_level(lower, type_check);
nest = dev_get_nest_level(lower);
if (max_nest < nest)
max_nest = nest;
}
if (type_check(dev))
max_nest++;
return max_nest;
return max_nest + 1;
}
EXPORT_SYMBOL(dev_get_nest_level);
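dev_get_nest_level() above loses its type_check argument: counting only devices of one type under-reports the true stacking depth of mixed stacks (say vlan over macsec over ethernet), so the level is now simply one more than the deepest lower device of any type. A self-contained C sketch of the recursion; the fixed-fanout device tree is a hypothetical stand-in for netdev lower lists:

#include <stdio.h>

struct dev {
	const char *name;
	struct dev *lower[2];       /* at most two lower devices, for brevity */
};

static int dev_get_nest_level(const struct dev *dev)
{
	int max_nest = 0;

	for (int i = 0; i < 2; i++) {
		int nest;

		if (!dev->lower[i])
			continue;
		nest = dev_get_nest_level(dev->lower[i]);
		if (nest > max_nest)
			max_nest = nest;
	}
	return max_nest + 1;        /* every level counts, no type_check */
}

int main(void)
{
	struct dev eth0    = { "eth0",    { NULL, NULL } };
	struct dev macsec0 = { "macsec0", { &eth0, NULL } };
	struct dev vlan0   = { "vlan0",   { &macsec0, NULL } };

	printf("nest(%s) = %d\n", vlan0.name, dev_get_nest_level(&vlan0));
	return 0;
}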


@@ -1355,56 +1355,47 @@ static inline int bpf_try_make_writable(struct sk_buff *skb,
{
int err;
if (!skb_cloned(skb))
return 0;
if (skb_clone_writable(skb, write_len))
return 0;
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (!err)
bpf_compute_data_end(skb);
err = skb_ensure_writable(skb, write_len);
bpf_compute_data_end(skb);
return err;
}
static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
if (skb_at_tc_ingress(skb))
skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
if (skb_at_tc_ingress(skb))
skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
{
struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
struct sk_buff *skb = (struct sk_buff *) (long) r1;
int offset = (int) r2;
unsigned int offset = (unsigned int) r2;
void *from = (void *) (long) r3;
unsigned int len = (unsigned int) r4;
void *ptr;
if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
return -EINVAL;
/* bpf verifier guarantees that:
* 'from' pointer points to bpf program stack
* 'len' bytes of it were initialized
* 'len' > 0
* 'skb' is a valid pointer to 'struct sk_buff'
*
* so check for invalid 'offset' and too large 'len'
*/
if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
if (unlikely(offset > 0xffff))
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + len)))
return -EFAULT;
ptr = skb_header_pointer(skb, offset, len, sp->buff);
if (unlikely(!ptr))
return -EFAULT;
ptr = skb->data + offset;
if (flags & BPF_F_RECOMPUTE_CSUM)
skb_postpull_rcsum(skb, ptr, len);
__skb_postpull_rcsum(skb, ptr, len, offset);
memcpy(ptr, from, len);
if (ptr == sp->buff)
/* skb_store_bits cannot return -EFAULT here */
skb_store_bits(skb, offset, ptr, len);
if (flags & BPF_F_RECOMPUTE_CSUM)
skb_postpush_rcsum(skb, ptr, len);
__skb_postpush_rcsum(skb, ptr, len, offset);
if (flags & BPF_F_INVALIDATE_HASH)
skb_clear_hash(skb);
@@ -1425,12 +1416,12 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
int offset = (int) r2;
unsigned int offset = (unsigned int) r2;
void *to = (void *)(unsigned long) r3;
unsigned int len = (unsigned int) r4;
void *ptr;
if (unlikely((u32) offset > 0xffff))
if (unlikely(offset > 0xffff))
goto err_clear;
ptr = skb_header_pointer(skb, offset, len, to);
@@ -1458,20 +1449,17 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
struct sk_buff *skb = (struct sk_buff *) (long) r1;
int offset = (int) r2;
__sum16 sum, *ptr;
unsigned int offset = (unsigned int) r2;
__sum16 *ptr;
if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
return -EINVAL;
if (unlikely((u32) offset > 0xffff))
if (unlikely(offset > 0xffff || offset & 1))
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
return -EFAULT;
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
if (unlikely(!ptr))
if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
return -EFAULT;
ptr = (__sum16 *)(skb->data + offset);
switch (flags & BPF_F_HDR_FIELD_MASK) {
case 0:
if (unlikely(from != 0))
@@ -1489,10 +1477,6 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
return -EINVAL;
}
if (ptr == &sum)
/* skb_store_bits guaranteed to not return -EFAULT here */
skb_store_bits(skb, offset, ptr, sizeof(sum));
return 0;
}
@@ -1512,20 +1496,18 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
struct sk_buff *skb = (struct sk_buff *) (long) r1;
bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
int offset = (int) r2;
__sum16 sum, *ptr;
unsigned int offset = (unsigned int) r2;
__sum16 *ptr;
if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
BPF_F_HDR_FIELD_MASK)))
return -EINVAL;
if (unlikely((u32) offset > 0xffff))
if (unlikely(offset > 0xffff || offset & 1))
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
return -EFAULT;
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
if (unlikely(!ptr))
return -EFAULT;
ptr = (__sum16 *)(skb->data + offset);
if (is_mmzero && !*ptr)
return 0;
@@ -1548,10 +1530,6 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
if (is_mmzero && !*ptr)
*ptr = CSUM_MANGLED_0;
if (ptr == &sum)
/* skb_store_bits guaranteed to not return -EFAULT here */
skb_store_bits(skb, offset, ptr, sizeof(sum));
return 0;
}
@@ -1607,9 +1585,6 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
if (skb_at_tc_ingress(skb))
skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
return dev_forward_skb(dev, skb);
}
@@ -1648,6 +1623,8 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
if (unlikely(!skb))
return -ENOMEM;
bpf_push_mac_rcsum(skb);
return flags & BPF_F_INGRESS ?
__bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}
@@ -1693,6 +1670,8 @@ int skb_do_redirect(struct sk_buff *skb)
return -EINVAL;
}
bpf_push_mac_rcsum(skb);
return ri->flags & BPF_F_INGRESS ?
__bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}
@@ -1756,7 +1735,10 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
vlan_proto != htons(ETH_P_8021AD)))
vlan_proto = htons(ETH_P_8021Q);
bpf_push_mac_rcsum(skb);
ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
bpf_pull_mac_rcsum(skb);
bpf_compute_data_end(skb);
return ret;
}
@@ -1776,7 +1758,10 @@ static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
struct sk_buff *skb = (struct sk_buff *) (long) r1;
int ret;
bpf_push_mac_rcsum(skb);
ret = skb_vlan_pop(skb);
bpf_pull_mac_rcsum(skb);
bpf_compute_data_end(skb);
return ret;
}
@@ -2298,7 +2283,7 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
}
#ifdef CONFIG_SOCK_CGROUP_DATA
static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long)r1;
struct bpf_map *map = (struct bpf_map *)(long)r2;
@@ -2321,8 +2306,8 @@ static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp);
}
static const struct bpf_func_proto bpf_skb_in_cgroup_proto = {
.func = bpf_skb_in_cgroup,
static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
.func = bpf_skb_under_cgroup,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
@@ -2402,8 +2387,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_get_smp_processor_id:
return &bpf_get_smp_processor_id_proto;
#ifdef CONFIG_SOCK_CGROUP_DATA
case BPF_FUNC_skb_in_cgroup:
return &bpf_skb_in_cgroup_proto;
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
#endif
default:
return sk_filter_func_proto(func_id);
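The bpf_push_mac_rcsum()/bpf_pull_mac_rcsum() pair introduced above keeps skb->csum consistent for CHECKSUM_COMPLETE packets: whenever the MAC header bytes move into or out of the checksummed region, the running sum must be adjusted by the sum over exactly those bytes. A standalone C sketch of the idea; the 16-bit one's-complement csum_partial() below is a simplified stand-in for the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
	for (size_t i = 0; i < len; i++)
		sum += (i & 1) ? buf[i] : (uint32_t)buf[i] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);   /* end-around carry */
	return sum;
}

int main(void)
{
	uint8_t mac[14] = "0123456789abc";        /* fake 14-byte MAC header */
	uint8_t payload[32];
	uint32_t csum, mac_sum;

	memset(payload, 0x5a, sizeof(payload));
	csum = csum_partial(payload, sizeof(payload), 0);

	/* "push": the MAC header re-enters the checksummed area */
	csum = csum_partial(mac, sizeof(mac), csum);

	/* "pull": remove it again by adding the one's complement */
	mac_sum = csum_partial(mac, sizeof(mac), 0);
	csum = csum_partial(NULL, 0, csum + (~mac_sum & 0xffff));

	printf("after push+pull: 0x%04x, payload only: 0x%04x\n",
	       csum, csum_partial(payload, sizeof(payload), 0));
	return 0;
}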


@@ -2452,9 +2452,7 @@ struct fib_route_iter {
static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
loff_t pos)
{
struct fib_table *tb = iter->main_tb;
struct key_vector *l, **tp = &iter->tnode;
struct trie *t;
t_key key;
/* use cache location of next-to-find key */
@@ -2462,8 +2460,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
pos -= iter->pos;
key = iter->key;
} else {
t = (struct trie *)tb->tb_data;
iter->tnode = t->kv;
iter->pos = 0;
key = 0;
}
@@ -2504,12 +2500,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
return NULL;
iter->main_tb = tb;
t = (struct trie *)tb->tb_data;
iter->tnode = t->kv;
if (*pos != 0)
return fib_route_get_idx(iter, *pos);
t = (struct trie *)tb->tb_data;
iter->tnode = t->kv;
iter->pos = 0;
iter->key = 0;


@@ -370,7 +370,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
tunnel->parms.o_flags, proto, tunnel->parms.o_key,
htonl(tunnel->o_seqno));
skb_set_inner_protocol(skb, proto);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}


@@ -557,6 +557,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
.get_link_net = ip_tunnel_get_link_net,
};
static bool is_vti_tunnel(const struct net_device *dev)
{
return dev->netdev_ops == &vti_netdev_ops;
}
static int vti_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct ip_tunnel *tunnel = netdev_priv(dev);
if (!is_vti_tunnel(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_DOWN:
if (!net_eq(tunnel->net, dev_net(dev)))
xfrm_garbage_collect(tunnel->net);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block vti_notifier_block __read_mostly = {
.notifier_call = vti_device_event,
};
static int __init vti_init(void)
{
const char *msg;
@@ -564,6 +591,8 @@ static int __init vti_init(void)
pr_info("IPv4 over IPsec tunneling driver\n");
register_netdevice_notifier(&vti_notifier_block);
msg = "tunnel device";
err = register_pernet_device(&vti_net_ops);
if (err < 0)
@@ -596,6 +625,7 @@ xfrm_proto_ah_failed:
xfrm_proto_esp_failed:
unregister_pernet_device(&vti_net_ops);
pernet_dev_failed:
unregister_netdevice_notifier(&vti_notifier_block);
pr_err("vti init: failed to register %s\n", msg);
return err;
}
@@ -607,6 +637,7 @@ static void __exit vti_fini(void)
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
unregister_pernet_device(&vti_net_ops);
unregister_netdevice_notifier(&vti_notifier_block);
}
module_init(vti_init);


@@ -3543,7 +3543,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
/* combine the user config with event to determine if permanent
* addresses are to be removed from address hash table
*/
keep_addr = !(how || _keep_addr <= 0);
keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
/* Step 2: clear hash table */
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
@@ -3599,7 +3599,7 @@ restart:
/* re-combine the user config with event to determine if permanent
* addresses are to be removed from the interface list
*/
keep_addr = (!how && _keep_addr > 0);
keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
INIT_LIST_HEAD(&del_list);
list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {


@@ -952,8 +952,10 @@ calipso_opt_insert(struct ipv6_opt_hdr *hop,
memcpy(new, hop, start);
ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def,
secattr);
if (ret_val < 0)
if (ret_val < 0) {
kfree(new);
return ERR_PTR(ret_val);
}
buf_len = start + ret_val;
/* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */
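The calipso hunk is a textbook error-path leak fix: once the replacement buffer has been allocated, a failure from calipso_genopt() must free it before returning. The same pattern in plain C, with hypothetical names; only the cleanup-before-return shape matters:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for calipso_genopt(): fails when the buffer is too small. */
static int genopt(unsigned char *buf, size_t buf_len)
{
	if (buf_len < 8)
		return -1;
	buf[0] = 0x07;          /* pretend to emit an option */
	return 8;
}

static unsigned char *opt_insert(size_t buf_len)
{
	unsigned char *new = calloc(1, buf_len);
	int ret;

	if (!new)
		return NULL;
	ret = genopt(new, buf_len);
	if (ret < 0) {
		free(new);      /* the previously missing kfree(new) */
		return NULL;
	}
	return new;
}

int main(void)
{
	unsigned char *opt = opt_insert(4);       /* force the error path */

	printf("opt_insert(4) -> %s\n", opt ? "ok" : "error, buffer freed");
	free(opt);
	return 0;
}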


@@ -519,8 +519,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
skb_set_inner_protocol(skb, protocol);
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
NEXTHDR_GRE);
}


@@ -55,7 +55,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct icmp6hdr user_icmph;
int addr_type;
struct in6_addr *daddr;
int iif = 0;
int oif = 0;
struct flowi6 fl6;
int err;
struct dst_entry *dst;
@@ -78,25 +78,30 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (u->sin6_family != AF_INET6) {
return -EAFNOSUPPORT;
}
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != u->sin6_scope_id) {
return -EINVAL;
}
daddr = &(u->sin6_addr);
iif = u->sin6_scope_id;
if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
oif = u->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
}
if (!iif)
iif = sk->sk_bound_dev_if;
if (!oif)
oif = sk->sk_bound_dev_if;
if (!oif)
oif = np->sticky_pktinfo.ipi6_ifindex;
if (!oif && ipv6_addr_is_multicast(daddr))
oif = np->mcast_oif;
else if (!oif)
oif = np->ucast_oif;
addr_type = ipv6_addr_type(daddr);
if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
return -EINVAL;
if (addr_type & IPV6_ADDR_MAPPED)
if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
(addr_type & IPV6_ADDR_MAPPED) ||
(oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
return -EINVAL;
/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -106,16 +111,12 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
fl6.flowi6_oif = oif;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
ipc6.tclass = np->tclass;
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
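The reworked ping_v6_sendmsg() above settles on a clear precedence for the outgoing interface: an explicit sin6_scope_id (when the destination needs one), then the socket's bound device, then sticky pktinfo, then the multicast/unicast defaults, and a scope id that contradicts the bound device is now -EINVAL instead of being silently ignored. A condensed C sketch of that selection order; the flat struct is a hypothetical condensation of the socket state:

#include <stdio.h>

struct sock_state {
	int scope_id;           /* from msg_name, 0 if absent */
	int bound_dev_if;       /* sk->sk_bound_dev_if */
	int sticky_ifindex;     /* np->sticky_pktinfo.ipi6_ifindex */
	int mcast_oif, ucast_oif;
	int daddr_is_mcast;
};

static int pick_oif(const struct sock_state *s)
{
	int oif = s->scope_id;

	if (!oif)
		oif = s->bound_dev_if;
	if (!oif)
		oif = s->sticky_ifindex;
	if (!oif)
		oif = s->daddr_is_mcast ? s->mcast_oif : s->ucast_oif;

	if (oif && s->bound_dev_if && oif != s->bound_dev_if)
		return -1;                      /* -EINVAL in the kernel */
	return oif;
}

int main(void)
{
	struct sock_state a = { 3, 0, 0, 0, 7, 0 };
	struct sock_state b = { 3, 4, 0, 0, 7, 0 };

	printf("scope id alone:        oif=%d\n", pick_oif(&a));
	printf("scope id vs bound dev: oif=%d\n", pick_oif(&b));
	return 0;
}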


@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
self->magic = IAS_MAGIC;
self->mode = mode;
if (mode == IAS_CLIENT)
iriap_register_lsap(self, slsap_sel, mode);
if (mode == IAS_CLIENT) {
if (iriap_register_lsap(self, slsap_sel, mode)) {
kfree(self);
return NULL;
}
}
self->confirm = callback;
self->priv = priv;


@@ -869,7 +869,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
/* free all potentially still buffered bcast frames */
local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
skb_queue_purge(&sdata->u.ap.ps.bc_buf);
ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
mutex_lock(&local->mtx);
ieee80211_vif_copy_chanctx_to_vlans(sdata, true);


@@ -1094,7 +1094,7 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
trace_drv_get_expected_throughput(sta);
if (local->ops->get_expected_throughput)
ret = local->ops->get_expected_throughput(sta);
ret = local->ops->get_expected_throughput(&local->hw, sta);
trace_drv_return_u32(local, ret);
return ret;


@@ -881,20 +881,22 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
netif_carrier_off(sdata->dev);
/* flush STAs and mpaths on this iface */
sta_info_flush(sdata);
mesh_path_flush_by_iface(sdata);
/* stop the beacon */
ifmsh->mesh_id_len = 0;
sdata->vif.bss_conf.enable_beacon = false;
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
/* remove beacon */
bcn = rcu_dereference_protected(ifmsh->beacon,
lockdep_is_held(&sdata->wdev.mtx));
RCU_INIT_POINTER(ifmsh->beacon, NULL);
kfree_rcu(bcn, rcu_head);
/* flush STAs and mpaths on this iface */
sta_info_flush(sdata);
mesh_path_flush_by_iface(sdata);
/* free all potentially still buffered group-addressed frames */
local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
skb_queue_purge(&ifmsh->ps.bc_buf);


@@ -1268,7 +1268,7 @@ static void sta_ps_start(struct sta_info *sta)
for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
if (!txqi->tin.backlog_packets)
if (txqi->tin.backlog_packets)
set_bit(tid, &sta->txq_buffered_tids);
else
clear_bit(tid, &sta->txq_buffered_tids);


@@ -771,6 +771,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
clear_sta_flag(sta, WLAN_STA_SP);
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
/* mesh Peer Service Period support */
if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
ieee80211_is_data_qos(fc))
ieee80211_mpsp_trigger_process(
ieee80211_get_qos_ctl(hdr), sta, true, acked);
if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
/*
* The STA is in power save mode, so assume
@@ -781,13 +788,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
/* mesh Peer Service Period support */
if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
ieee80211_is_data_qos(fc))
ieee80211_mpsp_trigger_process(
ieee80211_get_qos_ctl(hdr),
sta, true, acked);
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
(ieee80211_is_data(hdr->frame_control)) &&
(rates_idx != -1))


@@ -368,7 +368,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
skb = skb_dequeue(&ps->bc_buf);
if (skb) {
purged++;
dev_kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
}
total += skb_queue_len(&ps->bc_buf);
}
@@ -451,7 +451,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
ps_dbg(tx->sdata,
"BC TX buffer full - dropping the oldest frame\n");
dev_kfree_skb(skb_dequeue(&ps->bc_buf));
ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
} else
tx->local->total_ps_buffered++;
@@ -4275,7 +4275,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
break;
dev_kfree_skb_any(skb);
ieee80211_free_txskb(hw, skb);
}
info = IEEE80211_SKB_CB(skb);


@@ -574,7 +574,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
helper = rcu_dereference(nfct_help(expect->master)->helper);
if (helper) {
seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
if (helper->expect_policy[expect->class].name)
if (helper->expect_policy[expect->class].name[0])
seq_printf(s, "/%s",
helper->expect_policy[expect->class].name);
}

View File

@@ -1473,7 +1473,8 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
"timeout to %u seconds for",
info->timeout);
nf_ct_dump_tuple(&exp->tuple);
mod_timer(&exp->timeout, jiffies + info->timeout * HZ);
mod_timer_pending(&exp->timeout,
jiffies + info->timeout * HZ);
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}

View File

@@ -1894,6 +1894,8 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
return -EINVAL;
if (otuple.dst.protonum != rtuple.dst.protonum)
return -EINVAL;
ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
&rtuple, u3);
@@ -2362,12 +2364,8 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
return PTR_ERR(exp);
err = nf_ct_expect_related_report(exp, portid, report);
if (err < 0) {
nf_ct_expect_put(exp);
return err;
}
return 0;
nf_ct_expect_put(exp);
return err;
}
static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,

View File

@@ -1383,7 +1383,7 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
return NF_DROP;
}
cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
if (!cseq) {
if (!cseq && *(*dptr + matchoff) != '0') {
nf_ct_helper_log(skb, ct, "cannot get cseq");
return NF_DROP;
}
@@ -1446,7 +1446,7 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
return NF_DROP;
}
cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
if (!cseq) {
if (!cseq && *(*dptr + matchoff) != '0') {
nf_ct_helper_log(skb, ct, "cannot get cseq");
return NF_DROP;
}
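Both SIP hunks fix the same ambiguity: simple_strtoul() returns 0 when parsing fails and when the CSeq genuinely is "0", so a zero result is only an error if the field does not start with '0'. Demonstrated below with user-space strtoul() standing in for simple_strtoul():

#include <stdio.h>
#include <stdlib.h>

static int parse_cseq(const char *dptr, unsigned long *cseq)
{
	*cseq = strtoul(dptr, NULL, 10);
	if (!*cseq && *dptr != '0')
		return -1;          /* "cannot get cseq" -> NF_DROP */
	return 0;
}

int main(void)
{
	const char *cases[] = { "42 INVITE", "0 ACK", "garbage" };

	for (int i = 0; i < 3; i++) {
		unsigned long cseq;

		if (parse_cseq(cases[i], &cseq))
			printf("%-10s -> drop\n", cases[i]);
		else
			printf("%-10s -> cseq=%lu\n", cases[i], cseq);
	}
	return 0;
}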

View File

@@ -1145,10 +1145,8 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
int err;
queue = instance_lookup(q, queue_num);
if (!queue)
queue = verdict_instance_lookup(q, queue_num,
NETLINK_CB(skb).portid);
queue = verdict_instance_lookup(q, queue_num,
NETLINK_CB(skb).portid);
if (IS_ERR(queue))
return PTR_ERR(queue);


@@ -59,6 +59,7 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
u32 offset, len;
if (tb[NFTA_EXTHDR_DREG] == NULL ||
tb[NFTA_EXTHDR_TYPE] == NULL ||
@@ -66,9 +67,15 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
tb[NFTA_EXTHDR_LEN] == NULL)
return -EINVAL;
offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
if (offset > U8_MAX || len > U8_MAX)
return -ERANGE;
priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
priv->offset = offset;
priv->len = len;
priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
return nft_validate_register_store(ctx, priv->dreg, NULL,
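The nft_exthdr change is a range-validation fix: the offset and length arrive as 32-bit netlink attributes but are stored in u8 fields, so anything above U8_MAX must be rejected with -ERANGE rather than silently truncated into a different, valid-looking match. A standalone C sketch; the parsing harness is a hypothetical stand-in for ntohl(nla_get_be32(...)):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define U8_MAX 0xffU

struct exthdr_priv { uint8_t type, offset, len; };

static int exthdr_init(struct exthdr_priv *priv,
		       uint32_t offset, uint32_t len, uint8_t type)
{
	if (offset > U8_MAX || len > U8_MAX)
		return -ERANGE;     /* previously truncated to u8 silently */

	priv->type   = type;
	priv->offset = (uint8_t)offset;
	priv->len    = (uint8_t)len;
	return 0;
}

int main(void)
{
	struct exthdr_priv priv;

	printf("offset=8,   len=4 -> %d\n", exthdr_init(&priv, 8, 4, 0));
	printf("offset=300, len=4 -> %d\n", exthdr_init(&priv, 300, 4, 0));
	return 0;
}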


@@ -70,7 +70,6 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
} else if (d > 0)
parent = parent->rb_right;
else {
found:
if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = parent->rb_left;
continue;
@@ -84,9 +83,12 @@ found:
}
}
if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
rbe = interval;
goto found;
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
nft_set_elem_active(&interval->ext, genmask) &&
!nft_rbtree_interval_end(interval)) {
spin_unlock_bh(&nft_rbtree_lock);
*ext = &interval->ext;
return true;
}
out:
spin_unlock_bh(&nft_rbtree_lock);

View File

@@ -433,7 +433,6 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
unsigned int dataoff;
u8 protonum;
@@ -458,13 +457,8 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
ct = nf_ct_tuplehash_to_ctrack(h);
ctinfo = ovs_ct_get_info(h);
if (ctinfo == IP_CT_NEW) {
/* This should not happen. */
WARN_ONCE(1, "ovs_ct_find_existing: new packet for %p\n", ct);
}
skb->nfct = &ct->ct_general;
skb->nfctinfo = ctinfo;
skb->nfctinfo = ovs_ct_get_info(h);
return ct;
}


@@ -93,7 +93,14 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms)
return ERR_CAST(dev);
}
dev_change_flags(dev, dev->flags | IFF_UP);
err = dev_change_flags(dev, dev->flags | IFF_UP);
if (err < 0) {
rtnl_delete_link(dev);
rtnl_unlock();
ovs_vport_free(vport);
goto error;
}
rtnl_unlock();
return vport;
error:


@@ -54,6 +54,7 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
struct net *net = ovs_dp_get_net(parms->dp);
struct net_device *dev;
struct vport *vport;
int err;
vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
if (IS_ERR(vport))
@@ -67,9 +68,15 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
return ERR_CAST(dev);
}
dev_change_flags(dev, dev->flags | IFF_UP);
rtnl_unlock();
err = dev_change_flags(dev, dev->flags | IFF_UP);
if (err < 0) {
rtnl_delete_link(dev);
rtnl_unlock();
ovs_vport_free(vport);
return ERR_PTR(err);
}
rtnl_unlock();
return vport;
}


@@ -140,7 +140,7 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
{
dev->needed_headroom = new_hr;
dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
}
static const struct net_device_ops internal_dev_netdev_ops = {


@@ -130,7 +130,14 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
return ERR_CAST(dev);
}
dev_change_flags(dev, dev->flags | IFF_UP);
err = dev_change_flags(dev, dev->flags | IFF_UP);
if (err < 0) {
rtnl_delete_link(dev);
rtnl_unlock();
ovs_vport_free(vport);
goto error;
}
rtnl_unlock();
return vport;
error:


@@ -425,6 +425,7 @@ struct rxrpc_call {
spinlock_t lock;
rwlock_t state_lock; /* lock for state transition */
atomic_t usage;
atomic_t skb_count; /* Outstanding packets on this call */
atomic_t sequence; /* Tx data packet sequence counter */
u32 local_abort; /* local abort code */
u32 remote_abort; /* remote abort code */


@@ -130,6 +130,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
call->state = RXRPC_CALL_SERVER_ACCEPTING;
list_add_tail(&call->accept_link, &rx->acceptq);
rxrpc_get_call(call);
atomic_inc(&call->skb_count);
nsp = rxrpc_skb(notification);
nsp->call = call;


@@ -460,6 +460,7 @@ static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
ASSERTCMP(sp->call, ==, NULL);
sp->call = call;
rxrpc_get_call(call);
atomic_inc(&call->skb_count);
/* insert into the buffer in sequence order */
spin_lock_bh(&call->lock);
@@ -734,6 +735,7 @@ all_acked:
skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
sp->call = call;
rxrpc_get_call(call);
atomic_inc(&call->skb_count);
spin_lock_bh(&call->lock);
if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
BUG();
@@ -793,6 +795,7 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
sp->error = error;
sp->call = call;
rxrpc_get_call(call);
atomic_inc(&call->skb_count);
spin_lock_bh(&call->lock);
ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
@@ -834,6 +837,9 @@ void rxrpc_process_call(struct work_struct *work)
return;
}
if (!call->conn)
goto skip_msg_init;
/* there's a good chance we're going to have to send a message, so set
* one up in advance */
msg.msg_name = &call->conn->params.peer->srx.transport;
@@ -856,6 +862,7 @@ void rxrpc_process_call(struct work_struct *work)
memset(iov, 0, sizeof(iov));
iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr);
skip_msg_init:
/* deal with events of a final nature */
if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {


@@ -275,6 +275,7 @@ error:
list_del_init(&call->link);
write_unlock_bh(&rxrpc_call_lock);
set_bit(RXRPC_CALL_RELEASED, &call->flags);
call->state = RXRPC_CALL_DEAD;
rxrpc_put_call(call);
_leave(" = %d", ret);
@@ -287,6 +288,7 @@ error:
*/
found_user_ID_now_present:
write_unlock(&rx->call_lock);
set_bit(RXRPC_CALL_RELEASED, &call->flags);
call->state = RXRPC_CALL_DEAD;
rxrpc_put_call(call);
_leave(" = -EEXIST [%p]", call);
@@ -491,15 +493,9 @@ void rxrpc_release_call(struct rxrpc_call *call)
spin_lock_bh(&call->lock);
while ((skb = skb_dequeue(&call->rx_queue)) ||
(skb = skb_dequeue(&call->rx_oos_queue))) {
sp = rxrpc_skb(skb);
if (sp->call) {
ASSERTCMP(sp->call, ==, call);
rxrpc_put_call(call);
sp->call = NULL;
}
skb->destructor = NULL;
spin_unlock_bh(&call->lock);
sp = rxrpc_skb(skb);
_debug("- zap %s %%%u #%u",
rxrpc_pkts[sp->hdr.type],
sp->hdr.serial, sp->hdr.seq);
@@ -605,6 +601,7 @@ void __rxrpc_put_call(struct rxrpc_call *call)
if (atomic_dec_and_test(&call->usage)) {
_debug("call %d dead", call->debug_id);
WARN_ON(atomic_read(&call->skb_count) != 0);
ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
rxrpc_queue_work(&call->destroyer);
}
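The rxrpc hunks all serve one piece of bookkeeping: every skb that carries a call pointer takes a reference on the call and bumps the new skb_count, so the final put can warn if any packet outlived the call. A toy C model of that pairing; the miniature call object and helpers are hypothetical:

#include <assert.h>
#include <stdio.h>

struct call { int usage, skb_count; };

static void skb_attach(struct call *call)
{
	call->usage++;          /* rxrpc_get_call() */
	call->skb_count++;      /* atomic_inc(&call->skb_count) */
}

static void skb_consume(struct call *call)
{
	call->skb_count--;
	call->usage--;          /* rxrpc_put_call() */
}

static void put_call(struct call *call)
{
	if (--call->usage == 0) {
		/* WARN_ON(atomic_read(&call->skb_count) != 0) */
		assert(call->skb_count == 0);
		printf("call dead, no packets leaked\n");
	}
}

int main(void)
{
	struct call call = { 1, 0 };    /* creator's reference */

	skb_attach(&call);
	skb_attach(&call);
	skb_consume(&call);
	skb_consume(&call);
	put_call(&call);
	return 0;
}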
