
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Free AF_PACKET po->rollover properly, from Willem de Bruijn.

 2) Read SFP eeprom in max 16 byte increments to avoid problems with
    some SFP modules, from Russell King.

 3) Fix UDP socket lookup wrt. VRF, from Tim Beale.

 4) Handle route invalidation properly in s390 qeth driver, from Julian
    Wiedmann.

 5) Memory leak on unload in RDS, from Zhu Yanjun.

 6) sctp_process_init leak, from Neil Horman.

 7) Fix fib_rules rule insertion semantic change that broke Android,
    from Hangbin Liu.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (33 commits)
  pktgen: do not sleep with the thread lock held.
  net: mvpp2: Use strscpy to handle stat strings
  net: rds: fix memory leak in rds_ib_flush_mr_pool
  ipv6: fix EFAULT on sendto with icmpv6 and hdrincl
  ipv6: use READ_ONCE() for inet->hdrincl as in ipv4
  Revert "fib_rules: return 0 directly if an exactly same rule exists when NLM_F_EXCL not supplied"
  net: aquantia: fix wol configuration not applied sometimes
  ethtool: fix potential userspace buffer overflow
  Fix memory leak in sctp_process_init
  net: rds: fix memory leak when unload rds_rdma
  ipv6: fix the check before getting the cookie in rt6_get_cookie
  ipv4: not do cache for local delivery if bc_forwarding is enabled
  s390/qeth: handle error when updating TX queue count
  s390/qeth: fix VLAN attribute in bridge_hostnotify udev event
  s390/qeth: check dst entry before use
  s390/qeth: handle limited IPv4 broadcast in L3 TX path
  net: fix indirect calls helpers for ptype list hooks.
  net: ipvlan: Fix ipvlan device tso disabled while NETIF_F_IP_CSUM is set
  udp: only choose unbound UDP socket for multicast when not in a VRF
  net/tls: replace the sleeping lock around RX resync with a bit lock
  ...
Linus Torvalds 2019-06-07 09:29:14 -07:00
commit 1e1d926369
34 changed files with 218 additions and 131 deletions

View File

@@ -17312,7 +17312,7 @@ F: Documentation/ABI/stable/sysfs-hypervisor-xen
F: Documentation/ABI/testing/sysfs-hypervisor-xen
XEN NETWORK BACKEND DRIVER
M: Wei Liu <wei.liu2@citrix.com>
M: Wei Liu <wei.liu@kernel.org>
M: Paul Durrant <paul.durrant@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: netdev@vger.kernel.org

View File

@@ -1388,7 +1388,7 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
int err;
if (!vid)
return -EINVAL;
return -EOPNOTSUPP;
entry->vid = vid - 1;
entry->valid = false;

View File

@@ -652,16 +652,6 @@ static int sja1105_speed[] = {
[SJA1105_SPEED_1000MBPS] = 1000,
};
static sja1105_speed_t sja1105_get_speed_cfg(unsigned int speed_mbps)
{
int i;
for (i = SJA1105_SPEED_AUTO; i <= SJA1105_SPEED_1000MBPS; i++)
if (sja1105_speed[i] == speed_mbps)
return i;
return -EINVAL;
}
/* Set link speed and enable/disable traffic I/O in the MAC configuration
* for a specific port.
*
@@ -684,8 +674,21 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
speed = sja1105_get_speed_cfg(speed_mbps);
if (speed_mbps && speed < 0) {
switch (speed_mbps) {
case 0:
/* No speed update requested */
speed = SJA1105_SPEED_AUTO;
break;
case 10:
speed = SJA1105_SPEED_10MBPS;
break;
case 100:
speed = SJA1105_SPEED_100MBPS;
break;
case 1000:
speed = SJA1105_SPEED_1000MBPS;
break;
default:
dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
return -EINVAL;
}
@@ -695,10 +698,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* and we no longer need to store it in the static config (already told
* hardware we want auto during upload phase).
*/
if (speed_mbps)
mac[port].speed = speed;
else
mac[port].speed = SJA1105_SPEED_AUTO;
mac[port].speed = speed;
/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
* tables. On E/T, MAC reconfig tables are not readable, only writable.
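Note: the hunks above replace a reverse lookup through the sja1105_speed[] array with an explicit switch, which also folds 0 Mbps ("no speed update requested") into the same mapping as SPEED_AUTO. A minimal standalone sketch of that mapping; the enum names mirror the driver's but are hypothetical here:

#include <stdio.h>

/* Hypothetical mirror of the driver's speed encoding. */
enum speed_cfg {
    SPEED_AUTO = 0,
    SPEED_10MBPS,
    SPEED_100MBPS,
    SPEED_1000MBPS,
};

/* Map a requested link speed in Mbps to the device encoding.
 * 0 means "no speed update requested" and maps to AUTO;
 * any other unknown value is rejected, as in the driver.
 */
static int speed_to_cfg(unsigned int speed_mbps)
{
    switch (speed_mbps) {
    case 0:    return SPEED_AUTO;
    case 10:   return SPEED_10MBPS;
    case 100:  return SPEED_100MBPS;
    case 1000: return SPEED_1000MBPS;
    default:   return -1; /* -EINVAL in the kernel */
    }
}

int main(void)
{
    printf("%d %d\n", speed_to_cfg(100), speed_to_cfg(2500));
    return 0;
}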

View File

@@ -335,13 +335,13 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
{
u32 val;
int err = 0;
bool is_locked;
is_locked = hw_atl_sem_ram_get(self);
if (!is_locked) {
err = -ETIME;
err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
val, val == 1U,
10U, 100000U);
if (err < 0)
goto err_exit;
}
if (IS_CHIP_FEATURE(REVISION_B1)) {
u32 offset = 0;
@@ -353,8 +353,8 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
/* 1000 times by 10us = 10ms */
err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
self, val,
(val & 0xF0000000) ==
0x80000000,
(val & 0xF0000000) !=
0x80000000,
10U, 10000U);
}
} else {
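Note: readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) from linux/iopoll.h re-reads op(addr) into val until cond holds or timeout_us elapses, returning 0 or -ETIMEDOUT; the fix uses it instead of a single hw_atl_sem_ram_get() check, so a briefly busy semaphore no longer fails the upload outright. A rough userspace analogue of that polling loop, with assumed semantics and hypothetical names:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Rough analogue of readx_poll_timeout_atomic(): poll read_fn(arg)
 * every delay_us microseconds until cond(val) holds or timeout_us
 * elapses. Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int poll_timeout(unsigned int (*read_fn)(void *), void *arg,
                        unsigned int *val, int (*cond)(unsigned int),
                        unsigned long delay_us, unsigned long timeout_us)
{
    unsigned long waited = 0;

    for (;;) {
        *val = read_fn(arg);
        if (cond(*val))
            return 0;
        if (waited >= timeout_us)
            return -ETIMEDOUT;
        usleep(delay_us);
        waited += delay_us;
    }
}

/* Fake hardware semaphore that becomes free after a few reads. */
static unsigned int fake_sem_get(void *arg)
{
    int *reads_left = arg;
    return (*reads_left)-- <= 0 ? 1U : 0U;
}

static int is_acquired(unsigned int v) { return v == 1U; }

int main(void)
{
    int reads_left = 3;
    unsigned int val;
    int err = poll_timeout(fake_sem_get, &reads_left, &val,
                           is_acquired, 10, 100000);
    printf("err=%d val=%u\n", err, val);
    return 0;
}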

View File

@@ -384,7 +384,7 @@ static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
self, val,
val & HW_ATL_FW2X_CTRL_SLEEP_PROXY,
1U, 10000U);
1U, 100000U);
err_exit:
return err;
@@ -404,6 +404,8 @@ static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
msg = (struct fw2x_msg_wol *)rpc;
memset(msg, 0, sizeof(*msg));
msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
msg->magic_packet_enabled = true;
memcpy(msg->hw_addr, mac, ETH_ALEN);

View File

@@ -335,6 +335,7 @@ static int __lb_setup(struct net_device *ndev,
static int __lb_up(struct net_device *ndev,
enum hnae_loop loop_mode)
{
#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
int speed, duplex;
@@ -361,6 +362,9 @@ static int __lb_up(struct net_device *ndev,
h->dev->ops->adjust_link(h, speed, duplex);
/* wait adjust link done and phy ready */
msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME);
return 0;
}

View File

@@ -1304,8 +1304,8 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
int i;
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
memcpy(data + i * ETH_GSTRING_LEN,
&mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
strscpy(data + i * ETH_GSTRING_LEN,
mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
}
}
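Note: the memcpy() here always read ETH_GSTRING_LEN bytes from the source, running past the end of any stat-name literal shorter than the buffer; strscpy() stops at the source's NUL and always terminates the destination. A standalone sketch of the bounded-copy behavior, where bounded_copy() is a simplified stand-in for the kernel's strscpy():

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32 /* stands in for ETH_GSTRING_LEN */

/* Simplified stand-in for strscpy(): copy at most size-1 bytes,
 * always NUL-terminate, return the copied length or -1 when the
 * source was truncated. Unlike a fixed-size memcpy(), it never
 * reads past the source string's NUL.
 */
static long bounded_copy(char *dst, const char *src, size_t size)
{
    size_t len = strnlen(src, size);

    if (len == size) {
        len = size - 1;
        memcpy(dst, src, len);
        dst[len] = '\0';
        return -1;
    }
    memcpy(dst, src, len + 1);
    return (long)len;
}

int main(void)
{
    char stat[GSTRING_LEN];

    bounded_copy(stat, "rx_bytes", sizeof(stat));
    printf("%s\n", stat);
    return 0;
}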

View File

@@ -1778,6 +1778,7 @@ static void mtk_poll_controller(struct net_device *dev)
static int mtk_start_dma(struct mtk_eth *eth)
{
u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
int err;
err = mtk_dma_init(eth);
@@ -1794,7 +1795,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
MTK_QDMA_GLO_CFG);
mtk_w32(eth,
MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
MTK_PDMA_GLO_CFG);
@@ -2298,13 +2299,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
if (dev->features & NETIF_F_LRO) {
if (dev->hw_features & NETIF_F_LRO) {
cmd->data = MTK_MAX_RX_RING_NUM;
ret = 0;
}
break;
case ETHTOOL_GRXCLSRLCNT:
if (dev->features & NETIF_F_LRO) {
if (dev->hw_features & NETIF_F_LRO) {
struct mtk_mac *mac = netdev_priv(dev);
cmd->rule_cnt = mac->hwlro_ip_cnt;
@@ -2312,11 +2313,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
}
break;
case ETHTOOL_GRXCLSRULE:
if (dev->features & NETIF_F_LRO)
if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_entry(dev, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
if (dev->features & NETIF_F_LRO)
if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_all(dev, cmd,
rule_locs);
break;
@@ -2333,11 +2334,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
if (dev->features & NETIF_F_LRO)
if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_add_ipaddr(dev, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
if (dev->features & NETIF_F_LRO)
if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_del_ipaddr(dev, cmd);
break;
default:

View File

@@ -643,7 +643,7 @@ void cpsw_get_ringparam(struct net_device *ndev,
struct cpsw_common *cpsw = priv->cpsw;
/* not supported */
ering->tx_max_pending = 0;
ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);

View File

@@ -107,7 +107,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
}
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
(NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

View File

@@ -1073,6 +1073,7 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
int phylink_ethtool_ksettings_set(struct phylink *pl,
const struct ethtool_link_ksettings *kset)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct ethtool_link_ksettings our_kset;
struct phylink_link_state config;
int ret;
@@ -1083,11 +1084,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
kset->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
linkmode_copy(support, pl->supported);
config = pl->link_config;
/* Mask out unsupported advertisements */
linkmode_and(config.advertising, kset->link_modes.advertising,
pl->supported);
support);
/* FIXME: should we reject autoneg if phy/mac does not support it? */
if (kset->base.autoneg == AUTONEG_DISABLE) {
@@ -1097,7 +1099,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* duplex.
*/
s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
pl->supported, false);
support, false);
if (!s)
return -EINVAL;
@@ -1126,7 +1128,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
}
if (phylink_validate(pl, pl->supported, &config))
if (phylink_validate(pl, support, &config))
return -EINVAL;
/* If autonegotiation is enabled, we must have an advertisement */
@@ -1576,6 +1578,7 @@ static int phylink_sfp_module_insert(void *upstream,
{
struct phylink *pl = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
struct phylink_link_state config;
phy_interface_t iface;
int ret = 0;
@@ -1603,6 +1606,8 @@ static int phylink_sfp_module_insert(void *upstream,
return ret;
}
linkmode_copy(support1, support);
iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
if (iface == PHY_INTERFACE_MODE_NA) {
netdev_err(pl->netdev,
@@ -1612,7 +1617,7 @@ static int phylink_sfp_module_insert(void *upstream,
}
config.interface = iface;
ret = phylink_validate(pl, support, &config);
ret = phylink_validate(pl, support1, &config);
if (ret) {
netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
phylink_an_mode_str(MLO_AN_INBAND),
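Note: phylink_validate() may clear bits in the mask it is handed, so validating pl->supported directly would permanently erase device capabilities; both hunks above validate a scratch copy instead. A tiny sketch of the copy-before-validate idea with a plain bitmask (names hypothetical):

#include <stdio.h>

/* A validator that, like phylink_validate(), may clear bits
 * from the mask it is handed to reflect what is achievable.
 */
static int validate(unsigned long *mask)
{
    *mask &= 0x0f; /* pretend only the low modes are achievable */
    return *mask ? 0 : -1;
}

int main(void)
{
    unsigned long supported = 0xff; /* device capabilities */
    unsigned long tmp = supported;  /* validate a copy ... */

    if (validate(&tmp))
        return 1;
    /* ... so the stored capabilities stay intact */
    printf("supported=%#lx tmp=%#lx\n", supported, tmp);
    return 0;
}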

View File

@@ -281,6 +281,7 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
{
struct i2c_msg msgs[2];
u8 bus_addr = a2 ? 0x51 : 0x50;
size_t this_len;
int ret;
msgs[0].addr = bus_addr;
@@ -292,11 +293,26 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
msgs[1].len = len;
msgs[1].buf = buf;
ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
if (ret < 0)
return ret;
while (len) {
this_len = len;
if (this_len > 16)
this_len = 16;
return ret == ARRAY_SIZE(msgs) ? len : 0;
msgs[1].len = this_len;
ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
if (ret < 0)
return ret;
if (ret != ARRAY_SIZE(msgs))
break;
msgs[1].buf += this_len;
dev_addr += this_len;
len -= this_len;
}
return msgs[1].buf - (u8 *)buf;
}
static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
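Note: some SFP modules NAK EEPROM reads longer than 16 bytes, so the read is now issued in at-most-16-byte transfers, advancing the buffer and device address each time and returning the number of bytes actually read. The same chunking loop in standalone form, where read_block() is a hypothetical transport standing in for i2c_transfer():

#include <stdio.h>
#include <string.h>

#define CHUNK 16 /* some SFP modules NAK larger reads */

static unsigned char eeprom[256];

/* Hypothetical transport: copy up to len bytes at addr from a
 * fake 256-byte EEPROM; returns bytes read or <0 on error.
 */
static int read_block(unsigned int addr, void *buf, size_t len)
{
    if (addr >= sizeof(eeprom))
        return -1;
    if (len > sizeof(eeprom) - addr)
        len = sizeof(eeprom) - addr;
    memcpy(buf, eeprom + addr, len);
    return (int)len;
}

/* Chunked read, mirroring the sfp_i2c_read() fix: never ask the
 * device for more than CHUNK bytes per transfer.
 */
static int chunked_read(unsigned int addr, unsigned char *buf, size_t len)
{
    unsigned char *p = buf;

    while (len) {
        size_t this_len = len > CHUNK ? CHUNK : len;
        int ret = read_block(addr, p, this_len);

        if (ret < 0)
            return ret;
        if (ret == 0)
            break; /* short transfer: stop early */
        p += ret;
        addr += ret;
        len -= ret;
    }
    return (int)(p - buf);
}

int main(void)
{
    unsigned char buf[96];

    printf("read %d bytes\n", chunked_read(0, buf, sizeof(buf)));
    return 0;
}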

View File

@@ -1274,16 +1274,20 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
return 0;
}
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int count = single ? 1 : card->dev->num_tx_queues;
int rc;
rtnl_lock();
netif_set_real_num_tx_queues(card->dev, count);
rc = netif_set_real_num_tx_queues(card->dev, count);
rtnl_unlock();
if (rc)
return rc;
if (card->qdio.no_out_queues == count)
return;
return 0;
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
@@ -1293,12 +1297,14 @@ static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
card->qdio.no_out_queues = count;
return 0;
}
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
int rc = 0;
QETH_DBF_TEXT(SETUP, 2, "chp_desc");
@@ -1311,12 +1317,12 @@ static int qeth_update_from_chp_desc(struct qeth_card *card)
if (IS_OSD(card) || IS_OSX(card))
/* CHPP field bit 6 == 1 -> single queue */
qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
kfree(chp_dsc);
QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
return 0;
return rc;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -5597,8 +5603,12 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
if (IS_IQD(card)) {
netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ);
dev->features |= NETIF_F_SG;
if (netif_set_real_num_tx_queues(dev,
QETH_IQD_MIN_TXQ)) {
free_netdev(dev);
return NULL;
}
}
}

View File

@@ -1680,7 +1680,7 @@ static void qeth_bridgeport_an_set_cb(void *priv,
l2entry = (struct qdio_brinfo_entry_l2 *)entry;
code = IPA_ADDR_CHANGE_CODE_MACADDR;
if (l2entry->addr_lnid.lnid)
if (l2entry->addr_lnid.lnid < VLAN_N_VID)
code |= IPA_ADDR_CHANGE_CODE_VLANID;
qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
(struct net_if_token *)&l2entry->nit,

View File

@@ -1888,13 +1888,20 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int qeth_l3_get_cast_type(struct sk_buff *skb)
{
int ipv = qeth_get_ip_version(skb);
struct neighbour *n = NULL;
struct dst_entry *dst;
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
n = dst_neigh_lookup_skb(dst, skb);
if (dst) {
struct rt6_info *rt = (struct rt6_info *) dst;
dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
if (dst)
n = dst_neigh_lookup_skb(dst, skb);
}
if (n) {
int cast_type = n->type;
@@ -1909,8 +1916,10 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
rcu_read_unlock();
/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
switch (qeth_get_ip_version(skb)) {
switch (ipv) {
case 4:
if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
return RTN_BROADCAST;
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
case 6:
@@ -1940,6 +1949,7 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
struct qeth_card *card = queue->card;
struct dst_entry *dst;
hdr->hdr.l3.length = data_len;
@@ -1985,15 +1995,27 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
}
rcu_read_lock();
dst = skb_dst(skb);
if (ipv == 4) {
struct rtable *rt = skb_rtable(skb);
struct rtable *rt;
if (dst)
dst = dst_check(dst, 0);
rt = (struct rtable *) dst;
*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
rt_nexthop(rt, ip_hdr(skb)->daddr) :
ip_hdr(skb)->daddr;
} else {
/* IPv6 */
const struct rt6_info *rt = skb_rt6_info(skb);
struct rt6_info *rt;
if (dst) {
rt = (struct rt6_info *) dst;
dst = dst_check(dst, rt6_get_cookie(rt));
}
rt = (struct rt6_info *) dst;
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;

View File

@@ -20,18 +20,6 @@
#define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull
#define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull
enum sja1105_frame_type {
SJA1105_FRAME_TYPE_NORMAL = 0,
SJA1105_FRAME_TYPE_LINK_LOCAL,
};
struct sja1105_skb_cb {
enum sja1105_frame_type type;
};
#define SJA1105_SKB_CB(skb) \
((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
struct sja1105_port {
struct dsa_port *dp;
int mgmt_slot;

View File

@@ -259,8 +259,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
rcu_read_lock();
from = rcu_dereference(rt->from);
if (from && (rt->rt6i_flags & RTF_PCPU ||
unlikely(!list_empty(&rt->rt6i_uncached))))
if (from)
fib6_get_cookie_safe(from, &cookie);
rcu_read_unlock();

View File

@@ -209,6 +209,10 @@ struct tls_offload_context_tx {
(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
enum tls_context_flags {
TLS_RX_SYNC_RUNNING = 0,
};
struct cipher_context {
char *iv;
char *rec_seq;

View File

@@ -5021,12 +5021,12 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
if (list_empty(head))
return;
if (pt_prev->list_func != NULL)
pt_prev->list_func(head, pt_prev, orig_dev);
INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
ip_list_rcv, head, pt_prev, orig_dev);
else
list_for_each_entry_safe(skb, next, head, list) {
skb_list_del_init(skb);
INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
skb->dev, pt_prev, orig_dev);
pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
}
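Note: INDIRECT_CALL_INET() compares the function pointer against the two expected targets (the IPv6 and IPv4 receive handlers) and calls them directly on a match, so the common case avoids a retpoline-penalized indirect branch; the fix above applies the helper to the list_func hook as well. A userspace sketch of the devirtualization trick, with a hypothetical macro and handlers:

#include <stdio.h>

static void ip4_handler(int skb) { printf("v4 %d\n", skb); }
static void ip6_handler(int skb) { printf("v6 %d\n", skb); }

/* Sketch of an INDIRECT_CALL_2()-style helper: if the pointer
 * matches a likely target, make a direct (devirtualized) call;
 * otherwise fall back to the plain indirect call.
 */
#define INDIRECT_CALL_2(fn, f1, f2, ...)        \
    ((fn) == (f1) ? f1(__VA_ARGS__) :           \
     (fn) == (f2) ? f2(__VA_ARGS__) : (fn)(__VA_ARGS__))

int main(void)
{
    void (*deliver)(int) = ip6_handler;

    INDIRECT_CALL_2(deliver, ip6_handler, ip4_handler, 42);
    return 0;
}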

View File

@@ -1355,13 +1355,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (!regbuf)
return -ENOMEM;
if (regs.len < reglen)
reglen = regs.len;
ops->get_regs(dev, &regs, regbuf);
ret = -EFAULT;
if (copy_to_user(useraddr, &regs, sizeof(regs)))
goto out;
useraddr += offsetof(struct ethtool_regs, data);
if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
if (copy_to_user(useraddr, regbuf, reglen))
goto out;
ret = 0;
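Note: the driver dumps reglen bytes into a kernel buffer, but userspace may have passed a regs.len smaller than that; copying reglen bytes back would overflow the user buffer. The fix clamps the copy length to what the caller claimed. A standalone sketch of the clamp, with hypothetical names and memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the ethtool_get_regs() fix: never copy more into the
 * caller's buffer than the length the caller claimed it has.
 */
static int get_regs(void *user_buf, size_t user_len)
{
    size_t reglen = 256; /* what the driver wants to dump */
    char *regbuf = calloc(1, reglen);

    if (!regbuf)
        return -1;
    /* ... driver fills regbuf ... */
    if (user_len < reglen)
        reglen = user_len; /* the clamp added by the fix */
    memcpy(user_buf, regbuf, reglen); /* copy_to_user() in kernel */
    free(regbuf);
    return 0;
}

int main(void)
{
    char small[64]; /* userspace claimed only 64 bytes */

    printf("%d\n", get_regs(small, sizeof(small)));
    return 0;
}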

View File

@@ -757,9 +757,9 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
goto errout;
if (rule_exists(ops, frh, tb, rule)) {
if (nlh->nlmsg_flags & NLM_F_EXCL)
err = -EEXIST;
if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
rule_exists(ops, frh, tb, rule)) {
err = -EEXIST;
goto errout_free;
}

View File

@@ -3059,7 +3059,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
{
while (thread_is_running(t)) {
/* note: 't' will still be around even after the unlock/lock
* cycle because pktgen_thread threads are only cleared at
* net exit
*/
mutex_unlock(&pktgen_thread_lock);
msleep_interruptible(100);
mutex_lock(&pktgen_thread_lock);
if (signal_pending(current))
goto signal;
@@ -3074,6 +3080,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
struct pktgen_thread *t;
int sig = 1;
/* prevent from racing with rmmod */
if (!try_module_get(THIS_MODULE))
return sig;
mutex_lock(&pktgen_thread_lock);
list_for_each_entry(t, &pn->pktgen_threads, th_list) {
@@ -3087,6 +3097,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
t->control |= (T_STOP);
mutex_unlock(&pktgen_thread_lock);
module_put(THIS_MODULE);
return sig;
}
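Note: two fixes here — the wait loop now drops pktgen_thread_lock around its 100 ms sleep (safe because pktgen threads are only freed at net exit), and try_module_get()/module_put() stop rmmod from racing with the wait. A pthread sketch of the unlock-sleep-relock pattern (userspace analogue, hypothetical names; link with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int running = 1;

/* Sketch of the pktgen fix: never sleep while holding the lock;
 * drop it, sleep, and retake it before re-checking the condition.
 */
static void wait_until_stopped(void)
{
    pthread_mutex_lock(&lock);
    while (running) {
        pthread_mutex_unlock(&lock);
        usleep(100 * 1000); /* msleep_interruptible(100) */
        pthread_mutex_lock(&lock);
    }
    pthread_mutex_unlock(&lock);
}

static void *stopper(void *arg)
{
    (void)arg;
    usleep(300 * 1000);
    pthread_mutex_lock(&lock);
    running = 0;
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, stopper, NULL);
    wait_until_stopped();
    pthread_join(t, NULL);
    puts("stopped");
    return 0;
}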

View File

@@ -28,14 +28,10 @@ static inline bool sja1105_is_link_local(const struct sk_buff *skb)
*/
static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
{
if (sja1105_is_link_local(skb)) {
SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_LINK_LOCAL;
if (sja1105_is_link_local(skb))
return true;
}
if (!dsa_port_is_vlan_filtering(dev->dsa_ptr)) {
SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_NORMAL;
if (!dsa_port_is_vlan_filtering(dev->dsa_ptr))
return true;
}
return false;
}
@@ -84,7 +80,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
skb->offload_fwd_mark = 1;
if (SJA1105_SKB_CB(skb)->type == SJA1105_FRAME_TYPE_LINK_LOCAL) {
if (sja1105_is_link_local(skb)) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
* the incl_srcpt options.

View File

@@ -1981,7 +1981,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u32 itag = 0;
struct rtable *rth;
struct flowi4 fl4;
bool do_cache;
bool do_cache = true;
/* IP on this device is disabled. */
@@ -2058,6 +2058,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (res->type == RTN_BROADCAST) {
if (IN_DEV_BFORWARD(in_dev))
goto make_route;
/* not do cache if bc_forwarding is enabled */
if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
do_cache = false;
goto brd_input;
}
@@ -2095,18 +2098,15 @@ brd_input:
RT_CACHE_STAT_INC(in_brd);
local_input:
do_cache = false;
if (res->fi) {
if (!itag) {
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
do_cache &= res->fi && !itag;
if (do_cache) {
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
rth = rcu_dereference(nhc->nhc_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
err = 0;
goto out;
}
do_cache = true;
rth = rcu_dereference(nhc->nhc_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
err = 0;
goto out;
}
}

View File

@@ -533,8 +533,7 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
(inet->inet_dport != rmt_port && inet->inet_dport) ||
(inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
ipv6_only_sock(sk) ||
(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
sk->sk_bound_dev_if != sdif))
!udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
return false;
if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
return false;

View File

@@ -779,6 +779,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct flowi6 fl6;
struct ipcm6_cookie ipc6;
int addr_len = msg->msg_namelen;
int hdrincl;
u16 proto;
int err;
@@ -792,6 +793,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/* hdrincl should be READ_ONCE(inet->hdrincl)
* but READ_ONCE() doesn't work with bit fields.
* Doing this indirectly yields the same result.
*/
hdrincl = inet->hdrincl;
hdrincl = READ_ONCE(hdrincl);
/*
* Get and verify the address.
*/
@@ -883,11 +891,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
rfv.msg = msg;
rfv.hlen = 0;
err = rawv6_probe_proto_opt(&rfv, &fl6);
if (err)
goto out;
if (!hdrincl) {
rfv.msg = msg;
rfv.hlen = 0;
err = rawv6_probe_proto_opt(&rfv, &fl6);
if (err)
goto out;
}
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
@@ -904,7 +915,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
if (inet->hdrincl)
if (hdrincl)
fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
if (ipc6.tclass < 0)
@@ -927,7 +938,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
if (hdrincl)
err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
msg->msg_flags, &ipc6.sockc);
else {
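Note: inet->hdrincl is a bitfield, so READ_ONCE() cannot be applied to it directly; the fix snapshots it once into a plain int (laundered through READ_ONCE() on the local to stop the compiler from reloading the bitfield) and uses that snapshot for every later decision, so a concurrent setsockopt() cannot make one sendmsg() call see two different values. A small sketch of the snapshot-once idea (struct and names hypothetical):

#include <stdio.h>

struct sock_opts {
    unsigned int hdrincl : 1; /* bitfield: READ_ONCE() can't apply */
    unsigned int other   : 1;
};

/* Snapshot the flag once into a local and use only the snapshot,
 * so the two tests below can never disagree even if another thread
 * flips the bitfield in between. (The kernel additionally launders
 * the local through READ_ONCE() to prevent a compiler reload.)
 */
static int sendmsg_path(volatile struct sock_opts *opts)
{
    int hdrincl = opts->hdrincl;

    if (!hdrincl)
        printf("parse transport header from msg\n");
    /* ... much later on the same path ... */
    if (hdrincl)
        printf("send with caller-provided header\n");
    return 0;
}

int main(void)
{
    struct sock_opts opts = { .hdrincl = 1 };

    return sendmsg_path(&opts);
}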

View File

@@ -3008,8 +3008,8 @@ static int packet_release(struct socket *sock)
synchronize_net();
kfree(po->rollover);
if (f) {
kfree(po->rollover);
fanout_release_data(f);
kfree(f);
}

View File

@@ -87,7 +87,7 @@ static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
spin_lock_irqsave(&rds_ibdev->spinlock, flags);
list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
rds_conn_drop(ic->conn);
rds_conn_path_drop(&ic->conn->c_path[0], true);
spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}

View File

@@ -428,12 +428,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
wait_clean_list_grace();
list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
if (ibmr_ret)
if (ibmr_ret) {
*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
clean_nodes = clean_nodes->next;
}
/* more than one entry in llist nodes */
if (clean_nodes->next)
llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
if (clean_nodes)
llist_add_batch(clean_nodes, clean_tail,
&pool->clean_list);
}
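Note the leak: when no entry was handed back to the caller, the old code still re-queued only clean_nodes->next, orphaning the head node. The fix advances clean_nodes past the consumed head only when one is actually handed out, then re-queues whatever remains. A standalone sketch of that pop-head-then-requeue pattern (hypothetical names):

#include <stdio.h>

struct node { struct node *next; int id; };

/* Sketch of the rds_ib_flush_mr_pool() fix: if the caller wants
 * one entry, pop it off the head first, then put everything that
 * remains back on the pool list. The old code always re-added
 * list->next, leaking the head whenever no entry was handed out.
 */
static void drain(struct node **pool_list, struct node *list,
                  struct node **out)
{
    if (out && list) {
        *out = list;       /* hand the head to the caller */
        list = list->next; /* ... and advance past it */
    }
    if (list)
        *pool_list = list; /* re-queue the remainder */
}

int main(void)
{
    struct node b = { NULL, 2 }, a = { &b, 1 };
    struct node *pool = NULL;

    drain(&pool, &a, NULL); /* nothing handed out: keep both */
    printf("pool head id=%d\n", pool ? pool->id : -1);
    return 0;
}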

View File

@@ -168,6 +168,7 @@ void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
list_del(&inc->ii_cache_entry);
WARN_ON(!list_empty(&inc->ii_frags));
kmem_cache_free(rds_ib_incoming_slab, inc);
atomic_dec(&rds_ib_allocation);
}
rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
@@ -1057,6 +1058,8 @@ out:
void rds_ib_recv_exit(void)
{
WARN_ON(atomic_read(&rds_ib_allocation));
kmem_cache_destroy(rds_ib_incoming_slab);
kmem_cache_destroy(rds_ib_frag_slab);
}

View File

@@ -2312,7 +2312,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
union sctp_addr addr;
struct sctp_af *af;
int src_match = 0;
char *cookie;
/* We must include the address that the INIT packet came from.
* This is the only address that matters for an INIT packet.
@@ -2416,14 +2415,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
/* Peer Rwnd : Current calculated value of the peer's rwnd. */
asoc->peer.rwnd = asoc->peer.i.a_rwnd;
/* Copy cookie in case we need to resend COOKIE-ECHO. */
cookie = asoc->peer.cookie;
if (cookie) {
asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
if (!asoc->peer.cookie)
goto clean_up;
}
/* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
* high (for example, implementations MAY use the size of the receiver
* advertised window).
@@ -2592,7 +2583,9 @@ do_addr_param:
case SCTP_PARAM_STATE_COOKIE:
asoc->peer.cookie_len =
ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
asoc->peer.cookie = param.cookie->body;
asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
if (!asoc->peer.cookie)
retval = 0;
break;
case SCTP_PARAM_HEARTBEAT_INFO:

View File

@@ -883,6 +883,11 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
asoc->rto_initial;
}
if (sctp_state(asoc, ESTABLISHED)) {
kfree(asoc->peer.cookie);
asoc->peer.cookie = NULL;
}
if (sctp_state(asoc, ESTABLISHED) ||
sctp_state(asoc, CLOSED) ||
sctp_state(asoc, SHUTDOWN_RECEIVED)) {

View File

@@ -550,11 +550,23 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
}
}
static void tls_device_resync_rx(struct tls_context *tls_ctx,
struct sock *sk, u32 seq, u64 rcd_sn)
{
struct net_device *netdev;
if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
return;
netdev = READ_ONCE(tls_ctx->netdev);
if (netdev)
netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
struct net_device *netdev;
u32 is_req_pending;
s64 resync_req;
u32 req_seq;
@@ -570,12 +582,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
if (unlikely(is_req_pending) && req_seq == seq &&
atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
seq += TLS_HEADER_SIZE - 1;
down_read(&device_offload_lock);
netdev = tls_ctx->netdev;
if (netdev)
netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq,
rcd_sn);
up_read(&device_offload_lock);
tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}
}
@@ -977,7 +984,10 @@ static int tls_device_down(struct net_device *netdev)
if (ctx->rx_conf == TLS_HW)
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_RX);
ctx->netdev = NULL;
WRITE_ONCE(ctx->netdev, NULL);
smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
usleep_range(10, 200);
dev_put(netdev);
list_del_init(&ctx->list);
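Note: the RX resync path can run in atomic context, where taking the sleeping device_offload_lock rwsem is illegal; the fix guards the netdev access with a flag bit used as a non-sleeping lock (test_and_set_bit() to enter, clear_bit_unlock() to leave), and tls_device_down() busy-waits for the bit to clear before dropping its netdev reference. A C11-atomics sketch of such a bit lock (userspace analogue, hypothetical names):

#include <stdatomic.h>
#include <stdio.h>

#define RX_SYNC_RUNNING 0x1UL

static atomic_ulong flags;

/* Non-sleeping "bit lock": returns 0 if we got the bit, nonzero if
 * someone else already holds it (the kernel WARNs and bails there).
 */
static int bit_trylock(void)
{
    return atomic_fetch_or(&flags, RX_SYNC_RUNNING) & RX_SYNC_RUNNING;
}

static void bit_unlock(void)
{
    atomic_fetch_and(&flags, ~RX_SYNC_RUNNING);
}

static void resync_rx(void)
{
    if (bit_trylock())
        return; /* resync already running */
    /* ... poke the device; safe even in atomic context ... */
    bit_unlock();
}

int main(void)
{
    resync_rx();
    /* teardown spins until no resync is in flight */
    while (atomic_load(&flags) & RX_SYNC_RUNNING)
        ;
    puts("device down");
    return 0;
}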

View File

@@ -145,16 +145,19 @@ bc_forwarding_disable()
{
sysctl_set net.ipv4.conf.all.bc_forwarding 0
sysctl_set net.ipv4.conf.$rp1.bc_forwarding 0
sysctl_set net.ipv4.conf.$rp2.bc_forwarding 0
}
bc_forwarding_enable()
{
sysctl_set net.ipv4.conf.all.bc_forwarding 1
sysctl_set net.ipv4.conf.$rp1.bc_forwarding 1
sysctl_set net.ipv4.conf.$rp2.bc_forwarding 1
}
bc_forwarding_restore()
{
sysctl_restore net.ipv4.conf.$rp2.bc_forwarding
sysctl_restore net.ipv4.conf.$rp1.bc_forwarding
sysctl_restore net.ipv4.conf.all.bc_forwarding
}
@@ -171,7 +174,7 @@ ping_test_from()
log_info "ping $dip, expected reply from $from"
ip vrf exec $(master_name_get $oif) \
$PING -I $oif $dip -c 10 -i 0.1 -w $PING_TIMEOUT -b 2>&1 \
| grep $from &> /dev/null
| grep "bytes from $from" > /dev/null
check_err_fail $fail $?
}