Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 12:28:41 +08:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/bonding/bond_main.c
	drivers/net/ethernet/mellanox/mlxsw/spectrum.h
	drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c

The bond_main.c and mellanox switch conflicts were cases of
overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9d367eddf3
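Note: several hunks below (geneve, vxlan, fou, and the UDP offload code) carry the same underlying net fix: udp_add_offload() now takes the owning struct net, and the GRO receive/complete paths only match offloads registered in the packet's namespace. A minimal caller-side sketch of that pattern, reusing the names that appear in the geneve hunk (illustrative fragment only, not part of this diff):

	/* Old form, as removed below: the offload was registered globally. */
	err = udp_add_offload(&gs->udp_offloads);

	/* New form, as added below: the caller passes its namespace, and
	 * udp_gro_receive()/udp_gro_complete() compare it with net_eq()
	 * against dev_net(skb->dev) before using the callbacks.
	 */
	err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
	if (err)
		pr_warn("geneve: udp_add_offload failed with status %d\n", err);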
@@ -37,7 +37,7 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for IBM Active 2000 ISDN card");
 MODULE_AUTHOR("Fritz Elfert");
 MODULE_LICENSE("GPL");
 MODULE_PARM_DESC(act_bus, "BusType of first card, 1=ISA, 2=MCA, 3=PCMCIA, currently only ISA");
-MODULE_PARM_DESC(membase, "Base port address of first card");
+MODULE_PARM_DESC(act_port, "Base port address of first card");
 MODULE_PARM_DESC(act_irq, "IRQ of first card");
 MODULE_PARM_DESC(act_id, "ID-String of first card");
 module_param(act_bus, int, 0);
@@ -1226,7 +1226,6 @@ static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave)
                                &lag_upper_info);
     if (err)
         return err;
-    slave->dev->flags |= IFF_SLAVE;
     rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
     return 0;
 }
@@ -1493,6 +1492,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         }
     }
 
+    /* set slave flag before open to prevent IPv6 addrconf */
+    slave_dev->flags |= IFF_SLAVE;
+
     /* open the slave since the application closed it */
     res = dev_open(slave_dev);
     if (res) {
@@ -1758,6 +1760,7 @@ err_close:
     dev_close(slave_dev);
 
 err_restore_mac:
+    slave_dev->flags &= ~IFF_SLAVE;
     if (!bond->params.fail_over_mac ||
         BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
         /* XXX TODO - fom follow mode needs to change master's
@@ -120,6 +120,7 @@ struct mlxsw_sp {
     } fdb_notify;
 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
     u32 ageing_time;
+    struct mutex fdb_lock;  /* Make sure FDB sessions are atomic. */
     struct mlxsw_sp_upper master_bridge;
     struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
 };
@@ -1057,6 +1057,7 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
     if (!sfd_pl)
         return -ENOMEM;
 
+    mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
     if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
         u16 tmp;
 
@@ -1122,6 +1123,7 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
     } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
 
 out:
+    mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
     kfree(sfd_pl);
     return stored_err ? stored_err : err;
 }
@@ -1371,6 +1373,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
 
     mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
 
+    mutex_lock(&mlxsw_sp->fdb_lock);
     do {
         mlxsw_reg_sfn_pack(sfn_pl);
         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
@@ -1383,6 +1386,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
             mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
 
     } while (num_rec);
+    mutex_unlock(&mlxsw_sp->fdb_lock);
 
     kfree(sfn_pl);
     mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
@@ -1397,6 +1401,7 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
         dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
         return err;
     }
+    mutex_init(&mlxsw_sp->fdb_lock);
     INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
     mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
     mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
@@ -343,16 +343,13 @@ error:
 static void ravb_emac_init(struct net_device *ndev)
 {
     struct ravb_private *priv = netdev_priv(ndev);
-    u32 ecmr;
 
     /* Receive frame limit set register */
     ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
 
     /* PAUSE prohibition */
-    ecmr = ravb_read(ndev, ECMR);
-    ecmr &= ECMR_DM;
-    ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
-    ravb_write(ndev, ecmr, ECMR);
+    ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
+               ECMR_TE | ECMR_RE, ECMR);
 
     ravb_set_rate(ndev);
 
@@ -1240,7 +1240,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 {
     int ret = 0;
     struct sh_eth_private *mdp = netdev_priv(ndev);
-    u32 val;
 
     /* Soft Reset */
     ret = sh_eth_reset(ndev);
@@ -1293,10 +1292,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
     }
 
     /* PAUSE Prohibition */
-    val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
-        ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
-
-    sh_eth_write(ndev, val, ECMR);
+    sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
+                 ECMR_TE | ECMR_RE, ECMR);
 
     if (mdp->cd->set_rate)
         mdp->cd->set_rate(ndev);
@@ -388,7 +388,7 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
     int err;
 
     if (sa_family == AF_INET) {
-        err = udp_add_offload(&gs->udp_offloads);
+        err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
         if (err)
             pr_warn("geneve: udp_add_offload failed with status %d\n",
                     err);
@@ -130,16 +130,6 @@ static int toim3232delay = 150;	/* default is 150 ms */
 module_param(toim3232delay, int, 0);
 MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay");
 
-#if 0
-static int toim3232flipdtr = 0;	/* default is DTR high to reset */
-module_param(toim3232flipdtr, int, 0);
-MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)");
-
-static int toim3232fliprts = 0;	/* default is RTS high for baud change */
-module_param(toim3232fliptrs, int, 0);
-MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)");
-#endif
-
 static int toim3232_open(struct sir_dev *);
 static int toim3232_close(struct sir_dev *);
 static int toim3232_change_speed(struct sir_dev *, unsigned);
@@ -483,9 +483,17 @@ static int ksz9031_config_init(struct phy_device *phydev)
         "txd2-skew-ps", "txd3-skew-ps"
     };
     static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+    const struct device *dev_walker;
 
-    if (!of_node && dev->parent->of_node)
-        of_node = dev->parent->of_node;
+    /* The Micrel driver has a deprecated option to place phy OF
+     * properties in the MAC node. Walk up the tree of devices to
+     * find a device with an OF node.
+     */
+    dev_walker = &phydev->mdio.dev;
+    do {
+        of_node = dev_walker->of_node;
+        dev_walker = dev_walker->parent;
+    } while (!of_node && dev_walker);
 
     if (of_node) {
         ksz9031_of_load_skew_values(phydev, of_node,
@@ -160,6 +160,12 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
         info->u = header.usb_cdc_union_desc;
         info->header = header.usb_cdc_header_desc;
         info->ether = header.usb_cdc_ether_desc;
+        if (!info->u) {
+            if (rndis)
+                goto skip;
+            else /* in that case a quirk is mandatory */
+                goto bad_desc;
+        }
         /* we need a master/control interface (what we're
          * probed with) and a slave/data interface; union
          * descriptors sort this all out.
@@ -256,7 +262,7 @@ skip:
             goto bad_desc;
         }
 
-    } else if (!info->header || !info->u || (!rndis && !info->ether)) {
+    } else if (!info->header || (!rndis && !info->ether)) {
         dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n",
             info->header ? "" : "header ",
             info->u ? "" : "union ",
@@ -603,6 +603,59 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
     return 0;
 }
 
+static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
+                                 u32 length, u8 *data)
+{
+    int i;
+    int ret;
+    u32 buf;
+    unsigned long timeout;
+
+    ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+
+    if (buf & OTP_PWR_DN_PWRDN_N_) {
+        /* clear it and wait to be cleared */
+        ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+
+        timeout = jiffies + HZ;
+        do {
+            udelay(1);
+            ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+            if (time_after(jiffies, timeout)) {
+                netdev_warn(dev->net,
+                            "timeout on OTP_PWR_DN completion");
+                return -EIO;
+            }
+        } while (buf & OTP_PWR_DN_PWRDN_N_);
+    }
+
+    /* set to BYTE program mode */
+    ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+    for (i = 0; i < length; i++) {
+        ret = lan78xx_write_reg(dev, OTP_ADDR1,
+                                ((offset + i) >> 8) & OTP_ADDR1_15_11);
+        ret = lan78xx_write_reg(dev, OTP_ADDR2,
+                                ((offset + i) & OTP_ADDR2_10_3));
+        ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
+        ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+        ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+        timeout = jiffies + HZ;
+        do {
+            udelay(1);
+            ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+            if (time_after(jiffies, timeout)) {
+                netdev_warn(dev->net,
+                            "Timeout on OTP_STATUS completion");
+                return -EIO;
+            }
+        } while (buf & OTP_STATUS_BUSY_);
+    }
+
+    return 0;
+}
+
 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
                             u32 length, u8 *data)
 {
@@ -969,7 +1022,7 @@ static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
          (ee->offset == 0) &&
          (ee->len == 512) &&
          (data[0] == OTP_INDICATOR_1))
-        return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+        return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
 
     return -EINVAL;
 }
@@ -886,6 +886,7 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
     {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
     {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
+    {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},    /* SIMCom 7230E */
 
     /* 4. Gobi 1000 devices */
     {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
@@ -25,12 +25,13 @@
 #include <uapi/linux/mdio.h>
 #include <linux/mdio.h>
 #include <linux/usb/cdc.h>
+#include <linux/suspend.h>
 
 /* Information for net-next */
 #define NETNEXT_VERSION "08"
 
 /* Information for net */
-#define NET_VERSION "2"
+#define NET_VERSION "3"
 
 #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -604,6 +605,9 @@ struct r8152 {
     struct delayed_work schedule;
     struct mii_if_info mii;
     struct mutex control;   /* use for hw setting */
+#ifdef CONFIG_PM_SLEEP
+    struct notifier_block pm_notifier;
+#endif
 
     struct rtl_ops {
         void (*init)(struct r8152 *);
@@ -3036,6 +3040,33 @@ out1:
     usb_autopm_put_interface(tp->intf);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int rtl_notifier(struct notifier_block *nb, unsigned long action,
+                        void *data)
+{
+    struct r8152 *tp = container_of(nb, struct r8152, pm_notifier);
+
+    switch (action) {
+    case PM_HIBERNATION_PREPARE:
+    case PM_SUSPEND_PREPARE:
+        usb_autopm_get_interface(tp->intf);
+        break;
+
+    case PM_POST_HIBERNATION:
+    case PM_POST_SUSPEND:
+        usb_autopm_put_interface(tp->intf);
+        break;
+
+    case PM_POST_RESTORE:
+    case PM_RESTORE_PREPARE:
+    default:
+        break;
+    }
+
+    return NOTIFY_DONE;
+}
+#endif
+
 static int rtl8152_open(struct net_device *netdev)
 {
     struct r8152 *tp = netdev_priv(netdev);
@@ -3078,6 +3109,10 @@ static int rtl8152_open(struct net_device *netdev)
     mutex_unlock(&tp->control);
 
     usb_autopm_put_interface(tp->intf);
+#ifdef CONFIG_PM_SLEEP
+    tp->pm_notifier.notifier_call = rtl_notifier;
+    register_pm_notifier(&tp->pm_notifier);
+#endif
 
 out:
     return res;
@@ -3088,6 +3123,9 @@ static int rtl8152_close(struct net_device *netdev)
     struct r8152 *tp = netdev_priv(netdev);
     int res = 0;
 
+#ifdef CONFIG_PM_SLEEP
+    unregister_pm_notifier(&tp->pm_notifier);
+#endif
     napi_disable(&tp->napi);
     clear_bit(WORK_ENABLE, &tp->flags);
     usb_kill_urb(tp->intr_urb);
@@ -621,7 +621,7 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
     int err;
 
     if (sa_family == AF_INET) {
-        err = udp_add_offload(&vs->udp_offloads);
+        err = udp_add_offload(net, &vs->udp_offloads);
         if (err)
             pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
     }
@@ -2750,7 +2750,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                                struct vxlan_config *conf)
 {
     struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
-    struct vxlan_dev *vxlan = netdev_priv(dev);
+    struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
     struct vxlan_rdst *dst = &vxlan->default_dst;
     unsigned short needed_headroom = ETH_HLEN;
     int err;
@@ -2816,9 +2816,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
     if (!vxlan->cfg.age_interval)
         vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
 
-    if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
-                       vxlan->cfg.dst_port, vxlan->flags))
+    list_for_each_entry(tmp, &vn->vxlan_list, next) {
+        if (tmp->cfg.vni == conf->vni &&
+            (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
+             tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
+            tmp->cfg.dst_port == vxlan->cfg.dst_port &&
+            (tmp->flags & VXLAN_F_RCV_FLAGS) ==
+            (vxlan->flags & VXLAN_F_RCV_FLAGS))
             return -EEXIST;
+    }
 
     dev->ethtool_ops = &vxlan_ethtool_ops;
 
@@ -830,6 +830,7 @@ struct user_struct {
     unsigned long mq_bytes;     /* How many bytes can be allocated to mqueue? */
 #endif
     unsigned long locked_shm;   /* How many pages of mlocked shm ? */
+    unsigned long unix_inflight;    /* How many files in flight in unix sockets */
 
 #ifdef CONFIG_KEYS
     struct key *uid_keyring;    /* UID specific keyring */
@@ -107,7 +107,7 @@ int inet_del_offload(const struct net_offload *prot, unsigned char num);
 void inet_register_protosw(struct inet_protosw *p);
 void inet_unregister_protosw(struct inet_protosw *p);
 
-int udp_add_offload(struct udp_offload *prot);
+int udp_add_offload(struct net *net, struct udp_offload *prot);
 void udp_del_offload(struct udp_offload *prot);
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -185,7 +185,8 @@ unlock:
 static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
                                      int max_if_num, int del_if_num)
 {
-    int chunk_size, ret = -ENOMEM, if_offset;
+    int ret = -ENOMEM;
+    size_t chunk_size, if_offset;
     void *data_ptr = NULL;
 
     spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
@@ -203,8 +204,9 @@ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
     memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
 
     /* copy second part */
+    if_offset = (del_if_num + 1) * chunk_size;
     memcpy((char *)data_ptr + del_if_num * chunk_size,
-           orig_node->bat_iv.bcast_own + ((del_if_num + 1) * chunk_size),
+           (uint8_t *)orig_node->bat_iv.bcast_own + if_offset,
            (max_if_num - del_if_num) * chunk_size);
 
 free_bcast_own:
@@ -2787,7 +2787,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
     } else {
          skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
     }
-    skb_reserve(skb, LL_RESERVED_SPACE(dev));
+
+    if (likely(skb))
+        skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
     return skb;
 }
@@ -498,7 +498,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
     sk->sk_allocation = GFP_ATOMIC;
 
     if (cfg->udp_config.family == AF_INET) {
-        err = udp_add_offload(&fou->udp_offloads);
+        err = udp_add_offload(net, &fou->udp_offloads);
         if (err)
             goto error;
     }
@@ -920,7 +920,7 @@ static int __ip_append_data(struct sock *sk,
     if (((length > mtu) || (skb && skb_is_gso(skb))) &&
         (sk->sk_protocol == IPPROTO_UDP) &&
         (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
-        (sk->sk_type == SOCK_DGRAM)) {
+        (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
         err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                  hh_len, fragheaderlen, transhdrlen,
                                  maxfraglen, flags);
@@ -219,7 +219,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
     yeah->fast_count = 0;
     yeah->reno_count = max(yeah->reno_count>>1, 2U);
 
-    return tp->snd_cwnd - reduction;
+    return max_t(int, tp->snd_cwnd - reduction, 2);
 }
 
 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
@@ -21,6 +21,7 @@ static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
 
 struct udp_offload_priv {
     struct udp_offload *offload;
+    possible_net_t net;
     struct rcu_head rcu;
     struct udp_offload_priv __rcu *next;
 };
@@ -242,13 +243,14 @@ out:
     return segs;
 }
 
-int udp_add_offload(struct udp_offload *uo)
+int udp_add_offload(struct net *net, struct udp_offload *uo)
 {
     struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
 
     if (!new_offload)
         return -ENOMEM;
 
+    write_pnet(&new_offload->net, net);
     new_offload->offload = uo;
 
     spin_lock(&udp_offload_lock);
@@ -312,7 +314,8 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
     rcu_read_lock();
     uo_priv = rcu_dereference(udp_offload_base);
     for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
-        if (uo_priv->offload->port == uh->dest &&
+        if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
+            uo_priv->offload->port == uh->dest &&
             uo_priv->offload->callbacks.gro_receive)
             goto unflush;
     }
@@ -390,7 +393,8 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
 
     uo_priv = rcu_dereference(udp_offload_base);
     for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
-        if (uo_priv->offload->port == uh->dest &&
+        if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
+            uo_priv->offload->port == uh->dest &&
             uo_priv->offload->callbacks.gro_complete)
             break;
     }
@@ -1353,7 +1353,7 @@ emsgsize:
         (skb && skb_is_gso(skb))) &&
         (sk->sk_protocol == IPPROTO_UDP) &&
         (rt->dst.dev->features & NETIF_F_UFO) &&
-        (sk->sk_type == SOCK_DGRAM)) {
+        (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
         err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                   hh_len, fragheaderlen,
                                   transhdrlen, mtu, flags, fl6);
@@ -462,8 +462,10 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
         if (np->repflow && ireq->pktopts)
             fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
+        rcu_read_lock();
         err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
                        np->tclass);
+        rcu_read_unlock();
         err = net_xmit_eval(err);
     }
 
@@ -252,23 +252,28 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
     fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
                    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
                    sizeof(key->eth.src));
+
     fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
                    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
                    sizeof(key->basic.n_proto));
+
     if (key->basic.n_proto == htons(ETH_P_IP) ||
         key->basic.n_proto == htons(ETH_P_IPV6)) {
         fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
                        &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
                        sizeof(key->basic.ip_proto));
     }
-    if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+
+    if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
+        key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
         fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
                        &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
                        sizeof(key->ipv4.src));
         fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
                        &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
                        sizeof(key->ipv4.dst));
-    } else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+    } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
+        key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
         fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
                        &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
                        sizeof(key->ipv6.src));
@@ -276,6 +281,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                        &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
                        sizeof(key->ipv6.dst));
     }
+
     if (key->basic.ip_proto == IPPROTO_TCP) {
         fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
                        &mask->tp.src, TCA_FLOWER_UNSPEC,
@@ -63,7 +63,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
                              sctp_state_t state,
                              struct sctp_endpoint *ep,
-                             struct sctp_association *asoc,
+                             struct sctp_association **asoc,
                              void *event_arg,
                              sctp_disposition_t status,
                              sctp_cmd_seq_t *commands,
@@ -1125,7 +1125,7 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
     debug_post_sfn();
 
     error = sctp_side_effects(event_type, subtype, state,
-                              ep, asoc, event_arg, status,
+                              ep, &asoc, event_arg, status,
                               &commands, gfp);
     debug_post_sfx();
 
@@ -1138,7 +1138,7 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
                              sctp_state_t state,
                              struct sctp_endpoint *ep,
-                             struct sctp_association *asoc,
+                             struct sctp_association **asoc,
                              void *event_arg,
                              sctp_disposition_t status,
                              sctp_cmd_seq_t *commands,
@@ -1153,7 +1153,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
      * disposition SCTP_DISPOSITION_CONSUME.
      */
     if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
-                                           ep, asoc,
+                                           ep, *asoc,
                                            event_arg, status,
                                            commands, gfp)))
         goto bail;
@@ -1176,11 +1176,12 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
         break;
 
     case SCTP_DISPOSITION_DELETE_TCB:
+    case SCTP_DISPOSITION_ABORT:
         /* This should now be a command. */
+        *asoc = NULL;
         break;
 
     case SCTP_DISPOSITION_CONSUME:
-    case SCTP_DISPOSITION_ABORT:
         /*
          * We should no longer have much work to do here as the
          * real work has been done as explicit commands above.
@@ -2976,7 +2976,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
         SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
         goto discard_force;
     case SCTP_IERROR_NO_DATA:
-        goto consume;
+        return SCTP_DISPOSITION_ABORT;
     case SCTP_IERROR_PROTO_VIOLATION:
         return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
             (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
@@ -3043,9 +3043,6 @@ discard_noforce:
     sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
 
     return SCTP_DISPOSITION_DISCARD;
-consume:
-    return SCTP_DISPOSITION_CONSUME;
-
 }
 
 /*
@@ -3093,7 +3090,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
     case SCTP_IERROR_BAD_STREAM:
         break;
     case SCTP_IERROR_NO_DATA:
-        goto consume;
+        return SCTP_DISPOSITION_ABORT;
     case SCTP_IERROR_PROTO_VIOLATION:
         return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
             (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
@@ -3119,7 +3116,6 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
                         SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
     }
 
-consume:
     return SCTP_DISPOSITION_CONSUME;
 }
 
@@ -4825,9 +4821,6 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
      * if necessary to fill gaps.
      */
     struct sctp_chunk *abort = arg;
-    sctp_disposition_t retval;
 
-    retval = SCTP_DISPOSITION_CONSUME;
-
     if (abort)
         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
@@ -4845,7 +4838,7 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
     SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
     SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
-    return retval;
+    return SCTP_DISPOSITION_ABORT;
 }
 
 /* We tried an illegal operation on an association which is closed. */
@@ -4960,12 +4953,10 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
     sctp_cmd_seq_t *commands)
 {
     struct sctp_chunk *abort = arg;
-    sctp_disposition_t retval;
 
     /* Stop T1-init timer */
     sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                     SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
-    retval = SCTP_DISPOSITION_CONSUME;
 
     if (abort)
         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
@@ -4985,7 +4976,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
     sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
                     SCTP_PERR(SCTP_ERROR_USER_ABORT));
 
-    return retval;
+    return SCTP_DISPOSITION_ABORT;
 }
 
 /*
@@ -327,7 +327,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
     struct ctl_table tbl;
     bool changed = false;
     char *none = "none";
-    char tmp[8];
+    char tmp[8] = {0};
     int ret;
 
     memset(&tbl, 0, sizeof(struct ctl_table));
@@ -1513,6 +1513,21 @@ static void unix_destruct_scm(struct sk_buff *skb)
     sock_wfree(skb);
 }
 
+/*
+ * The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+    struct user_struct *user = current_user();
+
+    if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+        return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+    return false;
+}
+
 #define MAX_RECURSION_LEVEL 4
 
 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
@@ -1521,6 +1536,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
     unsigned char max_level = 0;
     int unix_sock_count = 0;
 
+    if (too_many_unix_fds(current))
+        return -ETOOMANYREFS;
+
     for (i = scm->fp->count - 1; i >= 0; i--) {
         struct sock *sk = unix_get_socket(scm->fp->fp[i]);
 
@@ -1542,10 +1560,8 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
     if (!UNIXCB(skb).fp)
         return -ENOMEM;
 
-    if (unix_sock_count) {
-        for (i = scm->fp->count - 1; i >= 0; i--)
-            unix_inflight(scm->fp->fp[i]);
-    }
+    for (i = scm->fp->count - 1; i >= 0; i--)
+        unix_inflight(scm->fp->fp[i]);
     return max_level;
 }
 
@@ -120,11 +120,11 @@ void unix_inflight(struct file *fp)
 {
     struct sock *s = unix_get_socket(fp);
 
+    spin_lock(&unix_gc_lock);
+
     if (s) {
         struct unix_sock *u = unix_sk(s);
 
-        spin_lock(&unix_gc_lock);
-
         if (atomic_long_inc_return(&u->inflight) == 1) {
             BUG_ON(!list_empty(&u->link));
             list_add_tail(&u->link, &gc_inflight_list);
@@ -132,25 +132,28 @@ void unix_inflight(struct file *fp)
             BUG_ON(list_empty(&u->link));
         }
         unix_tot_inflight++;
-        spin_unlock(&unix_gc_lock);
     }
+    fp->f_cred->user->unix_inflight++;
+    spin_unlock(&unix_gc_lock);
 }
 
 void unix_notinflight(struct file *fp)
 {
     struct sock *s = unix_get_socket(fp);
 
+    spin_lock(&unix_gc_lock);
+
     if (s) {
         struct unix_sock *u = unix_sk(s);
 
-        spin_lock(&unix_gc_lock);
         BUG_ON(list_empty(&u->link));
 
         if (atomic_long_dec_and_test(&u->inflight))
             list_del_init(&u->link);
         unix_tot_inflight--;
-        spin_unlock(&unix_gc_lock);
     }
+    fp->f_cred->user->unix_inflight--;
+    spin_unlock(&unix_gc_lock);
 }
 
 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),