Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Unbreak zebra and other netlink apps, from Eric W Biederman.

 2) Some new qmi_wwan device IDs, from Aleksander Morgado.

 3) Fix info leak in DCB netlink handler of qlcnic driver, from Dan
    Carpenter.

 4) inet_getid() and ipv6_select_ident() do not generate monotonically
    increasing ID numbers, fix from Eric Dumazet.

 5) Fix memory leak in __sk_prepare_filter(), from Leon Yu.

 6) Netlink leftover bytes warning message is user triggerable, rate
    limit it.  From Michal Schmidt.

 7) Fix non-linear SKB panic in ipvs, from Peter Christensen.

 8) Congestion window undo needs to be performed even if only never
    retransmitted data is SACK'd, fix from Yuchung Cheng.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (24 commits)
  net: filter: fix possible memory leak in __sk_prepare_filter()
  net: ec_bhf: Add runtime dependencies
  tcp: fix cwnd undo on DSACK in F-RTO
  netlink: Only check file credentials for implicit destinations
  ipheth: Add support for iPad 2 and iPad 3
  team: fix mtu setting
  net: fix inet_getid() and ipv6_select_ident() bugs
  net: qmi_wwan: interface #11 in Sierra Wireless MC73xx is not QMI
  net: qmi_wwan: add additional Sierra Wireless QMI devices
  bridge: Prevent insertion of FDB entry with disallowed vlan
  netlink: rate-limit leftover bytes warning and print process name
  bridge: notify user space after fdb update
  net: qmi_wwan: add Netgear AirCard 341U
  net: fix wrong mac_len calculation for vlans
  batman-adv: fix NULL pointer dereferences
  net/mlx4_core: Reset RoCE VF gids when guest driver goes down
  emac: aggregation of v1-2 PLB errors for IER register
  emac: add missing support of 10mbit in emac/rgmii
  can: only rename enabled led triggers when changing the netdev name
  ipvs: Fix panic due to non-linear skb
  ...
commit cae61ba37b
MAINTAINERS
@@ -3153,10 +3153,9 @@ S:	Maintained
 F:	drivers/scsi/eata_pio.*
 
 EBTABLES
-M:	Bart De Schuymer <bart.de.schuymer@pandora.be>
 L:	netfilter-devel@vger.kernel.org
 W:	http://ebtables.sourceforge.net/
-S:	Maintained
+S:	Orphan
 F:	include/linux/netfilter_bridge/ebt_*.h
 F:	include/uapi/linux/netfilter_bridge/ebt_*.h
 F:	net/bridge/netfilter/ebt*.c
drivers/net/can/led.c
@@ -97,6 +97,9 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
 	if (!priv)
 		return NOTIFY_DONE;
 
+	if (!priv->tx_led_trig || !priv->rx_led_trig)
+		return NOTIFY_DONE;
+
 	if (msg == NETDEV_CHANGENAME) {
 		snprintf(name, sizeof(name), "%s-tx", netdev->name);
 		led_trigger_rename_static(name, priv->tx_led_trig);
drivers/net/ethernet/Kconfig
@@ -39,6 +39,7 @@ source "drivers/net/ethernet/cisco/Kconfig"
 config CX_ECAT
 	tristate "Beckhoff CX5020 EtherCAT master support"
 	depends on PCI
+	depends on X86 || COMPILE_TEST
 	---help---
 	  Driver for EtherCAT master module located on CCAT FPGA
 	  that can be found on Beckhoff CX5020, and possibly other of CX
drivers/net/ethernet/ibm/emac/mal.c
@@ -682,10 +682,7 @@ static int mal_probe(struct platform_device *ofdev)
 		goto fail6;
 
 	/* Enable all MAL SERR interrupt sources */
-	if (mal->version == 2)
-		set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
-	else
-		set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
+	set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
 
 	/* Enable EOB interrupt */
 	mal_enable_eob_irq(mal);
drivers/net/ethernet/ibm/emac/mal.h
@@ -95,24 +95,20 @@
 
 
 #define   MAL_IER		0x02
+/* MAL IER bits */
 #define   MAL_IER_DE		0x00000010
 #define   MAL_IER_OTE		0x00000004
 #define   MAL_IER_OE		0x00000002
 #define   MAL_IER_PE		0x00000001
-/* MAL V1 IER bits */
-#define   MAL1_IER_NWE		0x00000008
-#define   MAL1_IER_SOC_EVENTS	MAL1_IER_NWE
-#define   MAL1_IER_EVENTS	(MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
-				 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
 
-/* MAL V2 IER bits */
-#define   MAL2_IER_PT		0x00000080
-#define   MAL2_IER_PRE		0x00000040
-#define   MAL2_IER_PWE		0x00000020
-#define   MAL2_IER_SOC_EVENTS	(MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
-#define   MAL2_IER_EVENTS	(MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
-				 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
+/* PLB read/write/timeout errors */
+#define   MAL_IER_PTE		0x00000080
+#define   MAL_IER_PRE		0x00000040
+#define   MAL_IER_PWE		0x00000020
 
+#define   MAL_IER_SOC_EVENTS	(MAL_IER_PTE | MAL_IER_PRE | MAL_IER_PWE)
+#define   MAL_IER_EVENTS	(MAL_IER_SOC_EVENTS | MAL_IER_DE | \
+				 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
 
 #define MAL_TXCASR		0x04
 #define MAL_TXCARR		0x05
drivers/net/ethernet/ibm/emac/rgmii.c
@@ -45,6 +45,7 @@
 
 /* RGMIIx_SSR */
 #define RGMII_SSR_MASK(idx)	(0x7 << ((idx) * 8))
+#define RGMII_SSR_10(idx)	(0x1 << ((idx) * 8))
 #define RGMII_SSR_100(idx)	(0x2 << ((idx) * 8))
 #define RGMII_SSR_1000(idx)	(0x4 << ((idx) * 8))
 
@@ -139,6 +140,8 @@ void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
 		ssr |= RGMII_SSR_1000(input);
 	else if (speed == SPEED_100)
 		ssr |= RGMII_SSR_100(input);
+	else if (speed == SPEED_10)
+		ssr |= RGMII_SSR_10(input);
 
 	out_be32(&p->ssr, ssr);
drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2044,6 +2044,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 	if (!mlx4_is_slave(dev)) {
 		mlx4_init_mac_table(dev, &info->mac_table);
 		mlx4_init_vlan_table(dev, &info->vlan_table);
+		mlx4_init_roce_gid_table(dev, &info->gid_table);
 		info->base_qpn = mlx4_get_base_qpn(dev, port);
 	}
drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -695,6 +695,17 @@ struct mlx4_mac_table {
 	int			max;
 };
 
+#define MLX4_ROCE_GID_ENTRY_SIZE	16
+
+struct mlx4_roce_gid_entry {
+	u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
+};
+
+struct mlx4_roce_gid_table {
+	struct mlx4_roce_gid_entry	roce_gids[MLX4_ROCE_MAX_GIDS];
+	struct mutex			mutex;
+};
+
 #define MLX4_MAX_VLAN_NUM	128
 #define MLX4_VLAN_TABLE_SIZE	(MLX4_MAX_VLAN_NUM << 2)
 
@@ -758,6 +769,7 @@ struct mlx4_port_info {
 	struct device_attribute	port_mtu_attr;
 	struct mlx4_mac_table	mac_table;
 	struct mlx4_vlan_table	vlan_table;
+	struct mlx4_roce_gid_table gid_table;
 	int			base_qpn;
 };
 
@@ -788,10 +800,6 @@ enum {
 	MLX4_USE_RR	= 1,
 };
 
-struct mlx4_roce_gid_entry {
-	u8 raw[16];
-};
-
 struct mlx4_priv {
 	struct mlx4_dev		dev;
 
@@ -839,7 +847,6 @@ struct mlx4_priv {
 	int			fs_hash_mode;
 	u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
 	__be64			slave_node_guids[MLX4_MFUNC_MAX];
-	struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
 
 	atomic_t		opreq_count;
 	struct work_struct	opreq_task;
@@ -1140,6 +1147,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
 
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
+			      struct mlx4_roce_gid_table *table);
 void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 
@@ -1149,6 +1158,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
 				    enum mlx4_resource resource_type,
 				    u64 resource_id, int *slave);
 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
 int mlx4_init_resource_tracker(struct mlx4_dev *dev);
 
 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
drivers/net/ethernet/mellanox/mlx4/port.c
@@ -75,6 +75,16 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
 	table->total = 0;
 }
 
+void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
+			      struct mlx4_roce_gid_table *table)
+{
+	int i;
+
+	mutex_init(&table->mutex);
+	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
+		memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
+}
+
 static int validate_index(struct mlx4_dev *dev,
 			  struct mlx4_mac_table *table, int index)
 {
@@ -584,6 +594,84 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
 }
 EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
 
+static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
+				     int port, struct mlx4_cmd_mailbox *mailbox)
+{
+	struct mlx4_roce_gid_entry *gid_entry_mbox;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int num_gids, base, offset;
+	int i, err;
+
+	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+	base = mlx4_get_base_gid_ix(dev, slave, port);
+
+	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+
+	mutex_lock(&(priv->port[port].gid_table.mutex));
+	/* Zero-out gids belonging to that slave in the port GID table */
+	for (i = 0, offset = base; i < num_gids; offset++, i++)
+		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
+		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
+
+	/* Now, copy roce port gids table to mailbox for passing to FW */
+	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
+	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
+		memcpy(gid_entry_mbox->raw,
+		       priv->port[port].gid_table.roce_gids[i].raw,
+		       MLX4_ROCE_GID_ENTRY_SIZE);
+
+	err = mlx4_cmd(dev, mailbox->dma,
+		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
+		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_NATIVE);
+	mutex_unlock(&(priv->port[port].gid_table.mutex));
+	return err;
+}
+
+
+void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_active_ports actv_ports;
+	struct mlx4_cmd_mailbox *mailbox;
+	int num_eth_ports, err;
+	int i;
+
+	if (slave < 0 || slave > dev->num_vfs)
+		return;
+
+	actv_ports = mlx4_get_active_ports(dev, slave);
+
+	for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
+		if (test_bit(i, actv_ports.ports)) {
+			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
+				continue;
+			num_eth_ports++;
+		}
+	}
+
+	if (!num_eth_ports)
+		return;
+
+	/* have ETH ports.  Alloc mailbox for SET_PORT command */
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return;
+
+	for (i = 0; i < dev->caps.num_ports; i++) {
+		if (test_bit(i, actv_ports.ports)) {
+			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
+				continue;
+			err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
+			if (err)
+				mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
+					  slave, i + 1, err);
+		}
+	}
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return;
+}
+
 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
 {
@@ -692,10 +780,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 		/* 2. Check that do not have duplicates in OTHER
 		 *    entries in the port GID table
 		 */
+
+		mutex_lock(&(priv->port[port].gid_table.mutex));
 		for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
 			if (i >= base && i < base + num_gids)
 				continue; /* don't compare to slave's current gids */
-			gid_entry_tbl = &priv->roce_gids[port - 1][i];
+			gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
 			if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
 				continue;
 			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
@@ -709,6 +799,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 					mlx4_warn(dev, "requested gid entry for slave:%d "
 						  "is a duplicate of gid at index %d\n",
 						  slave, i);
+					mutex_unlock(&(priv->port[port].gid_table.mutex));
 					return -EINVAL;
 				}
 			}
@@ -717,16 +808,24 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 		/* insert slave GIDs with memcpy, starting at slave's base index */
 		gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
 		for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
-			memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16);
+			memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
+			       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
 
 		/* Now, copy roce port gids table to current mailbox for passing to FW */
 		gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
 		for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
-			memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16);
+			memcpy(gid_entry_mbox->raw,
+			       priv->port[port].gid_table.roce_gids[i].raw,
+			       MLX4_ROCE_GID_ENTRY_SIZE);
 
-		break;
+		err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
+			       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_NATIVE);
+		mutex_unlock(&(priv->port[port].gid_table.mutex));
+		return err;
 	}
-	return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+
+	return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
 			MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 			MLX4_CMD_NATIVE);
 }
@@ -1099,7 +1198,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
 	num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
 
 	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
-		if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
+		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
+			    MLX4_ROCE_GID_ENTRY_SIZE)) {
 			found_ix = i;
 			break;
 		}
@@ -1187,7 +1287,8 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
 	if (!mlx4_is_master(dev))
 		return -EINVAL;
 
-	memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16);
+	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
+	       MLX4_ROCE_GID_ENTRY_SIZE);
 	return 0;
 }
 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
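The core pattern in mlx4_reset_roce_port_gids() is: take the per-port table mutex, zero only the slice owned by the departing guest, then hand the firmware a consistent snapshot of the whole table. A minimal userspace sketch of that shape follows; it is an analogue, not the driver code, and gid_table, NUM_GIDS, and push_to_fw() are invented stand-ins for the mlx4 structures and the SET_PORT command.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define GID_SIZE 16	/* mirrors MLX4_ROCE_GID_ENTRY_SIZE */
#define NUM_GIDS 128	/* stand-in for MLX4_ROCE_MAX_GIDS */

struct gid_table {
	unsigned char gids[NUM_GIDS][GID_SIZE];
	pthread_mutex_t mutex;
};

/* Toy stand-in for pushing the table to firmware. */
static int push_to_fw(const unsigned char snapshot[NUM_GIDS][GID_SIZE])
{
	printf("pushed %d GIDs to firmware\n", NUM_GIDS);
	return 0;
}

/* Zero the slice [base, base + num) under the lock, then push a
 * consistent snapshot of the whole table. */
static int reset_slave_gids(struct gid_table *t, int base, int num)
{
	unsigned char snapshot[NUM_GIDS][GID_SIZE];
	int i, err;

	pthread_mutex_lock(&t->mutex);
	for (i = 0; i < num; i++)
		memset(t->gids[base + i], 0, GID_SIZE);
	memcpy(snapshot, t->gids, sizeof(snapshot));
	err = push_to_fw(snapshot);
	pthread_mutex_unlock(&t->mutex);
	return err;
}

int main(void)
{
	struct gid_table t = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	memset(t.gids, 0xab, sizeof(t.gids));
	return reset_slave_gids(&t, 8, 4); /* wipe one guest's 4 entries */
}

Holding the mutex across both the zeroing and the snapshot copy is what keeps a concurrent SET_PORT from seeing a half-wiped table.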
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -586,6 +586,7 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
 		}
 		/* free master's vlans */
 		i = dev->caps.function;
+		mlx4_reset_roce_gids(dev, i);
 		mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
 		rem_slave_vlans(dev, i);
 		mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
@@ -4681,7 +4682,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-
+	mlx4_reset_roce_gids(dev, slave);
 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 	rem_slave_vlans(dev, slave);
 	rem_slave_macs(dev, slave);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -1020,6 +1020,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
 	struct qlcnic_dcb_cee *peer;
 	int i;
 
+	memset(info, 0, sizeof(*info));
 	*app_count = 0;
 
 	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
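The one-line qlcnic fix is an instance of a general rule: zero an output structure before partially filling it, or the untouched fields and padding leak whatever happened to be in that memory. A self-contained illustration (the struct and its fields are invented for the example):

#include <stdio.h>
#include <string.h>

struct peer_info {
	int  app_count;
	char name[32];
	int  reserved[4];	/* fields a handler might forget to set */
};

/* Without the memset, 'reserved' and the tail of 'name' would be
 * copied out uninitialized - the same class of info leak the qlcnic
 * DCB netlink handler had. */
static void fill_info(struct peer_info *info)
{
	memset(info, 0, sizeof(*info));	/* the shape of the actual fix */
	info->app_count = 1;
	strncpy(info->name, "peer0", sizeof(info->name) - 1);
}

int main(void)
{
	struct peer_info info;

	fill_info(&info);
	printf("%s: %d apps, reserved[0]=%d\n",
	       info.name, info.app_count, info.reserved[0]);
	return 0;
}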
drivers/net/team/team.c
@@ -1724,6 +1724,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 	 * to traverse list in reverse under rcu_read_lock
 	 */
 	mutex_lock(&team->lock);
+	team->port_mtu_change_allowed = true;
 	list_for_each_entry(port, &team->port_list, list) {
 		err = dev_set_mtu(port->dev, new_mtu);
 		if (err) {
@@ -1732,6 +1733,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 			goto unwind;
 		}
 	}
+	team->port_mtu_change_allowed = false;
 	mutex_unlock(&team->lock);
 
 	dev->mtu = new_mtu;
@@ -1741,6 +1743,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 unwind:
 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
 		dev_set_mtu(port->dev, dev->mtu);
+	team->port_mtu_change_allowed = false;
 	mutex_unlock(&team->lock);
 
 	return err;
@@ -2851,7 +2854,9 @@ static int team_device_event(struct notifier_block *unused,
 		break;
 	case NETDEV_PRECHANGEMTU:
 		/* Forbid to change mtu of underlaying device */
-		return NOTIFY_BAD;
+		if (!port->team->port_mtu_change_allowed)
+			return NOTIFY_BAD;
+		break;
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid to change type of underlaying device */
 		return NOTIFY_BAD;
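The team fix is a re-entrancy pattern: the aggregate device must propagate an MTU change to its own ports while still vetoing MTU changes that anyone else initiates on those ports. A hedged userspace sketch of that "allow only our own nested operation" idea follows; the names are invented, and where the kernel serializes the flag under team->lock this toy is single-threaded.

#include <stdbool.h>
#include <stdio.h>

static bool port_mtu_change_allowed; /* opened only around our own updates */

/* Notifier analogue: veto externally-initiated MTU changes. */
static int port_pre_change_mtu(const char *port)
{
	if (!port_mtu_change_allowed) {
		printf("%s: refusing external MTU change\n", port);
		return -1;
	}
	return 0;
}

static int set_port_mtu(const char *port, int mtu)
{
	if (port_pre_change_mtu(port))
		return -1;
	printf("%s: mtu set to %d\n", port, mtu);
	return 0;
}

/* The aggregate device opens the gate only while it walks its ports. */
static int team_change_mtu(int mtu)
{
	int err;

	port_mtu_change_allowed = true;
	err = set_port_mtu("port0", mtu);
	port_mtu_change_allowed = false;
	return err;
}

int main(void)
{
	set_port_mtu("port0", 9000);	/* vetoed: external request */
	team_change_mtu(1400);		/* allowed: gate is open */
	return 0;
}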
drivers/net/usb/ipheth.c
@@ -59,6 +59,8 @@
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4	0x1297
 #define USB_PRODUCT_IPAD 0x129a
+#define USB_PRODUCT_IPAD_2	0x12a2
+#define USB_PRODUCT_IPAD_3	0x12a6
 #define USB_PRODUCT_IPAD_MINI    0x12ab
 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
 #define USB_PRODUCT_IPHONE_4S	0x12a0
@@ -106,6 +108,14 @@ static struct usb_device_id ipheth_table[] = {
 		USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
 		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
 		IPHETH_USBINTF_PROTO) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(
+		USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2,
+		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+		IPHETH_USBINTF_PROTO) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(
+		USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3,
+		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+		IPHETH_USBINTF_PROTO) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(
 		USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI,
 		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
drivers/net/usb/qmi_wwan.c
@@ -748,11 +748,15 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC73xx */
 	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC73xx */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 11)},	/* Sierra Wireless MC73xx */
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},	/* Sierra Wireless EM7700 */
+	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},	/* Sierra Wireless EM7355 */
+	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9051, 8)},	/* Netgear AirCard 340U */
 	{QMI_FIXED_INTF(0x1199, 0x9053, 8)},	/* Sierra Wireless Modem */
 	{QMI_FIXED_INTF(0x1199, 0x9054, 8)},	/* Sierra Wireless Modem */
+	{QMI_FIXED_INTF(0x1199, 0x9055, 8)},	/* Netgear AirCard 341U */
+	{QMI_FIXED_INTF(0x1199, 0x9056, 8)},	/* Sierra Wireless Modem */
+	{QMI_FIXED_INTF(0x1199, 0x9061, 8)},	/* Sierra Wireless Modem */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -826,7 +826,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 	if (ret)
 		goto out_remove_mac;
 
-	if (!mvm->bf_allowed_vif &&
+	if (!mvm->bf_allowed_vif && false &&
 	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
 	    mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
 		mvm->bf_allowed_vif = mvmvif;
include/linux/if_team.h
@@ -194,6 +194,7 @@ struct team {
 	bool user_carrier_enabled;
 	bool queue_override_enabled;
 	struct list_head *qom_lists; /* array of queue override mapping lists */
+	bool port_mtu_change_allowed;
 	struct {
 		unsigned int count;
 		unsigned int interval; /* in ms */
include/linux/netlink.h
@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
 }
 
 enum netlink_skb_flags {
-	NETLINK_SKB_MMAPED	= 0x1,		/* Packet data is mmaped */
-	NETLINK_SKB_TX		= 0x2,		/* Packet was sent by userspace */
-	NETLINK_SKB_DELIVERED	= 0x4,		/* Packet was delivered */
+	NETLINK_SKB_MMAPED	= 0x1,	/* Packet data is mmaped */
+	NETLINK_SKB_TX		= 0x2,	/* Packet was sent by userspace */
+	NETLINK_SKB_DELIVERED	= 0x4,	/* Packet was delivered */
+	NETLINK_SKB_DST		= 0x8,	/* Dst set in sendto or sendmsg */
 };
 
 struct netlink_skb_parms {
include/net/inetpeer.h
@@ -177,16 +177,9 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
 /* can be called with or without local BH being disabled */
 static inline int inet_getid(struct inet_peer *p, int more)
 {
-	int old, new;
 	more++;
 	inet_peer_refcheck(p);
-	do {
-		old = atomic_read(&p->ip_id_count);
-		new = old + more;
-		if (!new)
-			new = 1;
-	} while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
-	return new;
+	return atomic_add_return(more, &p->ip_id_count) - more;
 }
 
 #endif /* _NET_INETPEER_H */
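The inet_getid() change replaces a cmpxchg loop that skipped zero with a single atomic_add_return(). Besides being cheaper, the loop was the bug: forcing the counter to 1 whenever it would land on 0 broke the strict per-peer progression of IP IDs. A C11 userspace sketch of the fixed form (atomic_fetch_add returns the old value, so the kernel's "- more" correction is implicit):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ip_id_count;

/* Mirrors the fixed inet_getid(): reserve 'more + 1' IDs and return
 * the first of them; wrap-around is fine, every value gets used. */
static int get_id(int more)
{
	more++;
	return atomic_fetch_add(&ip_id_count, more);
}

int main(void)
{
	/* Three callers each consuming 2 IDs: prints 0, 2, 4. */
	for (int i = 0; i < 3; i++)
		printf("id=%d\n", get_id(1));
	return 0;
}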
lib/nlattr.c
@@ -201,8 +201,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
 	}
 
 	if (unlikely(rem > 0))
-		printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
-		       "attributes.\n", rem);
+		pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+				    rem, current->comm);
 
 	err = 0;
 errout:
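pr_warn_ratelimited() matters here because the warning is triggerable at will from userspace; unthrottled, it is a log-flooding primitive. A minimal standalone analogue of the ratelimit idea, with arbitrary interval/burst values standing in for the kernel's defaults:

#include <stdarg.h>
#include <stdio.h>
#include <time.h>

/* Allow at most 'burst' messages per 'interval' seconds, roughly the
 * behaviour of the ___ratelimit() helper behind pr_warn_ratelimited(). */
static int warn_ratelimited(const char *fmt, ...)
{
	static time_t window_start;
	static int printed;
	const int interval = 5, burst = 10;
	time_t now = time(NULL);
	va_list ap;

	if (now - window_start >= interval) {
		window_start = now;
		printed = 0;
	}
	if (printed >= burst)
		return 0;	/* suppressed */
	printed++;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	return 1;
}

int main(void)
{
	for (int i = 0; i < 100; i++)	/* only the first 10 get through */
		warn_ratelimited("netlink: %d bytes leftover\n", 4);
	return 0;
}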
net/batman-adv/multicast.c
@@ -415,7 +415,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
 	hlist_for_each_entry_rcu(tmp_orig_node,
 				 &bat_priv->mcast.want_all_ipv4_list,
 				 mcast_want_all_ipv4_node) {
-		if (!atomic_inc_not_zero(&orig_node->refcount))
+		if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
 			continue;
 
 		orig_node = tmp_orig_node;
@@ -442,7 +442,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
 	hlist_for_each_entry_rcu(tmp_orig_node,
 				 &bat_priv->mcast.want_all_ipv6_list,
 				 mcast_want_all_ipv6_node) {
-		if (!atomic_inc_not_zero(&orig_node->refcount))
+		if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
 			continue;
 
 		orig_node = tmp_orig_node;
@@ -493,7 +493,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
 	hlist_for_each_entry_rcu(tmp_orig_node,
 				 &bat_priv->mcast.want_all_unsnoopables_list,
 				 mcast_want_all_unsnoopables_node) {
-		if (!atomic_inc_not_zero(&orig_node->refcount))
+		if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
 			continue;
 
 		orig_node = tmp_orig_node;
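The batman-adv bug is easy to reproduce in any refcounted iteration: the code bumped the refcount through the result pointer (orig_node, still NULL) instead of the candidate being inspected (tmp_orig_node). A hedged C11 sketch of the correct shape, with a try_get that refuses objects whose count has already hit zero (all names invented):

#include <stdatomic.h>
#include <stdio.h>

struct node {
	const char *name;
	atomic_int refcount;	/* 0 means: being freed, don't touch */
};

/* Analogue of atomic_inc_not_zero(): take a reference only while live. */
static int node_try_get(struct node *n)
{
	int c = atomic_load(&n->refcount);

	while (c != 0)
		if (atomic_compare_exchange_weak(&n->refcount, &c, c + 1))
			return 1;
	return 0;
}

static struct node *pick_first_live(struct node *list, int len)
{
	struct node *result = NULL;

	for (int i = 0; i < len && !result; i++) {
		/* The fix: try_get on the *candidate*, not on 'result',
		 * which is still NULL at this point. */
		if (!node_try_get(&list[i]))
			continue;
		result = &list[i];
	}
	return result;
}

int main(void)
{
	struct node nodes[2] = { { "dying", 0 }, { "live", 1 } };
	struct node *n = pick_first_live(nodes, 2);

	printf("picked: %s\n", n ? n->name : "(none)");
	return 0;
}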
net/bluetooth/l2cap_core.c
@@ -7519,9 +7519,9 @@ int __init l2cap_init(void)
 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
 					    NULL, &l2cap_debugfs_fops);
 
-	debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
+	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
 			   &le_max_credits);
-	debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
+	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
 			   &le_default_mps);
 
 	bt_6lowpan_init();
net/bridge/br_fdb.c
@@ -487,6 +487,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 {
 	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
+	bool fdb_modified = false;
 
 	/* some users want to always flood. */
 	if (hold_time(br) == 0)
@@ -507,10 +508,15 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 					source->dev->name);
 		} else {
 			/* fastpath: update of existing entry */
-			fdb->dst = source;
+			if (unlikely(source != fdb->dst)) {
+				fdb->dst = source;
+				fdb_modified = true;
+			}
 			fdb->updated = jiffies;
 			if (unlikely(added_by_user))
 				fdb->added_by_user = 1;
+			if (unlikely(fdb_modified))
+				fdb_notify(br, fdb, RTM_NEWNEIGH);
 		}
 	} else {
 		spin_lock(&br->hash_lock);
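The bridge change is a notify-on-transition pattern: on the fastpath, user space should hear about an existing entry only when something actually changed, not on every packet that merely refreshes the timestamp. A small sketch of the same logic (the entry type and callback are invented):

#include <stdbool.h>
#include <stdio.h>

struct fdb_entry {
	int port;	/* where the MAC was last seen */
	long updated;	/* refreshed on every lookup hit */
};

static void fdb_notify(const struct fdb_entry *f)
{
	printf("RTM_NEWNEIGH: entry moved to port %d\n", f->port);
}

/* Refresh is unconditional; notification fires only when the entry
 * genuinely moved - the same shape as the br_fdb_update() fix. */
static void fdb_update(struct fdb_entry *f, int port, long now)
{
	bool modified = false;

	if (port != f->port) {
		f->port = port;
		modified = true;
	}
	f->updated = now;
	if (modified)
		fdb_notify(f);
}

int main(void)
{
	struct fdb_entry f = { .port = 1 };

	fdb_update(&f, 1, 100);	/* silent: just a refresh */
	fdb_update(&f, 2, 101);	/* notifies: station moved */
	return 0;
}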
net/bridge/br_input.c
@@ -147,8 +147,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 	u16 vid = 0;
 
-	br_vlan_get_tag(skb, &vid);
-	if (p->flags & BR_LEARNING)
+	/* check if vlan is allowed, to avoid spoofing */
+	if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
 	return 0;	 /* process further */
 }
net/bridge/br_private.h
@@ -581,6 +581,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
 			struct sk_buff *skb, u16 *vid);
 bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
 		       const struct sk_buff *skb);
+bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
 			       const struct net_port_vlans *v,
 			       struct sk_buff *skb);
@@ -648,6 +649,12 @@ static inline bool br_allowed_egress(struct net_bridge *br,
 	return true;
 }
 
+static inline bool br_should_learn(struct net_bridge_port *p,
+				   struct sk_buff *skb, u16 *vid)
+{
+	return true;
+}
+
 static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
 					     const struct net_port_vlans *v,
 					     struct sk_buff *skb)
net/bridge/br_vlan.c
@@ -241,6 +241,34 @@ bool br_allowed_egress(struct net_bridge *br,
 	return false;
 }
 
+/* Called under RCU */
+bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
+{
+	struct net_bridge *br = p->br;
+	struct net_port_vlans *v;
+
+	if (!br->vlan_enabled)
+		return true;
+
+	v = rcu_dereference(p->vlan_info);
+	if (!v)
+		return false;
+
+	br_vlan_get_tag(skb, vid);
+	if (!*vid) {
+		*vid = br_get_pvid(v);
+		if (*vid == VLAN_N_VID)
+			return false;
+
+		return true;
+	}
+
+	if (test_bit(*vid, v->vlan_bitmap))
+		return true;
+
+	return false;
+}
+
 /* Must be protected by RTNL.
  * Must be called with vid in range from 1 to 4094 inclusive.
  */
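br_should_learn() is a straightforward membership test: untagged traffic is mapped to the port's PVID, tagged traffic is checked against the port's VLAN bitmap. The same decision in standalone C, with the bitmap width and constants written out (this is illustrative, not the bridge's data structures):

#include <stdbool.h>
#include <stdio.h>

#define VLAN_N_VID 4096		/* doubles as the "no PVID" marker */
#define LONG_BITS (8 * sizeof(unsigned long))

struct port_vlans {
	unsigned short pvid;	/* VLAN assigned to untagged frames */
	unsigned long bitmap[VLAN_N_VID / LONG_BITS];
};

static void vlan_set(struct port_vlans *v, unsigned short vid)
{
	v->bitmap[vid / LONG_BITS] |= 1UL << (vid % LONG_BITS);
}

static bool vlan_test(const struct port_vlans *v, unsigned short vid)
{
	return v->bitmap[vid / LONG_BITS] & (1UL << (vid % LONG_BITS));
}

/* Mirrors the br_should_learn() decision on a vlan-filtering port. */
static bool should_learn(const struct port_vlans *v, unsigned short *vid)
{
	if (!*vid) {			/* untagged: adopt the PVID, if any */
		*vid = v->pvid;
		return *vid != VLAN_N_VID;
	}
	return vlan_test(v, *vid);	/* tagged: must be in the allowed set */
}

int main(void)
{
	struct port_vlans v = { .pvid = 10 };
	unsigned short vid;

	vlan_set(&v, 10);	/* allow VLAN 10 only */

	vid = 0;
	printf("untagged -> learn=%d (as vid %u)\n", should_learn(&v, &vid), vid);
	vid = 20;
	printf("vid 20   -> learn=%d\n", should_learn(&v, &vid));
	return 0;
}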
net/core/dev.c
@@ -2283,8 +2283,8 @@ EXPORT_SYMBOL(skb_checksum_help);
 
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
-	unsigned int vlan_depth = skb->mac_len;
 	__be16 type = skb->protocol;
+	int vlan_depth = skb->mac_len;
 
 	/* Tunnel gso handlers can set protocol to ethernet. */
 	if (type == htons(ETH_P_TEB)) {
@@ -2297,15 +2297,30 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 		type = eth->h_proto;
 	}
 
-	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
-		struct vlan_hdr *vh;
-
-		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
-			return 0;
-
-		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
-		type = vh->h_vlan_encapsulated_proto;
-		vlan_depth += VLAN_HLEN;
+	/* if skb->protocol is 802.1Q/AD then the header should already be
+	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+	 * ETH_HLEN otherwise
+	 */
+	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+		if (vlan_depth) {
+			if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
+				return 0;
+			vlan_depth -= VLAN_HLEN;
+		} else {
+			vlan_depth = ETH_HLEN;
+		}
+		do {
+			struct vlan_hdr *vh;
+
+			if (unlikely(!pskb_may_pull(skb,
+						    vlan_depth + VLAN_HLEN)))
+				return 0;
+
+			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+			type = vh->h_vlan_encapsulated_proto;
+			vlan_depth += VLAN_HLEN;
+		} while (type == htons(ETH_P_8021Q) ||
+			 type == htons(ETH_P_8021AD));
+	}
 
 	*depth = vlan_depth;
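The skb_network_protocol() rework walks a chain of stacked 802.1Q/802.1ad headers while checking, before every read, that the bytes are really there. The loop below does the same over a plain byte buffer; it is a hedged userspace rendering that starts from the outer EtherType rather than skb->protocol, with pskb_may_pull() replaced by a length check.

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8

/* Return the encapsulated ethertype and write the network-header offset
 * to *depth; 0 means the frame was truncated mid-tag. */
static uint16_t network_protocol(const uint8_t *frame, size_t len, int *depth)
{
	int off = ETH_HLEN - 2;	/* offset of the first ethertype field */
	uint16_t type;

	do {
		if ((size_t)(off + 2) > len)
			return 0;	/* like a failed pskb_may_pull() */
		type = (uint16_t)(frame[off] << 8 | frame[off + 1]);
		off += VLAN_HLEN;	/* step over TPID + TCI */
	} while (type == ETH_P_8021Q || type == ETH_P_8021AD);

	*depth = off - 2;	/* where the network header begins */
	return type;
}

int main(void)
{
	/* MACs, then an 802.1ad outer tag, an 802.1Q inner tag, then IPv4 */
	uint8_t frame[ETH_HLEN + 2 * VLAN_HLEN] = {0};
	int depth = 0;
	uint16_t type;

	frame[12] = 0x88; frame[13] = 0xA8;	/* outer TPID */
	frame[16] = 0x81; frame[17] = 0x00;	/* inner TPID */
	frame[20] = 0x08; frame[21] = 0x00;	/* ETH_P_IP */

	type = network_protocol(frame, sizeof(frame), &depth);
	printf("type=%04x depth=%d\n", type, depth);	/* 0800, 22 */
	return 0;
}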
net/core/filter.c
@@ -1559,8 +1559,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 	fp->jited = 0;
 
 	err = sk_chk_filter(fp->insns, fp->len);
-	if (err)
+	if (err) {
+		if (sk != NULL)
+			sk_filter_uncharge(sk, fp);
+		else
+			kfree(fp);
 		return ERR_PTR(err);
+	}
 
 	/* Probe if we can JIT compile the filter and if so, do
 	 * the compilation of the filter.
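The filter leak is the classic "validation failed but nobody freed the buffer" error path. The shape of the fix in a self-contained sketch (the alloc/validate names are invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct prog {
	size_t len;
	int *insns;
};

static int check_prog(const struct prog *p)
{
	return p->len ? 0 : -EINVAL;	/* stand-in for sk_chk_filter() */
}

static struct prog *prepare_prog(const int *insns, size_t len)
{
	struct prog *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->insns = malloc(len * sizeof(*insns));
	if (!p->insns) {
		free(p);
		return NULL;
	}
	p->len = len;
	memcpy(p->insns, insns, len * sizeof(*insns));

	if (check_prog(p)) {
		/* the fix: release everything on the error path too */
		free(p->insns);
		free(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	int insns[1] = { 0 };
	struct prog *p;

	p = prepare_prog(insns, 0);	/* fails validation, nothing leaks */
	printf("len 0 accepted? %s\n", p ? "yes" : "no");

	p = prepare_prog(insns, 1);	/* passes */
	printf("len 1 accepted? %s\n", p ? "yes" : "no");
	if (p) {
		free(p->insns);
		free(p);
	}
	return 0;
}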
net/ipv4/tcp_input.c
@@ -2684,13 +2684,12 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 	bool recovered = !before(tp->snd_una, tp->high_seq);
 
 	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
-		if (flag & FLAG_ORIG_SACK_ACKED) {
-			/* Step 3.b. A timeout is spurious if not all data are
-			 * lost, i.e., never-retransmitted data are (s)acked.
-			 */
-			tcp_try_undo_loss(sk, true);
-			return;
-		}
+		/* Step 3.b. A timeout is spurious if not all data are
+		 * lost, i.e., never-retransmitted data are (s)acked.
+		 */
+		if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
+			return;
+
 		if (after(tp->snd_nxt, tp->high_seq) &&
 		    (flag & FLAG_DATA_SACKED || is_dupack)) {
 			tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
net/ipv6/output_core.c
@@ -12,7 +12,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 {
 	static atomic_t ipv6_fragmentation_id;
 	struct in6_addr addr;
-	int old, new;
+	int ident;
 
 #if IS_ENABLED(CONFIG_IPV6)
 	struct inet_peer *peer;
@@ -26,15 +26,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 		return;
 	}
 #endif
-	do {
-		old = atomic_read(&ipv6_fragmentation_id);
-		new = old + 1;
-		if (!new)
-			new = 1;
-	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
+	ident = atomic_inc_return(&ipv6_fragmentation_id);
 
 	addr = rt->rt6i_dst.addr;
-	addr.s6_addr32[0] ^= (__force __be32)new;
+	addr.s6_addr32[0] ^= (__force __be32)ident;
 	fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32));
 }
 EXPORT_SYMBOL(ipv6_select_ident);
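Note that ipv6_select_ident() never exposes the global counter directly: the counter perturbs the destination address, which then goes through a keyed hash, so IDs advance per destination without being predictable across destinations. A toy illustration of that structure; the mixer below is a splitmix64 finalizer standing in for secure_ipv6_id(), which in the kernel is a keyed MD5 over the address.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint frag_id;

/* Toy stand-in for secure_ipv6_id(): any decent mixer shows the idea. */
static uint32_t mix(uint64_t x)
{
	x ^= x >> 30; x *= 0xbf58476d1ce4e5b9ULL;
	x ^= x >> 27; x *= 0x94d049bb133111ebULL;
	return (uint32_t)(x ^ (x >> 31));
}

static uint32_t select_ident(uint64_t dst_addr)
{
	uint32_t ident = atomic_fetch_add(&frag_id, 1) + 1;

	/* perturb the destination with the counter, then hash */
	return mix(dst_addr ^ ident);
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("dst A: id=%08x  dst B: id=%08x\n",
		       select_ident(0x20010db8000000a1ULL),
		       select_ident(0x20010db8000000b2ULL));
	return 0;
}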
net/netfilter/ipvs/ip_vs_core.c
@@ -1392,15 +1392,19 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 
 	if (ipip) {
 		__be32 info = ic->un.gateway;
+		__u8 type = ic->type;
+		__u8 code = ic->code;
 
 		/* Update the MTU */
 		if (ic->type == ICMP_DEST_UNREACH &&
 		    ic->code == ICMP_FRAG_NEEDED) {
 			struct ip_vs_dest *dest = cp->dest;
 			u32 mtu = ntohs(ic->un.frag.mtu);
+			__be16 frag_off = cih->frag_off;
 
 			/* Strip outer IP and ICMP, go to IPIP header */
-			__skb_pull(skb, ihl + sizeof(_icmph));
+			if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
+				goto ignore_ipip;
 			offset2 -= ihl + sizeof(_icmph);
 			skb_reset_network_header(skb);
 			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
@@ -1408,7 +1412,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 			ipv4_update_pmtu(skb, dev_net(skb->dev),
 					 mtu, 0, 0, 0, 0);
 			/* Client uses PMTUD? */
-			if (!(cih->frag_off & htons(IP_DF)))
+			if (!(frag_off & htons(IP_DF)))
 				goto ignore_ipip;
 			/* Prefer the resulting PMTU */
 			if (dest) {
@@ -1427,12 +1431,13 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 		/* Strip outer IP, ICMP and IPIP, go to IP header of
 		 * original request.
 		 */
-		__skb_pull(skb, offset2);
+		if (pskb_pull(skb, offset2) == NULL)
+			goto ignore_ipip;
 		skb_reset_network_header(skb);
 		IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
 			  &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
-			  ic->type, ic->code, ntohl(info));
-		icmp_send(skb, ic->type, ic->code, info);
+			  type, code, ntohl(info));
+		icmp_send(skb, type, code, info);
 		/* ICMP can be shorter but anyways, account it */
 		ip_vs_out_stats(cp, skb);
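__skb_pull() trusts its caller and walks straight past the end of a non-linear skb; pskb_pull() first makes sure the bytes are actually present (pulling paged data into the linear area if needed) and can fail. That contract difference is easy to show with a plain buffer type; struct buf and buf_pull() are invented for the sketch.

#include <stddef.h>
#include <stdio.h>

struct buf {
	unsigned char *data;
	size_t len;
};

/* Like pskb_pull(): advance past n bytes only if they exist, returning
 * NULL instead of corrupting state. */
static unsigned char *buf_pull(struct buf *b, size_t n)
{
	if (n > b->len)
		return NULL;	/* caller must bail out (goto ignore_ipip) */
	b->data += n;
	b->len -= n;
	return b->data;
}

int main(void)
{
	unsigned char frame[40] = {0};
	struct buf b = { frame, sizeof(frame) };

	if (!buf_pull(&b, 20))	/* strip outer IP header */
		return 1;
	if (!buf_pull(&b, 8))	/* strip ICMP header */
		return 1;
	if (!buf_pull(&b, 64))	/* would overrun: refused */
		printf("truncated packet ignored, no panic\n");
	return 0;
}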
net/netlink/af_netlink.c
@@ -1373,7 +1373,9 @@ retry:
 bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
 			  struct user_namespace *user_ns, int cap)
 {
-	return sk_ns_capable(nsp->sk, user_ns, cap);
+	return ((nsp->flags & NETLINK_SKB_DST) ||
+		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
+		ns_capable(user_ns, cap);
 }
 EXPORT_SYMBOL(__netlink_ns_capable);
 
@@ -2293,6 +2295,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	struct sk_buff *skb;
 	int err;
 	struct scm_cookie scm;
+	u32 netlink_skb_flags = 0;
 
 	if (msg->msg_flags&MSG_OOB)
 		return -EOPNOTSUPP;
@@ -2314,6 +2317,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		if ((dst_group || dst_portid) &&
 		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
 			goto out;
+		netlink_skb_flags |= NETLINK_SKB_DST;
 	} else {
 		dst_portid = nlk->dst_portid;
 		dst_group = nlk->dst_group;
@@ -2343,6 +2347,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	NETLINK_CB(skb).portid	= nlk->portid;
 	NETLINK_CB(skb).dst_group = dst_group;
 	NETLINK_CB(skb).creds	= siocb->scm->creds;
+	NETLINK_CB(skb).flags	= netlink_skb_flags;
 
 	err = -EFAULT;
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {