Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix MAC address setting in mac80211 pmsr code, from Johannes Berg.

 2) Probe SFP modules after being attached, from Russell King.

 3) Byte ordering bug in SMC rx_curs_confirmed code, from Ursula
    Braun.

 4) Revert some r8169 changes that are causing regressions, from
    Heiner Kallweit.

 5) Fix spurious connection timeouts in netfilter nat code, from
    Florian Westphal.

 6) SKB leak in tipc, from Hoang Le.

 7) Short packet checksum issue in mlx4, similar to a previous mlx5
    change, from Saeed Mahameed. The issue is that whilst padding
    bytes are usually zero, it is not guaranteed and the hardware
    doesn't take the padding bytes into consideration when generating
    the checksum.

 8) Fix various races in cls_tcindex, from Cong Wang.

 9) Need to set stream ext to NULL before freeing in SCTP code, from
    Xin Long.

10) Fix locking in phy_is_started, from Heiner Kallweit.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (54 commits)
  net: ethernet: freescale: set FEC ethtool regs version
  net: hns: Fix object reference leaks in hns_dsaf_roce_reset()
  mm: page_alloc: fix ref bias in page_frag_alloc() for 1-byte allocs
  net: phy: fix potential race in the phylib state machine
  net: phy: don't use locking in phy_is_started
  selftests: fix timestamping Makefile
  net: dsa: bcm_sf2: potential array overflow in bcm_sf2_sw_suspend()
  net: fix possible overflow in __sk_mem_raise_allocated()
  dsa: mv88e6xxx: Ensure all pending interrupts are handled prior to exit
  net: phy: fix interrupt handling in non-started states
  sctp: set stream ext to NULL after freeing it in sctp_stream_outq_migrate
  sctp: call gso_reset_checksum when computing checksum in sctp_gso_segment
  net/mlx5e: XDP, fix redirect resources availability check
  net/mlx5: Fix a compilation warning in events.c
  net/mlx5: No command allowed when command interface is not ready
  net/mlx5e: Fix NULL pointer derefernce in set channels error flow
  netfilter: nft_compat: use-after-free when deleting targets
  team: avoid complex list operations in team_nl_cmd_options_set()
  net_sched: fix two more memory leaks in cls_tcindex
  net_sched: fix a memory leak in cls_tcindex
  ...
commit
6e7bd3b549
@@ -22,8 +22,9 @@ and changeable from userspace under certain rules.
 2. Querying from userspace

 Both admin and operational state can be queried via the netlink
-operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK
-to be notified of updates. This is important for setting from userspace.
+operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK
+to be notified of updates while the interface is admin up. This is
+important for setting from userspace.

 These values contain interface state:

@@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to
 complete. Corresponding functions are netif_dormant_on() to set the
 flag, netif_dormant_off() to clear it and netif_dormant() to query.

-On device allocation, networking core sets the flags equivalent to
-netif_carrier_ok() and !netif_dormant().
+On device allocation, both flags __LINK_STATE_NOCARRIER and
+__LINK_STATE_DORMANT are cleared, so the effective state is equivalent
+to netif_carrier_ok() and !netif_dormant().


 Whenever the driver CHANGES one of these flags, a workqueue event is
@@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the
 driver. Afterwards, the userspace application can set IFLA_OPERSTATE
 to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set
 netif_carrier_off() or netif_dormant_on(). Changes made by userspace
-are multicasted on the netlink group RTMGRP_LINK.
+are multicasted on the netlink group RTNLGRP_LINK.

 So basically a 802.1X supplicant interacts with the kernel like this:

--subscribe to RTMGRP_LINK
+-subscribe to RTNLGRP_LINK
 -set IFLA_LINKMODE to 1 via RTM_SETLINK
 -query RTM_GETLINK once to get initial state
 -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until
@@ -197,9 +197,9 @@ config VXLAN

 config GENEVE
 	tristate "Generic Network Virtualization Encapsulation"
-	depends on INET && NET_UDP_TUNNEL
+	depends on INET
 	depends on IPV6 || !IPV6
-	select NET_IP_TUNNEL
+	select NET_UDP_TUNNEL
 	select GRO_CELLS
 	---help---
 	  This allows one to create geneve virtual interfaces that provide
@@ -690,7 +690,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
 	 * port, the other ones have already been disabled during
 	 * bcm_sf2_sw_setup
 	 */
-	for (port = 0; port < DSA_MAX_PORTS; port++) {
+	for (port = 0; port < ds->num_ports; port++) {
 		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
 			bcm_sf2_port_disable(ds, port, NULL);
 	}
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
 	unsigned int sub_irq;
 	unsigned int n;
 	u16 reg;
+	u16 ctl1;
 	int err;

 	mutex_lock(&chip->reg_lock);
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
 	if (err)
 		goto out;

-	for (n = 0; n < chip->g1_irq.nirqs; ++n) {
-		if (reg & (1 << n)) {
-			sub_irq = irq_find_mapping(chip->g1_irq.domain, n);
-			handle_nested_irq(sub_irq);
-			++nhandled;
+	do {
+		for (n = 0; n < chip->g1_irq.nirqs; ++n) {
+			if (reg & (1 << n)) {
+				sub_irq = irq_find_mapping(chip->g1_irq.domain,
+							   n);
+				handle_nested_irq(sub_irq);
+				++nhandled;
+			}
 		}
-	}
+
+		mutex_lock(&chip->reg_lock);
+		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
+		if (err)
+			goto unlock;
+		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
+unlock:
+		mutex_unlock(&chip->reg_lock);
+		if (err)
+			goto out;
+		ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
+	} while (reg & ctl1);
+
 out:
 	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
 }
@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
 		goto err_device_destroy;
 	}

-	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
-	/* Make sure we don't have a race with AENQ Links state handler */
-	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
-		netif_carrier_on(adapter->netdev);
-
 	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
 						      adapter->num_queues);
 	if (rc) {
@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
 	}

 	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
+	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+		netif_carrier_on(adapter->netdev);
+
 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
 	dev_err(&pdev->dev,
 		"Device reset completed successfully, Driver info: %s\n",
@@ -45,7 +45,7 @@

 #define DRV_MODULE_VER_MAJOR	2
 #define DRV_MODULE_VER_MINOR	0
-#define DRV_MODULE_VER_SUBMINOR 2
+#define DRV_MODULE_VER_SUBMINOR 3

 #define DRV_MODULE_NAME		"ena"
 #ifndef DRV_MODULE_VERSION
@@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
 	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
 	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+static __u32 fec_enet_register_version = 2;
 static u32 fec_enet_register_offset[] = {
 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = {
 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
 };
 #else
+static __u32 fec_enet_register_version = 1;
 static u32 fec_enet_register_offset[] = {
 	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
 	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
@@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
 	u32 *buf = (u32 *)regbuf;
 	u32 i, off;

+	regs->version = fec_enet_register_version;
+
 	memset(buf, 0, regs->len);

 	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
@@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
 	dsaf_dev = dev_get_drvdata(&pdev->dev);
 	if (!dsaf_dev) {
 		dev_err(&pdev->dev, "dsaf_dev is NULL\n");
+		put_device(&pdev->dev);
 		return -ENODEV;
 	}

@@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
 	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
 		dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
 			dsaf_dev->ae_dev.name);
+		put_device(&pdev->dev);
 		return -ENODEV;
 	}

@@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 }
 #endif

+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
 /* We reach this function only after checking that any of
  * the (IPv4 | IPv6) bits are set in cqe->status.
  */
@@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 		      netdev_features_t dev_features)
 {
 	__wsum hw_checksum = 0;
+	void *hdr;
+
+	/* CQE csum doesn't cover padding octets in short ethernet
+	 * frames. And the pad field is appended prior to calculating
+	 * and appending the FCS field.
+	 *
+	 * Detecting these padded frames requires to verify and parse
+	 * IP headers, so we simply force all those small frames to skip
+	 * checksum complete.
+	 */
+	if (short_frame(skb->len))
+		return -EINVAL;

-	void *hdr = (u8 *)va + sizeof(struct ethhdr);
-
+	hdr = (u8 *)va + sizeof(struct ethhdr);
 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

 	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
@@ -819,6 +832,11 @@ xdp_drop_no_cnt:
 	skb_record_rx_queue(skb, cq_ring);

 	if (likely(dev->features & NETIF_F_RXCSUM)) {
+		/* TODO: For IP non TCP/UDP packets when csum complete is
+		 * not an option (not supported or any other reason) we can
+		 * actually check cqe IPOK status bit and report
+		 * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
+		 */
 		if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
 					       MLX4_CQE_STATUS_UDP)) &&
 		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
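The mlx4 change above is compact enough to model outside the kernel. The following is only an illustrative userspace sketch of the short_frame() rule the patch introduces, not driver code: any frame at or below the minimum ethernet length may carry padding that the hardware checksum never covered, so checksum-complete is skipped for it. ETH_ZLEN and ETH_FCS_LEN carry their usual kernel values; the two sample frame sizes are hypothetical.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define ETH_ZLEN    60  /* minimum frame length, without FCS */
    #define ETH_FCS_LEN  4  /* frame check sequence length */

    /* Mirrors the short_frame() macro added above: at or below this
     * size the sender may have inserted padding, and the padding bytes
     * are not reflected in the hardware checksum, so checksum complete
     * must be skipped.
     */
    static bool short_frame(size_t size)
    {
        return size <= ETH_ZLEN + ETH_FCS_LEN;
    }

    int main(void)
    {
        /* a padded 64-byte ARP reply vs. a full-sized TCP segment */
        printf("64-byte frame:   skip csum complete? %s\n",
               short_frame(64) ? "yes" : "no");   /* yes */
        printf("1514-byte frame: skip csum complete? %s\n",
               short_frame(1514) ? "yes" : "no"); /* no */
        return 0;
    }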
@@ -1583,6 +1583,24 @@ no_trig:
 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
 }

+void mlx5_cmd_flush(struct mlx5_core_dev *dev)
+{
+	struct mlx5_cmd *cmd = &dev->cmd;
+	int i;
+
+	for (i = 0; i < cmd->max_reg_cmds; i++)
+		while (down_trylock(&cmd->sem))
+			mlx5_cmd_trigger_completions(dev);
+
+	while (down_trylock(&cmd->pages_sem))
+		mlx5_cmd_trigger_completions(dev);
+
+	/* Unlock cmdif */
+	up(&cmd->pages_sem);
+	for (i = 0; i < cmd->max_reg_cmds; i++)
+		up(&cmd->sem);
+}
+
 static int status_to_err(u8 status)
 {
 	return status ? -1 : 0; /* TBD more meaningful codes */
@@ -657,6 +657,7 @@ struct mlx5e_channel_stats {
 enum {
 	MLX5E_STATE_OPENED,
 	MLX5E_STATE_DESTROYING,
+	MLX5E_STATE_XDP_TX_ENABLED,
 };

 struct mlx5e_rqt {
@@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	int sq_num;
 	int i;

-	if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
+	/* this flag is sufficient, no need to test internal sq state */
+	if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
 		return -ENETDOWN;

 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,

 	sq = &priv->channels.c[sq_num]->xdpsq;

-	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-		return -ENETDOWN;
-
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
 		struct mlx5e_xdp_info xdpi;
@@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		   u32 flags);

+static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
+{
+	set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
+static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
+{
+	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+	/* let other device's napi(s) see our new state */
+	synchronize_rcu();
+}
+
+static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
+{
+	return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
 {
 	if (sq->doorbell_cseg) {
@@ -354,9 +354,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,

 	new_channels.params = priv->channels.params;
 	new_channels.params.num_channels = count;
-	if (!netif_is_rxfh_configured(priv->netdev))
-		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
-					      MLX5E_INDIR_RQT_SIZE, count);

 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		priv->channels.params = new_channels.params;
@@ -372,6 +369,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 	if (arfs_enabled)
 		mlx5e_arfs_disable(priv);

+	if (!netif_is_rxfh_configured(priv->netdev))
+		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+					      MLX5E_INDIR_RQT_SIZE, count);
+
 	/* Switch to new channels, set new parameters and close old ones */
 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);

@@ -2938,6 +2938,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)

 	mlx5e_build_tx2sq_maps(priv);
 	mlx5e_activate_channels(&priv->channels);
+	mlx5e_xdp_tx_enable(priv);
 	netif_tx_start_all_queues(priv->netdev);

 	if (mlx5e_is_vport_rep(priv))
@@ -2959,6 +2960,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 	 */
 	netif_tx_stop_all_queues(priv->netdev);
 	netif_tx_disable(priv->netdev);
+	mlx5e_xdp_tx_disable(priv);
 	mlx5e_deactivate_channels(&priv->channels);
 }

@@ -211,11 +211,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data)
 	enum port_module_event_status_type module_status;
 	enum port_module_event_error_type error_type;
 	struct mlx5_eqe_port_module *module_event_eqe;
-	const char *status_str, *error_str;
+	const char *status_str;
 	u8 module_num;

 	module_event_eqe = &eqe->data.port_module;
-	module_num = module_event_eqe->module;
 	module_status = module_event_eqe->module_status &
 			PORT_MODULE_EVENT_MODULE_STATUS_MASK;
 	error_type = module_event_eqe->error_type &
@@ -223,25 +222,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data)

 	if (module_status < MLX5_MODULE_STATUS_NUM)
 		events->pme_stats.status_counters[module_status]++;
-	status_str = mlx5_pme_status_to_string(module_status);

-	if (module_status == MLX5_MODULE_STATUS_ERROR) {
+	if (module_status == MLX5_MODULE_STATUS_ERROR)
 		if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
 			events->pme_stats.error_counters[error_type]++;
-		error_str = mlx5_pme_error_to_string(error_type);
-	}

 	if (!printk_ratelimit())
 		return NOTIFY_OK;

-	if (module_status == MLX5_MODULE_STATUS_ERROR)
+	module_num = module_event_eqe->module;
+	status_str = mlx5_pme_status_to_string(module_status);
+	if (module_status == MLX5_MODULE_STATUS_ERROR) {
+		const char *error_str = mlx5_pme_error_to_string(error_type);
+
 		mlx5_core_err(events->dev,
 			      "Port module event[error]: module %u, %s, %s\n",
 			      module_num, status_str, error_str);
-	else
+	} else {
 		mlx5_core_info(events->dev,
 			       "Port module event: module %u, %s\n",
 			       module_num, status_str);
+	}

 	return NOTIFY_OK;
 }
@@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 	mlx5_core_err(dev, "start\n");
 	if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
 		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
-		mlx5_cmd_trigger_completions(dev);
+		mlx5_cmd_flush(dev);
 	}

 	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
@@ -126,6 +126,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
 			     struct ptp_system_timestamp *sts);

 void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
+void mlx5_cmd_flush(struct mlx5_core_dev *dev);
 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);

@@ -1286,11 +1286,13 @@ static u16 rtl_get_events(struct rtl8169_private *tp)
 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
 {
 	RTL_W16(tp, IntrStatus, bits);
+	mmiowb();
 }

 static void rtl_irq_disable(struct rtl8169_private *tp)
 {
 	RTL_W16(tp, IntrMask, 0);
+	mmiowb();
 }

 #define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
@@ -6072,7 +6074,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	struct device *d = tp_to_dev(tp);
 	dma_addr_t mapping;
 	u32 opts[2], len;
-	bool stop_queue;
 	int frags;

 	if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -6114,6 +6115,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,

 	txd->opts2 = cpu_to_le32(opts[1]);

+	netdev_sent_queue(dev, skb->len);
+
 	skb_tx_timestamp(skb);

 	/* Force memory writes to complete before releasing descriptor */
@@ -6126,14 +6129,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,

 	tp->cur_tx += frags + 1;

-	stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
-	if (unlikely(stop_queue))
+	RTL_W8(tp, TxPoll, NPQ);
+
+	mmiowb();
+
+	if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
+		 * not miss a ring update when it notices a stopped queue.
+		 */
+		smp_wmb();
 		netif_stop_queue(dev);
-
-	if (__netdev_sent_queue(dev, skb->len, skb->xmit_more))
-		RTL_W8(tp, TxPoll, NPQ);
-
-	if (unlikely(stop_queue)) {
 		/* Sync with rtl_tx:
 		 * - publish queue status and cur_tx ring index (write barrier)
 		 * - refresh dirty_tx ring index (read barrier).
@@ -6483,7 +6488,9 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)

 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
+
 		rtl_irq_enable(tp);
+		mmiowb();
 	}

 	return work_done;
@@ -6115,7 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
 static int efx_ef10_mtd_probe(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
-	DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT);
+	DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
 	struct efx_mcdi_mtd_partition *parts;
 	size_t outlen, n_parts_total, i, n_parts;
 	unsigned int type;
@@ -553,7 +553,7 @@ int phy_start_aneg(struct phy_device *phydev)
 	if (err < 0)
 		goto out_unlock;

-	if (__phy_is_started(phydev)) {
+	if (phy_is_started(phydev)) {
 		if (phydev->autoneg == AUTONEG_ENABLE) {
 			err = phy_check_link_status(phydev);
 		} else {
@@ -709,7 +709,7 @@ void phy_stop_machine(struct phy_device *phydev)
 	cancel_delayed_work_sync(&phydev->state_queue);

 	mutex_lock(&phydev->lock);
-	if (__phy_is_started(phydev))
+	if (phy_is_started(phydev))
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
 }
@@ -762,9 +762,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 {
 	struct phy_device *phydev = phy_dat;

-	if (!phy_is_started(phydev))
-		return IRQ_NONE; /* It can't be ours. */
-
 	if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
 		return IRQ_NONE;

@@ -842,15 +839,14 @@ EXPORT_SYMBOL(phy_stop_interrupts);
  */
 void phy_stop(struct phy_device *phydev)
 {
-	mutex_lock(&phydev->lock);
-
-	if (!__phy_is_started(phydev)) {
+	if (!phy_is_started(phydev)) {
 		WARN(1, "called from state %s\n",
 		     phy_state_to_str(phydev->state));
-		mutex_unlock(&phydev->lock);
 		return;
 	}

+	mutex_lock(&phydev->lock);
+
 	if (phy_interrupt_is_valid(phydev))
 		phy_disable_interrupts(phydev);

@@ -989,8 +985,10 @@ void phy_state_machine(struct work_struct *work)
 	 * state machine would be pointless and possibly error prone when
 	 * called from phy_disconnect() synchronously.
 	 */
+	mutex_lock(&phydev->lock);
 	if (phy_polling_mode(phydev) && phy_is_started(phydev))
 		phy_queue_state_machine(phydev, PHY_STATE_TIME);
+	mutex_unlock(&phydev->lock);
 }

 /**
@@ -474,6 +474,17 @@ static void phylink_run_resolve(struct phylink *pl)
 		queue_work(system_power_efficient_wq, &pl->resolve);
 }

+static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
+{
+	unsigned long state = pl->phylink_disable_state;
+
+	set_bit(bit, &pl->phylink_disable_state);
+	if (state == 0) {
+		queue_work(system_power_efficient_wq, &pl->resolve);
+		flush_work(&pl->resolve);
+	}
+}
+
 static void phylink_fixed_poll(struct timer_list *t)
 {
 	struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -924,9 +935,7 @@ void phylink_stop(struct phylink *pl)
 	if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
 		del_timer_sync(&pl->link_poll);

-	set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
-	queue_work(system_power_efficient_wq, &pl->resolve);
-	flush_work(&pl->resolve);
+	phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
 }
 EXPORT_SYMBOL_GPL(phylink_stop);

@@ -1632,9 +1641,7 @@ static void phylink_sfp_link_down(void *upstream)

 	ASSERT_RTNL();

-	set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
-	queue_work(system_power_efficient_wq, &pl->resolve);
-	flush_work(&pl->resolve);
+	phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
 }

 static void phylink_sfp_link_up(void *upstream)
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
 				return ret;
 		}
 	}
+	bus->socket_ops->attach(bus->sfp);
 	if (bus->started)
 		bus->socket_ops->start(bus->sfp);
 	bus->netdev->sfp_bus = bus;
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
 	if (bus->registered) {
 		if (bus->started)
 			bus->socket_ops->stop(bus->sfp);
+		bus->socket_ops->detach(bus->sfp);
 		if (bus->phydev && ops && ops->disconnect_phy)
 			ops->disconnect_phy(bus->upstream);
 	}
@@ -184,6 +184,7 @@ struct sfp {

 	struct gpio_desc *gpio[GPIO_MAX];

+	bool attached;
 	unsigned int state;
 	struct delayed_work poll;
 	struct delayed_work timeout;
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
 	 */
 	switch (sfp->sm_mod_state) {
 	default:
-		if (event == SFP_E_INSERT) {
+		if (event == SFP_E_INSERT && sfp->attached) {
 			sfp_module_tx_disable(sfp);
 			sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
 		}
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
 	mutex_unlock(&sfp->sm_mutex);
 }

+static void sfp_attach(struct sfp *sfp)
+{
+	sfp->attached = true;
+	if (sfp->state & SFP_F_PRESENT)
+		sfp_sm_event(sfp, SFP_E_INSERT);
+}
+
+static void sfp_detach(struct sfp *sfp)
+{
+	sfp->attached = false;
+	sfp_sm_event(sfp, SFP_E_REMOVE);
+}
+
 static void sfp_start(struct sfp *sfp)
 {
 	sfp_sm_event(sfp, SFP_E_DEV_UP);
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
 }

 static const struct sfp_socket_ops sfp_module_ops = {
+	.attach = sfp_attach,
+	.detach = sfp_detach,
 	.start = sfp_start,
 	.stop = sfp_stop,
 	.module_info = sfp_module_info,
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
 		dev_info(sfp->dev, "Host maximum power %u.%uW\n",
 			 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);

-	sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
-	if (!sfp->sfp_bus)
-		return -ENOMEM;
-
 	/* Get the initial state, and always signal TX disable,
 	 * since the network interface will not be up.
 	 */
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
 		sfp->state |= SFP_F_RATE_SELECT;
 	sfp_set_state(sfp, sfp->state);
 	sfp_module_tx_disable(sfp);
-	rtnl_lock();
-	if (sfp->state & SFP_F_PRESENT)
-		sfp_sm_event(sfp, SFP_E_INSERT);
-	rtnl_unlock();

 	for (i = 0; i < GPIO_MAX; i++) {
 		if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
 		dev_warn(sfp->dev,
 			 "No tx_disable pin: SFP modules will always be emitting.\n");

+	sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
+	if (!sfp->sfp_bus)
+		return -ENOMEM;
+
 	return 0;
 }

@@ -7,6 +7,8 @@
 struct sfp;

 struct sfp_socket_ops {
+	void (*attach)(struct sfp *sfp);
+	void (*detach)(struct sfp *sfp);
 	void (*start)(struct sfp *sfp);
 	void (*stop)(struct sfp *sfp);
 	int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
 	}
 }

-static bool __team_option_inst_tmp_find(const struct list_head *opts,
-					const struct team_option_inst *needle)
-{
-	struct team_option_inst *opt_inst;
-
-	list_for_each_entry(opt_inst, opts, tmp_list)
-		if (opt_inst == needle)
-			return true;
-	return false;
-}
-
 static int __team_options_register(struct team *team,
 				   const struct team_option *option,
 				   size_t option_count)
@@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
 	int err = 0;
 	int i;
 	struct nlattr *nl_option;
-	LIST_HEAD(opt_inst_list);

 	rtnl_lock();

@@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
 		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
 		struct nlattr *attr;
 		struct nlattr *attr_data;
+		LIST_HEAD(opt_inst_list);
 		enum team_option_type opt_type;
 		int opt_port_ifindex = 0; /* != 0 for per-port options */
 		u32 opt_array_index = 0;
@@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
 			if (err)
 				goto team_put;
 			opt_inst->changed = true;
-
-			/* dumb/evil user-space can send us duplicate opt,
-			 * keep only the last one
-			 */
-			if (__team_option_inst_tmp_find(&opt_inst_list,
-							opt_inst))
-				continue;
-
 			list_add(&opt_inst->tmp_list, &opt_inst_list);
 		}
 		if (!opt_found) {
 			err = -ENOENT;
 			goto team_put;
 		}
-	}

-	err = team_nl_send_event_options_get(team, &opt_inst_list);
+		err = team_nl_send_event_options_get(team, &opt_inst_list);
+		if (err)
+			break;
+	}

 team_put:
 	team_nl_team_put(team);
@@ -2219,7 +2219,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 	struct pcpu_sw_netstats *tx_stats, *rx_stats;
 	union vxlan_addr loopback;
 	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
-	struct net_device *dev = skb->dev;
+	struct net_device *dev;
 	int len = skb->len;

 	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
@@ -2239,9 +2239,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 #endif
 	}

+	rcu_read_lock();
+	dev = skb->dev;
+	if (unlikely(!(dev->flags & IFF_UP))) {
+		kfree_skb(skb);
+		goto drop;
+	}
+
 	if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
-		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
-			    vni);
+		vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);

 	u64_stats_update_begin(&tx_stats->syncp);
 	tx_stats->tx_packets++;
@@ -2254,8 +2260,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 		rx_stats->rx_bytes += len;
 		u64_stats_update_end(&rx_stats->syncp);
 	} else {
+drop:
 		dev->stats.rx_dropped++;
 	}
+	rcu_read_unlock();
 }

 static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
@@ -674,26 +674,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
 size_t phy_speeds(unsigned int *speeds, size_t size,
 		  unsigned long *mask);

-static inline bool __phy_is_started(struct phy_device *phydev)
-{
-	WARN_ON(!mutex_is_locked(&phydev->lock));
-
-	return phydev->state >= PHY_UP;
-}
-
 /**
  * phy_is_started - Convenience function to check whether PHY is started
  * @phydev: The phy_device struct
  */
 static inline bool phy_is_started(struct phy_device *phydev)
 {
-	bool started;
-
-	mutex_lock(&phydev->lock);
-	started = __phy_is_started(phydev);
-	mutex_unlock(&phydev->lock);
-
-	return started;
+	return phydev->state >= PHY_UP;
 }

 void phy_resolve_aneg_linkmode(struct phy_device *phydev);
@@ -39,6 +39,7 @@ struct inet_peer {

 	u32			metrics[RTAX_MAX];
 	u32			rate_tokens;	/* rate limiting for ICMP */
+	u32			n_redirects;
 	unsigned long		rate_last;
 	/*
 	 * Once inet_peer is queued for deletion (refcnt == 0), following field
@@ -1277,7 +1277,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
 	percpu_counter_inc(sk->sk_prot->sockets_allocated);
 }

-static inline int
+static inline u64
 sk_sockets_allocated_read_positive(struct sock *sk)
 {
 	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
@@ -137,15 +137,21 @@ enum {
 	INET_DIAG_TCLASS,
 	INET_DIAG_SKMEMINFO,
 	INET_DIAG_SHUTDOWN,
-	INET_DIAG_DCTCPINFO,
-	INET_DIAG_PROTOCOL,  /* response attribute only */
+
+	/*
+	 * Next extenstions cannot be requested in struct inet_diag_req_v2:
+	 * its field idiag_ext has only 8 bits.
+	 */
+
+	INET_DIAG_DCTCPINFO,	/* request as INET_DIAG_VEGASINFO */
+	INET_DIAG_PROTOCOL,	/* response attribute only */
 	INET_DIAG_SKV6ONLY,
 	INET_DIAG_LOCALS,
 	INET_DIAG_PEERS,
 	INET_DIAG_PAD,
-	INET_DIAG_MARK,
-	INET_DIAG_BBRINFO,
-	INET_DIAG_CLASS_ID,
+	INET_DIAG_MARK,		/* only with CAP_NET_ADMIN */
+	INET_DIAG_BBRINFO,	/* request as INET_DIAG_VEGASINFO */
+	INET_DIAG_CLASS_ID,	/* request as INET_DIAG_TCLASS */
 	INET_DIAG_MD5SIG,
 	__INET_DIAG_MAX,
 };
@@ -4675,11 +4675,11 @@ refill:
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
-		page_ref_add(page, size - 1);
+		page_ref_add(page, size);

 		/* reset page count bias and offset to start of new frag */
 		nc->pfmemalloc = page_is_pfmemalloc(page);
-		nc->pagecnt_bias = size;
+		nc->pagecnt_bias = size + 1;
 		nc->offset = size;
 	}

@@ -4695,10 +4695,10 @@ refill:
 		size = nc->size;
 #endif
 		/* OK, page count is 0, we can safely set it */
-		set_page_count(page, size);
+		set_page_count(page, size + 1);

 		/* reset page count bias and offset to start of new frag */
-		nc->pagecnt_bias = size;
+		nc->pagecnt_bias = size + 1;
 		offset = size - fragsz;
 	}

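A worked count makes the page_frag change above easier to follow: with worst-case 1-byte allocations, a page of size bytes can back size live fragments, and the cache must keep one extra reference of its own so the page is not freed while the cache still points at it, hence size + 1 on both the refcount and the bias. The sketch below is only a toy model of that accounting under those assumptions, not the allocator itself.

    #include <stdio.h>

    /* Toy model of the refcount accounting in page_frag_alloc() when a
     * page of `size` bytes is served out as `size` one-byte fragments.
     * Every user eventually drops one page reference; the cache must be
     * left holding one of its own, or the page is freed while the cache
     * still points at it.
     */
    static void simulate(int size, int initial_refs)
    {
        int refs = initial_refs;

        refs -= size;   /* all `size` users free their fragment */

        printf("refill with %d refs: %d left -> %s\n",
               initial_refs, refs,
               refs > 0 ? "cache still owns the page"
                        : "page freed under the cache (bug)");
    }

    int main(void)
    {
        int size = 4096;

        simulate(size, size);       /* old accounting: bug */
        simulate(size, size + 1);   /* fixed accounting */
        return 0;
    }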
@ -227,6 +227,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
|
|||||||
|
|
||||||
switch (ntohs(ethhdr->h_proto)) {
|
switch (ntohs(ethhdr->h_proto)) {
|
||||||
case ETH_P_8021Q:
|
case ETH_P_8021Q:
|
||||||
|
if (!pskb_may_pull(skb, sizeof(*vhdr)))
|
||||||
|
goto dropped;
|
||||||
vhdr = vlan_eth_hdr(skb);
|
vhdr = vlan_eth_hdr(skb);
|
||||||
|
|
||||||
/* drop batman-in-batman packets to prevent loops */
|
/* drop batman-in-batman packets to prevent loops */
|
||||||
|
@ -2380,7 +2380,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (sk_has_memory_pressure(sk)) {
|
if (sk_has_memory_pressure(sk)) {
|
||||||
int alloc;
|
u64 alloc;
|
||||||
|
|
||||||
if (!sk_under_memory_pressure(sk))
|
if (!sk_under_memory_pressure(sk))
|
||||||
return 1;
|
return 1;
|
||||||
|
@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
|
|||||||
+ nla_total_size(1) /* INET_DIAG_TOS */
|
+ nla_total_size(1) /* INET_DIAG_TOS */
|
||||||
+ nla_total_size(1) /* INET_DIAG_TCLASS */
|
+ nla_total_size(1) /* INET_DIAG_TCLASS */
|
||||||
+ nla_total_size(4) /* INET_DIAG_MARK */
|
+ nla_total_size(4) /* INET_DIAG_MARK */
|
||||||
|
+ nla_total_size(4) /* INET_DIAG_CLASS_ID */
|
||||||
+ nla_total_size(sizeof(struct inet_diag_meminfo))
|
+ nla_total_size(sizeof(struct inet_diag_meminfo))
|
||||||
+ nla_total_size(sizeof(struct inet_diag_msg))
|
+ nla_total_size(sizeof(struct inet_diag_msg))
|
||||||
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
|
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
|
||||||
@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
|
|||||||
goto errout;
|
goto errout;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
|
if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
|
||||||
|
ext & (1 << (INET_DIAG_TCLASS - 1))) {
|
||||||
u32 classid = 0;
|
u32 classid = 0;
|
||||||
|
|
||||||
#ifdef CONFIG_SOCK_CGROUP_DATA
|
#ifdef CONFIG_SOCK_CGROUP_DATA
|
||||||
classid = sock_cgroup_classid(&sk->sk_cgrp_data);
|
classid = sock_cgroup_classid(&sk->sk_cgrp_data);
|
||||||
#endif
|
#endif
|
||||||
|
/* Fallback to socket priority if class id isn't set.
|
||||||
|
* Classful qdiscs use it as direct reference to class.
|
||||||
|
* For cgroup2 classid is always zero.
|
||||||
|
*/
|
||||||
|
if (!classid)
|
||||||
|
classid = sk->sk_priority;
|
||||||
|
|
||||||
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
|
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
|
||||||
goto errout;
|
goto errout;
|
||||||
|
@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
|
|||||||
atomic_set(&p->rid, 0);
|
atomic_set(&p->rid, 0);
|
||||||
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
|
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
|
||||||
p->rate_tokens = 0;
|
p->rate_tokens = 0;
|
||||||
|
p->n_redirects = 0;
|
||||||
/* 60*HZ is arbitrary, but chosen enough high so that the first
|
/* 60*HZ is arbitrary, but chosen enough high so that the first
|
||||||
* calculation of tokens is at its maximum.
|
* calculation of tokens is at its maximum.
|
||||||
*/
|
*/
|
||||||
|
@ -215,6 +215,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
|
|||||||
|
|
||||||
/* Change outer to look like the reply to an incoming packet */
|
/* Change outer to look like the reply to an incoming packet */
|
||||||
nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
|
nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
|
||||||
|
target.dst.protonum = IPPROTO_ICMP;
|
||||||
if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
|
if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
|
|||||||
int snmp_version(void *context, size_t hdrlen, unsigned char tag,
|
int snmp_version(void *context, size_t hdrlen, unsigned char tag,
|
||||||
const void *data, size_t datalen)
|
const void *data, size_t datalen)
|
||||||
{
|
{
|
||||||
|
if (datalen != 1)
|
||||||
|
return -EINVAL;
|
||||||
if (*(unsigned char *)data > 1)
|
if (*(unsigned char *)data > 1)
|
||||||
return -ENOTSUPP;
|
return -ENOTSUPP;
|
||||||
return 1;
|
return 1;
|
||||||
@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
|
|||||||
const void *data, size_t datalen)
|
const void *data, size_t datalen)
|
||||||
{
|
{
|
||||||
struct snmp_ctx *ctx = (struct snmp_ctx *)context;
|
struct snmp_ctx *ctx = (struct snmp_ctx *)context;
|
||||||
__be32 *pdata = (__be32 *)data;
|
__be32 *pdata;
|
||||||
|
|
||||||
|
if (datalen != 4)
|
||||||
|
return -EINVAL;
|
||||||
|
pdata = (__be32 *)data;
|
||||||
if (*pdata == ctx->from) {
|
if (*pdata == ctx->from) {
|
||||||
pr_debug("%s: %pI4 to %pI4\n", __func__,
|
pr_debug("%s: %pI4 to %pI4\n", __func__,
|
||||||
(void *)&ctx->from, (void *)&ctx->to);
|
(void *)&ctx->from, (void *)&ctx->to);
|
||||||
|
@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
|
|||||||
/* No redirected packets during ip_rt_redirect_silence;
|
/* No redirected packets during ip_rt_redirect_silence;
|
||||||
* reset the algorithm.
|
* reset the algorithm.
|
||||||
*/
|
*/
|
||||||
if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
|
if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
|
||||||
peer->rate_tokens = 0;
|
peer->rate_tokens = 0;
|
||||||
|
peer->n_redirects = 0;
|
||||||
|
}
|
||||||
|
|
||||||
/* Too many ignored redirects; do not send anything
|
/* Too many ignored redirects; do not send anything
|
||||||
* set dst.rate_last to the last seen redirected packet.
|
* set dst.rate_last to the last seen redirected packet.
|
||||||
*/
|
*/
|
||||||
if (peer->rate_tokens >= ip_rt_redirect_number) {
|
if (peer->n_redirects >= ip_rt_redirect_number) {
|
||||||
peer->rate_last = jiffies;
|
peer->rate_last = jiffies;
|
||||||
goto out_put_peer;
|
goto out_put_peer;
|
||||||
}
|
}
|
||||||
@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
|
|||||||
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
|
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
|
||||||
peer->rate_last = jiffies;
|
peer->rate_last = jiffies;
|
||||||
++peer->rate_tokens;
|
++peer->rate_tokens;
|
||||||
|
++peer->n_redirects;
|
||||||
#ifdef CONFIG_IP_ROUTE_VERBOSE
|
#ifdef CONFIG_IP_ROUTE_VERBOSE
|
||||||
if (log_martians &&
|
if (log_martians &&
|
||||||
peer->rate_tokens == ip_rt_redirect_number)
|
peer->rate_tokens == ip_rt_redirect_number)
|
||||||
|
@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
|
|||||||
list_for_each_entry(ifa, &idev->addr_list, if_list) {
|
list_for_each_entry(ifa, &idev->addr_list, if_list) {
|
||||||
if (ifa == ifp)
|
if (ifa == ifp)
|
||||||
continue;
|
continue;
|
||||||
if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
|
if (ifa->prefix_len != ifp->prefix_len ||
|
||||||
|
!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
|
||||||
ifp->prefix_len))
|
ifp->prefix_len))
|
||||||
continue;
|
continue;
|
||||||
if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
|
if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
|
||||||
|
@ -226,6 +226,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
|
|||||||
}
|
}
|
||||||
|
|
||||||
nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
|
nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
|
||||||
|
target.dst.protonum = IPPROTO_ICMPV6;
|
||||||
if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
|
if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
         rcu_read_unlock();
 
         genlmsg_end(msg, hdr);
-        genlmsg_reply(msg, info);
-
-        return 0;
+        return genlmsg_reply(msg, info);
 
 nla_put_failure:
         rcu_read_unlock();
@@ -8,7 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 
         set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
 
+        ieee80211_agg_stop_txq(sta, tid);
+
         spin_unlock_bh(&sta->lock);
 
         ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
@@ -5,7 +5,7 @@
  * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -2146,6 +2146,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                 case NL80211_IFTYPE_AP_VLAN:
                 case NL80211_IFTYPE_MONITOR:
                         break;
+                case NL80211_IFTYPE_ADHOC:
+                        if (sdata->vif.bss_conf.ibss_joined)
+                                WARN_ON(drv_join_ibss(local, sdata));
+                        /* fall through */
                 default:
                         ieee80211_reconfig_stations(sdata);
                         /* fall through */
@@ -29,6 +29,7 @@ config IP_VS_IPV6
         bool "IPv6 support for IPVS"
         depends on IPV6 = y || IP_VS = IPV6
         select IP6_NF_IPTABLES
+        select NF_DEFRAG_IPV6
         ---help---
           Add IPv6 support to IPVS.
 
@@ -1536,14 +1536,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
         /* sorry, all this trouble for a no-hit :) */
         IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
                       "ip_vs_in: packet continues traversal as normal");
-        if (iph->fragoffs) {
-                /* Fragment that couldn't be mapped to a conn entry
-                 * is missing module nf_defrag_ipv6
-                 */
-                IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
+
+        /* Fragment couldn't be mapped to a conn entry */
+        if (iph->fragoffs)
                 IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
                               "unhandled fragment");
-        }
+
         *verdict = NF_ACCEPT;
         return 0;
 }
@@ -43,6 +43,7 @@
 #ifdef CONFIG_IP_VS_IPV6
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 #endif
 #include <net/route.h>
 #include <net/sock.h>
@@ -895,6 +896,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 {
         struct ip_vs_dest *dest;
         unsigned int atype, i;
+        int ret = 0;
 
         EnterFunction(2);
 
@@ -905,6 +907,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
                       atype & IPV6_ADDR_LINKLOCAL) &&
                         !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6))
                         return -EINVAL;
+
+                ret = nf_defrag_ipv6_enable(svc->ipvs->net);
+                if (ret)
+                        return ret;
         } else
 #endif
         {
@@ -1228,6 +1234,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
                         ret = -EINVAL;
                         goto out_err;
                 }
+
+                ret = nf_defrag_ipv6_enable(ipvs->net);
+                if (ret)
+                        goto out_err;
         }
 #endif
 
@@ -315,6 +315,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
         struct xt_target *target = expr->ops->data;
         void *info = nft_expr_priv(expr);
+        struct module *me = target->me;
         struct xt_tgdtor_param par;
 
         par.net = ctx->net;
@@ -325,7 +326,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
                 par.target->destroy(&par);
 
         if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
-                module_put(target->me);
+                module_put(me);
 }
 
 static int nft_extension_dump_info(struct sk_buff *skb, int attr,
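
Note: the nft_compat fix above is a textbook use-after-free repair — the expression that embeds target can be freed once nft_xt_put() drops the last reference, so the module pointer is snapshotted into a local first. The shape of the pattern, with hypothetical names:

struct module;

struct target {
        struct module *me;                   /* owning module */
        void (*destroy)(struct target *t);   /* may free 't' itself */
};

static void module_put_stub(struct module *me) { (void)me; }

/* Sketch: read everything needed after the callback *before* calling it,
 * because the callback may free the containing object. */
static void target_teardown(struct target *t)
{
        struct module *me = t->me;   /* snapshot first ... */

        t->destroy(t);               /* ... 't' may be gone after this */
        module_put_stub(me);         /* safe: uses the saved pointer */
}
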
@@ -1899,7 +1899,7 @@ static int __init xt_init(void)
                 seqcount_init(&per_cpu(xt_recseq, i));
         }
 
-        xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
+        xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
         if (!xt)
                 return -ENOMEM;
 
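
Note: kmalloc_array() already guards the n * size multiplication but returns uninitialized memory; kcalloc() keeps the overflow check and zero-fills, so every per-protocol slot starts in a known-empty state. The userspace analogue is preferring calloc() over malloc() with a hand-rolled multiply (types here are illustrative):

#include <stdlib.h>

struct af_entry { void *match; void *target; };

/* Sketch: calloc() checks n * size for overflow and zeroes the block,
 * mirroring the kmalloc_array() -> kcalloc() change above. */
static struct af_entry *alloc_proto_table(size_t nproto)
{
        return calloc(nproto, sizeof(struct af_entry));
}
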
@@ -4292,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
                 if (unlikely(rb->frames_per_block == 0))
                         goto out;
-                if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+                if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
                         goto out;
                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
                                         req->tp_frame_nr))
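
Note: the corrected guard bounds the multiplication that is actually performed just below — frames_per_block * tp_block_nr — instead of an unrelated quantity; with the old check a large tp_block_nr could still overflow the product. Generic form of the guard:

#include <limits.h>
#include <stdbool.h>

/* Sketch: before computing a * b on unsigned int, confirm the product
 * cannot wrap; the divisor must be one of the real factors. */
static bool mul_fits_uint(unsigned int a, unsigned int b)
{
        return b == 0 || a <= UINT_MAX / b;
}
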
@@ -48,7 +48,7 @@ struct tcindex_data {
         u32 hash;               /* hash table size; 0 if undefined */
         u32 alloc_hash;         /* allocated size */
         u32 fall_through;       /* 0: only classify if explicit match */
-        struct rcu_head rcu;
+        struct rcu_work rwork;
 };
 
 static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
@@ -221,17 +221,11 @@ found:
         return 0;
 }
 
-static int tcindex_destroy_element(struct tcf_proto *tp,
-                                   void *arg, struct tcf_walker *walker)
+static void tcindex_destroy_work(struct work_struct *work)
 {
-        bool last;
-
-        return tcindex_delete(tp, arg, &last, NULL);
-}
-
-static void __tcindex_destroy(struct rcu_head *head)
-{
-        struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+        struct tcindex_data *p = container_of(to_rcu_work(work),
+                                              struct tcindex_data,
+                                              rwork);
 
         kfree(p->perfect);
         kfree(p->h);
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
         return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
 }
 
-static void __tcindex_partial_destroy(struct rcu_head *head)
+static void tcindex_partial_destroy_work(struct work_struct *work)
 {
-        struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+        struct tcindex_data *p = container_of(to_rcu_work(work),
+                                              struct tcindex_data,
+                                              rwork);
 
         kfree(p->perfect);
         kfree(p);
@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
                 kfree(cp->perfect);
 }
 
-static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
+static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
 {
         int i, err = 0;
 
@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
                                     TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
                 if (err < 0)
                         goto errout;
+#ifdef CONFIG_NET_CLS_ACT
+                cp->perfect[i].exts.net = net;
+#endif
         }
 
         return 0;
@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                   struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
 {
         struct tcindex_filter_result new_filter_result, *old_r = r;
-        struct tcindex_filter_result cr;
         struct tcindex_data *cp = NULL, *oldp;
         struct tcindex_filter *f = NULL; /* make gcc behave */
+        struct tcf_result cr = {};
         int err, balloc = 0;
         struct tcf_exts e;
 
@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
         if (p->perfect) {
                 int i;
 
-                if (tcindex_alloc_perfect_hash(cp) < 0)
+                if (tcindex_alloc_perfect_hash(net, cp) < 0)
                         goto errout;
                 for (i = 0; i < cp->hash; i++)
                         cp->perfect[i].res = p->perfect[i].res;
@@ -346,13 +345,10 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                 cp->h = p->h;
 
         err = tcindex_filter_result_init(&new_filter_result);
-        if (err < 0)
-                goto errout1;
-        err = tcindex_filter_result_init(&cr);
         if (err < 0)
                 goto errout1;
         if (old_r)
-                cr.res = r->res;
+                cr = r->res;
 
         if (tb[TCA_TCINDEX_HASH])
                 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
         err = -ENOMEM;
         if (!cp->perfect && !cp->h) {
                 if (valid_perfect_hash(cp)) {
-                        if (tcindex_alloc_perfect_hash(cp) < 0)
+                        if (tcindex_alloc_perfect_hash(net, cp) < 0)
                                 goto errout_alloc;
                         balloc = 1;
                 } else {
@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
         }
 
         if (tb[TCA_TCINDEX_CLASSID]) {
-                cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
-                tcf_bind_filter(tp, &cr.res, base);
+                cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
+                tcf_bind_filter(tp, &cr, base);
         }
 
         if (old_r && old_r != r) {
@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
         }
 
         oldp = p;
-        r->res = cr.res;
+        r->res = cr;
         tcf_exts_change(&r->exts, &e);
 
         rcu_assign_pointer(tp->root, cp);
@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                         ; /* nothing */
 
                 rcu_assign_pointer(*fp, f);
+        } else {
+                tcf_exts_destroy(&new_filter_result.exts);
         }
 
         if (oldp)
-                call_rcu(&oldp->rcu, __tcindex_partial_destroy);
+                tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
         return 0;
 
 errout_alloc:
@@ -487,7 +485,6 @@ errout_alloc:
         else if (balloc == 2)
                 kfree(cp->h);
 errout1:
-        tcf_exts_destroy(&cr.exts);
         tcf_exts_destroy(&new_filter_result.exts);
 errout:
         kfree(cp);
@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
                             struct netlink_ext_ack *extack)
 {
         struct tcindex_data *p = rtnl_dereference(tp->root);
-        struct tcf_walker walker;
+        int i;
 
         pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
-        walker.count = 0;
-        walker.skip = 0;
-        walker.fn = tcindex_destroy_element;
-        tcindex_walk(tp, &walker);
 
-        call_rcu(&p->rcu, __tcindex_destroy);
+        if (p->perfect) {
+                for (i = 0; i < p->hash; i++) {
+                        struct tcindex_filter_result *r = p->perfect + i;
+
+                        tcf_unbind_filter(tp, &r->res);
+                        if (tcf_exts_get_net(&r->exts))
+                                tcf_queue_work(&r->rwork,
+                                               tcindex_destroy_rexts_work);
+                        else
+                                __tcindex_destroy_rexts(r);
+                }
+        }
+
+        for (i = 0; p->h && i < p->hash; i++) {
+                struct tcindex_filter *f, *next;
+                bool last;
+
+                for (f = rtnl_dereference(p->h[i]); f; f = next) {
+                        next = rtnl_dereference(f->next);
+                        tcindex_delete(tp, &f->result, &last, NULL);
+                }
+        }
+
+        tcf_queue_work(&p->rwork, tcindex_destroy_work);
 }
 
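
Note: most of the cls_tcindex changes hang off one mechanism — struct rcu_work (queued here via the tc helper tcf_queue_work()) runs a function in process context after an RCU grace period, which plain call_rcu() cannot offer when the callback needs to sleep, and tearing down tcf_exts may. A hedged in-kernel sketch of the pattern; struct foo and its fields are illustrative:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct foo {
        int payload;
        struct rcu_work rwork;  /* replaces a bare struct rcu_head */
};

/* Runs in process context once a grace period has elapsed, so it may
 * sleep (take mutexes, etc.) before freeing. */
static void foo_free_work(struct work_struct *work)
{
        struct foo *f = container_of(to_rcu_work(work), struct foo, rwork);

        kfree(f);
}

static void foo_release(struct foo *f)
{
        INIT_RCU_WORK(&f->rwork, foo_free_work);
        queue_rcu_work(system_wq, &f->rwork);
}
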
@@ -500,7 +500,7 @@ static void dev_watchdog_down(struct net_device *dev)
  *      netif_carrier_on - set carrier
  *      @dev: network device
  *
- *      Device has detected that carrier.
+ *      Device has detected acquisition of carrier.
  */
 void netif_carrier_on(struct net_device *dev)
 {
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
                 + nla_total_size(1) /* INET_DIAG_TOS */
                 + nla_total_size(1) /* INET_DIAG_TCLASS */
                 + nla_total_size(4) /* INET_DIAG_MARK */
+                + nla_total_size(4) /* INET_DIAG_CLASS_ID */
                 + nla_total_size(addrlen * asoc->peer.transport_count)
                 + nla_total_size(addrlen * addrcnt)
                 + nla_total_size(sizeof(struct inet_diag_meminfo))
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
 {
         skb->ip_summed = CHECKSUM_NONE;
         skb->csum_not_inet = 0;
+        gso_reset_checksum(skb, ~0);
         return sctp_compute_cksum(skb, skb_transport_offset(skb));
 }
 
@@ -144,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
                 }
         }
 
-        for (i = outcnt; i < stream->outcnt; i++)
+        for (i = outcnt; i < stream->outcnt; i++) {
                 kfree(SCTP_SO(stream, i)->ext);
+                SCTP_SO(stream, i)->ext = NULL;
+        }
 }
 
 static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
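
Note: clearing the pointer immediately after kfree() is what makes a later migration or teardown pass harmless — it sees NULL instead of a dangling pointer, and freeing NULL is a no-op. The pattern in isolation:

#include <stdlib.h>

struct stream_out { void *ext; };

/* Sketch: free-and-NULL so repeated shrink passes are idempotent
 * rather than double-freeing. */
static void shrink_streams(struct stream_out *so, int from, int to)
{
        for (int i = from; i < to; i++) {
                free(so[i].ext);
                so[i].ext = NULL;   /* the line the fix adds */
        }
}
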
@@ -101,9 +101,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
 
         conn->tx_cdc_seq++;
         conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
-        smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
-                            &conn->local_tx_ctrl, conn);
-        smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn);
+        smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
         rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
         if (!rc)
                 smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
@@ -211,26 +211,27 @@ static inline int smc_curs_diff_large(unsigned int size,
 
 static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
                                           union smc_host_cursor *local,
+                                          union smc_host_cursor *save,
                                           struct smc_connection *conn)
 {
-        union smc_host_cursor temp;
-
-        smc_curs_copy(&temp, local, conn);
-        peer->count = htonl(temp.count);
-        peer->wrap = htons(temp.wrap);
+        smc_curs_copy(save, local, conn);
+        peer->count = htonl(save->count);
+        peer->wrap = htons(save->wrap);
         /* peer->reserved = htons(0); must be ensured by caller */
 }
 
 static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
-                                       struct smc_host_cdc_msg *local,
-                                       struct smc_connection *conn)
+                                       struct smc_connection *conn,
+                                       union smc_host_cursor *save)
 {
+        struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;
+
         peer->common.type = local->common.type;
         peer->len = local->len;
         peer->seqno = htons(local->seqno);
         peer->token = htonl(local->token);
-        smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn);
-        smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn);
+        smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn);
+        smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn);
         peer->prod_flags = local->prod_flags;
         peer->conn_state_flags = local->conn_state_flags;
 }
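
Note: this is the rx_curs_confirmed byte-ordering fix called out in the merge message. The wire conversion now fills a caller-supplied save cursor, and exactly that host-order snapshot is what gets recorded locally on a successful send, instead of reading the cursor back from the big-endian wire buffer. The snapshot-once idea in a small sketch (types are illustrative):

#include <stdint.h>
#include <arpa/inet.h>

struct cursor { uint16_t wrap; uint32_t count; };

/* Sketch: convert from one authoritative snapshot and hand that same
 * snapshot back to the caller for bookkeeping, instead of reading the
 * (possibly byte-swapped) destination twice. */
static void cursor_to_wire(const struct cursor *local, struct cursor *save,
                           uint16_t *wire_wrap, uint32_t *wire_count)
{
        *save = *local;
        *wire_wrap = htons(save->wrap);
        *wire_count = htonl(save->count);
}
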
@@ -1145,7 +1145,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
         default:
                 pr_warn("Dropping received illegal msg type\n");
                 kfree_skb(skb);
-                return false;
+                return true;
         };
 }
 
@ -1425,6 +1425,10 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
|
|||||||
l->rcv_unacked = 0;
|
l->rcv_unacked = 0;
|
||||||
} else {
|
} else {
|
||||||
/* RESET_MSG or ACTIVATE_MSG */
|
/* RESET_MSG or ACTIVATE_MSG */
|
||||||
|
if (mtyp == ACTIVATE_MSG) {
|
||||||
|
msg_set_dest_session_valid(hdr, 1);
|
||||||
|
msg_set_dest_session(hdr, l->peer_session);
|
||||||
|
}
|
||||||
msg_set_max_pkt(hdr, l->advertised_mtu);
|
msg_set_max_pkt(hdr, l->advertised_mtu);
|
||||||
strcpy(data, l->if_name);
|
strcpy(data, l->if_name);
|
||||||
msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
|
msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
|
||||||
@ -1642,6 +1646,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|||||||
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
|
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* If this endpoint was re-created while peer was ESTABLISHING
|
||||||
|
* it doesn't know current session number. Force re-synch.
|
||||||
|
*/
|
||||||
|
if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
|
||||||
|
l->session != msg_dest_session(hdr)) {
|
||||||
|
if (less(l->session, msg_dest_session(hdr)))
|
||||||
|
l->session = msg_dest_session(hdr) + 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
|
/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
|
||||||
if (mtyp == RESET_MSG || !link_is_up(l))
|
if (mtyp == RESET_MSG || !link_is_up(l))
|
||||||
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
|
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
|
||||||
|
@ -360,6 +360,28 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
|
|||||||
msg_set_bits(m, 1, 0, 0xffff, n);
|
msg_set_bits(m, 1, 0, 0xffff, n);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch
|
||||||
|
* link peer session number
|
||||||
|
*/
|
||||||
|
static inline bool msg_dest_session_valid(struct tipc_msg *m)
|
||||||
|
{
|
||||||
|
return msg_bits(m, 1, 16, 0x1);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid)
|
||||||
|
{
|
||||||
|
msg_set_bits(m, 1, 16, 0x1, valid);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline u16 msg_dest_session(struct tipc_msg *m)
|
||||||
|
{
|
||||||
|
return msg_bits(m, 1, 0, 0xffff);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void msg_set_dest_session(struct tipc_msg *m, u16 n)
|
||||||
|
{
|
||||||
|
msg_set_bits(m, 1, 0, 0xffff, n);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Word 2
|
* Word 2
|
||||||
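
Note: the new msg.h helpers are thin wrappers over tipc's msg_bits()/msg_set_bits() accessors — a 1-bit validity flag at bit 16 of header word 1 and a 16-bit session number in bits 0-15. The underlying mask-and-shift idiom, standalone (the real accessors additionally handle network byte order):

#include <stdint.h>

static inline uint32_t word_bits(uint32_t w, int pos, uint32_t mask)
{
        return (w >> pos) & mask;
}

static inline uint32_t word_set_bits(uint32_t w, int pos, uint32_t mask,
                                     uint32_t val)
{
        w &= ~(mask << pos);
        w |= (val & mask) << pos;
        return w;
}
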
@@ -830,15 +830,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
         tipc_node_write_lock(n);
         if (!tipc_link_is_establishing(l)) {
                 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
-                if (delete) {
-                        kfree(l);
-                        le->link = NULL;
-                        n->link_cnt--;
-                }
         } else {
                 /* Defuse pending tipc_node_link_up() */
+                tipc_link_reset(l);
                 tipc_link_fsm_evt(l, LINK_RESET_EVT);
         }
+        if (delete) {
+                kfree(l);
+                le->link = NULL;
+                n->link_cnt--;
+        }
         trace_tipc_node_link_down(n, true, "node link down or deleted!");
         tipc_node_write_unlock(n);
         if (delete)
@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
 
 static void vmci_transport_destruct(struct vsock_sock *vsk)
 {
+        /* transport can be NULL if we hit a failure at init() time */
+        if (!vmci_trans(vsk))
+                return;
+
         /* Ensure that the detach callback doesn't use the sk/vsk
          * we are about to destruct.
          */
@ -1068,6 +1068,8 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync)
|
|||||||
|
|
||||||
ASSERT_RTNL();
|
ASSERT_RTNL();
|
||||||
|
|
||||||
|
flush_work(&wdev->pmsr_free_wk);
|
||||||
|
|
||||||
nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
|
nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
|
||||||
|
|
||||||
list_del_rcu(&wdev->list);
|
list_del_rcu(&wdev->list);
|
||||||
|
@ -250,7 +250,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
|
|||||||
[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] =
|
[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] =
|
||||||
NLA_POLICY_MAX(NLA_U8, 15),
|
NLA_POLICY_MAX(NLA_U8, 15),
|
||||||
[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] =
|
[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] =
|
||||||
NLA_POLICY_MAX(NLA_U8, 15),
|
NLA_POLICY_MAX(NLA_U8, 31),
|
||||||
[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 },
|
[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 },
|
||||||
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG },
|
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG },
|
||||||
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG },
|
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG },
|
||||||
@@ -256,8 +256,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
                         if (err)
                                 goto out_err;
                 } else {
-                        memcpy(req->mac_addr, nla_data(info->attrs[NL80211_ATTR_MAC]),
-                               ETH_ALEN);
+                        memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
                         memset(req->mac_addr_mask, 0xff, ETH_ALEN);
                 }
 
@ -272,6 +271,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
|
|||||||
|
|
||||||
req->n_peers = count;
|
req->n_peers = count;
|
||||||
req->cookie = cfg80211_assign_cookie(rdev);
|
req->cookie = cfg80211_assign_cookie(rdev);
|
||||||
|
req->nl_portid = info->snd_portid;
|
||||||
|
|
||||||
err = rdev_start_pmsr(rdev, wdev, req);
|
err = rdev_start_pmsr(rdev, wdev, req);
|
||||||
if (err)
|
if (err)
|
||||||
@@ -530,14 +530,14 @@ free:
 }
 EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
 
-void cfg80211_pmsr_free_wk(struct work_struct *work)
+static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev)
 {
-        struct wireless_dev *wdev = container_of(work, struct wireless_dev,
-                                                 pmsr_free_wk);
         struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
         struct cfg80211_pmsr_request *req, *tmp;
         LIST_HEAD(free_list);
 
+        lockdep_assert_held(&wdev->mtx);
+
         spin_lock_bh(&wdev->pmsr_lock);
         list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
                 if (req->nl_portid)
@@ -547,14 +547,22 @@ void cfg80211_pmsr_free_wk(struct work_struct *work)
         spin_unlock_bh(&wdev->pmsr_lock);
 
         list_for_each_entry_safe(req, tmp, &free_list, list) {
-                wdev_lock(wdev);
                 rdev_abort_pmsr(rdev, wdev, req);
-                wdev_unlock(wdev);
 
                 kfree(req);
         }
 }
 
+void cfg80211_pmsr_free_wk(struct work_struct *work)
+{
+        struct wireless_dev *wdev = container_of(work, struct wireless_dev,
+                                                 pmsr_free_wk);
+
+        wdev_lock(wdev);
+        cfg80211_pmsr_process_abort(wdev);
+        wdev_unlock(wdev);
+}
+
 void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
 {
         struct cfg80211_pmsr_request *req;
@ -568,8 +576,8 @@ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
|
|||||||
spin_unlock_bh(&wdev->pmsr_lock);
|
spin_unlock_bh(&wdev->pmsr_lock);
|
||||||
|
|
||||||
if (found)
|
if (found)
|
||||||
schedule_work(&wdev->pmsr_free_wk);
|
cfg80211_pmsr_process_abort(wdev);
|
||||||
flush_work(&wdev->pmsr_free_wk);
|
|
||||||
WARN_ON(!list_empty(&wdev->pmsr_list));
|
WARN_ON(!list_empty(&wdev->pmsr_list));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -5,7 +5,7 @@
  * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
  * Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
  */
 #include <linux/export.h>
 #include <linux/bitops.h>
@ -19,6 +19,7 @@
|
|||||||
#include <linux/mpls.h>
|
#include <linux/mpls.h>
|
||||||
#include <linux/gcd.h>
|
#include <linux/gcd.h>
|
||||||
#include <linux/bitfield.h>
|
#include <linux/bitfield.h>
|
||||||
|
#include <linux/nospec.h>
|
||||||
#include "core.h"
|
#include "core.h"
|
||||||
#include "rdev-ops.h"
|
#include "rdev-ops.h"
|
||||||
|
|
||||||
@ -715,20 +716,25 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
|
|||||||
{
|
{
|
||||||
unsigned int dscp;
|
unsigned int dscp;
|
||||||
unsigned char vlan_priority;
|
unsigned char vlan_priority;
|
||||||
|
unsigned int ret;
|
||||||
|
|
||||||
/* skb->priority values from 256->263 are magic values to
|
/* skb->priority values from 256->263 are magic values to
|
||||||
* directly indicate a specific 802.1d priority. This is used
|
* directly indicate a specific 802.1d priority. This is used
|
||||||
* to allow 802.1d priority to be passed directly in from VLAN
|
* to allow 802.1d priority to be passed directly in from VLAN
|
||||||
* tags, etc.
|
* tags, etc.
|
||||||
*/
|
*/
|
||||||
if (skb->priority >= 256 && skb->priority <= 263)
|
if (skb->priority >= 256 && skb->priority <= 263) {
|
||||||
return skb->priority - 256;
|
ret = skb->priority - 256;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
if (skb_vlan_tag_present(skb)) {
|
if (skb_vlan_tag_present(skb)) {
|
||||||
vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK)
|
vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK)
|
||||||
>> VLAN_PRIO_SHIFT;
|
>> VLAN_PRIO_SHIFT;
|
||||||
if (vlan_priority > 0)
|
if (vlan_priority > 0) {
|
||||||
return vlan_priority;
|
ret = vlan_priority;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (skb->protocol) {
|
switch (skb->protocol) {
|
||||||
@ -747,8 +753,9 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
|
|||||||
if (!mpls)
|
if (!mpls)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
return (ntohl(mpls->entry) & MPLS_LS_TC_MASK)
|
ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK)
|
||||||
>> MPLS_LS_TC_SHIFT;
|
>> MPLS_LS_TC_SHIFT;
|
||||||
|
goto out;
|
||||||
}
|
}
|
||||||
case htons(ETH_P_80221):
|
case htons(ETH_P_80221):
|
||||||
/* 802.21 is always network control traffic */
|
/* 802.21 is always network control traffic */
|
||||||
@ -761,18 +768,24 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
|
|||||||
unsigned int i, tmp_dscp = dscp >> 2;
|
unsigned int i, tmp_dscp = dscp >> 2;
|
||||||
|
|
||||||
for (i = 0; i < qos_map->num_des; i++) {
|
for (i = 0; i < qos_map->num_des; i++) {
|
||||||
if (tmp_dscp == qos_map->dscp_exception[i].dscp)
|
if (tmp_dscp == qos_map->dscp_exception[i].dscp) {
|
||||||
return qos_map->dscp_exception[i].up;
|
ret = qos_map->dscp_exception[i].up;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < 8; i++) {
|
for (i = 0; i < 8; i++) {
|
||||||
if (tmp_dscp >= qos_map->up[i].low &&
|
if (tmp_dscp >= qos_map->up[i].low &&
|
||||||
tmp_dscp <= qos_map->up[i].high)
|
tmp_dscp <= qos_map->up[i].high) {
|
||||||
return i;
|
ret = i;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return dscp >> 5;
|
ret = dscp >> 5;
|
||||||
|
out:
|
||||||
|
return array_index_nospec(ret, IEEE80211_NUM_TIDS);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(cfg80211_classify8021d);
|
EXPORT_SYMBOL(cfg80211_classify8021d);
|
||||||
|
|
||||||
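
Note: routing every early return through the single out label is what makes one array_index_nospec() call sufficient — the returned TID is later used to index per-TID arrays, and under branch misspeculation an out-of-range value could otherwise be forwarded. Minimal shape of the pattern (array_index_nospec() is the real kernel API; the surrounding function is illustrative):

#include <linux/nospec.h>

static unsigned int classify(unsigned int raw, unsigned int size)
{
        unsigned int ret;

        if (raw < size)
                ret = raw;
        else
                ret = 0;

        /* clamps ret to [0, size) even under speculation */
        return array_index_nospec(ret, size);
}
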
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
         unsigned int lci = 1;
         struct sock *sk;
 
-        read_lock_bh(&x25_list_lock);
-
-        while ((sk = __x25_find_socket(lci, nb)) != NULL) {
+        while ((sk = x25_find_socket(lci, nb)) != NULL) {
                 sock_put(sk);
                 if (++lci == 4096) {
                         lci = 0;
                         break;
                 }
+                cond_resched();
         }
 
-        read_unlock_bh(&x25_list_lock);
         return lci;
 }
 
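
Note: dropping the outer read_lock_bh() (x25_find_socket() takes the list lock per lookup) is what makes the cond_resched() legal — with up to 4095 iterations, yielding between lookups stops a flood of connection requests from pinning the CPU with bottom halves disabled. The general shape, as an in-kernel sketch with illustrative names:

#include <linux/sched.h>
#include <linux/types.h>

/* Sketch: a long scan in process context should not hold a lock across
 * iterations; let the callee lock per step so the loop can yield. */
static unsigned int scan_free_id(bool (*in_use)(unsigned int id))
{
        unsigned int id = 1;

        while (in_use(id)) {
                if (++id == 4096) {
                        id = 0;
                        break;
                }
                cond_resched();
        }
        return id;
}
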
@@ -9,6 +9,3 @@ all: $(TEST_PROGS)
 top_srcdir = ../../../../..
 KSFT_KHDR_INSTALL := 1
 include ../../lib.mk
-
-clean:
-	rm -fr $(TEST_GEN_FILES)