Networking fixes for 5.16-rc7, including fixes from netfilter.

Merge tag 'net-5.16-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from netfilter.

  Current release - regressions:
   - revert "tipc: use consistent GFP flags"

  Previous releases - regressions:
   - igb: fix deadlock caused by taking RTNL in runtime resume path
   - accept UFOv6 packages in virtio_net_hdr_to_skb
   - netfilter: fix regression in looped (broad|multi)cast's MAC handling
   - bridge: fix ioctl old_deviceless bridge argument
   - ice: xsk: do not clear status_error0 for ntu + nb_buffs descriptor,
     avoid stalls when multiple sockets use an interface

  Previous releases - always broken:
   - inet: fully convert sk->sk_rx_dst to RCU rules
   - veth: ensure skb entering GRO are not cloned
   - sched: fix zone matching for invalid conntrack state
   - bonding: fix ad_actor_system option setting to default
   - nf_tables: fix use-after-free in nft_set_catchall_destroy()
   - lantiq_xrx200: increase buffer reservation to avoid mem corruption
   - ice: xsk: avoid leaking app buffers during clean up
   - tun: avoid double free in tun_free_netdev"

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

* tag 'net-5.16-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (45 commits)
  net: stmmac: dwmac-visconti: Fix value of ETHER_CLK_SEL_FREQ_SEL_2P5M
  r8152: sync ocp base
  r8152: fix the force speed doesn't work for RTL8156
  net: bridge: fix ioctl old_deviceless bridge argument
  net: stmmac: ptp: fix potentially overflowing expression
  net: dsa: tag_ocelot: use traffic class to map priority on injected header
  veth: ensure skb entering GRO are not cloned
  asix: fix wrong return value in asix_check_host_enable()
  asix: fix uninit-value in asix_mdio_read()
  sfc: falcon: Check null pointer of rx_queue->page_ring
  sfc: Check null pointer of rx_queue->page_ring
  net: ks8851: Check for error irq
  drivers: net: smc911x: Check for error irq
  fjes: Check for error irq
  bonding: fix ad_actor_system option setting to default
  igb: fix deadlock caused by taking RTNL in RPM resume path
  gve: Correct order of processing device options
  net: skip virtio_net_hdr_set_proto if protocol already set
  net: accept UFOv6 packages in virtio_net_hdr_to_skb
  docs: networking: replace skb_hwtstamp_tx with skb_tstamp_tx
  ...
commit 76657eaef4
@@ -196,11 +196,12 @@ ad_actor_sys_prio
 ad_actor_system

     In an AD system, this specifies the mac-address for the actor in
-    protocol packet exchanges (LACPDUs). The value cannot be NULL or
-    multicast. It is preferred to have the local-admin bit set for this
-    mac but driver does not enforce it. If the value is not given then
-    system defaults to using the masters' mac address as actors' system
-    address.
+    protocol packet exchanges (LACPDUs). The value cannot be a multicast
+    address. If the all-zeroes MAC is specified, bonding will internally
+    use the MAC of the bond itself. It is preferred to have the
+    local-admin bit set for this mac but driver does not enforce it. If
+    the value is not given then system defaults to using the masters'
+    mac address as actors' system address.

     This parameter has effect only in 802.3ad mode and is available through
     SysFs interface.
@@ -183,6 +183,7 @@ PHY and allows physical transmission and reception of Ethernet frames.
IRQ config, enable, reset

DPNI (Datapath Network Interface)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains TX/RX queues, network interface configuration, and RX buffer pool
configuration mechanisms. The TX/RX queues are in memory and are identified
by queue number.
@@ -582,8 +582,8 @@ Time stamps for outgoing packets are to be generated as follows:
   and hardware timestamping is not possible (SKBTX_IN_PROGRESS not set).
 - As soon as the driver has sent the packet and/or obtained a
   hardware time stamp for it, it passes the time stamp back by
-  calling skb_hwtstamp_tx() with the original skb, the raw
-  hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+  calling skb_tstamp_tx() with the original skb, the raw
+  hardware time stamp. skb_tstamp_tx() clones the original skb and
   adds the timestamps, therefore the original skb has to be freed now.
   If obtaining the hardware time stamp somehow fails, then the driver
   should not fall back to software time stamping. The rationale is that
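The contract described above can be sketched as follows. This is a minimal illustration, not a reference implementation: the driver and its completion hook are hypothetical, while skb_tstamp_tx(), struct skb_shared_hwtstamps and ns_to_ktime() are the real kernel API.

#include <linux/skbuff.h>
#include <linux/ktime.h>

/* Hypothetical TX-completion path of a driver with hardware timestamping. */
static void example_tx_complete(struct sk_buff *skb, u64 raw_hw_ns)
{
    struct skb_shared_hwtstamps hwtstamps = {
        .hwtstamp = ns_to_ktime(raw_hw_ns),
    };

    /* skb_tstamp_tx() clones the skb and attaches the timestamp to
     * the clone, ...
     */
    skb_tstamp_tx(skb, &hwtstamps);

    /* ... so the original skb still belongs to the driver and must be
     * freed here, exactly as the documentation above states.
     */
    dev_kfree_skb_any(skb);
}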
@@ -1526,7 +1526,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
         mac = (u8 *)&newval->value;
     }

-    if (!is_valid_ether_addr(mac))
+    if (is_multicast_ether_addr(mac))
         goto err;

     netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
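For context, a sketch of how the two predicates differ for the all-zeroes MAC that "reset to default" writes. The wrapper functions are illustrative only; is_valid_ether_addr() and is_multicast_ether_addr() are the real helpers from <linux/etherdevice.h>.

#include <linux/etherdevice.h>

static bool old_check_rejected_zero_mac(const u8 *mac)
{
    /* is_valid_ether_addr() fails for multicast AND all-zeroes, so
     * 00:00:00:00:00:00 could never reach the documented fallback to
     * the bond's own MAC.
     */
    return !is_valid_ether_addr(mac);
}

static bool new_check_rejects(const u8 *mac)
{
    /* Only multicast is refused; the all-zeroes MAC now falls through
     * and restores the default actor system address.
     */
    return is_multicast_ether_addr(mac);
}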
@@ -738,10 +738,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
      * is not set to GqiRda, choose the queue format in a priority order:
      * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
      */
-    if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
-        dev_info(&priv->pdev->dev,
-             "Driver is running with GQI RDA queue format.\n");
-    } else if (dev_op_dqo_rda) {
+    if (dev_op_dqo_rda) {
         priv->queue_format = GVE_DQO_RDA_FORMAT;
         dev_info(&priv->pdev->dev,
              "Driver is running with DQO RDA queue format.\n");
@@ -753,6 +750,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
              "Driver is running with GQI RDA queue format.\n");
         supported_features_mask =
             be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
+    } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+        dev_info(&priv->pdev->dev,
+             "Driver is running with GQI RDA queue format.\n");
     } else {
         priv->queue_format = GVE_GQI_QPL_FORMAT;
         if (dev_op_gqi_qpl)
@@ -6,6 +6,18 @@
 #include "ice_lib.h"
 #include "ice_dcb_lib.h"

+static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
+{
+    rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
+    return !!rx_ring->xdp_buf;
+}
+
+static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
+{
+    rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
+    return !!rx_ring->rx_buf;
+}
+
 /**
  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
             xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                      ring->q_index, ring->q_vector->napi.napi_id);

+        kfree(ring->rx_buf);
         ring->xsk_pool = ice_xsk_pool(ring);
         if (ring->xsk_pool) {
+            if (!ice_alloc_rx_buf_zc(ring))
+                return -ENOMEM;
             xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

             ring->rx_buf_len =
@@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
             dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                  ring->q_index);
         } else {
+            if (!ice_alloc_rx_buf(ring))
+                return -ENOMEM;
             if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
                 /* coverity[check_return] */
                 xdp_rxq_info_reg(&ring->xdp_rxq,
@@ -419,7 +419,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
     }

 rx_skip_free:
-    memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+    if (rx_ring->xsk_pool)
+        memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+    else
+        memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

     /* Zero out the descriptor ring */
     size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -446,8 +449,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
     if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
         xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
     rx_ring->xdp_prog = NULL;
-    devm_kfree(rx_ring->dev, rx_ring->rx_buf);
-    rx_ring->rx_buf = NULL;
+    if (rx_ring->xsk_pool) {
+        kfree(rx_ring->xdp_buf);
+        rx_ring->xdp_buf = NULL;
+    } else {
+        kfree(rx_ring->rx_buf);
+        rx_ring->rx_buf = NULL;
+    }

     if (rx_ring->desc) {
         size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -475,8 +483,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
     /* warn if we are about to overwrite the pointer */
     WARN_ON(rx_ring->rx_buf);
     rx_ring->rx_buf =
-        devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
-                 GFP_KERNEL);
+        kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
     if (!rx_ring->rx_buf)
         return -ENOMEM;

@@ -505,7 +512,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
     return 0;

 err:
-    devm_kfree(dev, rx_ring->rx_buf);
+    kfree(rx_ring->rx_buf);
     rx_ring->rx_buf = NULL;
     return -ENOMEM;
 }
@@ -24,7 +24,6 @@
 #define ICE_MAX_DATA_PER_TXD_ALIGNED \
     (~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)

-#define ICE_RX_BUF_WRITE    16    /* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG    128

 /* Attempt to maximize the headroom available for incoming frames. We use a 2K
@@ -12,6 +12,11 @@
 #include "ice_txrx_lib.h"
 #include "ice_lib.h"

+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+    return &rx_ring->xdp_buf[idx];
+}
+
 /**
  * ice_qp_reset_stats - Resets all stats for rings of given index
  * @vsi: VSI that contains rings of interest
@@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
     dma_addr_t dma;

     rx_desc = ICE_RX_DESC(rx_ring, ntu);
-    xdp = &rx_ring->xdp_buf[ntu];
+    xdp = ice_xdp_buf(rx_ring, ntu);

     nb_buffs = min_t(u16, count, rx_ring->count - ntu);
     nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
@@ -390,14 +395,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
     }

     ntu += nb_buffs;
-    if (ntu == rx_ring->count) {
-        rx_desc = ICE_RX_DESC(rx_ring, 0);
-        xdp = rx_ring->xdp_buf;
+    if (ntu == rx_ring->count)
         ntu = 0;
-    }

-    /* clear the status bits for the next_to_use descriptor */
-    rx_desc->wb.status_error0 = 0;
     ice_release_rx_desc(rx_ring, ntu);

     return count == nb_buffs;
@@ -419,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
 /**
  * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
  * @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
  *
  * This function allocates a new skb from a zero-copy Rx buffer.
  *
  * Returns the skb on success, NULL on failure.
  */
 static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 {
-    struct xdp_buff *xdp = *xdp_arr;
+    unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
     unsigned int metasize = xdp->data - xdp->data_meta;
     unsigned int datasize = xdp->data_end - xdp->data;
-    unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
     struct sk_buff *skb;

     skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -445,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
     skb_metadata_set(skb, metasize);

     xsk_buff_free(xdp);
-    *xdp_arr = NULL;
     return skb;
 }

@@ -507,7 +505,6 @@ out_failure:
 int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 {
     unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-    u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
     struct ice_tx_ring *xdp_ring;
     unsigned int xdp_xmit = 0;
     struct bpf_prog *xdp_prog;
@@ -522,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
     while (likely(total_rx_packets < (unsigned int)budget)) {
         union ice_32b_rx_flex_desc *rx_desc;
         unsigned int size, xdp_res = 0;
-        struct xdp_buff **xdp;
+        struct xdp_buff *xdp;
         struct sk_buff *skb;
         u16 stat_err_bits;
         u16 vlan_tag = 0;
@@ -540,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
          */
         dma_rmb();

+        xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+
         size = le16_to_cpu(rx_desc->wb.pkt_len) &
                    ICE_RX_FLX_DESC_PKT_LEN_M;
-        if (!size)
-            break;
+        if (!size) {
+            xdp->data = NULL;
+            xdp->data_end = NULL;
+            xdp->data_hard_start = NULL;
+            xdp->data_meta = NULL;
+            goto construct_skb;
+        }

-        xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
-        xsk_buff_set_size(*xdp, size);
-        xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+        xsk_buff_set_size(xdp, size);
+        xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);

-        xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
+        xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
         if (xdp_res) {
             if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
                 xdp_xmit |= xdp_res;
             else
-                xsk_buff_free(*xdp);
+                xsk_buff_free(xdp);

-            *xdp = NULL;
             total_rx_bytes += size;
             total_rx_packets++;
-            cleaned_count++;

             ice_bump_ntc(rx_ring);
             continue;
         }

+construct_skb:
         /* XDP_PASS path */
         skb = ice_construct_skb_zc(rx_ring, xdp);
         if (!skb) {
@@ -572,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
             break;
         }

-        cleaned_count++;
         ice_bump_ntc(rx_ring);

         if (eth_skb_pad(skb)) {
@@ -594,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
         ice_receive_skb(rx_ring, skb, vlan_tag);
     }

-    if (cleaned_count >= ICE_RX_BUF_WRITE)
-        failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+    failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));

     ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
     ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -811,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
  */
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
 {
-    u16 i;
+    u16 count_mask = rx_ring->count - 1;
+    u16 ntc = rx_ring->next_to_clean;
+    u16 ntu = rx_ring->next_to_use;

-    for (i = 0; i < rx_ring->count; i++) {
-        struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+    for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+        struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

-        if (!xdp)
-            continue;
-
-        *xdp = NULL;
+        xsk_buff_free(xdp);
     }
 }
@@ -9254,7 +9254,7 @@ static int __maybe_unused igb_suspend(struct device *dev)
     return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }

-static int __maybe_unused igb_resume(struct device *dev)
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
 {
     struct pci_dev *pdev = to_pci_dev(dev);
     struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9297,17 +9297,24 @@ static int __maybe_unused igb_resume(struct device *dev)

     wr32(E1000_WUS, ~0);

-    rtnl_lock();
+    if (!rpm)
+        rtnl_lock();
     if (!err && netif_running(netdev))
         err = __igb_open(netdev, true);

     if (!err)
         netif_device_attach(netdev);
-    rtnl_unlock();
+    if (!rpm)
+        rtnl_unlock();

     return err;
 }

+static int __maybe_unused igb_resume(struct device *dev)
+{
+    return __igb_resume(dev, false);
+}
+
 static int __maybe_unused igb_runtime_idle(struct device *dev)
 {
     struct net_device *netdev = dev_get_drvdata(dev);
@@ -9326,7 +9333,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev)

 static int __maybe_unused igb_runtime_resume(struct device *dev)
 {
-    return igb_resume(dev);
+    return __igb_resume(dev, true);
 }

 static void igb_shutdown(struct pci_dev *pdev)
@@ -9442,7 +9449,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
  * @pdev: Pointer to PCI device
  *
  * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igb_resume routine.
+ * resembles the first-half of the __igb_resume routine.
  **/
 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
 {
@@ -9482,7 +9489,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
  *
  * This callback is called when the error recovery driver tells us that
  * its OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
+ * second-half of the __igb_resume routine.
  */
 static void igb_io_resume(struct pci_dev *pdev)
 {
@@ -71,6 +71,8 @@ struct xrx200_priv {
     struct xrx200_chan chan_tx;
     struct xrx200_chan chan_rx;

+    u16 rx_buf_size;
+
     struct net_device *net_dev;
     struct device *dev;

@@ -97,6 +99,16 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
     xrx200_pmac_w32(priv, val, offset);
 }

+static int xrx200_max_frame_len(int mtu)
+{
+    return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+    return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
 /* drop all the packets from the DMA ring */
 static void xrx200_flush_dma(struct xrx200_chan *ch)
 {
@@ -109,8 +121,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
             break;

         desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
-                (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
-                 ETH_FCS_LEN);
+                ch->priv->rx_buf_size;
         ch->dma.desc++;
         ch->dma.desc %= LTQ_DESC_NUM;
     }
@@ -158,21 +169,21 @@ static int xrx200_close(struct net_device *net_dev)

 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
-    int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
     struct sk_buff *skb = ch->skb[ch->dma.desc];
+    struct xrx200_priv *priv = ch->priv;
     dma_addr_t mapping;
     int ret = 0;

-    ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
-                              len);
+    ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+                              priv->rx_buf_size);
     if (!ch->skb[ch->dma.desc]) {
         ret = -ENOMEM;
         goto skip;
     }

-    mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
-                 len, DMA_FROM_DEVICE);
-    if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+    mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+                 priv->rx_buf_size, DMA_FROM_DEVICE);
+    if (unlikely(dma_mapping_error(priv->dev, mapping))) {
         dev_kfree_skb_any(ch->skb[ch->dma.desc]);
         ch->skb[ch->dma.desc] = skb;
         ret = -ENOMEM;
@@ -184,7 +195,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
     wmb();
 skip:
     ch->dma.desc_base[ch->dma.desc].ctl =
-        LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+        LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

     return ret;
 }
@@ -356,6 +367,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
     int ret = 0;

     net_dev->mtu = new_mtu;
+    priv->rx_buf_size = xrx200_buffer_size(new_mtu);

     if (new_mtu <= old_mtu)
         return ret;
@@ -375,6 +387,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
         ret = xrx200_alloc_skb(ch_rx);
         if (ret) {
             net_dev->mtu = old_mtu;
+            priv->rx_buf_size = xrx200_buffer_size(old_mtu);
             break;
         }
         dev_kfree_skb_any(skb);
@@ -505,7 +518,8 @@ static int xrx200_probe(struct platform_device *pdev)
     net_dev->netdev_ops = &xrx200_netdev_ops;
     SET_NETDEV_DEV(net_dev, dev);
     net_dev->min_mtu = ETH_ZLEN;
-    net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+    net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+    priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);

     /* load the memory ranges */
     priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
@@ -54,12 +54,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
 struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
                          u32 dev_id, u32 hw_id)
 {
-    struct prestera_port *port = NULL;
+    struct prestera_port *port = NULL, *tmp;

     read_lock(&sw->port_list_lock);
-    list_for_each_entry(port, &sw->port_list, list) {
-        if (port->dev_id == dev_id && port->hw_id == hw_id)
+    list_for_each_entry(tmp, &sw->port_list, list) {
+        if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+            port = tmp;
             break;
+        }
     }
     read_unlock(&sw->port_list_lock);

@@ -68,12 +70,14 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,

 struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
 {
-    struct prestera_port *port = NULL;
+    struct prestera_port *port = NULL, *tmp;

     read_lock(&sw->port_list_lock);
-    list_for_each_entry(port, &sw->port_list, list) {
-        if (port->id == id)
+    list_for_each_entry(tmp, &sw->port_list, list) {
+        if (tmp->id == id) {
+            port = tmp;
             break;
+        }
     }
     read_unlock(&sw->port_list_lock);

@@ -764,23 +768,27 @@ static int prestera_netdev_port_event(struct net_device *lower,
                       struct net_device *dev,
                       unsigned long event, void *ptr)
 {
-    struct netdev_notifier_changeupper_info *info = ptr;
+    struct netdev_notifier_info *info = ptr;
+    struct netdev_notifier_changeupper_info *cu_info;
     struct prestera_port *port = netdev_priv(dev);
     struct netlink_ext_ack *extack;
     struct net_device *upper;

-    extack = netdev_notifier_info_to_extack(&info->info);
-    upper = info->upper_dev;
+    extack = netdev_notifier_info_to_extack(info);
+    cu_info = container_of(info,
+                   struct netdev_notifier_changeupper_info,
+                   info);

     switch (event) {
     case NETDEV_PRECHANGEUPPER:
+        upper = cu_info->upper_dev;
         if (!netif_is_bridge_master(upper) &&
             !netif_is_lag_master(upper)) {
             NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
             return -EINVAL;
         }

-        if (!info->linking)
+        if (!cu_info->linking)
             break;

         if (netdev_has_any_upper_dev(upper)) {
@@ -789,7 +797,7 @@ static int prestera_netdev_port_event(struct net_device *lower,
         }

         if (netif_is_lag_master(upper) &&
-            !prestera_lag_master_check(upper, info->upper_info, extack))
+            !prestera_lag_master_check(upper, cu_info->upper_info, extack))
             return -EOPNOTSUPP;
         if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
             NL_SET_ERR_MSG_MOD(extack,
@@ -805,14 +813,15 @@ static int prestera_netdev_port_event(struct net_device *lower,
         break;

     case NETDEV_CHANGEUPPER:
+        upper = cu_info->upper_dev;
         if (netif_is_bridge_master(upper)) {
-            if (info->linking)
+            if (cu_info->linking)
                 return prestera_bridge_port_join(upper, port,
                                  extack);
             else
                 prestera_bridge_port_leave(upper, port);
         } else if (netif_is_lag_master(upper)) {
-            if (info->linking)
+            if (cu_info->linking)
                 return prestera_lag_port_add(port, upper);
             else
                 prestera_lag_port_del(port);
@@ -321,6 +321,8 @@ static int ks8851_probe_par(struct platform_device *pdev)
         return ret;

     netdev->irq = platform_get_irq(pdev, 0);
+    if (netdev->irq < 0)
+        return netdev->irq;

     return ks8851_probe_common(netdev, dev, msg_enable);
 }
@@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
             struct qlcnic_info *, u16);
 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
                   struct qlcnic_vf_info *, u16);
@@ -432,7 +432,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
                         struct qlcnic_cmd_args *cmd)
 {
     struct qlcnic_sriov *sriov = adapter->ahw->sriov;
-    int i, num_vlans;
+    int i, num_vlans, ret;
     u16 *vlans;

     if (sriov->allowed_vlans)
@@ -443,7 +443,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
     dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
          sriov->num_allowed_vlans);

-    qlcnic_sriov_alloc_vlans(adapter);
+    ret = qlcnic_sriov_alloc_vlans(adapter);
+    if (ret)
+        return ret;

     if (!sriov->any_vlan)
         return 0;
@@ -2154,7 +2156,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
     return err;
 }

-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
 {
     struct qlcnic_sriov *sriov = adapter->ahw->sriov;
     struct qlcnic_vf_info *vf;
@@ -2164,7 +2166,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
         vf = &sriov->vf_info[i];
         vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
                       sizeof(*vf->sriov_vlans), GFP_KERNEL);
+        if (!vf->sriov_vlans)
+            return -ENOMEM;
     }
+
+    return 0;
 }

 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
@@ -597,7 +597,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
     if (err)
         goto del_flr_queue;

-    qlcnic_sriov_alloc_vlans(adapter);
+    err = qlcnic_sriov_alloc_vlans(adapter);
+    if (err)
+        goto del_flr_queue;

     return err;

@@ -728,7 +728,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
                         efx->rx_bufs_per_page);
     rx_queue->page_ring = kcalloc(page_ring_size,
                       sizeof(*rx_queue->page_ring), GFP_KERNEL);
-    rx_queue->page_ptr_mask = page_ring_size - 1;
+    if (!rx_queue->page_ring)
+        rx_queue->page_ptr_mask = 0;
+    else
+        rx_queue->page_ptr_mask = page_ring_size - 1;
 }

 void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
@@ -150,7 +150,10 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
                         efx->rx_bufs_per_page);
     rx_queue->page_ring = kcalloc(page_ring_size,
                       sizeof(*rx_queue->page_ring), GFP_KERNEL);
-    rx_queue->page_ptr_mask = page_ring_size - 1;
+    if (!rx_queue->page_ring)
+        rx_queue->page_ptr_mask = 0;
+    else
+        rx_queue->page_ptr_mask = page_ring_size - 1;
 }

 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
@@ -2072,6 +2072,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)

     ndev->dma = (unsigned char)-1;
     ndev->irq = platform_get_irq(pdev, 0);
+    if (ndev->irq < 0) {
+        ret = ndev->irq;
+        goto release_both;
+    }
+
     lp = netdev_priv(ndev);
     lp->netdev = ndev;
 #ifdef SMC_DYNAMIC_BUS_CONFIG
@@ -26,7 +26,7 @@
 #define ETHER_CLK_SEL_FREQ_SEL_125M    (BIT(9) | BIT(8))
 #define ETHER_CLK_SEL_FREQ_SEL_50M    BIT(9)
 #define ETHER_CLK_SEL_FREQ_SEL_25M    BIT(8)
-#define ETHER_CLK_SEL_FREQ_SEL_2P5M    BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M    0
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN    BIT(0)
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC    BIT(10)
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV    BIT(11)
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
         time.tv_nsec = priv->plat->est->btr_reserve[0];
         time.tv_sec = priv->plat->est->btr_reserve[1];
         basetime = timespec64_to_ktime(time);
-        cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+        cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
                  priv->plat->est->ctr[0];
         time = stmmac_calc_tas_basetime(basetime,
                         current_time_ns,
@@ -1262,6 +1262,11 @@ static int fjes_probe(struct platform_device *plat_dev)
     hw->hw_res.start = res->start;
     hw->hw_res.size = resource_size(res);
     hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+    if (hw->hw_res.irq < 0) {
+        err = hw->hw_res.irq;
+        goto err_free_control_wq;
+    }
+
     err = fjes_hw_init(&adapter->hw);
     if (err)
         goto err_free_control_wq;
@@ -794,14 +794,14 @@ static void mkiss_close(struct tty_struct *tty)
      */
     netif_stop_queue(ax->dev);

-    ax->tty = NULL;
-
     unregister_netdev(ax->dev);

     /* Free all AX25 frame buffers after unreg. */
     kfree(ax->rbuff);
     kfree(ax->xbuff);

+    ax->tty = NULL;
+
     free_netdev(ax->dev);
 }
@@ -209,6 +209,9 @@ struct tun_struct {
     struct tun_prog __rcu *steering_prog;
     struct tun_prog __rcu *filter_prog;
     struct ethtool_link_ksettings link_ksettings;
+    /* init args */
+    struct file *file;
+    struct ifreq *ifr;
 };

 struct veth {
@@ -216,6 +219,9 @@ struct veth {
     __be16 h_vlan_TCI;
 };

+static void tun_flow_init(struct tun_struct *tun);
+static void tun_flow_uninit(struct tun_struct *tun);
+
 static int tun_napi_receive(struct napi_struct *napi, int budget)
 {
     struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -953,6 +959,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)

 static const struct ethtool_ops tun_ethtool_ops;

+static int tun_net_init(struct net_device *dev)
+{
+    struct tun_struct *tun = netdev_priv(dev);
+    struct ifreq *ifr = tun->ifr;
+    int err;
+
+    dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+    if (!dev->tstats)
+        return -ENOMEM;
+
+    spin_lock_init(&tun->lock);
+
+    err = security_tun_dev_alloc_security(&tun->security);
+    if (err < 0) {
+        free_percpu(dev->tstats);
+        return err;
+    }
+
+    tun_flow_init(tun);
+
+    dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+               TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+               NETIF_F_HW_VLAN_STAG_TX;
+    dev->features = dev->hw_features | NETIF_F_LLTX;
+    dev->vlan_features = dev->features &
+                 ~(NETIF_F_HW_VLAN_CTAG_TX |
+                   NETIF_F_HW_VLAN_STAG_TX);
+
+    tun->flags = (tun->flags & ~TUN_FEATURES) |
+              (ifr->ifr_flags & TUN_FEATURES);
+
+    INIT_LIST_HEAD(&tun->disabled);
+    err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
+             ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+    if (err < 0) {
+        tun_flow_uninit(tun);
+        security_tun_dev_free_security(tun->security);
+        free_percpu(dev->tstats);
+        return err;
+    }
+    return 0;
+}
+
 /* Net device detach from fd. */
 static void tun_net_uninit(struct net_device *dev)
 {
@@ -1169,6 +1218,7 @@ static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
 }

 static const struct net_device_ops tun_netdev_ops = {
+    .ndo_init        = tun_net_init,
     .ndo_uninit        = tun_net_uninit,
     .ndo_open        = tun_net_open,
     .ndo_stop        = tun_net_close,
@@ -1252,6 +1302,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
 }

 static const struct net_device_ops tap_netdev_ops = {
+    .ndo_init        = tun_net_init,
     .ndo_uninit        = tun_net_uninit,
     .ndo_open        = tun_net_open,
     .ndo_stop        = tun_net_close,
@@ -1292,7 +1343,7 @@ static void tun_flow_uninit(struct tun_struct *tun)
 #define MAX_MTU 65535

 /* Initialize net device. */
-static void tun_net_init(struct net_device *dev)
+static void tun_net_initialize(struct net_device *dev)
 {
     struct tun_struct *tun = netdev_priv(dev);

@@ -2206,11 +2257,6 @@ static void tun_free_netdev(struct net_device *dev)
     BUG_ON(!(list_empty(&tun->disabled)));

     free_percpu(dev->tstats);
-    /* We clear tstats so that tun_set_iff() can tell if
-     * tun_free_netdev() has been called from register_netdevice().
-     */
-    dev->tstats = NULL;
-
     tun_flow_uninit(tun);
     security_tun_dev_free_security(tun->security);
     __tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2716,41 +2762,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
         tun->rx_batched = 0;
         RCU_INIT_POINTER(tun->steering_prog, NULL);

-        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-        if (!dev->tstats) {
-            err = -ENOMEM;
-            goto err_free_dev;
-        }
+        tun->ifr = ifr;
+        tun->file = file;

-        spin_lock_init(&tun->lock);
-
-        err = security_tun_dev_alloc_security(&tun->security);
-        if (err < 0)
-            goto err_free_stat;
-
-        tun_net_init(dev);
-        tun_flow_init(tun);
-
-        dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
-                   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
-                   NETIF_F_HW_VLAN_STAG_TX;
-        dev->features = dev->hw_features | NETIF_F_LLTX;
-        dev->vlan_features = dev->features &
-                     ~(NETIF_F_HW_VLAN_CTAG_TX |
-                       NETIF_F_HW_VLAN_STAG_TX);
-
-        tun->flags = (tun->flags & ~TUN_FEATURES) |
-                  (ifr->ifr_flags & TUN_FEATURES);
-
-        INIT_LIST_HEAD(&tun->disabled);
-        err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-                 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
-        if (err < 0)
-            goto err_free_flow;
+        tun_net_initialize(dev);

         err = register_netdevice(tun->dev);
-        if (err < 0)
-            goto err_detach;
+        if (err < 0) {
+            free_netdev(dev);
+            return err;
+        }
         /* free_netdev() won't check refcnt, to avoid race
          * with dev_put() we need publish tun after registration.
          */
@@ -2767,24 +2788,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)

     strcpy(ifr->ifr_name, tun->dev->name);
     return 0;
-
-err_detach:
-    tun_detach_all(dev);
-    /* We are here because register_netdevice() has failed.
-     * If register_netdevice() already called tun_free_netdev()
-     * while dealing with the error, dev->stats has been cleared.
-     */
-    if (!dev->tstats)
-        goto err_free_dev;
-
-err_free_flow:
-    tun_flow_uninit(tun);
-    security_tun_dev_free_security(tun->security);
-err_free_stat:
-    free_percpu(dev->tstats);
-err_free_dev:
-    free_netdev(dev);
-    return err;
 }

 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
@@ -9,6 +9,8 @@

 #include "asix.h"

+#define AX_HOST_EN_RETRIES 30
+
 int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
           u16 size, void *data, int in_pm)
 {
@@ -68,7 +70,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
     int i, ret;
     u8 smsr;

-    for (i = 0; i < 30; ++i) {
+    for (i = 0; i < AX_HOST_EN_RETRIES; ++i) {
         ret = asix_set_sw_mii(dev, in_pm);
         if (ret == -ENODEV || ret == -ETIMEDOUT)
             break;
@@ -77,13 +79,13 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
                     0, 0, 1, &smsr, in_pm);
         if (ret == -ENODEV)
             break;
-        else if (ret < 0)
+        else if (ret < sizeof(smsr))
             continue;
         else if (smsr & AX_HOST_EN)
             break;
     }

-    return ret;
+    return i >= AX_HOST_EN_RETRIES ? -ETIMEDOUT : ret;
 }

 static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION        "12"

 /* Information for net */
-#define NET_VERSION        "11"
+#define NET_VERSION        "12"

 #define DRIVER_VERSION        "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -4016,6 +4016,11 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
     ocp_write_word(tp, type, PLA_BP_BA, 0);
 }

+static inline void rtl_reset_ocp_base(struct r8152 *tp)
+{
+    tp->ocp_base = -1;
+}
+
 static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
 {
     u16 data, check;
@@ -4087,8 +4092,6 @@ static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait)

     rtl_phy_patch_request(tp, false, wait);

-    ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
-
     return 0;
 }

@@ -4800,6 +4803,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
     u32 len;
     u8 *data;

+    rtl_reset_ocp_base(tp);
+
     if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) {
         dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
         return;
@@ -4845,7 +4850,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
         }
     }

-    ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
+    rtl_reset_ocp_base(tp);
+
     rtl_phy_patch_request(tp, false, wait);

     if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version))
@@ -4861,6 +4867,8 @@ static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver)
     ver_addr = __le16_to_cpu(phy_ver->ver.addr);
     ver = __le16_to_cpu(phy_ver->ver.data);

+    rtl_reset_ocp_base(tp);
+
     if (sram_read(tp, ver_addr) >= ver) {
         dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
         return 0;
@@ -4877,6 +4885,8 @@ static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix)
 {
     u16 addr, data;

+    rtl_reset_ocp_base(tp);
+
     addr = __le16_to_cpu(fix->setting.addr);
     data = ocp_reg_read(tp, addr);

@@ -4908,6 +4918,8 @@ static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *ph
     u32 length;
     int i, num;

+    rtl_reset_ocp_base(tp);
+
     num = phy->pre_num;
     for (i = 0; i < num; i++)
         sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr),
@@ -4938,6 +4950,8 @@ static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy)
     u32 length, i, num;
     __le16 *data;

+    rtl_reset_ocp_base(tp);
+
     mode_reg = __le16_to_cpu(phy->mode_reg);
     sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre));
     sram_write(tp, __le16_to_cpu(phy->ba_reg),
@@ -5107,6 +5121,7 @@ post_fw:
     if (rtl_fw->post_fw)
         rtl_fw->post_fw(tp);

+    rtl_reset_ocp_base(tp);
     strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
     dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
 }
@@ -6584,6 +6599,21 @@ static bool rtl8153_in_nway(struct r8152 *tp)
     return true;
 }

+static void r8156_mdio_force_mode(struct r8152 *tp)
+{
+    u16 data;
+
+    /* Select force mode through 0xa5b4 bit 15
+     * 0: MDIO force mode
+     * 1: MMD force mode
+     */
+    data = ocp_reg_read(tp, 0xa5b4);
+    if (data & BIT(15)) {
+        data &= ~BIT(15);
+        ocp_reg_write(tp, 0xa5b4, data);
+    }
+}
+
 static void set_carrier(struct r8152 *tp)
 {
     struct net_device *netdev = tp->netdev;
@@ -8016,6 +8046,7 @@ static void r8156_init(struct r8152 *tp)
     ocp_data |= ACT_ODMA;
     ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data);

+    r8156_mdio_force_mode(tp);
     rtl_tally_reset(tp);

     tp->coalesce = 15000;    /* 15 us */
@@ -8145,6 +8176,7 @@ static void r8156b_init(struct r8152 *tp)
     ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
     ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);

+    r8156_mdio_force_mode(tp);
     rtl_tally_reset(tp);

     tp->coalesce = 15000;    /* 15 us */
@@ -8467,6 +8499,8 @@ static int rtl8152_resume(struct usb_interface *intf)

     mutex_lock(&tp->control);

+    rtl_reset_ocp_base(tp);
+
     if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
         ret = rtl8152_runtime_resume(tp);
     else
@@ -8482,6 +8516,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
     struct r8152 *tp = usb_get_intfdata(intf);

     clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+    rtl_reset_ocp_base(tp);
     tp->rtl_ops.init(tp);
     queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
     set_ethernet_addr(tp, true);
@@ -879,8 +879,12 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,

             stats->xdp_bytes += skb->len;
             skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
-            if (skb)
-                napi_gro_receive(&rq->xdp_napi, skb);
+            if (skb) {
+                if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
+                    netif_receive_skb(skb);
+                else
+                    napi_gro_receive(&rq->xdp_napi, skb);
+            }
         }
         done++;
     }
@@ -1937,7 +1937,7 @@ enum netdev_ml_priv_type {
  *    @udp_tunnel_nic:    UDP tunnel offload state
  *    @xdp_state:        stores info on attached XDP BPF programs
  *
- *    @nested_level:    Used as as a parameter of spin_lock_nested() of
+ *    @nested_level:    Used as a parameter of spin_lock_nested() of
  *            dev->addr_list_lock.
  *    @unlink_list:    As netif_addr_lock() can be called recursively,
  *            keep a list of interfaces to be deleted.
@@ -286,6 +286,7 @@ struct nf_bridge_info {
 struct tc_skb_ext {
     __u32 chain;
     __u16 mru;
+    __u16 zone;
     bool post_ct;
 };
 #endif
@@ -1380,7 +1381,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
             struct flow_dissector *flow_dissector,
             void *target_container,
             u16 *ctinfo_map, size_t mapsize,
-            bool post_ct);
+            bool post_ct, u16 zone);
 void
 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
                  struct flow_dissector *flow_dissector,
@@ -7,9 +7,27 @@
 #include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>

+static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
+{
+    switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+    case VIRTIO_NET_HDR_GSO_TCPV4:
+        return protocol == cpu_to_be16(ETH_P_IP);
+    case VIRTIO_NET_HDR_GSO_TCPV6:
+        return protocol == cpu_to_be16(ETH_P_IPV6);
+    case VIRTIO_NET_HDR_GSO_UDP:
+        return protocol == cpu_to_be16(ETH_P_IP) ||
+               protocol == cpu_to_be16(ETH_P_IPV6);
+    default:
+        return false;
+    }
+}
+
 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
                        const struct virtio_net_hdr *hdr)
 {
+    if (skb->protocol)
+        return 0;
+
     switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
     case VIRTIO_NET_HDR_GSO_TCPV4:
     case VIRTIO_NET_HDR_GSO_UDP:
@@ -88,9 +106,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
     if (!skb->protocol) {
         __be16 protocol = dev_parse_header_protocol(skb);

-        virtio_net_hdr_set_proto(skb, hdr);
-        if (protocol && protocol != skb->protocol)
+        if (!protocol)
+            virtio_net_hdr_set_proto(skb, hdr);
+        else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
             return -EINVAL;
+        else
+            skb->protocol = protocol;
     }
 retry:
     if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
@@ -193,4 +193,20 @@ static inline void skb_txtime_consumed(struct sk_buff *skb)
     skb->tstamp = ktime_set(0, 0);
 }

+struct tc_skb_cb {
+    struct qdisc_skb_cb qdisc_cb;
+
+    u16 mru;
+    bool post_ct;
+    u16 zone; /* Only valid if post_ct = true */
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+    struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+    BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+    return cb;
+}
+
 #endif
@@ -447,8 +447,6 @@ struct qdisc_skb_cb {
     };
 #define QDISC_CB_PRIV_LEN 20
     unsigned char        data[QDISC_CB_PRIV_LEN];
-    u16 mru;
-    bool post_ct;
 };

 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
@@ -431,7 +431,7 @@ struct sock {
 #ifdef CONFIG_XFRM
     struct xfrm_policy __rcu *sk_policy[2];
 #endif
-    struct dst_entry    *sk_rx_dst;
+    struct dst_entry __rcu    *sk_rx_dst;
     int            sk_rx_dst_ifindex;
     u32            sk_rx_dst_cookie;

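Not part of the patch, but the access rules implied by the __rcu annotation (and checked by sparse) can be summarized with the same helpers the hunks below switch to; the wrapper function here is purely illustrative:

#include <net/sock.h>
#include <net/dst.h>

static void sk_rx_dst_access_sketch(struct sock *sk, struct dst_entry *dst)
{
    struct dst_entry *old;

    /* Readers (e.g. the early-demux paths) run under rcu_read_lock(). */
    rcu_read_lock();
    old = rcu_dereference(sk->sk_rx_dst);
    rcu_read_unlock();

    /* A writer publishes a new pointer with a release barrier. */
    rcu_assign_pointer(sk->sk_rx_dst, dst);

    /* The owner (socket locked, or last reference) may tear down
     * without barriers and release the old dst afterwards.
     */
    old = rcu_dereference_protected(sk->sk_rx_dst, 1);
    RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
    dst_release(old);
}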
@@ -85,8 +85,10 @@ static void ax25_kill_by_device(struct net_device *dev)
 again:
     ax25_for_each(s, &ax25_list) {
         if (s->ax25_dev == ax25_dev) {
-            s->ax25_dev = NULL;
             spin_unlock_bh(&ax25_list_lock);
+            lock_sock(s->sk);
+            s->ax25_dev = NULL;
+            release_sock(s->sk);
             ax25_disconnect(s, ENETUNREACH);
             spin_lock_bh(&ax25_list_lock);

@@ -337,7 +337,7 @@ static int old_deviceless(struct net *net, void __user *uarg)

         args[2] = get_bridge_ifindices(net, indices, args[2]);

-        ret = copy_to_user(uarg, indices,
+        ret = copy_to_user((void __user *)args[1], indices,
                    array_size(args[2], sizeof(int)))
             ? -EFAULT : args[2];

@@ -3941,8 +3941,8 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
         return skb;

     /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
-    qdisc_skb_cb(skb)->mru = 0;
-    qdisc_skb_cb(skb)->post_ct = false;
+    tc_skb_cb(skb)->mru = 0;
+    tc_skb_cb(skb)->post_ct = false;
     mini_qdisc_bstats_cpu_update(miniq, skb);

     switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
@@ -5103,8 +5103,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
     }

     qdisc_skb_cb(skb)->pkt_len = skb->len;
-    qdisc_skb_cb(skb)->mru = 0;
-    qdisc_skb_cb(skb)->post_ct = false;
+    tc_skb_cb(skb)->mru = 0;
+    tc_skb_cb(skb)->post_ct = false;
     skb->tc_at_ingress = 1;
     mini_qdisc_bstats_cpu_update(miniq, skb);

@@ -238,7 +238,7 @@ void
 skb_flow_dissect_ct(const struct sk_buff *skb,
             struct flow_dissector *flow_dissector,
             void *target_container, u16 *ctinfo_map,
-            size_t mapsize, bool post_ct)
+            size_t mapsize, bool post_ct, u16 zone)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
     struct flow_dissector_key_ct *key;
@@ -260,6 +260,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
     if (!ct) {
         key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                 TCA_FLOWER_KEY_CT_FLAGS_INVALID;
+        key->ct_zone = zone;
         return;
     }

@@ -47,9 +47,13 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
     void *injection;
     __be32 *prefix;
     u32 rew_op = 0;
+    u64 qos_class;

     ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);

+    qos_class = netdev_get_num_tc(netdev) ?
+            netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
+
     injection = skb_push(skb, OCELOT_TAG_LEN);
     prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);

@@ -57,7 +61,7 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
     memset(injection, 0, OCELOT_TAG_LEN);
     ocelot_ifh_set_bypass(injection, 1);
     ocelot_ifh_set_src(injection, ds->num_ports);
-    ocelot_ifh_set_qos_class(injection, skb->priority);
+    ocelot_ifh_set_qos_class(injection, qos_class);
     ocelot_ifh_set_vlan_tci(injection, vlan_tci);
     ocelot_ifh_set_tag_type(injection, tag_type);

@@ -154,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)

     kfree(rcu_dereference_protected(inet->inet_opt, 1));
     dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
-    dst_release(sk->sk_rx_dst);
+    dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
     sk_refcnt_debug_dec(sk);
 }
 EXPORT_SYMBOL(inet_sock_destruct);
@@ -3012,8 +3012,7 @@ int tcp_disconnect(struct sock *sk, int flags)
     icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
     memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
     __sk_dst_reset(sk);
-    dst_release(sk->sk_rx_dst);
-    sk->sk_rx_dst = NULL;
+    dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
     tcp_saved_syn_free(tp);
     tp->compressed_ack = 0;
     tp->segs_in = 0;
@@ -5787,7 +5787,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
     trace_tcp_probe(sk, skb);

     tcp_mstamp_refresh(tp);
-    if (unlikely(!sk->sk_rx_dst))
+    if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
         inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
     /*
      *    Header prediction.
@@ -1701,7 +1701,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
     struct sock *rsk;

     if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-        struct dst_entry *dst = sk->sk_rx_dst;
+        struct dst_entry *dst;
+
+        dst = rcu_dereference_protected(sk->sk_rx_dst,
+                        lockdep_sock_is_held(sk));

         sock_rps_save_rxhash(sk, skb);
         sk_mark_napi_id(sk, skb);
@@ -1709,8 +1712,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
         if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
             !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
                      dst, 0)) {
+            RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
             dst_release(dst);
-            sk->sk_rx_dst = NULL;
         }
     }
     tcp_rcv_established(sk, skb);
@@ -1786,7 +1789,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
         skb->sk = sk;
         skb->destructor = sock_edemux;
         if (sk_fullsock(sk)) {
-            struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+            struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);

             if (dst)
                 dst = dst_check(dst, 0);
@@ -2201,7 +2204,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
     struct dst_entry *dst = skb_dst(skb);

     if (dst && dst_hold_safe(dst)) {
-        sk->sk_rx_dst = dst;
+        rcu_assign_pointer(sk->sk_rx_dst, dst);
         sk->sk_rx_dst_ifindex = skb->skb_iif;
     }
 }
@@ -2250,7 +2250,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
     struct dst_entry *old;

     if (dst_hold_safe(dst)) {
-        old = xchg(&sk->sk_rx_dst, dst);
+        old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
         dst_release(old);
         return old != dst;
     }
@@ -2440,7 +2440,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
         struct dst_entry *dst = skb_dst(skb);
         int ret;

-        if (unlikely(sk->sk_rx_dst != dst))
+        if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
             udp_sk_rx_dst_set(sk, dst);

         ret = udp_unicast_rcv_skb(sk, skb, uh);
@@ -2599,7 +2599,7 @@ int udp_v4_early_demux(struct sk_buff *skb)

     skb->sk = sk;
     skb->destructor = sock_efree;
-    dst = READ_ONCE(sk->sk_rx_dst);
+    dst = rcu_dereference(sk->sk_rx_dst);

     if (dst)
         dst = dst_check(dst, 0);
@@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
     if (dst && dst_hold_safe(dst)) {
         const struct rt6_info *rt = (const struct rt6_info *)dst;

-        sk->sk_rx_dst = dst;
+        rcu_assign_pointer(sk->sk_rx_dst, dst);
         sk->sk_rx_dst_ifindex = skb->skb_iif;
         sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
     }
@@ -1505,7 +1505,10 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
         opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

     if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-        struct dst_entry *dst = sk->sk_rx_dst;
+        struct dst_entry *dst;
+
+        dst = rcu_dereference_protected(sk->sk_rx_dst,
+                        lockdep_sock_is_held(sk));

         sock_rps_save_rxhash(sk, skb);
         sk_mark_napi_id(sk, skb);
@@ -1513,8 +1516,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
         if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
             INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
                     dst, sk->sk_rx_dst_cookie) == NULL) {
+            RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
             dst_release(dst);
-            sk->sk_rx_dst = NULL;
         }
     }

@@ -1874,7 +1877,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
         skb->sk = sk;
         skb->destructor = sock_edemux;
         if (sk_fullsock(sk)) {
-            struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+            struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);

             if (dst)
                 dst = dst_check(dst, sk->sk_rx_dst_cookie);
@@ -956,7 +956,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		struct dst_entry *dst = skb_dst(skb);
 		int ret;
 
-		if (unlikely(sk->sk_rx_dst != dst))
+		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
 			udp6_sk_rx_dst_set(sk, dst);
 
 		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
@@ -1070,7 +1070,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
 
 	skb->sk = sk;
 	skb->destructor = sock_efree;
-	dst = READ_ONCE(sk->sk_rx_dst);
+	dst = rcu_dereference(sk->sk_rx_dst);
 
 	if (dst)
 		dst = dst_check(dst, sk->sk_rx_dst_cookie);
@@ -1264,7 +1264,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	return 0;
 
 error:
+	mutex_lock(&local->mtx);
 	ieee80211_vif_release_channel(sdata);
+	mutex_unlock(&local->mtx);
+
 	return err;
 }
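ieee80211_vif_release_channel() must be called with local->mtx held; the success path already satisfied that, but the error path did not. A generic sketch of keeping an error path honest about a callee's locking contract (all names hypothetical):

    #include <linux/mutex.h>
    #include <linux/lockdep.h>
    #include <linux/errno.h>

    static void release_channel(struct mutex *mtx)
    {
        lockdep_assert_held(mtx);   /* callee documents its contract */
        /* ... undo the channel reservation ... */
    }

    static int do_setup(void)
    {
        return -EINVAL;             /* pretend a setup step failed */
    }

    static int start_thing(struct mutex *mtx)
    {
        int err = do_setup();

        if (err)
            goto error;
        return 0;

    error:
        mutex_lock(mtx);            /* the error path honors it too */
        release_channel(mtx);
        mutex_unlock(mtx);
        return err;
    }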
@@ -1195,8 +1195,6 @@ restart:
 		}
 		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
 					   hnnode) {
-			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
-				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			if (nf_ct_is_expired(ct)) {
 				if (i < ARRAY_SIZE(nf_ct_evict) &&
@@ -1208,6 +1206,9 @@ restart:
 			if (!net_eq(net, nf_ct_net(ct)))
 				continue;
 
+			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+				continue;
+
 			if (cb->args[1]) {
 				if (ct != last)
 					continue;
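The dump loop previously filtered out reply-direction tuples before the expiry check ever ran, so expired entries reached only via their reply tuple were never evicted. Moving the direction filter below the eviction and netns checks keeps the dump output identical (each conntrack still appears once) while letting eviction see every tuple. Simplified shape of the per-entry checks, using the real conntrack helpers:

    #include <net/netfilter/nf_conntrack.h>

    static bool want_to_dump(struct net *net, struct nf_conntrack_tuple_hash *h)
    {
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        if (nf_ct_is_expired(ct))
            return false;           /* eviction candidate, not dump output */
        if (!net_eq(net, nf_ct_net(ct)))
            return false;
        if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
            return false;           /* reply tuple: same ct, already dumped */
        return true;
    }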
@@ -4481,9 +4481,9 @@ struct nft_set_elem_catchall {
 static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
 				     struct nft_set *set)
 {
-	struct nft_set_elem_catchall *catchall;
+	struct nft_set_elem_catchall *next, *catchall;
 
-	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
 		list_del_rcu(&catchall->list);
 		nft_set_elem_destroy(set, catchall->elem, true);
 		kfree_rcu(catchall);
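list_for_each_entry_rcu() reads the next pointer from the current element after the loop body runs, but this body frees the element, so the traversal itself was the use-after-free. On a destroy path with no concurrent mutators the right tool is list_for_each_entry_safe(), which caches the successor up front. The idiom in isolation (element type hypothetical):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
        struct list_head list;
        struct rcu_head rcu;
    };

    static void destroy_all(struct list_head *head)
    {
        struct item *it, *next;

        /* _safe caches the successor before the body can free @it. */
        list_for_each_entry_safe(it, next, head, list) {
            list_del_rcu(&it->list);    /* readers may still be in flight */
            kfree_rcu(it, rcu);         /* free after a grace period */
        }
    }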
@@ -556,7 +556,8 @@ __build_packet_message(struct nfnl_log_net *log,
 		goto nla_put_failure;
 
 	if (indev && skb->dev &&
-	    skb->mac_header != skb->network_header) {
+	    skb_mac_header_was_set(skb) &&
+	    skb_mac_header_len(skb) != 0) {
 		struct nfulnl_msg_packet_hw phw;
 		int len;
 
@@ -560,7 +560,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 		goto nla_put_failure;
 
 	if (indev && entskb->dev &&
-	    skb_mac_header_was_set(entskb)) {
+	    skb_mac_header_was_set(entskb) &&
+	    skb_mac_header_len(entskb) != 0) {
 		struct nfqnl_msg_packet_hw phw;
 		int len;
 
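These two hunks are the looped (broad|multi)cast MAC-handling fix from the summary above: a looped-back skb has a set but zero-length MAC header, so both netlink paths must check skb_mac_header_len() as well before copying hardware-header bytes. The guarded extraction in isolation (helper name hypothetical):

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    static int copy_hw_header(const struct sk_buff *skb, u8 *buf, int buflen)
    {
        int len;

        if (!skb_mac_header_was_set(skb))
            return -ENODATA;
        len = skb_mac_header_len(skb);
        if (len == 0 || len > buflen)   /* looped mcast: set but empty */
            return -ENODATA;
        memcpy(buf, skb_mac_header(skb), len);
        return len;
    }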
@@ -34,6 +34,7 @@
 #include <net/mpls.h>
 #include <net/ndisc.h>
 #include <net/nsh.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 #include "conntrack.h"
 #include "datapath.h"
@@ -860,6 +861,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 #endif
 	bool post_ct = false;
 	int res, err;
+	u16 zone = 0;
 
 	/* Extract metadata from packet. */
 	if (tun_info) {
@@ -898,6 +900,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 		key->recirc_id = tc_ext ? tc_ext->chain : 0;
 		OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
 		post_ct = tc_ext ? tc_ext->post_ct : false;
+		zone = post_ct ? tc_ext->zone : 0;
 	} else {
 		key->recirc_id = 0;
 	}
@@ -906,8 +909,11 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 #endif
 
 	err = key_extract(skb, key);
-	if (!err)
+	if (!err) {
 		ovs_ct_fill_key(skb, key, post_ct); /* Must be after key_extract(). */
+		if (post_ct && !skb_get_nfct(skb))
+			key->ct_zone = zone;
+	}
 	return err;
 }
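This is the zone-matching fix for invalid conntrack state: when tc conntrack already ran (post_ct) but dropped the entry as invalid, skb->_nfct is empty and ovs_ct_fill_key() cannot recover the zone, so the zone id is carried next to post_ct in the tc_skb_ext and patched into the flow key. Illustrative read-out of that extension (assumes CONFIG_NET_TC_SKB_EXT; helper name hypothetical):

    #include <linux/skbuff.h>

    static u16 recover_ct_zone(const struct sk_buff *skb, bool *post_ct)
    {
        struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);

        *post_ct = ext ? ext->post_ct : false;
        /* The zone id is only meaningful once conntrack has run. */
        return (ext && ext->post_ct) ? ext->zone : 0;
    }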
@@ -947,6 +947,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 			ret = -EBUSY;
 		else if (sk->sk_state == TCP_ESTABLISHED)
 			ret = -EISCONN;
+		else if (!pn->pn_sk.sobject)
+			ret = -EADDRNOTAVAIL;
 		else
 			ret = pep_sock_enable(sk, NULL, 0);
 		release_sock(sk);
@@ -690,10 +690,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 				   u8 family, u16 zone, bool *defrag)
 {
 	enum ip_conntrack_info ctinfo;
-	struct qdisc_skb_cb cb;
 	struct nf_conn *ct;
 	int err = 0;
 	bool frag;
+	u16 mru;
 
 	/* Previously seen (loopback)? Ignore. */
 	ct = nf_ct_get(skb, &ctinfo);
@@ -708,7 +708,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 		return err;
 
 	skb_get(skb);
-	cb = *qdisc_skb_cb(skb);
+	mru = tc_skb_cb(skb)->mru;
 
 	if (family == NFPROTO_IPV4) {
 		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
@@ -722,7 +722,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 
 		if (!err) {
 			*defrag = true;
-			cb.mru = IPCB(skb)->frag_max_size;
+			mru = IPCB(skb)->frag_max_size;
 		}
 	} else { /* NFPROTO_IPV6 */
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
@@ -735,7 +735,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 
 		if (!err) {
 			*defrag = true;
-			cb.mru = IP6CB(skb)->frag_max_size;
+			mru = IP6CB(skb)->frag_max_size;
 		}
 #else
 		err = -EOPNOTSUPP;
@@ -744,7 +744,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 	}
 
 	if (err != -EINPROGRESS)
-		*qdisc_skb_cb(skb) = cb;
+		tc_skb_cb(skb)->mru = mru;
 	skb_clear_hash(skb);
 	skb->ignore_df = 1;
 	return err;
@@ -963,7 +963,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
 	tcf_action_update_bstats(&c->common, skb);
 
 	if (clear) {
-		qdisc_skb_cb(skb)->post_ct = false;
+		tc_skb_cb(skb)->post_ct = false;
 		ct = nf_ct_get(skb, &ctinfo);
 		if (ct) {
 			nf_conntrack_put(&ct->ct_general);
@@ -1048,7 +1048,8 @@ do_nat:
 out_push:
 	skb_push_rcsum(skb, nh_ofs);
 
-	qdisc_skb_cb(skb)->post_ct = true;
+	tc_skb_cb(skb)->post_ct = true;
+	tc_skb_cb(skb)->zone = p->zone;
 out_clear:
 	if (defrag)
 		qdisc_skb_cb(skb)->pkt_len = skb->len;
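The act_ct hunks switch tc's private per-packet state (mru, post_ct, and now zone) from struct qdisc_skb_cb to struct tc_skb_cb, a tc-only control block layered on top of the generic one because qdisc_skb_cb had no spare room for the zone field. Sketch of the layered-cb idiom (struct and accessor names hypothetical; the field layout shown is illustrative):

    #include <linux/skbuff.h>
    #include <linux/build_bug.h>
    #include <linux/stddef.h>
    #include <net/sch_generic.h>

    struct my_tc_cb {
        struct qdisc_skb_cb base;   /* must stay first */
        u16  mru;
        bool post_ct;
        u16  zone;
    };

    static struct my_tc_cb *my_tc_cb(struct sk_buff *skb)
    {
        /* Compile-time proof that we still fit inside skb->cb. */
        BUILD_BUG_ON(sizeof(struct my_tc_cb) >
                     sizeof_field(struct sk_buff, cb));
        return (struct my_tc_cb *)qdisc_skb_cb(skb);
    }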
@@ -1617,12 +1617,15 @@ int tcf_classify(struct sk_buff *skb,
 
 	/* If we missed on some chain */
 	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+		struct tc_skb_cb *cb = tc_skb_cb(skb);
+
 		ext = tc_skb_ext_alloc(skb);
 		if (WARN_ON_ONCE(!ext))
 			return TC_ACT_SHOT;
 		ext->chain = last_executed_chain;
-		ext->mru = qdisc_skb_cb(skb)->mru;
-		ext->post_ct = qdisc_skb_cb(skb)->post_ct;
+		ext->mru = cb->mru;
+		ext->post_ct = cb->post_ct;
+		ext->zone = cb->zone;
 	}
 
 	return ret;
@@ -19,6 +19,7 @@
 
 #include <net/sch_generic.h>
 #include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
 #include <net/ip.h>
 #include <net/flow_dissector.h>
 #include <net/geneve.h>
@@ -309,7 +310,8 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			       struct tcf_result *res)
 {
 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
-	bool post_ct = qdisc_skb_cb(skb)->post_ct;
+	bool post_ct = tc_skb_cb(skb)->post_ct;
+	u16 zone = tc_skb_cb(skb)->zone;
 	struct fl_flow_key skb_key;
 	struct fl_flow_mask *mask;
 	struct cls_fl_filter *f;
@@ -327,7 +329,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
 				    fl_ct_info_to_flower_map,
 				    ARRAY_SIZE(fl_ct_info_to_flower_map),
-				    post_ct);
+				    post_ct, zone);
 		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
 		skb_flow_dissect(skb, &mask->dissector, &skb_key,
 				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 #include <net/netlink.h>
 #include <net/sch_generic.h>
+#include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/ip6_fib.h>
@@ -137,7 +138,7 @@ err:
 
 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
 {
-	u16 mru = qdisc_skb_cb(skb)->mru;
+	u16 mru = tc_skb_cb(skb)->mru;
 	int err;
 
 	if (mru && skb->len > mru + skb->dev->hard_header_len)
@@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
 		return -EEXIST;
 
 	/* Allocate a new AEAD */
-	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
 	if (unlikely(!tmp))
 		return -ENOMEM;
 
@@ -1474,7 +1474,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
 		return -EEXIST;
 
 	/* Allocate crypto */
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
 		return -ENOMEM;
 
@@ -1488,7 +1488,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
 	}
 
 	/* Allocate statistic structure */
-	c->stats = alloc_percpu(struct tipc_crypto_stats);
+	c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
 	if (!c->stats) {
 		if (c->wq)
 			destroy_workqueue(c->wq);
@@ -2461,7 +2461,7 @@ static void tipc_crypto_work_tx(struct work_struct *work)
 	}
 
 	/* Lets duplicate it first */
-	skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL);
+	skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
 	rcu_read_unlock();
 
 	/* Now, generate new key, initiate & distribute it */
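The TIPC hunks are the revert of "tipc: use consistent GFP flags": these call sites can run in atomic context (under rcu_read_lock() or from bearer paths), where GFP_KERNEL may sleep, so the allocations go back to GFP_ATOMIC. The constraint in isolation (helper name hypothetical):

    #include <linux/slab.h>
    #include <linux/rcupdate.h>

    /* An allocation inside an RCU read-side section must not sleep:
     * GFP_ATOMIC forgoes reclaim (and fails more readily) in exchange
     * for being safe here; GFP_KERNEL could schedule and break the
     * critical section.
     */
    static void *dup_under_rcu(const void *src, size_t len)
    {
        void *copy;

        rcu_read_lock();
        copy = kmemdup(src, len, GFP_ATOMIC);
        rcu_read_unlock();
        return copy;
    }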