
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "Just a bunch of small fixes and tidy ups:

   1) Finish the "busy_poll" renames, from Eliezer Tamir.

   2) Fix RCU stalls in IFB driver, from Ding Tianhong.

   3) Linearize buffers properly in tun/macvtap zerocopy code.

   4) Don't crash on rmmod in vxlan, from Pravin B Shelar.

   5) Spinlock used before init in alx driver, from Maarten Lankhorst.

   6) A sparse warning fix in bnx2x broke TSO checksums, fix from Dmitry
      Kravkov.

   7) Dummy and ifb driver load failure paths can oops, fixes from Tan
      Xiaojun and Ding Tianhong.

   8) Correct MTU calculations in IP tunnels, from Alexander Duyck.

   9) Account all TCP retransmits in SNMP stats properly, from Yuchung
      Cheng.

  10) atl1e and via-rhine do not handle DMA mapping failures properly,
      from Neil Horman.

  11) Various equal-cost multipath route fixes in ipv6 from Hannes
      Frederic Sowa"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (36 commits)
  ipv6: only static routes qualify for equal cost multipathing
  via-rhine: fix dma mapping errors
  atl1e: fix dma mapping warnings
  tcp: account all retransmit failures
  usb/net/r815x: fix cast to restricted __le32
  usb/net/r8152: fix integer overflow in expression
  net: access page->private by using page_private
  net: strict_strtoul is obsolete, use kstrtoul instead
  drivers/net/ieee802154: don't use devm_pinctrl_get_select_default() in probe
  drivers/net/ethernet/cadence: don't use devm_pinctrl_get_select_default() in probe
  drivers/net/can/c_can: don't use devm_pinctrl_get_select_default() in probe
  net/usb: add relative mii functions for r815x
  net/tipc: use %*phC to dump small buffers in hex form
  qlcnic: Adding Maintainers.
  gre: Fix MTU sizing check for gretap tunnels
  pkt_sched: sch_qfq: remove forward declaration of qfq_update_agg_ts
  pkt_sched: sch_qfq: improve efficiency of make_eligible
  gso: Update tunnel segmentation to support Tx checksum offload
  inet: fix spacing in assignment
  ifb: fix oops when loading the ifb failed
  ...
Linus Torvalds 2013-07-13 17:42:22 -07:00
commit be9c6d9169
65 changed files with 610 additions and 244 deletions


@ -50,26 +50,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt,
it's a Per-CPU variable.
Default: 64
low_latency_read
busy_read
----------------
Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
Approximate time in us to busy loop waiting for packets on the device queue.
This sets the default value of the SO_LL socket option.
Can be set or overridden per socket by setting socket option SO_LL, which is
the preferred method of enabling.
If you need to enable the feature globally via sysctl, a value of 50 is recommended.
This sets the default value of the SO_BUSY_POLL socket option.
Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
which is the preferred method of enabling. If you need to enable the feature
globally via sysctl, a value of 50 is recommended.
Will increase power usage.
Default: 0 (off)
low_latency_poll
busy_poll
----------------
Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
Approximate time in us to busy loop waiting for events.
Recommended value depends on the number of sockets you poll on.
For several sockets 50, for several hundreds 100.
For more than that you probably want to use epoll.
Note that only sockets with SO_LL set will be busy polled, so you want to either
selectively set SO_LL on those sockets or set sysctl.net.low_latency_read globally.
Note that only sockets with SO_BUSY_POLL set will be busy polled,
so you want to either selectively set SO_BUSY_POLL on those sockets or set
sysctl.net.busy_read globally.
Will increase power usage.
Default: 0 (off)
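
For illustration, a minimal user-space sketch of the per-socket method recommended above. Assumptions not taken from the patch itself: the helper name and the use of the SO_BUSY_POLL constant whose value (46) appears in the uapi socket.h hunks later in this diff; the 50 us timeout follows the recommendation in the text.

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL	46	/* value from the asm-generic socket.h change below */
#endif

/* Enable busy polling for reads on one socket, using the 50 us value
 * recommended in the documentation text above. */
static int enable_busy_poll(int sockfd)
{
	unsigned int busy_poll_usecs = 50;

	if (setsockopt(sockfd, SOL_SOCKET, SO_BUSY_POLL,
		       &busy_poll_usecs, sizeof(busy_poll_usecs)) < 0) {
		perror("setsockopt(SO_BUSY_POLL)");
		return -1;
	}
	return 0;
}

The same default can instead be applied globally through the busy_read sysctl, as the text notes.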


@ -6681,10 +6681,12 @@ F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
M: Himanshu Madhani <himanshu.madhani@qlogic.com>
M: Rajesh Borundia <rajesh.borundia@qlogic.com>
M: Shahed Shaikh <shahed.shaikh@qlogic.com>
M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
M: Sucheta Chakraborty <sucheta.chakraborty@qlogic.com>
M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
S: Supported


@ -81,6 +81,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _UAPI_ASM_SOCKET_H */


@ -74,6 +74,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* __ASM_AVR32_SOCKET_H */


@ -76,7 +76,7 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */


@ -74,7 +74,7 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */


@ -74,6 +74,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */


@ -83,6 +83,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_IA64_SOCKET_H */


@ -74,6 +74,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_M32R_SOCKET_H */


@ -92,6 +92,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _UAPI_ASM_SOCKET_H */


@ -74,6 +74,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */


@ -73,7 +73,7 @@
#define SO_SELECT_ERR_QUEUE 0x4026
#define SO_LL 0x4027
#define SO_BUSY_POLL 0x4027
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.


@ -81,6 +81,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_POWERPC_SOCKET_H */


@ -80,6 +80,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */


@ -70,7 +70,7 @@
#define SO_SELECT_ERR_QUEUE 0x0029
#define SO_LL 0x0030
#define SO_BUSY_POLL 0x0030
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001


@ -85,6 +85,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _XTENSA_SOCKET_H */


@ -32,7 +32,6 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/can/dev.h>
@ -114,7 +113,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
struct c_can_priv *priv;
const struct of_device_id *match;
const struct platform_device_id *id;
struct pinctrl *pinctrl;
struct resource *mem, *res;
int irq;
struct clk *clk;
@ -131,11 +129,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
id = platform_get_device_id(pdev);
}
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl))
dev_warn(&pdev->dev,
"failed to configure pins from driver\n");
/* get the appropriate clk */
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {


@ -185,6 +185,8 @@ static int __init dummy_init_module(void)
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
if (err < 0)
goto out;
for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
@ -192,6 +194,8 @@ static int __init dummy_init_module(void)
}
if (err < 0)
__rtnl_link_unregister(&dummy_link_ops);
out:
rtnl_unlock();
return err;


@ -1245,6 +1245,8 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
alx = netdev_priv(netdev);
spin_lock_init(&alx->hw.mdio_lock);
spin_lock_init(&alx->irq_lock);
alx->dev = netdev;
alx->hw.pdev = pdev;
alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
@ -1327,9 +1329,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&alx->link_check_wk, alx_link_check);
INIT_WORK(&alx->reset_wk, alx_reset);
spin_lock_init(&alx->hw.mdio_lock);
spin_lock_init(&alx->irq_lock);
netif_carrier_off(netdev);
err = register_netdev(netdev);


@ -1665,8 +1665,8 @@ check_sum:
return 0;
}
static void atl1e_tx_map(struct atl1e_adapter *adapter,
struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
static int atl1e_tx_map(struct atl1e_adapter *adapter,
struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
struct atl1e_tpd_desc *use_tpd = NULL;
struct atl1e_tx_buffer *tx_buffer = NULL;
@ -1677,6 +1677,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
u16 nr_frags;
u16 f;
int segment;
int ring_start = adapter->tx_ring.next_to_use;
nr_frags = skb_shinfo(skb)->nr_frags;
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
@ -1689,6 +1690,9 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
tx_buffer->length = map_len;
tx_buffer->dma = pci_map_single(adapter->pdev,
skb->data, hdr_len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
return -ENOSPC;
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
mapped_len += map_len;
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
@ -1715,6 +1719,13 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
tx_buffer->dma =
pci_map_single(adapter->pdev, skb->data + mapped_len,
map_len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
/* Reset the tx rings next pointer */
adapter->tx_ring.next_to_use = ring_start;
return -ENOSPC;
}
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
mapped_len += map_len;
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
@ -1750,6 +1761,13 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
(i * MAX_TX_BUF_LEN),
tx_buffer->length,
DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
/* Reset the ring next to use pointer */
adapter->tx_ring.next_to_use = ring_start;
return -ENOSPC;
}
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
@ -1767,6 +1785,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
/* The last buffer info contain the skb address,
so it will be free after unmap */
tx_buffer->skb = skb;
return 0;
}
static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
@ -1834,10 +1853,13 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
atl1e_tx_map(adapter, skb, tpd);
if (atl1e_tx_map(adapter, skb, tpd))
goto out;
atl1e_tx_queue(adapter, tpd_req, tpd);
netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
out:
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}


@ -24,7 +24,7 @@
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
@ -990,7 +990,7 @@ reuse_rx:
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(cqe_fp->vlan_tag));
skb_mark_ll(skb, &fp->napi);
skb_mark_napi_id(skb, &fp->napi);
if (bnx2x_fp_ll_polling(fp))
netif_receive_skb(skb);
@ -3543,9 +3543,9 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
/* outer IP header info */
if (xmit_type & XMIT_CSUM_V4) {
struct iphdr *iph = ip_hdr(skb);
u16 csum = (__force u16)(~iph->check) -
(__force u16)iph->tot_len -
(__force u16)iph->frag_off;
u32 csum = (__force u32)(~iph->check) -
(__force u32)iph->tot_len -
(__force u32)iph->frag_off;
pbd2->fw_ip_csum_wo_len_flags_frag =
bswab16(csum_fold((__force __wsum)csum));


@ -12027,7 +12027,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#endif
#ifdef CONFIG_NET_LL_RX_POLL
.ndo_ll_poll = bnx2x_low_latency_recv,
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
};


@ -29,7 +29,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
#include "macb.h"
@ -309,7 +308,6 @@ static int __init at91ether_probe(struct platform_device *pdev)
struct resource *regs;
struct net_device *dev;
struct phy_device *phydev;
struct pinctrl *pinctrl;
struct macb *lp;
int res;
u32 reg;
@ -319,15 +317,6 @@ static int __init at91ether_probe(struct platform_device *pdev)
if (!regs)
return -ENOENT;
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
res = PTR_ERR(pinctrl);
if (res == -EPROBE_DEFER)
return res;
dev_warn(&pdev->dev, "No pinctrl provided\n");
}
dev = alloc_etherdev(sizeof(struct macb));
if (!dev)
return -ENOMEM;


@ -52,7 +52,7 @@
#include <linux/dca.h>
#endif
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL
#define LL_EXTENDED_STATS


@ -1978,7 +1978,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
#endif /* IXGBE_FCOE */
skb_mark_ll(skb, &q_vector->napi);
skb_mark_napi_id(skb, &q_vector->napi);
ixgbe_rx_skb(q_vector, skb);
/* update budget accounting */
@ -7228,7 +7228,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_LL_RX_POLL
.ndo_ll_poll = ixgbe_low_latency_recv,
.ndo_busy_poll = ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,


@ -38,7 +38,7 @@
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@ -2141,7 +2141,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_LL_RX_POLL
.ndo_ll_poll = mlx4_en_low_latency_recv,
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
};


@ -31,7 +31,7 @@
*
*/
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
@ -767,7 +767,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
timestamp);
}
skb_mark_ll(skb, &cq->napi);
skb_mark_napi_id(skb, &cq->napi);
/* Push it up the stack */
netif_receive_skb(skb);


@ -46,6 +46,7 @@
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw"
#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
@ -144,6 +145,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_41,
RTL_GIGA_MAC_VER_42,
RTL_GIGA_MAC_VER_43,
RTL_GIGA_MAC_VER_44,
RTL_GIGA_MAC_NONE = 0xff,
};
@ -276,6 +278,9 @@ static const struct {
[RTL_GIGA_MAC_VER_43] =
_R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2,
JUMBO_1K, true),
[RTL_GIGA_MAC_VER_44] =
_R("RTL8411", RTL_TD_1, FIRMWARE_8411_2,
JUMBO_9K, false),
};
#undef _R
@ -394,6 +399,7 @@ enum rtl8168_8101_registers {
#define CSIAR_FUNC_CARD 0x00000000
#define CSIAR_FUNC_SDIO 0x00010000
#define CSIAR_FUNC_NIC 0x00020000
#define CSIAR_FUNC_NIC2 0x00010000
PMCH = 0x6f,
EPHYAR = 0x80,
#define EPHYAR_FLAG 0x80000000
@ -826,6 +832,7 @@ MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
MODULE_FIRMWARE(FIRMWARE_8411_2);
MODULE_FIRMWARE(FIRMWARE_8106E_1);
MODULE_FIRMWARE(FIRMWARE_8106E_2);
MODULE_FIRMWARE(FIRMWARE_8168G_2);
@ -2051,6 +2058,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
int mac_version;
} mac_info[] = {
/* 8168G family. */
{ 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 },
{ 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
{ 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
{ 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
@ -3651,6 +3659,7 @@ static void rtl_hw_phy_config(struct net_device *dev)
break;
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
rtl8168g_2_hw_phy_config(tp);
break;
@ -3863,6 +3872,7 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
ops->write = r8168g_mdio_write;
ops->read = r8168g_mdio_read;
break;
@ -3916,6 +3926,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
RTL_W32(RxConfig, RTL_R32(RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@ -4178,6 +4189,7 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_44:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@ -4224,6 +4236,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
@ -4384,6 +4397,7 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
default:
ops->disable = NULL;
ops->enable = NULL;
@ -4493,6 +4507,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
tp->mac_version == RTL_GIGA_MAC_VER_41 ||
tp->mac_version == RTL_GIGA_MAC_VER_42 ||
tp->mac_version == RTL_GIGA_MAC_VER_43 ||
tp->mac_version == RTL_GIGA_MAC_VER_44 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
@ -4782,6 +4797,29 @@ static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(CSIDR) : ~0;
}
static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
{
void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIDR, value);
RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
CSIAR_FUNC_NIC2);
rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
static u32 r8411_csi_read(struct rtl8169_private *tp, int addr)
{
void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
RTL_R32(CSIDR) : ~0;
}
static void rtl_init_csi_ops(struct rtl8169_private *tp)
{
struct csi_ops *ops = &tp->csi_ops;
@ -4811,6 +4849,11 @@ static void rtl_init_csi_ops(struct rtl8169_private *tp)
ops->read = r8402_csi_read;
break;
case RTL_GIGA_MAC_VER_44:
ops->write = r8411_csi_write;
ops->read = r8411_csi_read;
break;
default:
ops->write = r8169_csi_write;
ops->read = r8169_csi_read;
@ -5255,6 +5298,25 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
}
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8411_2[] = {
{ 0x00, 0x0000, 0x0008 },
{ 0x0c, 0x3df0, 0x0200 },
{ 0x0f, 0xffff, 0x5200 },
{ 0x19, 0x0020, 0x0000 },
{ 0x1e, 0x0000, 0x2000 }
};
rtl_hw_start_8168g_1(tp);
/* disable aspm and clock request before access ephy */
RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
}
static void rtl_hw_start_8168(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@ -5361,6 +5423,10 @@ static void rtl_hw_start_8168(struct net_device *dev)
rtl_hw_start_8168g_2(tp);
break;
case RTL_GIGA_MAC_VER_44:
rtl_hw_start_8411_2(tp);
break;
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
@ -6877,6 +6943,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
rtl_hw_init_8168g(tp);
break;


@ -4,6 +4,7 @@
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
select CRC32
select MII
select MDIO_BITBANG


@ -1171,7 +1171,11 @@ static void alloc_rbufs(struct net_device *dev)
rp->rx_skbuff_dma[i] =
pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
rp->rx_skbuff_dma[i] = 0;
dev_kfree_skb(skb);
break;
}
rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
}
@ -1687,6 +1691,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_skbuff_dma[entry] =
pci_map_single(rp->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
dev_kfree_skb(skb);
rp->tx_skbuff_dma[entry] = 0;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
}
@ -1961,6 +1971,11 @@ static int rhine_rx(struct net_device *dev, int limit)
pci_map_single(rp->pdev, skb->data,
rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
dev_kfree_skb(skb);
rp->rx_skbuff_dma[entry] = 0;
break;
}
rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
}
rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);


@ -22,7 +22,6 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <net/wpan-phy.h>
#include <net/mac802154.h>
#include <net/ieee802154.h>
@ -627,7 +626,6 @@ static int mrf24j40_probe(struct spi_device *spi)
int ret = -ENOMEM;
u8 val;
struct mrf24j40 *devrec;
struct pinctrl *pinctrl;
printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
@ -638,11 +636,6 @@ static int mrf24j40_probe(struct spi_device *spi)
if (!devrec->buf)
goto err_buf;
pinctrl = devm_pinctrl_get_select_default(&spi->dev);
if (IS_ERR(pinctrl))
dev_warn(&spi->dev,
"pinctrl pins are not configured from the driver");
spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
spi->max_speed_hz = MAX_SPI_SPEED_HZ;


@ -291,11 +291,17 @@ static int __init ifb_init_module(void)
rtnl_lock();
err = __rtnl_link_register(&ifb_link_ops);
if (err < 0)
goto out;
for (i = 0; i < numifbs && !err; i++)
for (i = 0; i < numifbs && !err; i++) {
err = ifb_init_one(i);
cond_resched();
}
if (err)
__rtnl_link_unregister(&ifb_link_ops);
out:
rtnl_unlock();
return err;


@ -712,6 +712,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
int vnet_hdr_len = 0;
int copylen = 0;
bool zerocopy = false;
size_t linear;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = q->vnet_hdr_sz;
@ -766,11 +767,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
copylen = vnet_hdr.hdr_len;
if (!copylen)
copylen = GOODCOPY_LEN;
} else
linear = copylen;
} else {
copylen = len;
linear = vnet_hdr.hdr_len;
}
skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
vnet_hdr.hdr_len, noblock, &err);
linear, noblock, &err);
if (!skb)
goto err;


@ -217,6 +217,7 @@ module_exit(atheros_exit);
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ 0x004dd076, 0xffffffef },
{ 0x004dd074, 0xffffffef },
{ 0x004dd072, 0xffffffef },
{ }
};


@ -1042,7 +1042,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
size_t len = total_len, align = NET_SKB_PAD;
size_t len = total_len, align = NET_SKB_PAD, linear;
struct virtio_net_hdr gso = { 0 };
int offset = 0;
int copylen;
@ -1106,10 +1106,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
copylen = gso.hdr_len;
if (!copylen)
copylen = GOODCOPY_LEN;
} else
linear = copylen;
} else {
copylen = len;
linear = gso.hdr_len;
}
skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
tun->dev->stats.rx_dropped++;

View File

@ -11,7 +11,7 @@ obj-$(CONFIG_USB_HSO) += hso.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o


@ -646,13 +646,18 @@ static const struct usb_device_id products [] = {
},
/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
#endif
/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/*
* WHITELIST!!!


@ -934,7 +934,8 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
struct r8152 *tp = netdev_priv(netdev);
struct net_device_stats *stats = rtl8152_get_stats(netdev);
struct tx_desc *tx_desc;
int len, res;
unsigned int len;
int res;
netif_stop_queue(netdev);
len = skb->len;

drivers/net/usb/r815x.c (new file, 234 lines)

@ -0,0 +1,234 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#define RTL815x_REQT_READ 0xc0
#define RTL815x_REQT_WRITE 0x40
#define RTL815x_REQ_GET_REGS 0x05
#define RTL815x_REQ_SET_REGS 0x05
#define MCU_TYPE_PLA 0x0100
#define OCP_BASE 0xe86c
#define BASE_MII 0xa400
#define BYTE_EN_DWORD 0xff
#define BYTE_EN_WORD 0x33
#define BYTE_EN_BYTE 0x11
#define R815x_PHY_ID 32
#define REALTEK_VENDOR_ID 0x0bda
static int pla_read_word(struct usb_device *udev, u16 index)
{
int data, ret;
u8 shift = index & 2;
__le32 ocp_data;
index &= ~3;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
500);
if (ret < 0)
return ret;
data = __le32_to_cpu(ocp_data);
data >>= (shift * 8);
data &= 0xffff;
return data;
}
static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
{
__le32 ocp_data;
u32 mask = 0xffff;
u16 byen = BYTE_EN_WORD;
u8 shift = index & 2;
int ret;
data &= mask;
if (shift) {
byen <<= shift;
mask <<= (shift * 8);
data <<= (shift * 8);
index &= ~3;
}
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
500);
if (ret < 0)
return ret;
data |= __le32_to_cpu(ocp_data) & ~mask;
ocp_data = __cpu_to_le32(data);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
index, MCU_TYPE_PLA | byen, &ocp_data,
sizeof(ocp_data), 500);
return ret;
}
static int ocp_reg_read(struct usbnet *dev, u16 addr)
{
u16 ocp_base, ocp_index;
int ret;
ocp_base = addr & 0xf000;
ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
if (ret < 0)
goto out;
ocp_index = (addr & 0x0fff) | 0xb000;
ret = pla_read_word(dev->udev, ocp_index);
out:
return ret;
}
static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data)
{
u16 ocp_base, ocp_index;
int ret;
ocp_base = addr & 0xf000;
ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
if (ret < 0)
goto out1;
ocp_index = (addr & 0x0fff) | 0xb000;
ret = pla_write_word(dev->udev, ocp_index, data);
out1:
return ret;
}
static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
{
struct usbnet *dev = netdev_priv(netdev);
if (phy_id != R815x_PHY_ID)
return -EINVAL;
return ocp_reg_read(dev, BASE_MII + reg * 2);
}
static
void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
{
struct usbnet *dev = netdev_priv(netdev);
if (phy_id != R815x_PHY_ID)
return;
ocp_reg_write(dev, BASE_MII + reg * 2, val);
}
static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status;
status = usbnet_cdc_bind(dev, intf);
if (status < 0)
return status;
dev->mii.dev = dev->net;
dev->mii.mdio_read = r815x_mdio_read;
dev->mii.mdio_write = r815x_mdio_write;
dev->mii.phy_id_mask = 0x3f;
dev->mii.reg_num_mask = 0x1f;
dev->mii.phy_id = R815x_PHY_ID;
dev->mii.supports_gmii = 1;
return 0;
}
static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status;
status = usbnet_cdc_bind(dev, intf);
if (status < 0)
return status;
dev->mii.dev = dev->net;
dev->mii.mdio_read = r815x_mdio_read;
dev->mii.mdio_write = r815x_mdio_write;
dev->mii.phy_id_mask = 0x3f;
dev->mii.reg_num_mask = 0x1f;
dev->mii.phy_id = R815x_PHY_ID;
dev->mii.supports_gmii = 0;
return 0;
}
static const struct driver_info r8152_info = {
.description = "RTL8152 ECM Device",
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
.bind = r8152_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
.manage_power = usbnet_manage_power,
};
static const struct driver_info r8153_info = {
.description = "RTL8153 ECM Device",
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
.bind = r8153_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
.manage_power = usbnet_manage_power,
};
static const struct usb_device_id products[] = {
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
.driver_info = 0,
#else
.driver_info = (unsigned long) &r8152_info,
#endif
},
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
#if defined(CONFIG_USB_RTL8153) || defined(CONFIG_USB_RTL8153_MODULE)
.driver_info = 0,
#else
.driver_info = (unsigned long) &r8153_info,
#endif
},
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
static struct usb_driver r815x_driver = {
.name = "r815x",
.id_table = products,
.probe = usbnet_probe,
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
.reset_resume = usbnet_resume,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
module_usb_driver(r815x_driver);
MODULE_AUTHOR("Hayes Wang");
MODULE_DESCRIPTION("Realtek USB ECM device");
MODULE_LICENSE("GPL");


@ -1916,9 +1916,9 @@ late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
unregister_pernet_device(&vxlan_net_ops);
rtnl_link_unregister(&vxlan_link_ops);
destroy_workqueue(vxlan_wq);
unregister_pernet_device(&vxlan_net_ops);
rcu_barrier();
}
module_exit(vxlan_cleanup_module);


@ -28,7 +28,7 @@
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <asm/uaccess.h>


@ -974,7 +974,7 @@ struct net_device_ops {
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_LL_RX_POLL
int (*ndo_ll_poll)(struct napi_struct *dev);
int (*ndo_busy_poll)(struct napi_struct *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);


@ -1,5 +1,5 @@
/*
* Low Latency Sockets
* net busy poll support
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
@ -21,8 +21,8 @@
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
*/
#ifndef _LINUX_NET_LL_POLL_H
#define _LINUX_NET_LL_POLL_H
#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H
#include <linux/netdevice.h>
#include <net/ip.h>
@ -30,8 +30,8 @@
#ifdef CONFIG_NET_LL_RX_POLL
struct napi_struct;
extern unsigned int sysctl_net_ll_read __read_mostly;
extern unsigned int sysctl_net_ll_poll __read_mostly;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;
/* return values from ndo_ll_poll */
#define LL_FLUSH_FAILED -1
@ -39,7 +39,7 @@ extern unsigned int sysctl_net_ll_poll __read_mostly;
static inline bool net_busy_loop_on(void)
{
return sysctl_net_ll_poll;
return sysctl_net_busy_poll;
}
/* a wrapper to make debug_smp_processor_id() happy
@ -72,7 +72,7 @@ static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
/* in poll/select we use the global sysctl_net_ll_poll value */
static inline unsigned long busy_loop_end_time(void)
{
return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
}
static inline bool sk_can_busy_loop(struct sock *sk)
@ -110,11 +110,11 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
goto out;
ops = napi->dev->netdev_ops;
if (!ops->ndo_ll_poll)
if (!ops->ndo_busy_poll)
goto out;
do {
rc = ops->ndo_ll_poll(napi);
rc = ops->ndo_busy_poll(napi);
if (rc == LL_FLUSH_FAILED)
break; /* permanent failure */
@ -134,13 +134,14 @@ out:
}
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
{
skb->napi_id = napi->napi_id;
}
/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
sk->sk_napi_id = skb->napi_id;
}
@ -166,11 +167,12 @@ static inline bool sk_busy_poll(struct sock *sk, int nonblock)
return false;
}
static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
{
}
static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
}
@ -180,4 +182,4 @@ static inline bool busy_loop_timeout(unsigned long end_time)
}
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* _LINUX_NET_LL_POLL_H */
#endif /* _LINUX_NET_BUSY_POLL_H */


@ -76,6 +76,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* __ASM_GENERIC_SOCKET_H */


@ -24,11 +24,11 @@
*/
void p9_release_pages(struct page **pages, int nr_pages)
{
int i = 0;
while (pages[i] && nr_pages--) {
put_page(pages[i]);
i++;
}
int i;
for (i = 0; i < nr_pages; i++)
if (pages[i])
put_page(pages[i]);
}
EXPORT_SYMBOL(p9_release_pages);


@ -56,7 +56,7 @@
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
/*
* Is a socket 'connection oriented' ?


@ -2481,10 +2481,10 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
}
static netdev_features_t harmonize_features(struct sk_buff *skb,
__be16 protocol, netdev_features_t features)
netdev_features_t features)
{
if (skb->ip_summed != CHECKSUM_NONE &&
!can_checksum_protocol(features, protocol)) {
!can_checksum_protocol(features, skb_network_protocol(skb))) {
features &= ~NETIF_F_ALL_CSUM;
} else if (illegal_highdma(skb->dev, skb)) {
features &= ~NETIF_F_SG;
@ -2505,20 +2505,18 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
} else if (!vlan_tx_tag_present(skb)) {
return harmonize_features(skb, protocol, features);
return harmonize_features(skb, features);
}
features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
return harmonize_features(skb, protocol, features);
} else {
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
return harmonize_features(skb, protocol, features);
}
return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);


@ -824,7 +824,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
page = alloc_page(gfp_mask);
if (!page) {
while (head) {
struct page *next = (struct page *)head->private;
struct page *next = (struct page *)page_private(head);
put_page(head);
head = next;
}
@ -834,7 +834,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
memcpy(page_address(page),
vaddr + f->page_offset, skb_frag_size(f));
kunmap_atomic(vaddr);
page->private = (unsigned long)head;
set_page_private(page, (unsigned long)head);
head = page;
}
@ -848,7 +848,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
for (i = num_frags - 1; i >= 0; i--) {
__skb_fill_page_desc(skb, i, head, 0,
skb_shinfo(skb)->frags[i].size);
head = (struct page *)head->private;
head = (struct page *)page_private(head);
}
skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;


@ -139,7 +139,7 @@
#include <net/tcp.h>
#endif
#include <net/ll_poll.h>
#include <net/busy_poll.h>
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
@ -901,7 +901,7 @@ set_rcvbuf:
break;
#ifdef CONFIG_NET_LL_RX_POLL
case SO_LL:
case SO_BUSY_POLL:
/* allow unprivileged users to decrease the value */
if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
ret = -EPERM;
@ -1171,7 +1171,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
#ifdef CONFIG_NET_LL_RX_POLL
case SO_LL:
case SO_BUSY_POLL:
v.val = sk->sk_ll_usec;
break;
#endif
@ -2294,7 +2294,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
#ifdef CONFIG_NET_LL_RX_POLL
sk->sk_napi_id = 0;
sk->sk_ll_usec = sysctl_net_ll_read;
sk->sk_ll_usec = sysctl_net_busy_read;
#endif
/*


@ -19,7 +19,7 @@
#include <net/ip.h>
#include <net/sock.h>
#include <net/net_ratelimit.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
static int one = 1;
@ -300,15 +300,15 @@ static struct ctl_table net_core_table[] = {
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_LL_RX_POLL
{
.procname = "low_latency_poll",
.data = &sysctl_net_ll_poll,
.procname = "busy_poll",
.data = &sysctl_net_busy_poll,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "low_latency_read",
.data = &sysctl_net_ll_read,
.procname = "busy_read",
.data = &sysctl_net_busy_read,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec


@ -118,7 +118,7 @@ dns_resolver_instantiate(struct key *key, struct key_preparsed_payload *prep)
if (opt_vlen <= 0)
goto bad_option_value;
ret = strict_strtoul(eq, 10, &derrno);
ret = kstrtoul(eq, 10, &derrno);
if (ret < 0)
goto bad_option_value;


@ -100,6 +100,9 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
}
__skb_push(skb, tnl_hlen - ghl);
skb_reset_inner_headers(skb);
skb->encapsulation = 1;
skb_reset_mac_header(skb);
skb_set_network_header(skb, mac_len);
skb->mac_len = mac_len;


@ -467,7 +467,7 @@ void inet_unhash(struct sock *sk)
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
spin_lock_bh(lock);
done =__sk_nulls_del_node_init_rcu(sk);
done = __sk_nulls_del_node_init_rcu(sk);
if (done)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock_bh(lock);


@ -476,7 +476,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
struct rtable *rt, __be16 df)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int pkt_size = skb->len - tunnel->hlen;
int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
int mtu;
if (df)


@ -279,7 +279,7 @@
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;


@ -75,7 +75,7 @@
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
@ -1994,7 +1994,7 @@ process:
if (sk_filter(sk, skb))
goto discard_and_relse;
sk_mark_ll(sk, skb);
sk_mark_napi_id(sk, skb);
skb->dev = NULL;
bh_lock_sock_nested(sk);


@ -2407,6 +2407,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
* see tcp_input.c tcp_sacktag_write_queue().
*/
TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
} else {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}
return err;
}
@ -2528,10 +2530,9 @@ begin_fwd:
if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
continue;
if (tcp_retransmit_skb(sk, skb)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
if (tcp_retransmit_skb(sk, skb))
return;
}
NET_INC_STATS_BH(sock_net(sk), mib_idx);
if (tcp_in_cwnd_reduction(sk))


@ -109,7 +109,7 @@
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
struct udp_table udp_table __read_mostly;
@ -1713,7 +1713,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (sk != NULL) {
int ret;
sk_mark_ll(sk, skb);
sk_mark_napi_id(sk, skb);
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);
@ -2323,6 +2323,9 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
struct udphdr *uh;
int udp_offset = outer_hlen - tnl_hlen;
skb_reset_inner_headers(skb);
skb->encapsulation = 1;
skb->mac_len = mac_len;
skb_push(skb, outer_hlen);
@ -2345,7 +2348,6 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
uh->check = CSUM_MANGLED_0;
}
skb->ip_summed = CHECKSUM_NONE;
skb->protocol = protocol;
} while ((skb = skb->next));
out:


@ -632,6 +632,12 @@ insert_above:
return ln;
}
static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
{
return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
RTF_GATEWAY;
}
/*
* Insert routing information in a node.
*/
@ -646,6 +652,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
int add = (!info->nlh ||
(info->nlh->nlmsg_flags & NLM_F_CREATE));
int found = 0;
bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
ins = &fn->leaf;
@ -691,9 +698,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
* To avoid long list, we only had siblings if the
* route have a gateway.
*/
if (rt->rt6i_flags & RTF_GATEWAY &&
!(rt->rt6i_flags & RTF_EXPIRES) &&
!(iter->rt6i_flags & RTF_EXPIRES))
if (rt_can_ecmp &&
rt6_qualify_for_ecmp(iter))
rt->rt6i_nsiblings++;
}
@ -715,7 +721,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
/* Find the first route that have the same metric */
sibling = fn->leaf;
while (sibling) {
if (sibling->rt6i_metric == rt->rt6i_metric) {
if (sibling->rt6i_metric == rt->rt6i_metric &&
rt6_qualify_for_ecmp(sibling)) {
list_add_tail(&rt->rt6i_siblings,
&sibling->rt6i_siblings);
break;


@ -65,6 +65,12 @@
#include <linux/sysctl.h>
#endif
enum rt6_nud_state {
RT6_NUD_FAIL_HARD = -2,
RT6_NUD_FAIL_SOFT = -1,
RT6_NUD_SUCCEED = 1
};
static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
@ -531,28 +537,29 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
return 0;
}
static inline bool rt6_check_neigh(struct rt6_info *rt)
static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
struct neighbour *neigh;
bool ret = false;
enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
if (rt->rt6i_flags & RTF_NONEXTHOP ||
!(rt->rt6i_flags & RTF_GATEWAY))
return true;
return RT6_NUD_SUCCEED;
rcu_read_lock_bh();
neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
if (neigh) {
read_lock(&neigh->lock);
if (neigh->nud_state & NUD_VALID)
ret = true;
ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
else if (!(neigh->nud_state & NUD_FAILED))
ret = true;
ret = RT6_NUD_SUCCEED;
#endif
read_unlock(&neigh->lock);
} else if (IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
ret = true;
} else {
ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
}
rcu_read_unlock_bh();
@ -566,43 +573,52 @@ static int rt6_score_route(struct rt6_info *rt, int oif,
m = rt6_check_dev(rt, oif);
if (!m && (strict & RT6_LOOKUP_F_IFACE))
return -1;
return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
if (!rt6_check_neigh(rt) && (strict & RT6_LOOKUP_F_REACHABLE))
return -1;
if (strict & RT6_LOOKUP_F_REACHABLE) {
int n = rt6_check_neigh(rt);
if (n < 0)
return n;
}
return m;
}
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
int *mpri, struct rt6_info *match)
int *mpri, struct rt6_info *match,
bool *do_rr)
{
int m;
bool match_do_rr = false;
if (rt6_check_expired(rt))
goto out;
m = rt6_score_route(rt, oif, strict);
if (m < 0)
if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
match_do_rr = true;
m = 0; /* lowest valid score */
} else if (m < 0) {
goto out;
if (m > *mpri) {
if (strict & RT6_LOOKUP_F_REACHABLE)
rt6_probe(match);
*mpri = m;
match = rt;
} else if (strict & RT6_LOOKUP_F_REACHABLE) {
rt6_probe(rt);
}
if (strict & RT6_LOOKUP_F_REACHABLE)
rt6_probe(rt);
if (m > *mpri) {
*do_rr = match_do_rr;
*mpri = m;
match = rt;
}
out:
return match;
}
static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
struct rt6_info *rr_head,
u32 metric, int oif, int strict)
u32 metric, int oif, int strict,
bool *do_rr)
{
struct rt6_info *rt, *match;
int mpri = -1;
@ -610,10 +626,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
match = NULL;
for (rt = rr_head; rt && rt->rt6i_metric == metric;
rt = rt->dst.rt6_next)
match = find_match(rt, oif, strict, &mpri, match);
match = find_match(rt, oif, strict, &mpri, match, do_rr);
for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
rt = rt->dst.rt6_next)
match = find_match(rt, oif, strict, &mpri, match);
match = find_match(rt, oif, strict, &mpri, match, do_rr);
return match;
}
@ -622,15 +638,16 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
{
struct rt6_info *match, *rt0;
struct net *net;
bool do_rr = false;
rt0 = fn->rr_ptr;
if (!rt0)
fn->rr_ptr = rt0 = fn->leaf;
match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
&do_rr);
if (!match &&
(strict & RT6_LOOKUP_F_REACHABLE)) {
if (do_rr) {
struct rt6_info *next = rt0->dst.rt6_next;
/* no entries matched; do round-robin */
@ -1080,10 +1097,13 @@ static void ip6_link_failure(struct sk_buff *skb)
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
if (rt->rt6i_flags & RTF_CACHE)
rt6_update_expires(rt, 0);
else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
if (rt->rt6i_flags & RTF_CACHE) {
dst_hold(&rt->dst);
if (ip6_del_rt(rt))
dst_free(&rt->dst);
} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
rt->rt6i_node->fn_sernum = -1;
}
}
}


@ -63,7 +63,7 @@
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <asm/uaccess.h>
@ -1499,7 +1499,7 @@ process:
if (sk_filter(sk, skb))
goto discard_and_relse;
sk_mark_ll(sk, skb);
sk_mark_napi_id(sk, skb);
skb->dev = NULL;
bh_lock_sock_nested(sk);


@ -46,7 +46,7 @@
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet6_hashtables.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@ -844,7 +844,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (sk != NULL) {
int ret;
sk_mark_ll(sk, skb);
sk_mark_napi_id(sk, skb);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);


@ -821,7 +821,14 @@ static void qfq_make_eligible(struct qfq_sched *q)
unsigned long old_vslot = q->oldV >> q->min_slot_shift;
if (vslot != old_vslot) {
unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1;
unsigned long mask;
int last_flip_pos = fls(vslot ^ old_vslot);
if (last_flip_pos > 31) /* higher than the number of groups */
mask = ~0UL; /* make all groups eligible */
else
mask = (1UL << last_flip_pos) - 1;
qfq_move_groups(q, mask, IR, ER);
qfq_move_groups(q, mask, IB, EB);
}
@ -1003,9 +1010,61 @@ static inline void charge_actual_service(struct qfq_aggregate *agg)
agg->F = agg->S + (u64)service_received * agg->inv_w;
}
static inline void qfq_update_agg_ts(struct qfq_sched *q,
struct qfq_aggregate *agg,
enum update_reason reason);
/* Assign a reasonable start time for a new aggregate in group i.
* Admissible values for \hat(F) are multiples of \sigma_i
* no greater than V+\sigma_i . Larger values mean that
* we had a wraparound so we consider the timestamp to be stale.
*
* If F is not stale and F >= V then we set S = F.
* Otherwise we should assign S = V, but this may violate
* the ordering in EB (see [2]). So, if we have groups in ER,
* set S to the F_j of the first group j which would be blocking us.
* We are guaranteed not to move S backward because
* otherwise our group i would still be blocked.
*/
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
unsigned long mask;
u64 limit, roundedF;
int slot_shift = agg->grp->slot_shift;
roundedF = qfq_round_down(agg->F, slot_shift);
limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
/* timestamp was stale */
mask = mask_from(q->bitmaps[ER], agg->grp->index);
if (mask) {
struct qfq_group *next = qfq_ffs(q, mask);
if (qfq_gt(roundedF, next->F)) {
if (qfq_gt(limit, next->F))
agg->S = next->F;
else /* preserve timestamp correctness */
agg->S = limit;
return;
}
}
agg->S = q->V;
} else /* timestamp is not stale */
agg->S = agg->F;
}
/* Update the timestamps of agg before scheduling/rescheduling it for
* service. In particular, assign to agg->F its maximum possible
* value, i.e., the virtual finish time with which the aggregate
* should be labeled if it used all its budget once in service.
*/
static inline void
qfq_update_agg_ts(struct qfq_sched *q,
struct qfq_aggregate *agg, enum update_reason reason)
{
if (reason != requeue)
qfq_update_start(q, agg);
else /* just charge agg for the service received */
agg->S = agg->F;
agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
}
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
@ -1128,66 +1187,6 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
return agg;
}
/*
* Assign a reasonable start time for a new aggregate in group i.
* Admissible values for \hat(F) are multiples of \sigma_i
* no greater than V+\sigma_i . Larger values mean that
* we had a wraparound so we consider the timestamp to be stale.
*
* If F is not stale and F >= V then we set S = F.
* Otherwise we should assign S = V, but this may violate
* the ordering in EB (see [2]). So, if we have groups in ER,
* set S to the F_j of the first group j which would be blocking us.
* We are guaranteed not to move S backward because
* otherwise our group i would still be blocked.
*/
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
unsigned long mask;
u64 limit, roundedF;
int slot_shift = agg->grp->slot_shift;
roundedF = qfq_round_down(agg->F, slot_shift);
limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
/* timestamp was stale */
mask = mask_from(q->bitmaps[ER], agg->grp->index);
if (mask) {
struct qfq_group *next = qfq_ffs(q, mask);
if (qfq_gt(roundedF, next->F)) {
if (qfq_gt(limit, next->F))
agg->S = next->F;
else /* preserve timestamp correctness */
agg->S = limit;
return;
}
}
agg->S = q->V;
} else /* timestamp is not stale */
agg->S = agg->F;
}
/*
* Update the timestamps of agg before scheduling/rescheduling it for
* service. In particular, assign to agg->F its maximum possible
* value, i.e., the virtual finish time with which the aggregate
* should be labeled if it used all its budget once in service.
*/
static inline void
qfq_update_agg_ts(struct qfq_sched *q,
struct qfq_aggregate *agg, enum update_reason reason)
{
if (reason != requeue)
qfq_update_start(q, agg);
else /* just charge agg for the service received */
agg->S = agg->F;
agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
}
static void qfq_schedule_agg(struct qfq_sched *, struct qfq_aggregate *);
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);


@ -104,11 +104,11 @@
#include <linux/route.h>
#include <linux/sockios.h>
#include <linux/atalk.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL
unsigned int sysctl_net_ll_read __read_mostly;
unsigned int sysctl_net_ll_poll __read_mostly;
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif
static int sock_no_open(struct inode *irrelevant, struct file *dontcare);


@ -292,13 +292,7 @@ static int ib_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
if (str_size < 60) /* 60 = 19 * strlen("xx:") + strlen("xx\0") */
return 1;
sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:"
"%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
a->value[0], a->value[1], a->value[2], a->value[3],
a->value[4], a->value[5], a->value[6], a->value[7],
a->value[8], a->value[9], a->value[10], a->value[11],
a->value[12], a->value[13], a->value[14], a->value[15],
a->value[16], a->value[17], a->value[18], a->value[19]);
sprintf(str_buf, "%20phC", a->value);
return 0;
}