Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:
 "Here is a pile of bug fixes that accumulated while I was in Europe"

 1) In fixing kernel leaks to userspace during copying of socket
    addresses, we broke a case that used to work, namely the user
    providing a buffer larger than the in-kernel generic socket address
    structure.  This broke Ruby amongst other things.  Fix from Dan
    Carpenter.

 2) Fix regression added by byte queue limit support in 8139cp driver,
    from Yang Yingliang.

 3) The addition of MSG_SENDPAGE_NOTLAST buggered up a few sendpage
    implementations; they should just treat it the same as MSG_MORE.
    Fix from Richard Weinberger and Shawn Landden.

 4) Handle icmpv4 errors received on ipv6 SIT tunnels correctly, from
    Oussama Ghorbel.  In particular we should send an ICMPv6 unreachable
    in such situations.

 5) Fix some regressions in the recent genetlink fixes, in particular
    get the pmcraid driver to use the new safer interfaces correctly.
    From Johannes Berg.

 6) macvtap was converted to use a per-cpu set of statistics, but some
    code was still bumping tx_dropped elsewhere.  From Jason Wang.

 7) Fix build failure of xen-netback due to missing include on some
    architectures, from Andy Whitecroft.

 8) macvtap double counts received packets in statistics, fix from Vlad
    Yasevich.

 9) Fix various cases of using *_STATS_BH() when *_STATS() is more
    appropriate.  From Eric Dumazet and Hannes Frederic Sowa.

10) Pktgen ipsec mode doesn't update the ipv4 header length and checksum
    properly after encapsulation.  Fix from Fan Du.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
  net/mlx4_en: Remove selftest TX queues empty condition
  {pktgen, xfrm} Update IPv4 header total len and checksum after tranformation
  virtio_net: make all RX paths handle erors consistently
  virtio_net: fix error handling for mergeable buffers
  virtio_net: Fixed a trivial typo (fitler --> filter)
  netem: fix gemodel loss generator
  netem: fix loss 4 state model
  netem: missing break in ge loss generator
  net/hsr: Support iproute print_opt ('ip -details ...')
  net/hsr: Very small fix of comment style.
  MAINTAINERS: Added net/hsr/ maintainer
  ipv6: fix possible seqlock deadlock in ip6_finish_output2
  ixgbe: Make ixgbe_identify_qsfp_module_generic static
  ixgbe: turn NETIF_F_HW_L2FW_DOFFLOAD off by default
  ixgbe: ixgbe_fwd_ring_down needs to be static
  e1000: fix possible reset_task running after adapter down
  e1000: fix lockdep warning in e1000_reset_task
  e1000: prevent oops when adapter is being closed and reset simultaneously
  igb: Fixed Wake On LAN support
  inet: fix possible seqlock deadlocks
  ...
This commit is contained in: commit 5fc92de3c7
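The sendpage fix in item 3 recurs across several hunks below (crypto
hash and skcipher, and udp_sendpage): each handler gains the same
two-line guard at the top. A minimal sketch of the pattern — the
function name here is hypothetical, standing in for the per-protocol
handlers shown in the diffs:

    /* MSG_SENDPAGE_NOTLAST means more pages will follow; treating it
     * like MSG_MORE keeps the protocol from flushing the pending data
     * after every page.
     */
    static ssize_t example_sendpage(struct socket *sock, struct page *page,
                                    int offset, size_t size, int flags)
    {
        if (flags & MSG_SENDPAGE_NOTLAST)
            flags |= MSG_MORE;

        /* ...normal sendpage processing continues here... */
        return size;
    }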
@@ -4049,6 +4049,12 @@ W: http://www.pharscape.org
 S: Maintained
 F: drivers/net/usb/hso.c
 
+HSR NETWORK PROTOCOL
+M: Arvid Brodin <arvid.brodin@alten.se>
+L: netdev@vger.kernel.org
+S: Maintained
+F: net/hsr/
+
 HTCPEN TOUCHSCREEN DRIVER
 M: Pau Oliva Fora <pof@eslack.org>
 L: linux-input@vger.kernel.org
@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
     struct hash_ctx *ctx = ask->private;
     int err;
 
+    if (flags & MSG_SENDPAGE_NOTLAST)
+        flags |= MSG_MORE;
+
     lock_sock(sk);
     sg_init_table(ctx->sgl.sg, 1);
     sg_set_page(ctx->sgl.sg, page, size, offset);
@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
     struct skcipher_sg_list *sgl;
     int err = -EINVAL;
 
+    if (flags & MSG_SENDPAGE_NOTLAST)
+        flags |= MSG_MORE;
+
     lock_sock(sk);
     if (!ctx->more && ctx->used)
         goto unlock;
@@ -4110,7 +4110,7 @@ static int bond_check_params(struct bond_params *params)
         if (!miimon) {
             pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
             pr_warning("Forcing miimon to 100msec\n");
-            miimon = 100;
+            miimon = BOND_DEFAULT_MIIMON;
         }
     }
 
@@ -4147,7 +4147,7 @@ static int bond_check_params(struct bond_params *params)
         if (!miimon) {
             pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
             pr_warning("Forcing miimon to 100msec\n");
-            miimon = 100;
+            miimon = BOND_DEFAULT_MIIMON;
         }
     }
 
@@ -45,10 +45,15 @@ int bond_option_mode_set(struct bonding *bond, int mode)
         return -EPERM;
     }
 
-    if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
-        pr_err("%s: %s mode is incompatible with arp monitoring.\n",
-               bond->dev->name, bond_mode_tbl[mode].modename);
-        return -EINVAL;
+    if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
+        pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
+                bond->dev->name, bond_mode_tbl[mode].modename);
+        /* disable arp monitoring */
+        bond->params.arp_interval = 0;
+        /* set miimon to default value */
+        bond->params.miimon = BOND_DEFAULT_MIIMON;
+        pr_info("%s: Setting MII monitoring interval to %d.\n",
+                bond->dev->name, bond->params.miimon);
     }
 
     /* don't cache arp_validate between modes */
@@ -523,9 +523,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
         ret = -EINVAL;
         goto out;
     }
-    if (bond->params.mode == BOND_MODE_ALB ||
-        bond->params.mode == BOND_MODE_TLB ||
-        bond->params.mode == BOND_MODE_8023AD) {
+    if (BOND_NO_USES_ARP(bond->params.mode)) {
         pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
                 bond->dev->name, bond->dev->name);
         ret = -EINVAL;
@@ -35,6 +35,8 @@
 
 #define BOND_MAX_ARP_TARGETS 16
 
+#define BOND_DEFAULT_MIIMON 100
+
 #define IS_UP(dev) \
     ((((dev)->flags & IFF_UP) == IFF_UP) && \
      netif_running(dev) && \
@@ -55,6 +57,11 @@
      ((mode) == BOND_MODE_TLB) || \
      ((mode) == BOND_MODE_ALB))
 
+#define BOND_NO_USES_ARP(mode) \
+    (((mode) == BOND_MODE_8023AD) || \
+     ((mode) == BOND_MODE_TLB) || \
+     ((mode) == BOND_MODE_ALB))
+
 #define TX_QUEUE_OVERRIDE(mode) \
     (((mode) == BOND_MODE_ACTIVEBACKUP) || \
      ((mode) == BOND_MODE_ROUNDROBIN))
@@ -712,22 +712,31 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
     return 0;
 }
 
-static int c_can_get_berr_counter(const struct net_device *dev,
-                                  struct can_berr_counter *bec)
+static int __c_can_get_berr_counter(const struct net_device *dev,
+                                    struct can_berr_counter *bec)
 {
     unsigned int reg_err_counter;
     struct c_can_priv *priv = netdev_priv(dev);
 
-    c_can_pm_runtime_get_sync(priv);
-
     reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
     bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
                 ERR_CNT_REC_SHIFT;
     bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
 
+    return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+                                  struct can_berr_counter *bec)
+{
+    struct c_can_priv *priv = netdev_priv(dev);
+    int err;
+
+    c_can_pm_runtime_get_sync(priv);
+    err = __c_can_get_berr_counter(dev, bec);
     c_can_pm_runtime_put_sync(priv);
 
-    return 0;
+    return err;
 }
 
 /*
@@ -754,6 +763,7 @@ static void c_can_do_tx(struct net_device *dev)
         if (!(val & (1 << (msg_obj_no - 1)))) {
             can_get_echo_skb(dev,
                     msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+            c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
             stats->tx_bytes += priv->read_reg(priv,
                     C_CAN_IFACE(MSGCTRL_REG, 0))
                     & IF_MCONT_DLC_MASK;
@@ -872,7 +882,7 @@ static int c_can_handle_state_change(struct net_device *dev,
     if (unlikely(!skb))
         return 0;
 
-    c_can_get_berr_counter(dev, &bec);
+    __c_can_get_berr_counter(dev, &bec);
     reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
     rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
                 ERR_CNT_RP_SHIFT;
 
@@ -1020,13 +1020,13 @@ static int flexcan_probe(struct platform_device *pdev)
             dev_err(&pdev->dev, "no ipg clock defined\n");
             return PTR_ERR(clk_ipg);
         }
-        clock_freq = clk_get_rate(clk_ipg);
 
         clk_per = devm_clk_get(&pdev->dev, "per");
         if (IS_ERR(clk_per)) {
             dev_err(&pdev->dev, "no per clock defined\n");
             return PTR_ERR(clk_per);
         }
+        clock_freq = clk_get_rate(clk_per);
     }
 
     mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -494,20 +494,20 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
     uint8_t isrc, status;
     int n = 0;
 
-    /* Shared interrupts and IRQ off? */
-    if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
-        return IRQ_NONE;
-
     if (priv->pre_irq)
         priv->pre_irq(priv);
 
+    /* Shared interrupts and IRQ off? */
+    if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
+        goto out;
+
     while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
            (n < SJA1000_MAX_IRQ)) {
-        n++;
+
         status = priv->read_reg(priv, SJA1000_SR);
         /* check for absent controller due to hw unplug */
         if (status == 0xFF && sja1000_is_absent(priv))
-            return IRQ_NONE;
+            goto out;
 
         if (isrc & IRQ_WUI)
             netdev_warn(dev, "wakeup interrupt\n");
@@ -535,7 +535,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
             status = priv->read_reg(priv, SJA1000_SR);
             /* check for absent controller */
             if (status == 0xFF && sja1000_is_absent(priv))
-                return IRQ_NONE;
+                goto out;
         }
     }
     if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
@@ -543,8 +543,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
             if (sja1000_err(dev, isrc, status))
                 break;
         }
+        n++;
     }
-
+out:
     if (priv->post_irq)
         priv->post_irq(priv);
 
@@ -10629,10 +10629,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
 static ssize_t tg3_show_temp(struct device *dev,
                              struct device_attribute *devattr, char *buf)
 {
-    struct pci_dev *pdev = to_pci_dev(dev);
-    struct net_device *netdev = pci_get_drvdata(pdev);
-    struct tg3 *tp = netdev_priv(netdev);
     struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+    struct tg3 *tp = dev_get_drvdata(dev);
     u32 temperature;
 
     spin_lock_bh(&tp->lock);
@@ -10650,29 +10648,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
                           TG3_TEMP_MAX_OFFSET);
 
-static struct attribute *tg3_attributes[] = {
+static struct attribute *tg3_attrs[] = {
     &sensor_dev_attr_temp1_input.dev_attr.attr,
     &sensor_dev_attr_temp1_crit.dev_attr.attr,
     &sensor_dev_attr_temp1_max.dev_attr.attr,
     NULL
 };
-
-static const struct attribute_group tg3_group = {
-    .attrs = tg3_attributes,
-};
+ATTRIBUTE_GROUPS(tg3);
 
 static void tg3_hwmon_close(struct tg3 *tp)
 {
     if (tp->hwmon_dev) {
         hwmon_device_unregister(tp->hwmon_dev);
         tp->hwmon_dev = NULL;
-        sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
     }
 }
 
 static void tg3_hwmon_open(struct tg3 *tp)
 {
-    int i, err;
+    int i;
     u32 size = 0;
     struct pci_dev *pdev = tp->pdev;
     struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
@@ -10690,18 +10684,11 @@ static void tg3_hwmon_open(struct tg3 *tp)
     if (!size)
         return;
 
-    /* Register hwmon sysfs hooks */
-    err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
-    if (err) {
-        dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
-        return;
-    }
-
-    tp->hwmon_dev = hwmon_device_register(&pdev->dev);
+    tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
+                                                      tp, tg3_groups);
     if (IS_ERR(tp->hwmon_dev)) {
         tp->hwmon_dev = NULL;
         dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
-        sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
     }
 }
 
@@ -503,6 +503,7 @@ struct be_adapter {
 };
 
 #define be_physfn(adapter)    (!adapter->virtfn)
+#define be_virtfn(adapter)    (adapter->virtfn)
 #define sriov_enabled(adapter)    (adapter->num_vfs > 0)
 #define sriov_want(adapter)    (be_physfn(adapter) && \
                                 (num_vfs || pci_num_vf(adapter->pdev)))
@@ -1032,6 +1032,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
     } else {
         req->hdr.version = 2;
         req->page_size = 1; /* 1 for 4K */
+
+        /* coalesce-wm field in this cmd is not relevant to Lancer.
+         * Lancer uses COMMON_MODIFY_CQ to set this field
+         */
+        if (!lancer_chip(adapter))
+            AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+                          ctxt, coalesce_wm);
         AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
                       no_delay);
         AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
@@ -2658,8 +2658,8 @@ static int be_close(struct net_device *netdev)
 
     be_roce_dev_close(adapter);
 
-    for_all_evt_queues(adapter, eqo, i) {
-        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+    if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+        for_all_evt_queues(adapter, eqo, i) {
             napi_disable(&eqo->napi);
             be_disable_busy_poll(eqo);
         }
@@ -3253,12 +3253,10 @@ static int be_mac_setup(struct be_adapter *adapter)
         memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
     }
 
-    /* On BE3 VFs this cmd may fail due to lack of privilege.
-     * Ignore the failure as in this case pmac_id is fetched
-     * in the IFACE_CREATE cmd.
-     */
-    be_cmd_pmac_add(adapter, mac, adapter->if_handle,
-                    &adapter->pmac_id[0], 0);
+    /* For BE3-R VFs, the PF programs the initial MAC address */
+    if (!(BEx_chip(adapter) && be_virtfn(adapter)))
+        be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+                        &adapter->pmac_id[0], 0);
     return 0;
 }
 
@@ -4599,6 +4597,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
     if (adapter->wol)
         be_setup_wol(adapter, true);
 
+    be_intr_set(adapter, false);
     cancel_delayed_work_sync(&adapter->func_recovery_work);
 
     netif_device_detach(netdev);
@@ -4634,6 +4633,7 @@ static int be_resume(struct pci_dev *pdev)
     if (status)
         return status;
 
+    be_intr_set(adapter, true);
     /* tell fw we're ready to fire cmds */
     status = be_cmd_fw_init(adapter);
     if (status)
@@ -83,6 +83,11 @@ struct e1000_adapter;
 
 #define E1000_MAX_INTR 10
 
+/*
+ * Count for polling __E1000_RESET condition every 10-20msec.
+ */
+#define E1000_CHECK_RESET_COUNT 50
+
 /* TX/RX descriptor defines */
 #define E1000_DEFAULT_TXD 256
 #define E1000_MAX_TXD 256
@@ -312,8 +317,6 @@ struct e1000_adapter {
     struct delayed_work watchdog_task;
     struct delayed_work fifo_stall_task;
    struct delayed_work phy_info_task;
-
-    struct mutex mutex;
 };
 
 enum e1000_state_t {
@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
 {
     set_bit(__E1000_DOWN, &adapter->flags);
 
+    cancel_delayed_work_sync(&adapter->watchdog_task);
+
+    /*
+     * Since the watchdog task can reschedule other tasks, we should cancel
+     * it first, otherwise we can run into the situation when a work is
+     * still running after the adapter has been turned down.
+     */
+
+    cancel_delayed_work_sync(&adapter->phy_info_task);
+    cancel_delayed_work_sync(&adapter->fifo_stall_task);
+
     /* Only kill reset task if adapter is not resetting */
     if (!test_bit(__E1000_RESETTING, &adapter->flags))
         cancel_work_sync(&adapter->reset_task);
-
-    cancel_delayed_work_sync(&adapter->watchdog_task);
-    cancel_delayed_work_sync(&adapter->phy_info_task);
-    cancel_delayed_work_sync(&adapter->fifo_stall_task);
 }
 
 void e1000_down(struct e1000_adapter *adapter)
@@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter)
     e1000_clean_all_rx_rings(adapter);
 }
 
-static void e1000_reinit_safe(struct e1000_adapter *adapter)
-{
-    while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
-        msleep(1);
-    mutex_lock(&adapter->mutex);
-    e1000_down(adapter);
-    e1000_up(adapter);
-    mutex_unlock(&adapter->mutex);
-    clear_bit(__E1000_RESETTING, &adapter->flags);
-}
-
 void e1000_reinit_locked(struct e1000_adapter *adapter)
 {
+    /* if rtnl_lock is not held the call path is bogus */
+    ASSERT_RTNL();
     WARN_ON(in_interrupt());
     while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
         msleep(1);
@@ -1316,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
     e1000_irq_disable(adapter);
 
     spin_lock_init(&adapter->stats_lock);
-    mutex_init(&adapter->mutex);
 
     set_bit(__E1000_DOWN, &adapter->flags);
 
@@ -1440,6 +1433,10 @@ static int e1000_close(struct net_device *netdev)
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
     struct e1000_hw *hw = &adapter->hw;
+    int count = E1000_CHECK_RESET_COUNT;
+
+    while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+        usleep_range(10000, 20000);
 
     WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
     e1000_down(adapter);
@@ -2325,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work)
     struct e1000_adapter *adapter = container_of(work,
                                                  struct e1000_adapter,
                                                  phy_info_task.work);
-    if (test_bit(__E1000_DOWN, &adapter->flags))
-        return;
-    mutex_lock(&adapter->mutex);
+
     e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
-    mutex_unlock(&adapter->mutex);
 }
 
 /**
@@ -2345,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
     struct net_device *netdev = adapter->netdev;
     u32 tctl;
 
-    if (test_bit(__E1000_DOWN, &adapter->flags))
-        return;
-    mutex_lock(&adapter->mutex);
     if (atomic_read(&adapter->tx_fifo_stall)) {
         if ((er32(TDT) == er32(TDH)) &&
             (er32(TDFT) == er32(TDFH)) &&
@@ -2368,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
             schedule_delayed_work(&adapter->fifo_stall_task, 1);
         }
     }
-    mutex_unlock(&adapter->mutex);
 }
 
 bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2422,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work)
     struct e1000_tx_ring *txdr = adapter->tx_ring;
     u32 link, tctl;
 
-    if (test_bit(__E1000_DOWN, &adapter->flags))
-        return;
-
-    mutex_lock(&adapter->mutex);
     link = e1000_has_link(adapter);
     if ((netif_carrier_ok(netdev)) && link)
         goto link_up;
@@ -2516,7 +2502,7 @@ link_up:
             adapter->tx_timeout_count++;
             schedule_work(&adapter->reset_task);
             /* exit immediately since reset is imminent */
-            goto unlock;
+            return;
         }
     }
 
@@ -2544,9 +2530,6 @@ link_up:
     /* Reschedule the task */
     if (!test_bit(__E1000_DOWN, &adapter->flags))
         schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
-
-unlock:
-    mutex_unlock(&adapter->mutex);
 }
 
 enum latency_range {
@@ -3495,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work)
     struct e1000_adapter *adapter =
         container_of(work, struct e1000_adapter, reset_task);
 
-    if (test_bit(__E1000_DOWN, &adapter->flags))
-        return;
     e_err(drv, "Reset adapter\n");
-    e1000_reinit_safe(adapter);
+    e1000_reinit_locked(adapter);
 }
 
 /**
@@ -4963,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 
     netif_device_detach(netdev);
 
     if (netif_running(netdev)) {
+        int count = E1000_CHECK_RESET_COUNT;
+
+        while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+            usleep_range(10000, 20000);
+
         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
         e1000_down(adapter);
     }
@@ -2062,14 +2062,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
     struct igb_adapter *adapter = netdev_priv(netdev);
 
-    wol->supported = WAKE_UCAST | WAKE_MCAST |
-                     WAKE_BCAST | WAKE_MAGIC |
-                     WAKE_PHY;
     wol->wolopts = 0;
 
     if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
         return;
 
+    wol->supported = WAKE_UCAST | WAKE_MCAST |
+                     WAKE_BCAST | WAKE_MAGIC |
+                     WAKE_PHY;
+
     /* apply any specific unsupported masks here */
     switch (adapter->hw.device_id) {
     default:
@@ -4251,8 +4251,8 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
     rx_ring->l2_accel_priv = NULL;
 }
 
-int ixgbe_fwd_ring_down(struct net_device *vdev,
-                        struct ixgbe_fwd_adapter *accel)
+static int ixgbe_fwd_ring_down(struct net_device *vdev,
+                               struct ixgbe_fwd_adapter *accel)
 {
     struct ixgbe_adapter *adapter = accel->real_adapter;
     unsigned int rxbase = accel->rx_base_queue;
@@ -7986,10 +7986,9 @@ skip_sriov:
                        NETIF_F_TSO |
                        NETIF_F_TSO6 |
                        NETIF_F_RXHASH |
-                       NETIF_F_RXCSUM |
-                       NETIF_F_HW_L2FW_DOFFLOAD;
+                       NETIF_F_RXCSUM;
 
-    netdev->hw_features = netdev->features;
+    netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
 
     switch (adapter->hw.mac.type) {
     case ixgbe_mac_82599EB:
@@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl);
 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
 
 /**
  * ixgbe_identify_phy_generic - Get physical layer module
@@ -1164,7 +1165,7 @@ err_read_i2c_eeprom:
  *
  * Searches for and identifies the QSFP module and assigns appropriate PHY type
  **/
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
 {
     struct ixgbe_adapter *adapter = hw->back;
     s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
@@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
                                         u16 *list_offset,
                                         u16 *data_offset);
@@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
 {
     struct mlx4_en_priv *priv = netdev_priv(dev);
     struct mlx4_en_dev *mdev = priv->mdev;
-    struct mlx4_en_tx_ring *tx_ring;
     int i, carrier_ok;
 
     memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
         carrier_ok = netif_carrier_ok(dev);
 
         netif_carrier_off(dev);
-retry_tx:
         /* Wait until all tx queues are empty.
          * there should not be any additional incoming traffic
          * since we turned the carrier off */
         msleep(200);
-        for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
-            tx_ring = priv->tx_ring[i];
-            if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
-                goto retry_tx;
-        }
 
         if (priv->mdev->dev->caps.flags &
             MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp)
                  le32_to_cpu(txd->opts1) & 0xffff,
                  PCI_DMA_TODEVICE);
 
-        bytes_compl += skb->len;
-        pkts_compl++;
-
         if (status & LastFrag) {
             if (status & (TxError | TxFIFOUnder)) {
                 netif_dbg(cp, tx_err, cp->dev,
@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp)
                 netif_dbg(cp, tx_done, cp->dev,
                           "tx done, slot %d\n", tx_tail);
             }
+            bytes_compl += skb->len;
+            pkts_compl++;
             dev_kfree_skb_irq(skb);
         }
 
@@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
     rtl_writephy(tp, 0x14, 0x9065);
     rtl_writephy(tp, 0x14, 0x1065);
 
+    /* Check ALDPS bit, disable it if enabled */
+    rtl_writephy(tp, 0x1f, 0x0a43);
+    if (rtl_readphy(tp, 0x10) & 0x0004)
+        rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
+
     rtl_writephy(tp, 0x1f, 0x0000);
 }
 
@@ -75,6 +75,8 @@ struct efx_mcdi_mon {
     unsigned long last_update;
     struct device *device;
     struct efx_mcdi_mon_attribute *attrs;
+    struct attribute_group group;
+    const struct attribute_group *groups[2];
     unsigned int n_attrs;
 };
 
@@ -139,17 +139,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx)
     return rc;
 }
 
-static ssize_t efx_mcdi_mon_show_name(struct device *dev,
-                                      struct device_attribute *attr,
-                                      char *buf)
-{
-    return sprintf(buf, "%s\n", KBUILD_MODNAME);
-}
-
 static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
                                   efx_dword_t *entry)
 {
-    struct efx_nic *efx = dev_get_drvdata(dev);
+    struct efx_nic *efx = dev_get_drvdata(dev->parent);
     struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
     int rc;
 
@@ -263,7 +256,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev,
                efx_mcdi_sensor_type[mon_attr->type].label);
 }
 
-static int
+static void
 efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
                       ssize_t (*reader)(struct device *,
                                         struct device_attribute *, char *),
@@ -272,7 +265,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
 {
     struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
     struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
-    int rc;
 
     strlcpy(attr->name, name, sizeof(attr->name));
     attr->index = index;
@@ -286,10 +278,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
     attr->dev_attr.attr.name = attr->name;
     attr->dev_attr.attr.mode = S_IRUGO;
     attr->dev_attr.show = reader;
-    rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr);
-    if (rc == 0)
-        ++hwmon->n_attrs;
-    return rc;
+    hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
 }
 
 int efx_mcdi_mon_probe(struct efx_nic *efx)
@@ -338,26 +327,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
     efx_mcdi_mon_update(efx);
 
     /* Allocate space for the maximum possible number of
-     * attributes for this set of sensors: name of the driver plus
+     * attributes for this set of sensors:
      * value, min, max, crit, alarm and label for each sensor.
      */
-    n_attrs = 1 + 6 * n_sensors;
+    n_attrs = 6 * n_sensors;
     hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
     if (!hwmon->attrs) {
         rc = -ENOMEM;
         goto fail;
     }
-
-    hwmon->device = hwmon_device_register(&efx->pci_dev->dev);
-    if (IS_ERR(hwmon->device)) {
-        rc = PTR_ERR(hwmon->device);
+    hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
+                                 GFP_KERNEL);
+    if (!hwmon->group.attrs) {
+        rc = -ENOMEM;
        goto fail;
     }
 
-    rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
-    if (rc)
-        goto fail;
-
     for (i = 0, j = -1, type = -1; ; i++) {
         enum efx_hwmon_type hwmon_type;
         const char *hwmon_prefix;
@@ -372,7 +357,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
             page = type / 32;
             j = -1;
             if (page == n_pages)
-                return 0;
+                goto hwmon_register;
 
             MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
                            page);
@@ -453,28 +438,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
         if (min1 != max1) {
             snprintf(name, sizeof(name), "%s%u_input",
                      hwmon_prefix, hwmon_index);
-            rc = efx_mcdi_mon_add_attr(
+            efx_mcdi_mon_add_attr(
                 efx, name, efx_mcdi_mon_show_value, i, type, 0);
-            if (rc)
-                goto fail;
 
             if (hwmon_type != EFX_HWMON_POWER) {
                 snprintf(name, sizeof(name), "%s%u_min",
                          hwmon_prefix, hwmon_index);
-                rc = efx_mcdi_mon_add_attr(
+                efx_mcdi_mon_add_attr(
                     efx, name, efx_mcdi_mon_show_limit,
                     i, type, min1);
-                if (rc)
-                    goto fail;
             }
 
             snprintf(name, sizeof(name), "%s%u_max",
                      hwmon_prefix, hwmon_index);
-            rc = efx_mcdi_mon_add_attr(
+            efx_mcdi_mon_add_attr(
                 efx, name, efx_mcdi_mon_show_limit,
                 i, type, max1);
-            if (rc)
-                goto fail;
 
             if (min2 != max2) {
                 /* Assume max2 is critical value.
@@ -482,32 +461,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
                 */
                 snprintf(name, sizeof(name), "%s%u_crit",
                          hwmon_prefix, hwmon_index);
-                rc = efx_mcdi_mon_add_attr(
+                efx_mcdi_mon_add_attr(
                     efx, name, efx_mcdi_mon_show_limit,
                     i, type, max2);
-                if (rc)
-                    goto fail;
             }
         }
 
         snprintf(name, sizeof(name), "%s%u_alarm",
                  hwmon_prefix, hwmon_index);
-        rc = efx_mcdi_mon_add_attr(
+        efx_mcdi_mon_add_attr(
             efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
-        if (rc)
-            goto fail;
 
         if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
             efx_mcdi_sensor_type[type].label) {
             snprintf(name, sizeof(name), "%s%u_label",
                      hwmon_prefix, hwmon_index);
-            rc = efx_mcdi_mon_add_attr(
+            efx_mcdi_mon_add_attr(
                 efx, name, efx_mcdi_mon_show_label, i, type, 0);
-            if (rc)
-                goto fail;
         }
     }
 
+hwmon_register:
+    hwmon->groups[0] = &hwmon->group;
+    hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
+                                                      KBUILD_MODNAME, NULL,
+                                                      hwmon->groups);
+    if (IS_ERR(hwmon->device)) {
+        rc = PTR_ERR(hwmon->device);
+        goto fail;
+    }
+
     return 0;
 
 fail:
     efx_mcdi_mon_remove(efx);
     return rc;
@@ -516,14 +501,11 @@ fail:
 void efx_mcdi_mon_remove(struct efx_nic *efx)
 {
     struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
-    unsigned int i;
 
-    for (i = 0; i < hwmon->n_attrs; i++)
-        device_remove_file(&efx->pci_dev->dev,
-                           &hwmon->attrs[i].dev_attr);
-    kfree(hwmon->attrs);
     if (hwmon->device)
         hwmon_device_unregister(hwmon->device);
+    kfree(hwmon->attrs);
+    kfree(hwmon->group.attrs);
     efx_nic_free_buffer(efx, &hwmon->dma_buf);
 }
 
@@ -46,7 +46,8 @@
     defined(CONFIG_MACH_LITTLETON) ||\
     defined(CONFIG_MACH_ZYLONITE2) ||\
     defined(CONFIG_ARCH_VIPER) ||\
-    defined(CONFIG_MACH_STARGATE2)
+    defined(CONFIG_MACH_STARGATE2) ||\
+    defined(CONFIG_ARCH_VERSATILE)
 
 #include <asm/mach-types.h>
 
@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define SMC_outl(v, a, r)    writel(v, (a) + (r))
 #define SMC_insl(a, r, p, l)    readsl((a) + (r), p, l)
 #define SMC_outsl(a, r, p, l)    writesl((a) + (r), p, l)
+#define SMC_insw(a, r, p, l)    readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l)    writesw((a) + (r), p, l)
 #define SMC_IRQ_FLAGS    (-1)    /* from resource */
 
 /* We actually can't write halfwords properly if not word aligned */
@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define RPC_LSA_DEFAULT    RPC_LED_TX_RX
 #define RPC_LSB_DEFAULT    RPC_LED_100_10
 
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT    1
-#define SMC_CAN_USE_16BIT    1
-#define SMC_CAN_USE_32BIT    1
-#define SMC_NOWAIT    1
-
-#define SMC_inb(a, r)    readb((a) + (r))
-#define SMC_inw(a, r)    readw((a) + (r))
-#define SMC_inl(a, r)    readl((a) + (r))
-#define SMC_outb(v, a, r)    writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)    writew(v, (a) + (r))
-#define SMC_outl(v, a, r)    writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)    readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)    writesl((a) + (r), p, l)
-#define SMC_IRQ_FLAGS    (-1)    /* from resource */
-
 #elif defined(CONFIG_MN10300)
 
 /*
@@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget)
     unsigned int rx_done;
     unsigned long flags;
 
-    spin_lock_irqsave(&vptr->lock, flags);
     /*
     * Do rx and tx twice for performance (taken from the VIA
     * out-of-tree driver).
     */
-    rx_done = velocity_rx_srv(vptr, budget / 2);
+    rx_done = velocity_rx_srv(vptr, budget);
+    spin_lock_irqsave(&vptr->lock, flags);
     velocity_tx_srv(vptr);
-    rx_done += velocity_rx_srv(vptr, budget - rx_done);
-    velocity_tx_srv(vptr);
 
     /* If budget not fully consumed, exit the polling mode */
     if (rx_done < budget) {
         napi_complete(napi);
@@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
         if (ret < 0)
             goto out_free_tmp_vptr_1;
 
+        napi_disable(&vptr->napi);
+
         spin_lock_irqsave(&vptr->lock, flags);
 
         netif_stop_queue(dev);
@@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 
         velocity_give_many_rx_descs(vptr);
 
+        napi_enable(&vptr->napi);
+
         mac_enable_int(vptr->mac_regs);
         netif_start_queue(dev);
 
@@ -744,7 +744,7 @@ err:
     rcu_read_lock();
     vlan = rcu_dereference(q->vlan);
     if (vlan)
-        vlan->dev->stats.tx_dropped++;
+        this_cpu_inc(vlan->pcpu_stats->tx_dropped);
     rcu_read_unlock();
 
     return err;
@@ -767,7 +767,6 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
                                 const struct sk_buff *skb,
                                 const struct iovec *iv, int len)
 {
-    struct macvlan_dev *vlan;
     int ret;
     int vnet_hdr_len = 0;
     int vlan_offset = 0;
@@ -821,15 +820,6 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
     copied += len;
 
 done:
-    rcu_read_lock();
-    vlan = rcu_dereference(q->vlan);
-    if (vlan) {
-        preempt_disable();
-        macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
-        preempt_enable();
-    }
-    rcu_read_unlock();
-
     return ret ? ret : copied;
 }
 
@@ -64,6 +64,7 @@
 
 #define PHY_ID_VSC8234    0x000fc620
 #define PHY_ID_VSC8244    0x000fc6c0
+#define PHY_ID_VSC8514    0x00070670
 #define PHY_ID_VSC8574    0x000704a0
 #define PHY_ID_VSC8662    0x00070660
 #define PHY_ID_VSC8221    0x000fc550
@@ -131,6 +132,7 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
         err = phy_write(phydev, MII_VSC8244_IMASK,
             (phydev->drv->phy_id == PHY_ID_VSC8234 ||
              phydev->drv->phy_id == PHY_ID_VSC8244 ||
+             phydev->drv->phy_id == PHY_ID_VSC8514 ||
              phydev->drv->phy_id == PHY_ID_VSC8574) ?
             MII_VSC8244_IMASK_MASK :
             MII_VSC8221_IMASK_MASK);
@@ -245,6 +247,18 @@ static struct phy_driver vsc82xx_driver[] = {
     .ack_interrupt  = &vsc824x_ack_interrupt,
     .config_intr    = &vsc82xx_config_intr,
     .driver         = { .owner = THIS_MODULE,},
+}, {
+    .phy_id         = PHY_ID_VSC8514,
+    .name           = "Vitesse VSC8514",
+    .phy_id_mask    = 0x000ffff0,
+    .features       = PHY_GBIT_FEATURES,
+    .flags          = PHY_HAS_INTERRUPT,
+    .config_init    = &vsc824x_config_init,
+    .config_aneg    = &vsc82x4_config_aneg,
+    .read_status    = &genphy_read_status,
+    .ack_interrupt  = &vsc824x_ack_interrupt,
+    .config_intr    = &vsc82xx_config_intr,
+    .driver         = { .owner = THIS_MODULE,},
 }, {
     .phy_id         = PHY_ID_VSC8574,
     .name           = "Vitesse VSC8574",
@@ -315,6 +329,7 @@ module_exit(vsc82xx_exit);
 static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
     { PHY_ID_VSC8234, 0x000ffff0 },
     { PHY_ID_VSC8244, 0x000fffc0 },
+    { PHY_ID_VSC8514, 0x000ffff0 },
     { PHY_ID_VSC8574, 0x000ffff0 },
     { PHY_ID_VSC8662, 0x000ffff0 },
     { PHY_ID_VSC8221, 0x000ffff0 },
@@ -1366,6 +1366,8 @@ static int team_user_linkup_option_get(struct team *team,
     return 0;
 }
 
+static void __team_carrier_check(struct team *team);
+
 static int team_user_linkup_option_set(struct team *team,
                                        struct team_gsetter_ctx *ctx)
 {
@@ -1373,6 +1375,7 @@ static int team_user_linkup_option_set(struct team *team,
 
     port->user.linkup = ctx->data.bool_val;
     team_refresh_port_linkup(port);
+    __team_carrier_check(port->team);
     return 0;
 }
 
@@ -1392,6 +1395,7 @@ static int team_user_linkup_en_option_set(struct team *team,
 
     port->user.linkup_enabled = ctx->data.bool_val;
     team_refresh_port_linkup(port);
+    __team_carrier_check(port->team);
     return 0;
 }
 
@@ -299,35 +299,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
     return skb;
 }
 
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
+static struct sk_buff *receive_small(void *buf, unsigned int len)
 {
-    struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
-    struct sk_buff *curr_skb = head_skb;
-    char *buf;
-    struct page *page;
-    int num_buf, len, offset;
+    struct sk_buff * skb = buf;
+
+    len -= sizeof(struct virtio_net_hdr);
+    skb_trim(skb, len);
+
+    return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+                                   struct receive_queue *rq,
+                                   void *buf,
+                                   unsigned int len)
+{
+    struct page *page = buf;
+    struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+
+    if (unlikely(!skb))
+        goto err;
+
+    return skb;
+
+err:
+    dev->stats.rx_dropped++;
+    give_pages(rq, page);
+    return NULL;
+}
+
+static struct sk_buff *receive_mergeable(struct net_device *dev,
+                                         struct receive_queue *rq,
+                                         void *buf,
+                                         unsigned int len)
+{
+    struct skb_vnet_hdr *hdr = buf;
+    int num_buf = hdr->mhdr.num_buffers;
+    struct page *page = virt_to_head_page(buf);
+    int offset = buf - page_address(page);
+    struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
+                                           MERGE_BUFFER_LEN);
+    struct sk_buff *curr_skb = head_skb;
 
+    if (unlikely(!curr_skb))
+        goto err_skb;
 
-    num_buf = hdr->mhdr.num_buffers;
     while (--num_buf) {
-        int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+        int num_skb_frags;
+
         buf = virtqueue_get_buf(rq->vq, &len);
         if (unlikely(!buf)) {
-            pr_debug("%s: rx error: %d buffers missing\n",
-                     head_skb->dev->name, hdr->mhdr.num_buffers);
-            head_skb->dev->stats.rx_length_errors++;
-            return -EINVAL;
+            pr_debug("%s: rx error: %d buffers out of %d missing\n",
+                     dev->name, num_buf, hdr->mhdr.num_buffers);
+            dev->stats.rx_length_errors++;
+            goto err_buf;
         }
         if (unlikely(len > MERGE_BUFFER_LEN)) {
             pr_debug("%s: rx error: merge buffer too long\n",
-                     head_skb->dev->name);
+                     dev->name);
             len = MERGE_BUFFER_LEN;
         }
+
+        page = virt_to_head_page(buf);
+        --rq->num;
+
+        num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
         if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
             struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-            if (unlikely(!nskb)) {
-                head_skb->dev->stats.rx_dropped++;
-                return -ENOMEM;
-            }
 
+            if (unlikely(!nskb))
+                goto err_skb;
             if (curr_skb == head_skb)
                 skb_shinfo(curr_skb)->frag_list = nskb;
             else
@@ -341,8 +382,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
             head_skb->len += len;
             head_skb->truesize += MERGE_BUFFER_LEN;
         }
-        page = virt_to_head_page(buf);
-        offset = buf - (char *)page_address(page);
+        offset = buf - page_address(page);
         if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
             put_page(page);
             skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
@@ -351,9 +391,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
             skb_add_rx_frag(curr_skb, num_skb_frags, page,
                             offset, len, MERGE_BUFFER_LEN);
         }
+    }
+
+    return head_skb;
+
+err_skb:
+    put_page(page);
+    while (--num_buf) {
+        buf = virtqueue_get_buf(rq->vq, &len);
+        if (unlikely(!buf)) {
+            pr_debug("%s: rx error: %d buffers missing\n",
+                     dev->name, num_buf);
+            dev->stats.rx_length_errors++;
+            break;
+        }
+        page = virt_to_head_page(buf);
+        put_page(page);
+        --rq->num;
     }
-    return 0;
+err_buf:
+    dev->stats.rx_dropped++;
+    dev_kfree_skb(head_skb);
+    return NULL;
 }
 
 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
@@ -362,7 +421,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
     struct net_device *dev = vi->dev;
     struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
     struct sk_buff *skb;
-    struct page *page;
     struct skb_vnet_hdr *hdr;
 
     if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
@@ -377,33 +435,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
         return;
     }
 
-    if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-        skb = buf;
-        len -= sizeof(struct virtio_net_hdr);
-        skb_trim(skb, len);
-    } else if (vi->mergeable_rx_bufs) {
-        struct page *page = virt_to_head_page(buf);
-        skb = page_to_skb(rq, page,
-                          (char *)buf - (char *)page_address(page),
-                          len, MERGE_BUFFER_LEN);
-        if (unlikely(!skb)) {
-            dev->stats.rx_dropped++;
-            put_page(page);
-            return;
-        }
-        if (receive_mergeable(rq, skb)) {
-            dev_kfree_skb(skb);
-            return;
-        }
-    } else {
-        page = buf;
-        skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
-        if (unlikely(!skb)) {
-            dev->stats.rx_dropped++;
-            give_pages(rq, page);
-            return;
-        }
-    }
+    if (vi->mergeable_rx_bufs)
+        skb = receive_mergeable(dev, rq, buf, len);
+    else if (vi->big_packets)
+        skb = receive_big(dev, rq, buf, len);
+    else
+        skb = receive_small(buf, len);
+
+    if (unlikely(!skb))
+        return;
 
     hdr = skb_vnet_hdr(skb);
 
@@ -1084,7 +1124,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
     if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                               VIRTIO_NET_CTRL_MAC_TABLE_SET,
                               sg, NULL))
-        dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
+        dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
     kfree(buf);
 }
@@ -39,6 +39,7 @@
 #include <linux/udp.h>
 
 #include <net/tcp.h>
+#include <net/ip6_checksum.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
@@ -1404,11 +1404,22 @@ enum {
 };
 #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
 
+static struct genl_multicast_group pmcraid_mcgrps[] = {
+    { .name = "events", /* not really used - see ID discussion below */ },
+};
+
 static struct genl_family pmcraid_event_family = {
-    .id = GENL_ID_GENERATE,
+    /*
+     * Due to prior multicast group abuse (the code having assumed that
+     * the family ID can be used as a multicast group ID) we need to
+     * statically allocate a family (and thus group) ID.
+     */
+    .id = GENL_ID_PMCRAID,
     .name = "pmcraid",
     .version = 1,
-    .maxattr = PMCRAID_AEN_ATTR_MAX
+    .maxattr = PMCRAID_AEN_ATTR_MAX,
+    .mcgrps = pmcraid_mcgrps,
+    .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),
 };
 
 /**
@@ -1511,9 +1522,8 @@ static int pmcraid_notify_aen(
         return result;
     }
 
-    result =
-        genlmsg_multicast(&pmcraid_event_family, skb, 0,
-                          pmcraid_event_family.id, GFP_ATOMIC);
+    result = genlmsg_multicast(&pmcraid_event_family, skb,
+                               0, 0, GFP_ATOMIC);
 
     /* If there are no listeners, genlmsg_multicast may return non-zero
     * value.
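The pmcraid hunks above illustrate the genetlink convention these fixes
enforce: a family declares its multicast groups in an array, and
genlmsg_multicast() takes the group's index within that array (here 0)
rather than a global group ID. A minimal sketch of the same shape for
an out-of-tree family — names are hypothetical, and unlike pmcraid a
new family would normally use a dynamic ID:

    static struct genl_multicast_group example_mcgrps[] = {
        { .name = "events" },
    };

    static struct genl_family example_family = {
        .id = GENL_ID_GENERATE,  /* dynamic; pmcraid needs a static ID */
        .name = "example",
        .version = 1,
        .mcgrps = example_mcgrps,
        .n_mcgrps = ARRAY_SIZE(example_mcgrps),
    };

    /* deliver skb to listeners of example_mcgrps[0] ("events") */
    static int example_notify(struct sk_buff *skb)
    {
        return genlmsg_multicast(&example_family, skb,
                                 0 /* portid */, 0 /* group index */,
                                 GFP_ATOMIC);
    }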
@@ -473,7 +473,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
 int ip_ra_control(struct sock *sk, unsigned char on,
                   void (*destructor)(struct sock *));
 
-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                    u32 info, u8 *payload);
 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
@@ -776,8 +776,10 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
 
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
 
-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+                    int *addr_len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
+                     int *addr_len);
 void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                      u32 info, u8 *payload);
 void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
@@ -31,7 +31,8 @@
 
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 struct pingv6_ops {
-    int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len);
+    int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
+                           int *addr_len);
     int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg,
                                  struct sk_buff *skb);
     int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
@@ -629,6 +629,7 @@ struct sctp_chunk {
 #define SCTP_NEED_FRTX 0x1
 #define SCTP_DONT_FRTX 0x2
     __u16 rtt_in_progress:1,    /* This chunk used for RTT calc? */
+        resent:1,               /* Has this chunk ever been resent. */
         has_tsn:1,              /* Does this chunk have a TSN yet? */
         has_ssn:1,              /* Does this chunk have a SSN yet? */
         singleton:1,            /* Only chunk in the packet? */
@@ -28,6 +28,7 @@ struct genlmsghdr {
 #define GENL_ID_GENERATE    0
 #define GENL_ID_CTRL        NLMSG_MIN_TYPE
 #define GENL_ID_VFS_DQUOT   (NLMSG_MIN_TYPE + 1)
+#define GENL_ID_PMCRAID     (NLMSG_MIN_TYPE + 2)
 
 /**************************************************************************
  * Controller
@@ -488,7 +488,9 @@ enum {
     IFLA_HSR_UNSPEC,
     IFLA_HSR_SLAVE1,
     IFLA_HSR_SLAVE2,
-    IFLA_HSR_MULTICAST_SPEC,
+    IFLA_HSR_MULTICAST_SPEC,    /* Last byte of supervision addr */
+    IFLA_HSR_SUPERVISION_ADDR,  /* Supervision frame multicast addr */
+    IFLA_HSR_SEQ_NR,
     __IFLA_HSR_MAX,
 };
 
@@ -33,6 +33,7 @@ struct netlink_diag_ring {
 };
 
 enum {
+    /* NETLINK_DIAG_NONE, standard nl API requires this attribute! */
     NETLINK_DIAG_MEMINFO,
     NETLINK_DIAG_GROUPS,
     NETLINK_DIAG_RX_RING,
@@ -29,6 +29,7 @@ struct packet_diag_msg {
 };
 
 enum {
+    /* PACKET_DIAG_NONE, standard nl API requires this attribute! */
     PACKET_DIAG_INFO,
     PACKET_DIAG_MCLIST,
     PACKET_DIAG_RX_RING,
@@ -31,6 +31,7 @@ struct unix_diag_msg {
 };
 
 enum {
+    /* UNIX_DIAG_NONE, standard nl API requires this attribute! */
     UNIX_DIAG_NAME,
     UNIX_DIAG_VFS,
     UNIX_DIAG_PEER,
@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
         __get_user(kmsg->msg_flags, &umsg->msg_flags))
         return -EFAULT;
     if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
-        return -EINVAL;
+        kmsg->msg_namelen = sizeof(struct sockaddr_storage);
     kmsg->msg_name = compat_ptr(tmp1);
     kmsg->msg_iov = compat_ptr(tmp2);
     kmsg->msg_control = compat_ptr(tmp3);
@@ -2527,6 +2527,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
     if (x) {
         int ret;
         __u8 *eth;
+        struct iphdr *iph;
+
         nhead = x->props.header_len - skb_headroom(skb);
         if (nhead > 0) {
             ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
@@ -2548,6 +2550,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
         eth = (__u8 *) skb_push(skb, ETH_HLEN);
         memcpy(eth, pkt_dev->hh, 12);
         *(u16 *) &eth[12] = protocol;
+
+        /* Update IPv4 header len as well as checksum value */
+        iph = ip_hdr(skb);
+        iph->tot_len = htons(skb->len - ETH_HLEN);
+        ip_send_check(iph);
     }
     }
     return 1;
@@ -288,7 +288,8 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
 static bool seq_nr_after(u16 a, u16 b)
 {
     /* Remove inconsistency where
-     * seq_nr_after(a, b) == seq_nr_before(a, b) */
+     * seq_nr_after(a, b) == seq_nr_before(a, b)
+     */
     if ((int) b - a == 32768)
         return false;
 
@@ -23,6 +23,8 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
     [IFLA_HSR_SLAVE1]           = { .type = NLA_U32 },
     [IFLA_HSR_SLAVE2]           = { .type = NLA_U32 },
     [IFLA_HSR_MULTICAST_SPEC]   = { .type = NLA_U8 },
+    [IFLA_HSR_SUPERVISION_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
+    [IFLA_HSR_SEQ_NR]           = { .type = NLA_U16 },
 };
 
 
@@ -59,6 +61,31 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
     return hsr_dev_finalize(dev, link, multicast_spec);
 }
 
+static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+    struct hsr_priv *hsr_priv;
+
+    hsr_priv = netdev_priv(dev);
+
+    if (hsr_priv->slave[0])
+        if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex))
+            goto nla_put_failure;
+
+    if (hsr_priv->slave[1])
+        if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex))
+            goto nla_put_failure;
+
+    if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
+                hsr_priv->sup_multicast_addr) ||
+        nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr))
+        goto nla_put_failure;
+
+    return 0;
+
+nla_put_failure:
+    return -EMSGSIZE;
+}
+
 static struct rtnl_link_ops hsr_link_ops __read_mostly = {
     .kind       = "hsr",
     .maxtype    = IFLA_HSR_MAX,
@@ -66,6 +93,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
     .priv_size  = sizeof(struct hsr_priv),
     .setup      = hsr_dev_setup,
     .newlink    = hsr_newlink,
+    .fill_info  = hsr_fill_info,
 };
 
 
@@ -386,7 +386,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
 /*
  *    Handle MSG_ERRQUEUE
  */
-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 {
     struct sock_exterr_skb *serr;
     struct sk_buff *skb, *skb2;
@@ -423,6 +423,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
                                            serr->addr_offset);
         sin->sin_port = serr->port;
         memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+        *addr_len = sizeof(*sin);
     }
 
     memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
@@ -772,7 +772,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         err = PTR_ERR(rt);
         rt = NULL;
         if (err == -ENETUNREACH)
-            IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+            IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
         goto out;
     }
 
@@ -841,10 +841,11 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
     if (flags & MSG_ERRQUEUE) {
         if (family == AF_INET) {
-            return ip_recv_error(sk, msg, len);
+            return ip_recv_error(sk, msg, len, addr_len);
 #if IS_ENABLED(CONFIG_IPV6)
         } else if (family == AF_INET6) {
-            return pingv6_ops.ipv6_recv_error(sk, msg, len);
+            return pingv6_ops.ipv6_recv_error(sk, msg, len,
                                               addr_len);
 #endif
         }
     }
@@ -31,10 +31,6 @@
 const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
 const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
 
-/*
- *    Add a protocol handler to the hash tables
- */
-
 int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
     if (!prot->netns_ok) {
@@ -55,10 +51,6 @@ int inet_add_offload(const struct net_offload *prot, unsigned char protocol)
 }
 EXPORT_SYMBOL(inet_add_offload);
 
-/*
- *    Remove a protocol from the hash tables.
- */
-
 int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
     int ret;
@@ -697,7 +697,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         goto out;
 
     if (flags & MSG_ERRQUEUE) {
-        err = ip_recv_error(sk, msg, len);
+        err = ip_recv_error(sk, msg, len, addr_len);
         goto out;
     }
 
@@ -177,7 +177,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     if (IS_ERR(rt)) {
         err = PTR_ERR(rt);
         if (err == -ENETUNREACH)
-            IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+            IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
         return err;
     }
 
@@ -60,7 +60,6 @@ EXPORT_SYMBOL(tcp_destroy_cgroup);
 static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 {
     struct cg_proto *cg_proto;
-    u64 old_lim;
     int i;
     int ret;
 
@@ -71,7 +70,6 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
     if (val > RES_COUNTER_MAX)
         val = RES_COUNTER_MAX;
 
-    old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
     ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
     if (ret)
         return ret;
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@ -274,33 +274,32 @@ static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *
 {
 	const struct iphdr *iph = skb_gro_network_header(skb);
 	__wsum wsum;
-	__sum16 sum;
+
+	/* Don't bother verifying checksum if we're going to flush anyway. */
+	if (NAPI_GRO_CB(skb)->flush)
+		goto skip_csum;
+
+	wsum = skb->csum;
 
 	switch (skb->ip_summed) {
+	case CHECKSUM_NONE:
+		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
+				    0);
+
+		/* fall through */
+
 	case CHECKSUM_COMPLETE:
 		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
-				  skb->csum)) {
+				  wsum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			break;
 		}
-flush:
+
 		NAPI_GRO_CB(skb)->flush = 1;
 		return NULL;
-
-	case CHECKSUM_NONE:
-		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-					  skb_gro_len(skb), IPPROTO_TCP, 0);
-		sum = csum_fold(skb_checksum(skb,
-					     skb_gro_offset(skb),
-					     skb_gro_len(skb),
-					     wsum));
-		if (sum)
-			goto flush;
-
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		break;
 	}
 
+skip_csum:
 	return tcp_gro_receive(head, skb);
 }
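For readers tracking the checksum bookkeeping in this rewrite: under CHECKSUM_COMPLETE, skb->csum holds a 32-bit one's-complement sum of the packet that still has to be folded to 16 bits and checked against the pseudo-header. A standalone sketch of that arithmetic in plain RFC 1071 terms — userspace C, not kernel code:

	#include <stddef.h>
	#include <stdint.h>

	/* Fold a 32-bit one's-complement accumulator down to 16 bits, in
	 * the spirit of csum_fold(): push the carries back in, then
	 * complement. */
	static uint16_t fold32(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	/* One's-complement sum of a buffer (RFC 1071). A packet whose
	 * stored checksum is included in buf verifies when this returns 0. */
	static uint16_t inet_checksum(const uint8_t *buf, size_t len)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
		if (len & 1)
			sum += (uint32_t)buf[len - 1] << 8;
		return fold32(sum);
	}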
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@ -999,7 +999,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		err = PTR_ERR(rt);
 		rt = NULL;
 		if (err == -ENETUNREACH)
-			IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+			IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 		goto out;
 	}
@ -1098,6 +1098,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
 	struct udp_sock *up = udp_sk(sk);
 	int ret;
 
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		flags |= MSG_MORE;
+
 	if (!up->pending) {
 		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };
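Same pattern as the other sendpage fixes in this pull: MSG_SENDPAGE_NOTLAST is set internally by the splice path on every page except the final one, so any sendpage implementation that only honors MSG_MORE must translate it up front. Distilled into a self-contained helper — the flag values match the 3.x linux/socket.h, but the helper itself is my illustration:

	#define MSG_MORE		0x8000
	#define MSG_SENDPAGE_NOTLAST	0x20000

	/* Normalize sendpage flags: a not-last page must keep the message
	 * open exactly as MSG_MORE would. */
	static inline int sendpage_flags(int flags)
	{
		if (flags & MSG_SENDPAGE_NOTLAST)
			flags |= MSG_MORE;
		return flags;
	}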
@ -1236,7 +1239,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	bool slow;
 
 	if (flags & MSG_ERRQUEUE)
-		return ip_recv_error(sk, msg, len);
+		return ip_recv_error(sk, msg, len, addr_len);
 
 try_again:
 	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@ -318,7 +318,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
 /*
  *	Handle MSG_ERRQUEUE
  */
-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sock_exterr_skb *serr;
@ -369,6 +369,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 					       &sin->sin6_addr);
 			sin->sin6_scope_id = 0;
 		}
+		*addr_len = sizeof(*sin);
 	}
 
 	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
@ -377,6 +378,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 	if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
 		sin->sin6_family = AF_INET6;
 		sin->sin6_flowinfo = 0;
+		sin->sin6_port = 0;
 		if (skb->protocol == htons(ETH_P_IPV6)) {
 			sin->sin6_addr = ipv6_hdr(skb)->saddr;
 			if (np->rxopt.all)
@ -423,7 +425,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error);
 /*
  *	Handle IPV6_RECVPATHMTU
  */
-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
+		     int *addr_len)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sk_buff *skb;
@ -457,6 +460,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
 		sin->sin6_port = 0;
 		sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
 		sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
+		*addr_len = sizeof(*sin);
 	}
 
 	put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@ -116,8 +116,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
 	}
 	rcu_read_unlock_bh();
 
-	IP6_INC_STATS_BH(dev_net(dst->dev),
-			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+	IP6_INC_STATS(dev_net(dst->dev),
+		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 	kfree_skb(skb);
 	return -EINVAL;
 }
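On the *_STATS_BH conversions here and in the ping/TCP/UDP hunks above: as I read them, the _BH variants assume bottom halves are already disabled and use the cheaper non-serialized per-cpu update, so calling them where softirqs can still fire risks corrupting the counter update (and, with the seqlock-protected 64-bit counters on 32-bit SMP, the deadlocks fixed elsewhere in this series). A sketch of the distinction with a simplified per-cpu counter — not the kernel's actual SNMP macros, and the demo_* names are mine:

	#include <linux/percpu.h>
	#include <linux/bottom_half.h>

	static DEFINE_PER_CPU(unsigned long, demo_counter);

	/* Only safe where BHs are already off (e.g. packet RX softirq). */
	static inline void demo_inc_bh(void)
	{
		__this_cpu_inc(demo_counter);
	}

	/* Safe from process context: keep a softirq on this CPU from
	 * interleaving with our read-modify-write. */
	static inline void demo_inc(void)
	{
		local_bh_disable();
		__this_cpu_inc(demo_counter);
		local_bh_enable();
	}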
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@ -57,7 +57,8 @@ static struct inet_protosw pingv6_protosw = {
 
 
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
-static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+				 int *addr_len)
 {
 	return -EAFNOSUPPORT;
 }
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@ -36,10 +36,6 @@ int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol
 }
 EXPORT_SYMBOL(inet6_add_protocol);
 
-/*
- *	Remove a protocol from the hash tables.
- */
-
 int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
 {
 	int ret;
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@ -466,10 +466,10 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 		return -EOPNOTSUPP;
 
 	if (flags & MSG_ERRQUEUE)
-		return ipv6_recv_error(sk, msg, len);
+		return ipv6_recv_error(sk, msg, len, addr_len);
 
 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
-		return ipv6_recv_rxpmtu(sk, msg, len);
+		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb)
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@ -478,14 +478,44 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
 	dev_put(dev);
 }
 
+/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
+ * if sufficient data bytes are available
+ */
+static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
+{
+	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	struct rt6_info *rt;
+	struct sk_buff *skb2;
+
+	if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
+		return 1;
+
+	skb2 = skb_clone(skb, GFP_ATOMIC);
+
+	if (!skb2)
+		return 1;
+
+	skb_dst_drop(skb2);
+	skb_pull(skb2, iph->ihl * 4);
+	skb_reset_network_header(skb2);
+
+	rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
+
+	if (rt && rt->dst.dev)
+		skb2->dev = rt->dst.dev;
+
+	icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+
+	if (rt)
+		ip6_rt_put(rt);
+
+	kfree_skb(skb2);
+
+	return 0;
+}
+
 static int ipip6_err(struct sk_buff *skb, u32 info)
 {
 
 /* All the routers (except for Linux) return only
    8 bytes of packet payload. It means, that precise relaying of
    ICMP in the real Internet is absolutely infeasible.
  */
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
@ -500,7 +530,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 	case ICMP_DEST_UNREACH:
 		switch (code) {
 		case ICMP_SR_FAILED:
-		case ICMP_PORT_UNREACH:
 			/* Impossible event. */
 			return 0;
 		default:
@ -545,6 +574,9 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 		goto out;
 
 	err = 0;
+	if (!ipip6_err_gen_icmpv6_unreach(skb))
+		goto out;
+
 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 		goto out;
 
@ -919,7 +951,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		if (!new_skb) {
 			ip_rt_put(rt);
 			dev->stats.tx_dropped++;
-			dev_kfree_skb(skb);
+			kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
 		if (skb->sk)
@ -945,7 +977,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 tx_error_icmp:
 	dst_link_failure(skb);
 tx_error:
-	dev_kfree_skb(skb);
+	kfree_skb(skb);
 out:
 	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
@ -985,7 +1017,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
 
 tx_err:
 	dev->stats.tx_errors++;
-	dev_kfree_skb(skb);
+	kfree_skb(skb);
 	return NETDEV_TX_OK;
 
 }
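The pskb_may_pull() bound in ipip6_err_gen_icmpv6_unreach() is the minimum payload that makes an ICMPv6 error worth relaying: the outer IPv4 header (ihl 32-bit words) must be followed by a complete inner IPv6 header plus 8 bytes, echoing ICMP's classic "original header plus 64 bits" rule. The same check in plain arithmetic — constants and the function name are my illustration:

	#include <stdbool.h>
	#include <stddef.h>

	#define IPV6_HDR_LEN 40	/* fixed IPv6 header size */

	/* ihl is the outer IPv4 header length in 32-bit words. */
	static bool enough_for_icmpv6_relay(size_t pkt_len, unsigned int ihl)
	{
		return pkt_len >= ihl * 4 + IPV6_HDR_LEN + 8;
	}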
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@ -37,34 +37,32 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 {
 	const struct ipv6hdr *iph = skb_gro_network_header(skb);
 	__wsum wsum;
-	__sum16 sum;
+
+	/* Don't bother verifying checksum if we're going to flush anyway. */
+	if (NAPI_GRO_CB(skb)->flush)
+		goto skip_csum;
+
+	wsum = skb->csum;
 
 	switch (skb->ip_summed) {
+	case CHECKSUM_NONE:
+		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
+				    wsum);
+
+		/* fall through */
+
 	case CHECKSUM_COMPLETE:
 		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
-				  skb->csum)) {
+				  wsum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			break;
 		}
-flush:
+
 		NAPI_GRO_CB(skb)->flush = 1;
 		return NULL;
-
-	case CHECKSUM_NONE:
-		wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
-						    skb_gro_len(skb),
-						    IPPROTO_TCP, 0));
-		sum = csum_fold(skb_checksum(skb,
-					     skb_gro_offset(skb),
-					     skb_gro_len(skb),
-					     wsum));
-		if (sum)
-			goto flush;
-
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		break;
 	}
 
+skip_csum:
 	return tcp_gro_receive(head, skb);
 }
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@ -393,10 +393,10 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	bool slow;
 
 	if (flags & MSG_ERRQUEUE)
-		return ipv6_recv_error(sk, msg, len);
+		return ipv6_recv_error(sk, msg, len, addr_len);
 
 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
-		return ipv6_recv_rxpmtu(sk, msg, len);
+		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 
 try_again:
 	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@ -665,7 +665,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
 		*addr_len = sizeof(*lsa);
 
 	if (flags & MSG_ERRQUEUE)
-		return ipv6_recv_error(sk, msg, len);
+		return ipv6_recv_error(sk, msg, len, addr_len);
 
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb)
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@ -74,9 +74,12 @@ static struct list_head family_ht[GENL_FAM_TAB_SIZE];
  * Bit 17 is marked as already used since the VFS quota code
  * also abused this API and relied on family == group ID, we
  * cater to that by giving it a static family and group ID.
+ * Bit 18 is marked as already used since the PMCRAID driver
+ * did the same thing as the VFS quota code (maybe copied?)
  */
 static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
-				      BIT(GENL_ID_VFS_DQUOT);
+				      BIT(GENL_ID_VFS_DQUOT) |
+				      BIT(GENL_ID_PMCRAID);
 static unsigned long *mc_groups = &mc_group_start;
 static unsigned long mc_groups_longs = 1;
 
@ -139,6 +142,7 @@ static u16 genl_generate_id(void)
 
 	for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
 		if (id_gen_idx != GENL_ID_VFS_DQUOT &&
+		    id_gen_idx != GENL_ID_PMCRAID &&
 		    !genl_family_find_byid(id_gen_idx))
 			return id_gen_idx;
 		if (++id_gen_idx > GENL_MAX_ID)
@ -214,7 +218,7 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
 {
 	int first_id;
 	int n_groups = family->n_mcgrps;
-	int err, i;
+	int err = 0, i;
 	bool groups_allocated = false;
 
 	if (!n_groups)
@ -236,9 +240,12 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
 	} else if (strcmp(family->name, "NET_DM") == 0) {
 		first_id = 1;
 		BUG_ON(n_groups != 1);
-	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
+	} else if (family->id == GENL_ID_VFS_DQUOT) {
 		first_id = GENL_ID_VFS_DQUOT;
 		BUG_ON(n_groups != 1);
+	} else if (family->id == GENL_ID_PMCRAID) {
+		first_id = GENL_ID_PMCRAID;
+		BUG_ON(n_groups != 1);
 	} else {
 		groups_allocated = true;
 		err = genl_allocate_reserve_groups(n_groups, &first_id);
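mc_group_start doubles as the first word of the multicast-group ID bitmap, so OR-ing in BIT(n) permanently reserves group n before the allocator ever runs — which is how GENL_ID_PMCRAID joins GENL_ID_VFS_DQUOT on the do-not-allocate list. The reservation pattern in miniature; the ID values below mirror the 3.x headers but should be read as illustrative:

	#include <stdio.h>

	#define BIT(n)		(1UL << (n))
	#define ID_CTRL		16
	#define ID_VFS_DQUOT	17
	#define ID_PMCRAID	18

	/* Bits 0, 1 and the legacy IDs are born "allocated". */
	static unsigned long groups = 0x3 | BIT(ID_CTRL) | BIT(ID_VFS_DQUOT) |
				      BIT(ID_PMCRAID);

	static int alloc_group(void)
	{
		int i;

		for (i = 0; i < 8 * (int)sizeof(groups); i++) {
			if (!(groups & BIT(i))) {
				groups |= BIT(i);
				return i;
			}
		}
		return -1;	/* first word exhausted */
	}

	int main(void)
	{
		printf("first free group: %d\n", alloc_group());	/* 2 */
		return 0;
	}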
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@ -439,9 +439,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 
 	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
 
-	spin_lock(&rb_queue->lock);
+	spin_lock_bh(&rb_queue->lock);
 	pkc->delete_blk_timer = 1;
-	spin_unlock(&rb_queue->lock);
+	spin_unlock_bh(&rb_queue->lock);
 
 	prb_del_retire_blk_timer(pkc);
 }
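Why the _bh variants here: rb_queue->lock is also taken by the block-retire timer, which runs in softirq context. If process-context teardown grabs the lock with BHs enabled, the timer can fire on the same CPU and spin on the held lock forever. The shape of the hazard with a simplified structure — demo_* names are mine, using the 3.x timer callback signature:

	#include <linux/spinlock.h>
	#include <linux/timer.h>

	struct demo_ring {
		spinlock_t lock;
		int shutting_down;
	};

	/* Timer callback: already in softirq context, plain spin_lock is
	 * fine here. */
	static void demo_retire(unsigned long data)
	{
		struct demo_ring *r = (struct demo_ring *)data;

		spin_lock(&r->lock);
		/* ... retire the current block ... */
		spin_unlock(&r->lock);
	}

	/* Process context: must block softirqs around the critical section. */
	static void demo_shutdown(struct demo_ring *r)
	{
		spin_lock_bh(&r->lock);
		r->shutting_down = 1;
		spin_unlock_bh(&r->lock);
	}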
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@ -215,10 +215,10 @@ static bool loss_4state(struct netem_sched_data *q)
 		if (rnd < clg->a4) {
 			clg->state = 4;
 			return true;
-		} else if (clg->a4 < rnd && rnd < clg->a1) {
+		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
 			clg->state = 3;
 			return true;
-		} else if (clg->a1 < rnd)
+		} else if (clg->a1 + clg->a4 < rnd)
 			clg->state = 1;
 
 		break;
@ -268,10 +268,11 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
 			clg->state = 2;
 		if (net_random() < clg->a4)
 			return true;
+		break;
 	case 2:
 		if (net_random() < clg->a2)
 			clg->state = 1;
-		if (clg->a3 > net_random())
+		if (net_random() > clg->a3)
 			return true;
 	}
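Both loss generators draw one uniform random number and carve it into consecutive intervals, one per outgoing transition, so the comparison bounds have to be cumulative; the 4-state fix above restores the a1 + a4 boundary. A self-contained simulation of the corrected draw — the probabilities are made up for the demo:

	#include <stdio.h>
	#include <stdlib.h>

	/* One transition step out of the "good" state: rnd falls into
	 * [0, a4) -> state 4, [a4, a4 + a1) -> state 3, else stay in 1. */
	static int next_state(double a1, double a4)
	{
		double rnd = (double)rand() / RAND_MAX;

		if (rnd < a4)
			return 4;
		if (rnd < a4 + a1)	/* the corrected cumulative bound */
			return 3;
		return 1;
	}

	int main(void)
	{
		int hist[5] = { 0 };
		int i;

		for (i = 0; i < 100000; i++)
			hist[next_state(0.05, 0.01)]++;
		printf("stay=%d burst=%d loss=%d\n", hist[1], hist[3], hist[4]);
		return 0;
	}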
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@ -21,6 +21,7 @@
 #include <net/netlink.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
+#include <net/tcp.h>
 
 
 /* Simple Token Bucket Filter.
@ -117,6 +118,22 @@ struct tbf_sched_data {
 };
 
 
+/*
+ * Return length of individual segments of a gso packet,
+ * including all headers (MAC, IP, TCP/UDP)
+ */
+static unsigned int skb_gso_seglen(const struct sk_buff *skb)
+{
+	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+		hdr_len += tcp_hdrlen(skb);
+	else
+		hdr_len += sizeof(struct udphdr);
+	return hdr_len + shinfo->gso_size;
+}
+
 /* GSO packet is too big, segment it so that tbf can transmit
  * each segment in time
  */
@ -136,12 +153,8 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
 	while (segs) {
 		nskb = segs->next;
 		segs->next = NULL;
-		if (likely(segs->len <= q->max_size)) {
-			qdisc_skb_cb(segs)->pkt_len = segs->len;
-			ret = qdisc_enqueue(segs, q->qdisc);
-		} else {
-			ret = qdisc_reshape_fail(skb, sch);
-		}
+		qdisc_skb_cb(segs)->pkt_len = segs->len;
+		ret = qdisc_enqueue(segs, q->qdisc);
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
 				sch->qstats.drops++;
@ -163,7 +176,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	int ret;
 
 	if (qdisc_pkt_len(skb) > q->max_size) {
-		if (skb_is_gso(skb))
+		if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
 			return tbf_segment(skb, sch);
 		return qdisc_reshape_fail(skb, sch);
 	}
@ -319,6 +332,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 	if (max_size < 0)
 		goto done;
 
+	if (max_size < psched_mtu(qdisc_dev(sch)))
+		pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
+				    max_size, qdisc_dev(sch)->name,
+				    psched_mtu(qdisc_dev(sch)));
+
 	if (q->qdisc != &noop_qdisc) {
 		err = fifo_set_limit(q->qdisc, qopt->limit);
 		if (err)
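skb_gso_seglen() exists to predict the per-segment wire size before committing to segmentation: the L2..L4 headers are re-carried by every segment, and gso_size is the per-segment payload, so tbf only segments when header + gso_size fits under max_size. The arithmetic on its own — the numbers are illustrative:

	#include <stdio.h>

	static unsigned int gso_seglen(unsigned int hdr_len,
				       unsigned int gso_size)
	{
		return hdr_len + gso_size;	/* headers repeat per segment */
	}

	int main(void)
	{
		unsigned int hdr = 14 + 20 + 20; /* ETH + IPv4 + TCP, no options */

		/* A 1460-byte MSS yields the classic 1514-byte Ethernet
		 * frame; tbf compares this against max_size before calling
		 * tbf_segment(). */
		printf("per-segment wire length: %u\n", gso_seglen(hdr, 1460));
		return 0;
	}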
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@ -474,10 +474,11 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 			 * for a given destination transport address.
 			 */
 
-			if (!tp->rto_pending) {
+			if (!chunk->resent && !tp->rto_pending) {
 				chunk->rtt_in_progress = 1;
 				tp->rto_pending = 1;
 			}
+
 			has_data = 1;
 		}
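This and the outqueue hunks below apply Karn's rule to SCTP: a chunk that has ever been retransmitted must not seed an RTT measurement, because an ACK for it cannot be attributed to a particular transmission. The rule in isolation — types and names here are mine, not the kernel's:

	#include <stdbool.h>
	#include <stdint.h>

	struct demo_chunk {
		bool resent;		/* ever retransmitted? */
		bool rtt_in_progress;	/* current RTT probe? */
		uint64_t sent_at;
	};

	/* One RTT probe in flight per transport; never probe a resent
	 * chunk (Karn's rule). */
	static bool start_rtt_probe(struct demo_chunk *c, bool *rto_pending,
				    uint64_t now)
	{
		if (c->resent || *rto_pending)
			return false;
		c->rtt_in_progress = true;
		c->sent_at = now;
		*rto_pending = true;
		return true;
	}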
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@ -446,6 +446,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 				transport->rto_pending = 0;
 			}
 
+			chunk->resent = 1;
+
 			/* Move the chunk to the retransmit queue. The chunks
 			 * on the retransmit queue are always kept in order.
 			 */
@ -1375,6 +1377,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 				 * instance).
 				 */
 				if (!tchunk->tsn_gap_acked &&
+				    !tchunk->resent &&
 				    tchunk->rtt_in_progress) {
 					tchunk->rtt_in_progress = 0;
 					rtt = jiffies - tchunk->sent_at;
@ -1391,7 +1394,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 			 */
 			if (!tchunk->tsn_gap_acked) {
 				tchunk->tsn_gap_acked = 1;
-				*highest_new_tsn_in_sack = tsn;
+				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
+					*highest_new_tsn_in_sack = tsn;
 				bytes_acked += sctp_data_size(tchunk);
 				if (!tchunk->transport)
 					migrate_bytes += sctp_data_size(tchunk);
--- a/net/socket.c
+++ b/net/socket.c
@ -1973,7 +1973,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
 	if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
 		return -EFAULT;
 	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
-		return -EINVAL;
+		kmsg->msg_namelen = sizeof(struct sockaddr_storage);
 	return 0;
 }
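Userspace has long been able to pass an oversized msg_namelen and rely on the kernel silently clamping it; the earlier info-leak hardening turned that clamp into -EINVAL, and this hunk restores it. An illustration of the restored behavior — assumes a fixed kernel, with an AF_UNIX datagram pair used purely for brevity:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	int main(void)
	{
		int fds[2];
		char big_name[1024];	/* deliberately larger than any sockaddr */
		char byte = 'x';
		struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
		struct msghdr msg = {
			.msg_name	= big_name,
			.msg_namelen	= sizeof(big_name),	/* clamped, not rejected */
			.msg_iov	= &iov,
			.msg_iovlen	= 1,
		};

		if (socketpair(AF_UNIX, SOCK_DGRAM, 0, fds) ||
		    send(fds[0], &byte, 1, 0) < 0)
			return 1;

		/* With the fix this recvmsg() succeeds (namelen is truncated
		 * in-kernel); with the broken check it failed with EINVAL. */
		if (recvmsg(fds[1], &msg, 0) < 0)
			perror("recvmsg");
		else
			printf("ok, msg_namelen now %u\n", msg.msg_namelen);
		return 0;
	}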