Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  Revert "NET: Fix locking issues in PPP, 6pack, mkiss and strip line disciplines."
  skbuff.h: Fix comment for NET_IP_ALIGN
  drivers/net: using spin_lock_irqsave() in net_send_packet()
  NET: phy_device, fix lock imbalance
  gre: fix ToS/DiffServ inherit bug
  igb: gcc-3.4.6 fix
  atlx: duplicate testing of MCAST flag
  NET: Fix locking issues in PPP, 6pack, mkiss and strip line disciplines.
  netdev: restore MTU change operation
  netdev: restore MAC address set and validate operations
  sit: fix regression: do not release skb->dst before xmit
  net: ip_push_pending_frames() fix
  net: sk_prot_alloc() should not blindly overwrite memory
commit a376d44677
@@ -908,6 +908,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
         .ndo_open = rtl8139_open,
         .ndo_stop = rtl8139_close,
         .ndo_get_stats = rtl8139_get_stats,
+        .ndo_change_mtu = eth_change_mtu,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_set_mac_address = rtl8139_set_mac_address,
         .ndo_start_xmit = rtl8139_start_xmit,
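The .ndo_change_mtu additions here and in the hunks below restore the default Ethernet MTU handler that was lost in the net_device_ops conversion; without it, dev_set_mtu() has no per-device hook and silently accepts out-of-range values. For reference, this is roughly what the helper did in kernels of this era (sketch from memory of net/ethernet/eth.c, not part of the diff):

/* Sketch, not part of the patch: the generic helper the drivers fall back to. */
int eth_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < 68 || new_mtu > ETH_DATA_LEN)
                return -EINVAL;         /* below IPv4 minimum or above 1500 */
        dev->mtu = new_mtu;
        return 0;
}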
@@ -1142,7 +1142,9 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
         .ndo_start_xmit = eth_xmit,
         .ndo_set_multicast_list = eth_set_mcast_list,
         .ndo_do_ioctl = eth_ioctl,
+        .ndo_change_mtu = eth_change_mtu,
+        .ndo_set_mac_address = eth_mac_addr,
+        .ndo_validate_addr = eth_validate_addr,
 };
 
 static int __devinit eth_init_one(struct platform_device *pdev)
@@ -2071,7 +2071,7 @@ static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
         if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
                 return -EOPNOTSUPP;
 
-        if (wol->wolopts & (WAKE_MCAST|WAKE_BCAST|WAKE_MCAST))
+        if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
                 return -EOPNOTSUPP;
 
         /* these settings will always override what we currently have */
@@ -1524,6 +1524,7 @@ static void net_timeout(struct net_device *dev)
 static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
         struct net_local *lp = netdev_priv(dev);
+        unsigned long flags;
 
         if (net_debug > 3) {
                 printk("%s: sent %d byte packet of type %x\n",
@@ -1535,7 +1536,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
            ask the chip to start transmitting before the
            whole packet has been completely uploaded. */
 
-        spin_lock_irq(&lp->lock);
+        spin_lock_irqsave(&lp->lock, flags);
         netif_stop_queue(dev);
 
         /* initiate a transmit sequence */
@@ -1549,13 +1550,13 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
          * we're waiting for TxOk, so return 1 and requeue this packet.
          */
 
-                spin_unlock_irq(&lp->lock);
+                spin_unlock_irqrestore(&lp->lock, flags);
                 if (net_debug) printk("cs89x0: Tx buffer not free!\n");
                 return NETDEV_TX_BUSY;
         }
         /* Write the contents of the packet */
         writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
-        spin_unlock_irq(&lp->lock);
+        spin_unlock_irqrestore(&lp->lock, flags);
         lp->stats.tx_bytes += skb->len;
         dev->trans_start = jiffies;
         dev_kfree_skb (skb);
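These cs89x0 hunks are part of "drivers/net: using spin_lock_irqsave() in net_send_packet()": spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if the transmit path can be entered with them already disabled, so the lock is taken and released with the state-preserving variants. A minimal sketch of the resulting pattern (illustrative driver code, not from the patch):

/* Illustrative only: the locking shape the fix establishes. */
static int example_send_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct net_local *lp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&lp->lock, flags);      /* remember the caller's IRQ state */
        /* ... program the TX hardware ... */
        spin_unlock_irqrestore(&lp->lock, flags); /* restore it rather than force-enable */

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}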
@@ -3080,7 +3080,9 @@ static const struct net_device_ops ehea_netdev_ops = {
         .ndo_poll_controller = ehea_netpoll,
 #endif
         .ndo_get_stats = ehea_get_stats,
+        .ndo_change_mtu = eth_change_mtu,
         .ndo_set_mac_address = ehea_set_mac_addr,
+        .ndo_validate_addr = eth_validate_addr,
         .ndo_set_multicast_list = ehea_set_multicast_list,
         .ndo_change_mtu = ehea_change_mtu,
         .ndo_vlan_rx_register = ehea_vlan_rx_register,
@@ -1642,6 +1642,7 @@ static const struct net_device_ops fec_netdev_ops = {
         .ndo_stop = fec_enet_close,
         .ndo_start_xmit = fec_enet_start_xmit,
         .ndo_set_multicast_list = set_multicast_list,
+        .ndo_change_mtu = eth_change_mtu,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_tx_timeout = fec_timeout,
         .ndo_set_mac_address = fec_set_mac_address,
@@ -156,6 +156,8 @@ static const struct net_device_ops gfar_netdev_ops = {
         .ndo_tx_timeout = gfar_timeout,
         .ndo_do_ioctl = gfar_ioctl,
         .ndo_vlan_rx_register = gfar_vlan_rx_register,
+        .ndo_set_mac_address = eth_mac_addr,
+        .ndo_validate_addr = eth_validate_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller = gfar_netpoll,
 #endif
@@ -127,14 +127,48 @@ static void igb_restore_vlan(struct igb_adapter *);
 static void igb_ping_all_vfs(struct igb_adapter *);
 static void igb_msg_task(struct igb_adapter *);
 static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
-static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
 static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
 static void igb_vmm_control(struct igb_adapter *);
-static inline void igb_set_vmolr(struct e1000_hw *, int);
-static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
 static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 
+static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
+{
+        u32 reg_data;
+
+        reg_data = rd32(E1000_VMOLR(vfn));
+        reg_data |= E1000_VMOLR_BAM |    /* Accept broadcast */
+                    E1000_VMOLR_ROPE |   /* Accept packets matched in UTA */
+                    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
+                    E1000_VMOLR_AUPE |   /* Accept untagged packets */
+                    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+        wr32(E1000_VMOLR(vfn), reg_data);
+}
+
+static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+                                   int vfn)
+{
+        struct e1000_hw *hw = &adapter->hw;
+        u32 vmolr;
+
+        vmolr = rd32(E1000_VMOLR(vfn));
+        vmolr &= ~E1000_VMOLR_RLPML_MASK;
+        vmolr |= size | E1000_VMOLR_LPE;
+        wr32(E1000_VMOLR(vfn), vmolr);
+
+        return 0;
+}
+
+static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
+{
+        u32 reg_data;
+
+        reg_data = rd32(E1000_RAH(entry));
+        reg_data &= ~E1000_RAH_POOL_MASK;
+        reg_data |= E1000_RAH_POOL_1 << pool;;
+        wr32(E1000_RAH(entry), reg_data);
+}
+
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);
 static int igb_resume(struct pci_dev *);
@@ -5418,43 +5452,6 @@ static void igb_io_resume(struct pci_dev *pdev)
         igb_get_hw_control(adapter);
 }
 
-static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
-{
-        u32 reg_data;
-
-        reg_data = rd32(E1000_VMOLR(vfn));
-        reg_data |= E1000_VMOLR_BAM |    /* Accept broadcast */
-                    E1000_VMOLR_ROPE |   /* Accept packets matched in UTA */
-                    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
-                    E1000_VMOLR_AUPE |   /* Accept untagged packets */
-                    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
-        wr32(E1000_VMOLR(vfn), reg_data);
-}
-
-static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
-                                   int vfn)
-{
-        struct e1000_hw *hw = &adapter->hw;
-        u32 vmolr;
-
-        vmolr = rd32(E1000_VMOLR(vfn));
-        vmolr &= ~E1000_VMOLR_RLPML_MASK;
-        vmolr |= size | E1000_VMOLR_LPE;
-        wr32(E1000_VMOLR(vfn), vmolr);
-
-        return 0;
-}
-
-static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
-{
-        u32 reg_data;
-
-        reg_data = rd32(E1000_RAH(entry));
-        reg_data &= ~E1000_RAH_POOL_MASK;
-        reg_data |= E1000_RAH_POOL_1 << pool;;
-        wr32(E1000_RAH(entry), reg_data);
-}
-
 static void igb_set_mc_list_pools(struct igb_adapter *adapter,
                                   int entry_count, u16 total_rar_filters)
 {
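These igb hunks are the "igb: gcc-3.4.6 fix": gcc 3.4.x cannot inline a static inline function that is only forward-declared at the call site, so the three helpers are moved above their first callers and the prototypes dropped. A stripped-down illustration of the rule (hypothetical names, not igb code):

/* Hypothetical illustration, not igb code: define static inlines before use. */
static inline int helper(int v)
{
        return v * 2;           /* body visible before any caller */
}

static int caller(int v)
{
        return helper(v);       /* old gcc can now inline this */
}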
@@ -430,7 +430,8 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
          * hardware interrupt handler. Queue flow control is
          * thus managed under this lock as well.
          */
-        spin_lock_irq(&np->lock);
+        unsigned long flags;
+        spin_lock_irqsave(&np->lock, flags);
 
         add_to_tx_ring(np, skb, length);
         dev->trans_start = jiffies;
@@ -446,7 +447,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
          * is when the transmit statistics are updated.
          */
 
-        spin_unlock_irq(&np->lock);
+        spin_unlock_irqrestore(&np->lock, flags);
 #else
         /* This is the case for older hardware which takes
          * a single transmit buffer at a time, and it is
@@ -134,8 +134,10 @@ int phy_scan_fixups(struct phy_device *phydev)
 
                         err = fixup->run(phydev);
 
-                        if (err < 0)
+                        if (err < 0) {
+                                mutex_unlock(&phy_fixup_lock);
                                 return err;
+                        }
                 }
         }
         mutex_unlock(&phy_fixup_lock);
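This is "NET: phy_device, fix lock imbalance": the error return inside the fixup walk used to leave phy_fixup_lock held. With the fix every exit path releases the mutex; roughly the resulting function (abbreviated sketch of drivers/net/phy/phy_device.c from this era, not the exact source):

int phy_scan_fixups(struct phy_device *phydev)
{
        struct phy_fixup *fixup;

        mutex_lock(&phy_fixup_lock);
        list_for_each_entry(fixup, &phy_fixup_list, list) {
                if (phy_needs_fixup(phydev, fixup)) {
                        int err = fixup->run(phydev);

                        if (err < 0) {
                                mutex_unlock(&phy_fixup_lock);  /* drop before bailing out */
                                return err;
                        }
                }
        }
        mutex_unlock(&phy_fixup_lock);

        return 0;
}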
@@ -270,6 +270,9 @@ static const struct net_device_ops plip_netdev_ops = {
         .ndo_stop = plip_close,
         .ndo_start_xmit = plip_tx_packet,
         .ndo_do_ioctl = plip_ioctl,
+        .ndo_change_mtu = eth_change_mtu,
+        .ndo_set_mac_address = eth_mac_addr,
+        .ndo_validate_addr = eth_validate_addr,
 };
 
 /* Entry point of PLIP driver.
@@ -1411,6 +1411,7 @@ static const struct net_device_ops gelic_netdevice_ops = {
         .ndo_set_multicast_list = gelic_net_set_multi,
         .ndo_change_mtu = gelic_net_change_mtu,
         .ndo_tx_timeout = gelic_net_tx_timeout,
+        .ndo_set_mac_address = eth_mac_addr,
         .ndo_validate_addr = eth_validate_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller = gelic_net_poll_controller,
@@ -2707,6 +2707,7 @@ static const struct net_device_ops gelic_wl_netdevice_ops = {
         .ndo_set_multicast_list = gelic_net_set_multi,
         .ndo_change_mtu = gelic_net_change_mtu,
         .ndo_tx_timeout = gelic_net_tx_timeout,
+        .ndo_set_mac_address = eth_mac_addr,
         .ndo_validate_addr = eth_validate_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller = gelic_net_poll_controller,
@@ -1774,6 +1774,7 @@ static const struct net_device_ops smc_netdev_ops = {
         .ndo_start_xmit = smc_hard_start_xmit,
         .ndo_tx_timeout = smc_timeout,
         .ndo_set_multicast_list = smc_set_multicast_list,
+        .ndo_change_mtu = eth_change_mtu,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_set_mac_address = eth_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1779,6 +1779,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
         .ndo_get_stats = smsc911x_get_stats,
         .ndo_set_multicast_list = smsc911x_set_multicast_list,
         .ndo_do_ioctl = smsc911x_do_ioctl,
+        .ndo_change_mtu = eth_change_mtu,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_set_mac_address = smsc911x_set_mac_address,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1016,7 +1016,9 @@ static const struct net_device_ops vnet_ops = {
         .ndo_open = vnet_open,
         .ndo_stop = vnet_close,
         .ndo_set_multicast_list = vnet_set_rx_mode,
+        .ndo_change_mtu = eth_change_mtu,
         .ndo_set_mac_address = vnet_set_mac_addr,
+        .ndo_validate_addr = eth_validate_addr,
         .ndo_tx_timeout = vnet_tx_timeout,
         .ndo_change_mtu = vnet_change_mtu,
         .ndo_start_xmit = vnet_start_xmit,
@@ -999,6 +999,9 @@ static const struct net_device_ops kaweth_netdev_ops = {
         .ndo_tx_timeout = kaweth_tx_timeout,
         .ndo_set_multicast_list = kaweth_set_rx_mode,
         .ndo_get_stats = kaweth_netdev_stats,
+        .ndo_change_mtu = eth_change_mtu,
+        .ndo_set_mac_address = eth_mac_addr,
+        .ndo_validate_addr = eth_validate_addr,
 };
 
 static int kaweth_probe(
@@ -1493,6 +1493,9 @@ static const struct net_device_ops pegasus_netdev_ops = {
         .ndo_set_multicast_list = pegasus_set_multicast,
         .ndo_get_stats = pegasus_netdev_stats,
         .ndo_tx_timeout = pegasus_tx_timeout,
+        .ndo_change_mtu = eth_change_mtu,
+        .ndo_set_mac_address = eth_mac_addr,
+        .ndo_validate_addr = eth_validate_addr,
 };
 
 static struct usb_driver pegasus_driver = {
@@ -621,6 +621,7 @@ static const struct net_device_ops rhine_netdev_ops = {
         .ndo_start_xmit = rhine_start_tx,
         .ndo_get_stats = rhine_get_stats,
         .ndo_set_multicast_list = rhine_set_rx_mode,
+        .ndo_change_mtu = eth_change_mtu,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_set_mac_address = eth_mac_addr,
         .ndo_do_ioctl = netdev_ioctl,
@@ -2521,6 +2521,8 @@ static const struct net_device_ops orinoco_netdev_ops = {
         .ndo_start_xmit = orinoco_xmit,
         .ndo_set_multicast_list = orinoco_set_multicast_list,
         .ndo_change_mtu = orinoco_change_mtu,
+        .ndo_set_mac_address = eth_mac_addr,
+        .ndo_validate_addr = eth_validate_addr,
         .ndo_tx_timeout = orinoco_tx_timeout,
         .ndo_get_stats = orinoco_get_stats,
 };
@@ -2555,7 +2557,6 @@ struct net_device
         priv->wireless_data.spy_data = &priv->spy_data;
         dev->wireless_data = &priv->wireless_data;
 #endif
-        /* we use the default eth_mac_addr for setting the MAC addr */
 
         /* Reserve space in skb for the SNAP header */
         dev->hard_header_len += ENCAPS_OVERHEAD;
@@ -1342,12 +1342,12 @@ static inline int skb_network_offset(const struct sk_buff *skb)
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
- * skb_reserve(NET_IP_ALIGN);
+ * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
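The skbuff.h hunk only fixes the usage example in the comment: skb_reserve() takes the skb as its first argument. A typical receive-path allocation following that advice looks like this (rx_buf_len is an illustrative name):

/* Sketch: pad in front of the Ethernet header so the IP header is 4-byte aligned. */
skb = netdev_alloc_skb(dev, rx_buf_len + NET_IP_ALIGN);
if (!skb)
        return -ENOMEM;
skb_reserve(skb, NET_IP_ALIGN);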
@@ -939,8 +939,23 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
         struct kmem_cache *slab;
 
         slab = prot->slab;
-        if (slab != NULL)
-                sk = kmem_cache_alloc(slab, priority);
+        if (slab != NULL) {
+                sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
+                if (!sk)
+                        return sk;
+                if (priority & __GFP_ZERO) {
+                        /*
+                         * caches using SLAB_DESTROY_BY_RCU should let
+                         * sk_node.next un-modified. Special care is taken
+                         * when initializing object to zero.
+                         */
+                        if (offsetof(struct sock, sk_node.next) != 0)
+                                memset(sk, 0, offsetof(struct sock, sk_node.next));
+                        memset(&sk->sk_node.pprev, 0,
+                               prot->obj_size - offsetof(struct sock,
+                                                         sk_node.pprev));
+                }
+        }
         else
                 sk = kmalloc(prot->obj_size, priority);
 
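This is "net: sk_prot_alloc() should not blindly overwrite memory": socket caches created with SLAB_DESTROY_BY_RCU can be freed and reused while an RCU reader is still following sk_node.next through a hash chain, so a __GFP_ZERO allocation must not wipe that pointer. The patch strips __GFP_ZERO from the kmem_cache_alloc() call and clears the object manually in two ranges around the field. The same idiom in isolation (hypothetical struct, illustration only):

#include <stddef.h>
#include <string.h>

struct obj {
        long before;    /* cleared */
        void *keep;     /* stands in for sk_node.next: left untouched */
        long after[4];  /* cleared */
};

/* Zero everything except 'keep', mirroring the two memset() calls above. */
static void clear_around_keep(struct obj *o)
{
        memset(o, 0, offsetof(struct obj, keep));
        memset((char *)o + offsetof(struct obj, after), 0,
               sizeof(*o) - offsetof(struct obj, after));
}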
@@ -735,10 +735,10 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
         }
 
         tos = tiph->tos;
-        if (tos&1) {
+        if (tos == 1) {
+                tos = 0;
                 if (skb->protocol == htons(ETH_P_IP))
                         tos = old_iph->tos;
-                tos &= ~1;
         }
 
         {
@@ -1243,7 +1243,6 @@ int ip_push_pending_frames(struct sock *sk)
                 skb->len += tmp_skb->len;
                 skb->data_len += tmp_skb->len;
                 skb->truesize += tmp_skb->truesize;
-                __sock_put(tmp_skb->sk);
                 tmp_skb->destructor = NULL;
                 tmp_skb->sk = NULL;
         }
@@ -1484,7 +1484,6 @@ int ip6_push_pending_frames(struct sock *sk)
                 skb->len += tmp_skb->len;
                 skb->data_len += tmp_skb->len;
                 skb->truesize += tmp_skb->truesize;
-                __sock_put(tmp_skb->sk);
                 tmp_skb->destructor = NULL;
                 tmp_skb->sk = NULL;
         }
@@ -1018,6 +1018,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
         dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
         dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr);
         dev->flags = IFF_NOARP;
+        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
         dev->iflink = 0;
         dev->addr_len = 4;
         dev->features |= NETIF_F_NETNS_LOCAL;
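The sit hunk is "sit: fix regression: do not release skb->dst before xmit": clearing IFF_XMIT_DST_RELEASE makes the core keep the route attached to the skb, which ipip6_tunnel_xmit() still dereferences. Roughly the core check this flag feeds (dev_hard_start_xmit() of this era, quoted from memory, not part of the diff):

if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
        skb_dst_drop(skb);      /* sit now opts out, so skb_dst(skb) survives into the driver */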