mirror of https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 21:38:32 +08:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Revert iwlwifi reclaimed packet tracking, it causes problems for a bunch of folks. From Emmanuel Grumbach.

 2) Work limiting code in brcmsmac wifi driver can clear tx status without processing the event. From Arend van Spriel.

 3) rtlwifi USB driver processes wrong SKB, fix from Larry Finger.

 4) l2tp tunnel delete can race with close, fix from Tom Parkin.

 5) pktgen_add_device() failures are not checked at all, fix from Cong Wang.

 6) Fix unintentional removal of carrier off from tun_detach(), otherwise we confuse userspace, from Michael S. Tsirkin.

 7) Don't leak socket reference counts and ubufs in vhost-net driver, from Jason Wang.

 8) vmxnet3 driver gets its initial carrier state wrong, fix from Neil Horman.

 9) Protect against USB networking devices which spam the host with 0 length frames, from Bjørn Mork.

10) Prevent neighbour overflows in ipv6 for locally destined routes, from Marcelo Ricardo. This is the best short-term fix for this; a longer-term fix has been implemented in net-next.

11) L2TP uses ipv4 datagram routines in its ipv6 code, whoops. This mistake is largely because the ipv6 functions don't even have some kind of prefix in their names to suggest they are ipv6-specific. From Tom Parkin.

12) Check SYN packet drops properly in tcp_rcv_fastopen_synack(), from Yuchung Cheng.

13) Fix races and TX skb freeing bugs in via-rhine's NAPI support, from Francois Romieu and yours truly.

14) Fix infinite loops and divides by zero in TCP congestion window handling, from Eric Dumazet, Neal Cardwell, and Ilpo Järvinen.

15) AF_PACKET tx ring handling can leak kernel memory to userspace, fix from Phil Sutter.

16) Fix error handling in ipv6 GRE tunnel transmit, from Tommi Rantala.

17) Protect XEN netback driver against hostile frontend putting garbage into the rings, don't leak pages in TX GOP checking, and add proper resource releasing in the error path of xen_netbk_get_requests(). From Ian Campbell.

18) SCTP authentication keys should be cleared out and released with kzfree(), from Daniel Borkmann.

19) L2TP is a bit too clever trying to maintain skb->truesize, and ends up corrupting socket memory accounting to the point where packet sending is halted indefinitely. Just remove the adjustments entirely, they aren't really needed. From Eric Dumazet.

20) ATM Iphase driver uses a data type with the same name as the S390 headers, rename to fix the build. From Heiko Carstens.

21) Fix a typo in copying the inner network header offset from one SKB to another, from Pravin B Shelar.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (56 commits)
  net: sctp: sctp_endpoint_free: zero out secret key data
  net: sctp: sctp_setsockopt_auth_key: use kzfree instead of kfree
  atm/iphase: rename fregt_t -> ffreg_t
  net: usb: fix regression from FLAG_NOARP code
  l2tp: dont play with skb->truesize
  net: sctp: sctp_auth_key_put: use kzfree instead of kfree
  netback: correct netbk_tx_err to handle wrap around.
  xen/netback: free already allocated memory on failure in xen_netbk_get_requests
  xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop.
  xen/netback: shutdown the ring if it contains garbage.
  net: qmi_wwan: add more Huawei devices, including E320
  net: cdc_ncm: add another Huawei vendor specific device
  ipv6/ip6_gre: fix error case handling in ip6gre_tunnel_xmit()
  tcp: fix for zero packets_in_flight was too broad
  brcmsmac: rework of mac80211 .flush() callback operation
  ssb: unregister gpios before unloading ssb
  bcma: unregister gpios before unloading bcma
  rtlwifi: Fix scheduling while atomic bug
  net: usbnet: fix tx_dropped statistics
  tcp: ipv6: Update MIB counters for drops
  ...
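As a side note on item 18 and the three sctp commits above: kfree() returns key memory to the allocator with its contents intact, while kzfree() zeroes the allocation before freeing it, so secret bytes do not linger in freed slab memory. A minimal sketch of the pattern — not the actual SCTP code; the struct and helper here are hypothetical:

    #include <linux/slab.h>

    struct auth_key {                /* hypothetical key container */
            int len;
            u8 data[];
    };

    static void auth_key_release(struct auth_key *key)
    {
            /* kzfree() clears the whole allocation before handing it
             * back to the allocator; a plain kfree() would leave the
             * secret readable in freed memory until reallocation.
             */
            kzfree(key);
    }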
This commit is contained in:
commit e06b84052a
@@ -636,81 +636,81 @@ struct rx_buf_desc {
 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
 
-typedef volatile u_int freg_t;
+typedef volatile u_int ffreg_t;
 typedef u_int rreg_t;
 
 typedef struct _ffredn_t {
-    freg_t idlehead_high;   /* Idle cell header (high) */
-    freg_t idlehead_low;    /* Idle cell header (low) */
-    freg_t maxrate;         /* Maximum rate */
-    freg_t stparms;         /* Traffic Management Parameters */
-    freg_t abrubr_abr;      /* ABRUBR Priority Byte 1, TCR Byte 0 */
-    freg_t rm_type;         /* */
+    ffreg_t idlehead_high;  /* Idle cell header (high) */
+    ffreg_t idlehead_low;   /* Idle cell header (low) */
+    ffreg_t maxrate;        /* Maximum rate */
+    ffreg_t stparms;        /* Traffic Management Parameters */
+    ffreg_t abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0 */
+    ffreg_t rm_type;        /* */
     u_int filler5[0x17 - 0x06];
-    freg_t cmd_reg;         /* Command register */
+    ffreg_t cmd_reg;        /* Command register */
     u_int filler18[0x20 - 0x18];
-    freg_t cbr_base;        /* CBR Pointer Base */
-    freg_t vbr_base;        /* VBR Pointer Base */
-    freg_t abr_base;        /* ABR Pointer Base */
-    freg_t ubr_base;        /* UBR Pointer Base */
+    ffreg_t cbr_base;       /* CBR Pointer Base */
+    ffreg_t vbr_base;       /* VBR Pointer Base */
+    ffreg_t abr_base;       /* ABR Pointer Base */
+    ffreg_t ubr_base;       /* UBR Pointer Base */
     u_int filler24;
-    freg_t vbrwq_base;      /* VBR Wait Queue Base */
-    freg_t abrwq_base;      /* ABR Wait Queue Base */
-    freg_t ubrwq_base;      /* UBR Wait Queue Base */
-    freg_t vct_base;        /* Main VC Table Base */
-    freg_t vcte_base;       /* Extended Main VC Table Base */
+    ffreg_t vbrwq_base;     /* VBR Wait Queue Base */
+    ffreg_t abrwq_base;     /* ABR Wait Queue Base */
+    ffreg_t ubrwq_base;     /* UBR Wait Queue Base */
+    ffreg_t vct_base;       /* Main VC Table Base */
+    ffreg_t vcte_base;      /* Extended Main VC Table Base */
     u_int filler2a[0x2C - 0x2A];
-    freg_t cbr_tab_beg;     /* CBR Table Begin */
-    freg_t cbr_tab_end;     /* CBR Table End */
-    freg_t cbr_pointer;     /* CBR Pointer */
+    ffreg_t cbr_tab_beg;    /* CBR Table Begin */
+    ffreg_t cbr_tab_end;    /* CBR Table End */
+    ffreg_t cbr_pointer;    /* CBR Pointer */
     u_int filler2f[0x30 - 0x2F];
-    freg_t prq_st_adr;      /* Packet Ready Queue Start Address */
-    freg_t prq_ed_adr;      /* Packet Ready Queue End Address */
-    freg_t prq_rd_ptr;      /* Packet Ready Queue read pointer */
-    freg_t prq_wr_ptr;      /* Packet Ready Queue write pointer */
-    freg_t tcq_st_adr;      /* Transmit Complete Queue Start Address*/
-    freg_t tcq_ed_adr;      /* Transmit Complete Queue End Address */
-    freg_t tcq_rd_ptr;      /* Transmit Complete Queue read pointer */
-    freg_t tcq_wr_ptr;      /* Transmit Complete Queue write pointer*/
+    ffreg_t prq_st_adr;     /* Packet Ready Queue Start Address */
+    ffreg_t prq_ed_adr;     /* Packet Ready Queue End Address */
+    ffreg_t prq_rd_ptr;     /* Packet Ready Queue read pointer */
+    ffreg_t prq_wr_ptr;     /* Packet Ready Queue write pointer */
+    ffreg_t tcq_st_adr;     /* Transmit Complete Queue Start Address*/
+    ffreg_t tcq_ed_adr;     /* Transmit Complete Queue End Address */
+    ffreg_t tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
+    ffreg_t tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
     u_int filler38[0x40 - 0x38];
-    freg_t queue_base;      /* Base address for PRQ and TCQ */
-    freg_t desc_base;       /* Base address of descriptor table */
+    ffreg_t queue_base;     /* Base address for PRQ and TCQ */
+    ffreg_t desc_base;      /* Base address of descriptor table */
     u_int filler42[0x45 - 0x42];
-    freg_t mode_reg_0;      /* Mode register 0 */
-    freg_t mode_reg_1;      /* Mode register 1 */
-    freg_t intr_status_reg; /* Interrupt Status register */
-    freg_t mask_reg;        /* Mask Register */
-    freg_t cell_ctr_high1;  /* Total cell transfer count (high) */
-    freg_t cell_ctr_lo1;    /* Total cell transfer count (low) */
-    freg_t state_reg;       /* Status register */
+    ffreg_t mode_reg_0;     /* Mode register 0 */
+    ffreg_t mode_reg_1;     /* Mode register 1 */
+    ffreg_t intr_status_reg;/* Interrupt Status register */
+    ffreg_t mask_reg;       /* Mask Register */
+    ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
+    ffreg_t cell_ctr_lo1;   /* Total cell transfer count (low) */
+    ffreg_t state_reg;      /* Status register */
     u_int filler4c[0x58 - 0x4c];
-    freg_t curr_desc_num;   /* Contains the current descriptor num */
-    freg_t next_desc;       /* Next descriptor */
-    freg_t next_vc;         /* Next VC */
+    ffreg_t curr_desc_num;  /* Contains the current descriptor num */
+    ffreg_t next_desc;      /* Next descriptor */
+    ffreg_t next_vc;        /* Next VC */
     u_int filler5b[0x5d - 0x5b];
-    freg_t present_slot_cnt;/* Present slot count */
+    ffreg_t present_slot_cnt;/* Present slot count */
     u_int filler5e[0x6a - 0x5e];
-    freg_t new_desc_num;    /* New descriptor number */
-    freg_t new_vc;          /* New VC */
-    freg_t sched_tbl_ptr;   /* Schedule table pointer */
-    freg_t vbrwq_wptr;      /* VBR wait queue write pointer */
-    freg_t vbrwq_rptr;      /* VBR wait queue read pointer */
-    freg_t abrwq_wptr;      /* ABR wait queue write pointer */
-    freg_t abrwq_rptr;      /* ABR wait queue read pointer */
-    freg_t ubrwq_wptr;      /* UBR wait queue write pointer */
-    freg_t ubrwq_rptr;      /* UBR wait queue read pointer */
-    freg_t cbr_vc;          /* CBR VC */
-    freg_t vbr_sb_vc;       /* VBR SB VC */
-    freg_t abr_sb_vc;       /* ABR SB VC */
-    freg_t ubr_sb_vc;       /* UBR SB VC */
-    freg_t vbr_next_link;   /* VBR next link */
-    freg_t abr_next_link;   /* ABR next link */
-    freg_t ubr_next_link;   /* UBR next link */
+    ffreg_t new_desc_num;   /* New descriptor number */
+    ffreg_t new_vc;         /* New VC */
+    ffreg_t sched_tbl_ptr;  /* Schedule table pointer */
+    ffreg_t vbrwq_wptr;     /* VBR wait queue write pointer */
+    ffreg_t vbrwq_rptr;     /* VBR wait queue read pointer */
+    ffreg_t abrwq_wptr;     /* ABR wait queue write pointer */
+    ffreg_t abrwq_rptr;     /* ABR wait queue read pointer */
+    ffreg_t ubrwq_wptr;     /* UBR wait queue write pointer */
+    ffreg_t ubrwq_rptr;     /* UBR wait queue read pointer */
+    ffreg_t cbr_vc;         /* CBR VC */
+    ffreg_t vbr_sb_vc;      /* VBR SB VC */
+    ffreg_t abr_sb_vc;      /* ABR SB VC */
+    ffreg_t ubr_sb_vc;      /* UBR SB VC */
+    ffreg_t vbr_next_link;  /* VBR next link */
+    ffreg_t abr_next_link;  /* ABR next link */
+    ffreg_t ubr_next_link;  /* UBR next link */
     u_int filler7a[0x7c-0x7a];
-    freg_t out_rate_head;   /* Out of rate head */
+    ffreg_t out_rate_head;  /* Out of rate head */
     u_int filler7d[0xca-0x7d]; /* pad out to full address space */
-    freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
-    freg_t cell_ctr_lo1_nc; /* Total cell transfer count (low) */
+    ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+    ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
     u_int fillercc[0x100-0xcc]; /* pad out to full address space */
 } ffredn_t;
 
@@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 /* driver_gpio.c */
 int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
 #else
 static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
     return -ENOTSUPP;
 }
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+    return 0;
+}
 #endif /* CONFIG_BCMA_DRIVER_GPIO */
 
 #endif
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
     struct bcma_bus *bus = cc->core->bus;
 
     if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
-        cc->core->id.rev != 0x38) {
+        cc->core->id.rev != 38) {
         bcma_err(bus, "NAND flash on unsupported board!\n");
         return -ENOTSUPP;
     }
@@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 
     return gpiochip_add(chip);
 }
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+    return gpiochip_remove(&cc->gpio);
+}
@@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus)
 void bcma_bus_unregister(struct bcma_bus *bus)
 {
     struct bcma_device *cores[3];
+    int err;
+
+    err = bcma_gpio_unregister(&bus->drv_cc);
+    if (err == -EBUSY)
+        bcma_err(bus, "Some GPIOs are still in use.\n");
+    else if (err)
+        bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
 
     cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
     cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
         pr_info("%s: Setting primary slave to None.\n",
             bond->dev->name);
         bond->primary_slave = NULL;
+        memset(bond->params.primary, 0, sizeof(bond->params.primary));
         bond_select_active_slave(bond);
         goto out;
     }
@@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 
     priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
             IFX_WRITE_LOW_16BIT(mask));
+
+    /* According to C_CAN documentation, the reserved bit
+     * in IFx_MASK2 register is fixed 1
+     */
     priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-            IFX_WRITE_HIGH_16BIT(mask));
+            IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
 
     priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
             IFX_WRITE_LOW_16BIT(id));
@@ -36,13 +36,13 @@
 
 #define DRV_VER "4.4.161.0u"
 #define DRV_NAME "be2net"
-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME "Emulex BladeEngine2"
+#define BE3_NAME "Emulex BladeEngine3"
+#define OC_NAME "Emulex OneConnect"
 #define OC_NAME_BE OC_NAME "(be3)"
 #define OC_NAME_LANCER OC_NAME "(Lancer)"
 #define OC_NAME_SH OC_NAME "(Skyhawk)"
-#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID 0x19a2
 #define EMULEX_VENDOR_ID 0x10df
@@ -25,7 +25,7 @@
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
 static unsigned int num_vfs;
@@ -232,6 +232,7 @@
 #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
 #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
 #define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
 #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
 #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -389,6 +390,12 @@
 
 #define E1000_PBS_16K E1000_PBA_16K
 
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK   0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE          0x00010000
+
 #define IFS_MAX 80
 #define IFS_MIN 40
 #define IFS_RATIO 4
@@ -408,6 +415,7 @@
 #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
 #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
 #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
 #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
@@ -443,6 +451,7 @@
 #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
 #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
 #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
 #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
|
@ -309,6 +309,8 @@ struct e1000_adapter {
|
||||
|
||||
struct napi_struct napi;
|
||||
|
||||
unsigned int uncorr_errors; /* uncorrectable ECC errors */
|
||||
unsigned int corr_errors; /* correctable ECC errors */
|
||||
unsigned int restart_queue;
|
||||
u32 txd_cmd;
|
||||
|
||||
|
@@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
     E1000_STAT("dropped_smbus", stats.mgpdc),
     E1000_STAT("rx_dma_failed", rx_dma_failed),
     E1000_STAT("tx_dma_failed", tx_dma_failed),
+    E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+    E1000_STAT("corr_ecc_errors", corr_errors),
 };
 
 #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -77,6 +77,7 @@ enum e1e_registers {
 #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
     E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
     E1000_PBS      = 0x01008, /* Packet Buffer Size */
+    E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
     E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
     E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */
     E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */
@@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
     if (hw->mac.type == e1000_ich8lan)
         reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
     ew32(RFCTL, reg);
+
+    /* Enable ECC on Lynxpoint */
+    if (hw->mac.type == e1000_pch_lpt) {
+        reg = er32(PBECCSTS);
+        reg |= E1000_PBECCSTS_ECC_ENABLE;
+        ew32(PBECCSTS, reg);
+
+        reg = er32(CTRL);
+        reg |= E1000_CTRL_MEHE;
+        ew32(CTRL, reg);
+    }
 }
 
 /**
@@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
         mod_timer(&adapter->watchdog_timer, jiffies + 1);
     }
 
+    /* Reset on uncorrectable ECC error */
+    if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+        u32 pbeccsts = er32(PBECCSTS);
+
+        adapter->corr_errors +=
+            pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+        adapter->uncorr_errors +=
+            (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+            E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+        /* Do the reset outside of interrupt context */
+        schedule_work(&adapter->reset_task);
+
+        /* return immediately since reset is imminent */
+        return IRQ_HANDLED;
+    }
+
     if (napi_schedule_prep(&adapter->napi)) {
         adapter->total_tx_bytes = 0;
         adapter->total_tx_packets = 0;
@@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
         mod_timer(&adapter->watchdog_timer, jiffies + 1);
     }
 
+    /* Reset on uncorrectable ECC error */
+    if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+        u32 pbeccsts = er32(PBECCSTS);
+
+        adapter->corr_errors +=
+            pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+        adapter->uncorr_errors +=
+            (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+            E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+        /* Do the reset outside of interrupt context */
+        schedule_work(&adapter->reset_task);
+
+        /* return immediately since reset is imminent */
+        return IRQ_HANDLED;
+    }
+
     if (napi_schedule_prep(&adapter->napi)) {
         adapter->total_tx_bytes = 0;
         adapter->total_tx_packets = 0;
@@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
     if (adapter->msix_entries) {
         ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
         ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+    } else if (hw->mac.type == e1000_pch_lpt) {
+        ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
     } else {
         ew32(IMS, IMS_ENABLE_MASK);
     }
@@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
     adapter->stats.mgptc += er32(MGTPTC);
     adapter->stats.mgprc += er32(MGTPRC);
     adapter->stats.mgpdc += er32(MGTPDC);
+
+    /* Correctable ECC Errors */
+    if (hw->mac.type == e1000_pch_lpt) {
+        u32 pbeccsts = er32(PBECCSTS);
+        adapter->corr_errors +=
+            pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+        adapter->uncorr_errors +=
+            (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+            E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+    }
 }
 
 /**
@@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)
                  rp->tx_skbuff[entry]->len,
                  PCI_DMA_TODEVICE);
         }
-        dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+        dev_kfree_skb(rp->tx_skbuff[entry]);
         rp->tx_skbuff[entry] = NULL;
         entry = (++rp->dirty_tx) % TX_RING_SIZE;
     }
@@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)
     if (intr_status & IntrPCIErr)
         netif_warn(rp, hw, dev, "PCI error\n");
 
-    napi_disable(&rp->napi);
-    rhine_irq_disable(rp);
-    /* Slow and safe. Consider __napi_schedule as a replacement ? */
-    napi_enable(&rp->napi);
-    napi_schedule(&rp->napi);
+    iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
 
 out_unlock:
     mutex_unlock(&rp->task_lock);
@@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
 }
 
 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
-                u16 queue_index)
+                struct tun_file *tfile)
 {
     struct hlist_head *head;
     struct tun_flow_entry *e;
     unsigned long delay = tun->ageing_time;
+    u16 queue_index = tfile->queue_index;
 
     if (!rxhash)
         return;

@@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 
     rcu_read_lock();
 
-    if (tun->numqueues == 1)
+    /* We may get a very small possibility of OOO during switching, not
+     * worth to optimize.*/
+    if (tun->numqueues == 1 || tfile->detached)
         goto unlock;
 
     e = tun_flow_find(head, rxhash);

@@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 
     tun = rtnl_dereference(tfile->tun);
 
-    if (tun) {
+    if (tun && !tfile->detached) {
         u16 index = tfile->queue_index;
         BUG_ON(index >= tun->numqueues);
         dev = tun->dev;
 
         rcu_assign_pointer(tun->tfiles[index],
                    tun->tfiles[tun->numqueues - 1]);
-        rcu_assign_pointer(tfile->tun, NULL);
         ntfile = rtnl_dereference(tun->tfiles[index]);
         ntfile->queue_index = index;
 
         --tun->numqueues;
-        if (clean)
+        if (clean) {
+            rcu_assign_pointer(tfile->tun, NULL);
             sock_put(&tfile->sk);
-        else
+        } else
             tun_disable_queue(tun, tfile);
 
         synchronize_net();

@@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
     }
 
     if (clean) {
-        if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
-            !(tun->flags & TUN_PERSIST))
-            if (tun->dev->reg_state == NETREG_REGISTERED)
+        if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+            netif_carrier_off(tun->dev);
+
+            if (!(tun->flags & TUN_PERSIST) &&
+                tun->dev->reg_state == NETREG_REGISTERED)
                 unregister_netdevice(tun->dev);
+        }
 
         BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
                  &tfile->socket.flags));

@@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
         rcu_assign_pointer(tfile->tun, NULL);
         --tun->numqueues;
     }
+    list_for_each_entry(tfile, &tun->disabled, next) {
+        wake_up_all(&tfile->wq.wait);
+        rcu_assign_pointer(tfile->tun, NULL);
+    }
     BUG_ON(tun->numqueues != 0);
 
     synchronize_net();

@@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
         goto out;
 
     err = -EINVAL;
-    if (rtnl_dereference(tfile->tun))
+    if (rtnl_dereference(tfile->tun) && !tfile->detached)
         goto out;
 
     err = -EBUSY;

@@ -1199,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
     tun->dev->stats.rx_packets++;
     tun->dev->stats.rx_bytes += len;
 
-    tun_flow_update(tun, rxhash, tfile->queue_index);
+    tun_flow_update(tun, rxhash, tfile);
     return total_len;
 }
 

@@ -1658,9 +1668,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
             device_create_file(&tun->dev->dev, &dev_attr_owner) ||
             device_create_file(&tun->dev->dev, &dev_attr_group))
             pr_err("Failed to create tun sysfs files\n");
     }
 
+    netif_carrier_on(tun->dev);
     }
 
     tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 

@@ -1813,7 +1823,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
         ret = tun_attach(tun, file);
     } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
         tun = rtnl_dereference(tfile->tun);
-        if (!tun || !(tun->flags & TUN_TAP_MQ))
+        if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
             ret = -EINVAL;
         else
             __tun_detach(tfile, false);
@@ -1215,6 +1215,9 @@ static const struct usb_device_id cdc_devs[] = {
     { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
       .driver_info = (unsigned long)&wwan_info,
     },
+    { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+      .driver_info = (unsigned long)&wwan_info,
+    },
 
     /* Infineon(now Intel) HSPA Modem platform */
     { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
         USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
         .driver_info = (unsigned long)&qmi_wwan_info,
     },
+    { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
+        USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+        .driver_info = (unsigned long)&qmi_wwan_info,
+    },
 
     /* 2. Combined interface devices matching on class+protocol */
     { /* Huawei E367 and possibly others in "Windows mode" */

@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
         USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
         .driver_info = (unsigned long)&qmi_wwan_info,
     },
+    { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
+        USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
+        .driver_info = (unsigned long)&qmi_wwan_info,
+    },
+    { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
+        USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
+        .driver_info = (unsigned long)&qmi_wwan_info,
+    },
     { /* Pantech UML290, P4200 and more */
         USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
         .driver_info = (unsigned long)&qmi_wwan_info,

@@ -461,6 +473,7 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
     {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
     {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
+    {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
 
     /* 4. Gobi 1000 devices */
     {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
     unsigned long lockflags;
     size_t size = dev->rx_urb_size;
 
+    /* prevent rx skb allocation when error ratio is high */
+    if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+        usb_free_urb(urb);
+        return -ENOLINK;
+    }
+
     skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
     if (!skb) {
         netif_dbg(dev, rx_err, dev->net, "no rx skb\n");

@@ -539,6 +545,17 @@ block:
         break;
     }
 
+    /* stop rx if packet error rate is high */
+    if (++dev->pkt_cnt > 30) {
+        dev->pkt_cnt = 0;
+        dev->pkt_err = 0;
+    } else {
+        if (state == rx_cleanup)
+            dev->pkt_err++;
+        if (dev->pkt_err > 20)
+            set_bit(EVENT_RX_KILL, &dev->flags);
+    }
+
     state = defer_bh(dev, skb, &dev->rxq, state);
 
     if (urb) {

@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
            (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
            "simple");
 
+    /* reset rx error state */
+    dev->pkt_cnt = 0;
+    dev->pkt_err = 0;
+    clear_bit(EVENT_RX_KILL, &dev->flags);
+
     // delay posting reads until we're fully open
     tasklet_schedule (&dev->bh);
     if (info->manage_power) {

@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
     if (info->tx_fixup) {
         skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
         if (!skb) {
-            if (netif_msg_tx_err(dev)) {
+            /* packet collected; minidriver waiting for more */
+            if (info->flags & FLAG_MULTI_PACKET)
+                goto not_drop;
             netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
             goto drop;
-            } else {
-                /* cdc_ncm collected packet; waits for more */
-                goto not_drop;
-            }
         }
     }
     length = skb->len;

@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
         }
     }
 
+    /* restart RX again after disabling due to high error rate */
+    clear_bit(EVENT_RX_KILL, &dev->flags);
+
     // waiting for all pending urbs to complete?
     if (dev->wait) {
         if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
@@ -154,7 +154,6 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
     if (ret & 1) { /* Link is up. */
         printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
                adapter->netdev->name, adapter->link_speed);
-        if (!netif_carrier_ok(adapter->netdev))
-            netif_carrier_on(adapter->netdev);
+        netif_carrier_on(adapter->netdev);
 
         if (affectTxQueue) {

@@ -165,7 +164,6 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
     } else {
         printk(KERN_INFO "%s: NIC Link is Down\n",
                adapter->netdev->name);
-        if (netif_carrier_ok(adapter->netdev))
-            netif_carrier_off(adapter->netdev);
+        netif_carrier_off(adapter->netdev);
 
         if (affectTxQueue) {

@@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
     netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
     netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
 
+    netif_carrier_off(netdev);
     err = register_netdev(netdev);
 
     if (err) {
@@ -36,6 +36,7 @@
 #include "debug.h"
 
 #define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
+#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
 
 /* Flags we support */
 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \

@@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
     wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
 }
 
+static bool brcms_tx_flush_completed(struct brcms_info *wl)
+{
+    bool result;
+
+    spin_lock_bh(&wl->lock);
+    result = brcms_c_tx_flush_completed(wl->wlc);
+    spin_unlock_bh(&wl->lock);
+    return result;
+}
+
 static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
 {
     struct brcms_info *wl = hw->priv;
+    int ret;
 
     no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
 
-    /* wait for packet queue and dma fifos to run empty */
-    spin_lock_bh(&wl->lock);
-    brcms_c_wait_for_tx_completion(wl->wlc, drop);
-    spin_unlock_bh(&wl->lock);
+    ret = wait_event_timeout(wl->tx_flush_wq,
+                 brcms_tx_flush_completed(wl),
+                 msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+    brcms_dbg_mac80211(wl->wlc->hw->d11core,
+               "ret=%d\n", jiffies_to_msecs(ret));
 }
 
 static const struct ieee80211_ops brcms_ops = {

@@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data)
 
  done:
     spin_unlock_bh(&wl->lock);
+    wake_up(&wl->tx_flush_wq);
 }
 
 /*

@@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
 
     atomic_set(&wl->callbacks, 0);
 
+    init_waitqueue_head(&wl->tx_flush_wq);
+
     /* setup the bottom half handler */
     tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
 

@@ -1609,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
     spin_lock_bh(&wl->lock);
     return blocked;
 }
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
-    spin_unlock_bh(&wl->lock);
-    msleep(ms);
-    spin_lock_bh(&wl->lock);
-}
@@ -68,6 +68,8 @@ struct brcms_info {
     spinlock_t lock;     /* per-device perimeter lock */
     spinlock_t isr_lock; /* per-device ISR synchronization lock */
 
+    /* tx flush */
+    wait_queue_head_t tx_flush_wq;
+
     /* timer related fields */
     atomic_t callbacks;  /* # outstanding callback functions */
 

@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
 extern void brcms_free_timer(struct brcms_timer *timer);
 extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
 extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
 extern void brcms_dpc(unsigned long data);
 extern void brcms_timer(struct brcms_timer *t);
 extern void brcms_fatal_error(struct brcms_info *wl);
@@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 static bool
 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 {
-    bool morepending = false;
     struct bcma_device *core;
     struct tx_status txstatus, *txs;
     u32 s1, s2;

@@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
     txs = &txstatus;
     core = wlc_hw->d11core;
     *fatal = false;
-    s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
-    while (!(*fatal)
-           && (s1 & TXS_V)) {
-        /* !give others some time to run! */
-        if (n >= max_tx_num) {
-            morepending = true;
-            break;
-        }
 
+    while (n < max_tx_num) {
+        s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
         if (s1 == 0xffffffff) {
             brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
                   __func__);
             *fatal = true;
             return false;
         }
+        s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+        /* only process when valid */
+        if (!(s1 & TXS_V))
+            break;
 
-        s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
         txs->status = s1 & TXS_STATUS_MASK;
         txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
         txs->sequence = s2 & TXS_SEQ_MASK;

@@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
         txs->lasttxtime = 0;
 
         *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
-        s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+        if (*fatal == true)
+            return false;
+        n++;
     }
 
-    if (*fatal)
-        return false;
-
-    return morepending;
+    return n >= max_tx_num;
 }
 
 static void brcms_c_tbtt(struct brcms_c_info *wlc)

@@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
     return wlc->band->bandunit;
 }
 
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
 {
-    int timeout = 20;
     int i;
 
     /* Kick DMA to send any pending AMPDU */
     for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
         if (wlc->hw->di[i])
-            dma_txflush(wlc->hw->di[i]);
+            dma_kick_tx(wlc->hw->di[i]);
 
-    /* wait for queue and DMA fifos to run dry */
-    while (brcms_txpktpendtot(wlc) > 0) {
-        brcms_msleep(wlc->wl, 1);
-
-        if (--timeout == 0)
-            break;
-    }
-
-    WARN_ON_ONCE(timeout == 0);
+    return !brcms_txpktpendtot(wlc);
 }
 
 void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
@@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
 extern void brcms_c_scan_start(struct brcms_c_info *wlc);
 extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
 extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
-                       bool drop);
 extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
 extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
 extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,

@@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
 extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
 extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
 extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
 
 #endif /* _BRCM_PUB_H_ */
|
@ -1153,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
|
||||
next_reclaimed = ssn;
|
||||
}
|
||||
|
||||
if (tid != IWL_TID_NON_QOS) {
|
||||
priv->tid_data[sta_id][tid].next_reclaimed =
|
||||
next_reclaimed;
|
||||
IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
|
||||
next_reclaimed);
|
||||
}
|
||||
|
||||
iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
|
||||
|
||||
iwlagn_check_ratid_empty(priv, sta_id, tid);
|
||||
@ -1203,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
|
||||
if (!is_agg)
|
||||
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
|
||||
|
||||
/*
|
||||
* W/A for FW bug - the seq_ctl isn't updated when the
|
||||
* queues are flushed. Fetch it from the packet itself
|
||||
*/
|
||||
if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
|
||||
next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
|
||||
next_reclaimed =
|
||||
SEQ_TO_SN(next_reclaimed + 0x10);
|
||||
}
|
||||
|
||||
is_offchannel_skb =
|
||||
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
|
||||
freed++;
|
||||
}
|
||||
|
||||
if (tid != IWL_TID_NON_QOS) {
|
||||
priv->tid_data[sta_id][tid].next_reclaimed =
|
||||
next_reclaimed;
|
||||
IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
|
||||
next_reclaimed);
|
||||
}
|
||||
|
||||
WARN_ON(!is_agg && freed != 1);
|
||||
|
||||
/*
|
||||
|
@@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
         dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
             scan_rsp->number_of_sets);
         ret = -1;
-        goto done;
+        goto check_next_scan;
     }
 
     bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);

@@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
         if (!beacon_size || beacon_size > bytes_left) {
             bss_info += bytes_left;
             bytes_left = 0;
-            return -1;
+            ret = -1;
+            goto check_next_scan;
         }
 
         /* Initialize the current working beacon pointer for this BSS

@@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                 dev_err(priv->adapter->dev,
                     "%s: bytes left < IE length\n",
                     __func__);
-                goto done;
+                goto check_next_scan;
             }
             if (element_id == WLAN_EID_DS_PARAMS) {
                 channel = *(current_ptr + sizeof(struct ieee_types_header));

@@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
         }
     }
 
+check_next_scan:
     spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
     if (list_empty(&adapter->scan_pending_q)) {
         spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);

@@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
         }
     }
 
-done:
     return ret;
 }
 
@@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                      is_tx ? "Tx" : "Rx");
 
             if (is_tx) {
-                rtl_lps_leave(hw);
+                schedule_work(&rtlpriv->
+                          works.lps_leave_work);
                 ppsc->last_delaylps_stamp_jiffies =
                     jiffies;
             }

@@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
         }
     } else if (ETH_P_ARP == ether_type) {
         if (is_tx) {
-            rtl_lps_leave(hw);
+            schedule_work(&rtlpriv->works.lps_leave_work);
             ppsc->last_delaylps_stamp_jiffies = jiffies;
         }
 

@@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
              "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
 
         if (is_tx) {
-            rtl_lps_leave(hw);
+            schedule_work(&rtlpriv->works.lps_leave_work);
             ppsc->last_delaylps_stamp_jiffies = jiffies;
         }
 

@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
     WARN_ON(skb_queue_empty(&rx_queue));
     while (!skb_queue_empty(&rx_queue)) {
         _skb = skb_dequeue(&rx_queue);
-        _rtl_usb_rx_process_agg(hw, skb);
-        ieee80211_rx_irqsafe(hw, skb);
+        _rtl_usb_rx_process_agg(hw, _skb);
+        ieee80211_rx_irqsafe(hw, _skb);
     }
 }
 
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 

@@ -343,10 +343,10 @@ err:
     return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
     struct net_device *dev = vif->dev;
-    if (netif_carrier_ok(dev)) {
+
     rtnl_lock();
     netif_carrier_off(dev); /* discard queued packets */
     if (netif_running(dev))

@@ -355,6 +355,11 @@ void xenvif_disconnect(struct xenvif *vif)
     xenvif_put(vif);
 }
 
+void xenvif_disconnect(struct xenvif *vif)
+{
+    if (netif_carrier_ok(vif->dev))
+        xenvif_carrier_off(vif);
+
     atomic_dec(&vif->refcnt);
     wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
 
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
     atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                  u8 status);
 static void make_tx_response(struct xenvif *vif,
                  struct xen_netif_tx_request *txp,
                  s8 st);

@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
 
     do {
         make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-        if (cons >= end)
+        if (cons == end)
             break;
         txp = RING_GET_REQUEST(&vif->tx, cons++);
     } while (1);

@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
     xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+    netdev_err(vif->dev, "fatal error; disabling device\n");
+    xenvif_carrier_off(vif);
+    xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
                 struct xen_netif_tx_request *first,
                 struct xen_netif_tx_request *txp,

@@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
     do {
         if (frags >= work_to_do) {
-            netdev_dbg(vif->dev, "Need more frags\n");
+            netdev_err(vif->dev, "Need more frags\n");
+            netbk_fatal_tx_err(vif);
             return -frags;
         }
 
         if (unlikely(frags >= MAX_SKB_FRAGS)) {
-            netdev_dbg(vif->dev, "Too many frags\n");
+            netdev_err(vif->dev, "Too many frags\n");
+            netbk_fatal_tx_err(vif);
             return -frags;
         }
 
        memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
               sizeof(*txp));
        if (txp->size > first->size) {
-            netdev_dbg(vif->dev, "Frags galore\n");
+            netdev_err(vif->dev, "Frag is bigger than frame.\n");
+            netbk_fatal_tx_err(vif);
             return -frags;
         }
 

@@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,
         frags++;
 
         if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-            netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+            netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
                    txp->offset, txp->size);
+            netbk_fatal_tx_err(vif);
             return -frags;
         }
     } while ((txp++)->flags & XEN_NETTXF_more_data);

@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
         pending_idx = netbk->pending_ring[index];
         page = xen_netbk_alloc_page(netbk, skb, pending_idx);
         if (!page)
-            return NULL;
+            goto err;
 
         gop->source.u.ref = txp->gref;
         gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
     }
 
     return gop;
+err:
+    /* Unwind, freeing all pages and sending error responses. */
+    while (i-- > start) {
+        xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+                      XEN_NETIF_RSP_ERROR);
+    }
+    /* The head too, if necessary. */
+    if (start)
+        xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+    return NULL;
 }
 
 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,

@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
     struct gnttab_copy *gop = *gopp;
     u16 pending_idx = *((u16 *)skb->data);
-    struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-    struct xenvif *vif = pending_tx_info[pending_idx].vif;
-    struct xen_netif_tx_request *txp;
     struct skb_shared_info *shinfo = skb_shinfo(skb);
     int nr_frags = shinfo->nr_frags;
     int i, err, start;
 
     /* Check status of header. */
     err = gop->status;
-    if (unlikely(err)) {
-        pending_ring_idx_t index;
-        index = pending_index(netbk->pending_prod++);
-        txp = &pending_tx_info[pending_idx].req;
-        make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-        netbk->pending_ring[index] = pending_idx;
-        xenvif_put(vif);
-    }
+    if (unlikely(err))
+        xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
     /* Skip first skb fragment if it is on same page as header fragment. */
     start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
     for (i = start; i < nr_frags; i++) {
         int j, newerr;
-        pending_ring_idx_t index;
 
         pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 

@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
         if (likely(!newerr)) {
             /* Had a previous error? Invalidate this fragment. */
             if (unlikely(err))
-                xen_netbk_idx_release(netbk, pending_idx);
+                xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
             continue;
         }
 
         /* Error on this fragment: respond to client with an error. */
-        txp = &netbk->pending_tx_info[pending_idx].req;
-        make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-        index = pending_index(netbk->pending_prod++);
-        netbk->pending_ring[index] = pending_idx;
-        xenvif_put(vif);
+        xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
         /* Not the first error? Preceding frags already invalidated. */
         if (err)

@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 
         /* First error: invalidate header and preceding fragments. */
         pending_idx = *((u16 *)skb->data);
-        xen_netbk_idx_release(netbk, pending_idx);
+        xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
         for (j = start; j < i; j++) {
             pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-            xen_netbk_idx_release(netbk, pending_idx);
+            xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
         }
 
         /* Remember the error: invalidate all subsequent fragments. */

@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
         /* Take an extra reference to offset xen_netbk_idx_release */
         get_page(netbk->mmap_pages[pending_idx]);
-        xen_netbk_idx_release(netbk, pending_idx);
+        xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
     }
 }
 

@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
     do {
         if (unlikely(work_to_do-- <= 0)) {
-            netdev_dbg(vif->dev, "Missing extra info\n");
+            netdev_err(vif->dev, "Missing extra info\n");
+            netbk_fatal_tx_err(vif);
             return -EBADR;
         }
 

@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
         if (unlikely(!extra.type ||
                  extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
             vif->tx.req_cons = ++cons;
-            netdev_dbg(vif->dev,
+            netdev_err(vif->dev,
                    "Invalid extra type: %d\n", extra.type);
+            netbk_fatal_tx_err(vif);
             return -EINVAL;
         }
 

@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
                  struct xen_netif_extra_info *gso)
 {
     if (!gso->u.gso.size) {
-        netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+        netdev_err(vif->dev, "GSO size must not be zero.\n");
+        netbk_fatal_tx_err(vif);
         return -EINVAL;
     }
 
     /* Currently only TCPv4 S.O. is supported. */
     if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-        netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+        netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+        netbk_fatal_tx_err(vif);
         return -EINVAL;
     }
 
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
         /* Get a netif from the list with work to do. */
         vif = poll_net_schedule_list(netbk);
+        /* This can sometimes happen because the test of
+         * list_empty(net_schedule_list) at the top of the
+         * loop is unlocked. Just go back and have another
+         * look.
+         */
         if (!vif)
             continue;
 
+        if (vif->tx.sring->req_prod - vif->tx.req_cons >
+            XEN_NETIF_TX_RING_SIZE) {
+            netdev_err(vif->dev,
+                   "Impossible number of requests. "
+                   "req_prod %d, req_cons %d, size %ld\n",
+                   vif->tx.sring->req_prod, vif->tx.req_cons,
+                   XEN_NETIF_TX_RING_SIZE);
+            netbk_fatal_tx_err(vif);
+            continue;
+        }
+
         RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
         if (!work_to_do) {
             xenvif_put(vif);

@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
             work_to_do = xen_netbk_get_extras(vif, extras,
                               work_to_do);
             idx = vif->tx.req_cons;
-            if (unlikely(work_to_do < 0)) {
-                netbk_tx_err(vif, &txreq, idx);
+            if (unlikely(work_to_do < 0))
                 continue;
-            }
         }
 
         ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-        if (unlikely(ret < 0)) {
-            netbk_tx_err(vif, &txreq, idx - ret);
+        if (unlikely(ret < 0))
             continue;
-        }
 
         idx += ret;
 

@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
         /* No crossing a page as the payload mustn't fragment. */
         if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-            netdev_dbg(vif->dev,
+            netdev_err(vif->dev,
                    "txreq.offset: %x, size: %u, end: %lu\n",
                    txreq.offset, txreq.size,
                    (txreq.offset&~PAGE_MASK) + txreq.size);
-            netbk_tx_err(vif, &txreq, idx);
+            netbk_fatal_tx_err(vif);
             continue;
         }
 

@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
             gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
             if (netbk_set_skb_gso(vif, skb, gso)) {
+                /* Failure in netbk_set_skb_gso is fatal. */
                 kfree_skb(skb);
-                netbk_tx_err(vif, &txreq, idx);
                 continue;
             }
         }

@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
             txp->size -= data_len;
         } else {
             /* Schedule a response immediately. */
-            xen_netbk_idx_release(netbk, pending_idx);
+            xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
         }
 
         if (txp->flags & XEN_NETTXF_csum_blank)

@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
     xen_netbk_tx_submit(netbk);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                  u8 status)
 {
     struct xenvif *vif;
     struct pending_tx_info *pending_tx_info;

@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
 
     vif = pending_tx_info->vif;
 
-    make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+    make_tx_response(vif, &pending_tx_info->req, status);
 
     index = pending_index(netbk->pending_prod++);
     netbk->pending_ring[index] = pending_idx;
@@ -174,3 +174,15 @@ int ssb_gpio_init(struct ssb_bus *bus)
 
     return -1;
 }
+
+int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+    if (ssb_chipco_available(&bus->chipco) ||
+        ssb_extif_available(&bus->extif)) {
+        return gpiochip_remove(&bus->gpio);
+    } else {
+        SSB_WARN_ON(1);
+    }
+
+    return -1;
+}
@@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus)
 
 void ssb_bus_unregister(struct ssb_bus *bus)
 {
+    int err;
+
+    err = ssb_gpio_unregister(bus);
+    if (err == -EBUSY)
+        ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n");
+    else if (err)
+        ssb_dprintk(KERN_ERR PFX
+                "Can not unregister GPIO driver: %i\n", err);
+
     ssb_buses_lock();
     ssb_devices_unregister(bus);
     list_del(&bus->list);
@@ -252,11 +252,16 @@ static inline void ssb_extif_init(struct ssb_extif *extif)
 
 #ifdef CONFIG_SSB_DRIVER_GPIO
 extern int ssb_gpio_init(struct ssb_bus *bus);
+extern int ssb_gpio_unregister(struct ssb_bus *bus);
 #else /* CONFIG_SSB_DRIVER_GPIO */
 static inline int ssb_gpio_init(struct ssb_bus *bus)
 {
     return -ENOTSUPP;
 }
+static inline int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+    return 0;
+}
 #endif /* CONFIG_SSB_DRIVER_GPIO */
 
 #endif /* LINUX_SSB_PRIVATE_H_ */
@@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)
 }
 
 /* Caller must have TX VQ lock */
-static void tx_poll_start(struct vhost_net *net, struct socket *sock)
+static int tx_poll_start(struct vhost_net *net, struct socket *sock)
 {
+	int ret;
+
 	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
-		return;
-	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
-	net->tx_poll_state = VHOST_NET_POLL_STARTED;
+		return 0;
+	ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
+	if (!ret)
+		net->tx_poll_state = VHOST_NET_POLL_STARTED;
+	return ret;
 }
 
 /* In case of DMA done not in order in lower device driver for some reason.
@@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,
 		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
 }
 
-static void vhost_net_enable_vq(struct vhost_net *n,
+static int vhost_net_enable_vq(struct vhost_net *n,
 				struct vhost_virtqueue *vq)
 {
 	struct socket *sock;
+	int ret;
 
 	sock = rcu_dereference_protected(vq->private_data,
 					 lockdep_is_held(&vq->mutex));
 	if (!sock)
-		return;
+		return 0;
 	if (vq == n->vqs + VHOST_NET_VQ_TX) {
 		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
-		tx_poll_start(n, sock);
+		ret = tx_poll_start(n, sock);
 	} else
-		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+		ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+
+	return ret;
 }
 
 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
@@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 			r = PTR_ERR(ubufs);
 			goto err_ubufs;
 		}
-		oldubufs = vq->ubufs;
-		vq->ubufs = ubufs;
 
 		vhost_net_disable_vq(n, vq);
 		rcu_assign_pointer(vq->private_data, sock);
-		vhost_net_enable_vq(n, vq);
 
 		r = vhost_init_used(vq);
 		if (r)
-			goto err_vq;
+			goto err_used;
+		r = vhost_net_enable_vq(n, vq);
+		if (r)
+			goto err_used;
+
+		oldubufs = vq->ubufs;
+		vq->ubufs = ubufs;
 
 		n->tx_packets = 0;
 		n->tx_zcopy_err = 0;
@@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	mutex_unlock(&n->dev.mutex);
 	return 0;
 
+err_used:
+	rcu_assign_pointer(vq->private_data, oldsock);
+	vhost_net_enable_vq(n, vq);
+	if (ubufs)
+		vhost_ubuf_put_and_wait(ubufs);
 err_ubufs:
 	fput(sock->file);
 err_vq:
@@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
+	poll->wqh = NULL;
 
 	vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
-void vhost_poll_start(struct vhost_poll *poll, struct file *file)
+int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
 	unsigned long mask;
+	int ret = 0;
 
 	mask = file->f_op->poll(file, &poll->table);
 	if (mask)
 		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+	if (mask & POLLERR) {
+		if (poll->wqh)
+			remove_wait_queue(poll->wqh, &poll->wait);
+		ret = -EINVAL;
+	}
+
+	return ret;
 }
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
 void vhost_poll_stop(struct vhost_poll *poll)
 {
-	remove_wait_queue(poll->wqh, &poll->wait);
+	if (poll->wqh) {
+		remove_wait_queue(poll->wqh, &poll->wait);
+		poll->wqh = NULL;
+	}
 }
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
@@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 		fput(filep);
 
 	if (pollstart && vq->handle_kick)
-		vhost_poll_start(&vq->poll, vq->kick);
+		r = vhost_poll_start(&vq->poll, vq->kick);
 
 	mutex_unlock(&vq->mutex);
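
Note: vhost_poll_start() now reports an error instead of silently registering on a file whose ->poll() raised POLLERR, and vhost_poll_stop() tolerates a poll that never started via the NULL wqh check. The probe-before-register idea looks like this in plain poll(2) terms; the socketpair here is only a stand-in for a backend fd:

#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2];
	struct pollfd pfd;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
		return 1;
	close(sv[1]);            /* peer gone: fd is now in error/hup state */

	pfd.fd = sv[0];
	pfd.events = POLLIN;
	poll(&pfd, 1, 0);        /* non-blocking probe, like ->poll() above */

	if (pfd.revents & (POLLERR | POLLHUP))
		fprintf(stderr, "refusing to register a dead fd\n");
	else
		printf("fd is healthy, safe to keep polling\n");

	close(sv[0]);
	return 0;
}
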
@@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
 
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev);
-void vhost_poll_start(struct vhost_poll *poll, struct file *file);
+int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
@@ -33,6 +33,7 @@ struct usbnet {
 	wait_queue_head_t	*wait;
 	struct mutex		phy_mutex;
 	unsigned char		suspend_count;
+	unsigned char		pkt_cnt, pkt_err;
 
 	/* i/o info: pipes etc */
 	unsigned		in, out;
@@ -70,6 +71,7 @@ struct usbnet {
 #		define EVENT_DEV_OPEN	7
 #		define EVENT_DEVICE_REPORT_IDLE	8
 #		define EVENT_NO_RUNTIME_PM	9
+#		define EVENT_RX_KILL	10
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -100,7 +102,6 @@ struct driver_info {
 #define FLAG_LINK_INTR	0x0800		/* updates link (carrier) status */
 
 #define FLAG_POINTTOPOINT 0x1000	/* possibly use "usb%d" names */
-#define FLAG_NOARP	0x2000	/* device can't do ARP */
 
 /*
  * Indicates to usbnet, that USB driver accumulates multiple IP packets.
@@ -108,6 +109,7 @@ struct driver_info {
  */
 #define FLAG_MULTI_PACKET	0x2000
 #define FLAG_RX_ASSEMBLE	0x4000	/* rx packets may span >1 frames */
+#define FLAG_NOARP		0x8000	/* device can't do ARP */
 
 	/* init device ... can sleep, or cause probe() failure */
 	int	(*bind)(struct usbnet *, struct usb_interface *);
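
Note: the regression existed because FLAG_NOARP was first given the value 0x2000, which FLAG_MULTI_PACKET already owned, so NOARP devices were silently treated as multi-packet devices. Bitmask tables like this can be kept honest at compile time; a sketch using C11 _Static_assert with the values copied from the header above:

/* Illustrative flag table; values mimic the fixed header above. */
#define FLAG_POINTTOPOINT 0x1000
#define FLAG_MULTI_PACKET 0x2000
#define FLAG_RX_ASSEMBLE  0x4000
#define FLAG_NOARP        0x8000

/* Each pairwise AND must be zero if every flag owns a distinct bit. */
_Static_assert((FLAG_NOARP & (FLAG_POINTTOPOINT | FLAG_MULTI_PACKET |
			      FLAG_RX_ASSEMBLE)) == 0,
	       "FLAG_NOARP must not share a bit with other flags");

int main(void) { return 0; }

Had this assertion existed with the old 0x2000 value, the build would have failed instead of shipping the collision.
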
@@ -34,11 +34,11 @@ extern int			udpv6_connect(struct sock *sk,
 					      struct sockaddr *uaddr,
 					      int addr_len);
 
-extern int			datagram_recv_ctl(struct sock *sk,
-						  struct msghdr *msg,
-						  struct sk_buff *skb);
+extern int			ip6_datagram_recv_ctl(struct sock *sk,
+						      struct msghdr *msg,
+						      struct sk_buff *skb);
 
-extern int			datagram_send_ctl(struct net *net,
+extern int			ip6_datagram_send_ctl(struct net *net,
 						  struct sock *sk,
 						  struct msghdr *msg,
 						  struct flowi6 *fl6,
@@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)
 	__u8 reason = hci_proto_disconn_ind(conn);
 
 	switch (conn->type) {
-	case ACL_LINK:
-		hci_acl_disconn(conn, reason);
-		break;
 	case AMP_LINK:
 		hci_amp_disconn(conn, reason);
 		break;
+	default:
+		hci_acl_disconn(conn, reason);
+		break;
 	}
 }
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
 
 	skb_pull(skb, sizeof(code));
 
+	/*
+	 * The SMP context must be initialized for all other PDUs except
+	 * pairing and security requests. If we get any other PDU when
+	 * not initialized simply disconnect (done if this function
+	 * returns an error).
+	 */
+	if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+	    !conn->smp_chan) {
+		BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+		kfree_skb(skb);
+		return -ENOTSUPP;
+	}
+
 	switch (code) {
 	case SMP_CMD_PAIRING_REQ:
 		reason = smp_cmd_pairing_req(conn, skb);
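
Note: the added guard is a validate-before-dispatch check: only the two PDUs that create the SMP context may arrive without one, and anything else is dropped with an error so the link gets torn down. The shape of the check, reduced to a self-contained sketch (the command numbers and session struct are invented):

#include <stdio.h>

enum { CMD_PAIRING_REQ = 1, CMD_PAIRING_CONFIRM = 3, CMD_SECURITY_REQ = 11 };
struct session { void *ctx; };  /* NULL until pairing starts */

static int dispatch(struct session *s, int code)
{
	/* Only the two "opening" commands may arrive without a context. */
	if (code != CMD_PAIRING_REQ && code != CMD_SECURITY_REQ && !s->ctx) {
		fprintf(stderr, "unexpected command 0x%02x, disconnecting\n",
			code);
		return -1;
	}
	printf("handling command 0x%02x\n", code);
	return 0;
}

int main(void)
{
	struct session s = { 0 };

	dispatch(&s, CMD_PAIRING_CONFIRM);  /* rejected: no context yet */
	dispatch(&s, CMD_PAIRING_REQ);      /* allowed to start pairing */
	return 0;
}
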
@@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file,
 			return -EFAULT;
 		i += len;
 		mutex_lock(&pktgen_thread_lock);
-		pktgen_add_device(t, f);
+		ret = pktgen_add_device(t, f);
 		mutex_unlock(&pktgen_thread_lock);
-		ret = count;
-		sprintf(pg_result, "OK: add_device=%s", f);
+		if (!ret) {
+			ret = count;
+			sprintf(pg_result, "OK: add_device=%s", f);
+		} else
+			sprintf(pg_result, "ERROR: can not add device %s", f);
 		goto out;
 	}
@@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->network_header	= old->network_header;
 	new->mac_header		= old->mac_header;
 	new->inner_transport_header = old->inner_transport_header;
-	new->inner_network_header = old->inner_transport_header;
+	new->inner_network_header = old->inner_network_header;
 	skb_dst_copy(new, old);
 	new->rxhash		= old->rxhash;
 	new->ooo_okay		= old->ooo_okay;
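
Note: the fixed line is a one-word copy-paste slip; inner_network_header was being filled from old->inner_transport_header. Long field-copy blocks invite exactly this, and a trivial round-trip check catches it. A sketch of the idiom (struct and fields invented for the example):

#include <assert.h>

struct hdr_offsets { unsigned short transport, network, mac; };

static void copy_offsets(struct hdr_offsets *dst,
			 const struct hdr_offsets *src)
{
	/* Field-by-field copy: each dst field must read the same src field. */
	dst->transport = src->transport;
	dst->network   = src->network;
	dst->mac       = src->mac;
}

int main(void)
{
	struct hdr_offsets a = { 14, 34, 0 };
	struct hdr_offsets b = { 0, 0, 0 };

	copy_offsets(&b, &a);
	/* A swapped source field (the bug above) trips this immediately. */
	assert(a.transport == b.transport);
	assert(a.network == b.network);
	assert(a.mac == b.mac);
	return 0;
}
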
@@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
 	int cnt; /* increase in packets */
 	unsigned int delta = 0;
+	u32 snd_cwnd = tp->snd_cwnd;
+
+	if (unlikely(!snd_cwnd)) {
+		pr_err_once("snd_cwnd is nul, please report this bug.\n");
+		snd_cwnd = 1U;
+	}
 
 	/* RFC3465: ABC Slow start
 	 * Increase only after a full MSS of bytes is acked
@@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)
 	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
 		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
 	else
-		cnt = tp->snd_cwnd;			/* exponential increase */
+		cnt = snd_cwnd;				/* exponential increase */
 
 	/* RFC3465: ABC
 	 * We MAY increase by 2 if discovered delayed ack
@@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)
 		tp->bytes_acked = 0;
 
 	tp->snd_cwnd_cnt += cnt;
-	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-		tp->snd_cwnd_cnt -= tp->snd_cwnd;
+	while (tp->snd_cwnd_cnt >= snd_cwnd) {
+		tp->snd_cwnd_cnt -= snd_cwnd;
 		delta++;
 	}
-	tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+	tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
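
Note: the bug being fixed: with tp->snd_cwnd == 0, the loop `while (tp->snd_cwnd_cnt >= tp->snd_cwnd)` subtracted zero forever. Snapshotting cwnd once (forcing at least 1), accumulating whole windows in delta, and clamping at the end makes the loop provably terminate. The fixed arithmetic, runnable in userspace:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the fixed tcp_slow_start() arithmetic on plain integers. */
static uint32_t slow_start(uint32_t snd_cwnd, uint32_t *cwnd_cnt,
			   uint32_t cnt, uint32_t clamp)
{
	uint32_t delta = 0;

	if (snd_cwnd == 0)        /* defend against the bogus zero window */
		snd_cwnd = 1;

	*cwnd_cnt += cnt;
	while (*cwnd_cnt >= snd_cwnd) {   /* terminates: snd_cwnd >= 1 */
		*cwnd_cnt -= snd_cwnd;
		delta++;
	}
	return (snd_cwnd + delta < clamp) ? snd_cwnd + delta : clamp;
}

int main(void)
{
	uint32_t cnt_acc = 0;

	/* A zero cwnd used to spin forever; now it grows from 1. */
	printf("new cwnd = %u\n", slow_start(0, &cnt_acc, 10, 0xffff));
	return 0;
}
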
@@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
 		}
 	} else {
 		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+			if (!tcp_packets_in_flight(tp)) {
+				tcp_enter_frto_loss(sk, 2, flag);
+				return true;
+			}
+
 			/* Prevent sending of new data. */
 			tp->snd_cwnd = min(tp->snd_cwnd,
 					   tcp_packets_in_flight(tp));
@@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 	 * the remote receives only the retransmitted (regular) SYNs: either
 	 * the original SYN-data or the corresponding SYN-ACK is lost.
 	 */
-	syn_drop = (cookie->len <= 0 && data &&
-		    inet_csk(sk)->icsk_retransmits);
+	syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
 
 	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
@@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		 * errors returned from accept().
 		 */
 		inet_csk_reqsk_queue_drop(sk, req, prev);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 		goto out;
 
 	case TCP_SYN_SENT:
@@ -1500,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 * clogging syn queue with openreqs with exponentially increasing
 	 * timeout.
 	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
+	}
 
 	req = inet_reqsk_alloc(&tcp_request_sock_ops);
 	if (!req)
@@ -1666,6 +1669,7 @@ drop_and_release:
 drop_and_free:
 	reqsk_free(req);
 drop:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
@@ -1660,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
 	if (dev->addr_len != IEEE802154_ADDR_LEN)
 		return -1;
 	memcpy(eui, dev->dev_addr, 8);
+	eui[0] ^= 2;
 	return 0;
 }
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 	if (skb->protocol == htons(ETH_P_IPV6)) {
 		sin->sin6_addr = ipv6_hdr(skb)->saddr;
 		if (np->rxopt.all)
-			datagram_recv_ctl(sk, msg, skb);
+			ip6_datagram_recv_ctl(sk, msg, skb);
 		if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
 			sin->sin6_scope_id = IP6CB(skb)->iif;
 	} else {
@@ -468,7 +468,8 @@ out:
 }
 
 
-int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+			  struct sk_buff *skb)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet6_skb_parm *opt = IP6CB(skb);
@@ -597,8 +598,9 @@ int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
 
-int datagram_send_ctl(struct net *net, struct sock *sk,
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 		      struct msghdr *msg, struct flowi6 *fl6,
 		      struct ipv6_txoptions *opt,
 		      int *hlimit, int *tclass, int *dontfrag)
@@ -871,4 +873,4 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 exit_f:
 	return err;
 }
-EXPORT_SYMBOL_GPL(datagram_send_ctl);
+EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
@@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
 		msg.msg_control = (void*)(fl->opt+1);
 		memset(&flowi6, 0, sizeof(flowi6));
 
-		err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
-					&junk, &junk);
+		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
+					    &junk, &junk, &junk);
 		if (err)
 			goto done;
 		err = -EINVAL;
@@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
 	int ret;
 
 	if (!ip6_tnl_xmit_ctl(t))
-		return -1;
+		goto tx_err;
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
@@ -476,8 +476,8 @@ sticky_done:
 		msg.msg_controllen = optlen;
 		msg.msg_control = (void*)(opt+1);
 
-		retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
-					 &junk);
+		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
+					     &junk, &junk);
 		if (retv)
 			goto done;
 update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 		release_sock(sk);
 
 		if (skb) {
-			int err = datagram_recv_ctl(sk, &msg, skb);
+			int err = ip6_datagram_recv_ctl(sk, &msg, skb);
 			kfree_skb(skb);
 			if (err)
 				return err;
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	sock_recv_ts_and_drops(msg, sk, skb);
 
 	if (np->rxopt.all)
-		datagram_recv_ctl(sk, msg, skb);
+		ip6_datagram_recv_ctl(sk, msg, skb);
 
 	err = copied;
 	if (flags & MSG_TRUNC)
@@ -822,7 +822,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
 
-		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-					&hlimit, &tclass, &dontfrag);
+		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					    &hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
@@ -928,7 +928,7 @@ restart:
 	dst_hold(&rt->dst);
 	read_unlock_bh(&table->tb6_lock);
 
-	if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
+	if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
 		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
 	else if (!(rt->dst.flags & DST_HOST))
 		nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		}
 
 		inet_csk_reqsk_queue_drop(sk, req, prev);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 		goto out;
 
 	case TCP_SYN_SENT:
@@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 			goto drop;
 	}
 
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
+	}
 
 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
 	if (req == NULL)
@@ -1108,6 +1111,7 @@ drop_and_release:
 drop_and_free:
 	reqsk_free(req);
 drop:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0; /* don't send reset */
 }
@@ -443,7 +443,7 @@ try_again:
 			ip_cmsg_recv(msg, skb);
 	} else {
 		if (np->rxopt.all)
-			datagram_recv_ctl(sk, msg, skb);
+			ip6_datagram_recv_ctl(sk, msg, skb);
 	}
 
 	err = copied;
@@ -1153,7 +1153,7 @@ do_udp_sendmsg:
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(*opt);
 
-		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-					&hlimit, &tclass, &dontfrag);
+		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					    &hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
 
 }
 
+/* Lookup the tunnel socket, possibly involving the fs code if the socket is
+ * owned by userspace. A struct sock returned from this function must be
+ * released using l2tp_tunnel_sock_put once you're done with it.
+ */
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
+{
+	int err = 0;
+	struct socket *sock = NULL;
+	struct sock *sk = NULL;
+
+	if (!tunnel)
+		goto out;
+
+	if (tunnel->fd >= 0) {
+		/* Socket is owned by userspace, who might be in the process
+		 * of closing it. Look the socket up using the fd to ensure
+		 * consistency.
+		 */
+		sock = sockfd_lookup(tunnel->fd, &err);
+		if (sock)
+			sk = sock->sk;
+	} else {
+		/* Socket is owned by kernelspace */
+		sk = tunnel->sock;
+	}
+
+out:
+	return sk;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
+
+/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
+void l2tp_tunnel_sock_put(struct sock *sk)
+{
+	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+	if (tunnel) {
+		if (tunnel->fd >= 0) {
+			/* Socket is owned by userspace */
+			sockfd_put(sk->sk_socket);
+		}
+		sock_put(sk);
+	}
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
+
 /* Lookup a session by id in the global session list
  */
 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 	struct udphdr *uh;
 	struct inet_sock *inet;
 	__wsum csum;
-	int old_headroom;
-	int new_headroom;
 	int headroom;
 	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
 	int udp_len;
@@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 	 */
 	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
 		   uhlen + hdr_len;
-	old_headroom = skb_headroom(skb);
 	if (skb_cow_head(skb, headroom)) {
 		kfree_skb(skb);
 		return NET_XMIT_DROP;
 	}
 
-	new_headroom = skb_headroom(skb);
 	skb_orphan(skb);
-	skb->truesize += new_headroom - old_headroom;
 
 	/* Setup L2TP header */
 	session->build_header(session, __skb_push(skb, hdr_len));
@@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 	tunnel->old_sk_destruct = sk->sk_destruct;
 	sk->sk_destruct = &l2tp_tunnel_destruct;
 	tunnel->sock = sk;
+	tunnel->fd = fd;
 	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
 
 	sk->sk_allocation = GFP_ATOMIC;
@@ -1642,14 +1682,21 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
-	int err = 0;
-	struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
+	int err = -EBADF;
+	struct socket *sock = NULL;
+	struct sock *sk = NULL;
+
+	sk = l2tp_tunnel_sock_lookup(tunnel);
+	if (!sk)
+		goto out;
+
+	sock = sk->sk_socket;
+	BUG_ON(!sock);
 
 	/* Force the tunnel socket to close. This will eventually
 	 * cause the tunnel to be deleted via the normal socket close
 	 * mechanisms when userspace closes the tunnel socket.
 	 */
-	if (sock != NULL) {
-		err = inet_shutdown(sock, 2);
+	err = inet_shutdown(sock, 2);
 
 	/* If the tunnel's socket was created by the kernel,
@@ -1658,8 +1705,9 @@ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 	 */
 	if (sock->file == NULL)
 		err = inet_release(sock);
-	}
 
+	l2tp_tunnel_sock_put(sk);
+out:
 	return err;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
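
Note: the race being closed: for a userspace-created tunnel the socket can be closed and freed while l2tp_tunnel_delete() is still using tunnel->sock, so the code now records the owning fd and re-resolves the socket through it, taking a reference that the paired put drops. The ownership/refcount split, reduced to an abstract sketch (all types and helpers here are invented, not the l2tp API):

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted socket and a tunnel that may or may not own it. */
struct sock { int refs; };
struct tunnel { int fd; struct sock *sk; };  /* fd >= 0: userspace-owned */

static struct sock *tunnel_sock_lookup(struct tunnel *t)
{
	if (!t->sk)
		return NULL;
	t->sk->refs++;               /* every lookup takes a reference... */
	return t->sk;
}

static void tunnel_sock_put(struct sock *sk)
{
	if (sk && --sk->refs == 0)   /* ...and every user drops it again */
		free(sk);
}

int main(void)
{
	struct sock *sk = calloc(1, sizeof(*sk));
	struct tunnel t;
	struct sock *held;

	if (!sk)
		return 1;
	sk->refs = 1;                /* creation reference */
	t.fd = 5;
	t.sk = sk;

	held = tunnel_sock_lookup(&t);   /* refs = 2: safe to use */
	if (held) {
		printf("shutting down tunnel on fd %d\n", t.fd);
		tunnel_sock_put(held);       /* refs = 1 */
	}
	tunnel_sock_put(sk);             /* refs = 0: freed */
	return 0;
}
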
@@ -188,7 +188,8 @@ struct l2tp_tunnel {
 	int (*recv_payload_hook)(struct sk_buff *skb);
 	void (*old_sk_destruct)(struct sock *);
 	struct sock		*sock;		/* Parent socket */
-	int			fd;
+	int			fd;		/* Parent fd, if tunnel socket
+						 * was created by userspace */
 
 	uint8_t			priv[0];	/* private data */
 };
@@ -228,6 +229,8 @@ out:
 	return tunnel;
 }
 
+extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+extern void l2tp_tunnel_sock_put(struct sock *sk);
 extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
 extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
 extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
@@ -554,7 +554,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
 
-		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-					&hlimit, &tclass, &dontfrag);
+		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					    &hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
 			    struct msghdr *msg, size_t len, int noblock,
 			    int flags, int *addr_len)
 {
-	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
 	size_t copied = 0;
 	int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
 		lsa->l2tp_scope_id = IP6CB(skb)->iif;
 	}
 
-	if (inet->cmsg_flags)
-		ip_cmsg_recv(msg, skb);
+	if (np->rxopt.all)
+		ip6_datagram_recv_ctl(sk, msg, skb);
 
 	if (flags & MSG_TRUNC)
 		copied = skb->len;
@@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	struct l2tp_session *session;
 	struct l2tp_tunnel *tunnel;
 	struct pppol2tp_session *ps;
-	int old_headroom;
-	int new_headroom;
 	int uhlen, headroom;
 
 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	if (tunnel == NULL)
 		goto abort_put_sess;
 
-	old_headroom = skb_headroom(skb);
 	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
 	headroom = NET_SKB_PAD +
 		   sizeof(struct iphdr) +	/* IP header */
@@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	if (skb_cow_head(skb, headroom))
 		goto abort_put_sess_tun;
 
-	new_headroom = skb_headroom(skb);
-	skb->truesize += new_headroom - old_headroom;
-
 	/* Setup PPP header */
 	__skb_push(skb, sizeof(ppph));
 	skb->data[0] = ppph[0];
@@ -35,10 +35,11 @@
 /* Must be called with rcu_read_lock. */
 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 {
-	if (unlikely(!vport)) {
-		kfree_skb(skb);
-		return;
-	}
+	if (unlikely(!vport))
+		goto error;
+
+	if (unlikely(skb_warn_if_lro(skb)))
+		goto error;
 
 	/* Make our own copy of the packet. Otherwise we will mangle the
 	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
@@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 
 	skb_push(skb, ETH_HLEN);
 	ovs_vport_receive(vport, skb);
+	return;
+
+error:
+	kfree_skb(skb);
 }
 
 /* Called with rcu_read_lock and bottom-halves disabled. */
@@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
 		goto error;
 	}
 
-	if (unlikely(skb_warn_if_lro(skb)))
-		goto error;
-
 	skb->dev = netdev_vport->dev;
 	len = skb->len;
 	dev_queue_xmit(skb);
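
Note: the receive path now funnels both bail-outs through a single error label, the usual kernel idiom for guaranteeing exactly one kfree_skb() per packet no matter how many checks precede delivery. The idiom in plain C:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Single-exit error handling: every bail-out frees the packet exactly once. */
static void receive(const char *port, char *pkt)
{
	if (port == NULL)
		goto error;          /* no destination port */
	if (pkt == NULL || pkt[0] == '!')
		goto error;          /* toy stand-in for the LRO sanity check */

	printf("delivered on %s: %s\n", port, pkt + 1);
	free(pkt);
	return;

error:
	free(pkt);                   /* the one place to audit for leaks */
}

int main(void)
{
	receive("port0", strdup("#hello"));
	receive("port0", strdup("!bogus"));   /* dropped via error label */
	receive(NULL, strdup("#no-port"));    /* dropped via error label */
	return 0;
}
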
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
 
 	packet_flush_mclist(sk);
 
-	memset(&req_u, 0, sizeof(req_u));
-
-	if (po->rx_ring.pg_vec)
+	if (po->rx_ring.pg_vec) {
+		memset(&req_u, 0, sizeof(req_u));
 		packet_set_ring(sk, &req_u, 1, 0);
+	}
 
-	if (po->tx_ring.pg_vec)
+	if (po->tx_ring.pg_vec) {
+		memset(&req_u, 0, sizeof(req_u));
 		packet_set_ring(sk, &req_u, 1, 1);
+	}
 
 	fanout_release(sk);
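
Note: the problem was handing packet_set_ring() a request union that was zeroed once and then reused, so state left over from the rx teardown could bleed into the tx call. The rule the fix applies, scrub a reused request buffer immediately before each use, in miniature:

#include <stdio.h>
#include <string.h>

union ring_req {                    /* stand-in for the tpacket_req union */
	struct { unsigned block_size, block_nr; } v;
	unsigned char raw[32];
};

static void use_req(const union ring_req *req, const char *which)
{
	printf("%s: block_size=%u block_nr=%u\n",
	       which, req->v.block_size, req->v.block_nr);
}

int main(void)
{
	union ring_req req;

	memset(&req, 0, sizeof(req));   /* zero once per use, not once ever */
	use_req(&req, "rx teardown");

	memset(&req, 0, sizeof(req));   /* again before the tx call */
	use_req(&req, "tx teardown");
	return 0;
}
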
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (q->rate) {
 		struct sk_buff_head *list = &sch->q;
 
-		delay += packet_len_2_sched_time(skb->len, q);
-
 		if (!skb_queue_empty(list)) {
 			/*
-			 * Last packet in queue is reference point (now).
-			 * First packet in queue is already in flight,
-			 * calculate this time bonus and substract
+			 * Last packet in queue is reference point (now),
+			 * calculate this time bonus and subtract
 			 * from delay.
 			 */
-			delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+			delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
+			delay = max_t(psched_tdiff_t, 0, delay);
 			now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
 		}
+
+		delay += packet_len_2_sched_time(skb->len, q);
 	}
 
 	cb->time_to_send = now + delay;
|
||||
return;
|
||||
|
||||
if (atomic_dec_and_test(&key->refcnt)) {
|
||||
kfree(key);
|
||||
kzfree(key);
|
||||
SCTP_DBG_OBJCNT_DEC(keys);
|
||||
}
|
||||
}
|
||||
|
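
Note: kzfree() is kfree() preceded by a memset of the allocation, so key material does not survive in freed slab memory. A userspace equivalent; the volatile-qualified stores keep the compiler from eliding the wipe the way it may elide a plain memset right before free():

#include <stdlib.h>

/* Zero a secret before returning its memory, like kzfree() does. */
static void zfree(void *p, size_t len)
{
	volatile unsigned char *v = p;
	size_t i;

	if (p == NULL)
		return;
	for (i = 0; i < len; i++)
		v[i] = 0;            /* volatile stores are not optimized out */
	free(p);
}

int main(void)
{
	size_t len = 64;
	size_t i;
	unsigned char *key = malloc(len);

	if (key) {
		for (i = 0; i < len; i++)
			key[i] = 0xA5;   /* pretend this is key material */
		zfree(key, len);
	}
	return 0;
}
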
@@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 /* Final destructor for endpoint. */
 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
+	int i;
+
 	SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
 
 	/* Free up the HMAC transform. */
@@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 	sctp_inq_free(&ep->base.inqueue);
 	sctp_bind_addr_free(&ep->base.bind_addr);
 
+	for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
+		memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
+
 	/* Remove and free the port */
 	if (sctp_sk(ep->base.sk)->bind_hash)
 		sctp_put_port(ep->base.sk);
@@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 
 	ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
 out:
-	kfree(authkey);
+	kzfree(authkey);
 	return ret;
 }
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
 }
 
 /*
- * See net/ipv6/datagram.c : datagram_recv_ctl
+ * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
  */
 static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
 				     struct cmsghdr *cmh)
@@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
 				  &iwe, IW_EV_UINT_LEN);
 	}
 
-	buf = kmalloc(30, GFP_ATOMIC);
+	buf = kmalloc(31, GFP_ATOMIC);
 	if (buf) {
 		memset(&iwe, 0, sizeof(iwe));
 		iwe.cmd = IWEVCUSTOM;
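
Note: the 30-to-31 bump is an off-by-one; the formatted string needed 31 bytes once the NUL terminator is counted. When the length is not obvious, snprintf(NULL, 0, ...) can measure it first; a sketch (the format string here is invented, not the wext one):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long beacons = 1234567890UL;

	/* Measure first: snprintf(NULL, 0, ...) returns the length sans NUL. */
	int need = snprintf(NULL, 0, "bcn_int=%lu", beacons);
	char *buf = malloc((size_t)need + 1);   /* +1 for the terminator */

	if (buf) {
		snprintf(buf, (size_t)need + 1, "bcn_int=%lu", beacons);
		puts(buf);
		free(buf);
	}
	return 0;
}
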