Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/mac80211/mlme.c
commit b171e19ed0
@@ -337,6 +337,11 @@ typedef struct scc_param {
uint scc_tcrc; /* Internal */
} sccp_t;

/* Function code bits.
*/
#define SCC_EB ((u_char) 0x10) /* Set big endian byte order */
#define SCC_GBL ((u_char) 0x20) /* Snooping enabled */

/* CPM Ethernet through SCC1.
*/
typedef struct scc_enet {
@@ -822,14 +822,14 @@ config ULTRA32
will be called smc-ultra32.

config BFIN_MAC
tristate "Blackfin 527/536/537 on-chip mac support"
depends on NET_ETHERNET && (BF527 || BF537 || BF536)
tristate "Blackfin on-chip MAC support"
depends on NET_ETHERNET && (BF526 || BF527 || BF536 || BF537)
select CRC32
select MII
select PHYLIB
select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
help
This is the driver for blackfin on-chip mac device. Say Y if you want it
This is the driver for Blackfin on-chip mac device. Say Y if you want it
compiled into the kernel. This driver is also available as a module
( = code which can be inserted in and removed from the running kernel
whenever you want). The module will be called bfin_mac.
@@ -2232,10 +2232,11 @@ static int atl1e_resume(struct pci_dev *pdev)

AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);

if (netif_running(netdev))
if (netif_running(netdev)) {
err = atl1e_request_irq(adapter);
if (err)
return err;
}

atl1e_reset_hw(&adapter->hw);
@@ -3022,7 +3022,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
netdev->features = NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SG;
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_LLTX;

/*
@@ -271,7 +271,7 @@ struct bnx2x_fastpath {
(fp->tx_pkt_prod != fp->tx_pkt_cons))

#define BNX2X_HAS_RX_WORK(fp) \
(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
(fp->rx_comp_cons != rx_cons_sb)

#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
@@ -59,8 +59,8 @@
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION "1.45.17"
#define DRV_MODULE_RELDATE "2008/08/13"
#define DRV_MODULE_VERSION "1.45.20"
#define DRV_MODULE_RELDATE "2008/08/25"
#define BNX2X_BC_VER 0x040200

/* Time in jiffies before concluding the transmitter is hung */

@@ -1717,8 +1717,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
return -EEXIST;
}

/* Try for 1 second every 5ms */
for (cnt = 0; cnt < 200; cnt++) {
/* Try for 5 second every 5ms */
for (cnt = 0; cnt < 1000; cnt++) {
/* Try to acquire the lock */
REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
lock_status = REG_RD(bp, hw_lock_control_reg);
@@ -2550,6 +2550,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
BNX2X_ERR("SPIO5 hw attention\n");

switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
/* Fan failure attention */

@@ -4605,6 +4606,17 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
{
int i;

if (bp->flags & TPA_ENABLE_FLAG) {
struct tstorm_eth_tpa_exist tpa = {0};

tpa.tpa_exist = 1;

REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
((u32 *)&tpa)[0]);
REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
((u32 *)&tpa)[1]);
}

/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5337,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
}

switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
/* Fan failure is indicated by SPIO 5 */
bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,

@@ -5363,17 +5376,6 @@ static int bnx2x_init_common(struct bnx2x *bp)

enable_blocks_attention(bp);

if (bp->flags & TPA_ENABLE_FLAG) {
struct tstorm_eth_tpa_exist tmp = {0};

tmp.tpa_exist = 1;

REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
((u32 *)&tmp)[0]);
REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
((u32 *)&tmp)[1]);
}

if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
bnx2x_common_init_phy(bp, bp->common.shmem_base);

@@ -5531,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
/* Port DMAE comes here */

switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
/* add SPIO 5 to group 0 */
val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -6055,6 +6058,44 @@ static int bnx2x_req_irq(struct bnx2x *bp)
return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
int i;

for_each_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;

for_each_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
if (bp->state == BNX2X_STATE_OPEN)
netif_wake_queue(bp->dev);
bnx2x_napi_enable(bp);
bnx2x_int_enable(bp);
}
}
}

static void bnx2x_netif_stop(struct bnx2x *bp)
{
bnx2x_int_disable_sync(bp);
if (netif_running(bp->dev)) {
bnx2x_napi_disable(bp);
netif_tx_disable(bp->dev);
bp->dev->trans_start = jiffies; /* prevent tx timeout */
}
}

/*
* Init service functions
*/
@@ -6338,7 +6379,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_init_hw(bp, load_code);
if (rc) {
BNX2X_ERR("HW init failed, aborting\n");
goto load_error;
goto load_int_disable;
}

/* Setup NIC internals and enable interrupts */

@@ -6350,7 +6391,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
goto load_int_disable;
goto load_rings_free;
}
}

@@ -6360,8 +6401,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)

/* Enable Rx interrupt handling before sending the ramrod
as it's completed on Rx FP queue */
for_each_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
bnx2x_napi_enable(bp);

/* Enable interrupt handling */
atomic_set(&bp->intr_sem, 0);

@@ -6369,7 +6409,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_setup_leading(bp);
if (rc) {
BNX2X_ERR("Setup leading failed!\n");
goto load_stop_netif;
goto load_netif_stop;
}

if (CHIP_IS_E1H(bp))

@@ -6382,7 +6422,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
for_each_nondefault_queue(bp, i) {
rc = bnx2x_setup_multi(bp, i);
if (rc)
goto load_stop_netif;
goto load_netif_stop;
}

if (CHIP_IS_E1(bp))
@@ -6427,20 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)

return 0;

load_stop_netif:
for_each_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));

load_int_disable:
bnx2x_int_disable_sync(bp);

/* Release IRQs */
bnx2x_free_irq(bp);

load_netif_stop:
bnx2x_napi_disable(bp);
load_rings_free:
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
for_each_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_int_disable:
bnx2x_int_disable_sync(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
load_error:
bnx2x_free_mem(bp);

@@ -6455,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)

/* halt the connection */
bp->fp[index].state = BNX2X_FP_STATE_HALTING;
bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);

/* Wait for completion */
rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6613,11 +6650,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bp->rx_mode = BNX2X_RX_MODE_NONE;
bnx2x_set_storm_rx_mode(bp);

if (netif_running(bp->dev)) {
netif_tx_disable(bp->dev);
bp->dev->trans_start = jiffies; /* prevent tx timeout */
}

bnx2x_netif_stop(bp);
if (!netif_running(bp->dev))
bnx2x_napi_disable(bp);
del_timer_sync(&bp->timer);
SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
(DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));

@@ -6631,9 +6666,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
smp_rmb();
while (BNX2X_HAS_TX_WORK(fp)) {

if (!netif_running(bp->dev))
bnx2x_tx_int(fp, 1000);

bnx2x_tx_int(fp, 1000);
if (!cnt) {
BNX2X_ERR("timeout waiting for queue[%d]\n",
i);

@@ -6649,18 +6682,42 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
smp_rmb();
}
}

/* Give HW time to discard old tx messages */
msleep(1);

for_each_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
/* Disable interrupts after Tx and Rx are disabled on stack level */
bnx2x_int_disable_sync(bp);

/* Release IRQs */
bnx2x_free_irq(bp);

if (CHIP_IS_E1(bp)) {
struct mac_configuration_cmd *config =
bnx2x_sp(bp, mcast_config);

bnx2x_set_mac_addr_e1(bp, 0);

for (i = 0; i < config->hdr.length_6b; i++)
CAM_INVALIDATE(config->config_table[i]);

config->hdr.length_6b = i;
if (CHIP_REV_IS_SLOW(bp))
config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
else
config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
config->hdr.client_id = BP_CL_ID(bp);
config->hdr.reserved1 = 0;

bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

} else { /* E1H */
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

bnx2x_set_mac_addr_e1h(bp, 0);

for (i = 0; i < MC_HASH_SIZE; i++)
REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
}

if (unload_mode == UNLOAD_NORMAL)
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -6689,37 +6746,6 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
} else
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

if (CHIP_IS_E1(bp)) {
struct mac_configuration_cmd *config =
bnx2x_sp(bp, mcast_config);

bnx2x_set_mac_addr_e1(bp, 0);

for (i = 0; i < config->hdr.length_6b; i++)
CAM_INVALIDATE(config->config_table[i]);

config->hdr.length_6b = i;
if (CHIP_REV_IS_SLOW(bp))
config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
else
config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
config->hdr.client_id = BP_CL_ID(bp);
config->hdr.reserved1 = 0;

bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

} else { /* E1H */
bnx2x_set_mac_addr_e1h(bp, 0);

for (i = 0; i < MC_HASH_SIZE; i++)
REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
}

if (CHIP_IS_E1H(bp))
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

/* Close multi and leading connections
Completions for ramrods are collected in a synchronous way */
for_each_nondefault_queue(bp, i)

@@ -6821,6 +6847,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
*/
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (val == 0x7)
REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

if (val == 0x7) {
u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
/* save our func */

@@ -6898,7 +6928,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
}
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
}
}
@@ -8617,34 +8646,6 @@ test_mem_exit:
return rc;
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
int i;

if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
bnx2x_int_enable(bp);
for_each_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
if (bp->state == BNX2X_STATE_OPEN)
netif_wake_queue(bp->dev);
}
}
}

static void bnx2x_netif_stop(struct bnx2x *bp)
{
int i;

if (netif_running(bp->dev)) {
netif_tx_disable(bp->dev);
bp->dev->trans_start = jiffies; /* prevent tx timeout */
for_each_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
}
bnx2x_int_disable_sync(bp);
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
int cnt = 1000;
@@ -9250,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
napi);
struct bnx2x *bp = fp->bp;
int work_done = 0;
u16 rx_cons_sb;

#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))

@@ -9265,10 +9267,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
if (BNX2X_HAS_TX_WORK(fp))
bnx2x_tx_int(fp, budget);

rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
rx_cons_sb++;
if (BNX2X_HAS_RX_WORK(fp))
work_done = bnx2x_rx_int(fp, budget);

rmb(); /* BNX2X_HAS_WORK() reads the status block */
rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
rx_cons_sb++;

/* must not complete if we consumed full budget */
if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

@@ -9484,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
fp_index = (smp_processor_id() % bp->num_queues);
fp = &bp->fp[fp_index];

if (unlikely(bnx2x_tx_avail(bp->fp) <
(skb_shinfo(skb)->nr_frags + 3))) {
if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
bp->eth_stats.driver_xoff++,
netif_stop_queue(dev);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");

@@ -9548,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_bd->vlan = cpu_to_le16(pkt_prod);

if (xmit_type) {

/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
pbd = (void *)&fp->tx_desc_ring[bd_prod];
@@ -1838,7 +1838,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
if ((le16_to_cpu(rfd->command) & cb_el) &&
(RU_RUNNING == nic->ru_running))

if (readb(&nic->csr->scb.status) & rus_no_res)
if (ioread8(&nic->csr->scb.status) & rus_no_res)
nic->ru_running = RU_SUSPENDED;
return -ENODATA;
}

@@ -1861,7 +1861,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
if ((le16_to_cpu(rfd->command) & cb_el) &&
(RU_RUNNING == nic->ru_running)) {

if (readb(&nic->csr->scb.status) & rus_no_res)
if (ioread8(&nic->csr->scb.status) & rus_no_res)
nic->ru_running = RU_SUSPENDED;
}
@@ -5522,7 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (id->driver_data & DEV_HAS_CHECKSUM) {
np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features |= NETIF_F_TSO;
}

@@ -5835,7 +5835,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i

dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
"csum " : "",
dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
"vlan " : "",
@@ -792,6 +792,10 @@ static int fs_enet_open(struct net_device *dev)
int r;
int err;

/* to initialize the fep->cur_rx,... */
/* not doing this, will cause a crash in fs_enet_rx_napi */
fs_init_bds(fep->ndev);

if (fep->fpi->use_napi)
napi_enable(&fep->napi);

@@ -1167,6 +1171,10 @@ static struct of_device_id fs_enet_match[] = {
.compatible = "fsl,cpm1-scc-enet",
.data = (void *)&fs_scc_ops,
},
{
.compatible = "fsl,cpm2-scc-enet",
.data = (void *)&fs_scc_ops,
},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
{
@@ -47,7 +47,6 @@
#include "fs_enet.h"

/*************************************************/

#if defined(CONFIG_CPM1)
/* for a 8xx __raw_xxx's are sufficient */
#define __fs_out32(addr, x) __raw_writel(x, addr)

@@ -62,6 +61,8 @@
#define __fs_out16(addr, x) out_be16(addr, x)
#define __fs_in32(addr) in_be32(addr)
#define __fs_in16(addr) in_be16(addr)
#define __fs_out8(addr, x) out_8(addr, x)
#define __fs_in8(addr) in_8(addr)
#endif

/* write, read, set bits, clear bits */

@@ -262,8 +263,13 @@ static void restart(struct net_device *dev)

/* Initialize function code registers for big-endian.
*/
#ifndef CONFIG_NOT_COHERENT_CACHE
W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
#else
W8(ep, sen_genscc.scc_rfcr, SCC_EB);
W8(ep, sen_genscc.scc_tfcr, SCC_EB);
#endif

/* Set maximum bytes per receive buffer.
* This appears to be an Ethernet frame size, not the buffer
@@ -105,6 +105,7 @@ const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);

@@ -209,6 +210,7 @@ static int gfar_probe(struct platform_device *pdev)
spin_lock_init(&priv->txlock);
spin_lock_init(&priv->rxlock);
spin_lock_init(&priv->bflock);
INIT_WORK(&priv->reset_task, gfar_reset_task);

platform_set_drvdata(pdev, dev);

@@ -1212,6 +1214,7 @@ static int gfar_close(struct net_device *dev)

napi_disable(&priv->napi);

cancel_work_sync(&priv->reset_task);
stop_gfar(dev);

/* Disconnect from the PHY */

@@ -1326,13 +1329,16 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}

/* gfar_timeout gets called when a packet has not been
/* gfar_reset_task gets scheduled when a packet has not been
* transmitted after a set amount of time.
* For now, assume that clearing out all the structures, and
* starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
* starting over will fix the problem.
*/
static void gfar_reset_task(struct work_struct *work)
{
dev->stats.tx_errors++;
struct gfar_private *priv = container_of(work, struct gfar_private,
reset_task);
struct net_device *dev = priv->dev;

if (dev->flags & IFF_UP) {
stop_gfar(dev);

@@ -1342,6 +1348,14 @@ static void gfar_timeout(struct net_device *dev)
netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);

dev->stats.tx_errors++;
schedule_work(&priv->reset_task);
}

/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct net_device *dev)
{

@@ -756,6 +756,7 @@ struct gfar_private {

uint32_t msg_enable;

struct work_struct reset_task;
/* Network Statistics */
struct gfar_extra_stats extra_stats;
};
@@ -663,9 +663,6 @@ static int emac_configure(struct emac_instance *dev)
if (emac_phy_gpcs(dev->phy.mode))
emac_mii_reset_phy(&dev->phy);

/* Required for Pause packet support in EMAC */
dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

return 0;
}

@@ -1150,6 +1147,9 @@ static int emac_open(struct net_device *ndev)
} else
netif_carrier_on(dev->ndev);

/* Required for Pause packet support in EMAC */
dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

emac_configure(dev);
mal_poll_add(dev->mal, &dev->commac);
mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
@@ -904,8 +904,6 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long data_dma_addr;

desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);

if (skb->ip_summed == CHECKSUM_PARTIAL &&
ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {

@@ -924,6 +922,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
buf[1] = 0;
}

data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
ibmveth_error_printk("tx: unable to map xmit buffer\n");

@@ -932,6 +932,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
desc.fields.address = adapter->bounce_buffer_dma;
tx_map_failed++;
used_bounce = 1;
wmb();
} else
desc.fields.address = data_dma_addr;
@@ -87,7 +87,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_82576:
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
mac->type = e1000_82576;
break;
default:

@@ -41,7 +41,6 @@ struct e1000_hw;
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6

@@ -373,13 +373,17 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[12] = rd32(E1000_EECD);

/* Interrupt */
regs_buff[13] = rd32(E1000_EICR);
/* Reading EICS for EICR because they read the
* same but EICS does not clear on read */
regs_buff[13] = rd32(E1000_EICS);
regs_buff[14] = rd32(E1000_EICS);
regs_buff[15] = rd32(E1000_EIMS);
regs_buff[16] = rd32(E1000_EIMC);
regs_buff[17] = rd32(E1000_EIAC);
regs_buff[18] = rd32(E1000_EIAM);
regs_buff[19] = rd32(E1000_ICR);
/* Reading ICS for ICR because they read the
* same but ICS does not clear on read */
regs_buff[19] = rd32(E1000_ICS);
regs_buff[20] = rd32(E1000_ICS);
regs_buff[21] = rd32(E1000_IMS);
regs_buff[22] = rd32(E1000_IMC);

@@ -1746,15 +1750,6 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
/* return success for non excluded adapter ports */
retval = 0;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
/* quad port adapters only support WoL on port A */
if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
wol->supported = 0;
break;
}
/* return success for non excluded adapter ports */
retval = 0;
break;
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
@@ -61,7 +61,6 @@ static struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },

@@ -521,7 +520,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
adapter->msix_entries,
numvecs);
if (err == 0)
return;
goto out;

igb_reset_interrupt_capability(adapter);

@@ -531,7 +530,7 @@ msi_only:
adapter->num_tx_queues = 1;
if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;

out:
/* Notify the stack of the (possibly) reduced Tx Queue count. */
adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
return;

@@ -1217,16 +1216,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
else
adapter->flags |= IGB_FLAG_QUAD_PORT_A;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
}

/* initialize the wol settings based on the eeprom settings */

@@ -2290,7 +2279,9 @@ static void igb_watchdog_task(struct work_struct *work)
struct igb_ring *tx_ring = adapter->tx_ring;
struct e1000_mac_info *mac = &adapter->hw.mac;
u32 link;
u32 eics = 0;
s32 ret_val;
int i;

if ((netif_carrier_ok(netdev)) &&
(rd32(E1000_STATUS) & E1000_STATUS_LU))

@@ -2392,7 +2383,13 @@ link_up:
}

/* Cause software interrupt to ensure rx ring is cleaned */
wr32(E1000_ICS, E1000_ICS_RXDMT0);
if (adapter->msix_entries) {
for (i = 0; i < adapter->num_rx_queues; i++)
eics |= adapter->rx_ring[i].eims_value;
wr32(E1000_EICS, eics);
} else {
wr32(E1000_ICS, E1000_ICS_RXDMT0);
}

/* Force detection of hung controller every watchdog period */
tx_ring->detect_tx_hung = true;
@@ -1636,16 +1636,17 @@ static void ixgbe_set_multi(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
struct dev_mc_list *mc_ptr;
u8 *mta_list;
u32 fctrl;
u32 fctrl, vlnctrl;
int i;

/* Check for Promiscuous and All Multicast modes */

fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

if (netdev->flags & IFF_PROMISC) {
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
fctrl &= ~IXGBE_VLNCTRL_VFE;
vlnctrl &= ~IXGBE_VLNCTRL_VFE;
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;

@@ -1653,10 +1654,11 @@ static void ixgbe_set_multi(struct net_device *netdev)
} else {
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
}
fctrl |= IXGBE_VLNCTRL_VFE;
vlnctrl |= IXGBE_VLNCTRL_VFE;
}

IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

if (netdev->mc_count) {
mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
@@ -55,7 +55,7 @@
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.2";
static char mv643xx_eth_driver_version[] = "1.3";

#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI

@@ -474,11 +474,19 @@ static void rxq_refill(struct rx_queue *rxq)
/*
* Reserve 2+14 bytes for an ethernet header (the
* hardware automatically prepends 2 bytes of dummy
* data to each received packet), 4 bytes for a VLAN
* header, and 4 bytes for the trailing FCS -- 24
* bytes total.
* data to each received packet), 16 bytes for up to
* four VLAN tags, and 4 bytes for the trailing FCS
* -- 36 bytes total.
*/
skb_size = mp->dev->mtu + 24;
skb_size = mp->dev->mtu + 36;

/*
* Make sure that the skb size is a multiple of 8
* bytes, as the lower three bits of the receive
* descriptor's buffer size field are ignored by
* the hardware.
*/
skb_size = (skb_size + 7) & ~7;

skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
if (skb == NULL)

@@ -509,10 +517,8 @@ static void rxq_refill(struct rx_queue *rxq)
skb_reserve(skb, 2);
}

if (rxq->rx_desc_count != rxq->rx_ring_size) {
rxq->rx_oom.expires = jiffies + (HZ / 10);
add_timer(&rxq->rx_oom);
}
if (rxq->rx_desc_count != rxq->rx_ring_size)
mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));

spin_unlock_irqrestore(&mp->lock, flags);
}

@@ -529,7 +535,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
int rx;

rx = 0;
while (rx < budget) {
while (rx < budget && rxq->rx_desc_count) {
struct rx_desc *rx_desc;
unsigned int cmd_sts;
struct sk_buff *skb;

@@ -554,7 +560,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
spin_unlock_irqrestore(&mp->lock, flags);

dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
mp->dev->mtu + 24, DMA_FROM_DEVICE);
rx_desc->buf_size, DMA_FROM_DEVICE);
rxq->rx_desc_count--;
rx++;

@@ -636,9 +642,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
txq_reclaim(mp->txq + i, 0);

if (netif_carrier_ok(mp->dev)) {
spin_lock(&mp->lock);
spin_lock_irq(&mp->lock);
__txq_maybe_wake(mp->txq + mp->txq_primary);
spin_unlock(&mp->lock);
spin_unlock_irq(&mp->lock);
}
}
#endif

@@ -650,8 +656,6 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)

if (rx < budget) {
netif_rx_complete(mp->dev, napi);
wrl(mp, INT_CAUSE(mp->port_num), 0);
wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
}

@@ -1796,6 +1800,7 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
*/
#ifdef MV643XX_ETH_NAPI
if (int_cause & INT_RX) {
wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
wrl(mp, INT_MASK(mp->port_num), 0x00000000);
rdl(mp, INT_MASK(mp->port_num));
@@ -76,7 +76,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"

#define MYRI10GE_VERSION_STR "1.3.99-1.347"
#define MYRI10GE_VERSION_STR "1.4.3-1.358"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");

@@ -2792,7 +2792,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
pkt_size, PCI_DMA_FROMDEVICE);
rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
} else {
pci_unmap_single(pdev, addr, pkt_size,
pci_unmap_single(pdev, addr, tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
tp->Rx_skbuff[entry] = NULL;
}
@@ -510,7 +510,7 @@ static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
chg->path.para.p_type = SMT_P320B ;
chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
chg->path.mib_index = SBAPATHINDEX ;
chg->path.path_pad = (u_short)NULL ;
chg->path.path_pad = 0;
chg->path.path_index = PRIMARY_RING ;

/* set P320F */

@@ -606,7 +606,7 @@ static void ess_send_alc_req(struct s_smc *smc)
req->path.para.p_type = SMT_P320B ;
req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
req->path.mib_index = SBAPATHINDEX ;
req->path.path_pad = (u_short)NULL ;
req->path.path_pad = 0;
req->path.path_index = PRIMARY_RING ;

/* set P0017 */

@@ -636,7 +636,7 @@ static void ess_send_alc_req(struct s_smc *smc)
/* set P19 */
req->a_addr.para.p_type = SMT_P0019 ;
req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ;
req->a_addr.sba_pad = (u_short)NULL ;
req->a_addr.sba_pad = 0;
req->a_addr.alloc_addr = null_addr ;

/* set P1A */
@@ -2255,7 +2255,7 @@ static int smc_drv_remove(struct platform_device *pdev)

res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
if (!res)
platform_get_resource(pdev, IORESOURCE_MEM, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, SMC_IO_EXTENT);

free_netdev(ndev);

@@ -397,7 +397,7 @@ static const struct usb_device_id hso_ids[] = {
{default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */
{icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */
{icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */
{default_port_device(0x0af0, 0xd033)}, /* Icon-322 */
{icon321_port_device(0x0af0, 0xd033)}, /* Icon-322 */
{USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */
{USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */
{USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */

@@ -2613,6 +2613,7 @@ static int hso_resume(struct usb_interface *iface)
"Transmitting lingering data\n");
hso_net_start_xmit(hso_net->skb_tx_buf,
hso_net->net);
hso_net->skb_tx_buf = NULL;
}
result = hso_start_net_device(network_table[i]);
if (result)
@@ -46,6 +46,10 @@

#define MCS7830_VENDOR_ID 0x9710
#define MCS7830_PRODUCT_ID 0x7830
#define MCS7730_PRODUCT_ID 0x7730

#define SITECOM_VENDOR_ID 0x0DF6
#define LN_030_PRODUCT_ID 0x0021

#define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \
ADVERTISE_100HALF | ADVERTISE_10FULL | \

@@ -442,6 +446,29 @@ static struct ethtool_ops mcs7830_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};

static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
{
int ret;
struct usbnet *dev = netdev_priv(netdev);
struct sockaddr *addr = p;

if (netif_running(netdev))
return -EBUSY;

if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;

memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
netdev->dev_addr);

if (ret < 0)
return ret;

return 0;
}

static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
{
struct net_device *net = dev->net;

@@ -455,6 +482,7 @@ static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
net->ethtool_ops = &mcs7830_ethtool_ops;
net->set_multicast_list = mcs7830_set_multicast;
mcs7830_set_multicast(net);
net->set_mac_address = mcs7830_set_mac_address;

/* reserve space for the status byte on rx */
dev->rx_urb_size = ETH_FRAME_LEN + 1;

@@ -491,7 +519,16 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}

static const struct driver_info moschip_info = {
.description = "MOSCHIP 7830 usb-NET adapter",
.description = "MOSCHIP 7830/7730 usb-NET adapter",
.bind = mcs7830_bind,
.rx_fixup = mcs7830_rx_fixup,
.flags = FLAG_ETHER,
.in = 1,
.out = 2,
};

static const struct driver_info sitecom_info = {
.description = "Sitecom LN-30 usb-NET adapter",
.bind = mcs7830_bind,
.rx_fixup = mcs7830_rx_fixup,
.flags = FLAG_ETHER,

@@ -504,6 +541,14 @@ static const struct usb_device_id products[] = {
USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID),
.driver_info = (unsigned long) &moschip_info,
},
{
USB_DEVICE(MCS7830_VENDOR_ID, MCS7730_PRODUCT_ID),
.driver_info = (unsigned long) &moschip_info,
},
{
USB_DEVICE(SITECOM_VENDOR_ID, LN_030_PRODUCT_ID),
.driver_info = (unsigned long) &sitecom_info,
},
{},
};
MODULE_DEVICE_TABLE(usb, products);
@@ -1317,7 +1317,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
break;

case SIOCDEVRESINSTATS :
if( current->euid != 0 ) /* root only */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
break;

@@ -1334,7 +1334,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
break;

case SIOCDEVSHWSTATE :
if( current->euid != 0 ) /* root only */
if (!capable(CAP_NET_ADMIN))
return -EPERM;

spin_lock( &nl->lock );

@@ -1355,7 +1355,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
#ifdef CONFIG_SBNI_MULTILINE

case SIOCDEVENSLAVE :
if( current->euid != 0 ) /* root only */
if (!capable(CAP_NET_ADMIN))
return -EPERM;

if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))

@@ -1370,7 +1370,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
return enslave( dev, slave_dev );

case SIOCDEVEMANSIPATE :
if( current->euid != 0 ) /* root only */
if (!capable(CAP_NET_ADMIN))
return -EPERM;

return emancipate( dev );

@@ -337,7 +337,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ei_poll;
#endif
NS8390p_init(dev, 0);
NS8390_init(dev, 0);

#if 1
/* Enable interrupt generation on softconfig cards -- M.U */
@@ -290,7 +290,7 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
return;
pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(bf->skb);
dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
}

@@ -505,6 +505,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
mutex_init(&sc->lock);
spin_lock_init(&sc->rxbuflock);
spin_lock_init(&sc->txbuflock);
spin_lock_init(&sc->block);

/* Set private data */
pci_set_drvdata(pdev, hw);

@@ -2123,8 +2124,11 @@ ath5k_beacon_config(struct ath5k_softc *sc)

sc->imask |= AR5K_INT_SWBA;

if (ath5k_hw_hasveol(ah))
if (ath5k_hw_hasveol(ah)) {
spin_lock(&sc->block);
ath5k_beacon_send(sc);
spin_unlock(&sc->block);
}
}
/* TODO else AP */

@@ -2324,7 +2328,9 @@ ath5k_intr(int irq, void *dev_id)
TSF_TO_TU(tsf),
(unsigned long long) tsf);
} else {
spin_lock(&sc->block);
ath5k_beacon_send(sc);
spin_unlock(&sc->block);
}
}
if (status & AR5K_INT_RXEOL) {

@@ -2685,6 +2691,11 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
ret = -EOPNOTSUPP;
goto end;
}

/* Set to a reasonable value. Note that this will
* be set to mac80211's value at ath5k_config(). */
sc->bintval = 1000;

ret = 0;
end:
mutex_unlock(&sc->lock);

@@ -2729,9 +2740,6 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ath5k_hw *ah = sc->ah;
int ret;

/* Set to a reasonable value. Note that this will
* be set to mac80211's value at ath5k_config(). */
sc->bintval = 1000;
mutex_lock(&sc->lock);
if (sc->vif != vif) {
ret = -EIO;

@@ -2991,6 +2999,7 @@ static int
ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ath5k_softc *sc = hw->priv;
unsigned long flags;
int ret;

ath5k_debug_dump_skb(sc, skb, "BC ", 1);

@@ -3000,12 +3009,14 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
goto end;
}

spin_lock_irqsave(&sc->block, flags);
ath5k_txbuf_free(sc, sc->bbuf);
sc->bbuf->skb = skb;
ret = ath5k_beacon_setup(sc, sc->bbuf);
if (ret)
sc->bbuf->skb = NULL;
else {
spin_unlock_irqrestore(&sc->block, flags);
if (!ret) {
ath5k_beacon_config(sc);
mmiowb();
}

@@ -168,6 +168,7 @@ struct ath5k_softc {
struct tasklet_struct txtq; /* tx intr tasklet */
struct ath5k_led tx_led; /* tx led */

spinlock_t block; /* protects beacon */
struct ath5k_buf *bbuf; /* beacon buffer */
unsigned int bhalq, /* SW q for outgoing beacons */
bmisscount, /* missed beacon transmits */
@@ -1304,7 +1304,7 @@ EXPORT_SYMBOL(atmel_open);
int atmel_open(struct net_device *dev)
{
struct atmel_private *priv = netdev_priv(dev);
int i, channel;
int i, channel, err;

/* any scheduled timer is no longer needed and might screw things up.. */
del_timer_sync(&priv->management_timer);

@@ -1328,8 +1328,9 @@ int atmel_open(struct net_device *dev)
priv->site_survey_state = SITE_SURVEY_IDLE;
priv->station_is_associated = 0;

if (!reset_atmel_card(dev))
return -EAGAIN;
err = reset_atmel_card(dev);
if (err)
return err;

if (priv->config_reg_domain) {
priv->reg_domain = priv->config_reg_domain;

@@ -3061,12 +3062,20 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
}

if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
/* Do opensystem first, then try sharedkey */
/* Flip back and forth between WEP auth modes until the max
* authentication tries has been exceeded.
*/
if (system == WLAN_AUTH_OPEN) {
priv->CurrentAuthentTransactionSeqNum = 0x001;
priv->exclude_unencrypted = 1;
send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0);
return;
} else if ( system == WLAN_AUTH_SHARED_KEY
&& priv->wep_is_on) {
priv->CurrentAuthentTransactionSeqNum = 0x001;
priv->exclude_unencrypted = 0;
send_authentication_request(priv, WLAN_AUTH_OPEN, NULL, 0);
return;
} else if (priv->connect_to_any_BSS) {
int bss_index;

@@ -3580,12 +3589,12 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)

if (i == 0) {
printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name);
return 0;
return -EIO;
}

if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) {
printk(KERN_ALERT "%s: card missing.\n", priv->dev->name);
return 0;
return -ENODEV;
}

/* now check for completion of MAC initialization through

@@ -3609,19 +3618,19 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
if (i == 0) {
printk(KERN_ALERT "%s: MAC failed to initialise.\n",
priv->dev->name);
return 0;
return -EIO;
}

/* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */
if ((mr3 & MAC_INIT_COMPLETE) &&
!(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) {
printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name);
return 0;
return -EIO;
}
if ((mr1 & MAC_INIT_COMPLETE) &&
!(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) {
printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name);
return 0;
return -EIO;
}

atmel_copy_to_host(priv->dev, (unsigned char *)iface,

@@ -3642,7 +3651,7 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
iface->func_ctrl = le16_to_cpu(iface->func_ctrl);
iface->mac_status = le16_to_cpu(iface->mac_status);

return 1;
return 0;
}

/* determine type of memory and MAC address */

@@ -3693,7 +3702,7 @@ static int probe_atmel_card(struct net_device *dev)
/* Standard firmware in flash, boot it up and ask
for the Mac Address */
priv->card_type = CARD_TYPE_SPI_FLASH;
if (atmel_wakeup_firmware(priv)) {
if (atmel_wakeup_firmware(priv) == 0) {
atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6);

/* got address, now squash it again until the network

@@ -3835,6 +3844,7 @@ static int reset_atmel_card(struct net_device *dev)
struct atmel_private *priv = netdev_priv(dev);
u8 configuration;
int old_state = priv->station_state;
int err = 0;

/* data to add to the firmware names, in priority order
this implemenents firmware versioning */

@@ -3868,11 +3878,12 @@ static int reset_atmel_card(struct net_device *dev)
dev->name);
strcpy(priv->firmware_id, "atmel_at76c502.bin");
}
if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) {
err = request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev);
if (err != 0) {
printk(KERN_ALERT
"%s: firmware %s is missing, cannot continue.\n",
dev->name, priv->firmware_id);
return 0;
return err;
}
} else {
int fw_index = 0;

@@ -3901,7 +3912,7 @@ static int reset_atmel_card(struct net_device *dev)
"%s: firmware %s is missing, cannot start.\n",
dev->name, priv->firmware_id);
priv->firmware_id[0] = '\0';
return 0;
return -ENOENT;
}
}

@@ -3926,8 +3937,9 @@ static int reset_atmel_card(struct net_device *dev)
release_firmware(fw_entry);
}

if (!atmel_wakeup_firmware(priv))
return 0;
err = atmel_wakeup_firmware(priv);
if (err != 0)
return err;

/* Check the version and set the correct flag for wpa stuff,
old and new firmware is incompatible.

@@ -3968,10 +3980,9 @@ static int reset_atmel_card(struct net_device *dev)
if (!priv->radio_on_broken) {
if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) ==
CMD_STATUS_REJECTED_RADIO_OFF) {
printk(KERN_INFO
"%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n",
printk(KERN_INFO "%s: cannot turn the radio on.\n",
dev->name);
return 0;
return -EIO;
}
}

@@ -4006,7 +4017,7 @@ static int reset_atmel_card(struct net_device *dev)
wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
}

return 1;
return 0;
}

static void atmel_send_command(struct atmel_private *priv, int command,
@@ -133,14 +133,14 @@ claw_register_debug_facility(void)
static inline void
claw_set_busy(struct net_device *dev)
{
((struct claw_privbk *) dev->priv)->tbusy=1;
((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
eieio();
}

static inline void
claw_clear_busy(struct net_device *dev)
{
clear_bit(0, &(((struct claw_privbk *) dev->priv)->tbusy));
clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
netif_wake_queue(dev);
eieio();
}

@@ -149,20 +149,20 @@ static inline int
claw_check_busy(struct net_device *dev)
{
eieio();
return ((struct claw_privbk *) dev->priv)->tbusy;
return ((struct claw_privbk *) dev->ml_priv)->tbusy;
}

static inline void
claw_setbit_busy(int nr,struct net_device *dev)
{
netif_stop_queue(dev);
set_bit(nr, (void *)&(((struct claw_privbk *)dev->priv)->tbusy));
set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
}

static inline void
claw_clearbit_busy(int nr,struct net_device *dev)
{
clear_bit(nr,(void *)&(((struct claw_privbk *)dev->priv)->tbusy));
clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
netif_wake_queue(dev);
}

@@ -171,7 +171,7 @@ claw_test_and_setbit_busy(int nr,struct net_device *dev)
{
netif_stop_queue(dev);
return test_and_set_bit(nr,
(void *)&(((struct claw_privbk *) dev->priv)->tbusy));
(void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
}

@@ -271,6 +271,7 @@ claw_probe(struct ccwgroup_device *cgdev)
if (!get_device(&cgdev->dev))
return -ENODEV;
privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
cgdev->dev.driver_data = privptr;
if (privptr == NULL) {
probe_error(cgdev);
put_device(&cgdev->dev);

@@ -305,7 +306,6 @@ claw_probe(struct ccwgroup_device *cgdev)
privptr->p_env->p_priv = privptr;
cgdev->cdev[0]->handler = claw_irq_handler;
cgdev->cdev[1]->handler = claw_irq_handler;
cgdev->dev.driver_data = privptr;
CLAW_DBF_TEXT(2, setup, "prbext 0");

return 0;

@@ -319,7 +319,7 @@ static int
claw_tx(struct sk_buff *skb, struct net_device *dev)
{
int rc;
struct claw_privbk *privptr=dev->priv;
struct claw_privbk *privptr = dev->ml_priv;
unsigned long saveflags;
struct chbk *p_ch;

@@ -404,7 +404,7 @@ claw_pack_skb(struct claw_privbk *privptr)
static int
claw_change_mtu(struct net_device *dev, int new_mtu)
{
struct claw_privbk *privptr=dev->priv;
struct claw_privbk *privptr = dev->ml_priv;
int buff_size;
CLAW_DBF_TEXT(4, trace, "setmtu");
buff_size = privptr->p_env->write_size;

@@ -434,7 +434,7 @@ claw_open(struct net_device *dev)
struct ccwbk *p_buf;

CLAW_DBF_TEXT(4, trace, "open");
privptr = (struct claw_privbk *)dev->priv;
privptr = (struct claw_privbk *)dev->ml_priv;
/* allocate and initialize CCW blocks */
if (privptr->buffs_alloc == 0) {
rc=init_ccw_bk(dev);

@@ -780,7 +780,7 @@ claw_irq_tasklet ( unsigned long data )
p_ch = (struct chbk *) data;
dev = (struct net_device *)p_ch->ndev;
CLAW_DBF_TEXT(4, trace, "IRQtask");
privptr = (struct claw_privbk *) dev->priv;
privptr = (struct claw_privbk *)dev->ml_priv;
unpack_read(dev);
clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
CLAW_DBF_TEXT(4, trace, "TskletXt");

@@ -805,7 +805,7 @@ claw_release(struct net_device *dev)

if (!dev)
return 0;
privptr = (struct claw_privbk *) dev->priv;
privptr = (struct claw_privbk *)dev->ml_priv;
if (!privptr)
return 0;
CLAW_DBF_TEXT(4, trace, "release");

@@ -960,7 +960,7 @@ claw_write_next ( struct chbk * p_ch )
if (p_ch->claw_state == CLAW_STOP)
return;
dev = (struct net_device *) p_ch->ndev;
privptr = (struct claw_privbk *) dev->priv;
privptr = (struct claw_privbk *) dev->ml_priv;
claw_free_wrt_buf( dev );
if ((privptr->write_free_count > 0) &&
!skb_queue_empty(&p_ch->collect_queue)) {

@@ -1042,7 +1042,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
struct ccw1 temp_ccw;
struct endccw * p_end;
CLAW_DBF_TEXT(4, trace, "addreads");
privptr = dev->priv;
privptr = dev->ml_priv;
p_end = privptr->p_end_ccw;

/* first CCW and last CCW contains a new set of read channel programs

@@ -1212,7 +1212,7 @@ find_link(struct net_device *dev, char *host_name, char *ws_name )
int rc=0;

CLAW_DBF_TEXT(2, setup, "findlink");
privptr=dev->priv;
privptr = dev->ml_priv;
p_env=privptr->p_env;
switch (p_env->packing)
{

@@ -1264,7 +1264,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
struct chbk *ch;

CLAW_DBF_TEXT(4, trace, "hw_tx");
privptr = (struct claw_privbk *) (dev->priv);
privptr = (struct claw_privbk *)(dev->ml_priv);
p_ch=(struct chbk *)&privptr->channel[WRITE];
p_env =privptr->p_env;
claw_free_wrt_buf(dev); /* Clean up free chain if posible */

@@ -1483,8 +1483,8 @@ init_ccw_bk(struct net_device *dev)
struct ccwbk*p_last_CCWB;
struct ccwbk*p_first_CCWB;
struct endccw *p_endccw=NULL;
addr_t real_address;
struct claw_privbk *privptr=dev->priv;
addr_t real_address;
struct claw_privbk *privptr = dev->ml_priv;
struct clawh *pClawH=NULL;
addr_t real_TIC_address;
int i,j;

@@ -1960,19 +1960,16 @@ init_ccw_bk(struct net_device *dev)
static void
probe_error( struct ccwgroup_device *cgdev)
{
struct claw_privbk *privptr;
struct claw_privbk *privptr;

CLAW_DBF_TEXT(4, trace, "proberr");
privptr=(struct claw_privbk *)cgdev->dev.driver_data;
if (privptr!=NULL) {
privptr = (struct claw_privbk *) cgdev->dev.driver_data;
if (privptr != NULL) {
cgdev->dev.driver_data = NULL;
kfree(privptr->p_env);
privptr->p_env=NULL;
kfree(privptr->p_mtc_envelope);
privptr->p_mtc_envelope=NULL;
kfree(privptr);
privptr=NULL;
}
return;
kfree(privptr->p_mtc_envelope);
kfree(privptr);
}
} /* probe_error */

/*-------------------------------------------------------------------*

@@ -2000,7 +1997,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
CLAW_DBF_TEXT(2, setup, "clw_cntl");
udelay(1000); /* Wait a ms for the control packets to
*catch up to each other */
privptr=dev->priv;
privptr = dev->ml_priv;
p_env=privptr->p_env;
tdev = &privptr->channel[READ].cdev->dev;
memcpy( &temp_host_name, p_env->host_name, 8);

@@ -2278,7 +2275,7 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
struct sk_buff *skb;

CLAW_DBF_TEXT(2, setup, "sndcntl");
privptr=dev->priv;
privptr = dev->ml_priv;
p_ctl=(struct clawctl *)&privptr->ctl_bk;

p_ctl->command=type;

@@ -2348,7 +2345,7 @@ static int
claw_snd_conn_req(struct net_device *dev, __u8 link)
{
int rc;
struct claw_privbk *privptr=dev->priv;
struct claw_privbk *privptr = dev->ml_priv;
struct clawctl *p_ctl;

CLAW_DBF_TEXT(2, setup, "snd_conn");

@@ -2408,7 +2405,7 @@ claw_snd_sys_validate_rsp(struct net_device *dev,
int rc;

CLAW_DBF_TEXT(2, setup, "chkresp");
privptr = dev->priv;
privptr = dev->ml_priv;
p_env=privptr->p_env;
rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
p_ctl->linkid,

@@ -2446,7 +2443,7 @@ net_device_stats *claw_stats(struct net_device *dev)
struct claw_privbk *privptr;

CLAW_DBF_TEXT(4, trace, "stats");
privptr = dev->priv;
privptr = dev->ml_priv;
return &privptr->stats;
} /* end of claw_stats */

@@ -2482,7 +2479,7 @@ unpack_read(struct net_device *dev )
p_last_ccw=NULL;
p_packh=NULL;
p_packd=NULL;
privptr=dev->priv;
privptr = dev->ml_priv;

p_dev = &privptr->channel[READ].cdev->dev;
p_env = privptr->p_env;

@@ -2651,7 +2648,7 @@ claw_strt_read (struct net_device *dev, int lock )
int rc = 0;
__u32 parm;
unsigned long saveflags = 0;
struct claw_privbk *privptr=dev->priv;
struct claw_privbk *privptr = dev->ml_priv;
struct ccwbk*p_ccwbk;
struct chbk *p_ch;
struct clawh *p_clawh;

@@ -2708,7 +2705,7 @@ claw_strt_out_IO( struct net_device *dev )
if (!dev) {
return;
}
privptr=(struct claw_privbk *)dev->priv;
privptr = (struct claw_privbk *)dev->ml_priv;
p_ch=&privptr->channel[WRITE];

CLAW_DBF_TEXT(4, trace, "strt_io");

@@ -2741,7 +2738,7 @@ static void
claw_free_wrt_buf( struct net_device *dev )
{

struct claw_privbk *privptr=(struct claw_privbk *)dev->priv;
struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
struct ccwbk*p_first_ccw;
struct ccwbk*p_last_ccw;
struct ccwbk*p_this_ccw;

@@ -2798,13 +2795,13 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
if (!dev)
return;
CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
privptr = dev->priv;
privptr = dev->ml_priv;
if (dev->flags & IFF_RUNNING)
claw_release(dev);
if (privptr) {
privptr->channel[READ].ndev = NULL; /* say it's free */
}
dev->priv=NULL;
dev->ml_priv = NULL;
#ifdef MODULE
if (free_dev) {
free_netdev(dev);
||||
@ -2921,7 +2918,7 @@ claw_new_device(struct ccwgroup_device *cgdev)
|
||||
printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__);
|
||||
goto out;
|
||||
}
|
||||
dev->priv = privptr;
|
||||
dev->ml_priv = privptr;
|
||||
cgdev->dev.driver_data = privptr;
|
||||
cgdev->cdev[READ]->dev.driver_data = privptr;
|
||||
cgdev->cdev[WRITE]->dev.driver_data = privptr;
|
||||
@ -3002,7 +2999,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
|
||||
ret = claw_release(ndev);
|
||||
ndev->flags &=~IFF_RUNNING;
|
||||
unregister_netdev(ndev);
|
||||
ndev->priv = NULL; /* cgdev data, not ndev's to free */
|
||||
ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
|
||||
claw_free_netdevice(ndev, 1);
|
||||
priv->channel[READ].ndev = NULL;
|
||||
priv->channel[WRITE].ndev = NULL;
|
||||
|
@ -245,7 +245,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct sk_buff *skb;
|
||||
int first = 1;
|
||||
int i;
|
||||
@ -336,7 +336,7 @@ void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
|
||||
|
||||
@ -357,7 +357,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
|
||||
struct sk_buff *skb = ch->trans_skb;
|
||||
__u16 block_len = *((__u16 *)skb->data);
|
||||
@ -459,7 +459,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
|
||||
chx_rxidle(fi, event, arg);
|
||||
} else {
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
fsm_newstate(fi, CTC_STATE_TXIDLE);
|
||||
fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
|
||||
}
|
||||
@ -496,7 +496,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
|
||||
if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
|
||||
(ch->protocol == CTCM_PROTO_S390)) {
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
|
||||
}
|
||||
}
|
||||
@ -514,7 +514,7 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
__u16 buflen;
|
||||
int rc;
|
||||
|
||||
@ -699,7 +699,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
|
||||
struct channel *ch)
|
||||
{
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
|
||||
"%s(%s): %s[%d]\n",
|
||||
@ -784,7 +784,7 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
/*
|
||||
* Special case: Got UC_RCRESET on setmode.
|
||||
@ -874,7 +874,7 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
if (event == CTC_EVENT_TIMER) {
|
||||
if (!IS_MPCDEV(dev))
|
||||
@ -902,7 +902,7 @@ static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
|
||||
"%s(%s): RX %s busy, init. fail",
|
||||
@ -923,7 +923,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
|
||||
struct channel *ch = arg;
|
||||
struct channel *ch2;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
|
||||
"%s: %s: remote disconnect - re-init ...",
|
||||
@ -954,7 +954,7 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
if (event == CTC_EVENT_TIMER) {
|
||||
fsm_deltimer(&ch->timer);
|
||||
@ -984,7 +984,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct sk_buff *skb;
|
||||
|
||||
CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
|
||||
@ -1057,7 +1057,7 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
int rd = CHANNEL_DIRECTION(ch->flags);
|
||||
|
||||
fsm_deltimer(&ch->timer);
|
||||
@ -1207,7 +1207,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct sk_buff *skb;
|
||||
int first = 1;
|
||||
@ -1368,7 +1368,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct sk_buff *skb = ch->trans_skb;
|
||||
struct sk_buff *new_skb;
|
||||
@ -1471,7 +1471,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *gptr = priv->mpcg;
|
||||
|
||||
CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
|
||||
@ -1525,7 +1525,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
int rc;
|
||||
unsigned long saveflags = 0; /* avoids compiler warning */
|
||||
@ -1580,7 +1580,7 @@ static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
|
||||
@ -1639,7 +1639,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
|
||||
@ -1724,7 +1724,7 @@ static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
|
||||
@ -1740,7 +1740,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
|
||||
{
|
||||
struct channel *ach = arg;
|
||||
struct net_device *dev = ach->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct channel *wch = priv->channel[WRITE];
|
||||
struct channel *rch = priv->channel[READ];
|
||||
@ -2050,7 +2050,7 @@ int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
|
||||
static void dev_action_start(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct net_device *dev = arg;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
int direction;
|
||||
|
||||
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
|
||||
@ -2076,7 +2076,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
int direction;
|
||||
struct net_device *dev = arg;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
|
||||
|
||||
@ -2096,7 +2096,7 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
int restart_timer;
|
||||
struct net_device *dev = arg;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
CTCMY_DBF_DEV_NAME(TRACE, dev, "");
|
||||
|
||||
@ -2133,12 +2133,12 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg)
|
||||
static void dev_action_chup(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
struct net_device *dev = arg;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
int dev_stat = fsm_getstate(fi);
|
||||
|
||||
CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
|
||||
"%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
|
||||
dev->name, dev->priv, dev_stat, event);
|
||||
dev->name, dev->ml_priv, dev_stat, event);
|
||||
|
||||
switch (fsm_getstate(fi)) {
|
||||
case DEV_STATE_STARTWAIT_RXTX:
|
||||
@ -2195,7 +2195,7 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
|
||||
struct net_device *dev = arg;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
|
||||
|
||||
|
@ -69,7 +69,7 @@ struct channel *channels;
|
||||
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
|
||||
{
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
__u16 len = *((__u16 *) pskb->data);
|
||||
|
||||
skb_put(pskb, 2 + LL_HEADER_LENGTH);
|
||||
@ -414,7 +414,7 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
|
||||
*/
|
||||
int ctcm_open(struct net_device *dev)
|
||||
{
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
|
||||
if (!IS_MPC(priv))
|
||||
@ -432,7 +432,7 @@ int ctcm_open(struct net_device *dev)
|
||||
*/
|
||||
int ctcm_close(struct net_device *dev)
|
||||
{
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
|
||||
if (!IS_MPC(priv))
|
||||
@ -573,7 +573,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
|
||||
skb_pull(skb, LL_HEADER_LENGTH + 2);
|
||||
} else if (ccw_idx == 0) {
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
priv->stats.tx_packets++;
|
||||
priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
|
||||
}
|
||||
@ -592,7 +592,7 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
|
||||
struct channel *ch;
|
||||
/* int rc = 0; */
|
||||
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
grp = priv->mpcg;
|
||||
ch = priv->channel[WRITE];
|
||||
|
||||
@ -652,7 +652,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
|
||||
{
|
||||
struct pdu *p_header;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct th_header *header;
|
||||
struct sk_buff *nskb;
|
||||
@ -867,7 +867,7 @@ done:
|
||||
/* first merge version - leaving both functions separated */
|
||||
static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
|
||||
if (skb == NULL) {
|
||||
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
|
||||
@ -911,7 +911,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
int len = 0;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct sk_buff *newskb = NULL;
|
||||
|
||||
@ -1025,7 +1025,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
|
||||
if (new_mtu < 576 || new_mtu > 65527)
|
||||
return -EINVAL;
|
||||
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
max_bufsize = priv->channel[READ]->max_bufsize;
|
||||
|
||||
if (IS_MPC(priv)) {
|
||||
@ -1050,7 +1050,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
|
||||
*/
|
||||
static struct net_device_stats *ctcm_stats(struct net_device *dev)
|
||||
{
|
||||
return &((struct ctcm_priv *)dev->priv)->stats;
|
||||
return &((struct ctcm_priv *)dev->ml_priv)->stats;
|
||||
}
|
||||
|
||||
static void ctcm_free_netdevice(struct net_device *dev)
|
||||
@ -1060,7 +1060,7 @@ static void ctcm_free_netdevice(struct net_device *dev)
|
||||
|
||||
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
|
||||
"%s(%s)", CTCM_FUNTAIL, dev->name);
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
if (priv) {
|
||||
grp = priv->mpcg;
|
||||
if (grp) {
|
||||
@ -1125,7 +1125,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
|
||||
CTCM_FUNTAIL);
|
||||
return NULL;
|
||||
}
|
||||
dev->priv = priv;
|
||||
dev->ml_priv = priv;
|
||||
priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
|
||||
CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
|
||||
dev_fsm, dev_fsm_len, GFP_KERNEL);
|
||||
|
@ -229,14 +229,14 @@ void ctcm_remove_files(struct device *dev);
|
||||
*/
|
||||
static inline void ctcm_clear_busy_do(struct net_device *dev)
|
||||
{
|
||||
clear_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy));
|
||||
clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy));
|
||||
netif_wake_queue(dev);
|
||||
}
|
||||
|
||||
static inline void ctcm_clear_busy(struct net_device *dev)
|
||||
{
|
||||
struct mpc_group *grp;
|
||||
grp = ((struct ctcm_priv *)dev->priv)->mpcg;
|
||||
grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg;
|
||||
|
||||
if (!(grp && grp->in_sweep))
|
||||
ctcm_clear_busy_do(dev);
|
||||
@ -246,7 +246,8 @@ static inline void ctcm_clear_busy(struct net_device *dev)
|
||||
static inline int ctcm_test_and_set_busy(struct net_device *dev)
|
||||
{
|
||||
netif_stop_queue(dev);
|
||||
return test_and_set_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy));
|
||||
return test_and_set_bit(0,
|
||||
&(((struct ctcm_priv *)dev->ml_priv)->tbusy));
|
||||
}
|
||||
|
||||
extern int loglevel;
|
||||
@ -292,7 +293,7 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
|
||||
#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC)
|
||||
|
||||
/* test if struct ctcm_priv of struct net_device has MPC protocol setting */
|
||||
#define IS_MPCDEV(d) IS_MPC((struct ctcm_priv *)d->priv)
|
||||
#define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv)
|
||||
|
||||
static inline gfp_t gfp_type(void)
|
||||
{
|
||||
|
@ -313,10 +313,10 @@ static struct net_device *ctcmpc_get_dev(int port_num)
|
||||
CTCM_FUNTAIL, device);
|
||||
return NULL;
|
||||
}
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
if (priv == NULL) {
|
||||
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
|
||||
"%s(%s): dev->priv is NULL",
|
||||
"%s(%s): dev->ml_priv is NULL",
|
||||
CTCM_FUNTAIL, device);
|
||||
return NULL;
|
||||
}
|
||||
@ -345,7 +345,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
|
||||
dev = ctcmpc_get_dev(port_num);
|
||||
if (dev == NULL)
|
||||
return 1;
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
grp = priv->mpcg;
|
||||
|
||||
grp->allochanfunc = callback;
|
||||
@ -417,7 +417,7 @@ void ctc_mpc_establish_connectivity(int port_num,
|
||||
dev = ctcmpc_get_dev(port_num);
|
||||
if (dev == NULL)
|
||||
return;
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
grp = priv->mpcg;
|
||||
rch = priv->channel[READ];
|
||||
wch = priv->channel[WRITE];
|
||||
@ -535,7 +535,7 @@ void ctc_mpc_dealloc_ch(int port_num)
|
||||
dev = ctcmpc_get_dev(port_num);
|
||||
if (dev == NULL)
|
||||
return;
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
grp = priv->mpcg;
|
||||
|
||||
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
|
||||
@ -571,7 +571,7 @@ void ctc_mpc_flow_control(int port_num, int flowc)
|
||||
dev = ctcmpc_get_dev(port_num);
|
||||
if (dev == NULL)
|
||||
return;
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
grp = priv->mpcg;
|
||||
|
||||
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
|
||||
@ -620,7 +620,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
|
||||
{
|
||||
struct channel *rch = mpcginfo->ch;
|
||||
struct net_device *dev = rch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct channel *ch = priv->channel[WRITE];
|
||||
|
||||
@ -651,7 +651,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
|
||||
static void ctcmpc_send_sweep_resp(struct channel *rch)
|
||||
{
|
||||
struct net_device *dev = rch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
int rc = 0;
|
||||
struct th_sweep *header;
|
||||
@ -713,7 +713,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
|
||||
{
|
||||
struct channel *rch = mpcginfo->ch;
|
||||
struct net_device *dev = rch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct channel *ch = priv->channel[WRITE];
|
||||
|
||||
@ -847,7 +847,7 @@ static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
|
||||
static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
|
||||
{
|
||||
struct net_device *dev = arg;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
if (grp == NULL) {
|
||||
@ -891,7 +891,7 @@ static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
|
||||
void mpc_group_ready(unsigned long adev)
|
||||
{
|
||||
struct net_device *dev = (struct net_device *)adev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct channel *ch = NULL;
|
||||
|
||||
@ -947,7 +947,7 @@ void mpc_group_ready(unsigned long adev)
|
||||
void mpc_channel_action(struct channel *ch, int direction, int action)
|
||||
{
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
if (grp == NULL) {
|
||||
@ -1057,7 +1057,7 @@ done:
|
||||
static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
|
||||
{
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct pdu *curr_pdu;
|
||||
struct mpcg_info *mpcginfo;
|
||||
@ -1255,7 +1255,7 @@ void ctcmpc_bh(unsigned long thischan)
|
||||
struct channel *ch = (struct channel *)thischan;
|
||||
struct sk_buff *skb;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n",
|
||||
@ -1377,7 +1377,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
|
||||
BUG_ON(dev == NULL);
|
||||
CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name);
|
||||
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
grp = priv->mpcg;
|
||||
grp->flow_off_called = 0;
|
||||
fsm_deltimer(&grp->timer);
|
||||
@ -1483,7 +1483,7 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
|
||||
|
||||
BUG_ON(dev == NULL);
|
||||
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
grp = priv->mpcg;
|
||||
wch = priv->channel[WRITE];
|
||||
rch = priv->channel[READ];
|
||||
@ -1521,7 +1521,7 @@ void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
|
||||
if (ch) {
|
||||
dev = ch->netdev;
|
||||
if (dev) {
|
||||
priv = dev->priv;
|
||||
priv = dev->ml_priv;
|
||||
if (priv) {
|
||||
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
|
||||
"%s: %s: %s\n",
|
||||
@ -1569,7 +1569,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
|
||||
{
|
||||
struct channel *ch = mpcginfo->ch;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
struct xid2 *xid = mpcginfo->xid;
|
||||
int rc = 0;
|
||||
@ -1866,7 +1866,7 @@ static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
|
||||
{
|
||||
struct channel *ch = arg;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
|
||||
@ -1906,7 +1906,7 @@ static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
|
||||
static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
|
||||
{
|
||||
struct net_device *dev = arg;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = NULL;
|
||||
int direction;
|
||||
int send = 0;
|
||||
@ -1983,7 +1983,7 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
|
||||
struct mpcg_info *mpcginfo = arg;
|
||||
struct channel *ch = mpcginfo->ch;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n",
|
||||
@ -2045,7 +2045,7 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
|
||||
struct mpcg_info *mpcginfo = arg;
|
||||
struct channel *ch = mpcginfo->ch;
|
||||
struct net_device *dev = ch->netdev;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
|
||||
@ -2097,7 +2097,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
|
||||
__u32 new_len = 0;
|
||||
struct sk_buff *skb;
|
||||
struct qllc *qllcptr;
|
||||
struct ctcm_priv *priv = dev->priv;
|
||||
struct ctcm_priv *priv = dev->ml_priv;
|
||||
struct mpc_group *grp = priv->mpcg;
|
||||
|
||||
CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
|
||||
|
@ -1412,7 +1412,8 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
/* How far in the ccw chain have we processed? */
if ((channel->state != LCS_CH_STATE_INIT) &&
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) {
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(irb->scsw.cmd.cpa != 0)) {
index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
- channel->ccws;
if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
|
@ -689,6 +689,7 @@ struct qeth_mc_mac {
struct list_head list;
__u8 mc_addr[MAX_ADDR_LEN];
unsigned char mc_addrlen;
int is_vmac;
};

struct qeth_card {
|
@ -3024,7 +3024,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
int offset)
{
int length = skb->len;
int length = skb->len - offset;
int length_here;
int element;
char *data;
|
@ -177,9 +177,10 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
|
||||
qeth_l2_send_delgroupmac_cb);
|
||||
}
|
||||
|
||||
static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac)
|
||||
static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
|
||||
{
|
||||
struct qeth_mc_mac *mc;
|
||||
int rc;
|
||||
|
||||
mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
|
||||
|
||||
@ -188,8 +189,16 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac)
|
||||
|
||||
memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
|
||||
mc->mc_addrlen = OSA_ADDR_LEN;
|
||||
mc->is_vmac = vmac;
|
||||
|
||||
if (!qeth_l2_send_setgroupmac(card, mac))
|
||||
if (vmac) {
|
||||
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
|
||||
NULL);
|
||||
} else {
|
||||
rc = qeth_l2_send_setgroupmac(card, mac);
|
||||
}
|
||||
|
||||
if (!rc)
|
||||
list_add_tail(&mc->list, &card->mc_list);
|
||||
else
|
||||
kfree(mc);
|
||||
@ -201,7 +210,11 @@ static void qeth_l2_del_all_mc(struct qeth_card *card)
|
||||
|
||||
spin_lock_bh(&card->mclock);
|
||||
list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
|
||||
qeth_l2_send_delgroupmac(card, mc->mc_addr);
|
||||
if (mc->is_vmac)
|
||||
qeth_l2_send_setdelmac(card, mc->mc_addr,
|
||||
IPA_CMD_DELVMAC, NULL);
|
||||
else
|
||||
qeth_l2_send_delgroupmac(card, mc->mc_addr);
|
||||
list_del(&mc->list);
|
||||
kfree(mc);
|
||||
}
|
||||
@ -590,7 +603,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
|
||||
static void qeth_l2_set_multicast_list(struct net_device *dev)
|
||||
{
|
||||
struct qeth_card *card = dev->ml_priv;
|
||||
struct dev_mc_list *dm;
|
||||
struct dev_addr_list *dm;
|
||||
|
||||
if (card->info.type == QETH_CARD_TYPE_OSN)
|
||||
return ;
|
||||
@ -599,7 +612,11 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
|
||||
qeth_l2_del_all_mc(card);
|
||||
spin_lock_bh(&card->mclock);
|
||||
for (dm = dev->mc_list; dm; dm = dm->next)
|
||||
qeth_l2_add_mc(card, dm->dmi_addr);
|
||||
qeth_l2_add_mc(card, dm->da_addr, 0);
|
||||
|
||||
for (dm = dev->uc_list; dm; dm = dm->next)
|
||||
qeth_l2_add_mc(card, dm->da_addr, 1);
|
||||
|
||||
spin_unlock_bh(&card->mclock);
|
||||
if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
|
||||
return;
|
||||
|
@ -136,7 +136,7 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev,
return -EINVAL;

if (!qeth_is_supported(card, IPA_IPV6)) {
return -ENOTSUPP;
return -EOPNOTSUPP;
}

return qeth_l3_dev_route_store(card, &card->options.route6,
|
@ -297,7 +297,6 @@ unifdef-y += parport.h
unifdef-y += patchkey.h
unifdef-y += pci.h
unifdef-y += personality.h
unifdef-y += pim.h
unifdef-y += pktcdvd.h
unifdef-y += pmu.h
unifdef-y += poll.h
|
@ -6,7 +6,6 @@
#ifdef __KERNEL__
#include <linux/in.h>
#endif
#include <linux/pim.h>

/*
* Based on the MROUTING 3.5 defines primarily to keep
@ -130,6 +129,7 @@ struct igmpmsg
*/

#ifdef __KERNEL__
#include <linux/pim.h>
#include <net/sock.h>

#ifdef CONFIG_IP_MROUTE
|
@ -115,6 +115,7 @@ struct sioc_mif_req6

#ifdef __KERNEL__

#include <linux/pim.h>
#include <linux/skbuff.h> /* for struct sk_buff_head */

#ifdef CONFIG_IPV6_MROUTE
|
@ -3,22 +3,6 @@

#include <asm/byteorder.h>

#ifndef __KERNEL__
struct pim {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 pim_type:4, /* PIM message type */
pim_ver:4; /* PIM version */
#elif defined(__BIG_ENDIAN_BITFIELD)
__u8 pim_ver:4; /* PIM version */
pim_type:4; /* PIM message type */
#endif
__u8 pim_rsv; /* Reserved */
__be16 pim_cksum; /* Checksum */
};

#define PIM_MINLEN 8
#endif

/* Message types - V1 */
#define PIM_V1_VERSION __constant_htonl(0x10000000)
#define PIM_V1_REGISTER 1
@ -27,7 +11,6 @@ struct pim {
#define PIM_VERSION 2
#define PIM_REGISTER 1

#if defined(__KERNEL__)
#define PIM_NULL_REGISTER __constant_htonl(0x40000000)

/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
@ -42,4 +25,3 @@ struct pimreghdr
struct sk_buff;
extern int pim_rcv_v1(struct sk_buff *);
#endif
#endif
|
@ -78,6 +78,7 @@ extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,

extern int register_qdisc(struct Qdisc_ops *qops);
extern int unregister_qdisc(struct Qdisc_ops *qops);
extern void qdisc_list_del(struct Qdisc *q);
extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
|
@ -217,6 +217,14 @@ static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root_sleeping(qdisc);

ASSERT_RTNL();
return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
@ -224,12 +232,12 @@ static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)

static inline void sch_tree_lock(struct Qdisc *q)
{
spin_lock_bh(qdisc_root_lock(q));
spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
spin_unlock_bh(qdisc_root_lock(q));
spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp) sch_tree_lock((tp)->q)
|
@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
|
||||
return net->ipv4.icmp_sk[smp_processor_id()];
|
||||
}
|
||||
|
||||
static inline int icmp_xmit_lock(struct sock *sk)
|
||||
static inline struct sock *icmp_xmit_lock(struct net *net)
|
||||
{
|
||||
struct sock *sk;
|
||||
|
||||
local_bh_disable();
|
||||
|
||||
sk = icmp_sk(net);
|
||||
|
||||
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
|
||||
/* This can happen if the output path signals a
|
||||
* dst_link_failure() for an outgoing ICMP packet.
|
||||
*/
|
||||
local_bh_enable();
|
||||
return 1;
|
||||
return NULL;
|
||||
}
|
||||
return 0;
|
||||
return sk;
|
||||
}
|
||||
|
||||
static inline void icmp_xmit_unlock(struct sock *sk)
|
||||
@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
|
||||
struct ipcm_cookie ipc;
|
||||
struct rtable *rt = skb->rtable;
|
||||
struct net *net = dev_net(rt->u.dst.dev);
|
||||
struct sock *sk = icmp_sk(net);
|
||||
struct inet_sock *inet = inet_sk(sk);
|
||||
struct sock *sk;
|
||||
struct inet_sock *inet;
|
||||
__be32 daddr;
|
||||
|
||||
if (ip_options_echo(&icmp_param->replyopts, skb))
|
||||
return;
|
||||
|
||||
if (icmp_xmit_lock(sk))
|
||||
sk = icmp_xmit_lock(net);
|
||||
if (sk == NULL)
|
||||
return;
|
||||
inet = inet_sk(sk);
|
||||
|
||||
icmp_param->data.icmph.checksum = 0;
|
||||
|
||||
@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
|
||||
if (!rt)
|
||||
goto out;
|
||||
net = dev_net(rt->u.dst.dev);
|
||||
sk = icmp_sk(net);
|
||||
|
||||
/*
|
||||
* Find the original header. It is expected to be valid, of course.
|
||||
@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
|
||||
}
|
||||
}
|
||||
|
||||
if (icmp_xmit_lock(sk))
|
||||
sk = icmp_xmit_lock(net);
|
||||
if (sk == NULL)
|
||||
return;
|
||||
|
||||
/*
|
||||
|
@ -3122,13 +3122,22 @@ static ctl_table ipv4_route_table[] = {
|
||||
{ .ctl_name = 0 }
|
||||
};
|
||||
|
||||
static __net_initdata struct ctl_path ipv4_route_path[] = {
|
||||
{ .procname = "net", .ctl_name = CTL_NET, },
|
||||
{ .procname = "ipv4", .ctl_name = NET_IPV4, },
|
||||
{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
|
||||
{ },
|
||||
static struct ctl_table empty[1];
|
||||
|
||||
static struct ctl_table ipv4_skeleton[] =
|
||||
{
|
||||
{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
|
||||
.mode = 0555, .child = ipv4_route_table},
|
||||
{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
|
||||
.mode = 0555, .child = empty},
|
||||
{ }
|
||||
};
|
||||
|
||||
static __net_initdata struct ctl_path ipv4_path[] = {
|
||||
{ .procname = "net", .ctl_name = CTL_NET, },
|
||||
{ .procname = "ipv4", .ctl_name = NET_IPV4, },
|
||||
{ },
|
||||
};
|
||||
|
||||
static struct ctl_table ipv4_route_flush_table[] = {
|
||||
{
|
||||
@ -3142,6 +3151,13 @@ static struct ctl_table ipv4_route_flush_table[] = {
|
||||
{ .ctl_name = 0 },
|
||||
};
|
||||
|
||||
static __net_initdata struct ctl_path ipv4_route_path[] = {
|
||||
{ .procname = "net", .ctl_name = CTL_NET, },
|
||||
{ .procname = "ipv4", .ctl_name = NET_IPV4, },
|
||||
{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
|
||||
{ },
|
||||
};
|
||||
|
||||
static __net_init int sysctl_route_net_init(struct net *net)
|
||||
{
|
||||
struct ctl_table *tbl;
|
||||
@ -3293,7 +3309,7 @@ int __init ip_rt_init(void)
|
||||
*/
|
||||
void __init ip_static_sysctl_init(void)
|
||||
{
|
||||
register_sysctl_paths(ipv4_route_path, ipv4_route_table);
|
||||
register_sysctl_paths(ipv4_path, ipv4_skeleton);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -468,7 +468,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
}
if (likely(sysctl_tcp_window_scaling)) {
opts->ws = tp->rx_opt.rcv_wscale;
size += TCPOLEN_WSCALE_ALIGNED;
if(likely(opts->ws))
size += TCPOLEN_WSCALE_ALIGNED;
}
if (likely(sysctl_tcp_sack)) {
opts->options |= OPTION_SACK_ADVERTISE;
@ -509,7 +510,8 @@ static unsigned tcp_synack_options(struct sock *sk,

if (likely(ireq->wscale_ok)) {
opts->ws = ireq->rcv_wscale;
size += TCPOLEN_WSCALE_ALIGNED;
if(likely(opts->ws))
size += TCPOLEN_WSCALE_ALIGNED;
}
if (likely(doing_ts)) {
opts->options |= OPTION_TS;
|
@ -1688,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
.fc_dst_len = plen,
.fc_flags = RTF_UP | flags,
.fc_nlinfo.nl_net = dev_net(dev),
.fc_protocol = RTPROT_KERNEL,
};

ipv6_addr_copy(&cfg.fc_dst, pfx);
|
@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
|
||||
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
|
||||
};
|
||||
|
||||
static __inline__ int icmpv6_xmit_lock(struct sock *sk)
|
||||
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
|
||||
{
|
||||
struct sock *sk;
|
||||
|
||||
local_bh_disable();
|
||||
|
||||
sk = icmpv6_sk(net);
|
||||
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
|
||||
/* This can happen if the output path (f.e. SIT or
|
||||
* ip6ip6 tunnel) signals dst_link_failure() for an
|
||||
* outgoing ICMP6 packet.
|
||||
*/
|
||||
local_bh_enable();
|
||||
return 1;
|
||||
return NULL;
|
||||
}
|
||||
return 0;
|
||||
return sk;
|
||||
}
|
||||
|
||||
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
|
||||
@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
|
||||
fl.fl_icmp_code = code;
|
||||
security_skb_classify_flow(skb, &fl);
|
||||
|
||||
sk = icmpv6_sk(net);
|
||||
np = inet6_sk(sk);
|
||||
|
||||
if (icmpv6_xmit_lock(sk))
|
||||
sk = icmpv6_xmit_lock(net);
|
||||
if (sk == NULL)
|
||||
return;
|
||||
np = inet6_sk(sk);
|
||||
|
||||
if (!icmpv6_xrlim_allow(sk, type, &fl))
|
||||
goto out;
|
||||
@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
|
||||
fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
|
||||
security_skb_classify_flow(skb, &fl);
|
||||
|
||||
sk = icmpv6_sk(net);
|
||||
np = inet6_sk(sk);
|
||||
|
||||
if (icmpv6_xmit_lock(sk))
|
||||
sk = icmpv6_xmit_lock(net);
|
||||
if (sk == NULL)
|
||||
return;
|
||||
np = inet6_sk(sk);
|
||||
|
||||
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
|
||||
fl.oif = np->mcast_oif;
|
||||
|
@ -377,14 +377,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return 0;
return NET_RX_DROP;
}

/* Charge it to the socket. */
if (sock_queue_rcv_skb(sk,skb)<0) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return 0;
return NET_RX_DROP;
}

return 0;
@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return 0;
return NET_RX_DROP;
}
}

|
@ -156,7 +156,7 @@ static struct ctl_table_header *ip6_base;
int ipv6_static_sysctl_register(void)
{
static struct ctl_table empty[1];
ip6_base = register_net_sysctl_rotable(net_ipv6_ctl_path, empty);
ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
if (ip6_base == NULL)
return -ENOMEM;
return 0;
|
@ -248,8 +248,8 @@ IEEE80211_IF_WFILE(min_discovery_timeout,
|
||||
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
|
||||
{
|
||||
DEBUGFS_ADD(drop_unencrypted, sta);
|
||||
DEBUGFS_ADD(force_unicast_rateidx, ap);
|
||||
DEBUGFS_ADD(max_ratectrl_rateidx, ap);
|
||||
DEBUGFS_ADD(force_unicast_rateidx, sta);
|
||||
DEBUGFS_ADD(max_ratectrl_rateidx, sta);
|
||||
|
||||
DEBUGFS_ADD(state, sta);
|
||||
DEBUGFS_ADD(bssid, sta);
|
||||
@ -283,8 +283,8 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
|
||||
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
|
||||
{
|
||||
DEBUGFS_ADD(drop_unencrypted, wds);
|
||||
DEBUGFS_ADD(force_unicast_rateidx, ap);
|
||||
DEBUGFS_ADD(max_ratectrl_rateidx, ap);
|
||||
DEBUGFS_ADD(force_unicast_rateidx, wds);
|
||||
DEBUGFS_ADD(max_ratectrl_rateidx, wds);
|
||||
|
||||
DEBUGFS_ADD(peer, wds);
|
||||
}
|
||||
@ -292,8 +292,8 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
|
||||
static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
|
||||
{
|
||||
DEBUGFS_ADD(drop_unencrypted, vlan);
|
||||
DEBUGFS_ADD(force_unicast_rateidx, ap);
|
||||
DEBUGFS_ADD(max_ratectrl_rateidx, ap);
|
||||
DEBUGFS_ADD(force_unicast_rateidx, vlan);
|
||||
DEBUGFS_ADD(max_ratectrl_rateidx, vlan);
|
||||
}
|
||||
|
||||
static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
|
||||
@ -381,8 +381,8 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
|
||||
static void del_sta_files(struct ieee80211_sub_if_data *sdata)
|
||||
{
|
||||
DEBUGFS_DEL(drop_unencrypted, sta);
|
||||
DEBUGFS_DEL(force_unicast_rateidx, ap);
|
||||
DEBUGFS_DEL(max_ratectrl_rateidx, ap);
|
||||
DEBUGFS_DEL(force_unicast_rateidx, sta);
|
||||
DEBUGFS_DEL(max_ratectrl_rateidx, sta);
|
||||
|
||||
DEBUGFS_DEL(state, sta);
|
||||
DEBUGFS_DEL(bssid, sta);
|
||||
@ -416,8 +416,8 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata)
|
||||
static void del_wds_files(struct ieee80211_sub_if_data *sdata)
|
||||
{
|
||||
DEBUGFS_DEL(drop_unencrypted, wds);
|
||||
DEBUGFS_DEL(force_unicast_rateidx, ap);
|
||||
DEBUGFS_DEL(max_ratectrl_rateidx, ap);
|
||||
DEBUGFS_DEL(force_unicast_rateidx, wds);
|
||||
DEBUGFS_DEL(max_ratectrl_rateidx, wds);
|
||||
|
||||
DEBUGFS_DEL(peer, wds);
|
||||
}
|
||||
@ -425,8 +425,8 @@ static void del_wds_files(struct ieee80211_sub_if_data *sdata)
|
||||
static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
|
||||
{
|
||||
DEBUGFS_DEL(drop_unencrypted, vlan);
|
||||
DEBUGFS_DEL(force_unicast_rateidx, ap);
|
||||
DEBUGFS_DEL(max_ratectrl_rateidx, ap);
|
||||
DEBUGFS_DEL(force_unicast_rateidx, vlan);
|
||||
DEBUGFS_DEL(max_ratectrl_rateidx, vlan);
|
||||
}
|
||||
|
||||
static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
|
||||
|
@ -472,6 +472,8 @@ struct ieee80211_sub_if_data {
|
||||
struct dentry *auth_transaction;
|
||||
struct dentry *flags;
|
||||
struct dentry *num_beacons_sta;
|
||||
struct dentry *force_unicast_rateidx;
|
||||
struct dentry *max_ratectrl_rateidx;
|
||||
} sta;
|
||||
struct {
|
||||
struct dentry *drop_unencrypted;
|
||||
@ -485,9 +487,13 @@ struct ieee80211_sub_if_data {
|
||||
struct {
|
||||
struct dentry *drop_unencrypted;
|
||||
struct dentry *peer;
|
||||
struct dentry *force_unicast_rateidx;
|
||||
struct dentry *max_ratectrl_rateidx;
|
||||
} wds;
|
||||
struct {
|
||||
struct dentry *drop_unencrypted;
|
||||
struct dentry *force_unicast_rateidx;
|
||||
struct dentry *max_ratectrl_rateidx;
|
||||
} vlan;
|
||||
struct {
|
||||
struct dentry *mode;
|
||||
|
@ -376,7 +376,7 @@ errcopy:
hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
tbl->free_node(p, 0);
}
__mesh_table_free(tbl);
__mesh_table_free(newtbl);
endgrow:
return NULL;
}
|
@ -500,51 +500,21 @@ int ieee80211_ht_addt_info_ie_to_ht_bss_info(
|
||||
static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata,
|
||||
struct ieee80211_if_sta *ifsta)
|
||||
{
|
||||
char *buf;
|
||||
size_t len;
|
||||
int i;
|
||||
union iwreq_data wrqu;
|
||||
|
||||
if (!ifsta->assocreq_ies && !ifsta->assocresp_ies)
|
||||
return;
|
||||
|
||||
buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len +
|
||||
ifsta->assocresp_ies_len), GFP_KERNEL);
|
||||
if (!buf)
|
||||
return;
|
||||
|
||||
len = sprintf(buf, "ASSOCINFO(");
|
||||
if (ifsta->assocreq_ies) {
|
||||
len += sprintf(buf + len, "ReqIEs=");
|
||||
for (i = 0; i < ifsta->assocreq_ies_len; i++) {
|
||||
len += sprintf(buf + len, "%02x",
|
||||
ifsta->assocreq_ies[i]);
|
||||
}
|
||||
memset(&wrqu, 0, sizeof(wrqu));
|
||||
wrqu.data.length = ifsta->assocreq_ies_len;
|
||||
wireless_send_event(sdata->dev, IWEVASSOCREQIE, &wrqu,
|
||||
ifsta->assocreq_ies);
|
||||
}
|
||||
|
||||
if (ifsta->assocresp_ies) {
|
||||
if (ifsta->assocreq_ies)
|
||||
len += sprintf(buf + len, " ");
|
||||
len += sprintf(buf + len, "RespIEs=");
|
||||
for (i = 0; i < ifsta->assocresp_ies_len; i++) {
|
||||
len += sprintf(buf + len, "%02x",
|
||||
ifsta->assocresp_ies[i]);
|
||||
}
|
||||
memset(&wrqu, 0, sizeof(wrqu));
|
||||
wrqu.data.length = ifsta->assocresp_ies_len;
|
||||
wireless_send_event(sdata->dev, IWEVASSOCRESPIE, &wrqu,
|
||||
ifsta->assocresp_ies);
|
||||
}
|
||||
len += sprintf(buf + len, ")");
|
||||
|
||||
if (len > IW_CUSTOM_MAX) {
|
||||
len = sprintf(buf, "ASSOCRESPIE=");
|
||||
for (i = 0; i < ifsta->assocresp_ies_len; i++) {
|
||||
len += sprintf(buf + len, "%02x",
|
||||
ifsta->assocresp_ies[i]);
|
||||
}
|
||||
}
|
||||
|
||||
memset(&wrqu, 0, sizeof(wrqu));
|
||||
wrqu.data.length = len;
|
||||
wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf);
|
||||
|
||||
kfree(buf);
|
||||
}
|
||||
|
||||
|
||||
@ -864,7 +834,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
|
||||
}
|
||||
}
|
||||
|
||||
if (count == 8) {
|
||||
if (rates_len > count) {
|
||||
pos = skb_put(skb, rates_len - count + 2);
|
||||
*pos++ = WLAN_EID_EXT_SUPP_RATES;
|
||||
*pos++ = rates_len - count;
|
||||
@ -2788,7 +2758,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
|
||||
jiffies);
|
||||
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
|
||||
if (beacon_timestamp > rx_timestamp) {
|
||||
#ifndef CONFIG_MAC80211_IBSS_DEBUG
|
||||
#ifdef CONFIG_MAC80211_IBSS_DEBUG
|
||||
printk(KERN_DEBUG "%s: beacon TSF higher than "
|
||||
"local TSF - IBSS merge with BSSID %s\n",
|
||||
sdata->dev->name, print_mac(mac, mgmt->bssid));
|
||||
|
@ -205,7 +205,7 @@ replay:
}
}

root_lock = qdisc_root_lock(q);
root_lock = qdisc_root_sleeping_lock(q);

if (tp == NULL) {
/* Proto-tcf does not exist, create new one */
|
@ -75,7 +75,7 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
static inline
void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
spinlock_t *root_lock = qdisc_root_lock(q);
spinlock_t *root_lock = qdisc_root_sleeping_lock(q);

spin_lock_bh(root_lock);
memset(head->fastmap, 0, sizeof(head->fastmap));
|
@ -199,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
|
||||
* without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
|
||||
*/
|
||||
static DEFINE_SPINLOCK(qdisc_list_lock);
|
||||
|
||||
static void qdisc_list_add(struct Qdisc *q)
|
||||
{
|
||||
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
|
||||
spin_lock_bh(&qdisc_list_lock);
|
||||
list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
|
||||
spin_unlock_bh(&qdisc_list_lock);
|
||||
}
|
||||
}
|
||||
|
||||
void qdisc_list_del(struct Qdisc *q)
|
||||
{
|
||||
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
|
||||
spin_lock_bh(&qdisc_list_lock);
|
||||
list_del(&q->list);
|
||||
spin_unlock_bh(&qdisc_list_lock);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(qdisc_list_del);
|
||||
|
||||
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
|
||||
{
|
||||
unsigned int i;
|
||||
struct Qdisc *q;
|
||||
|
||||
spin_lock_bh(&qdisc_list_lock);
|
||||
|
||||
for (i = 0; i < dev->num_tx_queues; i++) {
|
||||
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
|
||||
struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
|
||||
struct Qdisc *txq_root = txq->qdisc_sleeping;
|
||||
|
||||
q = qdisc_match_from_root(txq_root, handle);
|
||||
if (q)
|
||||
return q;
|
||||
goto unlock;
|
||||
}
|
||||
return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
|
||||
|
||||
q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
|
||||
|
||||
unlock:
|
||||
spin_unlock_bh(&qdisc_list_lock);
|
||||
|
||||
return q;
|
||||
}
|
||||
|
||||
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
|
||||
@ -590,7 +624,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
|
||||
struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
|
||||
spinlock_t *root_lock;
|
||||
|
||||
root_lock = qdisc_root_lock(oqdisc);
|
||||
root_lock = qdisc_lock(oqdisc);
|
||||
spin_lock_bh(root_lock);
|
||||
|
||||
/* Prune old scheduler */
|
||||
@ -601,7 +635,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
|
||||
if (qdisc == NULL)
|
||||
qdisc = &noop_qdisc;
|
||||
dev_queue->qdisc_sleeping = qdisc;
|
||||
dev_queue->qdisc = &noop_qdisc;
|
||||
rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
|
||||
|
||||
spin_unlock_bh(root_lock);
|
||||
|
||||
@ -796,9 +830,16 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
|
||||
sch->stab = stab;
|
||||
}
|
||||
if (tca[TCA_RATE]) {
|
||||
spinlock_t *root_lock;
|
||||
|
||||
if ((sch->parent != TC_H_ROOT) &&
|
||||
!(sch->flags & TCQ_F_INGRESS))
|
||||
root_lock = qdisc_root_sleeping_lock(sch);
|
||||
else
|
||||
root_lock = qdisc_lock(sch);
|
||||
|
||||
err = gen_new_estimator(&sch->bstats, &sch->rate_est,
|
||||
qdisc_root_lock(sch),
|
||||
tca[TCA_RATE]);
|
||||
root_lock, tca[TCA_RATE]);
|
||||
if (err) {
|
||||
/*
|
||||
* Any broken qdiscs that would require
|
||||
@ -810,8 +851,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
|
||||
goto err_out3;
|
||||
}
|
||||
}
|
||||
if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
|
||||
list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
|
||||
|
||||
qdisc_list_add(sch);
|
||||
|
||||
return sch;
|
||||
}
|
||||
@ -850,7 +891,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
|
||||
|
||||
if (tca[TCA_RATE])
|
||||
gen_replace_estimator(&sch->bstats, &sch->rate_est,
|
||||
qdisc_root_lock(sch), tca[TCA_RATE]);
|
||||
qdisc_root_sleeping_lock(sch),
|
||||
tca[TCA_RATE]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1127,8 +1169,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
|
||||
if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
|
||||
goto nla_put_failure;
|
||||
|
||||
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
|
||||
TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
|
||||
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
|
||||
qdisc_root_sleeping_lock(q), &d) < 0)
|
||||
goto nla_put_failure;
|
||||
|
||||
if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
|
||||
@ -1419,8 +1461,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
|
||||
if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
|
||||
goto nla_put_failure;
|
||||
|
||||
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
|
||||
TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
|
||||
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
|
||||
qdisc_root_sleeping_lock(q), &d) < 0)
|
||||
goto nla_put_failure;
|
||||
|
||||
if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
|
||||
|
@ -1754,7 +1754,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)

if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
spinlock_t *root_lock = qdisc_root_lock(sch);
spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
struct cbq_sched_data *q = qdisc_priv(sch);

spin_lock_bh(root_lock);
@ -1839,7 +1839,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t

if (tca[TCA_RATE])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_lock(sch),
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
return 0;
}
@ -1930,7 +1930,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t

if (tca[TCA_RATE])
gen_new_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_lock(sch), tca[TCA_RATE]);
qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);

*arg = (unsigned long)cl;
return 0;
|
@ -526,10 +526,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
!atomic_dec_and_test(&qdisc->refcnt))
return;

if (qdisc->parent)
list_del(&qdisc->list);

#ifdef CONFIG_NET_SCHED
qdisc_list_del(qdisc);

qdisc_put_stab(qdisc->stab);
#endif
gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
@ -635,7 +634,7 @@ static void dev_deactivate_queue(struct net_device *dev,
if (!(qdisc->flags & TCQ_F_BUILTIN))
set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

dev_queue->qdisc = qdisc_default;
rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
qdisc_reset(qdisc);

spin_unlock_bh(qdisc_lock(qdisc));
@ -710,7 +709,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
struct Qdisc *qdisc_default = _qdisc_default;

if (qdisc) {
dev_queue->qdisc = qdisc_default;
rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
dev_queue->qdisc_sleeping = qdisc_default;

qdisc_destroy(qdisc);
|
@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,

if (tca[TCA_RATE])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_lock(sch),
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
return 0;
}
@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,

if (tca[TCA_RATE])
gen_new_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_lock(sch), tca[TCA_RATE]);
qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
*arg = (unsigned long)cl;
return 0;
}
|
@ -1043,7 +1043,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
|
||||
|
||||
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
|
||||
{
|
||||
spinlock_t *root_lock = qdisc_root_lock(sch);
|
||||
spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
|
||||
struct htb_sched *q = qdisc_priv(sch);
|
||||
struct nlattr *nest;
|
||||
struct tc_htb_glob gopt;
|
||||
@ -1075,7 +1075,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
|
||||
struct sk_buff *skb, struct tcmsg *tcm)
|
||||
{
|
||||
struct htb_class *cl = (struct htb_class *)arg;
|
||||
spinlock_t *root_lock = qdisc_root_lock(sch);
|
||||
spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
|
||||
struct nlattr *nest;
|
||||
struct tc_htb_opt opt;
|
||||
|
||||
@ -1372,7 +1372,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
|
||||
goto failure;
|
||||
|
||||
gen_new_estimator(&cl->bstats, &cl->rate_est,
|
||||
qdisc_root_lock(sch),
|
||||
qdisc_root_sleeping_lock(sch),
|
||||
tca[TCA_RATE] ? : &est.nla);
|
||||
cl->refcnt = 1;
|
||||
cl->children = 0;
|
||||
@ -1427,7 +1427,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
|
||||
} else {
|
||||
if (tca[TCA_RATE])
|
||||
gen_replace_estimator(&cl->bstats, &cl->rate_est,
|
||||
qdisc_root_lock(sch),
|
||||
qdisc_root_sleeping_lock(sch),
|
||||
tca[TCA_RATE]);
|
||||
sch_tree_lock(sch);
|
||||
}
|
||||
|
@ -341,7 +341,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
|
||||
for (i = 0; i < n; i++)
|
||||
d->table[i] = data[i];
|
||||
|
||||
root_lock = qdisc_root_lock(sch);
|
||||
root_lock = qdisc_root_sleeping_lock(sch);
|
||||
|
||||
spin_lock_bh(root_lock);
|
||||
d = xchg(&q->delay_dist, d);
|
||||
|
@ -161,7 +161,7 @@ teql_destroy(struct Qdisc* sch)
|
||||
txq = netdev_get_tx_queue(master->dev, 0);
|
||||
master->slaves = NULL;
|
||||
|
||||
root_lock = qdisc_root_lock(txq->qdisc);
|
||||
root_lock = qdisc_root_sleeping_lock(txq->qdisc);
|
||||
spin_lock_bh(root_lock);
|
||||
qdisc_reset(txq->qdisc);
|
||||
spin_unlock_bh(root_lock);
|
||||
|
@ -80,6 +80,10 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
|
||||
{
|
||||
struct sctp_auth_bytes *key;
|
||||
|
||||
/* Verify that we are not going to overflow INT_MAX */
|
||||
if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
|
||||
return NULL;
|
||||
|
||||
/* Allocate the shared key */
|
||||
key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
|
||||
if (!key)
|
||||
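The guard added above keeps sizeof(struct sctp_auth_bytes) + key_len from wrapping when key_len arrives from userspace. The same idea, reduced to a self-contained sketch with an invented stand-in header struct (hdr, alloc_keyed are illustrative names, not the kernel's):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hdr { uint32_t len; };	/* stand-in for the real header struct */

/* Refuse lengths for which header + payload would exceed INT_MAX. */
void *alloc_keyed(uint32_t key_len)
{
	if (key_len > (uint32_t)(INT_MAX - sizeof(struct hdr)))
		return NULL;
	return malloc(sizeof(struct hdr) + key_len);
}

int main(void)
{
	printf("%p\n", alloc_keyed(16));		/* accepted */
	printf("%p\n", alloc_keyed(UINT32_MAX - 2));	/* rejected: prints (nil) */
	return 0;
}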
@ -782,6 +786,9 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
	for (i = 0; i < hmacs->shmac_num_idents; i++) {
		id = hmacs->shmac_idents[i];

		if (id > SCTP_AUTH_HMAC_ID_MAX)
			return -EOPNOTSUPP;

		if (SCTP_AUTH_HMAC_ID_SHA1 == id)
			has_sha1 = 1;
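The SCTP_AUTH_HMAC_ID_MAX test added above rejects out-of-range HMAC identifiers before they are used to index the algorithm table. A generic, self-contained version of that pattern (the table and names are invented for illustration):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

static const char *const hmac_names[] = { "none", "sha1", "reserved", "sha256" };
#define HMAC_ID_MAX (sizeof(hmac_names) / sizeof(hmac_names[0]) - 1)

/* Validate a user-supplied identifier before using it as an array index. */
static int lookup_hmac(unsigned int id, const char **name)
{
	if (id > HMAC_ID_MAX)
		return -EOPNOTSUPP;
	*name = hmac_names[id];
	return 0;
}

int main(void)
{
	const char *name;
	if (lookup_hmac(1, &name) == 0)
		printf("id 1 -> %s\n", name);
	printf("id 99 -> %d\n", lookup_hmac(99, &name));
	return 0;
}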
@ -3086,6 +3086,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
				      int optlen)
{
	struct sctp_hmacalgo *hmacs;
	u32 idents;
	int err;

	if (!sctp_auth_enable)
@ -3103,8 +3104,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
		goto out;
	}

	if (hmacs->shmac_num_idents == 0 ||
	    hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) {
	idents = hmacs->shmac_num_idents;
	if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
	    (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
		err = -EINVAL;
		goto out;
	}
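Both this hunk and the sctp_setsockopt_auth_key hunk below apply the same rule: a count or length embedded in the option payload must be checked against optlen, the number of bytes actually copied from userspace, before the trailing array is walked. A stripped-down sketch of that validation (the struct layout and names are illustrative, not the kernel's):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct hmacalgo_opt {
	uint32_t num_idents;	/* claimed number of trailing u16 identifiers */
	uint16_t idents[];	/* flexible array, really sized by optlen */
};

#define NUM_HMACS_MAX 4

/* Reject counts that are zero, above the supported maximum, or larger than
 * what the caller actually passed in (optlen bytes). */
int check_hmac_opt(const struct hmacalgo_opt *opt, size_t optlen)
{
	uint32_t idents;

	if (optlen < sizeof(*opt))
		return -EINVAL;

	idents = opt->num_idents;
	if (idents == 0 || idents > NUM_HMACS_MAX ||
	    idents * sizeof(uint16_t) > optlen - sizeof(*opt))
		return -EINVAL;
	return 0;
}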
@ -3144,6 +3146,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
		goto out;
	}

	if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
		ret = -EINVAL;
		goto out;
	}

	asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
	if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
		ret = -EINVAL;