mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-11-19 16:14:13 +08:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) L2TP doesn't get autoloaded when you try to open an L2TP socket due to
    a missing module alias, fix from Benjamin LaHaise.

 2) Netlabel and RDS should propagate the gfp flags given to them by
    callers, fixes from Dan Carpenter.  (A small illustrative sketch of
    this pattern follows the commit reference below.)

 3) The recursive locking fix in usbnet wasn't bulletproof and can result
    in objects going away mid-flight due to races, fix from Ming Lei.

 4) Fix up some confusion about a bool module parameter in netfilter's
    iptable_filter and ip6table_filter, from Rusty Russell.

 5) If SKB recycling is used via napi_reuse_skb() we end up with different
    amounts of headroom reserved than we had at the original SKB
    allocation.  Fix from Eric Dumazet.

 6) Fix races in TG3 driver ring refilling, from Michael Chan.

 7) We have callbacks for IPSEC replay notifiers, but some call sites were
    not using the ops method and instead were calling one of the
    implementations directly.  Oops.  Fix from Steffen Klassert.

 8) Fix IP address validation properly in the bonding driver; the previous
    fix only works with netlink, where the subnet mask and IP address are
    changed in one atomic operation.  When 'ifconfig' ioctls are used, the
    IP address and the subnet mask are changed in two distinct operations.
    Fix from Andy Gospodarek.

 9) Provide a sky2 module option to work around power management issues
    with some BIOSes.  From Stephen Hemminger.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  usbnet: consider device busy at each recieved packet
  bonding: remove entries for master_ip and vlan_ip and query devices instead
  netfilter: remove forward module param confusion.
  usbnet: don't clear urb->dev in tx_complete
  usbnet: increase URB reference count before usb_unlink_urb
  xfrm: Access the replay notify functions via the registered callbacks
  xfrm: Remove unused xfrm_state from xfrm_state_check_space
  RDS: use gfp flags from caller in conn_alloc()
  netlabel: use GFP flags from caller instead of GFP_ATOMIC
  l2tp: enable automatic module loading for l2tp_ppp
  cnic: Fix parity error code conflict
  tg3: Fix RSS ring refill race condition
  sky2: override for PCI legacy power management
  net: fix napi_reuse_skb() skb reserve
This commit is contained in:
commit 934e18b5cb
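A small aside on item 2 of the pull message (allocators honouring the caller's GFP flags): the RDS and netlabel hunks further down all thread the caller's gfp_t through instead of hardcoding GFP_KERNEL or GFP_ATOMIC. A minimal sketch of that pattern, with purely illustrative names (demo_conn and demo_conn_alloc are not in the tree):

    #include <linux/slab.h>

    struct demo_conn {
        int id;
    };

    /* Honour the caller's allocation context: callers in atomic context
     * pass GFP_ATOMIC, callers that may sleep pass GFP_KERNEL; the helper
     * itself never assumes either.
     */
    static struct demo_conn *demo_conn_alloc(gfp_t gfp)
    {
        return kzalloc(sizeof(struct demo_conn), gfp);
    }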
drivers/net/bonding/bond_main.c

@@ -2573,12 +2573,16 @@ re_arm:
 static int bond_has_this_ip(struct bonding *bond, __be32 ip)
 {
     struct vlan_entry *vlan;
+    struct net_device *vlan_dev;
 
-    if (ip == bond->master_ip)
+    if (ip == bond_confirm_addr(bond->dev, 0, ip))
         return 1;
 
     list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-        if (ip == vlan->vlan_ip)
+        rcu_read_lock();
+        vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id);
+        rcu_read_unlock();
+        if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip))
             return 1;
     }
 
@@ -2620,17 +2624,19 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
     int i, vlan_id;
     __be32 *targets = bond->params.arp_targets;
     struct vlan_entry *vlan;
-    struct net_device *vlan_dev;
+    struct net_device *vlan_dev = NULL;
     struct rtable *rt;
 
     for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
+        __be32 addr;
         if (!targets[i])
             break;
         pr_debug("basa: target %x\n", targets[i]);
         if (!bond_vlan_used(bond)) {
             pr_debug("basa: empty vlan: arp_send\n");
+            addr = bond_confirm_addr(bond->dev, targets[i], 0);
             bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                      bond->master_ip, 0);
+                      addr, 0);
             continue;
         }
 
@@ -2655,8 +2661,9 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
         if (rt->dst.dev == bond->dev) {
             ip_rt_put(rt);
             pr_debug("basa: rtdev == bond->dev: arp_send\n");
+            addr = bond_confirm_addr(bond->dev, targets[i], 0);
             bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                      bond->master_ip, 0);
+                      addr, 0);
             continue;
         }
 
@@ -2674,10 +2681,11 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
             }
         }
 
-        if (vlan_id) {
+        if (vlan_id && vlan_dev) {
             ip_rt_put(rt);
+            addr = bond_confirm_addr(vlan_dev, targets[i], 0);
             bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                      vlan->vlan_ip, vlan_id);
+                      addr, vlan_id);
             continue;
         }
 
@@ -3299,68 +3307,10 @@ static int bond_netdev_event(struct notifier_block *this,
     return NOTIFY_DONE;
 }
 
-/*
- * bond_inetaddr_event: handle inetaddr notifier chain events.
- *
- * We keep track of device IPs primarily to use as source addresses in
- * ARP monitor probes (rather than spewing out broadcasts all the time).
- *
- * We track one IP for the main device (if it has one), plus one per VLAN.
- */
-static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
-    struct in_ifaddr *ifa = ptr;
-    struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev;
-    struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
-    struct bonding *bond;
-    struct vlan_entry *vlan;
-
-    /* we only care about primary address */
-    if(ifa->ifa_flags & IFA_F_SECONDARY)
-        return NOTIFY_DONE;
-
-    list_for_each_entry(bond, &bn->dev_list, bond_list) {
-        if (bond->dev == event_dev) {
-            switch (event) {
-            case NETDEV_UP:
-                bond->master_ip = ifa->ifa_local;
-                return NOTIFY_OK;
-            case NETDEV_DOWN:
-                bond->master_ip = 0;
-                return NOTIFY_OK;
-            default:
-                return NOTIFY_DONE;
-            }
-        }
-
-        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-            vlan_dev = __vlan_find_dev_deep(bond->dev,
-                            vlan->vlan_id);
-            if (vlan_dev == event_dev) {
-                switch (event) {
-                case NETDEV_UP:
-                    vlan->vlan_ip = ifa->ifa_local;
-                    return NOTIFY_OK;
-                case NETDEV_DOWN:
-                    vlan->vlan_ip = 0;
-                    return NOTIFY_OK;
-                default:
-                    return NOTIFY_DONE;
-                }
-            }
-        }
-    }
-    return NOTIFY_DONE;
-}
-
 static struct notifier_block bond_netdev_notifier = {
     .notifier_call = bond_netdev_event,
 };
 
-static struct notifier_block bond_inetaddr_notifier = {
-    .notifier_call = bond_inetaddr_event,
-};
-
 /*---------------------------- Hashing Policies -----------------------------*/
 
 /*
@@ -4929,7 +4879,6 @@ static int __init bonding_init(void)
     }
 
     register_netdevice_notifier(&bond_netdev_notifier);
-    register_inetaddr_notifier(&bond_inetaddr_notifier);
 out:
     return res;
 err:
@@ -4943,7 +4892,6 @@ err_link:
 static void __exit bonding_exit(void)
 {
     unregister_netdevice_notifier(&bond_netdev_notifier);
-    unregister_inetaddr_notifier(&bond_inetaddr_notifier);
 
     bond_destroy_debugfs();
 
drivers/net/bonding/bond.h

@@ -21,6 +21,7 @@
 #include <linux/cpumask.h>
 #include <linux/in6.h>
 #include <linux/netpoll.h>
+#include <linux/inetdevice.h>
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
@@ -166,7 +167,6 @@ struct bond_parm_tbl {
 
 struct vlan_entry {
     struct list_head vlan_list;
-    __be32 vlan_ip;
     unsigned short vlan_id;
 };
 
@@ -232,7 +232,6 @@ struct bonding {
     struct list_head bond_list;
     struct netdev_hw_addr_list mc_list;
     int (*xmit_hash_policy)(struct sk_buff *, int);
-    __be32 master_ip;
     u16 rr_tx_counter;
     struct ad_bond_info ad_info;
     struct alb_bond_info alb_info;
@@ -378,6 +377,21 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
     return slave->inactive;
 }
 
+static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
+{
+    struct in_device *in_dev;
+    __be32 addr = 0;
+
+    rcu_read_lock();
+    in_dev = __in_dev_get_rcu(dev);
+
+    if (in_dev)
+        addr = inet_confirm_addr(in_dev, dst, local, RT_SCOPE_HOST);
+
+    rcu_read_unlock();
+    return addr;
+}
+
 struct bond_net;
 
 struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 
drivers/net/ethernet/broadcom/cnic.c

@@ -47,6 +47,7 @@
 #include "bnx2x/bnx2x_hsi.h"
 #include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
 #include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
+#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
 #include "cnic.h"
 #include "cnic_defs.h"
 
@@ -2547,7 +2548,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
     }
     kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
     kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
-    kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR;
+    kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
     kcqe.kcqe_info2 = cid;
     kcqe.kcqe_info0 = l5_cid;
 
@@ -2558,7 +2559,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
 
     kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
     kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
-    kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR;
+    kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
     kcqe.kcqe_info2 = cid;
     cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
 
@@ -2577,7 +2578,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
 
     kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
                 KCQE_FLAGS_LAYER_MASK_L4;
-    l4kcqe->status = L4_KCQE_COMPLETION_STATUS_NIC_ERROR;
+    l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
     l4kcqe->cid = cid;
     cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
 } else {
@@ -3933,7 +3934,8 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
     case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
         if (l4kcqe->status == 0)
             set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
-        else if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR)
+        else if (l4kcqe->status ==
+             L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
             set_bit(SK_F_HW_ERR, &csk->flags);
 
         smp_mb__before_clear_bit();
@@ -3946,7 +3948,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
     case L4_KCQE_OPCODE_VALUE_RESET_COMP:
     case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
     case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
-        if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR)
+        if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
             set_bit(SK_F_HW_ERR, &csk->flags);
 
         cp->close_conn(csk, opcode);
drivers/net/ethernet/broadcom/cnic_defs.h

@@ -35,16 +35,6 @@
 #define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
 #define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
 
-#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
-#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
-#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
-#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
-#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
-#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
-#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
-#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
-#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
-
 #define FCOE_RAMROD_CMD_ID_INIT_FUNC (FCOE_KCQE_OPCODE_INIT_FUNC)
 #define FCOE_RAMROD_CMD_ID_DESTROY_FUNC (FCOE_KCQE_OPCODE_DESTROY_FUNC)
 #define FCOE_RAMROD_CMD_ID_STAT_FUNC (FCOE_KCQE_OPCODE_STAT_FUNC)
@@ -54,23 +44,6 @@
 #define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN)
 #define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81)
 
-#define FCOE_KWQE_OPCODE_INIT1 (0)
-#define FCOE_KWQE_OPCODE_INIT2 (1)
-#define FCOE_KWQE_OPCODE_INIT3 (2)
-#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
-#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
-#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
-#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
-#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
-#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
-#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
-#define FCOE_KWQE_OPCODE_DESTROY (10)
-#define FCOE_KWQE_OPCODE_STAT (11)
-
-#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
-#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
-#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
-
 /* KCQ (kernel completion queue) response op codes */
 #define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
 #define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
@@ -87,6 +60,7 @@
 /* KCQ (kernel completion queue) completion status */
 #define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
 #define L4_KCQE_COMPLETION_STATUS_NIC_ERROR (4)
+#define L4_KCQE_COMPLETION_STATUS_PARITY_ERROR (0x81)
 #define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
 
 #define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
drivers/net/ethernet/broadcom/cnic_if.h

@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION "2.5.9"
-#define CNIC_MODULE_RELDATE "Feb 8, 2012"
+#define CNIC_MODULE_VERSION "2.5.10"
+#define CNIC_MODULE_RELDATE "March 21, 2012"
 
 #define CNIC_ULP_RDMA 0
 #define CNIC_ULP_ISCSI 1
drivers/net/ethernet/broadcom/tg3.c

@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME "tg3"
 #define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 122
+#define TG3_MIN_NUM 123
 #define DRV_MODULE_VERSION \
     __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "December 7, 2011"
+#define DRV_MODULE_RELDATE "March 21, 2012"
 
 #define RESET_KIND_SHUTDOWN 0
 #define RESET_KIND_INIT 1
@@ -5953,8 +5953,10 @@ next_pkt_nopost:
         tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
         tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
 
-        if (tnapi != &tp->napi[1])
+        if (tnapi != &tp->napi[1]) {
+            tp->rx_refill = true;
             napi_schedule(&tp->napi[1].napi);
+        }
     }
 
     return received;
@@ -6134,6 +6136,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
         u32 std_prod_idx = dpr->rx_std_prod_idx;
         u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
 
+        tp->rx_refill = false;
         for (i = 1; i < tp->irq_cnt; i++)
             err |= tg3_rx_prodring_xfer(tp, dpr,
                             &tp->napi[i].prodring);
@@ -6197,9 +6200,25 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
         /* check for RX/TX work to do */
         if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
                *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
+
+            /* This test here is not race free, but will reduce
+             * the number of interrupts by looping again.
+             */
+            if (tnapi == &tp->napi[1] && tp->rx_refill)
+                continue;
+
             napi_complete(napi);
             /* Reenable interrupts. */
             tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+
+            /* This test here is synchronized by napi_schedule()
+             * and napi_complete() to close the race condition.
+             */
+            if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
+                tw32(HOSTCC_MODE, tp->coalesce_mode |
+                     HOSTCC_MODE_ENABLE |
+                     tnapi->coal_now);
+            }
+
             mmiowb();
             break;
         }
drivers/net/ethernet/broadcom/tg3.h

@@ -3007,6 +3007,7 @@ struct tg3 {
     u32 rx_std_max_post;
     u32 rx_offset;
     u32 rx_pkt_map_sz;
+    bool rx_refill;
 
 
     /* begin "everything else" cacheline(s) section */
drivers/net/ethernet/marvell/sky2.c

@@ -95,6 +95,10 @@ static int disable_msi = 0;
 module_param(disable_msi, int, 0);
 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 
+static int legacy_pme = 0;
+module_param(legacy_pme, int, 0);
+MODULE_PARM_DESC(legacy_pme, "Legacy power management");
+
 static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
     { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
     { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
@@ -867,6 +871,13 @@ static void sky2_wol_init(struct sky2_port *sky2)
     /* Disable PiG firmware */
     sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
 
+    /* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */
+    if (legacy_pme) {
+        u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+        reg1 |= PCI_Y2_PME_LEGACY;
+        sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+    }
+
     /* block receiver */
     sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
     sky2_read32(hw, B0_CTST);
drivers/net/usb/usbnet.c

@@ -493,6 +493,7 @@ block:
     if (netif_running (dev->net) &&
         !test_bit (EVENT_RX_HALT, &dev->flags)) {
         rx_submit (dev, urb, GFP_ATOMIC);
+        usb_mark_last_busy(dev->udev);
         return;
     }
     usb_free_urb (urb);
@@ -589,6 +590,14 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
         entry = (struct skb_data *) skb->cb;
         urb = entry->urb;
 
+        /*
+         * Get reference count of the URB to avoid it to be
+         * freed during usb_unlink_urb, which may trigger
+         * use-after-free problem inside usb_unlink_urb since
+         * usb_unlink_urb is always racing with .complete
+         * handler(include defer_bh).
+         */
+        usb_get_urb(urb);
         spin_unlock_irqrestore(&q->lock, flags);
         // during some PM-driven resume scenarios,
         // these (async) unlinks complete immediately
@@ -597,6 +606,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
             netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
         else
             count++;
+        usb_put_urb(urb);
         spin_lock_irqsave(&q->lock, flags);
     }
     spin_unlock_irqrestore (&q->lock, flags);
@@ -1028,7 +1038,6 @@ static void tx_complete (struct urb *urb)
     }
 
     usb_autopm_put_interface_async(dev->intf);
-    urb->dev = NULL;
     entry->state = tx_done;
     defer_bh(dev, skb, &dev->txq);
 }
drivers/scsi/bnx2fc/bnx2fc_constants.h

@@ -47,6 +47,7 @@
 #define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
 #define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
 #define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (0x6)
+#define FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR (0x81)
 
 /* CQE type */
 #define FCOE_PENDING_CQE_TYPE 0
drivers/scsi/bnx2i/57xx_iscsi_constants.h

@@ -122,6 +122,7 @@
 #define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
 
 #define ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY (0x80)
+#define ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR (0x81)
 
 /* SQ/RQ/CQ DB structure sizes */
 #define ISCSI_SQ_DB_SIZE (16)
net/core/dev.c

@@ -3560,7 +3560,8 @@ EXPORT_SYMBOL(napi_gro_receive);
 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
     __skb_pull(skb, skb_headlen(skb));
-    skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+    /* restore the reserve we had after netdev_alloc_skb_ip_align() */
+    skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
     skb->vlan_tci = 0;
     skb->dev = napi->dev;
     skb->skb_iif = 0;
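The napi_reuse_skb() change above matters because netdev_alloc_skb_ip_align() reserves NET_SKB_PAD + NET_IP_ALIGN bytes of headroom, while the old recycle path reset headroom to only NET_IP_ALIGN. A small stand-alone sketch of the arithmetic, assuming illustrative values (NET_SKB_PAD is config dependent, roughly max(32, L1_CACHE_BYTES); 64 is only an assumption here):

    #include <stdio.h>

    #define NET_SKB_PAD  64   /* assumed value for illustration */
    #define NET_IP_ALIGN  2

    int main(void)
    {
        int headroom = NET_SKB_PAD + NET_IP_ALIGN;  /* headroom after a fresh allocation */

        /* old reuse path: skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)) */
        int old_reuse = headroom + (NET_IP_ALIGN - headroom);
        /* fixed path: skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)) */
        int new_reuse = headroom + (NET_SKB_PAD + NET_IP_ALIGN - headroom);

        printf("headroom after old reuse: %d, after fix: %d\n",
               old_reuse, new_reuse);   /* 2 vs 66 with these assumed values */
        return 0;
    }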
net/ipv4/devinet.c

@@ -1079,6 +1079,7 @@ __be32 inet_confirm_addr(struct in_device *in_dev,
 
     return addr;
 }
+EXPORT_SYMBOL(inet_confirm_addr);
 
 /*
  * Device notifier
net/ipv4/netfilter/iptable_filter.c

@@ -52,7 +52,7 @@ iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
 static struct nf_hook_ops *filter_ops __read_mostly;
 
 /* Default to forward because I got too much mail already. */
-static bool forward = NF_ACCEPT;
+static bool forward = true;
 module_param(forward, bool, 0000);
 
 static int __net_init iptable_filter_net_init(struct net *net)
@@ -64,7 +64,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
         return -ENOMEM;
     /* Entry 1 is the FORWARD hook */
     ((struct ipt_standard *)repl->entries)[1].target.verdict =
-        -forward - 1;
+        forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;
 
     net->ipv4.iptable_filter =
         ipt_register_table(net, &packet_filter, repl);
@@ -88,11 +88,6 @@ static int __init iptable_filter_init(void)
 {
     int ret;
 
-    if (forward < 0 || forward > NF_MAX_VERDICT) {
-        pr_err("iptables forward must be 0 or 1\n");
-        return -EINVAL;
-    }
-
     ret = register_pernet_subsys(&iptable_filter_net_ops);
     if (ret < 0)
         return ret;
net/ipv6/netfilter/ip6table_filter.c

@@ -44,7 +44,7 @@ ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
 static struct nf_hook_ops *filter_ops __read_mostly;
 
 /* Default to forward because I got too much mail already. */
-static bool forward = NF_ACCEPT;
+static bool forward = true;
 module_param(forward, bool, 0000);
 
 static int __net_init ip6table_filter_net_init(struct net *net)
@@ -56,7 +56,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
         return -ENOMEM;
     /* Entry 1 is the FORWARD hook */
     ((struct ip6t_standard *)repl->entries)[1].target.verdict =
-        -forward - 1;
+        forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;
 
     net->ipv6.ip6table_filter =
         ip6t_register_table(net, &packet_filter, repl);
@@ -80,11 +80,6 @@ static int __init ip6table_filter_init(void)
 {
     int ret;
 
-    if (forward < 0 || forward > NF_MAX_VERDICT) {
-        pr_err("iptables forward must be 0 or 1\n");
-        return -EINVAL;
-    }
-
     ret = register_pernet_subsys(&ip6table_filter_net_ops);
     if (ret < 0)
         return ret;
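In the two filter-table hunks above, the bool parameter is mapped to an explicit netfilter verdict. As a rough stand-alone illustration of the standard-target encoding used there (a built-in verdict V is stored as -V - 1 so it can be told apart from a non-negative jump offset; this is a sketch, not the kernel code):

    #include <stdio.h>

    #define NF_DROP   0
    #define NF_ACCEPT 1

    int main(void)
    {
        int forward = 1;  /* the module parameter, now a plain bool */
        int verdict = forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;

        printf("stored verdict: %d\n", verdict);  /* -2 encodes ACCEPT, -1 encodes DROP */
        return 0;
    }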
net/l2tp/l2tp_ppp.c

@@ -1845,3 +1845,4 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
 MODULE_DESCRIPTION("PPP over L2TP over UDP");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PPPOL2TP_DRV_VERSION);
+MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP));
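The single added MODULE_ALIAS line above is what makes autoloading work: when a PPPoX socket is opened for a protocol with no registered handler, the PPPoX core asks modprobe for a module named after the protocol number, and the alias lets modprobe resolve that request to l2tp_ppp. A hedged sketch of the same hook in a hypothetical driver (the alias string must match whatever the requesting subsystem passes to request_module(); "demo-proto-1" is invented for illustration):

    #include <linux/module.h>

    static int __init demo_init(void)
    {
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
    /* hypothetical: load this module when "demo-proto-1" is requested */
    MODULE_ALIAS("demo-proto-1");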
net/netlabel/netlabel_kapi.c

@@ -597,7 +597,7 @@ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap,
             iter = iter->next;
             iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE;
         }
-        ret_val = netlbl_secattr_catmap_setbit(iter, spot, GFP_ATOMIC);
+        ret_val = netlbl_secattr_catmap_setbit(iter, spot, flags);
     }
 
     return ret_val;
net/rds/ib_cm.c

@@ -749,7 +749,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
     int ret;
 
     /* XXX too lazy? */
-    ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
+    ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
     if (!ic)
         return -ENOMEM;
 
net/rds/iw_cm.c

@@ -694,7 +694,7 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
     unsigned long flags;
 
     /* XXX too lazy? */
-    ic = kzalloc(sizeof(struct rds_iw_connection), GFP_KERNEL);
+    ic = kzalloc(sizeof(struct rds_iw_connection), gfp);
     if (!ic)
         return -ENOMEM;
 
net/rds/loop.c

@@ -121,7 +121,7 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
     struct rds_loop_connection *lc;
     unsigned long flags;
 
-    lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL);
+    lc = kzalloc(sizeof(struct rds_loop_connection), gfp);
     if (!lc)
         return -ENOMEM;
 
net/xfrm/xfrm_output.c

@@ -21,7 +21,7 @@
 
 static int xfrm_output2(struct sk_buff *skb);
 
-static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
+static int xfrm_skb_check_space(struct sk_buff *skb)
 {
     struct dst_entry *dst = skb_dst(skb);
     int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
@@ -48,7 +48,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
         goto resume;
 
     do {
-        err = xfrm_state_check_space(x, skb);
+        err = xfrm_skb_check_space(skb);
         if (err) {
             XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
             goto error_nolock;
net/xfrm/xfrm_replay.c

@@ -167,7 +167,7 @@ static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
     }
 
     if (xfrm_aevent_is_on(xs_net(x)))
-        xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+        x->repl->notify(x, XFRM_REPLAY_UPDATE);
 }
 
 static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
@@ -279,7 +279,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
     replay_esn->bmp[nr] |= (1U << bitnr);
 
     if (xfrm_aevent_is_on(xs_net(x)))
-        xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+        x->repl->notify(x, XFRM_REPLAY_UPDATE);
 }
 
 static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
@@ -473,7 +473,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
     replay_esn->bmp[nr] |= (1U << bitnr);
 
     if (xfrm_aevent_is_on(xs_net(x)))
-        xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+        x->repl->notify(x, XFRM_REPLAY_UPDATE);
 }
 
 static struct xfrm_replay xfrm_replay_legacy = {
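The three xfrm_replay.c hunks above all make the replay-advance paths notify through the callback registered on the state (x->repl->notify) rather than calling one implementation directly, so states using the bitmap or ESN replay variants reach their own notifier. A small stand-alone sketch of that ops-table dispatch, with illustrative names only:

    #include <stdio.h>

    struct replay_ops {
        void (*notify)(int event);
    };

    static void notify_legacy(int event) { printf("legacy notify %d\n", event); }
    static void notify_esn(int event)    { printf("esn notify %d\n", event); }

    static const struct replay_ops legacy_ops = { .notify = notify_legacy };
    static const struct replay_ops esn_ops    = { .notify = notify_esn };

    struct state {
        const struct replay_ops *repl;
    };

    int main(void)
    {
        struct state a = { .repl = &legacy_ops };
        struct state b = { .repl = &esn_ops };

        a.repl->notify(1);   /* dispatches to whichever ops the state registered */
        b.repl->notify(1);
        return 0;
    }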