Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix NAPI poll list corruption in enic driver, from Christian Lamparter.

 2) Fix route use after free, from Eric Dumazet.

 3) Fix regression in reuseaddr handling, from Josef Bacik.

 4) Assert the size of control messages in compat handling since we copy
    it in from userspace twice. From Meng Xu.

 5) SMC layer bug fixes (missing RCU locking, bad refcounting, etc.) from
    Ursula Braun.

 6) Fix races in AF_PACKET fanout handling, from Willem de Bruijn.

 7) Don't use ARRAY_SIZE on spinlock array which might have zero entries,
    from Geert Uytterhoeven.

 8) Fix miscomputation of checksum in ipv6 udp code, from Subash Abhinov
    Kasiviswanathan.

 9) Push the ipv6 header properly in ipv6 GRE tunnel driver, from Xin Long.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (75 commits)
  inet: fix improper empty comparison
  net: use inet6_rcv_saddr to compare sockets
  net: set tb->fast_sk_family
  net: orphan frags on stand-alone ptype in dev_queue_xmit_nit
  MAINTAINERS: update git tree locations for ieee802154 subsystem
  net: prevent dst uses after free
  net: phy: Fix truncation of large IRQ numbers in phy_attached_print()
  net/smc: no close wait in case of process shut down
  net/smc: introduce a delay
  net/smc: terminate link group if out-of-sync is received
  net/smc: longer delay for client link group removal
  net/smc: adapt send request completion notification
  net/smc: adjust net_device refcount
  net/smc: take RCU read lock for routing cache lookup
  net/smc: add receive timeout check
  net/smc: add missing dev_put
  net: stmmac: Cocci spatch "of_table"
  lan78xx: Use default values loaded from EEPROM/OTP after reset
  lan78xx: Allow EEPROM write for less than MAX_EEPROM_SIZE
  lan78xx: Fix for eeprom read/write when device auto suspend
  ...
commit 71aa60f67f
@@ -1680,6 +1680,9 @@ accept_dad - INTEGER
	2: Enable DAD, and disable IPv6 operation if MAC-based duplicate
	   link-local address has been found.

	DAD operation and mode on a given interface will be selected according
	to the maximum value of conf/{all,interface}/accept_dad.

force_tllao - BOOLEAN
	Enable sending the target link-layer address option even when
	responding to a unicast neighbor solicitation.
@@ -1727,16 +1730,23 @@ suppress_frag_ndisc - INTEGER

optimistic_dad - BOOLEAN
	Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
	0: disabled (default)
	1: enabled
	0: disabled (default)
	1: enabled

	Optimistic Duplicate Address Detection for the interface will be enabled
	if at least one of conf/{all,interface}/optimistic_dad is set to 1,
	it will be disabled otherwise.

use_optimistic - BOOLEAN
	If enabled, do not classify optimistic addresses as deprecated during
	source address selection. Preferred addresses will still be chosen
	before optimistic addresses, subject to other ranking in the source
	address selection algorithm.
	0: disabled (default)
	1: enabled
	0: disabled (default)
	1: enabled

	This will be enabled if at least one of
	conf/{all,interface}/use_optimistic is set to 1, disabled otherwise.
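
For context, the settings above are ordinary sysctl/procfs files, so "at least
one of conf/{all,interface}/..." simply means writing either file. A minimal
userspace sketch, not from the kernel tree; the interface name "eth0" is
illustrative:

#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        /* optimistic_dad is enabled for eth0 if either of these is 1 ... */
        write_sysctl("/proc/sys/net/ipv6/conf/all/optimistic_dad", "1");
        write_sysctl("/proc/sys/net/ipv6/conf/eth0/optimistic_dad", "1");

        /* ... and use_optimistic follows the same "at least one of" rule. */
        write_sysctl("/proc/sys/net/ipv6/conf/all/use_optimistic", "1");
        return 0;
}
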
stable_secret - IPv6 address
	This IPv6 address will be used as a secret to generate IPv6
@@ -13,42 +13,42 @@ an example setup using a data-center-class switch ASIC chip. Other setups
with SR-IOV or soft switches, such as OVS, are possible.

                             User-space tools

[Fig 1, ASCII diagram: user-space tools talk to the kernel network stack
 (Linux) over Netlink; the switch driver (this document) and a mgmt driver
 sit below the network stack and reach the switch device (sw1) across the
 HW bus (eg PCI); sw1 provides the offloaded data path and a mgmt port
 (eth1), and its front-panel ports p1-p6 correspond to ports sw1p1-sw1p6.]

                             Fig 1.
@@ -2865,7 +2865,6 @@ S:	Supported
F:	drivers/scsi/bnx2i/

BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
M:	Yuval Mintz <Yuval.Mintz@cavium.com>
M:	Ariel Elior <ariel.elior@cavium.com>
M:	everest-linux-l2@cavium.com
L:	netdev@vger.kernel.org
@@ -6655,8 +6654,8 @@ M:	Alexander Aring <alex.aring@gmail.com>
M:	Stefan Schmidt <stefan@osg.samsung.com>
L:	linux-wpan@vger.kernel.org
W:	http://wpan.cakelab.org/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
S:	Maintained
F:	net/ieee802154/
F:	net/mac802154/
@@ -11059,7 +11058,6 @@ S:	Supported
F:	drivers/scsi/qedi/

QLOGIC QL4xxx ETHERNET DRIVER
M:	Yuval Mintz <Yuval.Mintz@cavium.com>
M:	Ariel Elior <Ariel.Elior@cavium.com>
M:	everest-linux-l2@cavium.com
L:	netdev@vger.kernel.org
@@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
	isdn_net_local *lp;
	struct ippp_struct *is;
	int proto;
	unsigned char protobuf[4];

	is = file->private_data;

@@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
	if (!lp)
		printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
	else {
		/*
		 * Don't reset huptimer for
		 * LCP packets. (Echo requests).
		 */
		if (copy_from_user(protobuf, buf, 4))
			return -EFAULT;
		proto = PPP_PROTOCOL(protobuf);
		if (proto != PPP_LCP)
			lp->huptimer = 0;
		if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
			unsigned char protobuf[4];
			/*
			 * Don't reset huptimer for
			 * LCP packets. (Echo requests).
			 */
			if (copy_from_user(protobuf, buf, 4))
				return -EFAULT;

			proto = PPP_PROTOCOL(protobuf);
			if (proto != PPP_LCP)
				lp->huptimer = 0;

			if (lp->isdn_device < 0 || lp->isdn_channel < 0)
				return 0;
		}

		if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
		    lp->dialstate == 0 &&
		    (lp->flags & ISDN_NET_CONNECTED)) {
			unsigned short hl;
			struct sk_buff *skb;
			unsigned char *cpy_buf;
			/*
			 * we need to reserve enough space in front of
			 * sk_buff. old call to dev_alloc_skb only reserved
@@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
				return count;
			}
			skb_reserve(skb, hl);
			if (copy_from_user(skb_put(skb, count), buf, count))
			cpy_buf = skb_put(skb, count);
			if (copy_from_user(cpy_buf, buf, count))
			{
				kfree_skb(skb);
				return -EFAULT;
			}

			/*
			 * Don't reset huptimer for
			 * LCP packets. (Echo requests).
			 */
			proto = PPP_PROTOCOL(cpy_buf);
			if (proto != PPP_LCP)
				lp->huptimer = 0;

			if (is->debug & 0x40) {
				printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
				isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
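
The reworked transmit path above copies the user buffer into the skb once and
then derives the PPP protocol from that kernel-side copy (cpy_buf), instead of
deciding from a separately fetched protobuf. A minimal sketch of that "copy
once, then decide" pattern, with hypothetical names and simplified error
handling (not the driver's actual code):

#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/ppp_defs.h>

static int example_queue_user_frame(struct sk_buff *skb,
                                    const char __user *buf, int count,
                                    unsigned long *huptimer)
{
        unsigned char *cpy_buf = skb_put(skb, count);

        if (copy_from_user(cpy_buf, buf, count))
                return -EFAULT;

        /* Decide based on the data that will actually be transmitted. */
        if (PPP_PROTOCOL(cpy_buf) != PPP_LCP)
                *huptimer = 0;

        return 0;
}
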
@@ -432,6 +432,27 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
@@ -439,11 +460,16 @@ static void bcm_sysport_get_stats(struct net_device *dev,
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev))
	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
@@ -461,12 +487,13 @@ static void bcm_sysport_get_stats(struct net_device *dev,
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64))
		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		else
		} else
			data[i] = *(u32 *)p;
		j++;
	}
@@ -1716,27 +1743,12 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_packets = 0, tx_bytes = 0;
	unsigned int start;
	unsigned int q;

	netdev_stats_to_stats64(stats, &dev->stats);

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			tx_bytes = ring->bytes;
			tx_packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		stats->tx_bytes += tx_bytes;
		stats->tx_packets += tx_packets;
	}

	stats64->tx_bytes = stats->tx_bytes;
	stats64->tx_packets = stats->tx_packets;
	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
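
For reference, the fetch_begin/fetch_retry loops above are the reader half of
the u64_stats_sync pattern; the writer side brackets counter updates with
u64_stats_update_begin()/u64_stats_update_end() so 32-bit readers see a
consistent {bytes, packets} snapshot. A minimal sketch of the pairing, with an
illustrative structure rather than the driver's actual layout (in the driver
the syncp lives in priv, not per ring), and u64_stats_init() on the syncp at
setup time assumed:

#include <linux/u64_stats_sync.h>

struct example_ring_stats {
        struct u64_stats_sync syncp;
        u64 bytes;
        u64 packets;
};

static void example_ring_stats_add(struct example_ring_stats *s,
                                   unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->bytes += len;
        s->packets++;
        u64_stats_update_end(&s->syncp);
}

static void example_ring_stats_read(struct example_ring_stats *s,
                                    u64 *bytes, u64 *packets)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&s->syncp);
                *bytes = s->bytes;
                *packets = s->packets;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
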
@@ -750,6 +750,10 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
{
	int rc = 0;

	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
@@ -374,8 +374,8 @@ struct bufdesc_ex {
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)

#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
#define FEC_NAPI_IMASK FEC_ENET_MII
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))

/* ENET interrupt coalescing macro define */
@@ -1559,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF)
	if (int_events & FEC_ENET_RXF_0)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
	if (int_events & FEC_ENET_TXF_0)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
@@ -1604,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id)
	}

	if (fep->ptp_clock)
		fec_ptp_check_pps_event(fep);

	if (fec_ptp_check_pps_event(fep))
		ret = IRQ_HANDLED;
	return ret;
}

@ -37,20 +37,15 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
|
||||
}
|
||||
|
||||
static int hnae3_match_n_instantiate(struct hnae3_client *client,
|
||||
struct hnae3_ae_dev *ae_dev,
|
||||
bool is_reg, bool *matched)
|
||||
struct hnae3_ae_dev *ae_dev, bool is_reg)
|
||||
{
|
||||
int ret;
|
||||
|
||||
*matched = false;
|
||||
|
||||
/* check if this client matches the type of ae_dev */
|
||||
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
|
||||
hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
|
||||
return 0;
|
||||
}
|
||||
/* there is a match of client and dev */
|
||||
*matched = true;
|
||||
|
||||
/* now, (un-)instantiate client by calling lower layer */
|
||||
if (is_reg) {
|
||||
@ -69,7 +64,6 @@ int hnae3_register_client(struct hnae3_client *client)
|
||||
{
|
||||
struct hnae3_client *client_tmp;
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
bool matched;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
@ -86,7 +80,7 @@ int hnae3_register_client(struct hnae3_client *client)
|
||||
/* if the client could not be initialized on current port, for
|
||||
* any error reasons, move on to next available port
|
||||
*/
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched);
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true);
|
||||
if (ret)
|
||||
dev_err(&ae_dev->pdev->dev,
|
||||
"match and instantiation failed for port\n");
|
||||
@ -102,12 +96,11 @@ EXPORT_SYMBOL(hnae3_register_client);
|
||||
void hnae3_unregister_client(struct hnae3_client *client)
|
||||
{
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
bool matched;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
/* un-initialize the client on every matched port */
|
||||
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
|
||||
hnae3_match_n_instantiate(client, ae_dev, false, &matched);
|
||||
hnae3_match_n_instantiate(client, ae_dev, false);
|
||||
}
|
||||
|
||||
list_del(&client->node);
|
||||
@ -124,7 +117,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
|
||||
const struct pci_device_id *id;
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
struct hnae3_client *client;
|
||||
bool matched;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
@ -151,13 +143,10 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
|
||||
* initialize the figure out client instance
|
||||
*/
|
||||
list_for_each_entry(client, &hnae3_client_list, node) {
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true,
|
||||
&matched);
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true);
|
||||
if (ret)
|
||||
dev_err(&ae_dev->pdev->dev,
|
||||
"match and instantiation failed\n");
|
||||
if (matched)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@ -175,7 +164,6 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
|
||||
const struct pci_device_id *id;
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
struct hnae3_client *client;
|
||||
bool matched;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
/* Check if there are matched ae_dev */
|
||||
@ -187,12 +175,8 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
|
||||
/* check the client list for the match with this ae_dev type and
|
||||
* un-initialize the figure out client instance
|
||||
*/
|
||||
list_for_each_entry(client, &hnae3_client_list, node) {
|
||||
hnae3_match_n_instantiate(client, ae_dev, false,
|
||||
&matched);
|
||||
if (matched)
|
||||
break;
|
||||
}
|
||||
list_for_each_entry(client, &hnae3_client_list, node)
|
||||
hnae3_match_n_instantiate(client, ae_dev, false);
|
||||
|
||||
ae_algo->ops->uninit_ae_dev(ae_dev);
|
||||
hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
|
||||
@ -212,7 +196,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
const struct pci_device_id *id;
|
||||
struct hnae3_ae_algo *ae_algo;
|
||||
struct hnae3_client *client;
|
||||
bool matched;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
@ -246,13 +229,10 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
* initialize the figure out client instance
|
||||
*/
|
||||
list_for_each_entry(client, &hnae3_client_list, node) {
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true,
|
||||
&matched);
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true);
|
||||
if (ret)
|
||||
dev_err(&ae_dev->pdev->dev,
|
||||
"match and instantiation failed\n");
|
||||
if (matched)
|
||||
break;
|
||||
}
|
||||
|
||||
out_err:
|
||||
@ -270,7 +250,6 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
const struct pci_device_id *id;
|
||||
struct hnae3_ae_algo *ae_algo;
|
||||
struct hnae3_client *client;
|
||||
bool matched;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
/* Check if there are matched ae_algo */
|
||||
@ -279,12 +258,8 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
if (!id)
|
||||
continue;
|
||||
|
||||
list_for_each_entry(client, &hnae3_client_list, node) {
|
||||
hnae3_match_n_instantiate(client, ae_dev, false,
|
||||
&matched);
|
||||
if (matched)
|
||||
break;
|
||||
}
|
||||
list_for_each_entry(client, &hnae3_client_list, node)
|
||||
hnae3_match_n_instantiate(client, ae_dev, false);
|
||||
|
||||
ae_algo->ops->uninit_ae_dev(ae_dev);
|
||||
hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
|
||||
|
@ -49,7 +49,17 @@
|
||||
#define HNAE3_CLASS_NAME_SIZE 16
|
||||
|
||||
#define HNAE3_DEV_INITED_B 0x0
|
||||
#define HNAE_DEV_SUPPORT_ROCE_B 0x1
|
||||
#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
|
||||
#define HNAE3_DEV_SUPPORT_DCB_B 0x2
|
||||
|
||||
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
|
||||
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
|
||||
|
||||
#define hnae3_dev_roce_supported(hdev) \
|
||||
hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
|
||||
|
||||
#define hnae3_dev_dcb_supported(hdev) \
|
||||
hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
|
||||
|
||||
#define ring_ptr_move_fw(ring, p) \
|
||||
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
|
||||
@ -366,12 +376,12 @@ struct hnae3_ae_algo {
|
||||
struct hnae3_tc_info {
|
||||
u16 tqp_offset; /* TQP offset from base TQP */
|
||||
u16 tqp_count; /* Total TQPs */
|
||||
u8 up; /* user priority */
|
||||
u8 tc; /* TC index */
|
||||
bool enable; /* If this TC is enable or not */
|
||||
};
|
||||
|
||||
#define HNAE3_MAX_TC 8
|
||||
#define HNAE3_MAX_USER_PRIO 8
|
||||
struct hnae3_knic_private_info {
|
||||
struct net_device *netdev; /* Set by KNIC client when init instance */
|
||||
u16 rss_size; /* Allocated RSS queues */
|
||||
@ -379,6 +389,7 @@ struct hnae3_knic_private_info {
|
||||
u16 num_desc;
|
||||
|
||||
u8 num_tc; /* Total number of enabled TCs */
|
||||
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
|
||||
struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
|
||||
|
||||
u16 num_tqps; /* total number of TQPs in this handle */
|
||||
|
@ -238,7 +238,7 @@ struct hclge_tqp_map {
|
||||
u8 rsv[18];
|
||||
};
|
||||
|
||||
#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11
|
||||
#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10
|
||||
|
||||
enum hclge_int_type {
|
||||
HCLGE_INT_TX,
|
||||
@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain {
|
||||
#define HCLGE_INT_TYPE_S 0
|
||||
#define HCLGE_INT_TYPE_M 0x3
|
||||
#define HCLGE_TQP_ID_S 2
|
||||
#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S)
|
||||
#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S)
|
||||
#define HCLGE_INT_GL_IDX_S 13
|
||||
#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S)
|
||||
__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
|
||||
u8 vfid;
|
||||
u8 rsv;
|
||||
};
|
||||
|
||||
#define HCLGE_TC_NUM 8
|
||||
@ -266,7 +270,8 @@ struct hclge_tx_buff_alloc {
|
||||
|
||||
struct hclge_rx_priv_buff {
|
||||
__le16 buf_num[HCLGE_TC_NUM];
|
||||
u8 rsv[8];
|
||||
__le16 shared_buf;
|
||||
u8 rsv[6];
|
||||
};
|
||||
|
||||
struct hclge_query_version {
|
||||
@ -684,6 +689,7 @@ struct hclge_reset_tqp_queue {
|
||||
#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
|
||||
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
|
||||
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
|
||||
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
|
||||
|
||||
#define HCLGE_TYPE_CRQ 0
|
||||
#define HCLGE_TYPE_CSQ 1
|
||||
|
@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
|
||||
/* Required last entry */
|
||||
{0, }
|
||||
};
|
||||
|
||||
static const struct pci_device_id roce_pci_tbl[] = {
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
|
||||
/* Required last entry */
|
||||
/* required last entry */
|
||||
{0, }
|
||||
};
|
||||
|
||||
@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
|
||||
hdev->num_tqps = __le16_to_cpu(req->tqp_num);
|
||||
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
|
||||
|
||||
if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) {
|
||||
if (hnae3_dev_roce_supported(hdev)) {
|
||||
hdev->num_roce_msix =
|
||||
hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
|
||||
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
|
||||
@ -1063,9 +1053,9 @@ static int hclge_configure(struct hclge_dev *hdev)
|
||||
hdev->base_tqp_pid = 0;
|
||||
hdev->rss_size_max = 1;
|
||||
hdev->rx_buf_len = cfg.rx_buf_len;
|
||||
for (i = 0; i < ETH_ALEN; i++)
|
||||
hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i];
|
||||
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
|
||||
hdev->hw.mac.media_type = cfg.media_type;
|
||||
hdev->hw.mac.phy_addr = cfg.phy_addr;
|
||||
hdev->num_desc = cfg.tqp_desc_num;
|
||||
hdev->tm_info.num_pg = 1;
|
||||
hdev->tm_info.num_tc = cfg.tc_num;
|
||||
@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
|
||||
tc_num = hclge_get_tc_num(hdev);
|
||||
pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
|
||||
|
||||
shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
|
||||
if (hnae3_dev_dcb_supported(hdev))
|
||||
shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
|
||||
else
|
||||
shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
|
||||
|
||||
shared_buf_tc = pfc_enable_num * hdev->mps +
|
||||
(tc_num - pfc_enable_num) * hdev->mps / 2 +
|
||||
hdev->mps;
|
||||
@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
|
||||
struct hclge_priv_buf *priv;
|
||||
int i;
|
||||
|
||||
/* When DCB is not supported, rx private
|
||||
* buffer is not allocated.
|
||||
*/
|
||||
if (!hnae3_dev_dcb_supported(hdev)) {
|
||||
if (!hclge_is_rx_buf_ok(hdev, rx_all))
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* step 1, try to alloc private buffer for all enabled tc */
|
||||
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
|
||||
priv = &hdev->priv_buf[i];
|
||||
@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
|
||||
priv->wl.high = 2 * hdev->mps;
|
||||
priv->buf_size = priv->wl.high;
|
||||
}
|
||||
} else {
|
||||
priv->enable = 0;
|
||||
priv->wl.low = 0;
|
||||
priv->wl.high = 0;
|
||||
priv->buf_size = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
|
||||
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
|
||||
priv = &hdev->priv_buf[i];
|
||||
|
||||
if (hdev->hw_tc_map & BIT(i))
|
||||
priv->enable = 1;
|
||||
priv->enable = 0;
|
||||
priv->wl.low = 0;
|
||||
priv->wl.high = 0;
|
||||
priv->buf_size = 0;
|
||||
|
||||
if (!(hdev->hw_tc_map & BIT(i)))
|
||||
continue;
|
||||
|
||||
priv->enable = 1;
|
||||
|
||||
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
|
||||
priv->wl.low = 128;
|
||||
@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
|
||||
cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
|
||||
}
|
||||
|
||||
req->shared_buf =
|
||||
cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
|
||||
(1 << HCLGE_TC0_PRI_BUF_EN_B));
|
||||
|
||||
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_rx_priv_wl_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"could not configure rx private waterline %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
if (hnae3_dev_dcb_supported(hdev)) {
|
||||
ret = hclge_rx_priv_wl_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"could not configure rx private waterline %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_common_thrd_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"could not configure common threshold %d\n", ret);
|
||||
return ret;
|
||||
ret = hclge_common_thrd_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"could not configure common threshold %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hclge_common_wl_config(hdev);
|
||||
@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
|
||||
u16 tc_valid[HCLGE_MAX_TC_NUM];
|
||||
u16 tc_size[HCLGE_MAX_TC_NUM];
|
||||
u32 *rss_indir = NULL;
|
||||
u16 rss_size = 0, roundup_size;
|
||||
const u8 *key;
|
||||
int i, ret, j;
|
||||
|
||||
@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
|
||||
for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
|
||||
for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
|
||||
vport[j].rss_indirection_tbl[i] =
|
||||
i % hdev->rss_size_max;
|
||||
i % vport[j].alloc_rss_size;
|
||||
|
||||
/* vport 0 is for PF */
|
||||
if (j != 0)
|
||||
continue;
|
||||
|
||||
rss_size = vport[j].alloc_rss_size;
|
||||
rss_indir[i] = vport[j].rss_indirection_tbl[i];
|
||||
}
|
||||
}
|
||||
@ -2613,42 +2644,31 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
|
||||
if (hdev->hw_tc_map & BIT(i))
|
||||
tc_valid[i] = 1;
|
||||
else
|
||||
tc_valid[i] = 0;
|
||||
|
||||
switch (hdev->rss_size_max) {
|
||||
case HCLGE_RSS_TC_SIZE_0:
|
||||
tc_size[i] = 0;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_1:
|
||||
tc_size[i] = 1;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_2:
|
||||
tc_size[i] = 2;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_3:
|
||||
tc_size[i] = 3;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_4:
|
||||
tc_size[i] = 4;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_5:
|
||||
tc_size[i] = 5;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_6:
|
||||
tc_size[i] = 6;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_7:
|
||||
tc_size[i] = 7;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
tc_offset[i] = hdev->rss_size_max * i;
|
||||
/* Each TC have the same queue size, and tc_size set to hardware is
|
||||
* the log2 of roundup power of two of rss_size, the acutal queue
|
||||
* size is limited by indirection table.
|
||||
*/
|
||||
if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"Configure rss tc size failed, invalid TC_SIZE = %d\n",
|
||||
rss_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
roundup_size = roundup_pow_of_two(rss_size);
|
||||
roundup_size = ilog2(roundup_size);
|
||||
|
||||
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
|
||||
tc_valid[i] = 0;
|
||||
|
||||
if (!(hdev->hw_tc_map & BIT(i)))
|
||||
continue;
|
||||
|
||||
tc_valid[i] = 1;
|
||||
tc_size[i] = roundup_size;
|
||||
tc_offset[i] = rss_size * i;
|
||||
}
|
||||
|
||||
ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
|
||||
|
||||
err:
|
||||
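
To make the sizing comment above concrete: the per-TC size programmed to
hardware is log2 of rss_size rounded up to a power of two. A small
self-contained illustration using userspace stand-ins for the kernel's
roundup_pow_of_two()/ilog2() helpers; the rss_size value 24 is hypothetical:

#include <stdio.h>

static unsigned int roundup_pow_of_two_u(unsigned int x)
{
        unsigned int r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

static unsigned int ilog2_u(unsigned int x)
{
        unsigned int l = 0;

        while (x > 1) {
                x >>= 1;
                l++;
        }
        return l;
}

int main(void)
{
        unsigned int rss_size = 24;     /* hypothetical */
        unsigned int roundup = roundup_pow_of_two_u(rss_size);

        /* Prints: rss_size=24 roundup=32 tc_size=5 */
        printf("rss_size=%u roundup=%u tc_size=%u\n",
               rss_size, roundup, ilog2_u(roundup));
        return 0;
}
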
@ -2679,7 +2699,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
|
||||
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
|
||||
hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
|
||||
HCLGE_TQP_ID_S, node->tqp_index);
|
||||
hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
|
||||
HCLGE_INT_GL_IDX_S,
|
||||
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
|
||||
req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
|
||||
req->vfid = vport->vport_id;
|
||||
|
||||
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
|
||||
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
|
||||
@ -2763,8 +2787,12 @@ static int hclge_unmap_ring_from_vector(
|
||||
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
|
||||
hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
|
||||
HCLGE_TQP_ID_S, node->tqp_index);
|
||||
hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
|
||||
HCLGE_INT_GL_IDX_S,
|
||||
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
|
||||
|
||||
req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
|
||||
req->vfid = vport->vport_id;
|
||||
|
||||
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
|
||||
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
|
||||
@ -2778,7 +2806,7 @@ static int hclge_unmap_ring_from_vector(
|
||||
}
|
||||
i = 0;
|
||||
hclge_cmd_setup_basic_desc(&desc,
|
||||
HCLGE_OPC_ADD_RING_TO_VECTOR,
|
||||
HCLGE_OPC_DEL_RING_TO_VECTOR,
|
||||
false);
|
||||
req->int_vector_id = vector_id;
|
||||
}
|
||||
@ -3665,6 +3693,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
|
||||
{
|
||||
#define HCLGE_VLAN_TYPE_VF_TABLE 0
|
||||
#define HCLGE_VLAN_TYPE_PORT_TABLE 1
|
||||
struct hnae3_handle *handle;
|
||||
int ret;
|
||||
|
||||
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
|
||||
@ -3674,8 +3703,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
|
||||
|
||||
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
|
||||
true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return ret;
|
||||
handle = &hdev->vport[0].nic;
|
||||
return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
|
||||
}
|
||||
|
||||
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
|
||||
@ -3920,8 +3952,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
|
||||
goto err;
|
||||
|
||||
if (hdev->roce_client &&
|
||||
hnae_get_bit(hdev->ae_dev->flag,
|
||||
HNAE_DEV_SUPPORT_ROCE_B)) {
|
||||
hnae3_dev_roce_supported(hdev)) {
|
||||
struct hnae3_client *rc = hdev->roce_client;
|
||||
|
||||
ret = hclge_init_roce_base_info(vport);
|
||||
@ -3944,8 +3975,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
|
||||
|
||||
break;
|
||||
case HNAE3_CLIENT_ROCE:
|
||||
if (hnae_get_bit(hdev->ae_dev->flag,
|
||||
HNAE_DEV_SUPPORT_ROCE_B)) {
|
||||
if (hnae3_dev_roce_supported(hdev)) {
|
||||
hdev->roce_client = client;
|
||||
vport->roce.client = client;
|
||||
}
|
||||
@ -4057,7 +4087,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
|
||||
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
{
|
||||
struct pci_dev *pdev = ae_dev->pdev;
|
||||
const struct pci_device_id *id;
|
||||
struct hclge_dev *hdev;
|
||||
int ret;
|
||||
|
||||
@ -4072,10 +4101,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
hdev->ae_dev = ae_dev;
|
||||
ae_dev->priv = hdev;
|
||||
|
||||
id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
|
||||
if (id)
|
||||
hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
|
||||
|
||||
ret = hclge_pci_init(hdev);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "PCI init failed\n");
|
||||
@ -4138,12 +4163,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_rss_init_hw(hdev);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_init_vlan_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
|
||||
@ -4156,6 +4175,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_rss_init_hw(hdev);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
setup_timer(&hdev->service_timer, hclge_service_timer,
|
||||
(unsigned long)hdev);
|
||||
INIT_WORK(&hdev->service_task, hclge_service_task);
|
||||
|
@ -176,7 +176,6 @@ struct hclge_pg_info {
|
||||
struct hclge_tc_info {
|
||||
u8 tc_id;
|
||||
u8 tc_sch_mode; /* 0: sp; 1: dwrr */
|
||||
u8 up;
|
||||
u8 pgid;
|
||||
u32 bw_limit;
|
||||
};
|
||||
@ -197,6 +196,7 @@ struct hclge_tm_info {
|
||||
u8 num_tc;
|
||||
u8 num_pg; /* It must be 1 if vNET-Base schd */
|
||||
u8 pg_dwrr[HCLGE_PG_NUM];
|
||||
u8 prio_tc[HNAE3_MAX_USER_PRIO];
|
||||
struct hclge_pg_info pg_info[HCLGE_PG_NUM];
|
||||
struct hclge_tc_info tc_info[HNAE3_MAX_TC];
|
||||
enum hclge_fc_mode fc_mode;
|
||||
@ -477,6 +477,7 @@ struct hclge_vport {
|
||||
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
|
||||
/* User configured lookup table entries */
|
||||
u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
|
||||
u16 alloc_rss_size;
|
||||
|
||||
u16 qs_offset;
|
||||
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
|
||||
|
@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
|
||||
{
|
||||
u8 tc;
|
||||
|
||||
for (tc = 0; tc < hdev->tm_info.num_tc; tc++)
|
||||
if (hdev->tm_info.tc_info[tc].up == pri_id)
|
||||
break;
|
||||
tc = hdev->tm_info.prio_tc[pri_id];
|
||||
|
||||
if (tc >= hdev->tm_info.num_tc)
|
||||
return -EINVAL;
|
||||
@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
|
||||
|
||||
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
|
||||
|
||||
for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) {
|
||||
for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
|
||||
ret = hclge_fill_pri_array(hdev, pri, pri_id);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
|
||||
|
||||
shap_cfg_cmd->pg_id = pg_id;
|
||||
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
|
||||
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
|
||||
|
||||
shap_cfg_cmd->pri_id = pri_id;
|
||||
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
|
||||
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
|
||||
kinfo->num_tqps / kinfo->num_tc);
|
||||
vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
|
||||
vport->dwrr = 100; /* 100 percent as init */
|
||||
vport->alloc_rss_size = kinfo->rss_size;
|
||||
|
||||
for (i = 0; i < kinfo->num_tc; i++) {
|
||||
if (hdev->hw_tc_map & BIT(i)) {
|
||||
@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
|
||||
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
|
||||
kinfo->tc_info[i].tqp_count = kinfo->rss_size;
|
||||
kinfo->tc_info[i].tc = i;
|
||||
kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
|
||||
} else {
|
||||
/* Set to default queue if TC is disable */
|
||||
kinfo->tc_info[i].enable = false;
|
||||
kinfo->tc_info[i].tqp_offset = 0;
|
||||
kinfo->tc_info[i].tqp_count = 1;
|
||||
kinfo->tc_info[i].tc = 0;
|
||||
kinfo->tc_info[i].up = 0;
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
|
||||
FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
|
||||
}
|
||||
|
||||
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
|
||||
@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
hdev->tm_info.tc_info[i].tc_id = i;
|
||||
hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
|
||||
hdev->tm_info.tc_info[i].up = i;
|
||||
hdev->tm_info.tc_info[i].pgid = 0;
|
||||
hdev->tm_info.tc_info[i].bw_limit =
|
||||
hdev->tm_info.pg_info[0].bw_limit;
|
||||
}
|
||||
|
||||
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
|
||||
hdev->tm_info.prio_tc[i] =
|
||||
(i >= hdev->tm_info.num_tc) ? 0 : i;
|
||||
|
||||
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
|
||||
}
|
||||
|
||||
@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Only DCB-supported dev supports qset back pressure setting */
|
||||
if (!hnae3_dev_dcb_supported(hdev))
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
ret = hclge_tm_qs_bp_cfg(hdev, i);
|
||||
if (ret)
|
||||
|
@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd {
|
||||
u32 rsvd1;
|
||||
};
|
||||
|
||||
#define hclge_tm_set_feild(dest, string, val) \
|
||||
#define hclge_tm_set_field(dest, string, val) \
|
||||
hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
|
||||
(HCLGE_TM_SHAP_##string##_LSH), val)
|
||||
#define hclge_tm_get_feild(src, string) \
|
||||
#define hclge_tm_get_field(src, string) \
|
||||
hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
|
||||
(HCLGE_TM_SHAP_##string##_LSH))
|
||||
|
||||
|
@ -41,11 +41,16 @@ static struct hnae3_client client;
|
||||
static const struct pci_device_id hns3_pci_tbl[] = {
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
/* required last entry */
|
||||
{0, }
|
||||
};
|
||||
@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
}
|
||||
|
||||
ae_dev->pdev = pdev;
|
||||
ae_dev->flag = ent->driver_data;
|
||||
ae_dev->dev_type = HNAE3_DEV_KNIC;
|
||||
pci_set_drvdata(pdev, ae_dev);
|
||||
|
||||
@ -2705,10 +2711,11 @@ static void hns3_init_mac_addr(struct net_device *netdev)
|
||||
eth_hw_addr_random(netdev);
|
||||
dev_warn(priv->dev, "using random MAC address %pM\n",
|
||||
netdev->dev_addr);
|
||||
/* Also copy this new MAC address into hdev */
|
||||
if (h->ae_algo->ops->set_mac_addr)
|
||||
h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
|
||||
}
|
||||
|
||||
if (h->ae_algo->ops->set_mac_addr)
|
||||
h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
|
||||
|
||||
}
|
||||
|
||||
static void hns3_nic_set_priv_ops(struct net_device *netdev)
|
||||
|
@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
|
||||
unsigned long flags;
|
||||
|
||||
MAL_DBG2(mal, "poll(%d)" NL, budget);
|
||||
again:
|
||||
|
||||
/* Process TX skbs */
|
||||
list_for_each(l, &mal->poll_list) {
|
||||
struct mal_commac *mc =
|
||||
@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
|
||||
spin_lock_irqsave(&mal->lock, flags);
|
||||
mal_disable_eob_irq(mal);
|
||||
spin_unlock_irqrestore(&mal->lock, flags);
|
||||
goto again;
|
||||
}
|
||||
mc->ops->poll_tx(mc->dev);
|
||||
}
|
||||
|
@ -88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data)
|
||||
static int emac_get_sset_count(struct net_device *netdev, int sset)
|
||||
{
|
||||
switch (sset) {
|
||||
case ETH_SS_PRIV_FLAGS:
|
||||
return 1;
|
||||
case ETH_SS_STATS:
|
||||
return EMAC_STATS_LEN;
|
||||
default:
|
||||
@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
|
||||
unsigned int i;
|
||||
|
||||
switch (stringset) {
|
||||
case ETH_SS_PRIV_FLAGS:
|
||||
strcpy(data, "single-pause-mode");
|
||||
break;
|
||||
|
||||
case ETH_SS_STATS:
|
||||
for (i = 0; i < EMAC_STATS_LEN; i++) {
|
||||
strlcpy(data, emac_ethtool_stat_strings[i],
|
||||
@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev)
|
||||
return EMAC_MAX_REG_SIZE * sizeof(u32);
|
||||
}
|
||||
|
||||
#define EMAC_PRIV_ENABLE_SINGLE_PAUSE BIT(0)
|
||||
|
||||
static int emac_set_priv_flags(struct net_device *netdev, u32 flags)
|
||||
{
|
||||
struct emac_adapter *adpt = netdev_priv(netdev);
|
||||
|
||||
adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE);
|
||||
|
||||
if (netif_running(netdev))
|
||||
return emac_reinit_locked(adpt);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 emac_get_priv_flags(struct net_device *netdev)
|
||||
{
|
||||
struct emac_adapter *adpt = netdev_priv(netdev);
|
||||
|
||||
return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0;
|
||||
}
|
||||
|
||||
static const struct ethtool_ops emac_ethtool_ops = {
|
||||
.get_link_ksettings = phy_ethtool_get_link_ksettings,
|
||||
.set_link_ksettings = phy_ethtool_set_link_ksettings,
|
||||
@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = {
|
||||
|
||||
.get_regs_len = emac_get_regs_len,
|
||||
.get_regs = emac_get_regs,
|
||||
|
||||
.set_priv_flags = emac_set_priv_flags,
|
||||
.get_priv_flags = emac_get_priv_flags,
|
||||
};
|
||||
|
||||
void emac_set_ethtool_ops(struct net_device *netdev)
|
||||
|
@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt)
|
||||
mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
|
||||
DEBUG_MODE | SINGLE_PAUSE_MODE);
|
||||
|
||||
/* Enable single-pause-frame mode if requested.
|
||||
*
|
||||
* If enabled, the EMAC will send a single pause frame when the RX
|
||||
* queue is full. This normally leads to packet loss because
|
||||
* the pause frame disables the remote MAC only for 33ms (the quanta),
|
||||
* and then the remote MAC continues sending packets even though
|
||||
* the RX queue is still full.
|
||||
*
|
||||
* If disabled, the EMAC sends a pause frame every 31ms until the RX
|
||||
* queue is no longer full. Normally, this is the preferred
|
||||
* method of operation. However, when the system is hung (e.g.
|
||||
* cores are halted), the EMAC interrupt handler is never called
|
||||
* and so the RX queue fills up quickly and stays full. The resuling
|
||||
* non-stop "flood" of pause frames sometimes has the effect of
|
||||
* disabling nearby switches. In some cases, other nearby switches
|
||||
* are also affected, shutting down the entire network.
|
||||
*
|
||||
* The user can enable or disable single-pause-frame mode
|
||||
* via ethtool.
|
||||
*/
|
||||
mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;
|
||||
|
||||
writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
|
||||
|
||||
writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);
|
||||
|
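
The new "single-pause-mode" behaviour described above is exposed as an ethtool
private flag (ethtool --show-priv-flags / --set-priv-flags). A minimal
userspace sketch of the same toggle via SIOCETHTOOL, not from the kernel tree;
the interface name is illustrative and the bit position mirrors
EMAC_PRIV_ENABLE_SINGLE_PAUSE (bit 0) in the driver code above:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int emac_set_single_pause(const char *ifname, int enable)
{
        struct ethtool_value eval = { .cmd = ETHTOOL_GPFLAGS };
        struct ifreq ifr;
        int fd, ret;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&eval;

        ret = ioctl(fd, SIOCETHTOOL, &ifr);     /* read current priv flags */
        if (!ret) {
                eval.cmd = ETHTOOL_SPFLAGS;
                if (enable)
                        eval.data |= 1u;        /* "single-pause-mode" bit */
                else
                        eval.data &= ~1u;
                ret = ioctl(fd, SIOCETHTOOL, &ifr);
        }

        close(fd);
        return ret;
}
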
@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt)
|
||||
|
||||
/* default to automatic flow control */
|
||||
adpt->automatic = true;
|
||||
|
||||
/* Disable single-pause-frame mode by default */
|
||||
adpt->single_pause_mode = false;
|
||||
}
|
||||
|
||||
/* Get the clock */
|
||||
|
@ -363,6 +363,9 @@ struct emac_adapter {
|
||||
bool tx_flow_control;
|
||||
bool rx_flow_control;
|
||||
|
||||
/* True == use single-pause-frame mode. */
|
||||
bool single_pause_mode;
|
||||
|
||||
/* Ring parameter */
|
||||
u8 tpd_burst;
|
||||
u8 rfd_burst;
|
||||
|
@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
|
||||
if (likely(RTL_R16(IntrStatus) & RxAckBits))
|
||||
work_done += rtl8139_rx(dev, tp, budget);
|
||||
|
||||
if (work_done < budget && napi_complete_done(napi, work_done)) {
|
||||
if (work_done < budget) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tp->lock, flags);
|
||||
RTL_W16_F(IntrMask, rtl8139_intr_mask);
|
||||
if (napi_complete_done(napi, work_done))
|
||||
RTL_W16_F(IntrMask, rtl8139_intr_mask);
|
||||
spin_unlock_irqrestore(&tp->lock, flags);
|
||||
}
|
||||
spin_unlock(&tp->rx_lock);
|
||||
|
@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
|
||||
{ .compatible = "allwinner,sun8i-h3-emac" },
|
||||
{ .compatible = "allwinner,sun8i-v3s-emac" },
|
||||
{ .compatible = "allwinner,sun50i-a64-emac" },
|
||||
{},
|
||||
};
|
||||
|
||||
/* If phy-handle property is passed from DT, use it as the PHY */
|
||||
|
@ -150,6 +150,8 @@ struct netvsc_device_info {
|
||||
u32 num_chn;
|
||||
u32 send_sections;
|
||||
u32 recv_sections;
|
||||
u32 send_section_size;
|
||||
u32 recv_section_size;
|
||||
};
|
||||
|
||||
enum rndis_device_state {
|
||||
|
@ -76,9 +76,6 @@ static struct netvsc_device *alloc_net_device(void)
|
||||
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
|
||||
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
|
||||
|
||||
net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE;
|
||||
net_device->send_section_size = NETVSC_SEND_SECTION_SIZE;
|
||||
|
||||
init_completion(&net_device->channel_init_wait);
|
||||
init_waitqueue_head(&net_device->subchan_open);
|
||||
INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
|
||||
@ -262,7 +259,7 @@ static int netvsc_init_buf(struct hv_device *device,
|
||||
int ret = 0;
|
||||
|
||||
/* Get receive buffer area. */
|
||||
buf_size = device_info->recv_sections * net_device->recv_section_size;
|
||||
buf_size = device_info->recv_sections * device_info->recv_section_size;
|
||||
buf_size = roundup(buf_size, PAGE_SIZE);
|
||||
|
||||
net_device->recv_buf = vzalloc(buf_size);
|
||||
@ -344,7 +341,7 @@ static int netvsc_init_buf(struct hv_device *device,
|
||||
goto cleanup;
|
||||
|
||||
/* Now setup the send buffer. */
|
||||
buf_size = device_info->send_sections * net_device->send_section_size;
|
||||
buf_size = device_info->send_sections * device_info->send_section_size;
|
||||
buf_size = round_up(buf_size, PAGE_SIZE);
|
||||
|
||||
net_device->send_buf = vzalloc(buf_size);
|
||||
|
@ -848,7 +848,9 @@ static int netvsc_set_channels(struct net_device *net,
|
||||
device_info.num_chn = count;
|
||||
device_info.ring_size = ring_size;
|
||||
device_info.send_sections = nvdev->send_section_cnt;
|
||||
device_info.send_section_size = nvdev->send_section_size;
|
||||
device_info.recv_sections = nvdev->recv_section_cnt;
|
||||
device_info.recv_section_size = nvdev->recv_section_size;
|
||||
|
||||
rndis_filter_device_remove(dev, nvdev);
|
||||
|
||||
@ -963,7 +965,9 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
|
||||
device_info.ring_size = ring_size;
|
||||
device_info.num_chn = nvdev->num_chn;
|
||||
device_info.send_sections = nvdev->send_section_cnt;
|
||||
device_info.send_section_size = nvdev->send_section_size;
|
||||
device_info.recv_sections = nvdev->recv_section_cnt;
|
||||
device_info.recv_section_size = nvdev->recv_section_size;
|
||||
|
||||
rndis_filter_device_remove(hdev, nvdev);
|
||||
|
||||
@ -1485,7 +1489,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
|
||||
device_info.num_chn = nvdev->num_chn;
|
||||
device_info.ring_size = ring_size;
|
||||
device_info.send_sections = new_tx;
|
||||
device_info.send_section_size = nvdev->send_section_size;
|
||||
device_info.recv_sections = new_rx;
|
||||
device_info.recv_section_size = nvdev->recv_section_size;
|
||||
|
||||
netif_device_detach(ndev);
|
||||
was_opened = rndis_filter_opened(nvdev);
|
||||
@ -1934,7 +1940,9 @@ static int netvsc_probe(struct hv_device *dev,
|
||||
device_info.ring_size = ring_size;
|
||||
device_info.num_chn = VRSS_CHANNEL_DEFAULT;
|
||||
device_info.send_sections = NETVSC_DEFAULT_TX;
|
||||
device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
|
||||
device_info.recv_sections = NETVSC_DEFAULT_RX;
|
||||
device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
|
||||
|
||||
nvdev = rndis_filter_device_add(dev, &device_info);
|
||||
if (IS_ERR(nvdev)) {
|
||||
|
@ -160,15 +160,6 @@ config MDIO_XGENE
|
||||
|
||||
endif
|
||||
|
||||
menuconfig PHYLIB
|
||||
tristate "PHY Device support and infrastructure"
|
||||
depends on NETDEVICES
|
||||
select MDIO_DEVICE
|
||||
help
|
||||
Ethernet controllers are usually attached to PHY
|
||||
devices. This option provides infrastructure for
|
||||
managing PHY devices.
|
||||
|
||||
config PHYLINK
|
||||
tristate
|
||||
depends on NETDEVICES
|
||||
@ -179,6 +170,15 @@ config PHYLINK
|
||||
configuration links, PHYs, and Serdes links with MAC level
|
||||
autonegotiation modes.
|
||||
|
||||
menuconfig PHYLIB
|
||||
tristate "PHY Device support and infrastructure"
|
||||
depends on NETDEVICES
|
||||
select MDIO_DEVICE
|
||||
help
|
||||
Ethernet controllers are usually attached to PHY
|
||||
devices. This option provides infrastructure for
|
||||
managing PHY devices.
|
||||
|
||||
if PHYLIB
|
||||
|
||||
config SWPHY
|
||||
|
@ -373,7 +373,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
|
||||
cmd->base.port = PORT_BNC;
|
||||
else
|
||||
cmd->base.port = PORT_MII;
|
||||
|
||||
cmd->base.transceiver = phy_is_internal(phydev) ?
|
||||
XCVR_INTERNAL : XCVR_EXTERNAL;
|
||||
cmd->base.phy_address = phydev->mdio.addr;
|
||||
cmd->base.autoneg = phydev->autoneg;
|
||||
cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
|
||||
|
@ -879,7 +879,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
|
||||
{
|
||||
const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
|
||||
char *irq_str;
|
||||
char irq_num[4];
|
||||
char irq_num[8];
|
||||
|
||||
switch(phydev->irq) {
|
||||
case PHY_POLL:
|
||||
|
@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
|
||||
priv->phy_drv->read_status(phydev);
|
||||
|
||||
val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
|
||||
val &= XILINX_GMII2RGMII_SPEED_MASK;
|
||||
val &= ~XILINX_GMII2RGMII_SPEED_MASK;
|
||||
|
||||
if (phydev->speed == SPEED_1000)
|
||||
val |= BMCR_SPEED1000;
|
||||
|
@ -1265,30 +1265,45 @@ static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
|
||||
struct ethtool_eeprom *ee, u8 *data)
|
||||
{
|
||||
struct lan78xx_net *dev = netdev_priv(netdev);
|
||||
int ret;
|
||||
|
||||
ret = usb_autopm_get_interface(dev->intf);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ee->magic = LAN78XX_EEPROM_MAGIC;
|
||||
|
||||
return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
|
||||
ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
|
||||
|
||||
usb_autopm_put_interface(dev->intf);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
|
||||
struct ethtool_eeprom *ee, u8 *data)
|
||||
{
|
||||
struct lan78xx_net *dev = netdev_priv(netdev);
|
||||
int ret;
|
||||
|
||||
/* Allow entire eeprom update only */
|
||||
if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
|
||||
(ee->offset == 0) &&
|
||||
(ee->len == 512) &&
|
||||
(data[0] == EEPROM_INDICATOR))
|
||||
return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
|
||||
ret = usb_autopm_get_interface(dev->intf);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
|
||||
* to load data from EEPROM
|
||||
*/
|
||||
if (ee->magic == LAN78XX_EEPROM_MAGIC)
|
||||
ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
|
||||
else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
|
||||
(ee->offset == 0) &&
|
||||
(ee->len == 512) &&
|
||||
(data[0] == OTP_INDICATOR_1))
|
||||
return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
|
||||
ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
|
||||
|
||||
return -EINVAL;
|
||||
usb_autopm_put_interface(dev->intf);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
|
||||
@ -2434,7 +2449,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
|
||||
/* LAN7801 only has RGMII mode */
|
||||
if (dev->chipid == ID_REV_CHIP_ID_7801_)
|
||||
buf &= ~MAC_CR_GMII_EN_;
|
||||
buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
|
||||
ret = lan78xx_write_reg(dev, MAC_CR, buf);
|
||||
|
||||
ret = lan78xx_read_reg(dev, MAC_TX, &buf);
|
||||
|
@ -272,6 +272,7 @@ struct trace_event_call {
int perf_refcount;
struct hlist_head __percpu *perf_events;
struct bpf_prog *prog;
struct perf_event *bpf_prog_owner;

int (*perf_perm)(struct trace_event_call *,
struct perf_event *);

@ -271,7 +271,7 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
if (dst)
atomic_inc(&dst->__refcnt);
dst_hold(dst);
return dst;
}

@ -311,21 +311,6 @@ static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb
__skb_dst_copy(nskb, oskb->_skb_refdst);
}

/**
* skb_dst_force - makes sure skb dst is refcounted
* @skb: buffer
*
* If dst is not yet refcounted, let's do it
*/
static inline void skb_dst_force(struct sk_buff *skb)
{
if (skb_dst_is_noref(skb)) {
WARN_ON(!rcu_read_lock_held());
skb->_skb_refdst &= ~SKB_DST_NOREF;
dst_clone(skb_dst(skb));
}
}

/**
* dst_hold_safe - Take a reference on a dst if possible
* @dst: pointer to dst entry

@ -339,16 +324,17 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
}

/**
* skb_dst_force_safe - makes sure skb dst is refcounted
* skb_dst_force - makes sure skb dst is refcounted
* @skb: buffer
*
* If dst is not yet refcounted and not destroyed, grab a ref on it.
*/
static inline void skb_dst_force_safe(struct sk_buff *skb)
static inline void skb_dst_force(struct sk_buff *skb)
{
if (skb_dst_is_noref(skb)) {
struct dst_entry *dst = skb_dst(skb);

WARN_ON(!rcu_read_lock_held());
if (!dst_hold_safe(dst))
dst = NULL;

@ -190,7 +190,7 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
rcu_read_lock();
err = ip_route_input_noref(skb, dst, src, tos, devin);
if (!err) {
skb_dst_force_safe(skb);
skb_dst_force(skb);
if (!skb_dst(skb))
err = -EINVAL;
}

@ -856,7 +856,7 @@ void sk_stream_write_space(struct sock *sk);
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
/* dont let skb dst not refcounted, we are going to leave rcu lock */
skb_dst_force_safe(skb);
skb_dst_force(skb);

if (!sk->sk_backlog.tail)
sk->sk_backlog.head = skb;

@ -544,7 +544,6 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
bool tcp_may_send_now(struct sock *sk);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
@ -1753,6 +1753,8 @@ enum ethtool_reset_flags {
* %ethtool_link_mode_bit_indices for the link modes, and other
* link features that the link partner advertised through
* autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
*
* If autonegotiation is disabled, the speed and @duplex represent the
* fixed link mode and are writable if the driver supports multiple

@ -1804,7 +1806,9 @@ struct ethtool_link_settings {
__u8 eth_tp_mdix;
__u8 eth_tp_mdix_ctrl;
__s8 link_mode_masks_nwords;
__u32 reserved[8];
__u8 transceiver;
__u8 reserved1[3];
__u32 reserved[7];
__u32 link_mode_masks[0];
/* layout of link_mode_masks fields:
* __u32 map_supported[link_mode_masks_nwords];

@ -75,8 +75,8 @@ static u64 dev_map_bitmap_size(const union bpf_attr *attr)
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
struct bpf_dtab *dtab;
int err = -EINVAL;
u64 cost;
int err;

/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||

@ -108,6 +108,8 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
if (err)
goto free_dtab;

err = -ENOMEM;

/* A per cpu bitfield with a bit per possible net device */
dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr),
__alignof__(unsigned long));

@ -128,7 +130,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
free_dtab:
free_percpu(dtab->flush_needed);
kfree(dtab);
return ERR_PTR(-ENOMEM);
return ERR_PTR(err);
}

static void dev_map_free(struct bpf_map *map)

@ -186,15 +186,17 @@ static int bpf_map_alloc_id(struct bpf_map *map)

static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
unsigned long flags;

if (do_idr_lock)
spin_lock_bh(&map_idr_lock);
spin_lock_irqsave(&map_idr_lock, flags);
else
__acquire(&map_idr_lock);

idr_remove(&map_idr, map->id);

if (do_idr_lock)
spin_unlock_bh(&map_idr_lock);
spin_unlock_irqrestore(&map_idr_lock, flags);
else
__release(&map_idr_lock);
}
@ -4205,7 +4205,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
}

if (insn->imm == BPF_FUNC_redirect_map) {
u64 addr = (unsigned long)prog;
/* Note, we cannot use prog directly as imm as subsequent
* rewrites would still change the prog pointer. The only
* stable address we can use is aux, which also works with
* prog clones during blinding.
*/
u64 addr = (unsigned long)prog->aux;
struct bpf_insn r4_ld[] = {
BPF_LD_IMM64(BPF_REG_4, addr),
*insn,

@ -8171,6 +8171,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
}
}
event->tp_event->prog = prog;
event->tp_event->bpf_prog_owner = event;

return 0;
}

@ -8185,7 +8186,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
return;

prog = event->tp_event->prog;
if (prog) {
if (prog && event->tp_event->bpf_prog_owner == event) {
event->tp_event->prog = NULL;
bpf_prog_put(prog);
}

@ -735,9 +735,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
* rhashtable_walk_start - Start a hash table walk
* @iter: Hash table iterator
*
* Start a hash table walk. Note that we take the RCU lock in all
* cases including when we return an error. So you must always call
* rhashtable_walk_stop to clean up.
* Start a hash table walk at the current iterator position. Note that we take
* the RCU lock in all cases including when we return an error. So you must
* always call rhashtable_walk_stop to clean up.
*
* Returns zero if successful.
*

@ -846,7 +846,8 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_next);
* rhashtable_walk_stop - Finish a hash table walk
* @iter: Hash table iterator
*
* Finish a hash table walk.
* Finish a hash table walk. Does not reset the iterator to the start of the
* hash table.
*/
void rhashtable_walk_stop(struct rhashtable_iter *iter)
__releases(RCU)

@ -185,6 +185,13 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
}

/*
* check the length of messages copied in is the same as the
* what we get from the first loop
*/
if ((char *)kcmsg - (char *)kcmsg_base != kcmlen)
goto Einval;

/* Ok, looks like we made it. Hook it up and return success. */
kmsg->msg_control = kcmsg_base;
kmsg->msg_controllen = kcmlen;

@ -1948,8 +1948,12 @@ again:
goto again;
}
out_unlock:
if (pt_prev)
pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
if (pt_prev) {
if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
else
kfree_skb(skb2);
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);

@ -3892,6 +3896,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
__skb_pull(skb, off);
else if (off < 0)
__skb_push(skb, -off);
skb->mac_header += off;

switch (act) {
case XDP_REDIRECT:

@ -525,6 +525,8 @@ convert_link_ksettings_to_legacy_settings(
= link_ksettings->base.eth_tp_mdix;
legacy_settings->eth_tp_mdix_ctrl
= link_ksettings->base.eth_tp_mdix_ctrl;
legacy_settings->transceiver
= link_ksettings->base.transceiver;
return retval;
}
@ -1794,7 +1794,7 @@ struct redirect_info {
u32 flags;
struct bpf_map *map;
struct bpf_map *map_to_flush;
const struct bpf_prog *map_owner;
unsigned long map_owner;
};

static DEFINE_PER_CPU(struct redirect_info, redirect_info);

@ -2500,11 +2500,17 @@ void xdp_do_flush_map(void)
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);

static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
unsigned long aux)
{
return (unsigned long)xdp_prog->aux != aux;
}

static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
const struct bpf_prog *map_owner = ri->map_owner;
unsigned long map_owner = ri->map_owner;
struct bpf_map *map = ri->map;
struct net_device *fwd = NULL;
u32 index = ri->ifindex;

@ -2512,9 +2518,9 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,

ri->ifindex = 0;
ri->map = NULL;
ri->map_owner = NULL;
ri->map_owner = 0;

if (unlikely(map_owner != xdp_prog)) {
if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
err = -EFAULT;
map = NULL;
goto err;

@ -2574,7 +2580,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *xdp_prog)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
const struct bpf_prog *map_owner = ri->map_owner;
unsigned long map_owner = ri->map_owner;
struct bpf_map *map = ri->map;
struct net_device *fwd = NULL;
u32 index = ri->ifindex;

@ -2583,10 +2589,10 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,

ri->ifindex = 0;
ri->map = NULL;
ri->map_owner = NULL;
ri->map_owner = 0;

if (map) {
if (unlikely(map_owner != xdp_prog)) {
if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
err = -EFAULT;
map = NULL;
goto err;

@ -2632,7 +2638,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
ri->ifindex = ifindex;
ri->flags = flags;
ri->map = NULL;
ri->map_owner = NULL;
ri->map_owner = 0;

return XDP_REDIRECT;
}

@ -2646,7 +2652,7 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
};

BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
const struct bpf_prog *, map_owner)
unsigned long, map_owner)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);

@ -266,7 +266,7 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
#if IS_ENABLED(CONFIG_IPV6)
if (tb->fast_sk_family == AF_INET6)
return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
&sk->sk_v6_rcv_saddr,
inet6_rcv_saddr(sk),
tb->fast_rcv_saddr,
sk->sk_rcv_saddr,
tb->fast_ipv6_only,

@ -321,13 +321,14 @@ tb_found:
goto fail_unlock;
}
success:
if (!hlist_empty(&tb->owners)) {
if (hlist_empty(&tb->owners)) {
tb->fastreuse = reuse;
if (sk->sk_reuseport) {
tb->fastreuseport = FASTREUSEPORT_ANY;
tb->fastuid = uid;
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif

@ -354,6 +355,7 @@ success:
tb->fastuid = uid;
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
@ -1806,40 +1806,6 @@ static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
return !after(end_seq, tcp_wnd_end(tp));
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
* should be put on the wire right now. If so, it returns the number of
* packets allowed by the congestion window.
*/
static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
unsigned int cur_mss, int nonagle)
{
const struct tcp_sock *tp = tcp_sk(sk);
unsigned int cwnd_quota;

tcp_init_tso_segs(skb, cur_mss);

if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
return 0;

cwnd_quota = tcp_cwnd_test(tp, skb);
if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
cwnd_quota = 0;

return cwnd_quota;
}

/* Test if sending is allowed right now. */
bool tcp_may_send_now(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);

return skb &&
tcp_snd_test(sk, skb, tcp_current_mss(sk),
(tcp_skb_is_last(sk, skb) ?
tp->nonagle : TCP_NAGLE_PUSH));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
* which is put after SKB on the list. It is very much like
* tcp_fragment() except that it may make several kinds of assumptions

@ -3423,6 +3389,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
goto done;
}

/* data was not sent, this is our new send_head */
sk->sk_send_head = syn_data;
tp->packets_out -= tcp_skb_pcount(syn_data);

fallback:
/* Send a regular SYN with Fast Open cookie request option */
if (fo->cookie.len > 0)

@ -3475,6 +3445,11 @@ int tcp_connect(struct sock *sk)
*/
tp->snd_nxt = tp->write_seq;
tp->pushed_seq = tp->write_seq;
buff = tcp_send_head(sk);
if (unlikely(buff)) {
tp->snd_nxt = TCP_SKB_CB(buff)->seq;
tp->pushed_seq = TCP_SKB_CB(buff)->seq;
}
TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);

/* Timer for repeating the SYN until an answer. */

@ -1399,10 +1399,18 @@ static inline int ipv6_saddr_preferred(int type)
return 0;
}

static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
static bool ipv6_use_optimistic_addr(struct net *net,
struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
if (!idev)
return false;
if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
return false;
if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
return false;

return true;
#else
return false;
#endif

@ -1472,7 +1480,7 @@ static int ipv6_get_saddr_eval(struct net *net,
/* Rule 3: Avoid deprecated and optimistic addresses */
u8 avoid = IFA_F_DEPRECATED;

if (!ipv6_use_optimistic_addr(score->ifa->idev))
if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
avoid |= IFA_F_OPTIMISTIC;
ret = ipv6_saddr_preferred(score->addr_type) ||
!(score->ifa->flags & avoid);

@ -2460,7 +2468,8 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
int max_addresses = in6_dev->cnf.max_addresses;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (in6_dev->cnf.optimistic_dad &&
if ((net->ipv6.devconf_all->optimistic_dad ||
in6_dev->cnf.optimistic_dad) &&
!net->ipv6.devconf_all->forwarding && sllao)
addr_flags |= IFA_F_OPTIMISTIC;
#endif

@ -3051,7 +3060,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev,
u32 addr_flags = flags | IFA_F_PERMANENT;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (idev->cnf.optimistic_dad &&
if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
idev->cnf.optimistic_dad) &&
!dev_net(idev->dev)->ipv6.devconf_all->forwarding)
addr_flags |= IFA_F_OPTIMISTIC;
#endif

@ -3810,6 +3820,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
goto out;

if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
dev_net(dev)->ipv6.devconf_all->accept_dad < 1 ||
idev->cnf.accept_dad < 1 ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {

@ -3841,7 +3852,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
*/
if (ifp->flags & IFA_F_OPTIMISTIC) {
ip6_ins_rt(ifp->rt);
if (ipv6_use_optimistic_addr(idev)) {
if (ipv6_use_optimistic_addr(dev_net(dev), idev)) {
/* Because optimistic nodes can use this address,
* notify listeners. If DAD fails, RTM_DELADDR is sent.
*/

@ -3897,7 +3908,9 @@ static void addrconf_dad_work(struct work_struct *w)
action = DAD_ABORT;
ifp->state = INET6_IFADDR_STATE_POSTDAD;

if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
idev->cnf.accept_dad > 1) &&
!idev->cnf.disable_ipv6 &&
!(ifp->flags & IFA_F_STABLE_PRIVACY)) {
struct in6_addr addr;

@ -4940,9 +4953,10 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)

/* Don't send DELADDR notification for TENTATIVE address,
* since NEWADDR notification is sent only after removing
* TENTATIVE flag.
* TENTATIVE flag, if DAD has not failed.
*/
if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
if (ifa->flags & IFA_F_TENTATIVE && !(ifa->flags & IFA_F_DADFAILED) &&
event == RTM_DELADDR)
return;

skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
@ -940,24 +940,25 @@ done:
}

static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
unsigned short type, const void *daddr,
const void *saddr, unsigned int len)
{
struct ip6_tnl *t = netdev_priv(dev);
struct ipv6hdr *ipv6h = skb_push(skb, t->hlen);
__be16 *p = (__be16 *)(ipv6h+1);
struct ipv6hdr *ipv6h;
__be16 *p;

ip6_flow_hdr(ipv6h, 0,
ip6_make_flowlabel(dev_net(dev), skb,
t->fl.u.ip6.flowlabel, true,
&t->fl.u.ip6));
ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
t->fl.u.ip6.flowlabel,
true, &t->fl.u.ip6));
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
ipv6h->daddr = t->parms.raddr;

p[0] = t->parms.o_flags;
p[1] = htons(type);
p = (__be16 *)(ipv6h + 1);
p[0] = t->parms.o_flags;
p[1] = htons(type);

/*
* Set the source hardware address.

@ -2259,6 +2259,9 @@ static int __init ip6_tunnel_init(void)
{
int err;

if (!ipv6_mod_enabled())
return -EOPNOTSUPP;

err = register_pernet_device(&ip6_tnl_net_ops);
if (err < 0)
goto out_pernet;

@ -1015,6 +1015,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
*/
offset = skb_transport_offset(skb);
skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
csum = skb->csum;

skb->ip_summed = CHECKSUM_NONE;

@ -1041,12 +1041,24 @@ out:
static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
const struct htype *h = set->data;
struct htype *h = set->data;
const struct htable *t;
struct nlattr *nested;
size_t memsize;
u8 htable_bits;

/* If any members have expired, set->elements will be wrong
* mytype_expire function will update it with the right count.
* we do not hold set->lock here, so grab it first.
* set->elements can still be incorrect in the case of a huge set,
* because elements might time out during the listing.
*/
if (SET_WITH_TIMEOUT(set)) {
spin_lock_bh(&set->lock);
mtype_expire(set, h);
spin_unlock_bh(&set->lock);
}

rcu_read_lock_bh();
t = rcu_dereference_bh_nfnl(h->table);
memsize = mtype_ahash_memsize(h, t) + set->ext_size;

@ -429,7 +429,7 @@ nf_nat_setup_info(struct nf_conn *ct,

srchash = hash_by_src(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
lock = &nf_nat_locks[srchash % ARRAY_SIZE(nf_nat_locks)];
lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
spin_lock_bh(lock);
hlist_add_head_rcu(&ct->nat_bysource,
&nf_nat_bysource[srchash]);

@ -532,9 +532,9 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
unsigned int h;

h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
hlist_del_rcu(&ct->nat_bysource);
spin_unlock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)

@ -807,8 +807,8 @@ static int __init nf_nat_init(void)

/* Leave them the same for the moment. */
nf_nat_htable_size = nf_conntrack_htable_size;
if (nf_nat_htable_size < ARRAY_SIZE(nf_nat_locks))
nf_nat_htable_size = ARRAY_SIZE(nf_nat_locks);
if (nf_nat_htable_size < CONNTRACK_LOCKS)
nf_nat_htable_size = CONNTRACK_LOCKS;

nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
if (!nf_nat_bysource)

@ -821,7 +821,7 @@ static int __init nf_nat_init(void)
return ret;
}

for (i = 0; i < ARRAY_SIZE(nf_nat_locks); i++)
for (i = 0; i < CONNTRACK_LOCKS; i++)
spin_lock_init(&nf_nat_locks[i]);

nf_ct_helper_expectfn_register(&follow_master_nat);
@ -1684,10 +1684,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)

mutex_lock(&fanout_mutex);

err = -EINVAL;
if (!po->running)
goto out;

err = -EALREADY;
if (po->fanout)
goto out;

@ -1749,7 +1745,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
if (match->type == type &&

spin_lock(&po->bind_lock);
if (po->running &&
match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;

@ -1761,6 +1760,13 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
err = 0;
}
}
spin_unlock(&po->bind_lock);

if (err && !refcount_read(&match->sk_ref)) {
list_del(&match->list);
kfree(match);
}

out:
if (err && rollover) {
kfree(rollover);

@ -922,28 +922,28 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,

if (!tc_flags_valid(fnew->flags)) {
err = -EINVAL;
goto errout;
goto errout_idr;
}
}

err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
if (err)
goto errout;
goto errout_idr;

err = fl_check_assign_mask(head, &mask);
if (err)
goto errout;
goto errout_idr;

if (!tc_skip_sw(fnew->flags)) {
if (!fold && fl_lookup(head, &fnew->mkey)) {
err = -EEXIST;
goto errout;
goto errout_idr;
}

err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
head->ht_params);
if (err)
goto errout;
goto errout_idr;
}

if (!tc_skip_hw(fnew->flags)) {

@ -952,7 +952,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
&mask.key,
fnew);
if (err)
goto errout;
goto errout_idr;
}

if (!tc_in_hw(fnew->flags))

@ -981,6 +981,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
kfree(tb);
return 0;

errout_idr:
if (fnew->handle)
idr_remove_ext(&head->handle_idr, fnew->handle);
errout:
tcf_exts_destroy(&fnew->exts);
kfree(fnew);

@ -32,6 +32,7 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (tc_skip_sw(head->flags))
return -1;

*res = head->res;
return tcf_exts_exec(skb, &head->exts, res);
}

@ -685,6 +685,7 @@ void qdisc_reset(struct Qdisc *qdisc)
qdisc->gso_skb = NULL;
}
qdisc->q.qlen = 0;
qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

@ -958,6 +958,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
}

if (cl != NULL) {
int old_flags;

if (parentid) {
if (cl->cl_parent &&
cl->cl_parent->cl_common.classid != parentid)

@ -978,6 +980,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
}

sch_tree_lock(sch);
old_flags = cl->cl_flags;

if (rsc != NULL)
hfsc_change_rsc(cl, rsc, cur_time);
if (fsc != NULL)

@ -986,10 +990,21 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
hfsc_change_usc(cl, usc, cur_time);

if (cl->qdisc->q.qlen != 0) {
if (cl->cl_flags & HFSC_RSC)
update_ed(cl, qdisc_peek_len(cl->qdisc));
if (cl->cl_flags & HFSC_FSC)
update_vf(cl, 0, cur_time);
int len = qdisc_peek_len(cl->qdisc);

if (cl->cl_flags & HFSC_RSC) {
if (old_flags & HFSC_RSC)
update_ed(cl, len);
else
init_ed(cl, len);
}

if (cl->cl_flags & HFSC_FSC) {
if (old_flags & HFSC_FSC)
update_vf(cl, 0, cur_time);
else
init_vf(cl, len);
}
}
sch_tree_unlock(sch);
@ -282,6 +282,7 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock,
__be32 *subnet, u8 *prefix_len)
{
struct dst_entry *dst = sk_dst_get(clcsock->sk);
struct in_device *in_dev;
struct sockaddr_in addr;
int rc = -ENOENT;
int len;

@ -298,14 +299,17 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock,
/* get address to which the internal TCP socket is bound */
kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len);
/* analyze IPv4 specific data of net_device belonging to TCP socket */
for_ifa(dst->dev->ip_ptr) {
if (ifa->ifa_address != addr.sin_addr.s_addr)
rcu_read_lock();
in_dev = __in_dev_get_rcu(dst->dev);
for_ifa(in_dev) {
if (!inet_ifa_match(addr.sin_addr.s_addr, ifa))
continue;
*prefix_len = inet_mask_len(ifa->ifa_mask);
*subnet = ifa->ifa_address & ifa->ifa_mask;
rc = 0;
break;
} endfor_ifa(dst->dev->ip_ptr);
} endfor_ifa(in_dev);
rcu_read_unlock();

out_rel:
dst_release(dst);

@ -509,7 +513,7 @@ decline_rdma:
/* RDMA setup failed, switch back to TCP */
smc->use_fallback = true;
if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
rc = smc_clc_send_decline(smc, reason_code, 0);
rc = smc_clc_send_decline(smc, reason_code);
if (rc < sizeof(struct smc_clc_msg_decline))
goto out_err;
}

@ -804,8 +808,6 @@ static void smc_listen_work(struct work_struct *work)
rc = local_contact;
if (rc == -ENOMEM)
reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
else if (rc == -ENOLINK)
reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
goto decline_rdma;
}
link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

@ -899,7 +901,7 @@ decline_rdma:
smc_conn_free(&new_smc->conn);
new_smc->use_fallback = true;
if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
rc = smc_clc_send_decline(new_smc, reason_code, 0);
rc = smc_clc_send_decline(new_smc, reason_code);
if (rc < sizeof(struct smc_clc_msg_decline))
goto out_err;
}

@ -149,7 +149,7 @@ struct smc_connection {
atomic_t sndbuf_space; /* remaining space in sndbuf */
u16 tx_cdc_seq; /* sequence # for CDC send */
spinlock_t send_lock; /* protect wr_sends */
struct work_struct tx_work; /* retry of smc_cdc_msg_send */
struct delayed_work tx_work; /* retry of smc_cdc_msg_send */

struct smc_host_cdc_msg local_rx_ctrl; /* filled during event_handl.
* .prod cf. TCP rcv_nxt

@ -95,9 +95,10 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
if (clcm->type == SMC_CLC_DECLINE) {
reason_code = SMC_CLC_DECL_REPLY;
if (ntohl(((struct smc_clc_msg_decline *)buf)->peer_diagnosis)
== SMC_CLC_DECL_SYNCERR)
if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
smc->conn.lgr->sync_err = true;
smc_lgr_terminate(smc->conn.lgr);
}
}

out:

@ -105,8 +106,7 @@ out:
}

/* send CLC DECLINE message across internal TCP socket */
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
u8 out_of_sync)
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
{
struct smc_clc_msg_decline dclc;
struct msghdr msg;

@ -118,7 +118,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
dclc.hdr.type = SMC_CLC_DECLINE;
dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
dclc.hdr.version = SMC_CLC_V1;
dclc.hdr.flag = out_of_sync ? 1 : 0;
dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
dclc.peer_diagnosis = htonl(peer_diag_info);
memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));

@ -106,8 +106,7 @@ struct smc_ib_device;

int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
u8 expected_type);
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
u8 out_of_sync);
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev,
u8 ibport);
int smc_clc_send_confirm(struct smc_sock *smc);

@ -174,15 +174,15 @@ int smc_close_active(struct smc_sock *smc)
{
struct smc_cdc_conn_state_flags *txflags =
&smc->conn.local_tx_ctrl.conn_state_flags;
long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
struct smc_connection *conn = &smc->conn;
struct sock *sk = &smc->sk;
int old_state;
long timeout;
int rc = 0;

if (sock_flag(sk, SOCK_LINGER) &&
!(current->flags & PF_EXITING))
timeout = sk->sk_lingertime;
timeout = current->flags & PF_EXITING ?
0 : sock_flag(sk, SOCK_LINGER) ?
sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

again:
old_state = sk->sk_state;

@ -208,7 +208,7 @@ again:
case SMC_ACTIVE:
smc_close_stream_wait(smc, timeout);
release_sock(sk);
cancel_work_sync(&conn->tx_work);
cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
if (sk->sk_state == SMC_ACTIVE) {
/* send close request */

@ -234,7 +234,7 @@ again:
if (!smc_cdc_rxed_any_close(conn))
smc_close_stream_wait(smc, timeout);
release_sock(sk);
cancel_work_sync(&conn->tx_work);
cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
if (sk->sk_err != ECONNABORTED) {
/* confirm close from peer */

@ -263,7 +263,9 @@ again:
/* peer sending PeerConnectionClosed will cause transition */
break;
case SMC_PROCESSABORT:
cancel_work_sync(&conn->tx_work);
release_sock(sk);
cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
smc_close_abort(conn);
sk->sk_state = SMC_CLOSED;
smc_close_wait_tx_pends(smc);

@ -411,13 +413,14 @@ void smc_close_sock_put_work(struct work_struct *work)
int smc_close_shutdown_write(struct smc_sock *smc)
{
struct smc_connection *conn = &smc->conn;
long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
struct sock *sk = &smc->sk;
int old_state;
long timeout;
int rc = 0;

if (sock_flag(sk, SOCK_LINGER))
timeout = sk->sk_lingertime;
timeout = current->flags & PF_EXITING ?
0 : sock_flag(sk, SOCK_LINGER) ?
sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

again:
old_state = sk->sk_state;

@ -425,7 +428,7 @@ again:
case SMC_ACTIVE:
smc_close_stream_wait(smc, timeout);
release_sock(sk);
cancel_work_sync(&conn->tx_work);
cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
/* send close wr request */
rc = smc_close_wr(conn);

@ -439,7 +442,7 @@ again:
if (!smc_cdc_rxed_any_close(conn))
smc_close_stream_wait(smc, timeout);
release_sock(sk);
cancel_work_sync(&conn->tx_work);
cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
/* confirm close from peer */
rc = smc_close_wr(conn);
@ -25,8 +25,9 @@
#include "smc_cdc.h"
#include "smc_close.h"

#define SMC_LGR_NUM_INCR 256
#define SMC_LGR_FREE_DELAY (600 * HZ)
#define SMC_LGR_NUM_INCR 256
#define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10)

static u32 smc_lgr_num; /* unique link group number */

@ -107,8 +108,15 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
__smc_lgr_unregister_conn(conn);
}
write_unlock_bh(&lgr->conns_lock);
if (reduced && !lgr->conns_num)
schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY);
if (!reduced || lgr->conns_num)
return;
/* client link group creation always follows the server link group
* creation. For client use a somewhat higher removal delay time,
* otherwise there is a risk of out-of-sync link groups.
*/
mod_delayed_work(system_wq, &lgr->free_work,
lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
SMC_LGR_FREE_DELAY_SERV);
}

static void smc_lgr_free_work(struct work_struct *work)

@ -380,6 +380,7 @@ static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport);
if (ndev) {
memcpy(&smcibdev->mac, ndev->dev_addr, ETH_ALEN);
dev_put(ndev);
} else if (!rc) {
memcpy(&smcibdev->mac[ibport - 1][0],
&smcibdev->gid[ibport - 1].raw[8], 3);

@ -181,8 +181,10 @@ static int smc_pnet_enter(struct smc_pnetentry *new_pnetelem)
sizeof(new_pnetelem->ndev->name)) ||
smc_pnet_same_ibname(pnetelem,
new_pnetelem->smcibdev->ibdev->name,
new_pnetelem->ib_port))
new_pnetelem->ib_port)) {
dev_put(pnetelem->ndev);
goto found;
}
}
list_add_tail(&new_pnetelem->list, &smc_pnettable.pnetlist);
rc = 0;

@ -148,6 +148,8 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
read_done = sock_intr_errno(timeo);
break;
}
if (!timeo)
return -EAGAIN;
}

if (!atomic_read(&conn->bytes_to_rcv)) {

@ -24,6 +24,8 @@
#include "smc_cdc.h"
#include "smc_tx.h"

#define SMC_TX_WORK_DELAY HZ

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()

@ -406,7 +408,8 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
goto out_unlock;
}
rc = 0;
schedule_work(&conn->tx_work);
schedule_delayed_work(&conn->tx_work,
SMC_TX_WORK_DELAY);
}
goto out_unlock;
}

@ -430,7 +433,7 @@ out_unlock:
*/
static void smc_tx_work(struct work_struct *work)
{
struct smc_connection *conn = container_of(work,
struct smc_connection *conn = container_of(to_delayed_work(work),
struct smc_connection,
tx_work);
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

@ -468,7 +471,8 @@ void smc_tx_consumer_update(struct smc_connection *conn)
if (!rc)
rc = smc_cdc_msg_send(conn, wr_buf, pend);
if (rc < 0) {
schedule_work(&conn->tx_work);
schedule_delayed_work(&conn->tx_work,
SMC_TX_WORK_DELAY);
return;
}
smc_curs_write(&conn->rx_curs_confirmed,

@ -487,6 +491,6 @@ void smc_tx_consumer_update(struct smc_connection *conn)
void smc_tx_init(struct smc_sock *smc)
{
smc->sk.sk_write_space = smc_tx_write_space;
INIT_WORK(&smc->conn.tx_work, smc_tx_work);
INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
spin_lock_init(&smc->conn.send_lock);
}

@ -244,7 +244,7 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
int rc;

ib_req_notify_cq(link->smcibdev->roce_cq_send,
IB_CQ_SOLICITED_MASK | IB_CQ_REPORT_MISSED_EVENTS);
IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
pend = container_of(priv, struct smc_wr_tx_pend, priv);
rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
&failed_wr);

@ -9987,6 +9987,9 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;

if (!setup.chandef.chan)
return -EINVAL;

err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
&setup.beacon_rate);
if (err)

@ -10903,6 +10906,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;

if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
!tb[NL80211_REKEY_DATA_KCK])
return -EINVAL;
if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
return -ERANGE;
if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)