Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:
 "I was battling a cold after some recent trips, so quite a bit piled
  up meanwhile, sorry about that.

  Highlights:

   1) Fix fd leak in various bpf selftests, from Brian Vazquez.

   2) Fix crash in xsk when device doesn't support some methods, from
      Magnus Karlsson.

   3) Fix various leaks and use-after-free in rxrpc, from David Howells.

   4) Fix several SKB leaks due to confusion of who owns an SKB and who
      should release it in the llc code. From Eric Biggers.

   5) Kill a bunch of KCSAN warnings in TCP, from Eric Dumazet.

   6) Jumbo packets don't work after resume on r8169, as the BIOS resets
      the chip into non-jumbo mode during suspend. From Heiner Kallweit.

   7) Corrupt L2 header during MPLS push, from Davide Caratti.

   8) Prevent possible infinite loop in tc_ctl_action, from Eric Dumazet.

   9) Get register bits right in bcmgenet driver, based upon chip
      version. From Florian Fainelli.

  10) Fix mutex problems in microchip DSA driver, from Marek Vasut.

  11) Cure race between route lookup and invalidation in ipv4, from Wei
      Wang.

  12) Fix performance regression due to false sharing in 'net'
      structure, from Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (145 commits)
  net: reorder 'struct net' fields to avoid false sharing
  net: dsa: fix switch tree list
  net: ethernet: dwmac-sun8i: show message only when switching to promisc
  net: aquantia: add an error handling in aq_nic_set_multicast_list
  net: netem: correct the parent's backlog when corrupted packet was dropped
  net: netem: fix error path for corrupted GSO frames
  macb: propagate errors when getting optional clocks
  xen/netback: fix error path of xenvif_connect_data()
  net: hns3: fix mis-counting IRQ vector numbers issue
  net: usb: lan78xx: Connect PHY before registering MAC
  vsock/virtio: discard packets if credit is not respected
  vsock/virtio: send a credit update when buffer size is changed
  mlxsw: spectrum_trap: Push Ethernet header before reporting trap
  net: ensure correct skb->tstamp in various fragmenters
  net: bcmgenet: reset 40nm EPHY on energy detect
  net: bcmgenet: soft reset 40nm EPHYs before MAC init
  net: phy: bcm7xxx: define soft_reset for 40nm EPHY
  net: bcmgenet: don't set phydev->link from MAC
  net: Update address for MediaTek ethernet driver in MAINTAINERS
  ipv4: fix race condition between route lookup and invalidation
  ...
commit 531e93d114
@@ -36,8 +36,10 @@ Support
=======
For general Linux networking support, please use the netdev mailing
list, which is monitored by Pensando personnel::

  netdev@vger.kernel.org

For more specific support needs, please use the Pensando driver support
email::

  drivers@pensando.io

  drivers@pensando.io
@@ -92,16 +92,16 @@ under some conditions.
Part III: Registering a Network Device to DIM
==============================================

Net DIM API exposes the main function net_dim(struct net_dim *dim,
struct net_dim_sample end_sample). This function is the entry point to the Net
Net DIM API exposes the main function net_dim(struct dim *dim,
struct dim_sample end_sample). This function is the entry point to the Net
DIM algorithm and has to be called every time the driver would like to check if
it should change interrupt moderation parameters. The driver should provide two
data structures: struct net_dim and struct net_dim_sample. Struct net_dim
data structures: struct dim and struct dim_sample. Struct dim
describes the state of DIM for a specific object (RX queue, TX queue,
other queues, etc.). This includes the current selected profile, previous data
samples, the callback function provided by the driver and more.
Struct net_dim_sample describes a data sample, which will be compared to the
data sample stored in struct net_dim in order to decide on the algorithm's next
Struct dim_sample describes a data sample, which will be compared to the
data sample stored in struct dim in order to decide on the algorithm's next
step. The sample should include bytes, packets and interrupts, measured by
the driver.

@@ -110,9 +110,9 @@ main net_dim() function. The recommended method is to call net_dim() on each
interrupt. Since Net DIM has a built-in moderation and it might decide to skip
iterations under certain conditions, there is no need to moderate the net_dim()
calls as well. As mentioned above, the driver needs to provide an object of type
struct net_dim to the net_dim() function call. It is advised for each entity
using Net DIM to hold a struct net_dim as part of its data structure and use it
as the main Net DIM API object. The struct net_dim_sample should hold the latest
struct dim to the net_dim() function call. It is advised for each entity
using Net DIM to hold a struct dim as part of its data structure and use it
as the main Net DIM API object. The struct dim_sample should hold the latest
bytes, packets and interrupts count. No need to perform any calculations, just
include the raw data.

@@ -132,19 +132,19 @@ usage is not complete but it should make the outline of the usage clear.

my_driver.c:

#include <linux/net_dim.h>
#include <linux/dim.h>

/* Callback for net DIM to schedule on a decision to change moderation */
void my_driver_do_dim_work(struct work_struct *work)
{
	/* Get struct net_dim from struct work_struct */
	struct net_dim *dim = container_of(work, struct net_dim,
					   work);
	/* Get struct dim from struct work_struct */
	struct dim *dim = container_of(work, struct dim,
				       work);
	/* Do interrupt moderation related stuff */
	...

	/* Signal net DIM work is done and it should move to next iteration */
	dim->state = NET_DIM_START_MEASURE;
	dim->state = DIM_START_MEASURE;
}

/* My driver's interrupt handler */
@@ -152,13 +152,13 @@ int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
{
	...
	/* A struct to hold current measured data */
	struct net_dim_sample dim_sample;
	struct dim_sample dim_sample;
	...
	/* Initiate data sample struct with current data */
	net_dim_sample(my_entity->events,
		       my_entity->packets,
		       my_entity->bytes,
		       &dim_sample);
	dim_update_sample(my_entity->events,
			  my_entity->packets,
			  my_entity->bytes,
			  &dim_sample);
	/* Call net DIM */
	net_dim(&my_entity->dim, dim_sample);
	...
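
The documentation example above shows the work callback and the interrupt handler, but not how the work item is wired up. A minimal sketch of that setup step, reusing the documentation's hypothetical my_driver_entity (the init function name below is illustrative, not part of the patch)::

	#include <linux/dim.h>
	#include <linux/workqueue.h>

	/* Done once per queue, before interrupts are enabled */
	static void my_driver_init_dim(struct my_driver_entity *my_entity)
	{
		/* net_dim() schedules dim->work whenever moderation should change */
		INIT_WORK(&my_entity->dim.work, my_driver_do_dim_work);
		my_entity->dim.state = DIM_START_MEASURE;
	}
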
@@ -9122,7 +9122,7 @@ F:	drivers/auxdisplay/ks0108.c
F:	include/linux/ks0108.h

L3MDEV
M:	David Ahern <dsa@cumulusnetworks.com>
M:	David Ahern <dsahern@kernel.org>
L:	netdev@vger.kernel.org
S:	Maintained
F:	net/l3mdev
@@ -10255,7 +10255,7 @@ MEDIATEK ETHERNET DRIVER
M:	Felix Fietkau <nbd@openwrt.org>
M:	John Crispin <john@phrozen.org>
M:	Sean Wang <sean.wang@mediatek.com>
M:	Nelson Chang <nelson.chang@mediatek.com>
M:	Mark Lee <Mark-MC.Lee@mediatek.com>
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/ethernet/mediatek/
@@ -17433,7 +17433,7 @@ F:	include/linux/regulator/
K:	regulator_get_optional

VRF
M:	David Ahern <dsa@cumulusnetworks.com>
M:	David Ahern <dsahern@kernel.org>
M:	Shrijeet Mukherjee <shrijeet@gmail.com>
L:	netdev@vger.kernel.org
S:	Maintained
@@ -66,9 +66,21 @@
	pinctrl-1 = <&ephy_leds_pins>;
	status = "okay";

	gmac0: mac@0 {
		compatible = "mediatek,eth-mac";
		reg = <0>;
		phy-mode = "2500base-x";
		fixed-link {
			speed = <2500>;
			full-duplex;
			pause;
		};
	};

	gmac1: mac@1 {
		compatible = "mediatek,eth-mac";
		reg = <1>;
		phy-mode = "gmii";
		phy-handle = <&phy0>;
	};

@@ -78,7 +90,6 @@

		phy0: ethernet-phy@0 {
			reg = <0>;
			phy-mode = "gmii";
		};
	};
};
@@ -468,14 +468,12 @@
			compatible = "mediatek,mt7629-sgmiisys", "syscon";
			reg = <0x1b128000 0x3000>;
			#clock-cells = <1>;
			mediatek,physpeed = "2500";
		};

		sgmiisys1: syscon@1b130000 {
			compatible = "mediatek,mt7629-sgmiisys", "syscon";
			reg = <0x1b130000 0x3000>;
			#clock-cells = <1>;
			mediatek,physpeed = "2500";
		};
	};
};
@@ -4039,7 +4039,7 @@ out:
	 * this to-be-skipped slave to send a packet out.
	 */
	old_arr = rtnl_dereference(bond->slave_arr);
	for (idx = 0; idx < old_arr->count; idx++) {
	for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
		if (skipslave == old_arr->arr[idx]) {
			old_arr->arr[idx] =
			    old_arr->arr[old_arr->count-1];
@@ -1845,7 +1845,6 @@ int b53_mirror_add(struct dsa_switch *ds, int port,
		loc = B53_EG_MIR_CTL;

	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg &= ~MIRROR_MASK;
	reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);
@@ -1224,10 +1224,6 @@ static int ksz8795_switch_init(struct ksz_device *dev)
{
	int i;

	mutex_init(&dev->stats_mutex);
	mutex_init(&dev->alu_mutex);
	mutex_init(&dev->vlan_mutex);

	dev->ds->ops = &ksz8795_switch_ops;

	for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) {
@@ -25,6 +25,7 @@ KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT,

static int ksz8795_spi_probe(struct spi_device *spi)
{
	struct regmap_config rc;
	struct ksz_device *dev;
	int i, ret;

@@ -33,9 +34,9 @@ static int ksz8795_spi_probe(struct spi_device *spi)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
		dev->regmap[i] = devm_regmap_init_spi(spi,
						      &ksz8795_regmap_config
						      [i]);
		rc = ksz8795_regmap_config[i];
		rc.lock_arg = &dev->regmap_mutex;
		dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
		if (IS_ERR(dev->regmap[i])) {
			ret = PTR_ERR(dev->regmap[i]);
			dev_err(&spi->dev,
@@ -17,6 +17,7 @@ KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
static int ksz9477_i2c_probe(struct i2c_client *i2c,
			     const struct i2c_device_id *i2c_id)
{
	struct regmap_config rc;
	struct ksz_device *dev;
	int i, ret;

@@ -25,8 +26,9 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
		dev->regmap[i] = devm_regmap_init_i2c(i2c,
						      &ksz9477_regmap_config[i]);
		rc = ksz9477_regmap_config[i];
		rc.lock_arg = &dev->regmap_mutex;
		dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
		if (IS_ERR(dev->regmap[i])) {
			ret = PTR_ERR(dev->regmap[i]);
			dev_err(&i2c->dev,
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 *
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Microchip KSZ9477 register definitions
 *
 * Copyright (C) 2017-2018 Microchip Technology Inc.
@@ -24,6 +24,7 @@ KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,

static int ksz9477_spi_probe(struct spi_device *spi)
{
	struct regmap_config rc;
	struct ksz_device *dev;
	int i, ret;

@@ -32,8 +33,9 @@ static int ksz9477_spi_probe(struct spi_device *spi)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
		dev->regmap[i] = devm_regmap_init_spi(spi,
						      &ksz9477_regmap_config[i]);
		rc = ksz9477_regmap_config[i];
		rc.lock_arg = &dev->regmap_mutex;
		dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
		if (IS_ERR(dev->regmap[i])) {
			ret = PTR_ERR(dev->regmap[i]);
			dev_err(&spi->dev,
@@ -436,7 +436,7 @@ int ksz_switch_register(struct ksz_device *dev,
	}

	mutex_init(&dev->dev_mutex);
	mutex_init(&dev->stats_mutex);
	mutex_init(&dev->regmap_mutex);
	mutex_init(&dev->alu_mutex);
	mutex_init(&dev->vlan_mutex);
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Microchip switch driver common header
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip switch driver common header
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */
@@ -47,7 +47,7 @@ struct ksz_device {
	const char *name;

	struct mutex dev_mutex;		/* device access */
	struct mutex stats_mutex;	/* status access */
	struct mutex regmap_mutex;	/* regmap access */
	struct mutex alu_mutex;		/* ALU access */
	struct mutex vlan_mutex;	/* vlan access */
	const struct ksz_dev_ops *dev_ops;
@@ -290,6 +290,18 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
	ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}

static inline void ksz_regmap_lock(void *__mtx)
{
	struct mutex *mtx = __mtx;
	mutex_lock(mtx);
}

static inline void ksz_regmap_unlock(void *__mtx)
{
	struct mutex *mtx = __mtx;
	mutex_unlock(mtx);
}

/* Regmap tables generation */
#define KSZ_SPI_OP_RD		3
#define KSZ_SPI_OP_WR		2
@@ -314,6 +326,8 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
		.write_flag_mask =					\
			KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp,	\
					     regbits, regpad),		\
		.lock = ksz_regmap_lock,				\
		.unlock = ksz_regmap_unlock,				\
		.reg_format_endian = REGMAP_ENDIAN_BIG,			\
		.val_format_endian = REGMAP_ENDIAN_BIG			\
	}
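
For context on the locking hooks added to the KSZ_REGMAP_TABLE entries above: regmap_config lets a driver supply its own .lock/.unlock callbacks plus a .lock_arg cookie, so several regmaps (here, the driver's per-register-width tables) can serialize on one shared mutex instead of each taking a private one. A stand-alone sketch of that regmap pattern, with illustrative names that are not taken from this patch:

	#include <linux/mutex.h>
	#include <linux/regmap.h>

	static DEFINE_MUTEX(shared_regmap_lock);	/* shared by all regmaps */

	static void my_regmap_lock(void *ctx)
	{
		mutex_lock((struct mutex *)ctx);
	}

	static void my_regmap_unlock(void *ctx)
	{
		mutex_unlock((struct mutex *)ctx);
	}

	static const struct regmap_config my_regmap_cfg = {
		.reg_bits = 16,
		.val_bits = 8,
		.lock = my_regmap_lock,		/* called instead of regmap's internal lock */
		.unlock = my_regmap_unlock,
		.lock_arg = &shared_regmap_lock,	/* passed back to the callbacks */
	};
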
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */
#ifndef _SJA1105_H
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#ifndef _SJA1105_DYNAMIC_CONFIG_H
#define _SJA1105_DYNAMIC_CONFIG_H
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#ifndef _SJA1105_PTP_H
#define _SJA1105_PTP_H
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016-2018, NXP Semiconductors
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2016-2018, NXP Semiconductors
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */
#ifndef _SJA1105_STATIC_CONFIG_H
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#ifndef _SJA1105_TAS_H
#define _SJA1105_TAS_H
@@ -194,9 +194,7 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	aq_nic_set_packet_filter(aq_nic, ndev->flags);

	aq_nic_set_multicast_list(aq_nic, ndev);
	(void)aq_nic_set_multicast_list(aq_nic, ndev);
}

static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
@@ -631,9 +631,12 @@ err_exit:

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	unsigned int packet_filter = self->packet_filter;
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
@@ -641,29 +644,28 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_ALLMULTI;
	} else {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev) {
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
			}
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_MULTICAST;
		self->mc_list.count = i;
		self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						       self->mc_list.ar,
						       self->mc_list.count);
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}
	return aq_nic_set_packet_filter(self, packet_filter);
}
@ -313,6 +313,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
|
||||
break;
|
||||
|
||||
buff->is_error |= buff_->is_error;
|
||||
buff->is_cso_err |= buff_->is_cso_err;
|
||||
|
||||
} while (!buff_->is_eop);
|
||||
|
||||
@ -320,7 +321,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
|
||||
err = 0;
|
||||
goto err_exit;
|
||||
}
|
||||
if (buff->is_error) {
|
||||
if (buff->is_error || buff->is_cso_err) {
|
||||
buff_ = buff;
|
||||
do {
|
||||
next_ = buff_->next,
|
||||
|
@ -818,14 +818,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
|
||||
cfg->is_vlan_force_promisc);
|
||||
|
||||
hw_atl_rpfl2multicast_flr_en_set(self,
|
||||
IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
|
||||
IS_FILTER_ENABLED(IFF_ALLMULTI) &&
|
||||
IS_FILTER_ENABLED(IFF_MULTICAST), 0);
|
||||
|
||||
hw_atl_rpfl2_accept_all_mc_packets_set(self,
|
||||
IS_FILTER_ENABLED(IFF_ALLMULTI));
|
||||
IS_FILTER_ENABLED(IFF_ALLMULTI) &&
|
||||
IS_FILTER_ENABLED(IFF_MULTICAST));
|
||||
|
||||
hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
|
||||
|
||||
cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
|
||||
|
||||
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
|
||||
hw_atl_rpfl2_uc_flr_en_set(self,
|
||||
@ -968,14 +969,26 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
|
||||
|
||||
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
|
||||
{
|
||||
int err;
|
||||
u32 val;
|
||||
|
||||
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
|
||||
|
||||
/* Invalidate Descriptor Cache to prevent writing to the cached
|
||||
* descriptors and to the data pointer of those descriptors
|
||||
*/
|
||||
hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
|
||||
hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
|
||||
|
||||
return aq_hw_err_from_flags(self);
|
||||
err = aq_hw_err_from_flags(self);
|
||||
|
||||
if (err)
|
||||
goto err_exit;
|
||||
|
||||
readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
|
||||
self, val, val == 1, 1000U, 10000U);
|
||||
|
||||
err_exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
|
||||
|
@ -606,12 +606,25 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
|
||||
HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
|
||||
}
|
||||
|
||||
void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
|
||||
void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
|
||||
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
|
||||
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
|
||||
|
||||
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
|
||||
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
|
||||
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
|
||||
init);
|
||||
val ^ 1);
|
||||
}
|
||||
|
||||
u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
|
||||
{
|
||||
return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
|
||||
RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
|
||||
RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
|
||||
}
|
||||
|
||||
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
|
||||
|
@ -313,8 +313,11 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
|
||||
u32 rx_pkt_buff_size_per_tc,
|
||||
u32 buffer);
|
||||
|
||||
/* set rdm rx dma descriptor cache init */
|
||||
void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
|
||||
/* toggle rdm rx dma descriptor cache init */
|
||||
void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
|
||||
|
||||
/* get rdm rx dma descriptor cache init done */
|
||||
u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
|
||||
|
||||
/* set rx xoff enable (per tc) */
|
||||
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
|
||||
|
@ -318,6 +318,25 @@
|
||||
/* default value of bitfield rdm_desc_init_i */
|
||||
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
|
||||
|
||||
/* rdm_desc_init_done_i bitfield definitions
|
||||
* preprocessor definitions for the bitfield rdm_desc_init_done_i.
|
||||
* port="pif_rdm_desc_init_done_i"
|
||||
*/
|
||||
|
||||
/* register address for bitfield rdm_desc_init_done_i */
|
||||
#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
|
||||
/* bitmask for bitfield rdm_desc_init_done_i */
|
||||
#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
|
||||
/* inverted bitmask for bitfield rdm_desc_init_done_i */
|
||||
#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
|
||||
/* lower bit position of bitfield rdm_desc_init_done_i */
|
||||
#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
|
||||
/* width of bitfield rdm_desc_init_done_i */
|
||||
#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
|
||||
/* default value of bitfield rdm_desc_init_done_i */
|
||||
#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
|
||||
|
||||
|
||||
/* rx int_desc_wrb_en bitfield definitions
|
||||
* preprocessor definitions for the bitfield "int_desc_wrb_en".
|
||||
* port="pif_rdm_int_desc_wrb_en_i"
|
||||
|
@ -337,7 +337,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
|
||||
/* Convert PHY temperature from 1/256 degree Celsius
|
||||
* to 1/1000 degree Celsius.
|
||||
*/
|
||||
*temp = temp_res * 1000 / 256;
|
||||
*temp = (temp_res & 0xFFFF) * 1000 / 256;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -8,7 +8,6 @@ config NET_VENDOR_BROADCOM
|
||||
default y
|
||||
depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
|
||||
SIBYTE_SB1xxx_SOC
|
||||
select DIMLIB
|
||||
---help---
|
||||
If you have a network (Ethernet) chipset belonging to this class,
|
||||
say Y.
|
||||
@ -69,6 +68,7 @@ config BCMGENET
|
||||
select FIXED_PHY
|
||||
select BCM7XXX_PHY
|
||||
select MDIO_BCM_UNIMAC
|
||||
select DIMLIB
|
||||
help
|
||||
This driver supports the built-in Ethernet MACs found in the
|
||||
Broadcom BCM7xxx Set Top Box family chipset.
|
||||
@ -188,6 +188,7 @@ config SYSTEMPORT
|
||||
select MII
|
||||
select PHYLIB
|
||||
select FIXED_PHY
|
||||
select DIMLIB
|
||||
help
|
||||
This driver supports the built-in Ethernet MACs found in the
|
||||
Broadcom BCM7xxx Set Top Box family chipset using an internal
|
||||
@ -200,6 +201,7 @@ config BNXT
|
||||
select LIBCRC32C
|
||||
select NET_DEVLINK
|
||||
select PAGE_POOL
|
||||
select DIMLIB
|
||||
---help---
|
||||
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
|
||||
Ethernet cards. To compile this driver as a module, choose M here:
|
||||
|
@ -2018,6 +2018,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
|
||||
*/
|
||||
if (priv->internal_phy) {
|
||||
int0_enable |= UMAC_IRQ_LINK_EVENT;
|
||||
if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
|
||||
int0_enable |= UMAC_IRQ_PHY_DET_R;
|
||||
} else if (priv->ext_phy) {
|
||||
int0_enable |= UMAC_IRQ_LINK_EVENT;
|
||||
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
|
||||
@ -2611,11 +2613,14 @@ static void bcmgenet_irq_task(struct work_struct *work)
|
||||
priv->irq0_stat = 0;
|
||||
spin_unlock_irq(&priv->lock);
|
||||
|
||||
if (status & UMAC_IRQ_PHY_DET_R &&
|
||||
priv->dev->phydev->autoneg != AUTONEG_ENABLE)
|
||||
phy_init_hw(priv->dev->phydev);
|
||||
|
||||
/* Link UP/DOWN event */
|
||||
if (status & UMAC_IRQ_LINK_EVENT) {
|
||||
priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
|
||||
if (status & UMAC_IRQ_LINK_EVENT)
|
||||
phy_mac_interrupt(priv->dev->phydev);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* bcmgenet_isr1: handle Rx and Tx priority queues */
|
||||
@ -2710,7 +2715,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
|
||||
}
|
||||
|
||||
/* all other interested interrupts handled in bottom half */
|
||||
status &= UMAC_IRQ_LINK_EVENT;
|
||||
status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
|
||||
if (status) {
|
||||
/* Save irq status for bottom-half processing. */
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
@ -2874,6 +2879,12 @@ static int bcmgenet_open(struct net_device *dev)
|
||||
if (priv->internal_phy)
|
||||
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
|
||||
|
||||
ret = bcmgenet_mii_connect(dev);
|
||||
if (ret) {
|
||||
netdev_err(dev, "failed to connect to PHY\n");
|
||||
goto err_clk_disable;
|
||||
}
|
||||
|
||||
/* take MAC out of reset */
|
||||
bcmgenet_umac_reset(priv);
|
||||
|
||||
@ -2883,6 +2894,12 @@ static int bcmgenet_open(struct net_device *dev)
|
||||
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
|
||||
priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
|
||||
|
||||
ret = bcmgenet_mii_config(dev, true);
|
||||
if (ret) {
|
||||
netdev_err(dev, "unsupported PHY\n");
|
||||
goto err_disconnect_phy;
|
||||
}
|
||||
|
||||
bcmgenet_set_hw_addr(priv, dev->dev_addr);
|
||||
|
||||
if (priv->internal_phy) {
|
||||
@ -2898,7 +2915,7 @@ static int bcmgenet_open(struct net_device *dev)
|
||||
ret = bcmgenet_init_dma(priv);
|
||||
if (ret) {
|
||||
netdev_err(dev, "failed to initialize DMA\n");
|
||||
goto err_clk_disable;
|
||||
goto err_disconnect_phy;
|
||||
}
|
||||
|
||||
/* Always enable ring 16 - descriptor ring */
|
||||
@ -2921,25 +2938,19 @@ static int bcmgenet_open(struct net_device *dev)
|
||||
goto err_irq0;
|
||||
}
|
||||
|
||||
ret = bcmgenet_mii_probe(dev);
|
||||
if (ret) {
|
||||
netdev_err(dev, "failed to connect to PHY\n");
|
||||
goto err_irq1;
|
||||
}
|
||||
|
||||
bcmgenet_netif_start(dev);
|
||||
|
||||
netif_tx_start_all_queues(dev);
|
||||
|
||||
return 0;
|
||||
|
||||
err_irq1:
|
||||
free_irq(priv->irq1, priv);
|
||||
err_irq0:
|
||||
free_irq(priv->irq0, priv);
|
||||
err_fini_dma:
|
||||
bcmgenet_dma_teardown(priv);
|
||||
bcmgenet_fini_dma(priv);
|
||||
err_disconnect_phy:
|
||||
phy_disconnect(dev->phydev);
|
||||
err_clk_disable:
|
||||
if (priv->internal_phy)
|
||||
bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
|
||||
@ -3620,6 +3631,8 @@ static int bcmgenet_resume(struct device *d)
|
||||
if (priv->internal_phy)
|
||||
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
|
||||
|
||||
phy_init_hw(dev->phydev);
|
||||
|
||||
bcmgenet_umac_reset(priv);
|
||||
|
||||
init_umac(priv);
|
||||
@ -3628,8 +3641,6 @@ static int bcmgenet_resume(struct device *d)
|
||||
if (priv->wolopts)
|
||||
clk_disable_unprepare(priv->clk_wol);
|
||||
|
||||
phy_init_hw(dev->phydev);
|
||||
|
||||
/* Speed settings must be restored */
|
||||
bcmgenet_mii_config(priv->dev, false);
|
||||
|
||||
|
@ -366,6 +366,7 @@ struct bcmgenet_mib_counters {
|
||||
#define EXT_PWR_DOWN_PHY_EN (1 << 20)
|
||||
|
||||
#define EXT_RGMII_OOB_CTRL 0x0C
|
||||
#define RGMII_MODE_EN_V123 (1 << 0)
|
||||
#define RGMII_LINK (1 << 4)
|
||||
#define OOB_DISABLE (1 << 5)
|
||||
#define RGMII_MODE_EN (1 << 6)
|
||||
@ -719,8 +720,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
|
||||
|
||||
/* MDIO routines */
|
||||
int bcmgenet_mii_init(struct net_device *dev);
|
||||
int bcmgenet_mii_connect(struct net_device *dev);
|
||||
int bcmgenet_mii_config(struct net_device *dev, bool init);
|
||||
int bcmgenet_mii_probe(struct net_device *dev);
|
||||
void bcmgenet_mii_exit(struct net_device *dev);
|
||||
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
|
||||
void bcmgenet_mii_setup(struct net_device *dev);
|
||||
|
@ -173,6 +173,46 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
|
||||
bcmgenet_fixed_phy_link_update);
|
||||
}
|
||||
|
||||
int bcmgenet_mii_connect(struct net_device *dev)
|
||||
{
|
||||
struct bcmgenet_priv *priv = netdev_priv(dev);
|
||||
struct device_node *dn = priv->pdev->dev.of_node;
|
||||
struct phy_device *phydev;
|
||||
u32 phy_flags = 0;
|
||||
int ret;
|
||||
|
||||
/* Communicate the integrated PHY revision */
|
||||
if (priv->internal_phy)
|
||||
phy_flags = priv->gphy_rev;
|
||||
|
||||
/* Initialize link state variables that bcmgenet_mii_setup() uses */
|
||||
priv->old_link = -1;
|
||||
priv->old_speed = -1;
|
||||
priv->old_duplex = -1;
|
||||
priv->old_pause = -1;
|
||||
|
||||
if (dn) {
|
||||
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
|
||||
phy_flags, priv->phy_interface);
|
||||
if (!phydev) {
|
||||
pr_err("could not attach to PHY\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
} else {
|
||||
phydev = dev->phydev;
|
||||
phydev->dev_flags = phy_flags;
|
||||
|
||||
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
|
||||
priv->phy_interface);
|
||||
if (ret) {
|
||||
pr_err("could not attach to PHY\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bcmgenet_mii_config(struct net_device *dev, bool init)
|
||||
{
|
||||
struct bcmgenet_priv *priv = netdev_priv(dev);
|
||||
@ -258,74 +298,29 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
|
||||
*/
|
||||
if (priv->ext_phy) {
|
||||
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
|
||||
reg |= RGMII_MODE_EN | id_mode_dis;
|
||||
reg |= id_mode_dis;
|
||||
if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
|
||||
reg |= RGMII_MODE_EN_V123;
|
||||
else
|
||||
reg |= RGMII_MODE_EN;
|
||||
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
|
||||
}
|
||||
|
||||
if (init)
|
||||
if (init) {
|
||||
linkmode_copy(phydev->advertising, phydev->supported);
|
||||
|
||||
/* The internal PHY has its link interrupts routed to the
|
||||
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
|
||||
* that prevents the signaling of link UP interrupts when
|
||||
* the link operates at 10Mbps, so fallback to polling for
|
||||
* those versions of GENET.
|
||||
*/
|
||||
if (priv->internal_phy && !GENET_IS_V5(priv))
|
||||
phydev->irq = PHY_IGNORE_INTERRUPT;
|
||||
|
||||
dev_info(kdev, "configuring instance for %s\n", phy_name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bcmgenet_mii_probe(struct net_device *dev)
|
||||
{
|
||||
struct bcmgenet_priv *priv = netdev_priv(dev);
|
||||
struct device_node *dn = priv->pdev->dev.of_node;
|
||||
struct phy_device *phydev;
|
||||
u32 phy_flags;
|
||||
int ret;
|
||||
|
||||
/* Communicate the integrated PHY revision */
|
||||
phy_flags = priv->gphy_rev;
|
||||
|
||||
/* Initialize link state variables that bcmgenet_mii_setup() uses */
|
||||
priv->old_link = -1;
|
||||
priv->old_speed = -1;
|
||||
priv->old_duplex = -1;
|
||||
priv->old_pause = -1;
|
||||
|
||||
if (dn) {
|
||||
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
|
||||
phy_flags, priv->phy_interface);
|
||||
if (!phydev) {
|
||||
pr_err("could not attach to PHY\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
} else {
|
||||
phydev = dev->phydev;
|
||||
phydev->dev_flags = phy_flags;
|
||||
|
||||
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
|
||||
priv->phy_interface);
|
||||
if (ret) {
|
||||
pr_err("could not attach to PHY\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
|
||||
/* Configure port multiplexer based on what the probed PHY device since
|
||||
* reading the 'max-speed' property determines the maximum supported
|
||||
* PHY speed which is needed for bcmgenet_mii_config() to configure
|
||||
* things appropriately.
|
||||
*/
|
||||
ret = bcmgenet_mii_config(dev, true);
|
||||
if (ret) {
|
||||
phy_disconnect(dev->phydev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
linkmode_copy(phydev->advertising, phydev->supported);
|
||||
|
||||
/* The internal PHY has its link interrupts routed to the
|
||||
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
|
||||
* that prevents the signaling of link UP interrupts when
|
||||
* the link operates at 10Mbps, so fallback to polling for
|
||||
* those versions of GENET.
|
||||
*/
|
||||
if (priv->internal_phy && !GENET_IS_V5(priv))
|
||||
dev->phydev->irq = PHY_IGNORE_INTERRUPT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3405,17 +3405,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
|
||||
return err;
|
||||
}
|
||||
|
||||
*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
|
||||
*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
|
||||
if (IS_ERR(*tx_clk))
|
||||
*tx_clk = NULL;
|
||||
return PTR_ERR(*tx_clk);
|
||||
|
||||
*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
|
||||
*rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
|
||||
if (IS_ERR(*rx_clk))
|
||||
*rx_clk = NULL;
|
||||
return PTR_ERR(*rx_clk);
|
||||
|
||||
*tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
|
||||
*tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
|
||||
if (IS_ERR(*tsu_clk))
|
||||
*tsu_clk = NULL;
|
||||
return PTR_ERR(*tsu_clk);
|
||||
|
||||
err = clk_prepare_enable(*pclk);
|
||||
if (err) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
|
||||
* Copyright (c) 2003-2015, 2017 Cavium, Inc.
|
||||
*/
|
||||
|
@ -1235,6 +1235,8 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
|
||||
priv->rx_td_enabled = enable;
|
||||
}
|
||||
|
||||
static void update_tx_fqids(struct dpaa2_eth_priv *priv);
|
||||
|
||||
static int link_state_update(struct dpaa2_eth_priv *priv)
|
||||
{
|
||||
struct dpni_link_state state = {0};
|
||||
@ -1261,6 +1263,7 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
|
||||
goto out;
|
||||
|
||||
if (state.up) {
|
||||
update_tx_fqids(priv);
|
||||
netif_carrier_on(priv->net_dev);
|
||||
netif_tx_start_all_queues(priv->net_dev);
|
||||
} else {
|
||||
@ -2533,6 +2536,47 @@ static int set_pause(struct dpaa2_eth_priv *priv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void update_tx_fqids(struct dpaa2_eth_priv *priv)
|
||||
{
|
||||
struct dpni_queue_id qid = {0};
|
||||
struct dpaa2_eth_fq *fq;
|
||||
struct dpni_queue queue;
|
||||
int i, j, err;
|
||||
|
||||
/* We only use Tx FQIDs for FQID-based enqueue, so check
|
||||
* if DPNI version supports it before updating FQIDs
|
||||
*/
|
||||
if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
|
||||
DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
|
||||
return;
|
||||
|
||||
for (i = 0; i < priv->num_fqs; i++) {
|
||||
fq = &priv->fq[i];
|
||||
if (fq->type != DPAA2_TX_CONF_FQ)
|
||||
continue;
|
||||
for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
|
||||
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
|
||||
DPNI_QUEUE_TX, j, fq->flowid,
|
||||
&queue, &qid);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
fq->tx_fqid[j] = qid.fqid;
|
||||
if (fq->tx_fqid[j] == 0)
|
||||
goto out_err;
|
||||
}
|
||||
}
|
||||
|
||||
priv->enqueue = dpaa2_eth_enqueue_fq;
|
||||
|
||||
return;
|
||||
|
||||
out_err:
|
||||
netdev_info(priv->net_dev,
|
||||
"Error reading Tx FQID, fallback to QDID-based enqueue\n");
|
||||
priv->enqueue = dpaa2_eth_enqueue_qd;
|
||||
}
|
||||
|
||||
/* Configure the DPNI object this interface is associated with */
|
||||
static int setup_dpni(struct fsl_mc_device *ls_dev)
|
||||
{
|
||||
@ -3306,6 +3350,9 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
|
||||
if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
|
||||
link_state_update(netdev_priv(net_dev));
|
||||
|
||||
if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED)
|
||||
set_mac_addr(netdev_priv(net_dev));
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@ -3331,7 +3378,8 @@ static int setup_irqs(struct fsl_mc_device *ls_dev)
|
||||
}
|
||||
|
||||
err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
|
||||
DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
|
||||
DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
|
||||
DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
|
||||
if (err < 0) {
|
||||
dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
|
||||
goto free_irq;
|
||||
|
@ -133,9 +133,12 @@ int dpni_reset(struct fsl_mc_io *mc_io,
|
||||
*/
|
||||
#define DPNI_IRQ_INDEX 0
|
||||
/**
|
||||
* IRQ event - indicates a change in link state
|
||||
* IRQ events:
|
||||
* indicates a change in link state
|
||||
* indicates a change in endpoint
|
||||
*/
|
||||
#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
|
||||
#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002
|
||||
|
||||
int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
|
||||
u32 cmd_flags,
|
||||
|
@ -32,6 +32,8 @@
|
||||
|
||||
#define HNAE3_MOD_VERSION "1.0"
|
||||
|
||||
#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
|
||||
|
||||
/* Device IDs */
|
||||
#define HNAE3_DEV_ID_GE 0xA220
|
||||
#define HNAE3_DEV_ID_25GE 0xA221
|
||||
|
@ -906,6 +906,9 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
|
||||
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
|
||||
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
|
||||
|
||||
/* nic's msix numbers is always equals to the roce's. */
|
||||
hdev->num_nic_msi = hdev->num_roce_msi;
|
||||
|
||||
/* PF should have NIC vectors and Roce vectors,
|
||||
* NIC vectors are queued before Roce vectors.
|
||||
*/
|
||||
@ -915,6 +918,15 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
|
||||
hdev->num_msi =
|
||||
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
|
||||
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
|
||||
|
||||
hdev->num_nic_msi = hdev->num_msi;
|
||||
}
|
||||
|
||||
if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"Just %u msi resources, not enough for pf(min:2).\n",
|
||||
hdev->num_nic_msi);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1507,6 +1519,10 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
|
||||
kinfo->rss_size = min_t(u16, hdev->rss_size_max,
|
||||
vport->alloc_tqps / hdev->tm_info.num_tc);
|
||||
|
||||
/* ensure one to one mapping between irq and queue at default */
|
||||
kinfo->rss_size = min_t(u16, kinfo->rss_size,
|
||||
(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2285,7 +2301,8 @@ static int hclge_init_msi(struct hclge_dev *hdev)
|
||||
int vectors;
|
||||
int i;
|
||||
|
||||
vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
|
||||
vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
|
||||
hdev->num_msi,
|
||||
PCI_IRQ_MSI | PCI_IRQ_MSIX);
|
||||
if (vectors < 0) {
|
||||
dev_err(&pdev->dev,
|
||||
@ -2300,6 +2317,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
|
||||
|
||||
hdev->num_msi = vectors;
|
||||
hdev->num_msi_left = vectors;
|
||||
|
||||
hdev->base_msi_vector = pdev->irq;
|
||||
hdev->roce_base_vector = hdev->base_msi_vector +
|
||||
hdev->roce_base_msix_offset;
|
||||
@ -3903,6 +3921,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
|
||||
int alloc = 0;
|
||||
int i, j;
|
||||
|
||||
vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
|
||||
vector_num = min(hdev->num_msi_left, vector_num);
|
||||
|
||||
for (j = 0; j < vector_num; j++) {
|
||||
|
@ -763,6 +763,7 @@ struct hclge_dev {
|
||||
u32 base_msi_vector;
|
||||
u16 *vector_status;
|
||||
int *vector_irq;
|
||||
u16 num_nic_msi; /* Num of nic vectors for this PF */
|
||||
u16 num_roce_msi; /* Num of roce vectors for this PF */
|
||||
int roce_base_vector;
|
||||
|
||||
|
@ -537,9 +537,16 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
|
||||
kinfo->rss_size = kinfo->req_rss_size;
|
||||
} else if (kinfo->rss_size > max_rss_size ||
|
||||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
|
||||
/* if user not set rss, the rss_size should compare with the
|
||||
* valid msi numbers to ensure one to one map between tqp and
|
||||
* irq as default.
|
||||
*/
|
||||
if (!kinfo->req_rss_size)
|
||||
max_rss_size = min_t(u16, max_rss_size,
|
||||
(hdev->num_nic_msi - 1) /
|
||||
kinfo->num_tc);
|
||||
|
||||
/* Set to the maximum specification value (max_rss_size). */
|
||||
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
|
||||
kinfo->rss_size, max_rss_size);
|
||||
kinfo->rss_size = max_rss_size;
|
||||
}
|
||||
|
||||
|
@ -411,6 +411,13 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
|
||||
kinfo->tqp[i] = &hdev->htqp[i].q;
|
||||
}
|
||||
|
||||
/* after init the max rss_size and tqps, adjust the default tqp numbers
|
||||
* and rss size with the actual vector numbers
|
||||
*/
|
||||
kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
|
||||
kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
|
||||
kinfo->rss_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -502,6 +509,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
|
||||
int alloc = 0;
|
||||
int i, j;
|
||||
|
||||
vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
|
||||
vector_num = min(hdev->num_msi_left, vector_num);
|
||||
|
||||
for (j = 0; j < vector_num; j++) {
|
||||
@ -2246,13 +2254,14 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
|
||||
int vectors;
|
||||
int i;
|
||||
|
||||
if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
|
||||
if (hnae3_dev_roce_supported(hdev))
|
||||
vectors = pci_alloc_irq_vectors(pdev,
|
||||
hdev->roce_base_msix_offset + 1,
|
||||
hdev->num_msi,
|
||||
PCI_IRQ_MSIX);
|
||||
else
|
||||
vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
|
||||
vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
|
||||
hdev->num_msi,
|
||||
PCI_IRQ_MSI | PCI_IRQ_MSIX);
|
||||
|
||||
if (vectors < 0) {
|
||||
@ -2268,6 +2277,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
|
||||
|
||||
hdev->num_msi = vectors;
|
||||
hdev->num_msi_left = vectors;
|
||||
|
||||
hdev->base_msi_vector = pdev->irq;
|
||||
hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
|
||||
|
||||
@ -2533,7 +2543,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
|
||||
|
||||
req = (struct hclgevf_query_res_cmd *)desc.data;
|
||||
|
||||
if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
|
||||
if (hnae3_dev_roce_supported(hdev)) {
|
||||
hdev->roce_base_msix_offset =
|
||||
hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
|
||||
HCLGEVF_MSIX_OFT_ROCEE_M,
|
||||
@ -2542,6 +2552,9 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
|
||||
hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
|
||||
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
|
||||
|
||||
/* nic's msix numbers is always equals to the roce's. */
|
||||
hdev->num_nic_msix = hdev->num_roce_msix;
|
||||
|
||||
/* VF should have NIC vectors and Roce vectors, NIC vectors
|
||||
* are queued before Roce vectors. The offset is fixed to 64.
|
||||
*/
|
||||
@ -2551,6 +2564,15 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
|
||||
hdev->num_msi =
|
||||
hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
|
||||
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
|
||||
|
||||
hdev->num_nic_msix = hdev->num_msi;
|
||||
}
|
||||
|
||||
if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"Just %u msi resources, not enough for vf(min:2).\n",
|
||||
hdev->num_nic_msix);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -270,6 +270,7 @@ struct hclgevf_dev {
|
||||
u16 num_msi;
|
||||
u16 num_msi_left;
|
||||
u16 num_msi_used;
|
||||
u16 num_nic_msix; /* Num of nic vectors for this VF */
|
||||
u16 num_roce_msix; /* Num of roce vectors for this VF */
|
||||
u16 roce_base_msix_offset;
|
||||
int roce_base_vector;
|
||||
|
@ -96,6 +96,8 @@
|
||||
|
||||
#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
|
||||
|
||||
#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
|
||||
|
||||
#define DMA_WBACK(ndev, addr, len) \
|
||||
do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
|
||||
|
||||
@ -200,7 +202,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
|
||||
|
||||
unregister_netdev (dev);
|
||||
dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
|
||||
lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
|
||||
lp->dma_addr, LIB82596_DMA_ATTR);
|
||||
free_netdev (dev);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
|
||||
|
||||
dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
|
||||
&lp->dma_addr, GFP_KERNEL,
|
||||
DMA_ATTR_NON_CONSISTENT);
|
||||
LIB82596_DMA_ATTR);
|
||||
if (!dma) {
|
||||
printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
|
||||
return -ENOMEM;
|
||||
@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
|
||||
i = register_netdev(dev);
|
||||
if (i) {
|
||||
dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
|
||||
dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
|
||||
dma, lp->dma_addr, LIB82596_DMA_ATTR);
|
||||
return i;
|
||||
}
|
||||
|
||||
|
@ -24,6 +24,8 @@
|
||||
|
||||
static const char sni_82596_string[] = "snirm_82596";
|
||||
|
||||
#define LIB82596_DMA_ATTR 0
|
||||
|
||||
#define DMA_WBACK(priv, addr, len) do { } while (0)
|
||||
#define DMA_INV(priv, addr, len) do { } while (0)
|
||||
#define DMA_WBACK_INV(priv, addr, len) do { } while (0)
|
||||
@ -152,7 +154,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
|
||||
|
||||
unregister_netdev(dev);
|
||||
dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
|
||||
lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
|
||||
lp->dma_addr, LIB82596_DMA_ATTR);
|
||||
iounmap(lp->ca);
|
||||
iounmap(lp->mpu_port);
|
||||
free_netdev (dev);
|
||||
|
@ -2878,12 +2878,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
|
||||
|
||||
if (test_bit(0, &adapter->resetting) &&
|
||||
adapter->reset_reason == VNIC_RESET_MOBILITY) {
|
||||
u64 val = (0xff000000) | scrq->hw_irq;
|
||||
struct irq_desc *desc = irq_to_desc(scrq->irq);
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
|
||||
rc = plpar_hcall_norets(H_EOI, val);
|
||||
if (rc)
|
||||
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
|
||||
val, rc);
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
}
|
||||
|
||||
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
|
||||
|
@ -261,6 +261,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
|
||||
ge_mode = 0;
|
||||
switch (state->interface) {
|
||||
case PHY_INTERFACE_MODE_MII:
|
||||
case PHY_INTERFACE_MODE_GMII:
|
||||
ge_mode = 1;
|
||||
break;
|
||||
case PHY_INTERFACE_MODE_REVMII:
|
||||
|
@ -788,12 +788,10 @@ again:
|
||||
* it means that all the previous stes are the same,
|
||||
* if so, this rule is duplicated.
|
||||
*/
|
||||
if (mlx5dr_ste_is_last_in_rule(nic_matcher,
|
||||
matched_ste->ste_chain_location)) {
|
||||
mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
|
||||
return NULL;
|
||||
}
|
||||
return matched_ste;
|
||||
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
|
||||
return matched_ste;
|
||||
|
||||
mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
|
||||
}
|
||||
|
||||
if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
|
||||
|
@ -99,6 +99,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
|
||||
devlink = priv_to_devlink(mlxsw_sp->core);
|
||||
in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
|
||||
local_port);
|
||||
skb_push(skb, ETH_HLEN);
|
||||
devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
|
||||
consume_skb(skb);
|
||||
}
|
||||
|
@ -1356,9 +1356,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
|
||||
if (!is_valid_ether_addr(ndev->dev_addr))
|
||||
eth_hw_addr_random(ndev);
|
||||
|
||||
/* Reset the ethernet controller */
|
||||
__lpc_eth_reset(pldat);
|
||||
|
||||
/* then shut everything down to save power */
|
||||
__lpc_eth_shutdown(pldat);
|
||||
|
||||
|
@ -182,6 +182,8 @@ struct ionic_lif {
|
||||
|
||||
#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
|
||||
#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
|
||||
#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
|
||||
#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
|
||||
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
|
||||
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
|
||||
|
||||
|
@ -117,7 +117,8 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
|
||||
/* rx stats */
|
||||
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
|
||||
|
||||
if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
if (test_bit(IONIC_LIF_UP, lif->state) &&
|
||||
test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
/* tx debug stats */
|
||||
total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
|
||||
IONIC_NUM_TX_Q_STATS +
|
||||
@ -149,7 +150,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
|
||||
*buf += ETH_GSTRING_LEN;
|
||||
}
|
||||
|
||||
if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
if (test_bit(IONIC_LIF_UP, lif->state) &&
|
||||
test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
|
||||
snprintf(*buf, ETH_GSTRING_LEN,
|
||||
"txq_%d_%s",
|
||||
@ -187,7 +189,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
|
||||
*buf += ETH_GSTRING_LEN;
|
||||
}
|
||||
|
||||
if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
if (test_bit(IONIC_LIF_UP, lif->state) &&
|
||||
test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
|
||||
snprintf(*buf, ETH_GSTRING_LEN,
|
||||
"rxq_%d_cq_%s",
|
||||
@ -223,6 +226,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
|
||||
{
|
||||
struct ionic_lif_sw_stats lif_stats;
|
||||
struct ionic_qcq *txqcq, *rxqcq;
|
||||
struct ionic_tx_stats *txstats;
|
||||
struct ionic_rx_stats *rxstats;
|
||||
int i, q_num;
|
||||
|
||||
ionic_get_lif_stats(lif, &lif_stats);
|
||||
@ -233,15 +238,17 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
|
||||
}
|
||||
|
||||
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
|
||||
txqcq = lif_to_txqcq(lif, q_num);
|
||||
txstats = &lif_to_txstats(lif, q_num);
|
||||
|
||||
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
|
||||
**buf = IONIC_READ_STAT64(&txqcq->stats->tx,
|
||||
**buf = IONIC_READ_STAT64(txstats,
|
||||
&ionic_tx_stats_desc[i]);
|
||||
(*buf)++;
|
||||
}
|
||||
|
||||
if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
if (test_bit(IONIC_LIF_UP, lif->state) &&
|
||||
test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
txqcq = lif_to_txqcq(lif, q_num);
|
||||
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
|
||||
**buf = IONIC_READ_STAT64(&txqcq->q,
|
||||
&ionic_txq_stats_desc[i]);
|
||||
@ -258,22 +265,24 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
|
||||
(*buf)++;
|
||||
}
|
||||
for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
|
||||
**buf = txqcq->stats->tx.sg_cntr[i];
|
||||
**buf = txstats->sg_cntr[i];
|
||||
(*buf)++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
|
||||
rxqcq = lif_to_rxqcq(lif, q_num);
|
||||
rxstats = &lif_to_rxstats(lif, q_num);
|
||||
|
||||
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
|
||||
**buf = IONIC_READ_STAT64(&rxqcq->stats->rx,
|
||||
**buf = IONIC_READ_STAT64(rxstats,
|
||||
&ionic_rx_stats_desc[i]);
|
||||
(*buf)++;
|
||||
}
|
||||
|
||||
if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
if (test_bit(IONIC_LIF_UP, lif->state) &&
|
||||
test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
|
||||
rxqcq = lif_to_rxqcq(lif, q_num);
|
||||
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
|
||||
**buf = IONIC_READ_STAT64(&rxqcq->cq,
|
||||
&ionic_dbg_cq_stats_desc[i]);
|
||||
|
@ -4146,6 +4146,14 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
|
||||
rtl_lock_config_regs(tp);
|
||||
}
|
||||
|
||||
static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
|
||||
{
|
||||
if (mtu > ETH_DATA_LEN)
|
||||
rtl_hw_jumbo_enable(tp);
|
||||
else
|
||||
rtl_hw_jumbo_disable(tp);
|
||||
}
|
||||
|
||||
DECLARE_RTL_COND(rtl_chipcmd_cond)
|
||||
{
|
||||
return RTL_R8(tp, ChipCmd) & CmdReset;
|
||||
@ -4442,11 +4450,6 @@ static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
|
||||
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
|
||||
{
|
||||
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
|
||||
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN) {
|
||||
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
|
||||
PCI_EXP_DEVCTL_NOSNOOP_EN);
|
||||
}
|
||||
}
|
||||
|
||||
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
|
||||
@ -4462,9 +4465,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
|
||||
|
||||
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
|
||||
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
|
||||
|
||||
rtl_disable_clock_request(tp);
|
||||
}
|
||||
|
||||
@ -4490,9 +4490,6 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
|
||||
rtl_set_def_aspm_entry_latency(tp);
|
||||
|
||||
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
|
||||
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
|
||||
}
|
||||
|
||||
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
|
||||
@ -4503,9 +4500,6 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
|
||||
|
||||
/* Magic. */
|
||||
RTL_W8(tp, DBG_REG, 0x20);
|
||||
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
|
||||
}
|
||||
|
||||
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
|
||||
@ -4611,9 +4605,6 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
|
||||
|
||||
rtl_ephy_init(tp, e_info_8168e_1);
|
||||
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
|
||||
|
||||
rtl_disable_clock_request(tp);
|
||||
|
||||
/* Reset tx FIFO pointer */
|
||||
@ -4636,9 +4627,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
|
||||
|
||||
rtl_ephy_init(tp, e_info_8168e_2);
|
||||
|
||||
if (tp->dev->mtu <= ETH_DATA_LEN)
|
||||
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
|
||||
|
||||
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
|
||||
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
|
||||
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
|
||||
@ -5485,6 +5473,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
|
||||
rtl_set_rx_tx_desc_registers(tp);
|
||||
rtl_lock_config_regs(tp);
|
||||
|
||||
rtl_jumbo_config(tp, tp->dev->mtu);
|
||||
|
||||
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
|
||||
RTL_R16(tp, CPlusCmd);
|
||||
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
|
||||
@ -5498,10 +5488,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
|
||||
{
|
||||
struct rtl8169_private *tp = netdev_priv(dev);
|
||||
|
||||
if (new_mtu > ETH_DATA_LEN)
|
||||
rtl_hw_jumbo_enable(tp);
|
||||
else
|
||||
rtl_hw_jumbo_disable(tp);
|
||||
rtl_jumbo_config(tp, new_mtu);
|
||||
|
||||
dev->mtu = new_mtu;
|
||||
netdev_update_features(dev);
|
||||
|
@ -651,7 +651,8 @@ static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
netdev_info(dev, "Too many address, switching to promiscuous\n");
|
||||
if (!(readl(ioaddr + EMAC_RX_FRM_FLT) & EMAC_FRM_FLT_RXALL))
|
||||
netdev_info(dev, "Too many address, switching to promiscuous\n");
|
||||
v = EMAC_FRM_FLT_RXALL;
|
||||
}
|
||||
|
||||
|
@ -448,7 +448,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
|
||||
value |= GMAC_PACKET_FILTER_HPF;
|
||||
|
||||
/* Handle multiple unicast addresses */
|
||||
if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
|
||||
if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
|
||||
/* Switch to promiscuous mode if more than 128 addrs
|
||||
* are required
|
||||
*/
|
||||
|
@ -515,6 +515,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
|
||||
|
||||
if (!enable) {
|
||||
val |= PPSCMDx(index, 0x5);
|
||||
val |= PPSEN0;
|
||||
writel(val, ioaddr + MAC_PPS_CONTROL);
|
||||
return 0;
|
||||
}
|
||||
|
@ -2610,7 +2610,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
|
||||
}
|
||||
|
||||
if (priv->hw->pcs)
|
||||
stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
|
||||
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
|
||||
|
||||
/* set TX and RX rings length */
|
||||
stmmac_set_rings_length(priv);
|
||||
@ -4742,8 +4742,10 @@ int stmmac_suspend(struct device *dev)
|
||||
stmmac_mac_set(priv, priv->ioaddr, false);
|
||||
pinctrl_pm_select_sleep_state(priv->device);
|
||||
/* Disable clock in case of PWM is off */
|
||||
clk_disable(priv->plat->pclk);
|
||||
clk_disable(priv->plat->stmmac_clk);
|
||||
if (priv->plat->clk_ptp_ref)
|
||||
clk_disable_unprepare(priv->plat->clk_ptp_ref);
|
||||
clk_disable_unprepare(priv->plat->pclk);
|
||||
clk_disable_unprepare(priv->plat->stmmac_clk);
|
||||
}
|
||||
mutex_unlock(&priv->lock);
|
||||
|
||||
@ -4806,8 +4808,10 @@ int stmmac_resume(struct device *dev)
|
||||
} else {
|
||||
pinctrl_pm_select_default_state(priv->device);
|
||||
/* enable the clk previously disabled */
|
||||
clk_enable(priv->plat->stmmac_clk);
|
||||
clk_enable(priv->plat->pclk);
|
||||
clk_prepare_enable(priv->plat->stmmac_clk);
|
||||
clk_prepare_enable(priv->plat->pclk);
|
||||
if (priv->plat->clk_ptp_ref)
|
||||
clk_prepare_enable(priv->plat->clk_ptp_ref);
|
||||
/* reset the phy so that it's ready */
|
||||
if (priv->mii)
|
||||
stmmac_mdio_reset(priv->mii);
|
||||
|
@ -164,7 +164,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
|
||||
/* structure describing a PTP hardware clock */
|
||||
static struct ptp_clock_info stmmac_ptp_clock_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "stmmac_ptp_clock",
|
||||
.name = "stmmac ptp",
|
||||
.max_adj = 62500000,
|
||||
.n_alarm = 0,
|
||||
.n_ext_ts = 0,
|
||||
|
@ -487,8 +487,8 @@ static int stmmac_filter_check(struct stmmac_priv *priv)
|
||||
|
||||
static int stmmac_test_hfilt(struct stmmac_priv *priv)
|
||||
{
|
||||
unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
|
||||
unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
|
||||
unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
|
||||
unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
|
||||
struct stmmac_packet_attrs attr = { };
|
||||
int ret;
|
||||
|
||||
@ -496,6 +496,9 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
ret = dev_mc_add(priv->dev, gd_addr);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -573,6 +576,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
|
||||
|
||||
if (stmmac_filter_check(priv))
|
||||
return -EOPNOTSUPP;
|
||||
if (!priv->hw->multicast_filter_bins)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Remove all MC addresses */
|
||||
__dev_mc_unsync(priv->dev, NULL);
|
||||
@ -611,6 +616,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
|
||||
|
||||
if (stmmac_filter_check(priv))
|
||||
return -EOPNOTSUPP;
|
||||
if (!priv->hw->multicast_filter_bins)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Remove all UC addresses */
|
||||
__dev_uc_unsync(priv->dev, NULL);
|
||||
|
@ -510,7 +510,7 @@ static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct {
|
||||
static struct {
|
||||
int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
|
||||
struct stmmac_flow_entry *entry);
|
||||
} tc_flow_parsers[] = {
|
||||
|
@ -722,7 +722,7 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
|
||||
* cpdma_chan_split_pool - Splits ctrl pool between all channels.
|
||||
* Has to be called under ctlr lock
|
||||
*/
|
||||
int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
|
||||
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
|
||||
{
|
||||
int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
|
||||
int free_rx_num = 0, free_tx_num = 0;
|
||||
|
@ -241,8 +241,8 @@ static struct pernet_operations nsim_fib_net_ops = {
|
||||
|
||||
void nsim_fib_exit(void)
|
||||
{
|
||||
unregister_pernet_subsys(&nsim_fib_net_ops);
|
||||
unregister_fib_notifier(&nsim_fib_nb);
|
||||
unregister_pernet_subsys(&nsim_fib_net_ops);
|
||||
}
|
||||
|
||||
int nsim_fib_init(void)
|
||||
@ -258,6 +258,7 @@ int nsim_fib_init(void)
|
||||
err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
|
||||
if (err < 0) {
|
||||
pr_err("Failed to register fib notifier\n");
|
||||
unregister_pernet_subsys(&nsim_fib_net_ops);
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
|
@ -572,6 +572,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
|
||||
.name = _name, \
|
||||
/* PHY_BASIC_FEATURES */ \
|
||||
.flags = PHY_IS_INTERNAL, \
|
||||
.soft_reset = genphy_soft_reset, \
|
||||
.config_init = bcm7xxx_config_init, \
|
||||
.suspend = bcm7xxx_suspend, \
|
||||
.resume = bcm7xxx_config_init, \
|
||||
|
@ -341,6 +341,35 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
|
||||
return genphy_config_aneg(phydev);
|
||||
}
|
||||
|
||||
static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
|
||||
const u32 ksz_phy_id)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
|
||||
return 0;
|
||||
|
||||
ret = phy_read(phydev, MII_BMSR);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* KSZ8051 PHY and KSZ8794/KSZ8795/KSZ8765 switch share the same
|
||||
* exact PHY ID. However, they can be told apart by the extended
|
||||
* capability registers presence. The KSZ8051 PHY has them while
|
||||
* the switch does not.
|
||||
*/
|
||||
ret &= BMSR_ERCAP;
|
||||
if (ksz_phy_id == PHY_ID_KSZ8051)
|
||||
return ret;
|
||||
else
|
||||
return !ret;
|
||||
}
|
||||
|
||||
static int ksz8051_match_phy_device(struct phy_device *phydev)
|
||||
{
|
||||
return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
|
||||
}
|
||||
|
||||
static int ksz8081_config_init(struct phy_device *phydev)
|
||||
{
|
||||
/* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
|
||||
@ -364,6 +393,11 @@ static int ksz8061_config_init(struct phy_device *phydev)
|
||||
return kszphy_config_init(phydev);
|
||||
}
|
||||
|
||||
static int ksz8795_match_phy_device(struct phy_device *phydev)
|
||||
{
|
||||
return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
|
||||
}
|
||||
|
||||
static int ksz9021_load_values_from_of(struct phy_device *phydev,
|
||||
const struct device_node *of_node,
|
||||
u16 reg,
|
||||
@ -1017,8 +1051,6 @@ static struct phy_driver ksphy_driver[] = {
|
||||
.suspend = genphy_suspend,
|
||||
.resume = genphy_resume,
|
||||
}, {
|
||||
.phy_id = PHY_ID_KSZ8051,
|
||||
.phy_id_mask = MICREL_PHY_ID_MASK,
|
||||
.name = "Micrel KSZ8051",
|
||||
/* PHY_BASIC_FEATURES */
|
||||
.driver_data = &ksz8051_type,
|
||||
@ -1029,6 +1061,7 @@ static struct phy_driver ksphy_driver[] = {
|
||||
.get_sset_count = kszphy_get_sset_count,
|
||||
.get_strings = kszphy_get_strings,
|
||||
.get_stats = kszphy_get_stats,
|
||||
.match_phy_device = ksz8051_match_phy_device,
|
||||
.suspend = genphy_suspend,
|
||||
.resume = genphy_resume,
|
||||
}, {
|
||||
@ -1141,13 +1174,12 @@ static struct phy_driver ksphy_driver[] = {
|
||||
.suspend = genphy_suspend,
|
||||
.resume = genphy_resume,
|
||||
}, {
|
||||
.phy_id = PHY_ID_KSZ8795,
|
||||
.phy_id_mask = MICREL_PHY_ID_MASK,
|
||||
.name = "Micrel KSZ8795",
|
||||
.name = "Micrel KSZ87XX Switch",
|
||||
/* PHY_BASIC_FEATURES */
|
||||
.config_init = kszphy_config_init,
|
||||
.config_aneg = ksz8873mll_config_aneg,
|
||||
.read_status = ksz8873mll_read_status,
|
||||
.match_phy_device = ksz8795_match_phy_device,
|
||||
.suspend = genphy_suspend,
|
||||
.resume = genphy_resume,
|
||||
}, {
|
||||
|
@ -323,6 +323,8 @@ int genphy_c45_read_pma(struct phy_device *phydev)
|
||||
{
|
||||
int val;
|
||||
|
||||
linkmode_zero(phydev->lp_advertising);
|
||||
|
||||
val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
|
||||
if (val < 0)
|
||||
return val;
|
||||
|
@ -572,9 +572,6 @@ int phy_start_aneg(struct phy_device *phydev)
|
||||
if (AUTONEG_DISABLE == phydev->autoneg)
|
||||
phy_sanitize_settings(phydev);
|
||||
|
||||
/* Invalidate LP advertising flags */
|
||||
linkmode_zero(phydev->lp_advertising);
|
||||
|
||||
err = phy_config_aneg(phydev);
|
||||
if (err < 0)
|
||||
goto out_unlock;
|
||||
|
@ -1787,7 +1787,14 @@ int genphy_read_lpa(struct phy_device *phydev)
|
||||
{
|
||||
int lpa, lpagb;
|
||||
|
||||
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
|
||||
if (phydev->autoneg == AUTONEG_ENABLE) {
|
||||
if (!phydev->autoneg_complete) {
|
||||
mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
|
||||
0);
|
||||
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (phydev->is_gigabit_capable) {
|
||||
lpagb = phy_read(phydev, MII_STAT1000);
|
||||
if (lpagb < 0)
|
||||
@ -1815,6 +1822,8 @@ int genphy_read_lpa(struct phy_device *phydev)
|
||||
return lpa;
|
||||
|
||||
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
|
||||
} else {
|
||||
linkmode_zero(phydev->lp_advertising);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -576,7 +576,7 @@ static int phylink_register_sfp(struct phylink *pl,
|
||||
|
||||
/**
|
||||
* phylink_create() - create a phylink instance
|
||||
* @ndev: a pointer to the &struct net_device
|
||||
* @config: a pointer to the target &struct phylink_config
|
||||
* @fwnode: a pointer to a &struct fwnode_handle describing the network
|
||||
* interface
|
||||
* @iface: the desired link mode defined by &typedef phy_interface_t
|
||||
|
@ -526,8 +526,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
|
||||
e = tun_flow_find(head, rxhash);
|
||||
if (likely(e)) {
|
||||
/* TODO: keep queueing to old queue until it's empty? */
|
||||
if (e->queue_index != queue_index)
|
||||
e->queue_index = queue_index;
|
||||
if (READ_ONCE(e->queue_index) != queue_index)
|
||||
WRITE_ONCE(e->queue_index, queue_index);
|
||||
if (e->updated != jiffies)
|
||||
e->updated = jiffies;
|
||||
sock_rps_record_flow_hash(e->rps_rxhash);
|
||||
|
@ -186,7 +186,7 @@ struct hso_tiocmget {
|
||||
int intr_completed;
|
||||
struct usb_endpoint_descriptor *endp;
|
||||
struct urb *urb;
|
||||
struct hso_serial_state_notification serial_state_notification;
|
||||
struct hso_serial_state_notification *serial_state_notification;
|
||||
u16 prev_UART_state_bitmap;
|
||||
struct uart_icount icount;
|
||||
};
|
||||
@ -1432,7 +1432,7 @@ static int tiocmget_submit_urb(struct hso_serial *serial,
|
||||
usb_rcvintpipe(usb,
|
||||
tiocmget->endp->
|
||||
bEndpointAddress & 0x7F),
|
||||
&tiocmget->serial_state_notification,
|
||||
tiocmget->serial_state_notification,
|
||||
sizeof(struct hso_serial_state_notification),
|
||||
tiocmget_intr_callback, serial,
|
||||
tiocmget->endp->bInterval);
|
||||
@ -1479,7 +1479,7 @@ static void tiocmget_intr_callback(struct urb *urb)
|
||||
/* wIndex should be the USB interface number of the port to which the
|
||||
* notification applies, which should always be the Modem port.
|
||||
*/
|
||||
serial_state_notification = &tiocmget->serial_state_notification;
|
||||
serial_state_notification = tiocmget->serial_state_notification;
|
||||
if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
|
||||
serial_state_notification->bNotification != B_NOTIFICATION ||
|
||||
le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
|
||||
@ -2565,6 +2565,8 @@ static void hso_free_tiomget(struct hso_serial *serial)
|
||||
usb_free_urb(tiocmget->urb);
|
||||
tiocmget->urb = NULL;
|
||||
serial->tiocmget = NULL;
|
||||
kfree(tiocmget->serial_state_notification);
|
||||
tiocmget->serial_state_notification = NULL;
|
||||
kfree(tiocmget);
|
||||
}
|
||||
}
|
||||
@ -2615,10 +2617,13 @@ static struct hso_device *hso_create_bulk_serial_device(
|
||||
num_urbs = 2;
|
||||
serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
|
||||
GFP_KERNEL);
|
||||
serial->tiocmget->serial_state_notification
|
||||
= kzalloc(sizeof(struct hso_serial_state_notification),
|
||||
GFP_KERNEL);
|
||||
/* it isn't going to break our heart if serial->tiocmget
|
||||
* allocation fails don't bother checking this.
|
||||
*/
|
||||
if (serial->tiocmget) {
|
||||
if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
|
||||
tiocmget = serial->tiocmget;
|
||||
tiocmget->endp = hso_get_ep(interface,
|
||||
USB_ENDPOINT_XFER_INT,
|
||||
|
@ -3782,10 +3782,14 @@ static int lan78xx_probe(struct usb_interface *intf,
|
||||
/* driver requires remote-wakeup capability during autosuspend. */
|
||||
intf->needs_remote_wakeup = 1;
|
||||
|
||||
ret = lan78xx_phy_init(dev);
|
||||
if (ret < 0)
|
||||
goto out4;
|
||||
|
||||
ret = register_netdev(netdev);
|
||||
if (ret != 0) {
|
||||
netif_err(dev, probe, netdev, "couldn't register the device\n");
|
||||
goto out4;
|
||||
goto out5;
|
||||
}
|
||||
|
||||
usb_set_intfdata(intf, dev);
|
||||
@ -3798,14 +3802,10 @@ static int lan78xx_probe(struct usb_interface *intf,
|
||||
pm_runtime_set_autosuspend_delay(&udev->dev,
|
||||
DEFAULT_AUTOSUSPEND_DELAY);
|
||||
|
||||
ret = lan78xx_phy_init(dev);
|
||||
if (ret < 0)
|
||||
goto out5;
|
||||
|
||||
return 0;
|
||||
|
||||
out5:
|
||||
unregister_netdev(netdev);
|
||||
phy_disconnect(netdev->phydev);
|
||||
out4:
|
||||
usb_free_urb(dev->urb_intr);
|
||||
out3:
|
||||
|
@ -1327,6 +1327,7 @@ static const struct usb_device_id products[] = {
|
||||
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
|
||||
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
|
||||
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
|
||||
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
|
||||
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
|
||||
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
|
||||
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
|
||||
|
@ -335,7 +335,7 @@ static void sr_set_multicast(struct net_device *net)
|
||||
static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
|
||||
{
|
||||
struct usbnet *dev = netdev_priv(net);
|
||||
__le16 res;
|
||||
__le16 res = 0;
|
||||
|
||||
mutex_lock(&dev->phy_mutex);
|
||||
sr_set_sw_mii(dev);
|
||||
|
@ -2118,12 +2118,15 @@ static int ath10k_init_uart(struct ath10k *ar)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!uart_print && ar->hw_params.uart_pin_workaround) {
|
||||
ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
|
||||
ar->hw_params.uart_pin);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set UART TX pin: %d", ret);
|
||||
return ret;
|
||||
if (!uart_print) {
|
||||
if (ar->hw_params.uart_pin_workaround) {
|
||||
ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
|
||||
ar->hw_params.uart_pin);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set UART TX pin: %d",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -162,12 +162,13 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc)
|
||||
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
|
||||
&tbl_rev);
|
||||
if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
|
||||
if (IS_ERR(wifi_pkg)) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
|
||||
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
|
||||
tbl_rev != 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
@ -224,12 +225,13 @@ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
|
||||
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
|
||||
&tbl_rev);
|
||||
if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
|
||||
if (IS_ERR(wifi_pkg)) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
|
||||
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
|
||||
tbl_rev != 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
@ -646,6 +646,7 @@ static struct scatterlist *alloc_sgtable(int size)
|
||||
if (new_page)
|
||||
__free_page(new_page);
|
||||
}
|
||||
kfree(table);
|
||||
return NULL;
|
||||
}
|
||||
alloc_size = min_t(int, size, PAGE_SIZE);
|
||||
|
@ -112,38 +112,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf);
|
||||
*/
|
||||
static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
|
||||
{
|
||||
return ofs + trans->cfg->trans.umac_prph_offset;
|
||||
return ofs + trans->trans_cfg->umac_prph_offset;
|
||||
}
|
||||
|
||||
static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
|
||||
{
|
||||
return iwl_read_prph_no_grab(trans, ofs +
|
||||
trans->cfg->trans.umac_prph_offset);
|
||||
trans->trans_cfg->umac_prph_offset);
|
||||
}
|
||||
|
||||
static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
|
||||
{
|
||||
return iwl_read_prph(trans, ofs + trans->cfg->trans.umac_prph_offset);
|
||||
return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset);
|
||||
}
|
||||
|
||||
static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
|
||||
u32 val)
|
||||
{
|
||||
iwl_write_prph_no_grab(trans, ofs + trans->cfg->trans.umac_prph_offset,
|
||||
iwl_write_prph_no_grab(trans, ofs + trans->trans_cfg->umac_prph_offset,
|
||||
val);
|
||||
}
|
||||
|
||||
static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
|
||||
u32 val)
|
||||
{
|
||||
iwl_write_prph(trans, ofs + trans->cfg->trans.umac_prph_offset, val);
|
||||
iwl_write_prph(trans, ofs + trans->trans_cfg->umac_prph_offset, val);
|
||||
}
|
||||
|
||||
static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
|
||||
u32 bits, u32 mask, int timeout)
|
||||
{
|
||||
return iwl_poll_prph_bit(trans, addr +
|
||||
trans->cfg->trans.umac_prph_offset,
|
||||
trans->trans_cfg->umac_prph_offset,
|
||||
bits, mask, timeout);
|
||||
}
|
||||
|
||||
|
@ -420,6 +420,9 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (mvm->trans->cfg->tx_with_siso_diversity)
|
||||
init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
mvm->rfkill_safe_init_done = false;
|
||||
@ -694,12 +697,13 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
|
||||
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
|
||||
ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
|
||||
if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
|
||||
if (IS_ERR(wifi_pkg)) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
|
||||
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
|
||||
tbl_rev != 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
@ -731,13 +735,14 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
|
||||
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
|
||||
ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
|
||||
if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
|
||||
if (IS_ERR(wifi_pkg)) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
|
||||
(wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
|
||||
(wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) ||
|
||||
tbl_rev != 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
@ -791,11 +796,16 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
|
||||
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
|
||||
ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
|
||||
if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
|
||||
if (IS_ERR(wifi_pkg)) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (tbl_rev != 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
mvm->geo_rev = tbl_rev;
|
||||
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
|
||||
for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
|
||||
@ -889,15 +899,17 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
|
||||
* firmware versions. Unfortunately, we don't have a TLV API
|
||||
* flag to rely on, so rely on the major version which is in
|
||||
* the first byte of ucode_ver. This was implemented
|
||||
* initially on version 38 and then backported to29 and 17.
|
||||
* The intention was to have it in 36 as well, but not all
|
||||
* 8000 family got this feature enabled. The 8000 family is
|
||||
* the only one using version 36, so skip this version
|
||||
* entirely.
|
||||
* initially on version 38 and then backported to 17. It was
|
||||
* also backported to 29, but only for 7265D devices. The
|
||||
* intention was to have it in 36 as well, but not all 8000
|
||||
* family got this feature enabled. The 8000 family is the
|
||||
* only one using version 36, so skip this version entirely.
|
||||
*/
|
||||
return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
|
||||
IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
|
||||
IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
|
||||
IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
|
||||
(IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
|
||||
((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
|
||||
CSR_HW_REV_TYPE_7265D));
|
||||
}
|
||||
|
||||
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
|
||||
@ -1020,11 +1032,16 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
|
||||
ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
|
||||
|
||||
if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
|
||||
if (IS_ERR(wifi_pkg)) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (tbl_rev != 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
enabled = &wifi_pkg->package.elements[1];
|
||||
if (enabled->type != ACPI_TYPE_INTEGER ||
|
||||
(enabled->integer.value != 0 && enabled->integer.value != 1)) {
|
||||
|
@ -4881,11 +4881,11 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
|
||||
if (!iwl_mvm_has_new_rx_api(mvm))
|
||||
return;
|
||||
|
||||
notif->cookie = mvm->queue_sync_cookie;
|
||||
|
||||
if (notif->sync)
|
||||
if (notif->sync) {
|
||||
notif->cookie = mvm->queue_sync_cookie;
|
||||
atomic_set(&mvm->queue_sync_counter,
|
||||
mvm->trans->num_rx_queues);
|
||||
}
|
||||
|
||||
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
|
||||
size, !notif->sync);
|
||||
@ -4905,7 +4905,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
|
||||
|
||||
out:
|
||||
atomic_set(&mvm->queue_sync_counter, 0);
|
||||
mvm->queue_sync_cookie++;
|
||||
if (notif->sync)
|
||||
mvm->queue_sync_cookie++;
|
||||
}
|
||||
|
||||
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
|
||||
|
@ -107,13 +107,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
|
||||
|
||||
/* allocate ucode sections in dram and set addresses */
|
||||
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
|
||||
if (ret) {
|
||||
dma_free_coherent(trans->dev,
|
||||
sizeof(*prph_scratch),
|
||||
prph_scratch,
|
||||
trans_pcie->prph_scratch_dma_addr);
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
goto err_free_prph_scratch;
|
||||
|
||||
|
||||
/* Allocate prph information
|
||||
* currently we don't assign to the prph info anything, but it would get
|
||||
@ -121,16 +117,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
|
||||
prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
|
||||
&trans_pcie->prph_info_dma_addr,
|
||||
GFP_KERNEL);
|
||||
if (!prph_info)
|
||||
return -ENOMEM;
|
||||
if (!prph_info) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_prph_scratch;
|
||||
}
|
||||
|
||||
/* Allocate context info */
|
||||
ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
|
||||
sizeof(*ctxt_info_gen3),
|
||||
&trans_pcie->ctxt_info_dma_addr,
|
||||
GFP_KERNEL);
|
||||
if (!ctxt_info_gen3)
|
||||
return -ENOMEM;
|
||||
if (!ctxt_info_gen3) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_prph_info;
|
||||
}
|
||||
|
||||
ctxt_info_gen3->prph_info_base_addr =
|
||||
cpu_to_le64(trans_pcie->prph_info_dma_addr);
|
||||
@ -186,6 +186,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
|
||||
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_prph_info:
|
||||
dma_free_coherent(trans->dev,
|
||||
sizeof(*prph_info),
|
||||
prph_info,
|
||||
trans_pcie->prph_info_dma_addr);
|
||||
|
||||
err_free_prph_scratch:
|
||||
dma_free_coherent(trans->dev,
|
||||
sizeof(*prph_scratch),
|
||||
prph_scratch,
|
||||
trans_pcie->prph_scratch_dma_addr);
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
||||
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
|
||||
|
@ -513,31 +513,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
|
||||
|
||||
/* 9000 Series */
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
|
||||
@ -643,34 +645,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
|
||||
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
|
||||
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
|
||||
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
|
||||
{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
|
||||
@ -726,62 +728,60 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
|
||||
@ -821,34 +821,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
|
||||
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
|
||||
{IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
|
||||
|
||||
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
|
||||
|
@ -3272,11 +3272,17 @@ static struct iwl_trans_dump_data
|
||||
ptr = cmdq->write_ptr;
|
||||
for (i = 0; i < cmdq->n_window; i++) {
|
||||
u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
|
||||
u8 tfdidx;
|
||||
u32 caplen, cmdlen;
|
||||
|
||||
if (trans->trans_cfg->use_tfh)
|
||||
tfdidx = idx;
|
||||
else
|
||||
tfdidx = ptr;
|
||||
|
||||
cmdlen = iwl_trans_pcie_get_cmdlen(trans,
|
||||
cmdq->tfds +
|
||||
tfd_size * ptr);
|
||||
(u8 *)cmdq->tfds +
|
||||
tfd_size * tfdidx);
|
||||
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
|
||||
|
||||
if (cmdlen) {
|
||||
@ -3450,6 +3456,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
|
||||
spin_lock_init(&trans_pcie->reg_lock);
|
||||
mutex_init(&trans_pcie->mutex);
|
||||
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
|
||||
|
||||
trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
|
||||
WQ_HIGHPRI | WQ_UNBOUND, 1);
|
||||
if (!trans_pcie->rba.alloc_wq) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free_trans;
|
||||
}
|
||||
INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
|
||||
|
||||
trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
|
||||
if (!trans_pcie->tso_hdr_page) {
|
||||
ret = -ENOMEM;
|
||||
@ -3584,10 +3599,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
|
||||
trans_pcie->inta_mask = CSR_INI_SET_MASK;
|
||||
}
|
||||
|
||||
trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
|
||||
WQ_HIGHPRI | WQ_UNBOUND, 1);
|
||||
INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
|
||||
mutex_init(&trans_pcie->fw_mon_data.mutex);
|
||||
@ -3599,6 +3610,8 @@ out_free_ict:
|
||||
iwl_pcie_free_ict(trans);
|
||||
out_no_pci:
|
||||
free_percpu(trans_pcie->tso_hdr_page);
|
||||
destroy_workqueue(trans_pcie->rba.alloc_wq);
|
||||
out_free_trans:
|
||||
iwl_trans_free(trans);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
@ -4026,7 +4026,7 @@ static int __init init_mac80211_hwsim(void)
|
||||
err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
|
||||
if (err < 0) {
|
||||
rtnl_unlock();
|
||||
goto out_free_radios;
|
||||
goto out_free_mon;
|
||||
}
|
||||
|
||||
err = register_netdevice(hwsim_mon);
|
||||
|
@ -23,7 +23,6 @@
|
||||
#include <linux/leds.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/input-polldev.h>
|
||||
#include <linux/kfifo.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/average.h>
|
||||
|
@ -555,7 +555,7 @@ static ssize_t rt2x00debug_write_restart_hw(struct file *file,
|
||||
{
|
||||
struct rt2x00debug_intf *intf = file->private_data;
|
||||
struct rt2x00_dev *rt2x00dev = intf->rt2x00dev;
|
||||
static unsigned long last_reset;
|
||||
static unsigned long last_reset = INITIAL_JIFFIES;
|
||||
|
||||
if (!rt2x00_has_cap_restart_hw(rt2x00dev))
|
||||
return -EOPNOTSUPP;
|
||||
|
@ -719,7 +719,6 @@ err_unmap:
|
||||
xenvif_unmap_frontend_data_rings(queue);
|
||||
netif_napi_del(&queue->napi);
|
||||
err:
|
||||
module_put(THIS_MODULE);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -547,18 +547,25 @@ static int pn533_usb_probe(struct usb_interface *interface,
|
||||
|
||||
rc = pn533_finalize_setup(priv);
|
||||
if (rc)
|
||||
goto error;
|
||||
goto err_deregister;
|
||||
|
||||
usb_set_intfdata(interface, phy);
|
||||
|
||||
return 0;
|
||||
|
||||
err_deregister:
|
||||
pn533_unregister_device(phy->priv);
|
||||
error:
|
||||
usb_kill_urb(phy->in_urb);
|
||||
usb_kill_urb(phy->out_urb);
|
||||
usb_kill_urb(phy->ack_urb);
|
||||
|
||||
usb_free_urb(phy->in_urb);
|
||||
usb_free_urb(phy->out_urb);
|
||||
usb_free_urb(phy->ack_urb);
|
||||
usb_put_dev(phy->udev);
|
||||
kfree(in_buf);
|
||||
kfree(phy->ack_buffer);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -97,8 +97,8 @@ config PTP_1588_CLOCK_PCH
|
||||
help
|
||||
This driver adds support for using the PCH EG20T as a PTP
|
||||
clock. The hardware supports time stamping of PTP packets
|
||||
when using the end-to-end delay (E2E) mechansim. The peer
|
||||
delay mechansim (P2P) is not supported.
|
||||
when using the end-to-end delay (E2E) mechanism. The peer
|
||||
delay mechanism (P2P) is not supported.
|
||||
|
||||
This clock is only useful if your PTP programs are getting
|
||||
hardware time stamps on the PTP Ethernet packets using the
|
||||
|
@ -2021,10 +2021,10 @@ static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
|
||||
static void qeth_l2_vnicc_init(struct qeth_card *card)
|
||||
{
|
||||
u32 *timeout = &card->options.vnicc.learning_timeout;
|
||||
bool enable, error = false;
|
||||
unsigned int chars_len, i;
|
||||
unsigned long chars_tmp;
|
||||
u32 sup_cmds, vnicc;
|
||||
bool enable, error;
|
||||
|
||||
QETH_CARD_TEXT(card, 2, "vniccini");
|
||||
/* reset rx_bcast */
|
||||
@ -2045,17 +2045,24 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
|
||||
chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
|
||||
for_each_set_bit(i, &chars_tmp, chars_len) {
|
||||
vnicc = BIT(i);
|
||||
qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds);
|
||||
if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) ||
|
||||
!(sup_cmds & IPA_VNICC_GET_TIMEOUT))
|
||||
if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
|
||||
sup_cmds = 0;
|
||||
error = true;
|
||||
}
|
||||
if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
|
||||
(sup_cmds & IPA_VNICC_GET_TIMEOUT))
|
||||
card->options.vnicc.getset_timeout_sup |= vnicc;
|
||||
else
|
||||
card->options.vnicc.getset_timeout_sup &= ~vnicc;
|
||||
if (!(sup_cmds & IPA_VNICC_ENABLE) ||
|
||||
!(sup_cmds & IPA_VNICC_DISABLE))
|
||||
if ((sup_cmds & IPA_VNICC_ENABLE) &&
|
||||
(sup_cmds & IPA_VNICC_DISABLE))
|
||||
card->options.vnicc.set_char_sup |= vnicc;
|
||||
else
|
||||
card->options.vnicc.set_char_sup &= ~vnicc;
|
||||
}
|
||||
/* enforce assumed default values and recover settings, if changed */
|
||||
error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
|
||||
timeout);
|
||||
error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
|
||||
timeout);
|
||||
chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
|
||||
chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
|
||||
chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
|
||||
|
@ -775,7 +775,7 @@ static int pvcalls_back_poll(struct xenbus_device *dev,
|
||||
mappass->reqcopy = *req;
|
||||
icsk = inet_csk(mappass->sock->sk);
|
||||
queue = &icsk->icsk_accept_queue;
|
||||
data = queue->rskq_accept_head != NULL;
|
||||
data = READ_ONCE(queue->rskq_accept_head) != NULL;
|
||||
if (data) {
|
||||
mappass->reqcopy.cmd = 0;
|
||||
ret = 0;
|
||||
|
@ -31,7 +31,7 @@
|
||||
#define PHY_ID_KSZ886X 0x00221430
|
||||
#define PHY_ID_KSZ8863 0x00221435
|
||||
|
||||
#define PHY_ID_KSZ8795 0x00221550
|
||||
#define PHY_ID_KSZ87XX 0x00221550
|
||||
|
||||
#define PHY_ID_KSZ9477 0x00221631
|
||||
|
||||
|
@ -3510,8 +3510,9 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
|
||||
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
|
||||
int skb_vlan_pop(struct sk_buff *skb);
|
||||
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
|
||||
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
|
||||
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
|
||||
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
|
||||
int mac_len);
|
||||
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
|
||||
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
|
||||
int skb_mpls_dec_ttl(struct sk_buff *skb);
|
||||
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
|
||||
|
@ -393,7 +393,7 @@ struct tcp_sock {
|
||||
/* fastopen_rsk points to request_sock that resulted in this big
|
||||
* socket. Used to retransmit SYNACKs etc.
|
||||
*/
|
||||
struct request_sock *fastopen_rsk;
|
||||
struct request_sock __rcu *fastopen_rsk;
|
||||
u32 *saved_syn;
|
||||
};
|
||||
|
||||
@ -447,8 +447,8 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
|
||||
|
||||
static inline bool tcp_passive_fastopen(const struct sock *sk)
|
||||
{
|
||||
return (sk->sk_state == TCP_SYN_RECV &&
|
||||
tcp_sk(sk)->fastopen_rsk != NULL);
|
||||
return sk->sk_state == TCP_SYN_RECV &&
|
||||
rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
|
||||
}
|
||||
|
||||
static inline void fastopen_queue_tune(struct sock *sk, int backlog)
|
||||
|
@ -5549,6 +5549,14 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
|
||||
*/
|
||||
const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
|
||||
|
||||
/**
|
||||
* regulatory_pre_cac_allowed - check if pre-CAC allowed in the current regdom
|
||||
* @wiphy: wiphy for which pre-CAC capability is checked.
|
||||
*
|
||||
* Pre-CAC is allowed only in some regdomains (notable ETSI).
|
||||
*/
|
||||
bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
|
||||
|
||||
/**
|
||||
* DOC: Internal regulatory db functions
|
||||
*
|
||||
|
@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
|
||||
|
||||
/* Access to a connection */
|
||||
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
|
||||
int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
|
||||
void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
|
||||
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
|
||||
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
|
||||
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
|
||||
|
@@ -52,6 +52,9 @@ struct bpf_prog;
 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 
 struct net {
+	/* First cache line can be often dirtied.
+	 * Do not place here read-mostly fields.
+	 */
 	refcount_t passive;		/* To decide when the network
 					 * namespace should be freed.
 					 */
@@ -60,7 +63,13 @@ struct net {
 					 */
 	spinlock_t rules_mod_lock;
 
-	u32 hash_mix;
+	unsigned int dev_unreg_count;
+
+	unsigned int dev_base_seq;	/* protected by rtnl_mutex */
+	int ifindex;
+
+	spinlock_t nsid_lock;
+	atomic_t fnhe_genid;
 
 	struct list_head list;		/* list of network namespaces */
 	struct list_head exit_list;	/* To linked to call pernet exit
@@ -76,11 +85,11 @@ struct net {
 #endif
 	struct user_namespace *user_ns;	/* Owning user namespace */
 	struct ucounts *ucounts;
-	spinlock_t nsid_lock;
 	struct idr netns_ids;
 
 	struct ns_common ns;
 
+	struct list_head dev_base_head;
 	struct proc_dir_entry *proc_net;
 	struct proc_dir_entry *proc_net_stat;
 
@@ -93,17 +102,18 @@ struct net {
 
 	struct uevent_sock *uevent_sock;	/* uevent socket */
 
-	struct list_head dev_base_head;
 	struct hlist_head *dev_name_head;
 	struct hlist_head *dev_index_head;
-	unsigned int dev_base_seq;	/* protected by rtnl_mutex */
-	int ifindex;
-	unsigned int dev_unreg_count;
+	/* Note that @hash_mix can be read millions times per second,
+	 * it is critical that it is on a read_mostly cache line.
+	 */
+	u32 hash_mix;
+
+	struct net_device *loopback_dev;	/* The loopback */
 
 	/* core fib_rules */
 	struct list_head rules_ops;
 
-	struct net_device *loopback_dev;	/* The loopback */
 	struct netns_core core;
 	struct netns_mib mib;
 	struct netns_packet packet;
@@ -171,7 +181,6 @@ struct net {
 	struct sock *crypto_nlsk;
 #endif
 	struct sock *diag_nlsk;
-	atomic_t fnhe_genid;
 } __randomize_layout;
 
 #include <linux/seq_file_net.h>
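The struct net reshuffle above is the "reorder 'struct net' fields to avoid false sharing" change from this merge: frequently dirtied fields are grouped near the top, while read-mostly ones such as hash_mix move onto their own cache line. The underlying idea, in an invented sketch (field names are illustrative, not kernel code):

#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/types.h>

/* A write to 'refcnt' invalidates its whole cache line on every other CPU
 * that has it cached; keeping the rarely written, hot-read 'seed' on a
 * separate cache line keeps those reads from being slowed down.
 */
struct example_ns {
	atomic_t refcnt;			/* frequently written */

	u32 seed ____cacheline_aligned;		/* read often, almost never written */
};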
@@ -185,7 +185,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 
 static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 {
-	return queue->rskq_accept_head == NULL;
+	return READ_ONCE(queue->rskq_accept_head) == NULL;
 }
 
 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
@@ -197,7 +197,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
 	req = queue->rskq_accept_head;
 	if (req) {
 		sk_acceptq_removed(parent);
-		queue->rskq_accept_head = req->dl_next;
+		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
 		if (queue->rskq_accept_head == NULL)
 			queue->rskq_accept_tail = NULL;
 	}
@@ -610,4 +610,9 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
 	return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
 }
 
+static inline bool sctp_newsk_ready(const struct sock *sk)
+{
+	return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
+}
+
 #endif /* __net_sctp_h__ */
@@ -878,12 +878,17 @@ static inline bool sk_acceptq_is_full(const struct sock *sk)
  */
 static inline int sk_stream_min_wspace(const struct sock *sk)
 {
-	return sk->sk_wmem_queued >> 1;
+	return READ_ONCE(sk->sk_wmem_queued) >> 1;
 }
 
 static inline int sk_stream_wspace(const struct sock *sk)
 {
-	return sk->sk_sndbuf - sk->sk_wmem_queued;
+	return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
+}
+
+static inline void sk_wmem_queued_add(struct sock *sk, int val)
+{
+	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
 }
 
 void sk_stream_write_space(struct sock *sk);
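The sk_wmem_queued and sk_sndbuf changes above, and most of the READ_ONCE()/WRITE_ONCE() conversions that follow, apply one pattern: a field written under the socket lock but read locklessly from other contexts gets WRITE_ONCE() on the writer side and READ_ONCE() on the reader side, so KCSAN treats the data race as intentional and the compiler cannot tear, fuse or refetch the access. A generic sketch (names invented for illustration):

#include <linux/compiler.h>

struct queue_stats {
	int queued;		/* written under the owner's lock, read without it */
};

/* Writer side: caller holds the owner's lock. */
static void stats_add(struct queue_stats *s, int val)
{
	WRITE_ONCE(s->queued, s->queued + val);
}

/* Reader side: may run on another CPU with no lock held. */
static int stats_read(const struct queue_stats *s)
{
	return READ_ONCE(s->queued);
}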
@@ -1207,7 +1212,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 
 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
 {
-	if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+	if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
 		return false;
 
 	return sk->sk_prot->stream_memory_free ?
@@ -1467,7 +1472,7 @@ DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
-	sk->sk_wmem_queued -= skb->truesize;
+	sk_wmem_queued_add(sk, -skb->truesize);
 	sk_mem_uncharge(sk, skb->truesize);
 	if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
 	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
@@ -2014,7 +2019,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
 	skb->len += copy;
 	skb->data_len += copy;
 	skb->truesize += copy;
-	sk->sk_wmem_queued += copy;
+	sk_wmem_queued_add(sk, copy);
 	sk_mem_charge(sk, copy);
 	return 0;
 }
@@ -2220,10 +2225,14 @@ static inline void sk_wake_async(const struct sock *sk, int how, int band)
 
 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 {
-	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
-		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
-		sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
-	}
+	u32 val;
+
+	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+		return;
+
+	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+
+	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 }
 
 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
@@ -2251,7 +2260,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
  */
 static inline bool sock_writeable(const struct sock *sk)
 {
-	return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+	return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
 }
 
 static inline gfp_t gfp_any(void)
@@ -2271,7 +2280,9 @@ static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
 
 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
 {
-	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
+	int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
+
+	return v ?: 1;
 }
 
 /* Alas, with timeout socket operations are not restartable.
@@ -258,7 +258,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk)
 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 		return true;
 
-	return tcp_memory_pressure;
+	return READ_ONCE(tcp_memory_pressure);
 }
 /*
  * The next routines deal with comparing 32 bit unsigned ints
@@ -1380,13 +1380,14 @@ static inline int tcp_win_from_space(const struct sock *sk, int space)
 /* Note: caller must be prepared to deal with negative returns */
 static inline int tcp_space(const struct sock *sk)
 {
-	return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
+	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+				  READ_ONCE(sk->sk_backlog.len) -
 				  atomic_read(&sk->sk_rmem_alloc));
 }
 
 static inline int tcp_full_space(const struct sock *sk)
 {
-	return tcp_win_from_space(sk, sk->sk_rcvbuf);
+	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
 extern void tcp_openreq_init_rwin(struct request_sock *req,
@@ -1916,7 +1917,8 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+	u32 notsent_bytes = READ_ONCE(tp->write_seq) -
+			    READ_ONCE(tp->snd_nxt);
 
 	return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
 }
@@ -519,10 +519,10 @@ TRACE_EVENT(rxrpc_local,
 	    );
 
 TRACE_EVENT(rxrpc_peer,
-	    TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op,
+	    TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op,
 		     int usage, const void *where),
 
-	    TP_ARGS(peer, op, usage, where),
+	    TP_ARGS(peer_debug_id, op, usage, where),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int, peer )
@@ -532,7 +532,7 @@ TRACE_EVENT(rxrpc_peer,
 			     ),
 
 	    TP_fast_assign(
-		    __entry->peer = peer->debug_id;
+		    __entry->peer = peer_debug_id;
 		    __entry->op = op;
 		    __entry->usage = usage;
 		    __entry->where = where;
@@ -546,10 +546,10 @@ TRACE_EVENT(rxrpc_peer,
 	    );
 
 TRACE_EVENT(rxrpc_conn,
-	    TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+	    TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op,
 		     int usage, const void *where),
 
-	    TP_ARGS(conn, op, usage, where),
+	    TP_ARGS(conn_debug_id, op, usage, where),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int, conn )
@@ -559,7 +559,7 @@ TRACE_EVENT(rxrpc_conn,
 			     ),
 
 	    TP_fast_assign(
-		    __entry->conn = conn->debug_id;
+		    __entry->conn = conn_debug_id;
 		    __entry->op = op;
 		    __entry->usage = usage;
 		    __entry->where = where;
@@ -606,10 +606,10 @@ TRACE_EVENT(rxrpc_client,
 	    );
 
 TRACE_EVENT(rxrpc_call,
-	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+	    TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op,
 		     int usage, const void *where, const void *aux),
 
-	    TP_ARGS(call, op, usage, where, aux),
+	    TP_ARGS(call_debug_id, op, usage, where, aux),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int, call )
@@ -620,7 +620,7 @@ TRACE_EVENT(rxrpc_call,
 			     ),
 
 	    TP_fast_assign(
-		    __entry->call = call->debug_id;
+		    __entry->call = call_debug_id;
 		    __entry->op = op;
 		    __entry->usage = usage;
 		    __entry->where = where;
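The rxrpc tracepoints above switch from taking an object pointer to taking its debug ID by value, so a trace emitted while the last reference is being dropped never dereferences memory that may already have been freed. The shape of the idea, in a generic sketch (types and names invented, not rxrpc code):

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct tracked {
	unsigned int debug_id;
	atomic_t usage;
};

/* Snapshot the ID first, drop the reference, then report by value; the
 * pr_debug() call stands in for the tracepoint and never touches *t again.
 */
static void put_tracked(struct tracked *t)
{
	unsigned int id = t->debug_id;
	int n = atomic_dec_return(&t->usage);

	pr_debug("put tracked object %u, usage now %d\n", id, n);
	if (n == 0)
		kfree(t);
}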
Some files were not shown because too many files have changed in this diff.