Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-23 12:14:10 +08:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Merge in late fixes to prepare for the 6.9 net-next PR.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit ed1f164038
@@ -290,7 +290,7 @@ attribute-sets:
         enum: eswitch-mode
       -
         name: eswitch-inline-mode
-        type: u16
+        type: u8
         enum: eswitch-inline-mode
       -
         name: dpipe-tables
@@ -131,9 +131,9 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
 		reg = dpll_pin_registration_find(ref, ops, priv);
 		if (WARN_ON(!reg))
 			return -EINVAL;
+		list_del(&reg->list);
+		kfree(reg);
 		if (refcount_dec_and_test(&ref->refcount)) {
-			list_del(&reg->list);
-			kfree(reg);
 			xa_erase(xa_pins, i);
 			WARN_ON(!list_empty(&ref->registration_list));
 			kfree(ref);
@@ -211,9 +211,9 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
 		reg = dpll_pin_registration_find(ref, ops, priv);
 		if (WARN_ON(!reg))
 			return;
+		list_del(&reg->list);
+		kfree(reg);
 		if (refcount_dec_and_test(&ref->refcount)) {
-			list_del(&reg->list);
-			kfree(reg);
 			xa_erase(xa_dplls, i);
 			WARN_ON(!list_empty(&ref->registration_list));
 			kfree(ref);
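Both dpll hunks apply the same ordering fix: once the caller's registration has been found, it is always unlinked and freed, while the shared reference is torn down only when its refcount drops to zero. A minimal userspace sketch of that pattern (simplified types and names, illustrative only, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the kernel objects: one shared "reference" carries a
 * refcount and a list of per-caller registrations.
 */
struct registration {
	struct registration *next;
	void *priv;
};

struct reference {
	int refcount;
	struct registration *registrations;
};

static int reference_add(struct reference **refp, void *priv)
{
	struct reference *ref = *refp;
	struct registration *reg;

	if (!ref) {
		ref = calloc(1, sizeof(*ref));
		if (!ref)
			return -1;
		*refp = ref;
	}
	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return -1;
	reg->priv = priv;
	reg->next = ref->registrations;
	ref->registrations = reg;
	ref->refcount++;
	return 0;
}

/* Always drop this caller's registration; free the shared reference only
 * when the last user goes away (the corrected ordering above).
 */
static int reference_del(struct reference **refp, void *priv)
{
	struct reference *ref = *refp;
	struct registration **pp, *reg;

	for (pp = &ref->registrations; (reg = *pp); pp = &reg->next)
		if (reg->priv == priv)
			break;
	if (!reg)
		return -1;		/* caller was never registered */

	*pp = reg->next;		/* unlink and free unconditionally */
	free(reg);

	if (--ref->refcount == 0) {	/* last user: release the reference too */
		free(ref);
		*refp = NULL;
	}
	return 0;
}

int main(void)
{
	struct reference *ref = NULL;
	int a, b;

	reference_add(&ref, &a);
	reference_add(&ref, &b);
	reference_del(&ref, &a);	/* registration freed, reference kept */
	reference_del(&ref, &b);	/* last user: everything released */
	printf("reference is %s\n", ref ? "still allocated" : "fully released");
	return 0;
}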
@@ -2259,6 +2259,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
 	return ksz_irq_common_setup(dev, pirq);
 }
 
+static int ksz_parse_drive_strength(struct ksz_device *dev);
+
 static int ksz_setup(struct dsa_switch *ds)
 {
 	struct ksz_device *dev = ds->priv;
@@ -2280,6 +2282,10 @@ static int ksz_setup(struct dsa_switch *ds)
 			return ret;
 	}
 
+	ret = ksz_parse_drive_strength(dev);
+	if (ret)
+		return ret;
+
 	/* set broadcast storm protection 10% rate */
 	regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
 			   BROADCAST_STORM_RATE,
@@ -4328,10 +4334,6 @@ int ksz_switch_register(struct ksz_device *dev)
 	for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
 		dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
 	if (dev->dev->of_node) {
-		ret = ksz_parse_drive_strength(dev);
-		if (ret)
-			return ret;
-
 		ret = of_get_phy_mode(dev->dev->of_node, &interface);
 		if (ret == 0)
 			dev->compat_interface = interface;
@@ -6678,6 +6678,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 {
 	struct rtnl_link_stats64 *net_stats, *stats_prev;
 	struct rtnl_link_stats64 *vsi_stats;
+	struct ice_pf *pf = vsi->back;
 	u64 pkts, bytes;
 	int i;
 
@@ -6723,21 +6724,18 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 	net_stats = &vsi->net_stats;
 	stats_prev = &vsi->net_stats_prev;
 
-	/* clear prev counters after reset */
-	if (vsi_stats->tx_packets < stats_prev->tx_packets ||
-	    vsi_stats->rx_packets < stats_prev->rx_packets) {
-		stats_prev->tx_packets = 0;
-		stats_prev->tx_bytes = 0;
-		stats_prev->rx_packets = 0;
-		stats_prev->rx_bytes = 0;
+	/* Update netdev counters, but keep in mind that values could start at
+	 * random value after PF reset. And as we increase the reported stat by
+	 * diff of Prev-Cur, we need to be sure that Prev is valid. If it's not,
+	 * let's skip this round.
+	 */
+	if (likely(pf->stat_prev_loaded)) {
+		net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+		net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+		net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+		net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
 	}
 
-	/* update netdev counters */
-	net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
-	net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
-	net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
-	net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
-
 	stats_prev->tx_packets = vsi_stats->tx_packets;
 	stats_prev->tx_bytes = vsi_stats->tx_bytes;
 	stats_prev->rx_packets = vsi_stats->rx_packets;
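The ice change replaces the "counter went backwards, so a reset must have happened" heuristic with an explicit validity flag: deltas are folded into the netdev counters only when the previous snapshot is known to be good, and the snapshot itself is refreshed unconditionally. A small self-contained C sketch of that accumulate-by-delta scheme (hypothetical names, not the driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct counters {
	uint64_t tx_packets;
	uint64_t rx_packets;
};

/* Add the difference between the current and the previous hardware snapshot
 * to the accumulated totals, but only when the previous snapshot is valid
 * (e.g. not the first read after a reset). The snapshot is always refreshed
 * so the next round has a sane baseline again.
 */
static void update_stats(struct counters *total, struct counters *prev,
			 const struct counters *cur, bool prev_loaded)
{
	if (prev_loaded) {
		total->tx_packets += cur->tx_packets - prev->tx_packets;
		total->rx_packets += cur->rx_packets - prev->rx_packets;
	}
	*prev = *cur;
}

int main(void)
{
	struct counters total = {0}, prev = {0};
	struct counters after_reset = { .tx_packets = 12345, .rx_packets = 678 };
	struct counters later = { .tx_packets = 12350, .rx_packets = 700 };

	update_stats(&total, &prev, &after_reset, false); /* skip the bogus delta */
	update_stats(&total, &prev, &later, true);        /* adds 5 tx, 22 rx */
	printf("tx=%llu rx=%llu\n",
	       (unsigned long long)total.tx_packets,
	       (unsigned long long)total.rx_packets);
	return 0;
}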
@@ -6985,44 +6985,31 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
 static void igb_tsync_interrupt(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 ack = 0, tsicr = rd32(E1000_TSICR);
+	u32 tsicr = rd32(E1000_TSICR);
 	struct ptp_clock_event event;
 
 	if (tsicr & TSINTR_SYS_WRAP) {
 		event.type = PTP_CLOCK_PPS;
 		if (adapter->ptp_caps.pps)
 			ptp_clock_event(adapter->ptp_clock, &event);
-		ack |= TSINTR_SYS_WRAP;
 	}
 
 	if (tsicr & E1000_TSICR_TXTS) {
 		/* retrieve hardware timestamp */
 		schedule_work(&adapter->ptp_tx_work);
-		ack |= E1000_TSICR_TXTS;
 	}
 
-	if (tsicr & TSINTR_TT0) {
+	if (tsicr & TSINTR_TT0)
 		igb_perout(adapter, 0);
-		ack |= TSINTR_TT0;
-	}
 
-	if (tsicr & TSINTR_TT1) {
+	if (tsicr & TSINTR_TT1)
 		igb_perout(adapter, 1);
-		ack |= TSINTR_TT1;
-	}
 
-	if (tsicr & TSINTR_AUTT0) {
+	if (tsicr & TSINTR_AUTT0)
 		igb_extts(adapter, 0);
-		ack |= TSINTR_AUTT0;
-	}
 
-	if (tsicr & TSINTR_AUTT1) {
+	if (tsicr & TSINTR_AUTT1)
 		igb_extts(adapter, 1);
-		ack |= TSINTR_AUTT1;
-	}
-
-	/* acknowledge the interrupts */
-	wr32(E1000_TSICR, ack);
 }
 
 static irqreturn_t igb_msix_other(int irq, void *data)
@@ -5303,25 +5303,22 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
 
 static void igc_tsync_interrupt(struct igc_adapter *adapter)
 {
-	u32 ack, tsauxc, sec, nsec, tsicr;
 	struct igc_hw *hw = &adapter->hw;
+	u32 tsauxc, sec, nsec, tsicr;
 	struct ptp_clock_event event;
 	struct timespec64 ts;
 
 	tsicr = rd32(IGC_TSICR);
-	ack = 0;
 
 	if (tsicr & IGC_TSICR_SYS_WRAP) {
 		event.type = PTP_CLOCK_PPS;
 		if (adapter->ptp_caps.pps)
 			ptp_clock_event(adapter->ptp_clock, &event);
-		ack |= IGC_TSICR_SYS_WRAP;
 	}
 
 	if (tsicr & IGC_TSICR_TXTS) {
 		/* retrieve hardware timestamp */
 		igc_ptp_tx_tstamp_event(adapter);
-		ack |= IGC_TSICR_TXTS;
 	}
 
 	if (tsicr & IGC_TSICR_TT0) {
@@ -5335,7 +5332,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
 		wr32(IGC_TSAUXC, tsauxc);
 		adapter->perout[0].start = ts;
 		spin_unlock(&adapter->tmreg_lock);
-		ack |= IGC_TSICR_TT0;
 	}
 
 	if (tsicr & IGC_TSICR_TT1) {
@@ -5349,7 +5345,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
 		wr32(IGC_TSAUXC, tsauxc);
 		adapter->perout[1].start = ts;
 		spin_unlock(&adapter->tmreg_lock);
-		ack |= IGC_TSICR_TT1;
 	}
 
 	if (tsicr & IGC_TSICR_AUTT0) {
@@ -5359,7 +5354,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
 		event.index = 0;
 		event.timestamp = sec * NSEC_PER_SEC + nsec;
 		ptp_clock_event(adapter->ptp_clock, &event);
-		ack |= IGC_TSICR_AUTT0;
 	}
 
 	if (tsicr & IGC_TSICR_AUTT1) {
@@ -5369,11 +5363,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
 		event.index = 1;
 		event.timestamp = sec * NSEC_PER_SEC + nsec;
 		ptp_clock_event(adapter->ptp_clock, &event);
-		ack |= IGC_TSICR_AUTT1;
 	}
-
-	/* acknowledge the interrupts */
-	wr32(IGC_TSICR, ack);
 }
 
 /**
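The igb and igc hunks drop the ack accumulator for the same reason: the time-sync cause register (TSICR) is already cleared by the read at the top of the handler, so writing the collected bits back at the end can wipe out causes that latched between the read and the write, and those events are then never serviced. A tiny C sketch of handling a read-to-clear cause register (hypothetical register model, not driver code):

#include <stdint.h>
#include <stdio.h>

#define CAUSE_PPS   0x01u
#define CAUSE_TXTS  0x02u

static uint32_t tsicr_hw = CAUSE_PPS | CAUSE_TXTS;	/* pretend hardware state */

/* Model of a read-to-clear register: reading returns the pending causes
 * and clears them in "hardware" in the same step.
 */
static uint32_t read_tsicr(void)
{
	uint32_t val = tsicr_hw;

	tsicr_hw = 0;
	return val;
}

static void tsync_interrupt(void)
{
	/* One read gives a consistent snapshot and acknowledges it; writing
	 * the same bits back later would risk clearing causes raised after
	 * this read, losing events.
	 */
	uint32_t causes = read_tsicr();

	if (causes & CAUSE_PPS)
		printf("PPS event\n");
	if (causes & CAUSE_TXTS)
		printf("TX timestamp ready\n");
}

int main(void)
{
	tsync_interrupt();
	return 0;
}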
@@ -1235,8 +1235,8 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
 enum rvu_af_dl_param_id {
 	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
 	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
-	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
 	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
 	RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
 };
@@ -1434,15 +1434,6 @@ static const struct devlink_param rvu_af_dl_params[] = {
 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
 			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
 			     rvu_af_dl_dwrr_mtu_validate),
-};
-
-static const struct devlink_param rvu_af_dl_param_exact_match[] = {
-	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
-			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
-			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
-			     rvu_af_npc_exact_feature_get,
-			     rvu_af_npc_exact_feature_disable,
-			     rvu_af_npc_exact_feature_validate),
 	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
 			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1457,6 +1448,15 @@ static const struct devlink_param rvu_af_dl_param_exact_match[] = {
 			     rvu_af_dl_nix_maxlf_validate),
 };
 
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
+	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+			     rvu_af_npc_exact_feature_get,
+			     rvu_af_npc_exact_feature_disable,
+			     rvu_af_npc_exact_feature_validate),
+};
+
 /* Devlink switch mode */
 static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
@@ -337,6 +337,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
 
 		acti_netdevs = kmalloc_array(entry->slave_cnt,
 					     sizeof(*acti_netdevs), GFP_KERNEL);
+		if (!acti_netdevs) {
+			schedule_delayed_work(&lag->work,
+					      NFP_FL_LAG_DELAY);
+			continue;
+		}
 
 		/* Include sanity check in the loop. It may be that a bond has
 		 * changed between processing the last notification and the
@@ -402,7 +402,7 @@ static int dp83822_config_init(struct phy_device *phydev)
 {
 	struct dp83822_private *dp83822 = phydev->priv;
 	struct device *dev = &phydev->mdio.dev;
-	int rgmii_delay;
+	int rgmii_delay = 0;
 	s32 rx_int_delay;
 	s32 tx_int_delay;
 	int err = 0;
@@ -412,30 +412,33 @@ static int dp83822_config_init(struct phy_device *phydev)
 		rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
 						      true);
 
-		if (rx_int_delay <= 0)
-			rgmii_delay = 0;
-		else
-			rgmii_delay = DP83822_RX_CLK_SHIFT;
+		/* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
+		if (rx_int_delay > 0)
+			rgmii_delay |= DP83822_RX_CLK_SHIFT;
 
 		tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
 						      false);
 
+		/* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
 		if (tx_int_delay <= 0)
 			rgmii_delay &= ~DP83822_TX_CLK_SHIFT;
 		else
 			rgmii_delay |= DP83822_TX_CLK_SHIFT;
 
-		if (rgmii_delay) {
-			err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
-					       MII_DP83822_RCSR, rgmii_delay);
-			if (err)
-				return err;
-		}
+		err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+				     DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
+		if (err)
+			return err;
 
-		phy_set_bits_mmd(phydev, DP83822_DEVADDR,
-				 MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+		err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+				       MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+
+		if (err)
+			return err;
 	} else {
-		phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
-				   MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+		err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+					 MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+
+		if (err)
+			return err;
 	}
 
 	if (dp83822->fx_enabled) {
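The dp83822 rework builds both delay bits into rgmii_delay (now initialised to 0) and writes them with phy_modify_mmd() under a mask covering the RX and TX shift bits, so a bit that should be off is actively cleared rather than left at its previous value, and every MMD access is error-checked. A small C sketch of the general modify-within-mask idea (plain integers and made-up bit positions, not the phylib API):

#include <stdint.h>
#include <stdio.h>

#define RX_CLK_SHIFT 0x1000u	/* example bit positions only */
#define TX_CLK_SHIFT 0x0800u

/* Read-modify-write a register value so that, within `mask`, exactly the
 * bits in `val` end up set; bits outside the mask are preserved. This is
 * what a modify() helper gives you that a conditional set_bits() does not:
 * bits that should be 0 are cleared.
 */
static uint16_t modify_bits(uint16_t reg, uint16_t mask, uint16_t val)
{
	return (uint16_t)((reg & ~mask) | (val & mask));
}

int main(void)
{
	uint16_t rcsr = RX_CLK_SHIFT | TX_CLK_SHIFT;	/* both delays currently on */
	uint16_t want = RX_CLK_SHIFT;			/* only the RX delay wanted */

	rcsr = modify_bits(rcsr, RX_CLK_SHIFT | TX_CLK_SHIFT, want);
	printf("rcsr=0x%04x\n", rcsr);	/* TX bit cleared, RX bit kept */
	return 0;
}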
@@ -3128,7 +3128,7 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
 	if (delay < 0)
 		return delay;
 
-	if (delay && size == 0)
+	if (size == 0)
 		return delay;
 
 	if (delay < delay_values[0] || delay > delay_values[size - 1]) {
@@ -10077,7 +10077,7 @@ static int rtl8152_cfgselector_choose_configuration(struct usb_device *udev)
 	 * driver supports it.
 	 */
 	if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
-		return 0;
+		return -ENODEV;
 
 	/* The vendor mode is not always config #1, so to find it out. */
 	c = udev->config;
@@ -198,7 +198,7 @@ static const struct nla_policy devlink_eswitch_set_nl_policy[DEVLINK_ATTR_ESWITC
 	[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
 	[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
 	[DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_MAX(NLA_U16, 1),
-	[DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U16, 3),
+	[DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U8, 3),
 	[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = NLA_POLICY_MAX(NLA_U8, 1),
 };
 
@@ -377,7 +377,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 		  bool log_ecn_error)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	int err;
+	int nh, err;
 
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 	if (ipv4_is_multicast(iph->daddr)) {
@@ -403,8 +403,21 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
 	}
 
+	/* Save offset of outer header relative to skb->head,
+	 * because we are going to reset the network header to the inner header
+	 * and might change skb->head.
+	 */
+	nh = skb_network_header(skb) - skb->head;
+
 	skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
 
+	if (!pskb_inet_may_pull(skb)) {
+		DEV_STATS_INC(tunnel->dev, rx_length_errors);
+		DEV_STATS_INC(tunnel->dev, rx_errors);
+		goto drop;
+	}
+	iph = (struct iphdr *)(skb->head + nh);
+
 	err = IP_ECN_decapsulate(iph, skb);
 	if (unlikely(err)) {
 		if (log_ecn_error)
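The ip_tunnel_rcv() fix pulls the inner header before ECN decapsulation, and since pskb_inet_may_pull() might change skb->head (as the added comment notes), the outer IP header can no longer be reached through the old pointer; the code therefore remembers the outer header as an offset from skb->head and rebuilds the pointer afterwards. The same save-offset/recompute-pointer pattern, shown on a plain realloc'd buffer (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	char *nbuf, *hdr;
	size_t off;

	if (!buf)
		return 1;
	strcpy(buf, "outer-hdr");
	hdr = buf;			/* pointer into the buffer */

	off = (size_t)(hdr - buf);	/* save the position as an offset */
	nbuf = realloc(buf, 4096);	/* may move the whole buffer ... */
	if (!nbuf) {
		free(buf);
		return 1;
	}
	buf = nbuf;
	hdr = buf + off;		/* ... so recompute the pointer */

	printf("%s\n", hdr);
	free(buf);
	return 0;
}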
@@ -449,6 +449,11 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
 	       + nla_total_size(16); /* src */
 }
 
+static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
+{
+	rt_genid_bump_ipv6(ops->fro_net);
+}
+
 static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
 	.family = AF_INET6,
 	.rule_size = sizeof(struct fib6_rule),
@@ -461,6 +466,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
 	.compare = fib6_rule_compare,
 	.fill = fib6_rule_fill,
 	.nlmsg_payload = fib6_rule_nlmsg_payload,
+	.flush_cache = fib6_rule_flush_cache,
 	.nlgroup = RTNLGRP_IPV6_RULE,
 	.owner = THIS_MODULE,
 	.fro_net = &init_net,