
Merge branch 'hns3-next'

Guangbin Huang says:

====================
net: hns3: updates for -next

This series includes some updates for the HNS3 ethernet driver.

It contains some optimizations and cleanups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2021-08-28 11:20:05 +01:00
commit 4af874f40e
11 changed files with 92 additions and 82 deletions

View File

@@ -9,7 +9,7 @@
enum HCLGE_MBX_OPCODE {
HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
-HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/
+HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */
HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */
HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */
HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */

View File

@@ -867,7 +867,7 @@ static void
hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
-static const char * const str[] = {"no", "yes"};
+const char * const str[] = {"no", "yes"};
unsigned long *caps = ae_dev->caps;
u32 i, state;

View File

@@ -348,7 +348,7 @@ enum hns3_pkt_l3type {
HNS3_L3_TYPE_LLDP,
HNS3_L3_TYPE_BPDU,
HNS3_L3_TYPE_MAC_PAUSE,
-HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/
+HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */
/* reserved for 0xA~0xB */

View File

@@ -362,41 +362,34 @@ static void hclge_set_default_capability(struct hclge_dev *hdev)
}
}
+const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
+{HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+{HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
+{HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+{HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+{HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+{HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+{HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
+{HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
+{HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
+{HCLGE_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
+{HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
+{HCLGE_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+{HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
+{HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+};
static void hclge_parse_capability(struct hclge_dev *hdev,
struct hclge_query_version_cmd *cmd)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-u32 caps;
+u32 caps, i;
caps = __le32_to_cpu(cmd->caps[0]);
-if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
-set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
-set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
-set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
-set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B))
-set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
-set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B))
-set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_FEC_B))
-set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_PAUSE_B))
-set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B))
-set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_RAS_IMP_B))
-set_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_RXD_ADV_LAYOUT_B))
-set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGE_CAP_PORT_VLAN_BYPASS_B)) {
-set_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps);
-set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
-}
+for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++)
+if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit))
+set_bit(hclge_cmd_caps_bit_map0[i].local_bit,
+ae_dev->caps);
}
static __le32 hclge_build_api_caps(void)
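
The refactor above replaces a long chain of if/set_bit statements with a capability table that a single loop walks. As a rough, self-contained illustration of the same table-driven pattern, here is a small user-space C sketch; the type, array and function names below are invented for the example and use plain bit masks instead of the kernel's hnae3_get_bit()/set_bit() helpers:

/* Hypothetical, simplified sketch of the table-driven capability
 * parsing shown in the diff above; names and bit positions are
 * illustrative only, not the driver's real definitions.
 */
#include <stdio.h>

struct caps_bit_map {
	unsigned short imp_bit;   /* bit reported by firmware (IMP) */
	unsigned short local_bit; /* bit used by the local driver   */
};

static const struct caps_bit_map caps_map[] = {
	{0, 10},  /* e.g. UDP GSO */
	{1, 11},  /* e.g. PTP     */
	{2, 12},  /* e.g. INT QL  */
};

static void parse_caps(unsigned int fw_caps, unsigned long *drv_caps)
{
	unsigned int i;

	/* one loop replaces the removed if/set_bit chain */
	for (i = 0; i < sizeof(caps_map) / sizeof(caps_map[0]); i++)
		if (fw_caps & (1U << caps_map[i].imp_bit))
			*drv_caps |= 1UL << caps_map[i].local_bit;
}

int main(void)
{
	unsigned long drv_caps = 0;

	parse_caps(0x5, &drv_caps);               /* firmware sets bits 0 and 2 */
	printf("driver caps: 0x%lx\n", drv_caps); /* prints 0x1400 */
	return 0;
}

The visible benefit of this shape is that supporting a new capability bit becomes a one-line table entry instead of another if/set_bit pair.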

View File

@@ -453,7 +453,7 @@ struct hclge_tc_thrd {
};
struct hclge_priv_buf {
-struct hclge_waterline wl; /* Waterline for low and high*/
+struct hclge_waterline wl; /* Waterline for low and high */
u32 buf_size; /* TC private buffer size */
u32 tx_buf_size;
u32 enable; /* Enable TC private buffer or not */
@@ -1234,6 +1234,12 @@ struct hclge_phy_reg_cmd {
u8 rsv1[18];
};
+/* capabilities bits map between imp firmware and local driver */
+struct hclge_caps_bit_map {
+u16 imp_bit;
+u16 local_bit;
+};
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{

View File

@@ -234,9 +234,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
goto err_out;
-ret = hclge_notify_init_up(hdev);
-if (ret)
-return ret;
+return hclge_notify_init_up(hdev);
}
return hclge_tm_dwrr_cfg(hdev);

View File

@@ -993,44 +993,43 @@ static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
return 0;
}
+static const struct hclge_speed_bit_map speed_bit_map[] = {
+{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
+{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
+{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
+{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
+{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
+{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
+{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
+{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
+{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
+};
+static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
+{
+u16 i;
+for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
+if (speed == speed_bit_map[i].speed) {
+*speed_bit = speed_bit_map[i].speed_bit;
+return 0;
+}
+}
+return -EINVAL;
+}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u32 speed_ability = hdev->hw.mac.speed_ability;
u32 speed_bit = 0;
+int ret;
-switch (speed) {
-case HCLGE_MAC_SPEED_10M:
-speed_bit = HCLGE_SUPPORT_10M_BIT;
-break;
-case HCLGE_MAC_SPEED_100M:
-speed_bit = HCLGE_SUPPORT_100M_BIT;
-break;
-case HCLGE_MAC_SPEED_1G:
-speed_bit = HCLGE_SUPPORT_1G_BIT;
-break;
-case HCLGE_MAC_SPEED_10G:
-speed_bit = HCLGE_SUPPORT_10G_BIT;
-break;
-case HCLGE_MAC_SPEED_25G:
-speed_bit = HCLGE_SUPPORT_25G_BIT;
-break;
-case HCLGE_MAC_SPEED_40G:
-speed_bit = HCLGE_SUPPORT_40G_BIT;
-break;
-case HCLGE_MAC_SPEED_50G:
-speed_bit = HCLGE_SUPPORT_50G_BIT;
-break;
-case HCLGE_MAC_SPEED_100G:
-speed_bit = HCLGE_SUPPORT_100G_BIT;
-break;
-case HCLGE_MAC_SPEED_200G:
-speed_bit = HCLGE_SUPPORT_200G_BIT;
-break;
-default:
-return -EINVAL;
-}
+ret = hclge_get_speed_bit(speed, &speed_bit);
+if (ret)
+return ret;
if (speed_bit & speed_ability)
return 0;
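
The hclge_check_port_speed() change follows the same idea: the per-speed switch statement is replaced by a lookup in speed_bit_map plus a check of the resulting bit against the port's speed ability mask. A minimal user-space sketch of that lookup, with invented names and example speed values rather than the driver's HCLGE_MAC_SPEED_*/HCLGE_SUPPORT_*_BIT constants, might look like this:

/* Hypothetical sketch of the speed-to-ability-bit lookup introduced
 * above; the speeds, masks and names are examples only.
 */
#include <stdio.h>
#include <errno.h>

struct speed_bit_map {
	unsigned int speed;     /* link speed in Mbit/s  */
	unsigned int speed_bit; /* matching ability flag */
};

static const struct speed_bit_map speed_map[] = {
	{10,    1U << 0},
	{100,   1U << 1},
	{1000,  1U << 2},
	{10000, 1U << 3},
};

static int get_speed_bit(unsigned int speed, unsigned int *speed_bit)
{
	unsigned int i;

	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
		if (speed == speed_map[i].speed) {
			*speed_bit = speed_map[i].speed_bit;
			return 0;
		}
	}
	return -EINVAL;	/* unknown speed, mirroring the new helper */
}

int main(void)
{
	unsigned int ability = (1U << 2) | (1U << 3); /* port supports 1G and 10G */
	unsigned int bit = 0;

	if (!get_speed_bit(1000, &bit) && (bit & ability))
		printf("1000 Mbit/s is supported (bit 0x%x)\n", bit);
	return 0;
}

With this shape, adding a new speed only requires a new table entry rather than another case label.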
@@ -3422,7 +3421,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
hclge_enable_vector(&hdev->misc_vector, false);
event_cause = hclge_check_event_cause(hdev, &clearval);
-/* vector 0 interrupt is shared with reset and mailbox source events.*/
+/* vector 0 interrupt is shared with reset and mailbox source events. */
switch (event_cause) {
case HCLGE_VECTOR0_EVENT_ERR:
hclge_errhand_task_schedule(hdev);

View File

@@ -1058,6 +1058,11 @@ struct hclge_vport {
struct list_head vlan_list; /* Store VF vlan table */
};
+struct hclge_speed_bit_map {
+u32 speed;
+u32 speed_bit;
+};
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,

View File

@@ -66,6 +66,8 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
resp_msg->len);
trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,

View File

@@ -71,7 +71,7 @@ static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
static bool hclgevf_is_special_opcode(u16 opcode)
{
-static const u16 spec_opcode[] = {0x30, 0x31, 0x32};
+const u16 spec_opcode[] = {0x30, 0x31, 0x32};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -342,25 +342,26 @@ static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}
+const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
+{HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+{HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+{HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+{HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+{HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+{HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+};
static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
struct hclgevf_query_version_cmd *cmd)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-u32 caps;
+u32 caps, i;
caps = __le32_to_cpu(cmd->caps[0]);
-if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B))
-set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B))
-set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
-set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B))
-set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B))
-set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
-if (hnae3_get_bit(caps, HCLGEVF_CAP_RXD_ADV_LAYOUT_B))
-set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
+for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++)
+if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit))
+set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit,
+ae_dev->caps);
}
static __le32 hclgevf_build_api_caps(void)

View File

@@ -296,6 +296,12 @@ struct hclgevf_dev_specs_1_cmd {
u8 rsv1[18];
};
+/* capabilities bits map between imp firmware and local driver */
+struct hclgevf_caps_bit_map {
+u16 imp_bit;
+u16 local_bit;
+};
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);