Merge branch 'Fungible-ethernet-driver'
Dimitris Michailidis says:

====================
new Fungible Ethernet driver

This patch series contains a new network driver for the Ethernet
functionality of Fungible cards.

It contains two modules. The first one, in patch 2, is a library module
that implements some of the device setup, queue management, and support
for operating an admin queue. These pieces are placed in a separate module
because the cards provide a number of PCI functions handled by different
types of drivers, and all of them use the same common means to interact
with the device. Each of these drivers relies on the library module for
that shared functionality.

The remaining patches provide the Ethernet driver for the cards.

v2:
- Fix set_pauseparam, remove get_wol, remove module param (Andrew Lunn)
- Fix a register poll loop (Andrew)
- Replace constants defined with 'static const'
- make W=1 C=1 is clean
- Remove devlink FW update (Jakub)
- Remove duplicate ethtool stats covered by structured API (Jakub)

v3:
- Make TLS stats unconditional (Andrew)
- Remove inline from .c (Andrew)
- Replace some ifdef with IS_ENABLED (Andrew)
- Fix build failure on 32b arches (build robot)
- Fix build issue with make O= (Jakub)

v4:
- Fix for newer bpf_warn_invalid_xdp_action() (Jakub)
- Remove 32b dma_set_mask_and_coherent()

v5:
- Make XDP enter/exit non-disruptive to active traffic
- Remove dormant port state
- Style fixes, unused stuff removal (Jakub)

v6:
- When changing queue depth or numbers, allocate the new queues before
  shutting down the existing ones (Jakub)

v7:
- Convert IRQ bookkeeping to use XArray.
- Changes to the numbers of Tx/Rx queues are now incremental and do not
  disrupt ongoing traffic.
- Implement .ndo_eth_ioctl instead of .ndo_do_ioctl.
- Replace deprecated irq_set_affinity_hint.
- Remove TLS 1.3 support (Jakub)
- Remove hwtstamp_config.flags check (Jakub)
- Add locking in SR-IOV enable/disable. (Jakub)

v8:
- Remove dropping of <33B packets and the associated counter (Jakub)
- Report CQE size.
- Show last MAC stats when the netdev isn't running (Andrew)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1bb1c5bc54
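For orientation, the sketch below shows how a function driver might sit on top of the funcore library introduced in this series, using only the API declared in fun_dev.h further down (fun_dev_enable(), fun_get_res_count(), fun_dev_disable()). It is a minimal sketch, not part of the patch: the my_probe() name, the literal driver name string, and the admin-queue sizing values are assumptions made for illustration; the real consumer is the funeth driver added later in the series.

```c
/*
 * Illustrative sketch only: a minimal consumer of the funcore API declared
 * in fun_dev.h below. The my_probe() name, the "my_fun_drv" string and the
 * admin-queue sizes are assumptions for the example, not values from the
 * series.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "fun_dev.h"

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct fun_dev_params areq = {
		.cqe_size_log2 = 6,	/* 64B admin CQEs (assumed size) */
		.sqe_size_log2 = 7,	/* 128B admin SQEs (assumed size) */
		.cq_depth = 16,
		.sq_depth = 16,
		.rq_depth = 0,		/* no admin RQ in this sketch */
		.min_msix = 1,		/* fun_dev_enable() adds 1 for the admin CQ */
	};
	struct fun_dev *fdev;
	int rc;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	/* Maps BAR0, sets DMA masks, allocates MSI-X, brings up the admin queue. */
	rc = fun_dev_enable(fdev, pdev, &areq, "my_fun_drv");
	if (rc)
		goto free_fdev;

	/* Synchronous admin commands, e.g. querying device resource counts. */
	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
	if (rc < 0)
		goto disable;
	dev_info(&pdev->dev, "device exposes %d completion queues\n", rc);

	return 0;

disable:
	fun_dev_disable(fdev);
free_fdev:
	kfree(fdev);
	return rc;
}
```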
MAINTAINERS
@@ -7931,6 +7931,12 @@ L:	platform-driver-x86@vger.kernel.org
S:	Maintained
F:	drivers/platform/x86/fujitsu-tablet.c

FUNGIBLE ETHERNET DRIVERS
M:	Dimitris Michailidis <dmichail@fungible.com>
L:	netdev@vger.kernel.org
S:	Supported
F:	drivers/net/ethernet/fungible/

FUSE: FILESYSTEM IN USERSPACE
M:	Miklos Szeredi <miklos@szeredi.hu>
L:	linux-fsdevel@vger.kernel.org
drivers/net/ethernet/Kconfig
@@ -78,6 +78,7 @@ source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
source "drivers/net/ethernet/fungible/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
drivers/net/ethernet/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
obj-$(CONFIG_NET_VENDOR_FUNGIBLE) += fungible/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
drivers/net/ethernet/fungible/Kconfig (new file, 27 lines)
@@ -0,0 +1,27 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Fungible network driver configuration
#

config NET_VENDOR_FUNGIBLE
	bool "Fungible devices"
	default y
	help
	  If you have a Fungible network device, say Y.

	  Note that the answer to this question doesn't directly affect the
	  kernel: saying N will just cause the configurator to skip all
	  the questions about Fungible cards. If you say Y, you will be asked
	  for your specific card in the following questions.

if NET_VENDOR_FUNGIBLE

config FUN_CORE
	tristate
	help
	  A service module offering basic common services to Fungible
	  device drivers.

source "drivers/net/ethernet/fungible/funeth/Kconfig"

endif # NET_VENDOR_FUNGIBLE
drivers/net/ethernet/fungible/Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
#
# Makefile for the Fungible network device drivers.
#

obj-$(CONFIG_FUN_CORE) += funcore/
obj-$(CONFIG_FUN_ETH) += funeth/
drivers/net/ethernet/fungible/funcore/Makefile (new file, 5 lines)
@@ -0,0 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

obj-$(CONFIG_FUN_CORE) += funcore.o

funcore-y := fun_dev.o fun_queue.o
drivers/net/ethernet/fungible/funcore/fun_dev.c (new file, 843 lines)
@@ -0,0 +1,843 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#include "fun_queue.h"
#include "fun_dev.h"

#define FUN_ADMIN_CMD_TO_MS 3000

enum {
	AQA_ASQS_SHIFT = 0,
	AQA_ACQS_SHIFT = 16,
	AQA_MIN_QUEUE_SIZE = 2,
	AQA_MAX_QUEUE_SIZE = 4096
};

/* context for admin commands */
struct fun_cmd_ctx {
	fun_admin_callback_t cb;  /* callback to invoke on completion */
	void *cb_data;            /* user data provided to callback */
	int cpu;                  /* CPU where the cmd's tag was allocated */
};

/* Context for synchronous admin commands. */
struct fun_sync_cmd_ctx {
	struct completion compl;
	u8 *rsp_buf;              /* caller provided response buffer */
	unsigned int rsp_len;     /* response buffer size */
	u8 rsp_status;            /* command response status */
};

/* Wait for the CSTS.RDY bit to match @enabled. */
static int fun_wait_ready(struct fun_dev *fdev, bool enabled)
{
	unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg);
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	unsigned long deadline;

	deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500ms */

	for (;;) {
		u32 csts = readl(fdev->bar + NVME_REG_CSTS);

		if (csts == ~0) {
			dev_err(fdev->dev, "CSTS register read %#x\n", csts);
			return -EIO;
		}

		if ((csts & NVME_CSTS_RDY) == bit)
			return 0;

		if (time_is_before_jiffies(deadline))
			break;

		msleep(100);
	}

	dev_err(fdev->dev,
		"Timed out waiting for device to indicate RDY %u; aborting %s\n",
		enabled, enabled ? "initialization" : "reset");
	return -ETIMEDOUT;
}

/* Check CSTS and return an error if it is unreadable or has unexpected
 * RDY value.
 */
static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy)
{
	u32 csts = readl(fdev->bar + NVME_REG_CSTS);
	u32 actual_rdy = csts & NVME_CSTS_RDY;

	if (csts == ~0) {
		dev_err(fdev->dev, "CSTS register read %#x\n", csts);
		return -EIO;
	}
	if (actual_rdy != expected_rdy) {
		dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy);
		return -EINVAL;
	}
	return 0;
}

/* Check that CSTS RDY has the expected value. Then write a new value to the CC
 * register and wait for CSTS RDY to match the new CC ENABLE state.
 */
static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy)
{
	int rc = fun_check_csts_rdy(fdev, initial_rdy);

	if (rc)
		return rc;
	writel(fdev->cc_reg, fdev->bar + NVME_REG_CC);
	return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE));
}

static int fun_disable_ctrl(struct fun_dev *fdev)
{
	fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
	return fun_update_cc_enable(fdev, 1);
}

static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2,
			   u32 admin_sqesz_log2)
{
	fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) |
		       (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) |
		       ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) |
		       NVME_CC_ENABLE;

	return fun_update_cc_enable(fdev, 0);
}

static int fun_map_bars(struct fun_dev *fdev, const char *name)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);
	int err;

	err = pci_request_mem_regions(pdev, name);
	if (err) {
		dev_err(&pdev->dev,
			"Couldn't get PCI memory resources, err %d\n", err);
		return err;
	}

	fdev->bar = pci_ioremap_bar(pdev, 0);
	if (!fdev->bar) {
		dev_err(&pdev->dev, "Couldn't map BAR 0\n");
		pci_release_mem_regions(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void fun_unmap_bars(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);

	if (fdev->bar) {
		iounmap(fdev->bar);
		fdev->bar = NULL;
		pci_release_mem_regions(pdev);
	}
}

static int fun_set_dma_masks(struct device *dev)
{
	int err;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		dev_err(dev, "DMA mask configuration failed, err %d\n", err);
	return err;
}

static irqreturn_t fun_admin_irq(int irq, void *data)
{
	struct fun_queue *funq = data;

	return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE;
}

static void fun_complete_admin_cmd(struct fun_queue *funq, void *data,
				   void *entry, const struct fun_cqe_info *info)
{
	const struct fun_admin_rsp_common *rsp_common = entry;
	struct fun_dev *fdev = funq->fdev;
	struct fun_cmd_ctx *cmd_ctx;
	int cpu;
	u16 cid;

	if (info->sqhd == cpu_to_be16(0xffff)) {
		dev_dbg(fdev->dev, "adminq event");
		if (fdev->adminq_cb)
			fdev->adminq_cb(fdev, entry);
		return;
	}

	cid = be16_to_cpu(rsp_common->cid);
	dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid,
		rsp_common->op, rsp_common->ret);

	cmd_ctx = &fdev->cmd_ctx[cid];
	if (cmd_ctx->cpu < 0) {
		dev_err(fdev->dev,
			"admin CQE with CID=%u, op=%u does not match a pending command\n",
			cid, rsp_common->op);
		return;
	}

	if (cmd_ctx->cb)
		cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));

	cpu = cmd_ctx->cpu;
	cmd_ctx->cpu = -1;
	sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
}

static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags)
{
	unsigned int i;

	fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL);
	if (!fdev->cmd_ctx)
		return -ENOMEM;

	for (i = 0; i < ntags; i++)
		fdev->cmd_ctx[i].cpu = -1;

	return 0;
}

/* Allocate and enable an admin queue and assign it the first IRQ vector. */
static int fun_enable_admin_queue(struct fun_dev *fdev,
				  const struct fun_dev_params *areq)
{
	struct fun_queue_alloc_req qreq = {
		.cqe_size_log2 = areq->cqe_size_log2,
		.sqe_size_log2 = areq->sqe_size_log2,
		.cq_depth = areq->cq_depth,
		.sq_depth = areq->sq_depth,
		.rq_depth = areq->rq_depth,
	};
	unsigned int ntags = areq->sq_depth - 1;
	struct fun_queue *funq;
	int rc;

	if (fdev->admin_q)
		return -EEXIST;

	if (areq->sq_depth < AQA_MIN_QUEUE_SIZE ||
	    areq->sq_depth > AQA_MAX_QUEUE_SIZE ||
	    areq->cq_depth < AQA_MIN_QUEUE_SIZE ||
	    areq->cq_depth > AQA_MAX_QUEUE_SIZE)
		return -EINVAL;

	fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq);
	if (!fdev->admin_q)
		return -ENOMEM;

	rc = fun_init_cmd_ctx(fdev, ntags);
	if (rc)
		goto free_q;

	rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false,
				     GFP_KERNEL, dev_to_node(fdev->dev));
	if (rc)
		goto free_cmd_ctx;

	funq = fdev->admin_q;
	funq->cq_vector = 0;
	rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq);
	if (rc)
		goto free_sbq;

	fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL);
	fdev->adminq_cb = areq->event_cb;

	writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT |
	       (funq->cq_depth - 1) << AQA_ACQS_SHIFT,
	       fdev->bar + NVME_REG_AQA);

	writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ);
	writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ);

	rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2);
	if (rc)
		goto free_irq;

	if (areq->rq_depth) {
		rc = fun_create_rq(funq);
		if (rc)
			goto disable_ctrl;

		funq_rq_post(funq);
	}

	return 0;

disable_ctrl:
	fun_disable_ctrl(fdev);
free_irq:
	fun_free_irq(funq);
free_sbq:
	sbitmap_queue_free(&fdev->admin_sbq);
free_cmd_ctx:
	kvfree(fdev->cmd_ctx);
	fdev->cmd_ctx = NULL;
free_q:
	fun_free_queue(fdev->admin_q);
	fdev->admin_q = NULL;
	return rc;
}

static void fun_disable_admin_queue(struct fun_dev *fdev)
{
	struct fun_queue *admq = fdev->admin_q;

	if (!admq)
		return;

	fun_disable_ctrl(fdev);

	fun_free_irq(admq);
	__fun_process_cq(admq, 0);

	sbitmap_queue_free(&fdev->admin_sbq);

	kvfree(fdev->cmd_ctx);
	fdev->cmd_ctx = NULL;

	fun_free_queue(admq);
	fdev->admin_q = NULL;
}

/* Return %true if the admin queue has stopped servicing commands as can be
 * detected through registers. This isn't exhaustive and may provide false
 * negatives.
 */
static bool fun_adminq_stopped(struct fun_dev *fdev)
{
	u32 csts = readl(fdev->bar + NVME_REG_CSTS);

	return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY;
}

static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup)
{
	struct sbitmap_queue *sbq = &fdev->admin_sbq;
	struct sbq_wait_state *ws = &sbq->ws[0];
	DEFINE_SBQ_WAIT(wait);
	int tag;

	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		if (fdev->suppress_cmds) {
			tag = -ESHUTDOWN;
			break;
		}
		tag = sbitmap_queue_get(sbq, cpup);
		if (tag >= 0)
			break;
		schedule();
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	return tag;
}

/* Submit an asynchronous admin command. Caller is responsible for implementing
 * any waiting or timeout. Upon command completion the callback @cb is called.
 */
int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
			 fun_admin_callback_t cb, void *cb_data, bool wait_ok)
{
	struct fun_queue *funq = fdev->admin_q;
	unsigned int cmdsize = cmd->len8 * 8;
	struct fun_cmd_ctx *cmd_ctx;
	int tag, cpu, rc = 0;

	if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2)))
		return -EMSGSIZE;

	tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu);
	if (tag < 0) {
		if (!wait_ok)
			return -EAGAIN;
		tag = fun_wait_for_tag(fdev, &cpu);
		if (tag < 0)
			return tag;
	}

	cmd->cid = cpu_to_be16(tag);

	cmd_ctx = &fdev->cmd_ctx[tag];
	cmd_ctx->cb = cb;
	cmd_ctx->cb_data = cb_data;

	spin_lock(&funq->sq_lock);

	if (unlikely(fdev->suppress_cmds)) {
		rc = -ESHUTDOWN;
		sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu);
	} else {
		cmd_ctx->cpu = cpu;
		memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize);

		dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail,
			cmd);

		if (++funq->sq_tail == funq->sq_depth)
			funq->sq_tail = 0;
		writel(funq->sq_tail, funq->sq_db);
	}
	spin_unlock(&funq->sq_lock);
	return rc;
}

/* Abandon a pending admin command by clearing the issuer's callback data.
 * Failure indicates that the command either has already completed or its
 * completion is racing with this call.
 */
static bool fun_abandon_admin_cmd(struct fun_dev *fd,
				  const struct fun_admin_req_common *cmd,
				  void *cb_data)
{
	u16 cid = be16_to_cpu(cmd->cid);
	struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid];

	return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data;
}

/* Stop submission of new admin commands and wake up any processes waiting for
 * tags. Already submitted commands are left to complete or time out.
 */
static void fun_admin_stop(struct fun_dev *fdev)
{
	spin_lock(&fdev->admin_q->sq_lock);
	fdev->suppress_cmds = true;
	spin_unlock(&fdev->admin_q->sq_lock);
	sbitmap_queue_wake_all(&fdev->admin_sbq);
}

/* The callback for synchronous execution of admin commands. It copies the
 * command response to the caller's buffer and signals completion.
 */
static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data)
{
	const struct fun_admin_rsp_common *rsp_common = rsp;
	struct fun_sync_cmd_ctx *ctx = cb_data;

	if (!ctx)
		return;		/* command issuer timed out and left */
	if (ctx->rsp_buf) {
		unsigned int rsp_len = rsp_common->len8 * 8;

		if (unlikely(rsp_len > ctx->rsp_len)) {
			dev_err(fd->dev,
				"response for op %u is %uB > response buffer %uB\n",
				rsp_common->op, rsp_len, ctx->rsp_len);
			rsp_len = ctx->rsp_len;
		}
		memcpy(ctx->rsp_buf, rsp, rsp_len);
	}
	ctx->rsp_status = rsp_common->ret;
	complete(&ctx->compl);
}

/* Submit a synchronous admin command. */
int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
			      struct fun_admin_req_common *cmd, void *rsp,
			      size_t rspsize, unsigned int timeout)
{
	struct fun_sync_cmd_ctx ctx = {
		.compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl),
		.rsp_buf = rsp,
		.rsp_len = rspsize,
	};
	unsigned int cmdlen = cmd->len8 * 8;
	unsigned long jiffies_left;
	int ret;

	ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx,
				   true);
	if (ret)
		return ret;

	if (!timeout)
		timeout = FUN_ADMIN_CMD_TO_MS;

	jiffies_left = wait_for_completion_timeout(&ctx.compl,
						   msecs_to_jiffies(timeout));
	if (!jiffies_left) {
		/* The command timed out. Attempt to cancel it so we can return.
		 * But if the command is in the process of completing we'll
		 * wait for it.
		 */
		if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) {
			dev_err(fdev->dev, "admin command timed out: %*ph\n",
				cmdlen, cmd);
			fun_admin_stop(fdev);
			/* see if the timeout was due to a queue failure */
			if (fun_adminq_stopped(fdev))
				dev_err(fdev->dev,
					"device does not accept admin commands\n");

			return -ETIMEDOUT;
		}
		wait_for_completion(&ctx.compl);
	}

	if (ctx.rsp_status) {
		dev_err(fdev->dev, "admin command failed, err %d: %*ph\n",
			ctx.rsp_status, cmdlen, cmd);
	}

	return -ctx.rsp_status;
}
EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd);

/* Return the number of device resources of the requested type. */
int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res)
{
	union {
		struct fun_admin_res_count_req req;
		struct fun_admin_res_count_rsp rsp;
	} cmd;
	int rc;

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req));
	cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT,
						    0, 0);

	rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp,
				       sizeof(cmd), 0);
	return rc ? rc : be32_to_cpu(cmd.rsp.count.data);
}
EXPORT_SYMBOL_GPL(fun_get_res_count);

/* Request that the instance of resource @res with the given id be deleted. */
int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
		    unsigned int flags, u32 id)
{
	struct fun_admin_generic_destroy_req req = {
		.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)),
		.destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY,
						       flags, id)
	};

	return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_res_destroy);

/* Bind two entities of the given types and IDs. */
int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
	     unsigned int id0, enum fun_admin_bind_type type1,
	     unsigned int id1)
{
	struct {
		struct fun_admin_bind_req req;
		struct fun_admin_bind_entry entry[2];
	} cmd = {
		.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
							 sizeof(cmd)),
		.entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
		.entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
	};

	return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_bind);

static int fun_get_dev_limits(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);
	unsigned int cq_count, sq_count, num_dbs;
	int rc;

	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
	if (rc < 0)
		return rc;
	cq_count = rc;

	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ);
	if (rc < 0)
		return rc;
	sq_count = rc;

	/* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the
	 * device must provide additional queues.
	 */
	if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
		return -EINVAL;

	/* Calculate the max QID based on SQ/CQ/doorbell counts.
	 * SQ/CQ doorbells alternate.
	 */
	num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) /
		  (fdev->db_stride * 4);
	fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
	fdev->kern_end_qid = fdev->max_qid + 1;
	return 0;
}

/* Allocate all MSI-X vectors available on a function and at least @min_vecs. */
static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs)
{
	int vecs, num_msix = pci_msix_vec_count(pdev);

	if (num_msix < 0)
		return num_msix;
	if (min_vecs > num_msix)
		return -ERANGE;

	vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX);
	if (vecs > 0) {
		dev_info(&pdev->dev,
			 "Allocated %d IRQ vectors of %d requested\n",
			 vecs, num_msix);
	} else {
		dev_err(&pdev->dev,
			"Unable to allocate at least %u IRQ vectors\n",
			min_vecs);
	}
	return vecs;
}

/* Allocate and initialize the IRQ manager state. */
static int fun_alloc_irq_mgr(struct fun_dev *fdev)
{
	fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL);
	if (!fdev->irq_map)
		return -ENOMEM;

	spin_lock_init(&fdev->irqmgr_lock);
	/* mark IRQ 0 allocated, it is used by the admin queue */
	__set_bit(0, fdev->irq_map);
	fdev->irqs_avail = fdev->num_irqs - 1;
	return 0;
}

/* Reserve @nirqs of the currently available IRQs and return their indices. */
int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices)
{
	unsigned int b, n = 0;
	int err = -ENOSPC;

	if (!nirqs)
		return 0;

	spin_lock(&fdev->irqmgr_lock);
	if (nirqs > fdev->irqs_avail)
		goto unlock;

	for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) {
		__set_bit(b, fdev->irq_map);
		irq_indices[n++] = b;
		if (n >= nirqs)
			break;
	}

	WARN_ON(n < nirqs);
	fdev->irqs_avail -= n;
	err = n;
unlock:
	spin_unlock(&fdev->irqmgr_lock);
	return err;
}
EXPORT_SYMBOL(fun_reserve_irqs);

/* Release @nirqs previously allocated IRQS with the supplied indices. */
void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
		      u16 *irq_indices)
{
	unsigned int i;

	spin_lock(&fdev->irqmgr_lock);
	for (i = 0; i < nirqs; i++)
		__clear_bit(irq_indices[i], fdev->irq_map);
	fdev->irqs_avail += nirqs;
	spin_unlock(&fdev->irqmgr_lock);
}
EXPORT_SYMBOL(fun_release_irqs);

static void fun_serv_handler(struct work_struct *work)
{
	struct fun_dev *fd = container_of(work, struct fun_dev, service_task);

	if (test_bit(FUN_SERV_DISABLED, &fd->service_flags))
		return;
	if (fd->serv_cb)
		fd->serv_cb(fd);
}

void fun_serv_stop(struct fun_dev *fd)
{
	set_bit(FUN_SERV_DISABLED, &fd->service_flags);
	cancel_work_sync(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_stop);

void fun_serv_restart(struct fun_dev *fd)
{
	clear_bit(FUN_SERV_DISABLED, &fd->service_flags);
	if (fd->service_flags)
		schedule_work(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_restart);

void fun_serv_sched(struct fun_dev *fd)
{
	if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags))
		schedule_work(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_sched);

/* Check and try to get the device into a proper state for initialization,
 * i.e., CSTS.RDY = CC.EN = 0.
 */
static int sanitize_dev(struct fun_dev *fdev)
{
	int rc;

	fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP);
	fdev->cc_reg = readl(fdev->bar + NVME_REG_CC);

	/* First get RDY to agree with the current EN. Give RDY the opportunity
	 * to complete a potential recent EN change.
	 */
	rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE);
	if (rc)
		return rc;

	/* Next, reset the device if EN is currently 1. */
	if (fdev->cc_reg & NVME_CC_ENABLE)
		rc = fun_disable_ctrl(fdev);

	return rc;
}

/* Undo the device initialization of fun_dev_enable(). */
void fun_dev_disable(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);

	pci_set_drvdata(pdev, NULL);

	if (fdev->fw_handle != FUN_HCI_ID_INVALID) {
		fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0,
				fdev->fw_handle);
		fdev->fw_handle = FUN_HCI_ID_INVALID;
	}

	fun_disable_admin_queue(fdev);

	bitmap_free(fdev->irq_map);
	pci_free_irq_vectors(pdev);

	pci_clear_master(pdev);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	fun_unmap_bars(fdev);
}
EXPORT_SYMBOL(fun_dev_disable);

/* Perform basic initialization of a device, including
 * - PCI config space setup and BAR0 mapping
 * - interrupt management initialization
 * - 1 admin queue setup
 * - determination of some device limits, such as number of queues.
 */
int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
		   const struct fun_dev_params *areq, const char *name)
{
	int rc;

	fdev->dev = &pdev->dev;
	rc = fun_map_bars(fdev, name);
	if (rc)
		return rc;

	rc = fun_set_dma_masks(fdev->dev);
	if (rc)
		goto unmap;

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc);
		goto unmap;
	}

	pci_enable_pcie_error_reporting(pdev);

	rc = sanitize_dev(fdev);
	if (rc)
		goto disable_dev;

	fdev->fw_handle = FUN_HCI_ID_INVALID;
	fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1;
	fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg);
	fdev->dbs = fdev->bar + NVME_REG_DBS;

	INIT_WORK(&fdev->service_task, fun_serv_handler);
	fdev->service_flags = FUN_SERV_DISABLED;
	fdev->serv_cb = areq->serv_cb;

	rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for admin CQ */
	if (rc < 0)
		goto disable_dev;
	fdev->num_irqs = rc;

	rc = fun_alloc_irq_mgr(fdev);
	if (rc)
		goto free_irqs;

	pci_set_master(pdev);
	rc = fun_enable_admin_queue(fdev, areq);
	if (rc)
		goto free_irq_mgr;

	rc = fun_get_dev_limits(fdev);
	if (rc < 0)
		goto disable_admin;

	pci_save_state(pdev);
	pci_set_drvdata(pdev, fdev);
	pcie_print_link_status(pdev);
	dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n",
		fdev->q_depth, fdev->db_stride, fdev->max_qid,
		fdev->kern_end_qid);
	return 0;

disable_admin:
	fun_disable_admin_queue(fdev);
free_irq_mgr:
	pci_clear_master(pdev);
	bitmap_free(fdev->irq_map);
free_irqs:
	pci_free_irq_vectors(pdev);
disable_dev:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
unmap:
	fun_unmap_bars(fdev);
	return rc;
}
EXPORT_SYMBOL(fun_dev_enable);

MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
MODULE_DESCRIPTION("Core services driver for Fungible devices");
MODULE_LICENSE("Dual BSD/GPL");
drivers/net/ethernet/fungible/funcore/fun_dev.h (new file, 150 lines)
@@ -0,0 +1,150 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUNDEV_H
#define _FUNDEV_H

#include <linux/sbitmap.h>
#include <linux/spinlock_types.h>
#include <linux/workqueue.h>
#include "fun_hci.h"

struct pci_dev;
struct fun_dev;
struct fun_queue;
struct fun_cmd_ctx;
struct fun_queue_alloc_req;

/* doorbell fields */
enum {
	FUN_DB_QIDX_S = 0,
	FUN_DB_INTCOAL_ENTRIES_S = 16,
	FUN_DB_INTCOAL_ENTRIES_M = 0x7f,
	FUN_DB_INTCOAL_USEC_S = 23,
	FUN_DB_INTCOAL_USEC_M = 0x7f,
	FUN_DB_IRQ_S = 30,
	FUN_DB_IRQ_F = 1 << FUN_DB_IRQ_S,
	FUN_DB_IRQ_ARM_S = 31,
	FUN_DB_IRQ_ARM_F = 1U << FUN_DB_IRQ_ARM_S
};

/* Callback for asynchronous admin commands.
 * Invoked on reception of command response.
 */
typedef void (*fun_admin_callback_t)(struct fun_dev *fdev, void *rsp,
				     void *cb_data);

/* Callback for events/notifications received by an admin queue. */
typedef void (*fun_admin_event_cb)(struct fun_dev *fdev, void *cqe);

/* Callback for pending work handled by the service task. */
typedef void (*fun_serv_cb)(struct fun_dev *fd);

/* service task flags */
enum {
	FUN_SERV_DISABLED,	/* service task is disabled */
	FUN_SERV_FIRST_AVAIL
};

/* Driver state associated with a PCI function. */
struct fun_dev {
	struct device *dev;

	void __iomem *bar;	/* start of BAR0 mapping */
	u32 __iomem *dbs;	/* start of doorbells in BAR0 mapping */

	/* admin queue */
	struct fun_queue *admin_q;
	struct sbitmap_queue admin_sbq;
	struct fun_cmd_ctx *cmd_ctx;
	fun_admin_event_cb adminq_cb;
	bool suppress_cmds;	/* if set don't write commands to SQ */

	/* address increment between consecutive doorbells, in 4B units */
	unsigned int db_stride;

	/* SW versions of device registers */
	u32 cc_reg;		/* CC register */
	u64 cap_reg;		/* CAPability register */

	unsigned int q_depth;	/* max queue depth supported by device */
	unsigned int max_qid;	/* = #queues - 1, separately for SQs and CQs */
	unsigned int kern_end_qid; /* last qid in the kernel range + 1 */

	unsigned int fw_handle;

	/* IRQ manager */
	unsigned int num_irqs;
	unsigned int irqs_avail;
	spinlock_t irqmgr_lock;
	unsigned long *irq_map;

	/* The service task handles work that needs a process context */
	struct work_struct service_task;
	unsigned long service_flags;
	fun_serv_cb serv_cb;
};

struct fun_dev_params {
	u8  cqe_size_log2;	/* admin q CQE size */
	u8  sqe_size_log2;	/* admin q SQE size */

	/* admin q depths */
	u16 cq_depth;
	u16 sq_depth;
	u16 rq_depth;

	u16 min_msix;		/* min vectors needed by requesting driver */

	fun_admin_event_cb event_cb;
	fun_serv_cb serv_cb;
};

/* Return the BAR address of a doorbell. */
static inline u32 __iomem *fun_db_addr(const struct fun_dev *fdev,
				       unsigned int db_index)
{
	return &fdev->dbs[db_index * fdev->db_stride];
}

/* Return the BAR address of an SQ doorbell. SQ and CQ DBs alternate,
 * SQs have even DB indices.
 */
static inline u32 __iomem *fun_sq_db_addr(const struct fun_dev *fdev,
					  unsigned int sqid)
{
	return fun_db_addr(fdev, sqid * 2);
}

static inline u32 __iomem *fun_cq_db_addr(const struct fun_dev *fdev,
					  unsigned int cqid)
{
	return fun_db_addr(fdev, cqid * 2 + 1);
}

int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res);
int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
		    unsigned int flags, u32 id);
int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
	     unsigned int id0, enum fun_admin_bind_type type1,
	     unsigned int id1);

int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
			 fun_admin_callback_t cb, void *cb_data, bool wait_ok);
int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
			      struct fun_admin_req_common *cmd, void *rsp,
			      size_t rspsize, unsigned int timeout);

int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
		   const struct fun_dev_params *areq, const char *name);
void fun_dev_disable(struct fun_dev *fdev);

int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs,
		     u16 *irq_indices);
void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
		      u16 *irq_indices);

void fun_serv_stop(struct fun_dev *fd);
void fun_serv_restart(struct fun_dev *fd);
void fun_serv_sched(struct fun_dev *fd);

#endif /* _FUNDEV_H */
drivers/net/ethernet/fungible/funcore/fun_hci.h (new file, 1202 lines)
(diff suppressed because the file is too large)
drivers/net/ethernet/fungible/funcore/fun_queue.c (new file, 601 lines)
@@ -0,0 +1,601 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "fun_dev.h"
#include "fun_queue.h"

/* Allocate memory for a queue. This includes the memory for the HW descriptor
 * ring, an optional 64b HW write-back area, and an optional SW state ring.
 * Returns the virtual and DMA addresses of the HW ring, the VA of the SW ring,
 * and the VA of the write-back area.
 */
void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
			 size_t hw_desc_sz, size_t sw_desc_sz, bool wb,
			 int numa_node, dma_addr_t *dma_addr, void **sw_va,
			 volatile __be64 **wb_va)
{
	int dev_node = dev_to_node(dma_dev);
	size_t dma_sz;
	void *va;

	if (numa_node == NUMA_NO_NODE)
		numa_node = dev_node;

	/* Place optional write-back area at end of descriptor ring. */
	dma_sz = hw_desc_sz * depth;
	if (wb)
		dma_sz += sizeof(u64);

	set_dev_node(dma_dev, numa_node);
	va = dma_alloc_coherent(dma_dev, dma_sz, dma_addr, GFP_KERNEL);
	set_dev_node(dma_dev, dev_node);
	if (!va)
		return NULL;

	if (sw_desc_sz) {
		*sw_va = kvzalloc_node(sw_desc_sz * depth, GFP_KERNEL,
				       numa_node);
		if (!*sw_va) {
			dma_free_coherent(dma_dev, dma_sz, va, *dma_addr);
			return NULL;
		}
	}

	if (wb)
		*wb_va = va + dma_sz - sizeof(u64);
	return va;
}
EXPORT_SYMBOL_GPL(fun_alloc_ring_mem);

void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
		       bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va)
{
	if (hw_va) {
		size_t sz = depth * hw_desc_sz;

		if (wb)
			sz += sizeof(u64);
		dma_free_coherent(dma_dev, sz, hw_va, dma_addr);
	}
	kvfree(sw_va);
}
EXPORT_SYMBOL_GPL(fun_free_ring_mem);

/* Prepare and issue an admin command to create an SQ on the device with the
 * provided parameters. If the queue ID is auto-allocated by the device it is
 * returned in *sqidp.
 */
int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
		  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
		  u8 coal_nentries, u8 coal_usec, u32 irq_num,
		  u32 scan_start_id, u32 scan_end_id,
		  u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp)
{
	union {
		struct fun_admin_epsq_req req;
		struct fun_admin_generic_create_rsp rsp;
	} cmd;
	dma_addr_t wb_addr;
	u32 hw_qid;
	int rc;

	if (sq_depth > fdev->q_depth)
		return -EINVAL;
	if (flags & FUN_ADMIN_EPSQ_CREATE_FLAG_RQ)
		sqe_size_log2 = ilog2(sizeof(struct fun_eprq_rqbuf));

	wb_addr = dma_addr + (sq_depth << sqe_size_log2);

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPSQ,
						    sizeof(cmd.req));
	cmd.req.u.create =
		FUN_ADMIN_EPSQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
					       sqid, cqid, sqe_size_log2,
					       sq_depth - 1, dma_addr, 0,
					       coal_nentries, coal_usec,
					       irq_num, scan_start_id,
					       scan_end_id, 0,
					       rq_buf_size_log2,
					       ilog2(sizeof(u64)), wb_addr);

	rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
				       &cmd.rsp, sizeof(cmd.rsp), 0);
	if (rc)
		return rc;

	hw_qid = be32_to_cpu(cmd.rsp.id);
	*dbp = fun_sq_db_addr(fdev, hw_qid);
	if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
		*sqidp = hw_qid;
	return rc;
}
EXPORT_SYMBOL_GPL(fun_sq_create);

/* Prepare and issue an admin command to create a CQ on the device with the
 * provided parameters. If the queue ID is auto-allocated by the device it is
 * returned in *cqidp.
 */
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
		  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
		  u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
		  u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 *cqidp,
		  u32 __iomem **dbp)
{
	union {
		struct fun_admin_epcq_req req;
		struct fun_admin_generic_create_rsp rsp;
	} cmd;
	u32 hw_qid;
	int rc;

	if (cq_depth > fdev->q_depth)
		return -EINVAL;

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
						    sizeof(cmd.req));
	cmd.req.u.create =
		FUN_ADMIN_EPCQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
					       cqid, rqid, cqe_size_log2,
					       cq_depth - 1, dma_addr, tailroom,
					       headroom / 2, 0, coal_nentries,
					       coal_usec, irq_num,
					       scan_start_id, scan_end_id, 0);

	rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
				       &cmd.rsp, sizeof(cmd.rsp), 0);
	if (rc)
		return rc;

	hw_qid = be32_to_cpu(cmd.rsp.id);
	*dbp = fun_cq_db_addr(fdev, hw_qid);
	if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
		*cqidp = hw_qid;
	return rc;
}
EXPORT_SYMBOL_GPL(fun_cq_create);

static bool fun_sq_is_head_wb(const struct fun_queue *funq)
{
	return funq->sq_flags & FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS;
}

static void fun_clean_rq(struct fun_queue *funq)
{
	struct fun_dev *fdev = funq->fdev;
	struct fun_rq_info *rqinfo;
	unsigned int i;

	for (i = 0; i < funq->rq_depth; i++) {
		rqinfo = &funq->rq_info[i];
		if (rqinfo->page) {
			dma_unmap_page(fdev->dev, rqinfo->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			put_page(rqinfo->page);
			rqinfo->page = NULL;
		}
	}
}

static int fun_fill_rq(struct fun_queue *funq)
{
	struct device *dev = funq->fdev->dev;
	int i, node = dev_to_node(dev);
	struct fun_rq_info *rqinfo;

	for (i = 0; i < funq->rq_depth; i++) {
		rqinfo = &funq->rq_info[i];
		rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0);
		if (unlikely(!rqinfo->page))
			return -ENOMEM;

		rqinfo->dma = dma_map_page(dev, rqinfo->page, 0,
					   PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, rqinfo->dma))) {
			put_page(rqinfo->page);
			rqinfo->page = NULL;
			return -ENOMEM;
		}

		funq->rqes[i] = FUN_EPRQ_RQBUF_INIT(rqinfo->dma);
	}

	funq->rq_tail = funq->rq_depth - 1;
	return 0;
}

static void fun_rq_update_pos(struct fun_queue *funq, int buf_offset)
{
	if (buf_offset <= funq->rq_buf_offset) {
		struct fun_rq_info *rqinfo = &funq->rq_info[funq->rq_buf_idx];
		struct device *dev = funq->fdev->dev;

		dma_sync_single_for_device(dev, rqinfo->dma, PAGE_SIZE,
					   DMA_FROM_DEVICE);
		funq->num_rqe_to_fill++;
		if (++funq->rq_buf_idx == funq->rq_depth)
			funq->rq_buf_idx = 0;
	}
	funq->rq_buf_offset = buf_offset;
}

/* Given a command response with data scattered across >= 1 RQ buffers return
 * a pointer to a contiguous buffer containing all the data. If the data is in
 * one RQ buffer the start address within that buffer is returned, otherwise a
 * new buffer is allocated and the data is gathered into it.
 */
static void *fun_data_from_rq(struct fun_queue *funq,
			      const struct fun_rsp_common *rsp, bool *need_free)
{
	u32 bufoff, total_len, remaining, fragsize, dataoff;
	struct device *dma_dev = funq->fdev->dev;
	const struct fun_dataop_rqbuf *databuf;
	const struct fun_dataop_hdr *dataop;
	const struct fun_rq_info *rqinfo;
	void *data;

	dataop = (void *)rsp + rsp->suboff8 * 8;
	total_len = be32_to_cpu(dataop->total_len);

	if (likely(dataop->nsgl == 1)) {
		databuf = (struct fun_dataop_rqbuf *)dataop->imm;
		bufoff = be32_to_cpu(databuf->bufoff);
		fun_rq_update_pos(funq, bufoff);
		rqinfo = &funq->rq_info[funq->rq_buf_idx];
		dma_sync_single_for_cpu(dma_dev, rqinfo->dma + bufoff,
					total_len, DMA_FROM_DEVICE);
		*need_free = false;
		return page_address(rqinfo->page) + bufoff;
	}

	/* For scattered completions gather the fragments into one buffer. */

	data = kmalloc(total_len, GFP_ATOMIC);
	/* NULL is OK here. In case of failure we still need to consume the data
	 * for proper buffer accounting but indicate an error in the response.
	 */
	if (likely(data))
		*need_free = true;

	dataoff = 0;
	for (remaining = total_len; remaining; remaining -= fragsize) {
		fun_rq_update_pos(funq, 0);
		fragsize = min_t(unsigned int, PAGE_SIZE, remaining);
		if (data) {
			rqinfo = &funq->rq_info[funq->rq_buf_idx];
			dma_sync_single_for_cpu(dma_dev, rqinfo->dma, fragsize,
						DMA_FROM_DEVICE);
			memcpy(data + dataoff, page_address(rqinfo->page),
			       fragsize);
			dataoff += fragsize;
		}
	}
	return data;
}

unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max)
{
	const struct fun_cqe_info *info;
	struct fun_rsp_common *rsp;
	unsigned int new_cqes;
	u16 sf_p, flags;
	bool need_free;
	void *cqe;

	if (!max)
		max = funq->cq_depth - 1;

	for (new_cqes = 0; new_cqes < max; new_cqes++) {
		cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2);
		info = funq_cqe_info(funq, cqe);
		sf_p = be16_to_cpu(info->sf_p);

		if ((sf_p & 1) != funq->cq_phase)
			break;

		/* ensure the phase tag is read before other CQE fields */
		dma_rmb();

		if (++funq->cq_head == funq->cq_depth) {
			funq->cq_head = 0;
			funq->cq_phase = !funq->cq_phase;
		}

		rsp = cqe;
		flags = be16_to_cpu(rsp->flags);

		need_free = false;
		if (unlikely(flags & FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF)) {
			rsp = fun_data_from_rq(funq, rsp, &need_free);
			if (!rsp) {
				rsp = cqe;
				rsp->len8 = 1;
				if (rsp->ret == 0)
					rsp->ret = ENOMEM;
			}
		}

		if (funq->cq_cb)
			funq->cq_cb(funq, funq->cb_data, rsp, info);
		if (need_free)
			kfree(rsp);
	}

	dev_dbg(funq->fdev->dev, "CQ %u, new CQEs %u/%u, head %u, phase %u\n",
		funq->cqid, new_cqes, max, funq->cq_head, funq->cq_phase);
	return new_cqes;
}

unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max)
{
	unsigned int processed;
	u32 db;

	processed = __fun_process_cq(funq, max);

	if (funq->num_rqe_to_fill) {
		funq->rq_tail = (funq->rq_tail + funq->num_rqe_to_fill) %
				funq->rq_depth;
		funq->num_rqe_to_fill = 0;
		writel(funq->rq_tail, funq->rq_db);
	}

	db = funq->cq_head | FUN_DB_IRQ_ARM_F;
	writel(db, funq->cq_db);
	return processed;
}

static int fun_alloc_sqes(struct fun_queue *funq)
{
	funq->sq_cmds = fun_alloc_ring_mem(funq->fdev->dev, funq->sq_depth,
					   1 << funq->sqe_size_log2, 0,
					   fun_sq_is_head_wb(funq),
					   NUMA_NO_NODE, &funq->sq_dma_addr,
					   NULL, &funq->sq_head);
	return funq->sq_cmds ? 0 : -ENOMEM;
}

static int fun_alloc_cqes(struct fun_queue *funq)
{
	funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,
					1 << funq->cqe_size_log2, 0, false,
					NUMA_NO_NODE, &funq->cq_dma_addr, NULL,
					NULL);
	return funq->cqes ? 0 : -ENOMEM;
}

static int fun_alloc_rqes(struct fun_queue *funq)
{
	funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth,
					sizeof(*funq->rqes),
					sizeof(*funq->rq_info), false,
					NUMA_NO_NODE, &funq->rq_dma_addr,
					(void **)&funq->rq_info, NULL);
	return funq->rqes ? 0 : -ENOMEM;
}

/* Free a queue's structures. */
void fun_free_queue(struct fun_queue *funq)
{
	struct device *dev = funq->fdev->dev;

	fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false,
			  funq->cqes, funq->cq_dma_addr, NULL);
	fun_free_ring_mem(dev, funq->sq_depth, 1 << funq->sqe_size_log2,
			  fun_sq_is_head_wb(funq), funq->sq_cmds,
			  funq->sq_dma_addr, NULL);

	if (funq->rqes) {
		fun_clean_rq(funq);
		fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),
				  false, funq->rqes, funq->rq_dma_addr,
				  funq->rq_info);
	}

	kfree(funq);
}

/* Allocate and initialize a funq's structures. */
struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
				  const struct fun_queue_alloc_req *req)
{
	struct fun_queue *funq = kzalloc(sizeof(*funq), GFP_KERNEL);

	if (!funq)
		return NULL;

	funq->fdev = fdev;
	spin_lock_init(&funq->sq_lock);

	funq->qid = qid;

	/* Initial CQ/SQ/RQ ids */
	if (req->rq_depth) {
		funq->cqid = 2 * qid;
		if (funq->qid) {
			/* I/O Q: use rqid = cqid, sqid = +1 */
			funq->rqid = funq->cqid;
			funq->sqid = funq->rqid + 1;
		} else {
			/* Admin Q: sqid is always 0, use ID 1 for RQ */
			funq->sqid = 0;
			funq->rqid = 1;
		}
	} else {
		funq->cqid = qid;
		funq->sqid = qid;
	}

	funq->cq_flags = req->cq_flags;
	funq->sq_flags = req->sq_flags;

	funq->cqe_size_log2 = req->cqe_size_log2;
	funq->sqe_size_log2 = req->sqe_size_log2;

	funq->cq_depth = req->cq_depth;
	funq->sq_depth = req->sq_depth;

	funq->cq_intcoal_nentries = req->cq_intcoal_nentries;
	funq->cq_intcoal_usec = req->cq_intcoal_usec;

	funq->sq_intcoal_nentries = req->sq_intcoal_nentries;
	funq->sq_intcoal_usec = req->sq_intcoal_usec;

	if (fun_alloc_cqes(funq))
		goto free_funq;

	funq->cq_phase = 1;

	if (fun_alloc_sqes(funq))
		goto free_funq;

	if (req->rq_depth) {
		funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ;
		funq->rq_depth = req->rq_depth;
		funq->rq_buf_offset = -1;

		if (fun_alloc_rqes(funq) || fun_fill_rq(funq))
			goto free_funq;
	}

	funq->cq_vector = -1;
	funq->cqe_info_offset = (1 << funq->cqe_size_log2) - sizeof(struct fun_cqe_info);

	/* SQ/CQ 0 are implicitly created, assign their doorbells now.
	 * Other queues are assigned doorbells at their explicit creation.
	 */
	if (funq->sqid == 0)
		funq->sq_db = fun_sq_db_addr(fdev, 0);
	if (funq->cqid == 0)
		funq->cq_db = fun_cq_db_addr(fdev, 0);

	return funq;

free_funq:
	fun_free_queue(funq);
	return NULL;
}

/* Create a funq's CQ on the device. */
static int fun_create_cq(struct fun_queue *funq)
{
	struct fun_dev *fdev = funq->fdev;
	unsigned int rqid;
	int rc;

	rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ?
		funq->rqid : FUN_HCI_ID_INVALID;
	rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid,
			   funq->cqe_size_log2, funq->cq_depth,
			   funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries,
			   funq->cq_intcoal_usec, funq->cq_vector, 0, 0,
			   &funq->cqid, &funq->cq_db);
	if (!rc)
		dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid);

	return rc;
}

/* Create a funq's SQ on the device. */
static int fun_create_sq(struct fun_queue *funq)
{
	struct fun_dev *fdev = funq->fdev;
	int rc;

	rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid,
			   funq->sqe_size_log2, funq->sq_depth,
			   funq->sq_dma_addr, funq->sq_intcoal_nentries,
			   funq->sq_intcoal_usec, funq->cq_vector, 0, 0,
			   0, &funq->sqid, &funq->sq_db);
	if (!rc)
		dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid);

	return rc;
}

/* Create a funq's RQ on the device. */
int fun_create_rq(struct fun_queue *funq)
{
	struct fun_dev *fdev = funq->fdev;
	int rc;

	rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0,
			   funq->rq_depth, funq->rq_dma_addr, 0, 0,
			   funq->cq_vector, 0, 0, PAGE_SHIFT, &funq->rqid,
			   &funq->rq_db);
	if (!rc)
		dev_dbg(fdev->dev, "created RQ %u\n", funq->rqid);

	return rc;
}

static unsigned int funq_irq(struct fun_queue *funq)
{
	return pci_irq_vector(to_pci_dev(funq->fdev->dev), funq->cq_vector);
}

int fun_request_irq(struct fun_queue *funq, const char *devname,
		    irq_handler_t handler, void *data)
{
	int rc;

	if (funq->cq_vector < 0)
		return -EINVAL;

	funq->irq_handler = handler;
	funq->irq_data = data;

	snprintf(funq->irqname, sizeof(funq->irqname),
		 funq->qid ? "%s-q[%d]" : "%s-adminq", devname, funq->qid);

	rc = request_irq(funq_irq(funq), handler, 0, funq->irqname, data);
	if (rc)
		funq->irq_handler = NULL;

	return rc;
}

/* Create all component queues of a funq on the device. */
int fun_create_queue(struct fun_queue *funq)
{
	int rc;

	rc = fun_create_cq(funq);
	if (rc)
		return rc;

	if (funq->rq_depth) {
		rc = fun_create_rq(funq);
		if (rc)
			goto release_cq;
	}

	rc = fun_create_sq(funq);
	if (rc)
		goto release_rq;

	return 0;

release_rq:
	fun_destroy_sq(funq->fdev, funq->rqid);
release_cq:
	fun_destroy_cq(funq->fdev, funq->cqid);
	return rc;
}

void fun_free_irq(struct fun_queue *funq)
{
	if (funq->irq_handler) {
		unsigned int vector = funq_irq(funq);

		free_irq(vector, funq->irq_data);
		funq->irq_handler = NULL;
		funq->irq_data = NULL;
	}
}
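To tie the helpers in fun_queue.c together with the IRQ reservation API from fun_dev.c, here is a hedged sketch of how a client driver might create and service one SQ/CQ pair. It is illustrative only: the my_queue_irq()/my_create_queue() names, the queue sizes, and the entry sizes are assumptions for the example, not values taken from this series.

```c
/*
 * Illustrative sketch only: create one SQ/CQ pair with the fun_queue.c
 * helpers and service its completions from an IRQ. The my_* names and the
 * sizes below are assumptions for the example.
 */
static irqreturn_t my_queue_irq(int irq, void *data)
{
	/* fun_process_cq() drains new CQEs, refills any RQ, and re-arms the CQ. */
	return fun_process_cq(data, 0) ? IRQ_HANDLED : IRQ_NONE;
}

static struct fun_queue *my_create_queue(struct fun_dev *fdev, int qid)
{
	struct fun_queue_alloc_req qreq = {
		.cqe_size_log2 = 6,	/* 64B CQEs (assumed) */
		.sqe_size_log2 = 6,	/* 64B SQEs (assumed) */
		.cq_depth = 256,
		.sq_depth = 256,
	};
	struct fun_queue *funq;
	u16 irq_idx;
	int rc;

	funq = fun_alloc_queue(fdev, qid, &qreq);	/* host memory only */
	if (!funq)
		return NULL;

	rc = fun_reserve_irqs(fdev, 1, &irq_idx);	/* pick a free device IRQ index */
	if (rc < 0)
		goto free_queue;
	funq->cq_vector = irq_idx;

	rc = fun_request_irq(funq, dev_name(fdev->dev), my_queue_irq, funq);
	if (rc)
		goto release_irq;

	rc = fun_create_queue(funq);			/* admin commands to the device */
	if (rc)
		goto free_irq;

	return funq;

free_irq:
	fun_free_irq(funq);
release_irq:
	fun_release_irqs(fdev, 1, &irq_idx);
free_queue:
	fun_free_queue(funq);
	return NULL;
}
```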
drivers/net/ethernet/fungible/funcore/fun_queue.h (new file, 175 lines)
@@ -0,0 +1,175 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUN_QEUEUE_H
#define _FUN_QEUEUE_H

#include <linux/interrupt.h>
#include <linux/io.h>

struct device;
struct fun_dev;
struct fun_queue;
struct fun_cqe_info;
struct fun_rsp_common;

typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
			      const struct fun_cqe_info *info);

struct fun_rq_info {
	dma_addr_t dma;
	struct page *page;
};

/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
struct fun_queue {
	struct fun_dev *fdev;
	spinlock_t sq_lock;

	dma_addr_t cq_dma_addr;
	dma_addr_t sq_dma_addr;
	dma_addr_t rq_dma_addr;

	u32 __iomem *cq_db;
	u32 __iomem *sq_db;
	u32 __iomem *rq_db;

	void *cqes;
	void *sq_cmds;
	struct fun_eprq_rqbuf *rqes;
	struct fun_rq_info *rq_info;

	u32 cqid;
	u32 sqid;
	u32 rqid;

	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	u16 cq_head;
	u16 sq_tail;
	u16 rq_tail;

	u8 cqe_size_log2;
	u8 sqe_size_log2;

	u16 cqe_info_offset;

	u16 rq_buf_idx;
	int rq_buf_offset;
	u16 num_rqe_to_fill;

	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;

	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	/* SQ head writeback */
	u16 sq_comp;

	volatile __be64 *sq_head;

	cq_callback_t cq_cb;
	void *cb_data;

	irq_handler_t irq_handler;
	void *irq_data;
	s16 cq_vector;
	u8 cq_phase;

	/* I/O q index */
	u16 qid;

	char irqname[24];
};

static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
{
	return funq->sq_cmds + (pos << funq->sqe_size_log2);
}

static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
{
	if (++tail == funq->sq_depth)
		tail = 0;
	funq->sq_tail = tail;
	writel(tail, funq->sq_db);
}

static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
						 void *cqe)
{
	return cqe + funq->cqe_info_offset;
}

static inline void funq_rq_post(struct fun_queue *funq)
{
	writel(funq->rq_tail, funq->rq_db);
}

struct fun_queue_alloc_req {
	u8 cqe_size_log2;
	u8 sqe_size_log2;

	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;
};

int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
		  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
		  u8 coal_nentries, u8 coal_usec, u32 irq_num,
		  u32 scan_start_id, u32 scan_end_id,
		  u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
		  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
		  u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
		  u32 irq_num, u32 scan_start_id, u32 scan_end_id,
		  u32 *cqidp, u32 __iomem **dbp);
void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
			 size_t hw_desc_sz, size_t sw_desc_size, bool wb,
			 int numa_node, dma_addr_t *dma_addr, void **sw_va,
			 volatile __be64 **wb_va);
void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
		       bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);

#define fun_destroy_sq(fdev, sqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
#define fun_destroy_cq(fdev, cqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))

struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
				  const struct fun_queue_alloc_req *req);
void fun_free_queue(struct fun_queue *funq);

static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
				       void *cb_data)
{
	funq->cq_cb = cb;
	funq->cb_data = cb_data;
}

int fun_create_rq(struct fun_queue *funq);
int fun_create_queue(struct fun_queue *funq);

void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
		    irq_handler_t handler, void *data);

unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);

#endif /* _FUN_QEUEUE_H */
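Editor's sketch: fun_sqe_at() and funq_sq_post_tail() above are meant to be paired under sq_lock when a caller submits a command. A minimal illustration, assuming the caller provides a fully formatted command that fits in one SQE; the helper name is hypothetical.

/* Illustrative sketch only: reserve the next SQE, copy the command in, and
 * ring the SQ doorbell. cmd and len are caller-provided; len must not exceed
 * 1 << sqe_size_log2.
 */
static void post_one_cmd(struct fun_queue *funq, const void *cmd, size_t len)
{
	void *sqe;

	spin_lock(&funq->sq_lock);
	sqe = fun_sqe_at(funq, funq->sq_tail);	/* next free slot */
	memcpy(sqe, cmd, len);
	funq_sq_post_tail(funq, funq->sq_tail);	/* advance tail + doorbell */
	spin_unlock(&funq->sq_lock);
}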
drivers/net/ethernet/fungible/funeth/Kconfig (new file, 17 lines)
@@ -0,0 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Fungible Ethernet driver configuration
#

config FUN_ETH
	tristate "Fungible Ethernet device driver"
	depends on PCI && PCI_MSI
	depends on TLS && TLS_DEVICE || TLS_DEVICE=n
	select NET_DEVLINK
	select FUN_CORE
	help
	  This driver supports the Ethernet functionality of Fungible adapters.
	  It works with both physical and virtual functions.

	  To compile this driver as a module, choose M here. The module
	  will be called funeth.
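For reference, a minimal .config fragment that builds the library and the Ethernet driver as modules could look like the following; CONFIG_FUN_CORE is provided by the funcore Kconfig and is selected automatically by FUN_ETH, so listing it explicitly is optional.

CONFIG_NET_VENDOR_FUNGIBLE=y
CONFIG_FUN_CORE=m
CONFIG_FUN_ETH=m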
drivers/net/ethernet/fungible/funeth/Makefile (new file, 10 lines)
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src)

obj-$(CONFIG_FUN_ETH) += funeth.o

funeth-y := funeth_main.o funeth_rx.o funeth_tx.o funeth_devlink.o \
	    funeth_ethtool.o

funeth-$(CONFIG_TLS_DEVICE) += funeth_ktls.o
drivers/net/ethernet/fungible/funeth/fun_port.h (new file, 97 lines)
@@ -0,0 +1,97 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUN_PORT_H
#define _FUN_PORT_H

enum port_mac_rx_stats {
	PORT_MAC_RX_etherStatsOctets = 0x0,
	PORT_MAC_RX_OctetsReceivedOK = 0x1,
	PORT_MAC_RX_aAlignmentErrors = 0x2,
	PORT_MAC_RX_aPAUSEMACCtrlFramesReceived = 0x3,
	PORT_MAC_RX_aFrameTooLongErrors = 0x4,
	PORT_MAC_RX_aInRangeLengthErrors = 0x5,
	PORT_MAC_RX_aFramesReceivedOK = 0x6,
	PORT_MAC_RX_aFrameCheckSequenceErrors = 0x7,
	PORT_MAC_RX_VLANReceivedOK = 0x8,
	PORT_MAC_RX_ifInErrors = 0x9,
	PORT_MAC_RX_ifInUcastPkts = 0xa,
	PORT_MAC_RX_ifInMulticastPkts = 0xb,
	PORT_MAC_RX_ifInBroadcastPkts = 0xc,
	PORT_MAC_RX_etherStatsDropEvents = 0xd,
	PORT_MAC_RX_etherStatsPkts = 0xe,
	PORT_MAC_RX_etherStatsUndersizePkts = 0xf,
	PORT_MAC_RX_etherStatsPkts64Octets = 0x10,
	PORT_MAC_RX_etherStatsPkts65to127Octets = 0x11,
	PORT_MAC_RX_etherStatsPkts128to255Octets = 0x12,
	PORT_MAC_RX_etherStatsPkts256to511Octets = 0x13,
	PORT_MAC_RX_etherStatsPkts512to1023Octets = 0x14,
	PORT_MAC_RX_etherStatsPkts1024to1518Octets = 0x15,
	PORT_MAC_RX_etherStatsPkts1519toMaxOctets = 0x16,
	PORT_MAC_RX_etherStatsOversizePkts = 0x17,
	PORT_MAC_RX_etherStatsJabbers = 0x18,
	PORT_MAC_RX_etherStatsFragments = 0x19,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_0 = 0x1a,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_1 = 0x1b,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_2 = 0x1c,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_3 = 0x1d,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_4 = 0x1e,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_5 = 0x1f,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_6 = 0x20,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_7 = 0x21,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_8 = 0x22,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_9 = 0x23,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_10 = 0x24,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_11 = 0x25,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_12 = 0x26,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_13 = 0x27,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_14 = 0x28,
	PORT_MAC_RX_CBFCPAUSEFramesReceived_15 = 0x29,
	PORT_MAC_RX_MACControlFramesReceived = 0x2a,
	PORT_MAC_RX_STATS_MAX = 0x2b,
};

enum port_mac_tx_stats {
	PORT_MAC_TX_etherStatsOctets = 0x0,
	PORT_MAC_TX_OctetsTransmittedOK = 0x1,
	PORT_MAC_TX_aPAUSEMACCtrlFramesTransmitted = 0x2,
	PORT_MAC_TX_aFramesTransmittedOK = 0x3,
	PORT_MAC_TX_VLANTransmittedOK = 0x4,
	PORT_MAC_TX_ifOutErrors = 0x5,
	PORT_MAC_TX_ifOutUcastPkts = 0x6,
	PORT_MAC_TX_ifOutMulticastPkts = 0x7,
	PORT_MAC_TX_ifOutBroadcastPkts = 0x8,
	PORT_MAC_TX_etherStatsPkts64Octets = 0x9,
	PORT_MAC_TX_etherStatsPkts65to127Octets = 0xa,
	PORT_MAC_TX_etherStatsPkts128to255Octets = 0xb,
	PORT_MAC_TX_etherStatsPkts256to511Octets = 0xc,
	PORT_MAC_TX_etherStatsPkts512to1023Octets = 0xd,
	PORT_MAC_TX_etherStatsPkts1024to1518Octets = 0xe,
	PORT_MAC_TX_etherStatsPkts1519toMaxOctets = 0xf,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_0 = 0x10,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_1 = 0x11,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_2 = 0x12,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_3 = 0x13,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_4 = 0x14,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_5 = 0x15,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_6 = 0x16,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_7 = 0x17,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_8 = 0x18,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_9 = 0x19,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_10 = 0x1a,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_11 = 0x1b,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_12 = 0x1c,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_13 = 0x1d,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_14 = 0x1e,
	PORT_MAC_TX_CBFCPAUSEFramesTransmitted_15 = 0x1f,
	PORT_MAC_TX_MACControlFramesTransmitted = 0x20,
	PORT_MAC_TX_etherStatsPkts = 0x21,
	PORT_MAC_TX_STATS_MAX = 0x22,
};

enum port_mac_fec_stats {
	PORT_MAC_FEC_Correctable = 0x0,
	PORT_MAC_FEC_Uncorrectable = 0x1,
	PORT_MAC_FEC_STATS_MAX = 0x2,
};

#endif /* _FUN_PORT_H */
drivers/net/ethernet/fungible/funeth/funeth.h (new file, 171 lines)
@@ -0,0 +1,171 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUNETH_H
#define _FUNETH_H

#include <uapi/linux/if_ether.h>
#include <uapi/linux/net_tstamp.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include "fun_dev.h"

#define ADMIN_SQE_SIZE SZ_128
#define ADMIN_CQE_SIZE SZ_64
#define ADMIN_RSP_MAX_LEN (ADMIN_CQE_SIZE - sizeof(struct fun_cqe_info))

#define FUN_MAX_MTU 9024

#define SQ_DEPTH 512U
#define CQ_DEPTH 1024U
#define RQ_DEPTH (512U / (PAGE_SIZE / 4096))

#define CQ_INTCOAL_USEC 10
#define CQ_INTCOAL_NPKT 16
#define SQ_INTCOAL_USEC 10
#define SQ_INTCOAL_NPKT 16

#define INVALID_LPORT 0xffff

#define FUN_PORT_CAP_PAUSE_MASK (FUN_PORT_CAP_TX_PAUSE | FUN_PORT_CAP_RX_PAUSE)

struct fun_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	__be16 vlan_proto;
	u8 qos;
	u8 spoofchk:1;
	u8 trusted:1;
	unsigned int max_rate;
};

/* "subclass" of fun_dev for Ethernet functions */
struct fun_ethdev {
	struct fun_dev fdev;

	/* the function's network ports */
	struct net_device **netdevs;
	unsigned int num_ports;

	/* configuration for the function's virtual ports */
	unsigned int num_vports;
	struct fun_vport_info *vport_info;

	struct mutex state_mutex; /* nests inside RTNL if both taken */

	unsigned int nsqs_per_port;
};

static inline struct fun_ethdev *to_fun_ethdev(struct fun_dev *p)
{
	return container_of(p, struct fun_ethdev, fdev);
}

struct fun_qset {
	struct funeth_rxq **rxqs;
	struct funeth_txq **txqs;
	struct funeth_txq **xdpqs;
	unsigned int nrxqs;
	unsigned int ntxqs;
	unsigned int nxdpqs;
	unsigned int rxq_start;
	unsigned int txq_start;
	unsigned int xdpq_start;
	unsigned int cq_depth;
	unsigned int rq_depth;
	unsigned int sq_depth;
	int state;
};

/* Per netdevice driver state, i.e., netdev_priv. */
struct funeth_priv {
	struct fun_dev *fdev;
	struct pci_dev *pdev;
	struct net_device *netdev;

	struct funeth_rxq * __rcu *rxqs;
	struct funeth_txq **txqs;
	struct funeth_txq * __rcu *xdpqs;

	struct xarray irqs;
	unsigned int num_tx_irqs;
	unsigned int num_rx_irqs;
	unsigned int rx_irq_ofst;

	unsigned int lane_attrs;
	u16 lport;

	/* link settings */
	u64 port_caps;
	u64 advertising;
	u64 lp_advertising;
	unsigned int link_speed;
	u8 xcvr_type;
	u8 active_fc;
	u8 active_fec;
	u8 link_down_reason;
	seqcount_t link_seq;

	u32 msg_enable;

	unsigned int num_xdpqs;

	/* ethtool, etc. config parameters */
	unsigned int sq_depth;
	unsigned int rq_depth;
	unsigned int cq_depth;
	unsigned int cq_irq_db;
	u8 tx_coal_usec;
	u8 tx_coal_count;
	u8 rx_coal_usec;
	u8 rx_coal_count;

	struct hwtstamp_config hwtstamp_cfg;

	/* cumulative queue stats from earlier queue instances */
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_dropped;
	u64 rx_packets;
	u64 rx_bytes;
	u64 rx_dropped;

	/* RSS */
	unsigned int rss_hw_id;
	enum fun_eth_hash_alg hash_algo;
	u8 rss_key[FUN_ETH_RSS_MAX_KEY_SIZE];
	unsigned int indir_table_nentries;
	u32 indir_table[FUN_ETH_RSS_MAX_INDIR_ENT];
	dma_addr_t rss_dma_addr;
	void *rss_cfg;

	/* DMA area for port stats */
	dma_addr_t stats_dma_addr;
	__be64 *stats;

	struct bpf_prog *xdp_prog;

	struct devlink_port dl_port;

	/* kTLS state */
	unsigned int ktls_id;
	atomic64_t tx_tls_add;
	atomic64_t tx_tls_del;
	atomic64_t tx_tls_resync;
};

void fun_set_ethtool_ops(struct net_device *netdev);
int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data);
int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data);
int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid);
int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
		       struct netlink_ext_ack *extack);
int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
			  unsigned int nrx);
void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
			unsigned int nrx);
int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
		   const u32 *qtable, u8 op);

#endif /* _FUNETH_H */
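As a small illustration of the layering declared above (not part of the patch): netdev_priv() yields the per-port funeth_priv, and its fdev pointer can be mapped back to the owning fun_ethdev with to_fun_ethdev(). The helper name below is hypothetical.

/* Hypothetical helper, for illustration only. */
static struct fun_ethdev *netdev_to_ethdev(struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);

	return to_fun_ethdev(fp->fdev);
}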
drivers/net/ethernet/fungible/funeth/funeth_devlink.c (new file, 40 lines)
@@ -0,0 +1,40 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include "funeth.h"
#include "funeth_devlink.h"

static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
			   struct netlink_ext_ack *extack)
{
	int err;

	err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
	if (err)
		return err;

	return 0;
}

static const struct devlink_ops fun_dl_ops = {
	.info_get = fun_dl_info_get,
};

struct devlink *fun_devlink_alloc(struct device *dev)
{
	return devlink_alloc(&fun_dl_ops, sizeof(struct fun_ethdev), dev);
}

void fun_devlink_free(struct devlink *devlink)
{
	devlink_free(devlink);
}

void fun_devlink_register(struct devlink *devlink)
{
	devlink_register(devlink);
}

void fun_devlink_unregister(struct devlink *devlink)
{
	devlink_unregister(devlink);
}
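A sketch of how these wrappers are expected to be used at probe time (illustrative only; the function name is hypothetical, and error handling plus the rest of device setup are omitted). The fun_ethdev instance lives in the devlink private area sized by fun_devlink_alloc().

/* Hypothetical probe-time flow, for illustration only. */
static int fun_probe_sketch(struct device *dev)
{
	struct fun_ethdev *ed;
	struct devlink *dl;

	dl = fun_devlink_alloc(dev);
	if (!dl)
		return -ENOMEM;

	ed = devlink_priv(dl);		/* fun_ethdev is the devlink priv */
	/* ... initialize ed, ports and queues here ... */
	fun_devlink_register(dl);
	return 0;
}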
drivers/net/ethernet/fungible/funeth/funeth_devlink.h (new file, 13 lines)
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef __FUNETH_DEVLINK_H
#define __FUNETH_DEVLINK_H

#include <net/devlink.h>

struct devlink *fun_devlink_alloc(struct device *dev);
void fun_devlink_free(struct devlink *devlink);
void fun_devlink_register(struct devlink *devlink);
void fun_devlink_unregister(struct devlink *devlink);

#endif /* __FUNETH_DEVLINK_H */
drivers/net/ethernet/fungible/funeth/funeth_ethtool.c (new file, 1162 lines)
File diff suppressed because it is too large.
drivers/net/ethernet/fungible/funeth/funeth_ktls.c (new file, 155 lines)
@@ -0,0 +1,155 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include "funeth.h"
#include "funeth_ktls.h"

static int fun_admin_ktls_create(struct funeth_priv *fp, unsigned int id)
{
	struct fun_admin_ktls_create_req req = {
		.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
						     sizeof(req)),
		.subop = FUN_ADMIN_SUBOP_CREATE,
		.id = cpu_to_be32(id),
	};

	return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
}

static int fun_ktls_add(struct net_device *netdev, struct sock *sk,
			enum tls_offload_ctx_dir direction,
			struct tls_crypto_info *crypto_info,
			u32 start_offload_tcp_sn)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct fun_admin_ktls_modify_req req = {
		.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
						     sizeof(req)),
		.subop = FUN_ADMIN_SUBOP_MODIFY,
		.id = cpu_to_be32(fp->ktls_id),
		.tcp_seq = cpu_to_be32(start_offload_tcp_sn),
	};
	struct fun_admin_ktls_modify_rsp rsp;
	struct fun_ktls_tx_ctx *tx_ctx;
	int rc;

	if (direction != TLS_OFFLOAD_CTX_DIR_TX)
		return -EOPNOTSUPP;

	if (crypto_info->version == TLS_1_2_VERSION)
		req.version = FUN_KTLS_TLSV2;
	else
		return -EOPNOTSUPP;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *c = (void *)crypto_info;

		req.cipher = FUN_KTLS_CIPHER_AES_GCM_128;
		memcpy(req.key, c->key, sizeof(c->key));
		memcpy(req.iv, c->iv, sizeof(c->iv));
		memcpy(req.salt, c->salt, sizeof(c->salt));
		memcpy(req.record_seq, c->rec_seq, sizeof(c->rec_seq));
		break;
	}
	default:
		return -EOPNOTSUPP;
	}

	rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, &rsp,
				       sizeof(rsp), 0);
	memzero_explicit(&req, sizeof(req));
	if (rc)
		return rc;

	tx_ctx = tls_driver_ctx(sk, direction);
	tx_ctx->tlsid = rsp.tlsid;
	tx_ctx->next_seq = start_offload_tcp_sn;
	atomic64_inc(&fp->tx_tls_add);
	return 0;
}

static void fun_ktls_del(struct net_device *netdev,
			 struct tls_context *tls_ctx,
			 enum tls_offload_ctx_dir direction)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct fun_admin_ktls_modify_req req;
	struct fun_ktls_tx_ctx *tx_ctx;

	if (direction != TLS_OFFLOAD_CTX_DIR_TX)
		return;

	tx_ctx = __tls_driver_ctx(tls_ctx, direction);

	req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
			offsetof(struct fun_admin_ktls_modify_req, tcp_seq));
	req.subop = FUN_ADMIN_SUBOP_MODIFY;
	req.flags = cpu_to_be16(FUN_KTLS_MODIFY_REMOVE);
	req.id = cpu_to_be32(fp->ktls_id);
	req.tlsid = tx_ctx->tlsid;

	fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
	atomic64_inc(&fp->tx_tls_del);
}

static int fun_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
			   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct fun_admin_ktls_modify_req req;
	struct fun_ktls_tx_ctx *tx_ctx;
	int rc;

	if (direction != TLS_OFFLOAD_CTX_DIR_TX)
		return -EOPNOTSUPP;

	tx_ctx = tls_driver_ctx(sk, direction);

	req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
			offsetof(struct fun_admin_ktls_modify_req, key));
	req.subop = FUN_ADMIN_SUBOP_MODIFY;
	req.flags = 0;
	req.id = cpu_to_be32(fp->ktls_id);
	req.tlsid = tx_ctx->tlsid;
	req.tcp_seq = cpu_to_be32(seq);
	req.version = 0;
	req.cipher = 0;
	memcpy(req.record_seq, rcd_sn, sizeof(req.record_seq));

	atomic64_inc(&fp->tx_tls_resync);
	rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
	if (!rc)
		tx_ctx->next_seq = seq;
	return rc;
}

static const struct tlsdev_ops fun_ktls_ops = {
	.tls_dev_add = fun_ktls_add,
	.tls_dev_del = fun_ktls_del,
	.tls_dev_resync = fun_ktls_resync,
};

int fun_ktls_init(struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	int rc;

	rc = fun_admin_ktls_create(fp, netdev->dev_port);
	if (rc)
		return rc;

	fp->ktls_id = netdev->dev_port;
	netdev->tlsdev_ops = &fun_ktls_ops;
	netdev->hw_features |= NETIF_F_HW_TLS_TX;
	netdev->features |= NETIF_F_HW_TLS_TX;
	return 0;
}

void fun_ktls_cleanup(struct funeth_priv *fp)
{
	if (fp->ktls_id == FUN_HCI_ID_INVALID)
		return;

	fun_res_destroy(fp->fdev, FUN_ADMIN_OP_KTLS, 0, fp->ktls_id);
	fp->ktls_id = FUN_HCI_ID_INVALID;
}
drivers/net/ethernet/fungible/funeth/funeth_ktls.h (new file, 31 lines)
@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUN_KTLS_H
#define _FUN_KTLS_H

struct net_device;
struct funeth_priv;

#ifdef CONFIG_TLS_DEVICE
#include <net/tls.h>

struct fun_ktls_tx_ctx {
	__be64 tlsid;
	u32 next_seq;
};

int fun_ktls_init(struct net_device *netdev);
void fun_ktls_cleanup(struct funeth_priv *fp);

#else

static inline void fun_ktls_init(struct net_device *netdev)
{
}

static inline void fun_ktls_cleanup(struct funeth_priv *fp)
{
}
#endif

#endif /* _FUN_KTLS_H */
drivers/net/ethernet/fungible/funeth/funeth_main.c (new file, 2091 lines)
File diff suppressed because it is too large.
drivers/net/ethernet/fungible/funeth/funeth_rx.c (new file, 826 lines)
@@ -0,0 +1,826 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/bpf_trace.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include "funeth_txrx.h"
#include "funeth.h"
#include "fun_queue.h"

#define CREATE_TRACE_POINTS
#include "funeth_trace.h"

/* Given the device's max supported MTU and pages of at least 4KB a packet can
 * be scattered into at most 4 buffers.
 */
#define RX_MAX_FRAGS 4

/* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */
#define FUN_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)

/* We try to reuse pages for our buffers. To avoid frequent page ref writes we
 * take EXTRA_PAGE_REFS references at once and then hand them out one per packet
 * occupying the buffer.
 */
#define EXTRA_PAGE_REFS 1000000
#define MIN_PAGE_REFS 1000

enum {
	FUN_XDP_FLUSH_REDIR = 1,
	FUN_XDP_FLUSH_TX = 2,
};

/* See if a page is running low on refs we are holding and if so take more. */
static void refresh_refs(struct funeth_rxbuf *buf)
{
	if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) {
		buf->pg_refs += EXTRA_PAGE_REFS;
		page_ref_add(buf->page, EXTRA_PAGE_REFS);
	}
}

/* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its
 * page is worth retaining and there's room for it. Otherwise the page is
 * unmapped and our references released.
 */
static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
{
	struct funeth_rx_cache *c = &q->cache;

	if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) {
		c->bufs[c->prod_cnt & c->mask] = *buf;
		c->prod_cnt++;
	} else {
		dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		__page_frag_cache_drain(buf->page, buf->pg_refs);
	}
}

/* Get a page from the Rx buffer cache. We only consider the next available
 * page and return it if we own all its references.
 */
static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
{
	struct funeth_rx_cache *c = &q->cache;
	struct funeth_rxbuf *buf;

	if (c->prod_cnt == c->cons_cnt)
		return false; /* empty cache */

	buf = &c->bufs[c->cons_cnt & c->mask];
	if (page_ref_count(buf->page) == buf->pg_refs) {
		dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
					   PAGE_SIZE, DMA_FROM_DEVICE);
		*rb = *buf;
		buf->page = NULL;
		refresh_refs(rb);
		c->cons_cnt++;
		return true;
	}

	/* Page can't be reused. If the cache is full drop this page. */
	if (c->prod_cnt - c->cons_cnt > c->mask) {
		dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		__page_frag_cache_drain(buf->page, buf->pg_refs);
		buf->page = NULL;
		c->cons_cnt++;
	}
	return false;
}

/* Allocate and DMA-map a page for receive. */
static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
			     int node, gfp_t gfp)
{
	struct page *p;

	if (cache_get(q, rb))
		return 0;

	p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
	if (unlikely(!p))
		return -ENOMEM;

	rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
		FUN_QSTAT_INC(q, rx_map_err);
		__free_page(p);
		return -ENOMEM;
	}

	FUN_QSTAT_INC(q, rx_page_alloc);

	rb->page = p;
	rb->pg_refs = 1;
	refresh_refs(rb);
	rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p);
	return 0;
}

static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
{
	if (rb->page) {
		dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		__page_frag_cache_drain(rb->page, rb->pg_refs);
		rb->page = NULL;
	}
}

/* Run the XDP program assigned to an Rx queue.
 * Return %NULL if the buffer is consumed, or the virtual address of the packet
 * to turn into an skb.
 */
static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
			 int ref_ok, struct funeth_txq *xdp_q)
{
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 act;

	/* VA includes the headroom, frag size includes headroom + tailroom */
	xdp_init_buff(&xdp, ALIGN(skb_frag_size(frags), FUN_EPRQ_PKT_ALIGN),
		      &q->xdp_rxq);
	xdp_prepare_buff(&xdp, buf_va, FUN_XDP_HEADROOM, skb_frag_size(frags) -
			 (FUN_RX_TAILROOM + FUN_XDP_HEADROOM), false);

	xdp_prog = READ_ONCE(q->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		/* remove headroom, which may not be FUN_XDP_HEADROOM now */
		skb_frag_size_set(frags, xdp.data_end - xdp.data);
		skb_frag_off_add(frags, xdp.data - xdp.data_hard_start);
		goto pass;
	case XDP_TX:
		if (unlikely(!ref_ok))
			goto pass;
		if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
			goto xdp_error;
		FUN_QSTAT_INC(q, xdp_tx);
		q->xdp_flush |= FUN_XDP_FLUSH_TX;
		break;
	case XDP_REDIRECT:
		if (unlikely(!ref_ok))
			goto pass;
		if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog)))
			goto xdp_error;
		FUN_QSTAT_INC(q, xdp_redir);
		q->xdp_flush |= FUN_XDP_FLUSH_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(q->netdev, xdp_prog, act);
xdp_error:
		q->cur_buf->pg_refs++; /* return frags' page reference */
		FUN_QSTAT_INC(q, xdp_err);
		break;
	case XDP_DROP:
		q->cur_buf->pg_refs++;
		FUN_QSTAT_INC(q, xdp_drops);
		break;
	}
	return NULL;

pass:
	return xdp.data;
}

/* A CQE contains a fixed completion structure along with optional metadata and
 * even packet data. Given the start address of a CQE return the start of the
 * contained fixed structure, which lies at the end.
 */
static const void *cqe_to_info(const void *cqe)
{
	return cqe + FUNETH_CQE_INFO_OFFSET;
}

/* The inverse of cqe_to_info(). */
static const void *info_to_cqe(const void *cqe_info)
{
	return cqe_info - FUNETH_CQE_INFO_OFFSET;
}

/* Return the type of hash provided by the device based on the L3 and L4
 * protocols it parsed for the packet.
 */
static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse)
{
	static const enum pkt_hash_types htype_map[] = {
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L4,
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3
	};
	u16 key;

	/* Build the key from the TCP/UDP and IP/IPv6 bits */
	key = ((pkt_parse >> FUN_ETH_RX_CV_OL4_PROT_S) & 6) |
	      ((pkt_parse >> (FUN_ETH_RX_CV_OL3_PROT_S + 1)) & 1);

	return htype_map[key];
}

/* Each received packet can be scattered across several Rx buffers or can
 * share a buffer with previously received packets depending on the buffer
 * and packet sizes and the room available in the most recently used buffer.
 *
 * The rules are:
 * - If the buffer at the head of an RQ has not been used it gets (part of) the
 *   next incoming packet.
 * - Otherwise, if the packet fully fits in the buffer's remaining space the
 *   packet is written there.
 * - Otherwise, the packet goes into the next Rx buffer.
 *
 * This function returns the Rx buffer for a packet or fragment thereof of the
 * given length. If it isn't @buf it either recycles or frees that buffer
 * before advancing the queue to the next buffer.
 *
 * If called repeatedly with the remaining length of a packet it will walk
 * through all the buffers containing the packet.
 */
static struct funeth_rxbuf *
get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
{
	if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
		return buf; /* @buf holds (part of) the packet */

	/* The packet occupies part of the next buffer. Move there after
	 * replenishing the current buffer slot either with the spare page or
	 * by reusing the slot's existing page. Note that if a spare page isn't
	 * available and the current packet occupies @buf it is a multi-frag
	 * packet that will be dropped leaving @buf available for reuse.
	 */
	if ((page_ref_count(buf->page) == buf->pg_refs &&
	     buf->node == numa_mem_id()) || !q->spare_buf.page) {
		dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
					   PAGE_SIZE, DMA_FROM_DEVICE);
		refresh_refs(buf);
	} else {
		cache_offer(q, buf);
		*buf = q->spare_buf;
		q->spare_buf.page = NULL;
		q->rqes[q->rq_cons & q->rq_mask] =
			FUN_EPRQ_RQBUF_INIT(buf->dma_addr);
	}
	q->buf_offset = 0;
	q->rq_cons++;
	return &q->bufs[q->rq_cons & q->rq_mask];
}

/* Gather the page fragments making up the first Rx packet on @q. Its total
 * length @tot_len includes optional head- and tail-rooms.
 *
 * Return 0 if the device retains ownership of at least some of the pages.
 * In this case the caller may only copy the packet.
 *
 * A non-zero return value gives the caller permission to use references to the
 * pages, e.g., attach them to skbs. Additionally, if the value is <0 at least
 * one of the pages is PF_MEMALLOC.
 *
 * Regardless of outcome the caller is granted a reference to each of the pages.
 */
static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
			  skb_frag_t *frags)
{
	struct funeth_rxbuf *buf = q->cur_buf;
	unsigned int frag_len;
	int ref_ok = 1;

	for (;;) {
		buf = get_buf(q, buf, tot_len);

		/* We always keep the RQ full of buffers so before we can give
		 * one of our pages to the stack we require that we can obtain
		 * a replacement page. If we can't the packet will either be
		 * copied or dropped so we can retain ownership of the page and
		 * reuse it.
		 */
		if (!q->spare_buf.page &&
		    funeth_alloc_page(q, &q->spare_buf, numa_mem_id(),
				      GFP_ATOMIC | __GFP_MEMALLOC))
			ref_ok = 0;

		frag_len = min_t(unsigned int, tot_len,
				 PAGE_SIZE - q->buf_offset);
		dma_sync_single_for_cpu(q->dma_dev,
					buf->dma_addr + q->buf_offset,
					frag_len, DMA_FROM_DEVICE);
		buf->pg_refs--;
		if (ref_ok)
			ref_ok |= buf->node;

		__skb_frag_set_page(frags, buf->page);
		skb_frag_off_set(frags, q->buf_offset);
		skb_frag_size_set(frags++, frag_len);

		tot_len -= frag_len;
		if (!tot_len)
			break;

		q->buf_offset = PAGE_SIZE;
	}
	q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN);
	q->cur_buf = buf;
	return ref_ok;
}

static bool rx_hwtstamp_enabled(const struct net_device *dev)
{
	const struct funeth_priv *d = netdev_priv(dev);

	return d->hwtstamp_cfg.rx_filter == HWTSTAMP_FILTER_ALL;
}

/* Advance the CQ pointers and phase tag to the next CQE. */
static void advance_cq(struct funeth_rxq *q)
{
	if (unlikely(q->cq_head == q->cq_mask)) {
		q->cq_head = 0;
		q->phase ^= 1;
		q->next_cqe_info = cqe_to_info(q->cqes);
	} else {
		q->cq_head++;
		q->next_cqe_info += FUNETH_CQE_SIZE;
	}
	prefetch(q->next_cqe_info);
}

/* Process the packet represented by the head CQE of @q. Gather the packet's
 * fragments, run it through the optional XDP program, and if needed construct
 * an skb and pass it to the stack.
 */
static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
{
	const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info);
	unsigned int i, tot_len, pkt_len = be32_to_cpu(rxreq->pkt_len);
	struct net_device *ndev = q->netdev;
	skb_frag_t frags[RX_MAX_FRAGS];
	struct skb_shared_info *si;
	unsigned int headroom;
	gro_result_t gro_res;
	struct sk_buff *skb;
	int ref_ok;
	void *va;
	u16 cv;

	u64_stats_update_begin(&q->syncp);
	q->stats.rx_pkts++;
	q->stats.rx_bytes += pkt_len;
	u64_stats_update_end(&q->syncp);

	advance_cq(q);

	/* account for head- and tail-room, present only for 1-buffer packets */
	tot_len = pkt_len;
	headroom = be16_to_cpu(rxreq->headroom);
	if (likely(headroom))
		tot_len += FUN_RX_TAILROOM + headroom;

	ref_ok = fun_gather_pkt(q, tot_len, frags);
	va = skb_frag_address(frags);
	if (xdp_q && headroom == FUN_XDP_HEADROOM) {
		va = fun_run_xdp(q, frags, va, ref_ok, xdp_q);
		if (!va)
			return;
		headroom = 0; /* XDP_PASS trims it */
	}
	if (unlikely(!ref_ok))
		goto no_mem;

	if (likely(headroom)) {
		/* headroom is either FUN_RX_HEADROOM or FUN_XDP_HEADROOM */
		prefetch(va + headroom);
		skb = napi_build_skb(va, ALIGN(tot_len, FUN_EPRQ_PKT_ALIGN));
		if (unlikely(!skb))
			goto no_mem;

		skb_reserve(skb, headroom);
		__skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
	} else {
		prefetch(va);
		skb = napi_get_frags(q->napi);
		if (unlikely(!skb))
			goto no_mem;

		if (ref_ok < 0)
			skb->pfmemalloc = 1;

		si = skb_shinfo(skb);
		si->nr_frags = rxreq->nsgl;
		for (i = 0; i < si->nr_frags; i++)
			si->frags[i] = frags[i];

		skb->len = pkt_len;
		skb->data_len = pkt_len;
		skb->truesize += round_up(pkt_len, FUN_EPRQ_PKT_ALIGN);
	}

	skb_record_rx_queue(skb, q->qidx);
	cv = be16_to_cpu(rxreq->pkt_cv);
	if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash))
		skb_set_hash(skb, be32_to_cpu(rxreq->hash),
			     cqe_to_pkt_hash_type(cv));
	if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) {
		FUN_QSTAT_INC(q, rx_cso);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = be16_to_cpu(rxreq->csum) - 1;
	}
	if (unlikely(rx_hwtstamp_enabled(q->netdev)))
		skb_hwtstamps(skb)->hwtstamp = be64_to_cpu(rxreq->timestamp);

	trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv);

	gro_res = skb->data_len ? napi_gro_frags(q->napi) :
				  napi_gro_receive(q->napi, skb);
	if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE)
		FUN_QSTAT_INC(q, gro_merged);
	else if (gro_res == GRO_HELD)
		FUN_QSTAT_INC(q, gro_pkts);
	return;

no_mem:
	FUN_QSTAT_INC(q, rx_mem_drops);

	/* Release the references we've been granted for the frag pages.
	 * We return the ref of the last frag and free the rest.
	 */
	q->cur_buf->pg_refs++;
	for (i = 0; i < rxreq->nsgl - 1; i++)
		__free_page(skb_frag_page(frags + i));
}

/* Return 0 if the phase tag of the CQE at the CQ's head matches expectations
 * indicating the CQE is new.
 */
static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase)
{
	u16 sf_p = be16_to_cpu(ci->sf_p);

	return (sf_p & 1) ^ phase;
}

/* Walk through a CQ identifying and processing fresh CQEs up to the given
 * budget. Return the remaining budget.
 */
static int fun_process_cqes(struct funeth_rxq *q, int budget)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	struct funeth_txq **xdpqs, *xdp_q = NULL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (xdpqs)
		xdp_q = xdpqs[smp_processor_id()];

	while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
		/* access other descriptor fields after the phase check */
		dma_rmb();

		fun_handle_cqe_pkt(q, xdp_q);
		budget--;
	}

	if (unlikely(q->xdp_flush)) {
		if (q->xdp_flush & FUN_XDP_FLUSH_TX)
			fun_txq_wr_db(xdp_q);
		if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
			xdp_do_flush();
		q->xdp_flush = 0;
	}

	return budget;
}

/* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ
 * doorbells as needed.
 */
int fun_rxq_napi_poll(struct napi_struct *napi, int budget)
{
	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
	struct funeth_rxq *q = irq->rxq;
	int work_done = budget - fun_process_cqes(q, budget);
	u32 cq_db_val = q->cq_head;

	if (unlikely(work_done >= budget))
		FUN_QSTAT_INC(q, rx_budget);
	else if (napi_complete_done(napi, work_done))
		cq_db_val |= q->irq_db_val;

	/* check whether to post new Rx buffers */
	if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
		u64_stats_update_begin(&q->syncp);
		q->stats.rx_bufs += q->rq_cons - q->rq_cons_db;
		u64_stats_update_end(&q->syncp);
		q->rq_cons_db = q->rq_cons;
		writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
	}

	writel(cq_db_val, q->cq_db);
	return work_done;
}

/* Free the Rx buffers of an Rx queue. */
static void fun_rxq_free_bufs(struct funeth_rxq *q)
{
	struct funeth_rxbuf *b = q->bufs;
	unsigned int i;

	for (i = 0; i <= q->rq_mask; i++, b++)
		funeth_free_page(q, b);

	funeth_free_page(q, &q->spare_buf);
	q->cur_buf = NULL;
}

/* Initially provision an Rx queue with Rx buffers. */
static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
{
	struct funeth_rxbuf *b = q->bufs;
	unsigned int i;

	for (i = 0; i <= q->rq_mask; i++, b++) {
		if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
			fun_rxq_free_bufs(q);
			return -ENOMEM;
		}
		q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr);
	}
	q->cur_buf = q->bufs;
	return 0;
}

/* Initialize a used-buffer cache of the given depth. */
static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth,
			      int node)
{
	c->mask = depth - 1;
	c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node);
	return c->bufs ? 0 : -ENOMEM;
}

/* Deallocate an Rx queue's used-buffer cache and its contents. */
static void fun_rxq_free_cache(struct funeth_rxq *q)
{
	struct funeth_rxbuf *b = q->cache.bufs;
	unsigned int i;

	for (i = 0; i <= q->cache.mask; i++, b++)
		funeth_free_page(q, b);

	kvfree(q->cache.bufs);
	q->cache.bufs = NULL;
}

int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	struct fun_admin_epcq_req cmd;
	u16 headroom;
	int err;

	headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
	if (headroom != q->headroom) {
		cmd.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
							sizeof(cmd));
		cmd.u.modify =
			FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(FUN_ADMIN_SUBOP_MODIFY,
						       0, q->hw_cqid, headroom);
		err = fun_submit_admin_sync_cmd(fp->fdev, &cmd.common, NULL, 0,
						0);
		if (err)
			return err;
		q->headroom = headroom;
	}

	WRITE_ONCE(q->xdp_prog, prog);
	return 0;
}

/* Create an Rx queue, allocating the host memory it needs. */
static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ncqe,
					    unsigned int nrqe,
					    struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_rxq *q;
	int err = -ENOMEM;
	int numa_node;

	numa_node = fun_irq_node(irq);
	q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
	if (!q)
		goto err;

	q->qidx = qidx;
	q->netdev = dev;
	q->cq_mask = ncqe - 1;
	q->rq_mask = nrqe - 1;
	q->numa_node = numa_node;
	q->rq_db_thres = nrqe / 4;
	u64_stats_init(&q->syncp);
	q->dma_dev = &fp->pdev->dev;

	q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes),
				     sizeof(*q->bufs), false, numa_node,
				     &q->rq_dma_addr, (void **)&q->bufs, NULL);
	if (!q->rqes)
		goto free_q;

	q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
				     false, numa_node, &q->cq_dma_addr, NULL,
				     NULL);
	if (!q->cqes)
		goto free_rqes;

	err = fun_rxq_init_cache(&q->cache, nrqe, numa_node);
	if (err)
		goto free_cqes;

	err = fun_rxq_alloc_bufs(q, numa_node);
	if (err)
		goto free_cache;

	q->stats.rx_bufs = q->rq_mask;
	q->init_state = FUN_QSTATE_INIT_SW;
	return q;

free_cache:
	fun_rxq_free_cache(q);
free_cqes:
	dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
			  q->cq_dma_addr);
free_rqes:
	fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes,
			  q->rq_dma_addr, q->bufs);
free_q:
	kfree(q);
err:
	netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx);
	return ERR_PTR(err);
}

static void fun_rxq_free_sw(struct funeth_rxq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	fun_rxq_free_cache(q);
	fun_rxq_free_bufs(q);
	fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false,
			  q->rqes, q->rq_dma_addr, q->bufs);
	dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE,
			  q->cqes, q->cq_dma_addr);

	/* Before freeing the queue transfer key counters to the device. */
	fp->rx_packets += q->stats.rx_pkts;
	fp->rx_bytes += q->stats.rx_bytes;
	fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops;

	kfree(q);
}

/* Create an Rx queue's resources on the device. */
int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	unsigned int ncqe = q->cq_mask + 1;
	unsigned int nrqe = q->rq_mask + 1;
	int err;

	err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
			       irq->napi.napi_id);
	if (err)
		goto out;

	err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					 NULL);
	if (err)
		goto xdp_unreg;

	q->phase = 1;
	q->irq_cnt = 0;
	q->cq_head = 0;
	q->rq_cons = 0;
	q->rq_cons_db = 0;
	q->buf_offset = 0;
	q->napi = &irq->napi;
	q->irq_db_val = fp->cq_irq_db;
	q->next_cqe_info = cqe_to_info(q->cqes);

	q->xdp_prog = fp->xdp_prog;
	q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;

	err = fun_sq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
			    FUN_ADMIN_EPSQ_CREATE_FLAG_RQ, 0,
			    FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0,
			    0, 0, fp->fdev->kern_end_qid, PAGE_SHIFT,
			    &q->hw_sqid, &q->rq_db);
	if (err)
		goto xdp_unreg;

	err = fun_cq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
			    FUN_ADMIN_EPCQ_CREATE_FLAG_RQ, 0,
			    q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe,
			    q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0,
			    irq->irq_idx, 0, fp->fdev->kern_end_qid,
			    &q->hw_cqid, &q->cq_db);
	if (err)
		goto free_rq;

	irq->rxq = q;
	writel(q->rq_mask, q->rq_db);
	q->init_state = FUN_QSTATE_INIT_FULL;

	netif_info(fp, ifup, q->netdev,
		   "Rx queue %u, depth %u/%u, HW qid %u/%u, IRQ idx %u, node %d, headroom %u\n",
		   q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,
		   q->numa_node, q->headroom);
	return 0;

free_rq:
	fun_destroy_sq(fp->fdev, q->hw_sqid);
xdp_unreg:
	xdp_rxq_info_unreg(&q->xdp_rxq);
out:
	netdev_err(q->netdev,
		   "Failed to create Rx queue %u on device, error %d\n",
		   q->qidx, err);
	return err;
}

static void fun_rxq_free_dev(struct funeth_rxq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	struct fun_irq *irq;

	if (q->init_state < FUN_QSTATE_INIT_FULL)
		return;

	irq = container_of(q->napi, struct fun_irq, napi);
	netif_info(fp, ifdown, q->netdev,
		   "Freeing Rx queue %u (id %u/%u), IRQ %u\n",
		   q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);

	irq->rxq = NULL;
	xdp_rxq_info_unreg(&q->xdp_rxq);
	fun_destroy_sq(fp->fdev, q->hw_sqid);
	fun_destroy_cq(fp->fdev, q->hw_cqid);
	q->init_state = FUN_QSTATE_INIT_SW;
}

/* Create or advance an Rx queue, allocating all the host and device resources
 * needed to reach the target state.
 */
int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
		      int state, struct funeth_rxq **qp)
{
	struct funeth_rxq *q = *qp;
	int err;

	if (!q) {
		q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);
		if (IS_ERR(q))
			return PTR_ERR(q);
	}

	if (q->init_state >= state)
		goto out;

	err = fun_rxq_create_dev(q, irq);
	if (err) {
		if (!*qp)
			fun_rxq_free_sw(q);
		return err;
	}

out:
	*qp = q;
	return 0;
}

/* Free Rx queue resources until it reaches the target state. */
struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
{
	if (state < FUN_QSTATE_INIT_FULL)
		fun_rxq_free_dev(q);

	if (state == FUN_QSTATE_DESTROYED) {
		fun_rxq_free_sw(q);
		q = NULL;
	}

	return q;
}
drivers/net/ethernet/fungible/funeth/funeth_trace.h (new file, 117 lines)
@@ -0,0 +1,117 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM funeth

#if !defined(_TRACE_FUNETH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FUNETH_H

#include <linux/tracepoint.h>

#include "funeth_txrx.h"

TRACE_EVENT(funeth_tx,

	TP_PROTO(const struct funeth_txq *txq,
		 u32 len,
		 u32 sqe_idx,
		 u32 ngle),

	TP_ARGS(txq, len, sqe_idx, ngle),

	TP_STRUCT__entry(
		__field(u32, qidx)
		__field(u32, len)
		__field(u32, sqe_idx)
		__field(u32, ngle)
		__string(devname, txq->netdev->name)
	),

	TP_fast_assign(
		__entry->qidx = txq->qidx;
		__entry->len = len;
		__entry->sqe_idx = sqe_idx;
		__entry->ngle = ngle;
		__assign_str(devname, txq->netdev->name);
	),

	TP_printk("%s: Txq %u, SQE idx %u, len %u, num GLEs %u",
		  __get_str(devname), __entry->qidx, __entry->sqe_idx,
		  __entry->len, __entry->ngle)
);

TRACE_EVENT(funeth_tx_free,

	TP_PROTO(const struct funeth_txq *txq,
		 u32 sqe_idx,
		 u32 num_sqes,
		 u32 hw_head),

	TP_ARGS(txq, sqe_idx, num_sqes, hw_head),

	TP_STRUCT__entry(
		__field(u32, qidx)
		__field(u32, sqe_idx)
		__field(u32, num_sqes)
		__field(u32, hw_head)
		__string(devname, txq->netdev->name)
	),

	TP_fast_assign(
		__entry->qidx = txq->qidx;
		__entry->sqe_idx = sqe_idx;
		__entry->num_sqes = num_sqes;
		__entry->hw_head = hw_head;
		__assign_str(devname, txq->netdev->name);
	),

	TP_printk("%s: Txq %u, SQE idx %u, SQEs %u, HW head %u",
		  __get_str(devname), __entry->qidx, __entry->sqe_idx,
		  __entry->num_sqes, __entry->hw_head)
);

TRACE_EVENT(funeth_rx,

	TP_PROTO(const struct funeth_rxq *rxq,
		 u32 num_rqes,
		 u32 pkt_len,
		 u32 hash,
		 u32 cls_vec),

	TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec),

	TP_STRUCT__entry(
		__field(u32, qidx)
		__field(u32, cq_head)
		__field(u32, num_rqes)
		__field(u32, len)
		__field(u32, hash)
		__field(u32, cls_vec)
		__string(devname, rxq->netdev->name)
	),

	TP_fast_assign(
		__entry->qidx = rxq->qidx;
		__entry->cq_head = rxq->cq_head;
		__entry->num_rqes = num_rqes;
		__entry->len = pkt_len;
		__entry->hash = hash;
		__entry->cls_vec = cls_vec;
		__assign_str(devname, rxq->netdev->name);
	),

	TP_printk("%s: Rxq %u, CQ head %u, RQEs %u, len %u, hash %u, CV %#x",
		  __get_str(devname), __entry->qidx, __entry->cq_head,
		  __entry->num_rqes, __entry->len, __entry->hash,
		  __entry->cls_vec)
);

#endif /* _TRACE_FUNETH_H */

/* Below must be outside protection. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE funeth_trace

#include <trace/define_trace.h>
762
drivers/net/ethernet/fungible/funeth/funeth_tx.c
Normal file
762
drivers/net/ethernet/fungible/funeth/funeth_tx.c
Normal file
@ -0,0 +1,762 @@
|
||||
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <uapi/linux/udp.h>
|
||||
#include "funeth.h"
|
||||
#include "funeth_txrx.h"
|
||||
#include "funeth_trace.h"
|
||||
#include "fun_queue.h"
|
||||
|
||||
#define FUN_XDP_CLEAN_THRES 32
|
||||
#define FUN_XDP_CLEAN_BATCH 16
|
||||
|
||||
/* DMA-map a packet and return the (length, DMA_address) pairs for its
|
||||
* segments. If a mapping error occurs -ENOMEM is returned.
|
||||
*/
|
||||
static int map_skb(const struct sk_buff *skb, struct device *dev,
|
||||
dma_addr_t *addr, unsigned int *len)
|
||||
{
|
||||
const struct skb_shared_info *si;
|
||||
const skb_frag_t *fp, *end;
|
||||
|
||||
*len = skb_headlen(skb);
|
||||
*addr = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, *addr))
|
||||
return -ENOMEM;
|
||||
|
||||
si = skb_shinfo(skb);
|
||||
end = &si->frags[si->nr_frags];
|
||||
|
||||
for (fp = si->frags; fp < end; fp++) {
|
||||
*++len = skb_frag_size(fp);
|
||||
*++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, *addr))
|
||||
goto unwind;
|
||||
}
|
||||
return 0;
|
||||
|
||||
unwind:
|
||||
while (fp-- > si->frags)
|
||||
dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
|
||||
|
||||
dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Return the address just past the end of a Tx queue's descriptor ring.
 * It exploits the fact that the HW writeback area is just after the end
 * of the descriptor ring.
 */
static void *txq_end(const struct funeth_txq *q)
{
	return (void *)q->hw_wb;
}

/* Return the amount of space within a Tx ring from the given address to the
 * end.
 */
static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
{
	return txq_end(q) - p;
}

/* Return the number of Tx descriptors occupied by a Tx request. */
static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
{
	return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
}

static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
{
	return *(__be16 *)&tcp_flag_word(th);
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include "funeth_ktls.h"

static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
				  unsigned int *tls_len)
{
	const struct fun_ktls_tx_ctx *tls_ctx;
	u32 datalen, seq;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		return skb;

	if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
		seq = ntohl(tcp_hdr(skb)->seq);
		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);

		if (likely(tls_ctx->next_seq == seq)) {
			*tls_len = datalen;
			return skb;
		}
		if (seq - tls_ctx->next_seq < U32_MAX / 4) {
			tls_offload_tx_resync_request(skb->sk, seq,
						      tls_ctx->next_seq);
		}
	}

	FUN_QSTAT_INC(q, tx_tls_fallback);
	skb = tls_encrypt_skb(skb);
	if (!skb)
		FUN_QSTAT_INC(q, tx_tls_drops);

	return skb;
}
#endif

/* Write as many descriptors as needed for the supplied skb starting at the
 * current producer location. The caller has made certain enough descriptors
 * are available.
 *
 * Returns the number of descriptors written, 0 on error.
 */
static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
				   unsigned int tls_len)
{
	unsigned int extra_bytes = 0, extra_pkts = 0;
	unsigned int idx = q->prod_cnt & q->mask;
	const struct skb_shared_info *shinfo;
	unsigned int lens[MAX_SKB_FRAGS + 1];
	dma_addr_t addrs[MAX_SKB_FRAGS + 1];
	struct fun_eth_tx_req *req;
	struct fun_dataop_gl *gle;
	const struct tcphdr *th;
	unsigned int ngle, i;
	u16 flags;

	if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return 0;
	}

	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = 0;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;

	shinfo = skb_shinfo(skb);
	if (likely(shinfo->gso_size)) {
		if (skb->encapsulation) {
			u16 ol4_ofst;

			flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_OUTER_L3_LEN;
			if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
						SKB_GSO_UDP_TUNNEL_CSUM)) {
				flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
					 FUN_ETH_OUTER_UDP;
				if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
					flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
				ol4_ofst = skb_transport_offset(skb);
			} else {
				ol4_ofst = skb_inner_network_offset(skb);
			}

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
			else
				flags |= FUN_ETH_OUTER_IPV6;

			if (skb->inner_network_header) {
				if (inner_ip_hdr(skb)->version == 4)
					flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
				else
					flags |= FUN_ETH_INNER_IPV6 |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
			}
			th = inner_tcp_hdr(skb);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_inner_network_offset(skb),
					     skb_inner_transport_offset(skb),
					     skb_network_offset(skb), ol4_ofst);
			FUN_QSTAT_INC(q, tx_encap_tso);
		} else {
			/* HW considers one set of headers as inner */
			flags = FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L3_LEN;
			if (shinfo->gso_type & SKB_GSO_TCPV6)
				flags |= FUN_ETH_INNER_IPV6;
			else
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			th = tcp_hdr(skb);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_tso);
		}

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_cso += shinfo->gso_segs;
		u64_stats_update_end(&q->syncp);

		extra_pkts = shinfo->gso_segs - 1;
		extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
			       __tcp_hdrlen(th)) * extra_pkts;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
		if (skb->csum_offset == offsetof(struct udphdr, check))
			flags |= FUN_ETH_INNER_UDP;
		fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
				     skb_checksum_start_offset(skb), 0, 0);
		FUN_QSTAT_INC(q, tx_cso);
	} else {
		fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	}

	ngle = shinfo->nr_frags + 1;
	req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
	req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);

	for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
	     i < ngle && txq_to_end(q, gle); i++, gle++)
		fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);

	if (txq_to_end(q, gle) == 0) {
		gle = (struct fun_dataop_gl *)q->desc;
		for ( ; i < ngle; i++, gle++)
			fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
	}

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
		struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
		struct fun_ktls_tx_ctx *tls_ctx;

		req->len8 += FUNETH_TLS_SZ / 8;
		req->flags = cpu_to_be16(FUN_ETH_TX_TLS);

		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
		tls->tlsid = tls_ctx->tlsid;
		tls_ctx->next_seq += tls_len;

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_tls_bytes += tls_len;
		q->stats.tx_tls_pkts += 1 + extra_pkts;
		u64_stats_update_end(&q->syncp);
	}

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += skb->len + extra_bytes;
	q->stats.tx_pkts += 1 + extra_pkts;
	u64_stats_update_end(&q->syncp);

	q->info[idx].skb = skb;

	trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
	return tx_req_ndesc(req);
}

/* Return the number of available descriptors of a Tx queue.
 * HW assumes head==tail means the ring is empty so we need to keep one
 * descriptor unused.
 */
static unsigned int fun_txq_avail(const struct funeth_txq *q)
{
	return q->mask - q->prod_cnt + q->cons_cnt;
}

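/* Example: with a 512-entry ring (mask = 511), prod_cnt = 700 and
 * cons_cnt = 300 leave 511 - 700 + 300 = 111 usable descriptors. The
 * counters are free-running, so the arithmetic wraps correctly at 2^32.
 */
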
/* Stop a queue if it can't handle another worst-case packet. */
static void fun_tx_check_stop(struct funeth_txq *q)
{
	if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
		return;

	netif_tx_stop_queue(q->ndq);

	/* NAPI reclaim is freeing packets in parallel with us and we may race.
	 * We have stopped the queue but check again after synchronizing with
	 * reclaim.
	 */
	smp_mb();
	if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
		FUN_QSTAT_INC(q, tx_nstops);
	else
		netif_tx_start_queue(q->ndq);
}

/* Return true if a queue has enough space to restart. Current condition is
 * that the queue must be >= 1/4 empty.
 */
static bool fun_txq_may_restart(struct funeth_txq *q)
{
	return fun_txq_avail(q) >= q->mask / 4;
}

netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int qid = skb_get_queue_mapping(skb);
	struct funeth_txq *q = fp->txqs[qid];
	unsigned int tls_len = 0;
	unsigned int ndesc;

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
	    tls_is_sk_tx_device_offloaded(skb->sk)) {
		skb = fun_tls_tx(skb, q, &tls_len);
		if (unlikely(!skb))
			goto dropped;
	}

	ndesc = write_pkt_desc(skb, q, tls_len);
	if (unlikely(!ndesc)) {
		dev_kfree_skb_any(skb);
		goto dropped;
	}

	q->prod_cnt += ndesc;
	fun_tx_check_stop(q);

	skb_tx_timestamp(skb);

	if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
		fun_txq_wr_db(q);
	else
		FUN_QSTAT_INC(q, tx_more);

	return NETDEV_TX_OK;

dropped:
	/* A dropped packet may be the last one in a xmit_more train,
	 * ring the doorbell just in case.
	 */
	if (!netdev_xmit_more())
		fun_txq_wr_db(q);
	return NETDEV_TX_OK;
}

/* Return a Tx queue's HW head index written back to host memory. */
static u16 txq_hw_head(const struct funeth_txq *q)
{
	return (u16)be64_to_cpu(*q->hw_wb);
}

/* Unmap the Tx packet starting at the given descriptor index and
 * return the number of Tx descriptors it occupied.
 */
static unsigned int unmap_skb(const struct funeth_txq *q, unsigned int idx)
{
	const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
	unsigned int ngle = req->dataop.ngather;
	struct fun_dataop_gl *gle;

	if (ngle) {
		gle = (struct fun_dataop_gl *)req->dataop.imm;
		dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
				 be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);

		for (gle++; --ngle && txq_to_end(q, gle); gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);

		for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);
	}

	return tx_req_ndesc(req);
}

/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
 * queue if we freed enough descriptors.
 *
 * Return true if we exhausted the budget while there is more work to be done.
 */
static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
{
	unsigned int npkts = 0, nbytes = 0, ndesc = 0;
	unsigned int head, limit, reclaim_idx;

	/* budget may be 0, e.g., netpoll */
	limit = budget ? budget : UINT_MAX;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			unsigned int pkt_desc = unmap_skb(q, reclaim_idx);
			struct sk_buff *skb = q->info[reclaim_idx].skb;

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			nbytes += skb->len;
			napi_consume_skb(skb, budget);
			ndesc += pkt_desc;
			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			npkts++;
		} while (reclaim_idx != head && npkts < limit);
	}

	q->cons_cnt += ndesc;
	netdev_tx_completed_queue(q->ndq, npkts, nbytes);
	smp_mb(); /* pairs with the one in fun_tx_check_stop() */

	if (unlikely(netif_tx_queue_stopped(q->ndq) &&
		     fun_txq_may_restart(q))) {
		netif_tx_wake_queue(q->ndq);
		FUN_QSTAT_INC(q, tx_nrestarts);
	}

	return reclaim_idx != head;
}

/* The NAPI handler for Tx queues. */
int fun_txq_napi_poll(struct napi_struct *napi, int budget)
{
	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
	struct funeth_txq *q = irq->txq;
	unsigned int db_val;

	if (fun_txq_reclaim(q, budget))
		return budget; /* exhausted budget */

	napi_complete(napi); /* exhausted pending work */
	db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
	writel(db_val, q->db);
	return 0;
}

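/* Note on the doorbell write above: irq_db_val carries the IRQ rearm flag
 * and the coalescing parameters (see FUN_IRQ_SQ_DB in funeth_txrx.h), and
 * OR-ing in (cons_cnt & mask) reports the current consumer index to the
 * device when the interrupt is rearmed.
 */
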
static void fun_xdp_unmap(const struct funeth_txq *q, unsigned int idx)
{
	const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
	const struct fun_dataop_gl *gle;

	gle = (const struct fun_dataop_gl *)req->dataop.imm;
	dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
			 be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
}

/* Reclaim up to @budget completed Tx descriptors from a TX XDP queue. */
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
{
	unsigned int npkts = 0, head, reclaim_idx;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			fun_xdp_unmap(q, reclaim_idx);
			page_frag_free(q->info[reclaim_idx].vaddr);

			trace_funeth_tx_free(q, reclaim_idx, 1, head);

			reclaim_idx = (reclaim_idx + 1) & q->mask;
			npkts++;
		} while (reclaim_idx != head && npkts < budget);
	}

	q->cons_cnt += npkts;
	return npkts;
}

bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
{
	struct fun_eth_tx_req *req;
	struct fun_dataop_gl *gle;
	unsigned int idx;
	dma_addr_t dma;

	if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
		fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);

	if (!unlikely(fun_txq_avail(q))) {
		FUN_QSTAT_INC(q, tx_xdp_full);
		return false;
	}

	dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return false;
	}

	idx = q->prod_cnt & q->mask;
	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = (sizeof(*req) + sizeof(*gle)) / 8;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;
	fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);

	gle = (struct fun_dataop_gl *)req->dataop.imm;
	fun_dataop_gl_init(gle, 0, 0, len, dma);

	q->info[idx].vaddr = data;

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += len;
	q->stats.tx_pkts++;
	u64_stats_update_end(&q->syncp);

	trace_funeth_tx(q, len, idx, 1);
	q->prod_cnt++;

	return true;
}

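/* Each XDP transmit above consumes exactly one descriptor holding a single
 * gather entry, which is why fun_xdpq_clean() advances the consumer counter
 * by the number of packets it reclaims.
 */
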
int fun_xdp_xmit_frames(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q, **xdpqs;
	int i, q_idx;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (unlikely(!xdpqs))
		return -ENETDOWN;

	q_idx = smp_processor_id();
	if (unlikely(q_idx >= fp->num_xdpqs))
		return -ENXIO;

	for (q = xdpqs[q_idx], i = 0; i < n; i++) {
		const struct xdp_frame *xdpf = frames[i];

		if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
			break;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		fun_txq_wr_db(q);
	return i;
}

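/* The .ndo_xdp_xmit handler above selects the XDP Tx queue by the current
 * CPU id and returns -ENXIO when that CPU has no XDP queue configured; the
 * doorbell is rung only when the caller requests XDP_XMIT_FLUSH.
 */
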
/* Purge a Tx queue of any queued packets. Should be called once HW access
 * to the packets has been revoked, e.g., after the queue has been disabled.
 */
static void fun_txq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		q->cons_cnt += unmap_skb(q, idx);
		dev_kfree_skb_any(q->info[idx].skb);
	}
	netdev_tx_reset_queue(q->ndq);
}

static void fun_xdpq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		fun_xdp_unmap(q, idx);
		page_frag_free(q->info[idx].vaddr);
		q->cons_cnt++;
	}
}

/* Create a Tx queue, allocating all the host resources needed. */
static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ndesc,
					    struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q;
	int numa_node;

	if (irq)
		numa_node = fun_irq_node(irq); /* skb Tx queue */
	else
		numa_node = cpu_to_node(qidx); /* XDP Tx queue */

	q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
	if (!q)
		goto err;

	q->dma_dev = &fp->pdev->dev;
	q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
				     sizeof(*q->info), true, numa_node,
				     &q->dma_addr, (void **)&q->info,
				     &q->hw_wb);
	if (!q->desc)
		goto free_q;

	q->netdev = dev;
	q->mask = ndesc - 1;
	q->qidx = qidx;
	q->numa_node = numa_node;
	u64_stats_init(&q->syncp);
	q->init_state = FUN_QSTATE_INIT_SW;
	return q;

free_q:
	kfree(q);
err:
	netdev_err(dev, "Can't allocate memory for %s queue %u\n",
		   irq ? "Tx" : "XDP", qidx);
	return NULL;
}

static void fun_txq_free_sw(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
			  q->desc, q->dma_addr, q->info);

	fp->tx_packets += q->stats.tx_pkts;
	fp->tx_bytes += q->stats.tx_bytes;
	fp->tx_dropped += q->stats.tx_map_err;

	kfree(q);
}

/* Allocate the device portion of a Tx queue. */
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	unsigned int irq_idx, ndesc = q->mask + 1;
	int err;

	q->irq = irq;
	*q->hw_wb = 0;
	q->prod_cnt = 0;
	q->cons_cnt = 0;
	irq_idx = irq ? irq->irq_idx : 0;

	err = fun_sq_create(fp->fdev,
			    FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
			    FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
			    FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
			    q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
			    irq_idx, 0, fp->fdev->kern_end_qid, 0,
			    &q->hw_qid, &q->db);
	if (err)
		goto out;

	err = fun_create_and_bind_tx(fp, q->hw_qid);
	if (err < 0)
		goto free_devq;
	q->ethid = err;

	if (irq) {
		irq->txq = q;
		q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
		q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
					      fp->tx_coal_count);
		writel(q->irq_db_val, q->db);
	}

	q->init_state = FUN_QSTATE_INIT_FULL;
	netif_info(fp, ifup, q->netdev,
		   "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
		   irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
		   q->ethid, q->numa_node);
	return 0;

free_devq:
	fun_destroy_sq(fp->fdev, q->hw_qid);
out:
	netdev_err(q->netdev,
		   "Failed to create %s queue %u on device, error %d\n",
		   irq ? "Tx" : "XDP", q->qidx, err);
	return err;
}

static void fun_txq_free_dev(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	if (q->init_state < FUN_QSTATE_INIT_FULL)
		return;

	netif_info(fp, ifdown, q->netdev,
		   "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
		   q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
		   q->irq ? q->irq->irq_idx : 0, q->ethid);

	fun_destroy_sq(fp->fdev, q->hw_qid);
	fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);

	if (q->irq) {
		q->irq->txq = NULL;
		fun_txq_purge(q);
	} else {
		fun_xdpq_purge(q);
	}

	q->init_state = FUN_QSTATE_INIT_SW;
}

/* Create or advance a Tx queue, allocating all the host and device resources
 * needed to reach the target state.
 */
int funeth_txq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ndesc, struct fun_irq *irq, int state,
		      struct funeth_txq **qp)
{
	struct funeth_txq *q = *qp;
	int err;

	if (!q)
		q = fun_txq_create_sw(dev, qidx, ndesc, irq);
	if (!q)
		return -ENOMEM;

	if (q->init_state >= state)
		goto out;

	err = fun_txq_create_dev(q, irq);
	if (err) {
		if (!*qp)
			fun_txq_free_sw(q);
		return err;
	}

out:
	*qp = q;
	return 0;
}

/* Free Tx queue resources until it reaches the target state.
 * The queue must be already disconnected from the stack.
 */
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
{
	if (state < FUN_QSTATE_INIT_FULL)
		fun_txq_free_dev(q);

	if (state == FUN_QSTATE_DESTROYED) {
		fun_txq_free_sw(q);
		q = NULL;
	}

	return q;
}
264
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
Normal file
@@ -0,0 +1,264 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUNETH_TXRX_H
#define _FUNETH_TXRX_H

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* Tx descriptor size */
#define FUNETH_SQE_SIZE 64U

/* Size of device headers per Tx packet */
#define FUNETH_FUNOS_HDR_SZ (sizeof(struct fun_eth_tx_req))

/* Number of gather list entries per Tx descriptor */
#define FUNETH_GLE_PER_DESC (FUNETH_SQE_SIZE / sizeof(struct fun_dataop_gl))

/* Max gather list size in bytes for an sk_buff. */
#define FUNETH_MAX_GL_SZ ((MAX_SKB_FRAGS + 1) * sizeof(struct fun_dataop_gl))

#if IS_ENABLED(CONFIG_TLS_DEVICE)
# define FUNETH_TLS_SZ sizeof(struct fun_eth_tls)
#else
# define FUNETH_TLS_SZ 0
#endif

/* Max number of Tx descriptors for an sk_buff using a gather list. */
#define FUNETH_MAX_GL_DESC \
	DIV_ROUND_UP((FUNETH_FUNOS_HDR_SZ + FUNETH_MAX_GL_SZ + FUNETH_TLS_SZ), \
		     FUNETH_SQE_SIZE)

/* Max number of Tx descriptors for any packet. */
#define FUNETH_MAX_PKT_DESC FUNETH_MAX_GL_DESC

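/* Sizing example (illustrative; assumes the common MAX_SKB_FRAGS of 17 and a
 * 16-byte struct fun_dataop_gl, both defined elsewhere): FUNETH_MAX_GL_SZ is
 * 18 * 16 = 288 bytes, so a worst-case sk_buff needs
 * DIV_ROUND_UP(hdr + 288 + optional TLS record, 64) descriptors.
 */
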
/* Rx CQ descriptor size. */
#define FUNETH_CQE_SIZE 64U

/* Offset of cqe_info within a CQE. */
#define FUNETH_CQE_INFO_OFFSET (FUNETH_CQE_SIZE - sizeof(struct fun_cqe_info))

/* Construct the IRQ portion of a CQ doorbell. The resulting value arms the
 * interrupt with the supplied time delay and packet count moderation settings.
 */
#define FUN_IRQ_CQ_DB(usec, pkts) \
	(FUN_DB_IRQ_ARM_F | ((usec) << FUN_DB_INTCOAL_USEC_S) | \
	 ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))

/* As above for SQ doorbells. */
#define FUN_IRQ_SQ_DB(usec, pkts) \
	(FUN_DB_IRQ_ARM_F | \
	 ((usec) << FUN_DB_INTCOAL_USEC_S) | \
	 ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))

/* Per packet tailroom. Present only for 1-frag packets. */
#define FUN_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* Per packet headroom for XDP. Preferred over XDP_PACKET_HEADROOM to
 * accommodate two packets per buffer for 4K pages and 1500B MTUs.
 */
#define FUN_XDP_HEADROOM 192

/* Initialization state of a queue. */
enum {
	FUN_QSTATE_DESTROYED, /* what queue? */
	FUN_QSTATE_INIT_SW,   /* exists in SW, not on the device */
	FUN_QSTATE_INIT_FULL, /* exists both in SW and on device */
};

/* Initialization state of an interrupt. */
enum {
	FUN_IRQ_INIT,      /* initialized and in the XArray but inactive */
	FUN_IRQ_REQUESTED, /* request_irq() done */
	FUN_IRQ_ENABLED,   /* processing enabled */
	FUN_IRQ_DISABLED,  /* processing disabled */
};

struct bpf_prog;

struct funeth_txq_stats {    /* per Tx queue SW counters */
	u64 tx_pkts;         /* # of Tx packets */
	u64 tx_bytes;        /* total bytes of Tx packets */
	u64 tx_cso;          /* # of packets with checksum offload */
	u64 tx_tso;          /* # of non-encapsulated TSO super-packets */
	u64 tx_encap_tso;    /* # of encapsulated TSO super-packets */
	u64 tx_more;         /* # of DBs elided due to xmit_more */
	u64 tx_nstops;       /* # of times the queue has stopped */
	u64 tx_nrestarts;    /* # of times the queue has restarted */
	u64 tx_map_err;      /* # of packets dropped due to DMA mapping errors */
	u64 tx_xdp_full;     /* # of XDP packets that could not be enqueued */
	u64 tx_tls_pkts;     /* # of Tx TLS packets offloaded to HW */
	u64 tx_tls_bytes;    /* Tx bytes of HW-handled TLS payload */
	u64 tx_tls_fallback; /* attempted Tx TLS offloads punted to SW */
	u64 tx_tls_drops;    /* attempted Tx TLS offloads dropped */
};

struct funeth_tx_info {      /* per Tx descriptor state */
	union {
		struct sk_buff *skb; /* associated packet */
		void *vaddr;         /* start address for XDP */
	};
};

struct funeth_txq {
	/* RO cacheline of frequently accessed data */
	u32 mask;               /* queue depth - 1 */
	u32 hw_qid;             /* device ID of the queue */
	void *desc;             /* base address of descriptor ring */
	struct funeth_tx_info *info;
	struct device *dma_dev; /* device for DMA mappings */
	volatile __be64 *hw_wb; /* HW write-back location */
	u32 __iomem *db;        /* SQ doorbell register address */
	struct netdev_queue *ndq;
	dma_addr_t dma_addr;    /* DMA address of descriptor ring */
	/* producer R/W cacheline */
	u16 qidx;               /* queue index within net_device */
	u16 ethid;
	u32 prod_cnt;           /* producer counter */
	struct funeth_txq_stats stats;
	/* shared R/W cacheline, primarily accessed by consumer */
	u32 irq_db_val;         /* value written to IRQ doorbell */
	u32 cons_cnt;           /* consumer (cleanup) counter */
	struct net_device *netdev;
	struct fun_irq *irq;
	int numa_node;
	u8 init_state;          /* queue initialization state */
	struct u64_stats_sync syncp;
};

struct funeth_rxq_stats {  /* per Rx queue SW counters */
	u64 rx_pkts;       /* # of received packets, including SW drops */
	u64 rx_bytes;      /* total size of received packets */
	u64 rx_cso;        /* # of packets with checksum offload */
	u64 rx_bufs;       /* total # of Rx buffers provided to device */
	u64 gro_pkts;      /* # of GRO superpackets */
	u64 gro_merged;    /* # of pkts merged into existing GRO superpackets */
	u64 rx_page_alloc; /* # of page allocations for Rx buffers */
	u64 rx_budget;     /* NAPI iterations that exhausted their budget */
	u64 rx_mem_drops;  /* # of packets dropped due to memory shortage */
	u64 rx_map_err;    /* # of page DMA mapping errors */
	u64 xdp_drops;     /* XDP_DROPped packets */
	u64 xdp_tx;        /* successful XDP transmits */
	u64 xdp_redir;     /* successful XDP redirects */
	u64 xdp_err;       /* packets dropped due to XDP errors */
};

struct funeth_rxbuf {        /* per Rx buffer state */
	struct page *page;   /* associated page */
	dma_addr_t dma_addr; /* DMA address of page start */
	int pg_refs;         /* page refs held by driver */
	int node;            /* page node, or -1 if it is PF_MEMALLOC */
};

struct funeth_rx_cache {       /* cache of DMA-mapped previously used buffers */
	struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
	unsigned int prod_cnt;     /* producer counter */
	unsigned int cons_cnt;     /* consumer counter */
	unsigned int mask;         /* depth - 1 */
};

/* An Rx queue consists of a CQ and an SQ used to provide Rx buffers. */
struct funeth_rxq {
	struct net_device *netdev;
	struct napi_struct *napi;
	struct device *dma_dev;    /* device for DMA mappings */
	void *cqes;                /* base of CQ descriptor ring */
	const void *next_cqe_info; /* fun_cqe_info of next CQE */
	u32 __iomem *cq_db;        /* CQ doorbell register address */
	unsigned int cq_head;      /* CQ head index */
	unsigned int cq_mask;      /* CQ depth - 1 */
	u16 phase;                 /* CQ phase tag */
	u16 qidx;                  /* queue index within net_device */
	unsigned int irq_db_val;   /* IRQ info for CQ doorbell */
	struct fun_eprq_rqbuf *rqes; /* base of RQ descriptor ring */
	struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
	struct funeth_rxbuf *cur_buf; /* currently active buffer */
	u32 __iomem *rq_db;        /* RQ doorbell register address */
	unsigned int rq_cons;      /* RQ consumer counter */
	unsigned int rq_mask;      /* RQ depth - 1 */
	unsigned int buf_offset;   /* offset of next pkt in head buffer */
	u8 xdp_flush;              /* XDP flush types needed at NAPI end */
	u8 init_state;             /* queue initialization state */
	u16 headroom;              /* per packet headroom */
	unsigned int rq_cons_db;   /* value of rq_cons at last RQ db */
	unsigned int rq_db_thres;  /* # of new buffers needed to write RQ db */
	struct funeth_rxbuf spare_buf; /* spare for next buffer replacement */
	struct funeth_rx_cache cache; /* used buffer cache */
	struct bpf_prog *xdp_prog; /* optional XDP BPF program */
	struct funeth_rxq_stats stats;
	dma_addr_t cq_dma_addr;    /* DMA address of CQE ring */
	dma_addr_t rq_dma_addr;    /* DMA address of RQE ring */
	u16 irq_cnt;
	u32 hw_cqid;               /* device ID of the queue's CQ */
	u32 hw_sqid;               /* device ID of the queue's SQ */
	int numa_node;
	struct u64_stats_sync syncp;
	struct xdp_rxq_info xdp_rxq;
};

#define FUN_QSTAT_INC(q, counter) \
	do { \
		u64_stats_update_begin(&(q)->syncp); \
		(q)->stats.counter++; \
		u64_stats_update_end(&(q)->syncp); \
	} while (0)

#define FUN_QSTAT_READ(q, seq, stats_copy) \
	do { \
		seq = u64_stats_fetch_begin(&(q)->syncp); \
		stats_copy = (q)->stats; \
	} while (u64_stats_fetch_retry(&(q)->syncp, (seq)))

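/* Illustrative use of FUN_QSTAT_READ (variable names are examples only):
 *
 *	unsigned int seq;
 *	struct funeth_txq_stats snap;
 *
 *	FUN_QSTAT_READ(txq, seq, snap);
 *
 * This retries the copy while a writer holds the u64_stats sequence, so the
 * snapshot of all per-queue counters is consistent.
 */
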
#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)

struct fun_irq {
	struct napi_struct napi;
	struct funeth_txq *txq;
	struct funeth_rxq *rxq;
	u8 state;
	u16 irq_idx;             /* index of MSI-X interrupt */
	int irq;                 /* Linux IRQ vector */
	cpumask_t affinity_mask; /* IRQ affinity */
	struct irq_affinity_notify aff_notify;
	char name[FUN_INT_NAME_LEN];
} ____cacheline_internodealigned_in_smp;

/* Return the start address of the idx-th Tx descriptor. */
static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
				     unsigned int idx)
{
	return q->desc + idx * FUNETH_SQE_SIZE;
}

static inline void fun_txq_wr_db(const struct funeth_txq *q)
{
	unsigned int tail = q->prod_cnt & q->mask;

	writel(tail, q->db);
}

static inline int fun_irq_node(const struct fun_irq *p)
{
	return local_memory_node(cpu_to_node(cpumask_first(&p->affinity_mask)));
}

int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
int fun_txq_napi_poll(struct napi_struct *napi, int budget);
netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len);
int fun_xdp_xmit_frames(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags);

int funeth_txq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ndesc, struct fun_irq *irq, int state,
		      struct funeth_txq **qp);
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq);
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state);
int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
		      int state, struct funeth_rxq **qp);
int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq);
struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state);
int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog);

#endif /* _FUNETH_TXRX_H */
include/linux/pci_ids.h
@@ -2561,6 +2561,8 @@

#define PCI_VENDOR_ID_HYGON		0x1d94

#define PCI_VENDOR_ID_FUNGIBLE		0x1dad

#define PCI_VENDOR_ID_HXT		0x1dbf

#define PCI_VENDOR_ID_TEKRAM		0x1de1