Merge branch 'page_pool-bnxt_en-unlink-old-page-pool-in-queue-api-using-helper'

David Wei says:

====================
page_pool: bnxt_en: unlink old page pool in queue api using helper

Commit 56ef27e3 unexported page_pool_unlink_napi() and renamed it to
page_pool_disable_direct_recycling(), because there was no in-tree user of
page_pool_unlink_napi().

Since then, the Rx queue API and an implementation of it in bnxt have been
merged. The bnxt implementation broadly follows these steps: allocate new
queue memory + page pool, stop the old Rx queue, swap, then destroy the old
queue memory + page pool (see the sketch after the commit metadata below).

The existing NAPI instance is re-used, so destroying the old page pool, which
is no longer in use but is still linked to this shared NAPI instance, triggers
warnings.

In my initial patches I unlinked a page pool from a NAPI instance directly.
Instead, export page_pool_disable_direct_recycling() and call that, so that
the driver does not have to touch a core struct.
====================

Link: https://patch.msgid.link/20240627030200.3647145-1-dw@davidwei.uk
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Commit e27d7168f0 by Paolo Abeni, 2024-07-02 15:00:14 +02:00
3 changed files with 4 additions and 6 deletions
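For orientation, here is a rough sketch of the queue restart flow described in
the cover letter, showing where the exported helper fits. This is illustrative
only: struct my_rxq and the my_queue_*() helpers are hypothetical stand-ins for
the driver's queue ops; the page_pool_*() calls are the real core API.

#include <linux/netdevice.h>
#include <net/page_pool/types.h>

/* Illustrative sketch only; my_rxq and my_queue_*() are hypothetical. */
static int my_queue_restart(struct net_device *dev, struct my_rxq *old_q, int idx)
{
	struct my_rxq *new_q;

	/* 1) Allocate new queue memory and a new page pool.  The new pool is
	 *    linked to the same NAPI instance as the old one.
	 */
	new_q = my_queue_mem_alloc(dev, idx);
	if (!new_q)
		return -ENOMEM;

	/* 2) Stop the old Rx queue and unlink its page pool from the shared
	 *    NAPI instance (what the bnxt_queue_stop() hunk below does).
	 */
	my_queue_stop(dev, old_q, idx);
	page_pool_disable_direct_recycling(old_q->page_pool);

	/* 3) Swap: start the new queue on the same NAPI instance. */
	my_queue_start(dev, new_q, idx);

	/* 4) Destroy the old queue memory and page pool.  Without step 2, the
	 *    old pool would still point at the re-used NAPI instance and
	 *    page_pool_destroy() would warn.
	 */
	page_pool_destroy(old_q->page_pool);
	my_queue_mem_free(dev, old_q);

	return 0;
}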

drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -15081,11 +15081,6 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
 	bnxt_free_one_rx_ring(bp, rxr);
 	bnxt_free_one_rx_agg_ring(bp, rxr);
-	/* At this point, this NAPI instance has another page pool associated
-	 * with it. Disconnect here before freeing the old page pool to avoid
-	 * warnings.
-	 */
-	rxr->page_pool->p.napi = NULL;
 	page_pool_destroy(rxr->page_pool);
 	rxr->page_pool = NULL;
@@ -15205,6 +15200,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
 	rxr->rx_next_cons = 0;
+	page_pool_disable_direct_recycling(rxr->page_pool);
 	memcpy(qmem, rxr, sizeof(*rxr));
 	bnxt_init_rx_ring_struct(bp, qmem);

include/net/page_pool/types.h

@@ -229,6 +229,7 @@ struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
 struct xdp_mem_info;
 #ifdef CONFIG_PAGE_POOL
+void page_pool_disable_direct_recycling(struct page_pool *pool);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			    const struct xdp_mem_info *mem);

net/core/page_pool.c

@@ -1014,7 +1014,7 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
 	pool->xdp_mem_id = mem->id;
 }
-static void page_pool_disable_direct_recycling(struct page_pool *pool)
+void page_pool_disable_direct_recycling(struct page_pool *pool)
 {
 	/* Disable direct recycling based on pool->cpuid.
 	 * Paired with READ_ONCE() in page_pool_napi_local().
@@ -1032,6 +1032,7 @@ static void page_pool_disable_direct_recycling(struct page_pool *pool)
 	WRITE_ONCE(pool->p.napi, NULL);
 }
+EXPORT_SYMBOL(page_pool_disable_direct_recycling);
 void page_pool_destroy(struct page_pool *pool)
 {
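For completeness, a minimal usage sketch of the now-exported helper from a
driver's point of view, showing how a pool becomes linked to a NAPI instance in
the first place (via page_pool_params.napi) and why it must be unlinked before
destruction when that NAPI instance lives on. Not from this commit: struct
my_rx_ring, the 256-page pool size, and the surrounding function are
hypothetical; page_pool_create(), page_pool_disable_direct_recycling() and
page_pool_destroy() are the real API.

#include <linux/numa.h>
#include <net/page_pool/types.h>

/* Hypothetical driver ring; only the page_pool member matters here. */
struct my_rx_ring {
	struct page_pool *page_pool;
};

static void my_swap_page_pool(struct my_rx_ring *rxr, struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.pool_size = 256,		/* illustrative size */
		.nid	   = NUMA_NO_NODE,
		.napi	   = napi,		/* NAPI shared by old and new pool */
	};
	struct page_pool *old_pool = rxr->page_pool;

	/* Error handling omitted for brevity. */
	rxr->page_pool = page_pool_create(&pp);	/* new pool, same NAPI */

	page_pool_disable_direct_recycling(old_pool);	/* unlink old pool from NAPI */
	page_pool_destroy(old_pool);			/* destroying it no longer warns */
}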