
bpf: introduce frags support to bpf_prog_test_run_xdp()

Introduce the capability to allocate xdp frags in the
bpf_prog_test_run_xdp() routine. This is a preliminary patch to
introduce the selftests for the new xdp frags eBPF helpers.

Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/b7c0e425a9287f00f601c4fc0de54738ec6ceeea.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Author:    Lorenzo Bianconi <lorenzo@kernel.org>
Date:      2022-01-21 11:09:58 +01:00
Committer: Alexei Starovoitov
commit     1c19499825
parent     be3d72a289

net/bpf/test_run.c

@@ -876,16 +876,16 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr)
 {
 	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	u32 headroom = XDP_PACKET_HEADROOM;
 	u32 size = kattr->test.data_size_in;
+	u32 headroom = XDP_PACKET_HEADROOM;
+	u32 retval, duration, max_data_sz;
 	u32 repeat = kattr->test.repeat;
 	struct netdev_rx_queue *rxqueue;
+	struct skb_shared_info *sinfo;
 	struct xdp_buff xdp = {};
-	u32 retval, duration;
+	int i, ret = -EINVAL;
 	struct xdp_md *ctx;
-	u32 max_data_sz;
 	void *data;
-	int ret = -EINVAL;
 
 	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
 	    prog->expected_attach_type == BPF_XDP_CPUMAP)
@@ -905,27 +905,60 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 		headroom -= ctx->data;
 	}
 
 	/* XDP have extra tailroom as (most) drivers use full page */
 	max_data_sz = 4096 - headroom - tailroom;
+	size = min_t(u32, size, max_data_sz);
 
-	data = bpf_test_init(kattr, kattr->test.data_size_in,
-			     max_data_sz, headroom, tailroom);
+	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
 	if (IS_ERR(data)) {
 		ret = PTR_ERR(data);
 		goto free_ctx;
 	}
 
 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
-	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
-		      &rxqueue->xdp_rxq);
+	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
+	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
 	xdp_prepare_buff(&xdp, data, headroom, size, true);
+	sinfo = xdp_get_shared_info_from_buff(&xdp);
 
 	ret = xdp_convert_md_to_buff(ctx, &xdp);
 	if (ret)
 		goto free_data;
 
+	if (unlikely(kattr->test.data_size_in > size)) {
+		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
+
+		while (size < kattr->test.data_size_in) {
+			struct page *page;
+			skb_frag_t *frag;
+			int data_len;
+
+			page = alloc_page(GFP_KERNEL);
+			if (!page) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			frag = &sinfo->frags[sinfo->nr_frags++];
+			__skb_frag_set_page(frag, page);
+
+			data_len = min_t(int, kattr->test.data_size_in - size,
+					 PAGE_SIZE);
+			skb_frag_size_set(frag, data_len);
+
+			if (copy_from_user(page_address(page), data_in + size,
+					   data_len)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			sinfo->xdp_frags_size += data_len;
+			size += data_len;
+		}
+		xdp_buff_set_frags_flag(&xdp);
+	}
+
 	if (repeat > 1)
 		bpf_prog_change_xdp(NULL, prog);
+
 	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
 	/* We convert the xdp_buff back to an xdp_md before checking the return
 	 * code so the reference count of any held netdevice will be decremented
@@ -935,10 +968,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	if (ret)
 		goto out;
 
-	if (xdp.data_meta != data + headroom ||
-	    xdp.data_end != xdp.data_meta + size)
-		size = xdp.data_end - xdp.data_meta;
-
+	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
 	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
 			      duration);
 	if (!ret)
@@ -949,6 +979,8 @@ out:
 	if (repeat > 1)
 		bpf_prog_change_xdp(prog, NULL);
 free_data:
+	for (i = 0; i < sinfo->nr_frags; i++)
+		__free_page(skb_frag_page(&sinfo->frags[i]));
 	kfree(data);
 free_ctx:
 	kfree(ctx);
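
The selftests this patch prepares for pair such a test run with an XDP program
built against the new frags-aware helpers. A short illustrative sketch of such
a program, assuming the bpf_xdp_get_buff_len() helper introduced elsewhere in
the same series is available in the target kernel (program name, section name
and threshold are made up for the example):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Illustrative only: drop "short" buffers and pass buffers whose total
 * length (linear area plus frags) exceeds one page, using the
 * frags-aware bpf_xdp_get_buff_len() helper.
 */
SEC("xdp")
int xdp_check_frags_len(struct xdp_md *ctx)
{
	__u64 len = bpf_xdp_get_buff_len(ctx);

	return len > 4096 ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";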