// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>

bpf: Switch BPF verifier log to be a rotating log by default

Currently, if the user-supplied log buffer for collecting the BPF verifier
log turns out to be too small to contain the full log, the bpf() syscall
returns -ENOSPC, fails BPF program verification/load, and preserves the
first N-1 bytes of the verifier log (where N is the size of the
user-supplied buffer).

This is problematic in a bunch of common scenarios, especially when working
with real-world BPF programs that tend to be pretty complex as far as
verification goes and require big log buffers. Typically, that's when
debugging tricky cases at log level 2 (verbose). Also, when a BPF program
is successfully validated, log level 2 is the only way to actually see
verifier state progression and all the important details.

Even with log level 1, it's possible to get -ENOSPC when the final verifier
log would fit in the log buffer, if there is a code path deep enough to fill
up the entire log, even though it would normally be reset later on (there is
logic to chop off successfully validated portions of the BPF verifier log).

In short, it's not always possible to pre-size the log buffer. What's worse,
in practice the end of the log is most often far more important than the
beginning, yet the verifier stops emitting log output as soon as the initial
log buffer fills up.

This patch switches the BPF verifier log to behave as a rotating log by
default. That is, if the user-supplied log buffer turns out to be too short,
the verifier keeps overwriting previously written log output, effectively
treating the user's log buffer as a ring buffer. -ENOSPC is still returned
at the end, to notify the user that the log contents were truncated, but the
important last N bytes of the log are returned, which might be all that the
user really needs. This consistent -ENOSPC behavior, regardless of rotating
or fixed log mode, prevents backwards-compatibility breakage. The only
user-visible change is which portion of the verifier log the user ends up
seeing *if the buffer is too small*. Given that the contents of the verifier
log are not an ABI, there is no breakage due to this behavior change.
Specialized tools that rely on specific contents of the verifier log in the
-ENOSPC scenario are expected to be easily adapted to accommodate both old
and new behaviors.

Importantly, though, to preserve a good user experience and not require
every user-space application to adapt to this new behavior, the verifier
rotates the log (in place) before exiting to user-space, so that it starts
at the very beginning of the user buffer as a contiguous zero-terminated
string. The contents will, of course, be the last N-1 bytes of the full
verifier log.

Given the beginning of the log is sometimes important as well, we add the
BPF_LOG_FIXED flag (which equals 8) to force the old behavior, which allows
tools like veristat to request the first part of the verifier log if
necessary. The BPF_LOG_FIXED flag is also a simple and straightforward way
to check whether the BPF verifier supports the rotating behavior.

On the implementation side, it's all conceptually simple. We maintain 64-bit
logical start and end positions. If we need to truncate the log, the start
position is adjusted to lag the end position by N bytes. We then use those
logical positions to calculate their matching actual positions in the user
buffer and handle wrap-around at the end of the buffer properly. Finally,
right before returning from bpf_check(), we rotate the user log buffer
contents in place as necessary to make the log contiguous. See comments in
the relevant functions for details.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Lorenz Bauer <lmb@isovalent.com>
Link: https://lore.kernel.org/bpf/20230406234205.323208-4-andrii@kernel.org
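
To make the ring arithmetic described above concrete, here is a minimal
illustrative sketch in plain C. All names (ring_write, buf_sz, and so on)
are made up for this example and the kernel implementation below differs in
detail; the sketch only demonstrates 64-bit logical positions being mapped
onto a fixed-size buffer with two-chunk wrap-around copies:

#include <string.h>

/* Illustrative sketch only: maintain logical [start_pos, end_pos) over a
 * fixed-size buffer, assuming len <= buf_sz.
 */
static void ring_write(char *buf, unsigned int buf_sz,
		       unsigned long long *start_pos,
		       unsigned long long *end_pos,
		       const char *msg, unsigned int len)
{
	unsigned int off;

	*end_pos += len;
	/* keep the logical window no larger than the physical buffer */
	if (*end_pos - *start_pos > buf_sz)
		*start_pos = *end_pos - buf_sz;

	off = (*end_pos - len) % buf_sz;	/* physical offset of msg */
	if (off + len <= buf_sz) {
		memcpy(buf + off, msg, len);	/* fits contiguously */
	} else {
		/* wraps past the end of the buffer: copy in two chunks */
		memcpy(buf + off, msg, buf_sz - off);
		memcpy(buf, msg + (buf_sz - off), len - (buf_sz - off));
	}
}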

#include <linux/math64.h>

#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)

static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	/* ubuf and len_total should both be specified (or not) together */
	if (!!log->ubuf != !!log->len_total)
		return false;
	/* log buf without log_level is meaningless */
	if (log->ubuf && log->level == 0)
		return false;
	if (log->level & ~BPF_LOG_MASK)
		return false;
	if (log->len_total > UINT_MAX >> 2)
		return false;
	return true;
}

int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size)
{
	log->level = log_level;
	log->ubuf = log_buf;
	log->len_total = log_size;

	/* log attributes have to be sane */
	if (!bpf_verifier_log_attr_valid(log))
		return -EINVAL;

	return 0;
}
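
As a hedged usage sketch (hypothetical caller, not part of this file; ubuf
stands for the buffer pointer taken from the bpf() syscall attributes),
these are the combinations the validity check above accepts and rejects:

/* Hypothetical illustration of bpf_vlog_init() results: */
static int __maybe_unused bpf_vlog_init_examples(char __user *ubuf)
{
	struct bpf_verifier_log log = {};
	int err;

	/* 0 (OK): log_level 1 together with a user buffer */
	err = bpf_vlog_init(&log, 1, ubuf, 64 * 1024);
	/* -EINVAL: buffer supplied, but log_level == 0 */
	err = bpf_vlog_init(&log, 0, ubuf, 64 * 1024);
	/* -EINVAL: non-zero size without a buffer (and vice versa) */
	err = bpf_vlog_init(&log, 1, NULL, 4096);
	return err;
}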
bpf: Keep track of total log content size in both fixed and rolling modes

Change how we do accounting in BPF_LOG_FIXED mode and adopt log->end_pos as
a *logical* log position. This means that we can now go beyond the physical
log buffer size and can tell what log buffer size would fit the entire log
contents without -ENOSPC.

To do this for BPF_LOG_FIXED mode, we need to remove the short-circuiting
logic of not vsnprintf()'ing further log content once we have filled up the
user-provided buffer, which is done by the bpf_verifier_log_needed() checks.
We modify these checks to always keep going if log->level is non-zero (i.e.,
a log is requested), even if log->ubuf was NULL'ed out due to an error
copying data to user-space, or if the entire log buffer is physically full.

We adapt the bpf_verifier_vlog() routine to work correctly with the
log->ubuf == NULL condition, performing log formatting into the temporary
kernel buffer and doing all the necessary accounting, but avoiding copying
data out if the buffer is full or NULL'ed out.

With these changes, it's now possible to determine the log contents size in
both BPF_LOG_FIXED and the default rolling log mode. We need to keep in mind
bpf_vlog_reset(), though, which shrinks the log contents after successful
verification of a particular code path. This log reset means that
log->end_pos isn't always increasing, so to report back to users what the
log buffer size should be to fit all log content without causing -ENOSPC,
even in the presence of log resetting, we need to keep the maximum over the
"lifetime" of logging. We do this accounting in the
bpf_vlog_update_len_max() helper.

A related and subtle aspect is that with this logical log->end_pos, even in
BPF_LOG_FIXED mode we could temporarily "overflow" the buffer, but then
reset it back with bpf_vlog_reset() to a position inside the user-supplied
log_buf. In such a situation we still want to properly maintain the
terminating zero. We will eventually return -ENOSPC even if the final log
buffer is small (we detect this through the log->len_max check). This
behavior is simpler to reason about and is consistent with the current
behavior of the verifier log. Handling this required a small addition to
the bpf_vlog_reset() logic to avoid doing put_user() beyond the physical
log buffer dimensions.

Another issue to keep in mind is that we limit the log buffer size to a
32-bit value and keep the log length as a u32, but theoretically the
verifier could produce a huge log stretching beyond 4GB. Instead of keeping
(and later returning) a 64-bit log length, we cap it at UINT_MAX. The
current UAPI makes it impossible to specify a log buffer size bigger than
4GB anyway, so we don't really lose anything here and keep everything
consistently 32-bit in the UAPI. This property will be utilized in the next
patch.

Doing the same determination of maximum log buffer size for rolling mode is
trivial, as log->end_pos and log->start_pos are already logical positions,
so there is nothing new there.

These changes incidentally fix one small issue with the previous logging
logic. Previously, if the user provided a log buffer of size N and the
actual log output was exactly N-1 bytes plus the terminating \0, the kernel
logic couldn't distinguish this condition from the log truncation scenario,
which would also end up with truncated log contents of N-1 bytes plus the
terminating \0. But now, with log->end_pos being a logical position that can
go beyond the actual log buffer size, we can distinguish these two
conditions, which we do in this patch. This plays nicely with returning
log_size_actual (implemented in the UAPI in the next patch), as we can now
guarantee that if the user takes such log_size_actual and provides a log
buffer of that exact size, they will not get -ENOSPC in return.

All in all, these changes conceptually unify the fixed and rolling log modes
much better, and enable a nice feature requested by users: knowing what the
buffer size should be to avoid -ENOSPC. We'll plumb this through the UAPI
and the code in the next patch.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Lorenz Bauer <lmb@isovalent.com>
Link: https://lore.kernel.org/bpf/20230406234205.323208-12-andrii@kernel.org
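
The buffer-sizing feature described above suggests a simple two-pass load
pattern from user-space. A hedged sketch, assuming the log_size_actual
output field lands in union bpf_attr under that name in the follow-up UAPI
patch (the field name is taken from this commit message and is an
assumption; adjust it to whatever the actual header defines):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Hypothetical two-pass load: try a small buffer first, then retry with
 * the exact size the kernel reports back, guaranteeing no -ENOSPC.
 */
static int load_with_full_log(union bpf_attr *attr)
{
	char small[256];
	char *big;
	int fd;

	attr->log_buf = (uint64_t)(uintptr_t)small;
	attr->log_size = sizeof(small);
	attr->log_level = 1;

	fd = syscall(__NR_bpf, BPF_PROG_LOAD, attr, sizeof(*attr));
	if (fd >= 0 || errno != ENOSPC)
		return fd;

	/* assumed output field from the follow-up UAPI patch */
	big = malloc(attr->log_size_actual);
	if (!big)
		return -1;
	attr->log_buf = (uint64_t)(uintptr_t)big;
	attr->log_size = attr->log_size_actual;
	return syscall(__NR_bpf, BPF_PROG_LOAD, attr, sizeof(*attr));
}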

static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
{
	/* add_len includes terminal \0, so no need for +1. */
	u64 len = log->end_pos + add_len;

	/* log->len_max could be larger than our current len due to
	 * bpf_vlog_reset() calls, so we maintain the max of any length at any
	 * previous point
	 */
	if (len > UINT_MAX)
		log->len_max = UINT_MAX;
	else if (len > log->len_max)
		log->len_max = len;
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	u64 cur_pos;
	u32 new_n, n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
		return;
	}

	n += 1; /* include terminating zero */
	bpf_vlog_update_len_max(log, n);
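	/* Worked example (illustrative numbers): with len_total == 8 and
	 * three messages of n == 5 each (4 chars + '\0'), end_pos advances
	 * 4, 8, 12 while len_max records 5, 9, 13 -- the buffer size that
	 * would have fit everything, even though in rotating mode the
	 * physical buffer only ever holds the last 8 bytes.
	 */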

	if (log->level & BPF_LOG_FIXED) {
		/* check if we have at least something to put into user buf */
		new_n = 0;
		if (log->end_pos < log->len_total) {
			new_n = min_t(u32, log->len_total - log->end_pos, n);
			log->kbuf[new_n - 1] = '\0';
		}
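
		/* Illustrative: with len_total == 16, end_pos == 12 and an
		 * incoming message of n == 10, only new_n == 4 bytes still
		 * fit, and kbuf[3] is forced to '\0' so the copied chunk
		 * remains a properly terminated string.
		 */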
		cur_pos = log->end_pos;
		log->end_pos += n - 1; /* don't count terminating '\0' */

		if (log->ubuf && new_n &&
		    copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n))
			goto fail;
	} else {
		u64 new_end, new_start;
		u32 buf_start, buf_end, new_n;

		new_end = log->end_pos + n;
		if (new_end - log->start_pos >= log->len_total)
			new_start = new_end - log->len_total;
		else
			new_start = log->start_pos;

		log->start_pos = new_start;
		log->end_pos = new_end - 1; /* don't count terminating '\0' */

		if (!log->ubuf)
			return;
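
		/* Worked example (illustrative): len_total == 8,
		 * start_pos == 0, end_pos == 6 and an incoming n == 5 give
		 * new_end == 11; start_pos advances to 3, and the message
		 * maps onto physical offsets 6..7 and 0..2, so the copy
		 * below wraps around and is done in two chunks.
		 */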
		new_n = min(n, log->len_total);
		cur_pos = new_end - new_n;
		div_u64_rem(cur_pos, log->len_total, &buf_start);
		div_u64_rem(new_end, log->len_total, &buf_end);
		/* new_end and buf_end are exclusive indices, so if buf_end is
		 * exactly zero, then it actually points right to the end of
		 * ubuf and there is no wrap around
		 */
		if (buf_end == 0)
			buf_end = log->len_total;

		/* if buf_start > buf_end, we wrapped around;
		 * if buf_start == buf_end, then we fill ubuf completely; we
		 * can't have buf_start == buf_end to mean that there is
		 * nothing to write, because we always write at least
		 * something, even if terminal '\0'
		 */
		if (buf_start < buf_end) {
			/* message fits within contiguous chunk of ubuf */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 buf_end - buf_start))
				goto fail;
		} else {
			/* message wraps around the end of ubuf, copy in two chunks */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 log->len_total - buf_start))
				goto fail;
			if (copy_to_user(log->ubuf,
					 log->kbuf + n - buf_end,
					 buf_end))
				goto fail;
		}
	}

	return;
fail:
	log->ubuf = NULL;
}
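To make the logical-position arithmetic above concrete, here is a standalone userspace sketch (hypothetical demonstration code, not part of this file; the terminating-'\0' accounting is omitted for brevity). It shows how the logical window [start_pos, end_pos) grows without bound while physical offsets are just the logical positions modulo len_total:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start_pos = 0, end_pos = 0;
	const uint32_t len_total = 8;   /* tiny "user buffer" for demonstration */
	const uint32_t lens[] = {5, 6}; /* sizes of two appended messages */

	for (int i = 0; i < 2; i++) {
		uint64_t new_end = end_pos + lens[i];

		/* same rule as above: drop the oldest bytes once the
		 * window would exceed the physical buffer size
		 */
		if (new_end - start_pos >= len_total)
			start_pos = new_end - len_total;
		end_pos = new_end;

		printf("logical [%llu, %llu) -> physical [%llu, %llu)\n",
		       (unsigned long long)start_pos,
		       (unsigned long long)end_pos,
		       (unsigned long long)(start_pos % len_total),
		       (unsigned long long)(end_pos % len_total));
	}
	return 0;
}

After the second append the logical window is [3, 11): the newest 8 bytes survive, and the physical window wraps around the end of the buffer, which is exactly the situation the two-chunk copy_to_user() path above handles.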
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos)
{
	char zero = 0;
	u32 pos;

	if (WARN_ON_ONCE(new_pos > log->end_pos))
		return;

	if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL)
		return;
	/* if position to which we reset is beyond current log window,
	 * then we didn't preserve any useful content and should adjust
	 * start_pos to end up with an empty log (start_pos == end_pos)
	 */
	log->end_pos = new_pos;
	if (log->end_pos < log->start_pos)
		log->start_pos = log->end_pos;
bpf: Keep track of total log content size in both fixed and rolling modes

Change how we do accounting in BPF_LOG_FIXED mode and adopt log->end_pos as a *logical* log position. This means that we can now go beyond the physical log buffer size and are able to tell what log buffer size would fit the entire log contents without -ENOSPC.

To do this for BPF_LOG_FIXED mode, we need to remove the short-circuiting logic that stops vsnprintf()'ing further log content once the user-provided buffer is filled up, which is done by the bpf_verifier_log_needed() checks. We modify these checks to always keep going if log->level is non-zero (i.e., a log is requested), even if log->ubuf was NULL'ed out due to a fault while copying data to user-space, or if the entire log buffer is physically full. We adapt the bpf_verifier_vlog() routine to work correctly with the log->ubuf == NULL condition, performing log formatting into a temporary kernel buffer and doing all the necessary accounting, but simply avoiding copying data out if the buffer is full or NULL'ed out.

With these changes, it's now possible to determine the log contents size in both BPF_LOG_FIXED and the default rolling log mode. We need to keep in mind bpf_vlog_reset(), though, which shrinks log contents after successful verification of a particular code path. This log reset means that log->end_pos isn't always increasing, so to return to users what the log buffer size should be to fit all log content without causing -ENOSPC even in the presence of log resetting, we need to keep the maximum over the "lifetime" of logging. We do this accounting in the bpf_vlog_update_len_max() helper.

A related and subtle aspect is that with this logical log->end_pos, even in BPF_LOG_FIXED mode we could temporarily "overflow" the buffer, but then reset it back with bpf_vlog_reset() to a position inside the user-supplied log_buf. In such a situation we still want to properly maintain the terminating zero. We will eventually return -ENOSPC even if the final log buffer is small (we detect this through the log->len_max check). This behavior is simpler to reason about and is consistent with the current behavior of the verifier log. Handling this required a small addition to the bpf_vlog_reset() logic to avoid doing put_user() beyond physical log buffer dimensions.

Another issue to keep in mind is that we limit log buffer size to a 32-bit value and keep such log length as u32, but theoretically the verifier could produce a huge log stretching beyond 4GB. Instead of keeping (and later returning) a 64-bit log length, we cap it at UINT_MAX. Current UAPI makes it impossible to specify a log buffer size bigger than 4GB anyway, so we don't really lose anything here and keep everything consistently 32-bit in UAPI. This property will be utilized in the next patch.

Doing the same determination of maximum log buffer size for rolling mode is trivial, as log->end_pos and log->start_pos are already logical positions, so there is nothing new there.

These changes incidentally fix one small issue with the previous logging logic. Previously, if the user provided a log buffer of size N and actual log output was exactly N-1 bytes + terminating \0, kernel logic couldn't distinguish this condition from the log truncation scenario, which would end up with truncated log contents of N-1 bytes + terminating \0 as well. But now, with log->end_pos being a logical position that can go beyond the actual log buffer size, we can distinguish these two conditions, which we do in this patch. This plays nicely with returning log_size_actual (implemented in UAPI in the next patch), as we can now guarantee that if the user takes such log_size_actual and provides a log buffer of that exact size, they will not get -ENOSPC in return.

All in all, these changes conceptually unify the fixed and rolling log modes much better, and allow a nice feature requested by users: knowing what the size of the buffer should be to avoid -ENOSPC.

We'll plumb this through the UAPI and the code in the next patch.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Lorenz Bauer <lmb@isovalent.com>
Link: https://lore.kernel.org/bpf/20230406234205.323208-12-andrii@kernel.org
	if (!log->ubuf)
		return;
	if (log->level & BPF_LOG_FIXED)
		pos = log->end_pos + 1;
	else
		div_u64_rem(new_pos, log->len_total, &pos);

	if (pos < log->len_total && put_user(zero, log->ubuf + pos))
		log->ubuf = NULL;
}
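The commit message above refers to a bpf_vlog_update_len_max() helper that does the lifetime-maximum accounting but is not part of this excerpt. Based on the behavior it describes (logical end position, maximum kept across bpf_vlog_reset() shrinking, cap at UINT_MAX), a sketch of such a helper might look roughly like this:

static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
{
	/* add_len is assumed to include the terminating '\0' */
	u64 len = log->end_pos + add_len;

	/* log->end_pos can shrink again due to bpf_vlog_reset(), so keep
	 * the maximum length ever reached; cap at UINT_MAX because UAPI
	 * cannot express log buffers larger than 4GB anyway
	 */
	if (len > UINT_MAX)
		log->len_max = UINT_MAX;
	else if (len > log->len_max)
		log->len_max = len;
}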
static void bpf_vlog_reverse_kbuf(char *buf, int len)
{
	int i, j;

	for (i = 0, j = len - 1; i < j; i++, j--)
		swap(buf[i], buf[j]);
}
static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end)
{
	/* we split log->kbuf into two equal parts for both ends of array */
	int n = sizeof(log->kbuf) / 2, nn;
	char *lbuf = log->kbuf, *rbuf = log->kbuf + n;

	/* Read ubuf's section [start, end) two chunks at a time, from left
	 * and right side; within each chunk, swap all the bytes; after that
	 * reverse the order of lbuf and rbuf and write result back to ubuf.
	 * This way we'll end up with swapped contents of specified
	 * [start, end) ubuf segment.
	 */
	while (end - start > 1) {
		nn = min(n, (end - start) / 2);

		if (copy_from_user(lbuf, log->ubuf + start, nn))
			return -EFAULT;
		if (copy_from_user(rbuf, log->ubuf + end - nn, nn))
			return -EFAULT;

		bpf_vlog_reverse_kbuf(lbuf, nn);
		bpf_vlog_reverse_kbuf(rbuf, nn);

		/* we write lbuf to the right end of ubuf, while rbuf to the
		 * left one to end up with properly reversed overall ubuf
		 */
		if (copy_to_user(log->ubuf + start, rbuf, nn))
			return -EFAULT;
		if (copy_to_user(log->ubuf + end - nn, lbuf, nn))
			return -EFAULT;

		start += nn;
		end -= nn;
	}

	return 0;
}
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual)
{
	u32 sublen;
	int err;

	*log_size_actual = 0;
	if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL)
		return 0;
	if (!log->ubuf)
		goto skip_log_rotate;
	/* If we never truncated log, there is nothing to move around. */
	if (log->start_pos == 0)
		goto skip_log_rotate;
	/* Otherwise we need to rotate log contents to make it start from the
	 * buffer beginning and be a continuous zero-terminated string. Note
	 * that if log->start_pos != 0 then we definitely filled up entire log
	 * buffer with no gaps, and we just need to shift buffer contents to
	 * the left by (log->start_pos % log->len_total) bytes.
	 *
	 * Unfortunately, user buffer could be huge and we don't want to
	 * allocate temporary kernel memory of the same size just to shift
	 * contents in a straightforward fashion. Instead, we'll be clever and
	 * do in-place array rotation. This is a leetcode-style problem, which
	 * can be solved with three reversals.
	 *
	 * Let's say we have log buffer that has to be shifted left by 7 bytes
	 * (spaces and vertical bar is just for demonstrative purposes):
	 *   E F G H I J K | A B C D
	 *
	 * First, we reverse entire array:
	 *   D C B A | K J I H G F E
	 *
	 * Then we reverse first 4 bytes (DCBA) and separately last 7 bytes
	 * (KJIHGFE), resulting in a properly rotated array:
	 *   A B C D | E F G H I J K
	 *
	 * We'll utilize log->kbuf to read user memory chunk by chunk, swap
	 * bytes, and write them back. Doing it byte-by-byte would be
	 * unnecessarily inefficient. Altogether we are going to read and
	 * write each byte twice, for total 4 memory copies between kernel and
	 * user space.
	 */

	/* length of the chopped off part that will be the beginning;
	 * len(ABCD) in the example above
	 */
	div_u64_rem(log->start_pos, log->len_total, &sublen);
	sublen = log->len_total - sublen;

	err = bpf_vlog_reverse_ubuf(log, 0, log->len_total);
	err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen);
	err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total);
	if (err)
		log->ubuf = NULL;

skip_log_rotate:
	*log_size_actual = log->len_max;

	/* properly initialized log has either both ubuf!=NULL and len_total>0
	 * or ubuf==NULL and len_total==0, so if this condition doesn't hold,
	 * we got a fault somewhere along the way, so report it back
	 */
	if (!!log->ubuf != !!log->len_total)
		return -EFAULT;

	/* did truncation actually happen? */
	if (log->ubuf && log->len_max > log->len_total)
		return -ENOSPC;

	return 0;
}
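The three-reversal trick in the comment above is easy to verify in isolation; here is a hypothetical standalone userspace sketch (not kernel code) reproducing the E F G H I J K | A B C D example:

#include <stdio.h>
#include <string.h>

static void reverse(char *buf, int len)
{
	int i, j;

	for (i = 0, j = len - 1; i < j; i++, j--) {
		char tmp = buf[i];

		buf[i] = buf[j];
		buf[j] = tmp;
	}
}

int main(void)
{
	char buf[] = "EFGHIJKABCD";          /* wrapped log contents */
	int len = (int)strlen(buf);
	int sublen = 4;                      /* len("ABCD"), the chopped-off part */

	reverse(buf, len);                   /* "DCBAKJIHGFE" */
	reverse(buf, sublen);                /* "ABCDKJIHGFE" */
	reverse(buf + sublen, len - sublen); /* "ABCDEFGHIJK" */
	printf("%s\n", buf);                 /* prints the unrotated log */
	return 0;
}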
/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_log);
static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}
static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}
__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
				  u32 insn_off,
				  const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}
static const char *btf_type_name(const struct btf *btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}
/* string representation of 'enum bpf_reg_type'
 *
 * Note that reg_type_str() can not appear more than once in a single verbose()
 * statement.
 */
const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type)
{
	char postfix[16] = {0}, prefix[64] = {0};
	static const char * const str[] = {
		[NOT_INIT]		= "?",
		[SCALAR_VALUE]		= "scalar",
		[PTR_TO_CTX]		= "ctx",
		[CONST_PTR_TO_MAP]	= "map_ptr",
		[PTR_TO_MAP_VALUE]	= "map_value",
		[PTR_TO_STACK]		= "fp",
		[PTR_TO_PACKET]		= "pkt",
		[PTR_TO_PACKET_META]	= "pkt_meta",
		[PTR_TO_PACKET_END]	= "pkt_end",
		[PTR_TO_FLOW_KEYS]	= "flow_keys",
		[PTR_TO_SOCKET]		= "sock",
		[PTR_TO_SOCK_COMMON]	= "sock_common",
		[PTR_TO_TCP_SOCK]	= "tcp_sock",
		[PTR_TO_TP_BUFFER]	= "tp_buffer",
		[PTR_TO_XDP_SOCK]	= "xdp_sock",
		[PTR_TO_BTF_ID]		= "ptr_",
		[PTR_TO_MEM]		= "mem",
		[PTR_TO_BUF]		= "buf",
		[PTR_TO_FUNC]		= "func",
		[PTR_TO_MAP_KEY]	= "map_key",
		[CONST_PTR_TO_DYNPTR]	= "dynptr_ptr",
	};

	if (type & PTR_MAYBE_NULL) {
		if (base_type(type) == PTR_TO_BTF_ID)
			strncpy(postfix, "or_null_", 16);
		else
			strncpy(postfix, "_or_null", 16);
	}

	snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
		 type & MEM_RDONLY ? "rdonly_" : "",
		 type & MEM_RINGBUF ? "ringbuf_" : "",
		 type & MEM_USER ? "user_" : "",
		 type & MEM_PERCPU ? "percpu_" : "",
		 type & MEM_RCU ? "rcu_" : "",
		 type & PTR_UNTRUSTED ? "untrusted_" : "",
		 type & PTR_TRUSTED ? "trusted_" : "");

	snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
		 prefix, str[base_type(type)], postfix);
	return env->tmp_str_buf;
}
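To illustrate how the prefix, base name, and postfix compose, consider a hypothetical caller (the demo function and the chosen flag combination are illustrative, not from this file):

static void demo_reg_type_str(struct bpf_verifier_env *env)
{
	/* a read-only, possibly-NULL pointer to a map value */
	enum bpf_reg_type t = PTR_TO_MAP_VALUE | PTR_MAYBE_NULL | MEM_RDONLY;

	/* prefix "rdonly_" + base "map_value" + postfix "_or_null" */
	verbose(env, "%s\n", reg_type_str(env, t));
	/* expected output: rdonly_map_value_or_null */
}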
const char *dynptr_type_str(enum bpf_dynptr_type type)
{
	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
		return "local";
	case BPF_DYNPTR_TYPE_RINGBUF:
		return "ringbuf";
	case BPF_DYNPTR_TYPE_SKB:
		return "skb";
	case BPF_DYNPTR_TYPE_XDP:
		return "xdp";
	case BPF_DYNPTR_TYPE_INVALID:
		return "<invalid>";
	default:
		WARN_ONCE(1, "unknown dynptr type %d\n", type);
		return "<unknown>";
	}
}
const char *iter_type_str(const struct btf *btf, u32 btf_id)
{
	if (!btf || btf_id == 0)
		return "<invalid>";

	/* we already validated that type is valid and has conforming name */
	return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
}
const char *iter_state_str(enum bpf_iter_state state)
{
	switch (state) {
	case BPF_ITER_STATE_ACTIVE:
		return "active";
	case BPF_ITER_STATE_DRAINED:
		return "drained";
	case BPF_ITER_STATE_INVALID:
		return "<invalid>";
	default:
		WARN_ONCE(1, "unknown iter state %d\n", state);
		return "<unknown>";
	}
}
static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
	[STACK_DYNPTR]	= 'd',
	[STACK_ITER]	= 'i',
};
static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}
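For example, in verifier state output a register that was written in the current state is rendered by callers of this helper as "R1_w", one that was both read and written as "R1_rw", and a register with no liveness bits set gets no marker at all, since the leading "_" is only emitted when at least one bit is set. (The surrounding "R1...=" rendering is done by state-printing code outside this excerpt.)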
bpf: smarter verifier log number printing logic

Instead of always printing numbers as either decimals (and in some cases, like for "imm=%llx", in hexadecimals), decide the form based on actual values. For numbers in a reasonably small range (currently, [0, U16_MAX] for unsigned values, and [S16_MIN, S16_MAX] for signed ones), emit them as decimals. In all other cases, even for signed values, emit them in hexadecimals.

For large values the hex form is often much more useful: it's easier to see the exact difference between 0xffffffff80000000 and 0xffffffff7fffffff than between 18446744071562067966 and 18446744071562067967, as one particular example. Small values representing small pointer offsets or application constants, on the other hand, are much more readable in decimal notation.

Adjust the reg_bounds register state parsing logic to take this change into account.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231118034623.3320920-8-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
#define UNUM_MAX_DECIMAL U16_MAX
#define SNUM_MAX_DECIMAL S16_MAX
#define SNUM_MIN_DECIMAL S16_MIN

static bool is_unum_decimal(u64 num)
{
	return num <= UNUM_MAX_DECIMAL;
}

static bool is_snum_decimal(s64 num)
{
	return num >= SNUM_MIN_DECIMAL && num <= SNUM_MAX_DECIMAL;
}

static void verbose_unum(struct bpf_verifier_env *env, u64 num)
{
	if (is_unum_decimal(num))
		verbose(env, "%llu", num);
	else
		verbose(env, "%#llx", num);
}

static void verbose_snum(struct bpf_verifier_env *env, s64 num)
{
	if (is_snum_decimal(num))
		verbose(env, "%lld", num);
	else
		verbose(env, "%#llx", num);
}
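A few illustrative values, following directly from the cutoffs defined above:

/* Illustrative behavior of the helpers above:
 *   verbose_unum(env, 65535);  -> "65535"   (== UNUM_MAX_DECIMAL)
 *   verbose_unum(env, 65536);  -> "0x10000" (first value printed as hex)
 *   verbose_snum(env, -32768); -> "-32768"  (== SNUM_MIN_DECIMAL)
 *   verbose_snum(env, -32769); -> "0xffffffffffff7fff" (raw two's complement in hex)
 */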
2023-12-03 01:57:05 +08:00
|
|
|
int tnum_strn(char *str, size_t size, struct tnum a)
|
|
|
|
{
|
|
|
|
/* print as a constant, if tnum is fully known */
|
|
|
|
if (a.mask == 0) {
|
|
|
|
if (is_unum_decimal(a.value))
|
|
|
|
return snprintf(str, size, "%llu", a.value);
|
|
|
|
else
|
|
|
|
return snprintf(str, size, "%#llx", a.value);
|
|
|
|
}
|
|
|
|
return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(tnum_strn);
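
/* Usage sketch (illustrative): tnum_strn(buf, sizeof(buf), tnum_const(42))
 * yields "42", while a partially known value such as
 * (struct tnum){ .value = 0, .mask = 0xf } yields "(0x0; 0xf)", the exact
 * form seen in var_off= annotations in verifier logs.
 */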

static void print_scalar_ranges(struct bpf_verifier_env *env,
				const struct bpf_reg_state *reg,
				const char **sep)
{
	/* For signed ranges, we want to unify 64-bit and 32-bit values in the
	 * output as much as possible, but there is a bit of a complication.
	 * If we choose to print values as decimals, this is natural to do,
	 * because negative 64-bit and 32-bit values >= S32_MIN have the same
	 * representation due to sign extension. But if we choose to print
	 * them in hex format (see is_snum_decimal()), then sign extension is
	 * misleading.
	 * E.g., smin=-2 and smin32=-2 are exactly the same in decimal, but in
	 * hex they will be smin=0xfffffffffffffffe and smin32=0xfffffffe, two
	 * very different numbers.
	 * So we avoid sign extension if we choose to print values in hex.
	 */
	struct {
		const char *name;
		u64 val;
		bool omit;
	} minmaxs[] = {
		{"smin", reg->smin_value, reg->smin_value == S64_MIN},
		{"smax", reg->smax_value, reg->smax_value == S64_MAX},
		{"umin", reg->umin_value, reg->umin_value == 0},
		{"umax", reg->umax_value, reg->umax_value == U64_MAX},
		{"smin32",
		 is_snum_decimal((s64)reg->s32_min_value)
		 ? (s64)reg->s32_min_value
		 : (u32)reg->s32_min_value, reg->s32_min_value == S32_MIN},
		{"smax32",
		 is_snum_decimal((s64)reg->s32_max_value)
		 ? (s64)reg->s32_max_value
		 : (u32)reg->s32_max_value, reg->s32_max_value == S32_MAX},
		{"umin32", reg->u32_min_value, reg->u32_min_value == 0},
		{"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX},
	}, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)];
	bool neg1, neg2;

	for (m1 = &minmaxs[0]; m1 < mend; m1++) {
		if (m1->omit)
			continue;

		neg1 = m1->name[0] == 's' && (s64)m1->val < 0;

		verbose(env, "%s%s=", *sep, m1->name);
		*sep = ",";

		for (m2 = m1 + 2; m2 < mend; m2 += 2) {
			if (m2->omit || m2->val != m1->val)
				continue;
			/* don't mix negatives with positives */
			neg2 = m2->name[0] == 's' && (s64)m2->val < 0;
			if (neg2 != neg1)
				continue;
			m2->omit = true;
			verbose(env, "%s=", m2->name);
		}

		if (m1->name[0] == 's')
			verbose_snum(env, m1->val);
		else
			verbose_unum(env, m1->val);
	}
}
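
/* Worked example (illustrative): for a scalar known to lie in [0, 15] in
 * both the 64-bit and 32-bit views, the four min bounds and four max bounds
 * collapse to "smin=smin32=0,smax=umax=smax32=umax32=15": the inner m2 loop
 * steps by 2 so mins are only merged with mins and maxes with maxes, bounds
 * equal to their type's extreme (e.g. umin == 0) are omitted outright, and
 * smin stays because 0 is not S64_MIN.
 */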

bpf: emit map name in register state if applicable and available

In complicated real-world applications, when debugging a verification
error through the verifier log, it is often very useful to see the map
name for a PTR_TO_MAP_VALUE register. Usually it has to be inferred
from key/value sizes, perhaps combined with guessing the C code
location, and it's not always clear.

Given the verifier has the name, and it's never too long, let's just
emit it for ptr_to_map_key, ptr_to_map_value, and const_ptr_to_map
registers. We reshuffle the order a bit, so that map name, key size,
and value size appear before offset and immediate values, which seems
like a more logical order.

Current output:

  R1_w=map_ptr(map=array_map,ks=4,vs=8,off=0,imm=0)

But we'll get rid of the useless off=0 and imm=0 parts in the next
patch.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231118034623.3320920-6-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2023-11-18 11:46:20 +08:00

static bool type_is_map_ptr(enum bpf_reg_type t)
{
	switch (base_type(t)) {
	case CONST_PTR_TO_MAP:
	case PTR_TO_MAP_KEY:
	case PTR_TO_MAP_VALUE:
		return true;
	default:
		return false;
	}
}

/*
 * _a stands for append, was shortened to avoid multiline statements below.
 * This macro is used to output a comma separated list of attributes.
 */
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, ##__VA_ARGS__); sep = ","; })
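
/* Usage note (illustrative): with sep starting out as "", the sequence
 * verbose_a("id=%d", 1); verbose_a("off=%d", 8); prints "id=1,off=8":
 * each call emits the pending separator before its own text and arms a
 * "," for the next attribute.
 */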

static void print_reg_state(struct bpf_verifier_env *env,
			    const struct bpf_func_state *state,
			    const struct bpf_reg_state *reg)
{
	enum bpf_reg_type t;
	const char *sep = "";

	t = reg->type;
	if (t == SCALAR_VALUE && reg->precise)
		verbose(env, "P");
	if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
		/* reg->off should be 0 for SCALAR_VALUE */
		verbose_snum(env, reg->var_off.value + reg->off);
		return;
	}

	verbose(env, "%s", reg_type_str(env, t));
	if (t == PTR_TO_STACK) {
		if (state->frameno != reg->frameno)
			verbose(env, "[%d]", reg->frameno);
		if (tnum_is_const(reg->var_off)) {
			verbose_snum(env, reg->var_off.value + reg->off);
			return;
		}
	}
	if (base_type(t) == PTR_TO_BTF_ID)
		verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
	verbose(env, "(");
	if (reg->id)
		verbose_a("id=%d", reg->id);
	if (reg->ref_obj_id)
		verbose_a("ref_obj_id=%d", reg->ref_obj_id);
	if (type_is_non_owning_ref(reg->type))
		verbose_a("%s", "non_own_ref");
	if (type_is_map_ptr(t)) {
		if (reg->map_ptr->name[0])
			verbose_a("map=%s", reg->map_ptr->name);
		verbose_a("ks=%d,vs=%d",
			  reg->map_ptr->key_size,
			  reg->map_ptr->value_size);
	}
	if (t != SCALAR_VALUE && reg->off) {
		verbose_a("off=");
		verbose_snum(env, reg->off);
	}
	if (type_is_pkt_pointer(t)) {
		verbose_a("r=");
		verbose_unum(env, reg->range);
	}
	if (base_type(t) == PTR_TO_MEM) {
		verbose_a("sz=");
		verbose_unum(env, reg->mem_size);
	}
	if (t == CONST_PTR_TO_DYNPTR)
		verbose_a("type=%s", dynptr_type_str(reg->dynptr.type));
	if (tnum_is_const(reg->var_off)) {
		/* a pointer register with fixed offset */
		if (reg->var_off.value) {
			verbose_a("imm=");
			verbose_snum(env, reg->var_off.value);
		}
	} else {
		print_scalar_ranges(env, reg, &sep);
		if (!tnum_is_unknown(reg->var_off)) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose_a("var_off=%s", tn_buf);
		}
	}
	verbose(env, ")");
}
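
/* Putting it together (illustrative): a map value pointer with id 6, a map
 * named "array_map" (ks=4, vs=8), and a constant offset of 4 would render as
 * "map_value(id=6,map=array_map,ks=4,vs=8,off=4)", matching the shape of the
 * log fragments quoted in the commit messages above.
 */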

bpf: print spilled register state in stack slot

Print the same register state representation when printing stack state,
as we do for normal registers. Note that if a stack slot contains a
subregister spill (1, 2, or 4 bytes long), we'll still emit the "m0?"
mask for those bytes that are not part of the spilled register.

This means we can get something like fp-8=0000scalar() for a 4-byte
spill with the other 4 bytes still being STACK_ZERO.

Some examples, before and after, taken from the log of
pyperf_subprogs.bpf.o:

49: (7b) *(u64 *)(r10 -256) = r1 ; frame1: R1_w=ctx(off=0,imm=0) R10=fp0 fp-256_w=ctx
49: (7b) *(u64 *)(r10 -256) = r1 ; frame1: R1_w=ctx(off=0,imm=0) R10=fp0 fp-256_w=ctx(off=0,imm=0)

150: (7b) *(u64 *)(r10 -264) = r0 ; frame1: R0_w=map_value_or_null(id=6,off=0,ks=192,vs=4,imm=0) R10=fp0 fp-264_w=map_value_or_null
150: (7b) *(u64 *)(r10 -264) = r0 ; frame1: R0_w=map_value_or_null(id=6,off=0,ks=192,vs=4,imm=0) R10=fp0 fp-264_w=map_value_or_null(id=6,off=0,ks=192,vs=4,imm=0)

5192: (61) r1 = *(u32 *)(r10 -272) ; frame1: R1_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=15,var_off=(0x0; 0xf)) R10=fp0 fp-272=
5192: (61) r1 = *(u32 *)(r10 -272) ; frame1: R1_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=15,var_off=(0x0; 0xf)) R10=fp0 fp-272=????scalar(smin=smin32=0,smax=umax=smax32=umax32=15,var_off=(0x0; 0xf))

While at it, do a few other simple clean ups:
 - skip the slot if it's not scratched before detecting whether it's valid;
 - move taking the spilled_reg pointer outside of the switch (only DYNPTR
   has to adjust it to get to the "main" slot);
 - don't recalculate types_buf a second time for the MISC/ZERO/default case.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231118034623.3320920-5-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2023-11-18 11:46:19 +08:00

void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state,
			  bool print_all)
{
	const struct bpf_reg_state *reg;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		if (reg->type == NOT_INIT)
			continue;
		if (!print_all && !reg_scratched(env, i))
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=");
		print_reg_state(env, state, reg);
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		const char *sep = "";
		bool valid = false;
		u8 slot_type;
		int j;

		if (!print_all && !stack_slot_scratched(env, i))
			continue;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			slot_type = state->stack[i].slot_type[j];
			if (slot_type != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[slot_type];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;

		reg = &state->stack[i].spilled_ptr;
		switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
		case STACK_SPILL:
			/* print MISC/ZERO/INVALID slots above subreg spill */
			for (j = 0; j < BPF_REG_SIZE; j++)
				if (state->stack[i].slot_type[j] == STACK_SPILL)
					break;
			types_buf[j] = '\0';

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", types_buf);
			print_reg_state(env, state, reg);
			break;
		case STACK_DYNPTR:
			/* skip to main dynptr slot */
			i += BPF_DYNPTR_NR_SLOTS - 1;
			reg = &state->stack[i].spilled_ptr;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=dynptr_%s(", dynptr_type_str(reg->dynptr.type));
			if (reg->id)
				verbose_a("id=%d", reg->id);
			if (reg->ref_obj_id)
				verbose_a("ref_id=%d", reg->ref_obj_id);
			if (reg->dynptr_id)
				verbose_a("dynptr_id=%d", reg->dynptr_id);
			verbose(env, ")");
			break;
		case STACK_ITER:
			/* only main slot has ref_obj_id set; skip others */
			if (!reg->ref_obj_id)
				continue;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
				iter_type_str(reg->iter.btf, reg->iter.btf_id),
				reg->ref_obj_id, iter_state_str(reg->iter.state),
				reg->iter.depth);
			break;
		case STACK_MISC:
		case STACK_ZERO:
		default:
			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", types_buf);
			break;
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	if (state->in_callback_fn)
		verbose(env, " cb");
	if (state->in_async_callback_fn)
		verbose(env, " async_cb");
	verbose(env, "\n");
	if (!print_all)
		mark_verifier_state_clean(env);
}

static inline u32 vlog_alignment(u32 pos)
{
	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
}
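
/* Illustrative (assuming BPF_LOG_MIN_ALIGNMENT and BPF_LOG_ALIGNMENT are the
 * log-column constants from bpf_verifier.h): vlog_alignment() returns roughly
 * the padding needed so the ";" state annotation below lands on the next
 * alignment boundary past the instruction text, keeping the "insn ; state"
 * comments vertically aligned across log lines.
 */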

void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state)
{
	if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
		/* remove new line character */
		bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
	} else {
		verbose(env, "%d:", env->insn_idx);
	}
	print_verifier_state(env, state, false);
}
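
/* Behavior note (illustrative): if nothing was logged since the previous
 * instruction line was printed, the trailing newline is rewound and the
 * state is appended as an aligned ";" comment on the same line; this is how
 * lines like "49: (7b) *(u64 *)(r10 -256) = r1 ; frame1: ..." in the
 * examples above are produced. Otherwise the state starts a fresh
 * "<insn_idx>:" line.
 */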