linux/kernel/trace/trace_probe_tmpl.h
Andreas Ziegler f6675872db tracing: probeevent: Correctly update remaining space in dynamic area
Commit 9178412ddf ("tracing: probeevent: Return consumed
bytes of dynamic area") improved the string fetching
mechanism by returning the number of required bytes after
copying the argument to the dynamic area. However, this
return value is only used to advance the pointer inside the
dynamic area; it never updates the 'maxlen' variable, which
tracks the remaining space in the dynamic area.

This means that fetch_store_string() always reads the *total*
size of the dynamic area from the data_loc pointer instead of
the *remaining* size (and passes it along to
strncpy_from_{user,unsafe}), even when we are already copying
data into the middle of the dynamic area.
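
Concretely, the fix (visible in store_trace_args() below) keeps
'maxlen' in step with the cursor into the dynamic area:

	if (unlikely(ret < 0 && arg->dynamic)) {
		*dl = make_data_loc(0, dyndata - base);
	} else {
		dyndata += ret;
		maxlen -= ret;	/* this update was previously missing */
	}

so the data_loc word prepared for the next dynamic argument only
advertises the space that is actually left.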

Link: http://lkml.kernel.org/r/20190206190013.16405-1-andreas.ziegler@fau.de

Cc: Ingo Molnar <mingo@redhat.com>
Cc: stable@vger.kernel.org
Fixes: 9178412ddf ("tracing: probeevent: Return consumed bytes of dynamic area")
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Andreas Ziegler <andreas.ziegler@fau.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-02-11 15:58:30 -05:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Traceprobe fetch helper inlines
 */

static nokprobe_inline void
fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf)
{
	switch (code->size) {
	case 1:
		*(u8 *)buf = (u8)val;
		break;
	case 2:
		*(u16 *)buf = (u16)val;
		break;
	case 4:
		*(u32 *)buf = (u32)val;
		break;
	case 8:
		//TBD: 32bit signed
		*(u64 *)buf = (u64)val;
		break;
	default:
		*(unsigned long *)buf = val;
	}
}

static nokprobe_inline void
fetch_apply_bitfield(struct fetch_insn *code, void *buf)
{
	switch (code->basesize) {
	case 1:
		*(u8 *)buf <<= code->lshift;
		*(u8 *)buf >>= code->rshift;
		break;
	case 2:
		*(u16 *)buf <<= code->lshift;
		*(u16 *)buf >>= code->rshift;
		break;
	case 4:
		*(u32 *)buf <<= code->lshift;
		*(u32 *)buf >>= code->rshift;
		break;
	case 8:
		*(u64 *)buf <<= code->lshift;
		*(u64 *)buf >>= code->rshift;
		break;
	}
}

/*
 * These functions must be defined for each callsite.
 * Return consumed dynamic data size (>= 0), or error (< 0).
 * If dest is NULL, don't store result and return required dynamic data size.
 */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs,
		   void *dest, void *base);
static nokprobe_inline int fetch_store_strlen(unsigned long addr);
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base);
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size);

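/*
 * Illustrative usage (a sketch, not part of this file): for a dynamic
 * (string) argument the callers below make two passes. The sizing pass
 * calls process_fetch_insn(code, regs, NULL, NULL) to learn how many
 * dynamic-area bytes the argument needs; the storing pass passes the
 * real 'dest' and 'base' and is told how many bytes were consumed.
 * __get_data_size() and store_trace_args() are those two passes.
 */
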
/* From the 2nd stage on, the routine is the same for every callsite */
static nokprobe_inline int
process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
			  void *dest, void *base)
{
	struct fetch_insn *s3 = NULL;
	int total = 0, ret = 0, i = 0;
	u32 loc = 0;
	unsigned long lval = val;

stage2:
	/* 2nd stage: dereference memory if needed */
	while (code->op == FETCH_OP_DEREF) {
		lval = val;
		ret = probe_mem_read(&val, (void *)val + code->offset,
				     sizeof(val));
		if (ret)
			return ret;
		code++;
	}

	s3 = code;
stage3:
	/* 3rd stage: store value to buffer */
	if (unlikely(!dest)) {
		if (code->op == FETCH_OP_ST_STRING) {
			/* Sizing pass: only count the bytes the string needs */
			ret += fetch_store_strlen(val + code->offset);
			code++;
			goto array;
		} else
			return -EILSEQ;
	}

	switch (code->op) {
	case FETCH_OP_ST_RAW:
		fetch_store_raw(val, code, dest);
		break;
	case FETCH_OP_ST_MEM:
		probe_mem_read(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_STRING:
		loc = *(u32 *)dest;
		ret = fetch_store_string(val + code->offset, dest, base);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 4th stage: modify stored value if needed */
	if (code->op == FETCH_OP_MOD_BF) {
		fetch_apply_bitfield(code, dest);
		code++;
	}

array:
	/* the last stage: loop on array */
	if (code->op == FETCH_OP_LP_ARRAY) {
		total += ret;
		if (++i < code->param) {
			code = s3;
			if (s3->op != FETCH_OP_ST_STRING) {
				dest += s3->size;
				val += s3->size;
				goto stage3;
			}
			/*
			 * A string array holds char * entries: step back to
			 * the last FETCH_OP_DEREF and fetch the next pointer.
			 */
			code--;
			val = lval + sizeof(char *);
			if (dest) {
				dest += sizeof(u32);
				*(u32 *)dest = update_data_loc(loc, ret);
			}
			goto stage2;
		}
		code++;
		ret = total;
	}

	return code->op == FETCH_OP_END ? ret : -EILSEQ;
}

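/*
 * Worked example (a sketch, not taken from this file): a kprobe
 * argument like '+8($stack):u32' is compiled by trace_probe.c into
 * roughly
 *	{FETCH_OP_STACKP} {FETCH_OP_ST_MEM, .offset = 8, .size = 4}
 *	{FETCH_OP_END}
 * The callsite's process_fetch_insn() resolves FETCH_OP_STACKP into
 * 'val'; the bottom half above then runs stage 3 (FETCH_OP_ST_MEM
 * copies 4 bytes from val + 8) and stops at FETCH_OP_END.
 */
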
/* Sum up total data length for dynamic arrays (strings) */
static nokprobe_inline int
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
{
	struct probe_arg *arg;
	int i, len, ret = 0;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		if (unlikely(arg->dynamic)) {
			/* dest == NULL asks for the required size only */
			len = process_fetch_insn(arg->code, regs, NULL, NULL);
			if (len > 0)
				ret += len;
		}
	}

	return ret;
}

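/*
 * Note on data_loc (encoding per make_data_loc()/update_data_loc() in
 * trace_probe.h): the u32 packs the available length into the upper
 * 16 bits and the offset of the dynamic data from 'base' into the
 * lower 16 bits, so a consumer can both find its bytes and know how
 * much room remains.
 */
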
/* Store the value of each argument */
static nokprobe_inline void
store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
		 int header_size, int maxlen)
{
	struct probe_arg *arg;
	void *base = data - header_size;
	void *dyndata = data + tp->size;
	u32 *dl;	/* Data location */
	int ret, i;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		dl = data + arg->offset;
		/* Point to the dynamic data area if needed */
		if (unlikely(arg->dynamic))
			*dl = make_data_loc(maxlen, dyndata - base);
		ret = process_fetch_insn(arg->code, regs, dl, base);
		if (unlikely(ret < 0 && arg->dynamic)) {
			*dl = make_data_loc(0, dyndata - base);
		} else {
			dyndata += ret;
			maxlen -= ret;
		}
	}
}

static inline int
print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
		 u8 *data, void *field)
{
	void *p;
	int i, j;

	for (i = 0; i < nr_args; i++) {
		struct probe_arg *a = args + i;

		trace_seq_printf(s, " %s=", a->name);
		if (likely(!a->count)) {
			if (!a->type->print(s, data + a->offset, field))
				return -ENOMEM;
			continue;
		}
		trace_seq_putc(s, '{');
		p = data + a->offset;
		for (j = 0; j < a->count; j++) {
			if (!a->type->print(s, p, field))
				return -ENOMEM;
			trace_seq_putc(s, j == a->count - 1 ? '}' : ',');
			p += a->type->size;
		}
	}
	return 0;
}