tracing/user_events: Track refcount consistently via put/get
Various parts of the code today track user_event's refcnt field directly via refcount_inc()/refcount_dec(). This makes it hard to modify the behavior of the last reference decrement consistently across all code paths. For example, in the future we will auto-delete events when the last reference goes away. That last decrement can happen in many places, but we want it handled consistently.

Add user_event_get() and user_event_put() for the increment and decrement, and update all places that manipulate the refcount directly to use these new functions. At each call site, pass whether event_mutex is held. This lets future patches drop events automatically in a clear way, and ensures that when a caller states the lock is held (or not held), it really is.

Link: https://lkml.kernel.org/r/20230614163336.5797-3-beaub@linux.microsoft.com

Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
commit f0dbf6fd0b (parent b08d725805)
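The pattern the patch introduces can be seen outside the kernel as well. Below is a minimal userspace sketch, not the kernel code itself: a pthread mutex stands in for event_mutex, a C11 atomic stands in for refcount_t, and the "free on last put" behavior mirrors the future auto-delete the commit message describes rather than anything this patch does. The point it illustrates is why every put site must say whether the lock is already held: the last-reference path may need to take that lock itself, and must not deadlock on a caller that already owns it.

/* Userspace sketch of the get/put pattern; assumptions noted above. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;

struct user_event {
	atomic_int refcnt;
	const char *name;
};

static struct user_event *user_event_get(struct user_event *user)
{
	atomic_fetch_add(&user->refcnt, 1);
	return user;
}

static void user_event_put(struct user_event *user, bool locked)
{
	if (!user)
		return;

	/* atomic_fetch_sub() returns the old value: 1 means this was the last reference */
	if (atomic_fetch_sub(&user->refcnt, 1) == 1) {
		/*
		 * Hypothetical last-reference teardown: take the lock only if
		 * the caller told us it is not already held.
		 */
		if (!locked)
			pthread_mutex_lock(&event_mutex);

		printf("last reference to %s dropped\n", user->name);
		free(user);

		if (!locked)
			pthread_mutex_unlock(&event_mutex);
	}
}

int main(void)
{
	struct user_event *ev = calloc(1, sizeof(*ev));

	ev->name = "example_event";
	atomic_init(&ev->refcnt, 1);	/* creator's reference */

	user_event_get(ev);		/* e.g. an enabler taking a reference */
	user_event_put(ev, false);	/* enabler going away, lock not held */
	user_event_put(ev, false);	/* creator's reference, frees the event */

	return 0;
}

Passing the lock state explicitly (rather than always taking event_mutex in the put path) keeps callers that already hold the mutex, such as the registration and unregistration ioctl paths, from deadlocking, while still letting lockdep verify the claim at every call site.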
@@ -177,6 +177,28 @@ static u32 user_event_key(char *name)
 	return jhash(name, strlen(name), 0);
 }
 
+static struct user_event *user_event_get(struct user_event *user)
+{
+	refcount_inc(&user->refcnt);
+
+	return user;
+}
+
+static void user_event_put(struct user_event *user, bool locked)
+{
+#ifdef CONFIG_LOCKDEP
+	if (locked)
+		lockdep_assert_held(&event_mutex);
+	else
+		lockdep_assert_not_held(&event_mutex);
+#endif
+
+	if (unlikely(!user))
+		return;
+
+	refcount_dec(&user->refcnt);
+}
+
 static void user_event_group_destroy(struct user_event_group *group)
 {
 	kfree(group->system_name);
@@ -228,12 +250,13 @@ error:
 	return NULL;
 };
 
-static void user_event_enabler_destroy(struct user_event_enabler *enabler)
+static void user_event_enabler_destroy(struct user_event_enabler *enabler,
+				       bool locked)
 {
 	list_del_rcu(&enabler->mm_enablers_link);
 
 	/* No longer tracking the event via the enabler */
-	refcount_dec(&enabler->event->refcnt);
+	user_event_put(enabler->event, locked);
 
 	kfree(enabler);
 }
@@ -295,7 +318,7 @@ static void user_event_enabler_fault_fixup(struct work_struct *work)
 
 	/* User asked for enabler to be removed during fault */
 	if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
-		user_event_enabler_destroy(enabler);
+		user_event_enabler_destroy(enabler, true);
 		goto out;
 	}
 
@@ -470,14 +493,12 @@ static bool user_event_enabler_dup(struct user_event_enabler *orig,
 	if (!enabler)
 		return false;
 
-	enabler->event = orig->event;
+	enabler->event = user_event_get(orig->event);
 	enabler->addr = orig->addr;
 
 	/* Only dup part of value (ignore future flags, etc) */
 	enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
 
-	refcount_inc(&enabler->event->refcnt);
-
 	/* Enablers not exposed yet, RCU not required */
 	list_add(&enabler->mm_enablers_link, &mm->enablers);
 
@@ -594,7 +615,7 @@ static void user_event_mm_destroy(struct user_event_mm *mm)
 	struct user_event_enabler *enabler, *next;
 
 	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
-		user_event_enabler_destroy(enabler);
+		user_event_enabler_destroy(enabler, false);
 
 	mmdrop(mm->mm);
 	kfree(mm);
@@ -749,7 +770,7 @@ retry:
 	 * exit or run exec(), which includes forks and clones.
 	 */
 	if (!*write_result) {
-		refcount_inc(&enabler->event->refcnt);
+		user_event_get(user);
 		list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
 	}
 
@@ -1337,10 +1358,8 @@ static struct user_event *find_user_event(struct user_event_group *group,
 	*outkey = key;
 
 	hash_for_each_possible(group->register_table, user, node, key)
-		if (!strcmp(EVENT_NAME(user), name)) {
-			refcount_inc(&user->refcnt);
-			return user;
-		}
+		if (!strcmp(EVENT_NAME(user), name))
+			return user_event_get(user);
 
 	return NULL;
 }
@@ -1554,12 +1573,12 @@ static int user_event_reg(struct trace_event_call *call,
 
 	return ret;
 inc:
-	refcount_inc(&user->refcnt);
+	user_event_get(user);
 	update_enable_bit_for(user);
 	return 0;
 dec:
 	update_enable_bit_for(user);
-	refcount_dec(&user->refcnt);
+	user_event_put(user, true);
 	return 0;
 }
 
@@ -1593,7 +1612,7 @@ static int user_event_create(const char *raw_command)
 	ret = user_event_parse_cmd(group, name, &user, 0);
 
 	if (!ret)
-		refcount_dec(&user->refcnt);
+		user_event_put(user, false);
 
 	mutex_unlock(&group->reg_mutex);
 
@@ -1794,7 +1813,7 @@ static int user_event_parse(struct user_event_group *group, char *name,
 
 	return 0;
 error:
-	refcount_dec(&user->refcnt);
+	user_event_put(user, false);
 	return ret;
 }
 
@@ -1883,7 +1902,7 @@ static int delete_user_event(struct user_event_group *group, char *name)
 	if (!user)
 		return -ENOENT;
 
-	refcount_dec(&user->refcnt);
+	user_event_put(user, true);
 
 	if (!user_event_last_ref(user))
 		return -EBUSY;
@@ -2042,9 +2061,7 @@ static int user_events_ref_add(struct user_event_file_info *info,
 	for (i = 0; i < count; ++i)
 		new_refs->events[i] = refs->events[i];
 
-	new_refs->events[i] = user;
-
-	refcount_inc(&user->refcnt);
+	new_refs->events[i] = user_event_get(user);
 
 	rcu_assign_pointer(info->refs, new_refs);
 
@@ -2158,7 +2175,7 @@ static long user_events_ioctl_reg(struct user_event_file_info *info,
 	ret = user_events_ref_add(info, user);
 
 	/* No longer need parse ref, ref_add either worked or not */
-	refcount_dec(&user->refcnt);
+	user_event_put(user, false);
 
 	/* Positive number is index and valid */
 	if (ret < 0)
@@ -2307,7 +2324,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
 		set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
 
 		if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
-			user_event_enabler_destroy(enabler);
+			user_event_enabler_destroy(enabler, true);
 
 		/* Removed at least one */
 		ret = 0;
@@ -2365,7 +2382,6 @@ static int user_events_release(struct inode *node, struct file *file)
 	struct user_event_file_info *info = file->private_data;
 	struct user_event_group *group;
 	struct user_event_refs *refs;
-	struct user_event *user;
 	int i;
 
 	if (!info)
@@ -2389,12 +2405,9 @@ static int user_events_release(struct inode *node, struct file *file)
 	 * The underlying user_events are ref counted, and cannot be freed.
 	 * After this decrement, the user_events may be freed elsewhere.
 	 */
-	for (i = 0; i < refs->count; ++i) {
-		user = refs->events[i];
-
-		if (user)
-			refcount_dec(&user->refcnt);
-	}
+	for (i = 0; i < refs->count; ++i)
+		user_event_put(refs->events[i], false);
+
 out:
 	file->private_data = NULL;
 