Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 12:28:41 +08:00)
tracing/user_events: Document user_event_mm one-shot list usage
During 6.4 development it became clear that the one-shot list used by the user_event_mm's next field was confusing to others. It is not clear how this list is protected or what the next field usage is for unless you are familiar with the code.

Add comments into the user_event_mm struct indicating lock requirement and usage. Also document how and why this approach was used via comments in both user_event_enabler_update() and user_event_mm_get_all() and the rules to properly use it.

Link: https://lkml.kernel.org/r/20230519230741.669-5-beaub@linux.microsoft.com
Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wicngggxVpbnrYHjRTwGE0WYscPRM+L2HO2BF8ia1EXgQ@mail.gmail.com/

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
This commit is contained in:
parent dcbd1ac266
commit ff9e1632d6
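As context for the diff below: the pattern being documented is an intrusive, allocation-free "one-shot" list built through the struct's own next field. What follows is a minimal userspace sketch of that idea, not the kernel code; every name in it (struct mm_entry, mm_get_all(), mm_put(), global_mms, list_mutex) is a hypothetical stand-in, the pthread mutex plays the role of event_mutex, a plain integer stands in for refcount_t, and a simple pointer walk stands in for the RCU-protected global list.

/*
 * Minimal userspace sketch of the one-shot list pattern (assumed names,
 * not the kernel's): entries are linked through their own ->next field,
 * so no allocation is needed, and each entry is reference-counted while
 * it sits on the temporary list.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Plays the role of event_mutex: both building and walking hold it. */
static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;

struct mm_entry {
	int id;
	int refcnt;                   /* simplified stand-in for refcount_t */
	int has_enabler;              /* "has an enabler for this event" */
	struct mm_entry *next;        /* one-shot list link, valid only under list_mutex */
	struct mm_entry *global_next; /* stand-in for the RCU-protected global list */
};

static struct mm_entry *global_mms; /* head of the (sketched) global list */

static void mm_put(struct mm_entry *mm)
{
	if (--mm->refcnt == 0)
		free(mm);
}

/*
 * Build the one-shot list without allocating: link every matching entry
 * through its ->next field and take a reference so it cannot vanish
 * while the caller is still using it.  Caller must hold list_mutex.
 */
static struct mm_entry *mm_get_all(void)
{
	struct mm_entry *found = NULL;
	struct mm_entry *mm;

	for (mm = global_mms; mm; mm = mm->global_next) {
		if (!mm->has_enabler)
			continue;

		mm->refcnt++;     /* closes the removal timing window */
		mm->next = found; /* push onto the one-shot list */
		found = mm;
	}

	return found;
}

static void update_all(void)
{
	struct mm_entry *mm, *next;

	pthread_mutex_lock(&list_mutex);

	mm = mm_get_all();

	while (mm) {
		/* Save ->next first: mm_put() below may free this entry. */
		next = mm->next;

		printf("updating mm %d\n", mm->id); /* the real code may sleep here */

		mm_put(mm); /* always put each entry after use */
		mm = next;
	}

	pthread_mutex_unlock(&list_mutex);
}

int main(void)
{
	/* Populate the global list: two entries match, one does not. */
	for (int i = 0; i < 3; i++) {
		struct mm_entry *mm = calloc(1, sizeof(*mm));

		mm->id = i;
		mm->refcnt = 1; /* reference held by the global list */
		mm->has_enabler = (i != 1);
		mm->global_next = global_mms;
		global_mms = mm;
	}

	update_all();
	return 0;
}

The two rules the new comments spell out show up directly in the sketch: the builder takes a reference on every entry it links through ->next while the mutex is held, and the consumer saves ->next before dropping its reference, since that put may be the last one.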
@@ -20,6 +20,7 @@ struct user_event_mm {
 	struct list_head mms_link;
 	struct list_head enablers;
 	struct mm_struct *mm;
+	/* Used for one-shot lists, protected by event_mutex */
 	struct user_event_mm *next;
 	refcount_t refcnt;
 	refcount_t tasks;
@@ -451,12 +451,25 @@ static bool user_event_enabler_exists(struct user_event_mm *mm,
 static void user_event_enabler_update(struct user_event *user)
 {
 	struct user_event_enabler *enabler;
-	struct user_event_mm *mm = user_event_mm_get_all(user);
 	struct user_event_mm *next;
+	struct user_event_mm *mm;
 	int attempt;

 	lockdep_assert_held(&event_mutex);

+	/*
+	 * We need to build a one-shot list of all the mms that have an
+	 * enabler for the user_event passed in. This list is only valid
+	 * while holding the event_mutex. The only reason for this is due
+	 * to the global mm list being RCU protected and we use methods
+	 * which can wait (mmap_read_lock and pin_user_pages_remote).
+	 *
+	 * NOTE: user_event_mm_get_all() increments the ref count of each
+	 * mm that is added to the list to prevent removal timing windows.
+	 * We must always put each mm after they are used, which may wait.
+	 */
+	mm = user_event_mm_get_all(user);
+
 	while (mm) {
 		next = mm->next;
 		mmap_read_lock(mm->mm);
@@ -515,6 +528,14 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
 	struct user_event_enabler *enabler;
 	struct user_event_mm *mm;

+	/*
+	 * We use the mm->next field to build a one-shot list from the global
+	 * RCU protected list. To build this list the event_mutex must be held.
+	 * This lets us build a list without requiring allocs that could fail
+	 * when user based events are most wanted for diagnostics.
+	 */
+	lockdep_assert_held(&event_mutex);
+
 	/*
 	 * We do not want to block fork/exec while enablements are being
 	 * updated, so we use RCU to walk the current tasks that have used