
ep_insert(): don't open-code ep_remove() on failure exits

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro <viro@zeniv.linux.org.uk>
Date:   2020-09-26 18:09:29 -04:00
Parent: 57804b1cc4
Commit: e3e096e7fc

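The point of the change is structural: once enough of the new epitem has been set up that ep_remove() can tear all of it down, each failure exit can simply call ep_remove() and return, instead of unwinding through the chain of error_* labels at the bottom of ep_insert(). A minimal standalone sketch of that shape, using hypothetical names (struct item, setup_source(), item_remove()) rather than the real eventpoll helpers:

/*
 * Sketch only: the names below are illustrative stand-ins, not
 * eventpoll functions.
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
	int has_source;
	int published;
};

/* Common teardown helper: undoes everything insert_item() did so far. */
static void item_remove(struct item *it)
{
	it->published = 0;
	it->has_source = 0;
	free(it);
}

static int setup_source(struct item *it)
{
	it->has_source = 1;
	return 0;			/* pretend the allocation succeeded */
}

static int insert_item(int full_check)
{
	struct item *it = calloc(1, sizeof(*it));
	int error;

	if (!it)
		return -1;

	error = setup_source(it);
	if (error) {
		free(it);		/* not visible to anyone yet */
		return error;
	}

	it->published = 1;		/* from here on item_remove() can undo everything */

	if (full_check) {
		item_remove(it);	/* one helper call, no goto ladder */
		return -1;
	}

	item_remove(it);		/* normal cleanup for the demo */
	return 0;
}

int main(void)
{
	printf("ok path: %d, failure path: %d\n", insert_item(0), insert_item(1));
	return 0;
}

The one ordering detail the diff below reflects is the epoll_watches accounting: the increment moves up, before the first point that may call ep_remove(), since ep_remove() drops that count on its teardown path.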

fs/eventpoll.c

@@ -1384,12 +1384,16 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 	epi->next = EP_UNACTIVE_PTR;
 	if (epi->event.events & EPOLLWAKEUP) {
 		error = ep_create_wakeup_source(epi);
-		if (error)
-			goto error_create_wakeup_source;
+		if (error) {
+			kmem_cache_free(epi_cache, epi);
+			return error;
+		}
 	} else {
 		RCU_INIT_POINTER(epi->ws, NULL);
 	}
 
+	atomic_long_inc(&ep->user->epoll_watches);
+
 	/* Add the current item to the list of active epoll hook for this file */
 	spin_lock(&tfile->f_lock);
 	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
@@ -1402,9 +1406,10 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 	ep_rbtree_insert(ep, epi);
 
 	/* now check if we've created too many backpaths */
-	error = -EINVAL;
-	if (full_check && reverse_path_check())
-		goto error_remove_epi;
+	if (unlikely(full_check && reverse_path_check())) {
+		ep_remove(ep, epi);
+		return -EINVAL;
+	}
 
 	/* Initialize the poll table using the queue callback */
 	epq.epi = epi;
@@ -1424,9 +1429,10 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 	 * install process. Namely an allocation for a wait queue failed due
 	 * high memory pressure.
 	 */
-	error = -ENOMEM;
-	if (!epq.epi)
-		goto error_unregister;
+	if (unlikely(!epq.epi)) {
+		ep_remove(ep, epi);
+		return -ENOMEM;
+	}
 
 	/* We have to drop the new item inside our item list to keep track of it */
 	write_lock_irq(&ep->lock);
@@ -1448,40 +1454,11 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 
 	write_unlock_irq(&ep->lock);
 
-	atomic_long_inc(&ep->user->epoll_watches);
-
 	/* We have to call this outside the lock */
 	if (pwake)
 		ep_poll_safewake(ep, NULL);
 
 	return 0;
-
-error_unregister:
-	ep_unregister_pollwait(ep, epi);
-error_remove_epi:
-	spin_lock(&tfile->f_lock);
-	list_del_rcu(&epi->fllink);
-	spin_unlock(&tfile->f_lock);
-
-	rb_erase_cached(&epi->rbn, &ep->rbr);
-
-	/*
-	 * We need to do this because an event could have been arrived on some
-	 * allocated wait queue. Note that we don't care about the ep->ovflist
-	 * list, since that is used/cleaned only inside a section bound by "mtx".
-	 * And ep_insert() is called with "mtx" held.
-	 */
-	write_lock_irq(&ep->lock);
-	if (ep_is_linked(epi))
-		list_del_init(&epi->rdllink);
-	write_unlock_irq(&ep->lock);
-
-	wakeup_source_unregister(ep_wakeup_source(epi));
-
-error_create_wakeup_source:
-	kmem_cache_free(epi_cache, epi);
-
-	return error;
 }
 
 /*