From: Heiher <r...@hev.cc>

Take the case where we have:

        t0
         | (ew)
        e0
         | (et)
        e1
         | (lt)
        s0

t0: thread 0
e0: epoll fd 0
e1: epoll fd 1
s0: socket fd 0
ew: epoll_wait
et: edge-trigger
lt: level-trigger

We only need to wakeup nested epoll fds if something has been queued to the
overflow list, since the ep_poll() traverses the rdllist during recursive poll
and thus events on the overflow list may not be visible yet.

Test code:

#include <unistd.h>
#include <sys/epoll.h>
#include <sys/socket.h>

int main(int argc, char *argv[])
{
	int sfd[2];
	int efd[2];
	struct epoll_event e;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sfd) < 0)
		goto out;

	efd[0] = epoll_create(1);
	if (efd[0] < 0)
		goto out;

	efd[1] = epoll_create(1);
	if (efd[1] < 0)
		goto out;

	e.events = EPOLLIN;
	if (epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e) < 0)
		goto out;

	e.events = EPOLLIN | EPOLLET;
	if (epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e) < 0)
		goto out;

	if (write(sfd[1], "w", 1) != 1)
		goto out;

	if (epoll_wait(efd[0], &e, 1, 0) != 1)
		goto out;

	if (epoll_wait(efd[0], &e, 1, 0) != 0)
		goto out;

	close(efd[0]);
	close(efd[1]);
	close(sfd[0]);
	close(sfd[1]);

	return 0;
out:
	return -1;
}

More tests:
https://github.com/heiher/epoll-wakeup

Cc: Al Viro <v...@zeniv.linux.org.uk>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Davide Libenzi <davi...@xmailserver.org>
Cc: Davidlohr Bueso <d...@stgolabs.net>
Cc: Dominik Brodowski <li...@dominikbrodowski.net>
Cc: Eric Wong <e...@80x24.org>
Cc: Jason Baron <jba...@akamai.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Roman Penyaev <rpeny...@suse.de>
Cc: Sridhar Samudrala <sridhar.samudr...@intel.com>
Cc: linux-kernel@vger.kernel.org
Cc: linux-fsde...@vger.kernel.org
Signed-off-by: hev <r...@hev.cc>
---
 fs/eventpoll.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index c4159bcc05d9..a0c07f6653c6 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -704,12 +704,21 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
 	res = (*sproc)(ep, &txlist, priv);
 
 	write_lock_irq(&ep->lock);
+	nepi = READ_ONCE(ep->ovflist);
+	/*
+	 * We only need to wakeup nested epoll fds if something has been queued
+	 * to the overflow list, since the ep_poll() traverses the rdllist
+	 * during recursive poll and thus events on the overflow list may not be
+	 * visible yet.
+	 */
+	if (nepi != NULL)
+		pwake++;
 	/*
 	 * During the time we spent inside the "sproc" callback, some
 	 * other events might have been queued by the poll callback.
 	 * We re-insert them inside the main ready-list here.
 	 */
-	for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
+	for (; (epi = nepi) != NULL;
 	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
 		/*
 		 * We need to check if the item is already in the list.
@@ -755,7 +764,7 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
 		mutex_unlock(&ep->mtx);
 
 	/* We have to call this outside the lock */
-	if (pwake)
+	if (pwake == 2)
 		ep_poll_safewake(&ep->poll_wait);
 
 	return res;
--
2.23.0
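
For context, the only other place pwake is incremented in ep_scan_ready_list()
(outside the hunks above) is when the re-assembled ready list is non-empty and
there are waiters on ep->poll_wait, so checking pwake == 2 makes the nested
wakeup fire only when that holds *and* something was queued to the overflow
list while the scan ran.

The test above also depends on the outer epoll fd being edge-triggered: with
EPOLLET it expects the second epoll_wait() to return 0, since no new event
arrived on efd[1]. For contrast, a level-triggered variant is sketched below.
It is an illustrative sketch, not part of this patch or of the epoll-wakeup
test suite, and it is expected to keep reporting efd[1] as ready for as long
as sfd[0] has unread data; that is plain epoll semantics, unaffected by this
change.

/*
 * Illustrative level-triggered companion to the test in the patch (not part
 * of it): the outer epoll fd watches the inner one level-triggered instead
 * of edge-triggered. As long as the byte written to sfd[1] stays unread,
 * efd[1] remains readable, so both epoll_wait() calls on efd[0] should
 * report one event.
 */
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/socket.h>

int main(void)
{
	int sfd[2];
	int efd[2];
	struct epoll_event e;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sfd) < 0)
		goto out;

	efd[0] = epoll_create(1);
	if (efd[0] < 0)
		goto out;

	efd[1] = epoll_create(1);
	if (efd[1] < 0)
		goto out;

	/* inner: level-triggered watch on the socket */
	e.events = EPOLLIN;
	if (epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e) < 0)
		goto out;

	/* outer: level-triggered watch on the inner epoll fd */
	e.events = EPOLLIN;
	if (epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e) < 0)
		goto out;

	if (write(sfd[1], "w", 1) != 1)
		goto out;

	/* both calls should see efd[1] ready: the data is still unread */
	if (epoll_wait(efd[0], &e, 1, 0) != 1)
		goto out;
	if (epoll_wait(efd[0], &e, 1, 0) != 1)
		goto out;

	close(efd[0]);
	close(efd[1]);
	close(sfd[0]);
	close(sfd[1]);

	return 0;
out:
	return -1;
}

Either program can be built with something like gcc -O2 -o nested-epoll
nested-epoll.c (file name is arbitrary); exit status 0 means every
epoll_wait() check passed, non-zero means one of them saw an unexpected
event count.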