On 2019/10/14 16:08, yangerkun wrote:
The sequence for a timeout req may overflow, which leads to a wrong
order in the timeout req list. Two situations need to be considered:

1. ctx->cached_sq_head + count - 1 may overflow;
2. the current cached_sq_head may have overflowed compared with the
cached_sq_head recorded when an earlier timeout req was queued.

Fix the wrong logic by recording count in the request and using type
long long for the comparison so that the overflow is handled.
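
To illustrate the two situations, here is a minimal userspace sketch
(the values are made up for the example; this is not kernel code):

#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* Situation 1: cached_sq_head + count - 1 wraps past UINT_MAX. */
        unsigned int sq_head = UINT_MAX - 1;
        unsigned int count = 4;
        unsigned int seq = sq_head + count - 1;          /* wraps to 1 */
        long long wide = (long long)sq_head + count - 1; /* 4294967297 */

        printf("unsigned seq = %u, widened seq = %lld\n", seq, wide);

        /* Situation 2: the current head has wrapped past an older one. */
        unsigned int old_head = UINT_MAX - 2; /* head of an earlier req */
        unsigned int new_head = 1;            /* later, but numerically smaller */

        if (new_head < old_head)
                printf("plain compare puts %u before %u: wrong order\n",
                       new_head, old_head);
        return 0;
}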

Signed-off-by: yangerkun <[email protected]>
---
  fs/io_uring.c | 31 +++++++++++++++++++++++++------
  1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 76fdbe84aff5..9cc96f68b370 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -288,6 +288,7 @@ struct io_poll_iocb {
  struct io_timeout {
        struct file                     *file;
        struct hrtimer                  timer;
+       unsigned                        count;
  };

  /*
@@ -1884,7 +1885,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  {
-       unsigned count, req_dist, tail_index;
+       unsigned count;
        struct io_ring_ctx *ctx = req->ctx;
        struct list_head *entry;
        struct timespec64 ts;
@@ -1907,21 +1908,39 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                count = 1;
        req->sequence = ctx->cached_sq_head + count - 1;
+       req->timeout.count = count;
        req->flags |= REQ_F_TIMEOUT;

        /*
         * Insertion sort, ensuring the first entry in the list is always
         * the one we need first.
         */
-       tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
-       req_dist = req->sequence - tail_index;
        spin_lock_irq(&ctx->completion_lock);
        list_for_each_prev(entry, &ctx->timeout_list) {
                struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
-               unsigned dist;
+               unsigned nxt_sq_head;
+               long long tmp, tmp_nxt;

-               dist = nxt->sequence - tail_index;
-               if (req_dist >= dist)
+               /* A count not smaller than nxt's expires no earlier: break. */
+               if (count >= nxt->timeout.count)
+                       break;
+
+               /*
+                * Since cached_sq_head + count - 1 can overflow, use type long
+                * long to store it.
+                */
+               tmp = (long long)ctx->cached_sq_head + count - 1;
+               nxt_sq_head = nxt->sequence - nxt->timeout.count + 1;
+               tmp_nxt = (long long)nxt_sq_head + nxt->timeout.count - 1;
+
+               /*
+                * cached_sq_head may overflow, and it will never overflow twice
+                * while there is still a valid timeout req in the list.
+                */
+               if (ctx->cached_sq_head < nxt_sq_head)
+                       tmp_nxt += UINT_MAX;

Should be "tmp += UINT_MAX;" here: once cached_sq_head has wrapped since
nxt was queued, it is the current req's widened sequence (tmp) that needs
the offset to land in the same number space as tmp_nxt, not nxt's.
Sorry for this, will resend the patch.
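
For reference, the tail of the comparison with that one-line fix applied
would read as follows (my reading of the intended logic; the resent
patch is authoritative):

                if (ctx->cached_sq_head < nxt_sq_head)
                        tmp += UINT_MAX;

                if (tmp >= tmp_nxt)
                        break;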


+
+               if (tmp >= tmp_nxt)
                        break;
        }
        list_add(&req->list, entry);
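
As a side note on the approach: the kernel's time_after()/time_before()
macros handle a similar wraparound with a signed-difference compare. A
sketch of that idiom (seq_before is a hypothetical helper, and the trick
is only valid while the two values are less than 2^31 apart, which the
timeout sequences here cannot guarantee, so the widened long long
compare in the patch is the safer fit):

#include <stdio.h>

/* Hypothetical helper in the style of the kernel's time_before(). */
static int seq_before(unsigned int a, unsigned int b)
{
        /* The signed difference tolerates a single wraparound. */
        return (int)(a - b) < 0;
}

int main(void)
{
        /* 1 is logically after UINT_MAX - 1 once the counter wraps. */
        printf("%d\n", seq_before(4294967294u, 1u)); /* prints 1 */
        return 0;
}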

