From f1ecf39dce35cd85034af8cc08af76efb6690677 Mon Sep 17 00:00:00 2001
From: Xiaoguang Wang
Date: Mon, 6 Dec 2021 17:11:09 +0800
Subject: [PATCH 1/2] anolis: io_uring: ensure using TWA_SIGNAL for task_work
 unconditionally

ANBZ: #103

While backporting the io_uring async buffered reads feature, we should
have replaced most task_work_add() calls with io_req_task_work_add(),
but it seems we failed to do so, which means task works may not be
executed in time. Use TWA_SIGNAL to fix this bug.

Reviewed-by: Joseph Qi
Reviewed-by: Hao Xu
Signed-off-by: Xiaoguang Wang
---
 fs/io_uring.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 37fe5d0b2cc4..55ebbe3f863f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1584,12 +1584,12 @@ static void __io_free_req(struct io_kiocb *req)
 		int ret;
 
 		init_task_work(&req->task_work, io_req_task_file_table_put);
-		ret = task_work_add(req->task, &req->task_work, TWA_RESUME);
+		ret = task_work_add(req->task, &req->task_work, TWA_SIGNAL);
 		if (unlikely(ret)) {
 			struct task_struct *tsk;
 
 			tsk = io_wq_get_task(req->ctx->io_wq);
-			task_work_add(tsk, &req->task_work, 0);
+			task_work_add(tsk, &req->task_work, TWA_NONE);
 		}
 	}
 }
@@ -2260,7 +2260,7 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
 	init_task_work(&req->task_work, io_rw_resubmit);
 	percpu_ref_get(&req->ctx->refs);
 
-	ret = task_work_add(tsk, &req->task_work, TWA_RESUME);
+	ret = task_work_add(tsk, &req->task_work, TWA_SIGNAL);
 	if (!ret)
 		return true;
 #endif
@@ -2986,12 +2986,12 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	/* submit ref gets dropped, acquire a new one */
 	refcount_inc(&req->refs);
 	tsk = req->task;
-	ret = task_work_add(tsk, &rw->task_work, TWA_RESUME);
+	ret = task_work_add(tsk, &rw->task_work, TWA_SIGNAL);
 	if (unlikely(ret)) {
 		/* queue just for cancelation */
 		init_task_work(&rw->task_work, io_async_buf_cancel);
 		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &rw->task_work, TWA_RESUME);
+		task_work_add(tsk, &rw->task_work, TWA_SIGNAL);
 	}
 	wake_up_process(tsk);
 	return 1;
--
Gitee
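
For context, the notification pattern that all three hunks converge on can
be sketched as below. This is a minimal sketch, not code from the tree:
io_queue_task_work() is a hypothetical helper name, while init_task_work(),
task_work_add(), io_wq_get_task() and the TWA_* notify modes are the kernel
APIs the diff actually uses.

/*
 * Minimal sketch of the pattern; io_queue_task_work() is a
 * hypothetical name, not a function in fs/io_uring.c.
 */
static void io_queue_task_work(struct io_kiocb *req, task_work_func_t func)
{
	int ret;

	init_task_work(&req->task_work, func);
	/*
	 * TWA_SIGNAL interrupts the target task the way signal delivery
	 * does, so the work runs promptly even while the task sleeps in
	 * the kernel. TWA_RESUME only runs the work on the task's next
	 * return to userspace, which can be arbitrarily far away -- the
	 * delay this patch fixes.
	 */
	ret = task_work_add(req->task, &req->task_work, TWA_SIGNAL);
	if (unlikely(ret)) {
		/*
		 * The task is exiting and can no longer run task work;
		 * fall back to an io-wq worker. TWA_NONE queues without
		 * notification, so wake the worker explicitly.
		 */
		struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);

		task_work_add(tsk, &req->task_work, TWA_NONE);
		wake_up_process(tsk);
	}
}
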
From e4910bc79ff253ecf742ead62f449e38a5d56887 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 5 Aug 2020 12:58:23 -0600
Subject: [PATCH 2/2] io_uring: set ctx sq/cq entry count earlier
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

ANBZ: #112

commit bd74048108c179cea0ff52979506164c80f29da7 upstream

If we hit an earlier error path in io_uring_create(), then we will have
accounted memory, but not set ctx->{sq,cq}_entries yet. Then when the
ring is torn down in error, we use those values to unaccount the memory.
Ensure we set the ctx entries before we're able to hit a potential error
path.

Cc: stable@vger.kernel.org
Reported-by: Tomáš Chaloupka
Tested-by: Tomáš Chaloupka
Reviewed-by: Stefano Garzarella
Signed-off-by: Jens Axboe
Reviewed-by: Joseph Qi
Reviewed-by: Hao Xu
Signed-off-by: Xiaoguang Wang
---
 fs/io_uring.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 55ebbe3f863f..38a719b24b3e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8887,6 +8887,10 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 	struct io_rings *rings;
 	size_t size, sq_array_offset;
 
+	/* make sure these are sane, as we already accounted them */
+	ctx->sq_entries = p->sq_entries;
+	ctx->cq_entries = p->cq_entries;
+
 	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
 	if (size == SIZE_MAX)
 		return -EOVERFLOW;
@@ -8903,8 +8907,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 	rings->cq_ring_entries = p->cq_entries;
 	ctx->sq_mask = rings->sq_ring_mask;
 	ctx->cq_mask = rings->cq_ring_mask;
-	ctx->sq_entries = rings->sq_ring_entries;
-	ctx->cq_entries = rings->cq_ring_entries;
 
 	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
 	if (size == SIZE_MAX) {
--
Gitee
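
For context on why the ordering matters: io_uring_create() charges the ring
memory against the user's limit up front, computed from p->sq_entries and
p->cq_entries, but teardown uncharges based on ctx->sq_entries and
ctx->cq_entries. A condensed sketch of the failure mode follows; ring_pages()
and io_unaccount_mem() are the helpers fs/io_uring.c of this era uses, though
their exact signatures vary across kernel versions, and the function below is
an illustrative condensation, not the literal free path.

/*
 * Condensed illustration: teardown sizes the uncharge from ctx, not
 * from the original io_uring_params. If an error is hit after the
 * memory was accounted but before ctx->sq_entries/ctx->cq_entries
 * were assigned, this computes ring_pages(0, 0) and uncharges
 * nothing, leaking the earlier accounting charge.
 */
static void io_ring_ctx_unaccount_sketch(struct io_ring_ctx *ctx)
{
	unsigned long nr_pages;

	nr_pages = ring_pages(ctx->sq_entries, ctx->cq_entries);
	io_unaccount_mem(ctx->user, nr_pages);
}

With the fix, both fields are valid as soon as the memory has been accounted,
so any later error path uncharges exactly what was charged.
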