From ebc60fa3e9b0b878b9cfc1da253276e688f703c2 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Sat, 2 Nov 2024 16:12:34 +0800
Subject: [PATCH 1/2] io_uring: always lock __io_cqring_overflow_flush

mainline inclusion
from mainline-v6.10-rc1
commit 8d09a88ef9d3cb7d21d45c39b7b7c31298d23998
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAYRF9
CVE: CVE-2024-50060

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=8d09a88ef9d3cb7d21d45c39b7b7c31298d23998

--------------------------------

Conditional locking is never great; in the case of
__io_cqring_overflow_flush(), which is a slow path, it's not justified.
Don't handle IOPOLL separately; always grab uring_lock for overflow
flushing.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/162947df299aa12693ac4b305dacedab32ec7976.1712708261.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
Conflicts:
	io_uring/io_uring.c
[Context conflicts because there are no commits:
408024b95927 ("io_uring: open code io_cqring_overflow_flush()")
a85381d8326d ("io_uring: skip overflow CQE posting for dying ring")
52ea806ad983 ("io_uring: finish waiting before flushing overflow entries")
etc.]
Signed-off-by: Baokun Li
---
 io_uring/io_uring.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9b92de913772..94d429483567 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1663,6 +1663,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
 	bool all_flushed, posted;
 
+	lockdep_assert_held(&ctx->uring_lock);
+
 	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
 		return false;
 
@@ -1706,12 +1708,9 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 	bool ret = true;
 
 	if (test_bit(0, &ctx->check_cq_overflow)) {
-		/* iopoll syncs against uring_lock, not completion_lock */
-		if (ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_lock(&ctx->uring_lock);
+		mutex_lock(&ctx->uring_lock);
 		ret = __io_cqring_overflow_flush(ctx, false);
-		if (ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_unlock(&ctx->uring_lock);
+		mutex_unlock(&ctx->uring_lock);
 	}
 
 	return ret;
-- 
Gitee
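Taken on its own, the change above reduces to a caller-always-locks contract: io_cqring_overflow_flush() now takes uring_lock unconditionally, and the slow-path helper asserts that the lock is held rather than special-casing IOPOLL. What follows is only a minimal userspace sketch of that pattern, assuming pthreads; struct ring, flush_overflow_locked() and flush_overflow() are invented names, not io_uring symbols.

/*
 * Userspace sketch only (pthreads); struct ring, flush_overflow_locked()
 * and flush_overflow() are invented names, not io_uring symbols.
 */
#include <pthread.h>
#include <stdbool.h>

struct ring {
	pthread_mutex_t lock;	/* plays the role of ctx->uring_lock */
	int overflow_count;	/* plays the role of the overflow list */
};

/*
 * Slow path: the caller must hold ring->lock. The kernel patch documents
 * and enforces the analogous rule with lockdep_assert_held(&ctx->uring_lock).
 */
static bool flush_overflow_locked(struct ring *r, bool force)
{
	if (!force && r->overflow_count == 0)
		return false;
	r->overflow_count = 0;
	return true;
}

/* Entry point: always take the lock, no conditional (IOPOLL-style) locking. */
static bool flush_overflow(struct ring *r)
{
	bool ret;

	pthread_mutex_lock(&r->lock);
	ret = flush_overflow_locked(r, false);
	pthread_mutex_unlock(&r->lock);
	return ret;
}

int main(void)
{
	struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER, .overflow_count = 3 };

	return flush_overflow(&r) ? 0 : 1;
}

The point of the pattern is that the slow path ends up with exactly one locking rule; lockdep_assert_held() encodes that rule in the kernel change above.
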
From c6f95e843506aed13d26488417c0000e72e0da86 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Sat, 2 Nov 2024 16:12:35 +0800
Subject: [PATCH 2/2] io_uring: check if we need to reschedule during overflow flush

stable inclusion
from stable-v6.6.57
commit f4ce3b5d26ce149e77e6b8e8f2058aa80e5b034e
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAYRF9
CVE: CVE-2024-50060

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=f4ce3b5d26ce149e77e6b8e8f2058aa80e5b034e

--------------------------------

[ Upstream commit eac2ca2d682f94f46b1973bdf5e77d85d77b8e53 ]

In terms of normal application usage, this list will always be empty.
And if an application does overflow a bit, it'll have a few entries.
However, nothing obviously prevents syzbot from running a test case that
generates a ton of overflow entries, and then flushing them can take
quite a while.

Check for needing to reschedule while flushing, and drop our locks and
do so if necessary. There's no state to maintain here as overflows
always prune from head-of-list, hence it's fine to drop and reacquire
the locks at the end of the loop.

Link: https://lore.kernel.org/io-uring/66ed061d.050a0220.29194.0053.GAE@google.com/
Reported-by: syzbot+5fca234bd7eb378ff78e@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe
Signed-off-by: Sasha Levin
Conflicts:
	io_uring/io_uring.c
[Context conflicts because there are no commits:
253993210bd8 ("io_uring: introduce locking helpers for CQE posting")
1b346e4aa8e7 ("io_uring: don't check overflow flush failures")
etc.]
Signed-off-by: Baokun Li
---
 io_uring/io_uring.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 94d429483567..bfaa7fd63a8a 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1686,6 +1686,23 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 		posted = true;
 		list_del(&ocqe->list);
 		kfree(ocqe);
+
+		/*
+		 * For silly syzbot cases that deliberately overflow by huge
+		 * amounts, check if we need to resched and drop and
+		 * reacquire the locks if so. Nothing real would ever hit this.
+		 * Ideally we'd have a non-posting unlock for this, but hard
+		 * to care for a non-real case.
+		 */
+		if (need_resched()) {
+			io_commit_cqring(ctx);
+			spin_unlock(&ctx->completion_lock);
+			io_cqring_ev_posted(ctx);
+			mutex_unlock(&ctx->uring_lock);
+			cond_resched();
+			mutex_lock(&ctx->uring_lock);
+			spin_lock(&ctx->completion_lock);
+		}
 	}
 
 	all_flushed = list_empty(&ctx->cq_overflow_list);
-- 
Gitee
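The loop change above relies on the property called out in the commit message: overflow entries are always pruned from the head of the list, so nothing has to be carried across the unlock. Below is a minimal userspace sketch of that shape, again assuming pthreads; struct entry, struct ring and flush_overflow() are invented names, a single mutex stands in for uring_lock plus completion_lock, and yielding every 1024 entries stands in for the kernel's need_resched()/cond_resched() check.

/*
 * Userspace sketch only (pthreads); struct entry, struct ring and
 * flush_overflow() are invented names, not io_uring symbols.
 */
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
};

struct ring {
	pthread_mutex_t lock;		/* stands in for both kernel locks */
	struct entry *overflow_head;	/* singly linked overflow list */
};

static void flush_overflow(struct ring *r)
{
	unsigned long flushed = 0;

	pthread_mutex_lock(&r->lock);
	while (r->overflow_head) {
		/* Always prune from the head, like the kernel's list. */
		struct entry *e = r->overflow_head;

		r->overflow_head = e->next;
		free(e);

		/*
		 * Userspace has no need_resched(), so yield every so often
		 * instead. Dropping the lock here loses no state because the
		 * list head is simply re-read after relocking.
		 */
		if (++flushed % 1024 == 0) {
			pthread_mutex_unlock(&r->lock);
			sched_yield();
			pthread_mutex_lock(&r->lock);
		}
	}
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };

	/* Build a deliberately long list, then flush it. */
	for (int i = 0; i < 100000; i++) {
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			break;
		e->next = r.overflow_head;
		r.overflow_head = e;
	}
	flush_overflow(&r);
	return 0;
}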