diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9b92de913772cc6f1655e8d6fda9827897fd5f57..bfaa7fd63a8a2939f4d1b7c66e5bb8b10179c7fb 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1663,6 +1663,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
 	bool all_flushed, posted;
 
+	lockdep_assert_held(&ctx->uring_lock);
+
 	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
 		return false;
 
@@ -1684,6 +1686,23 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 		posted = true;
 		list_del(&ocqe->list);
 		kfree(ocqe);
+
+		/*
+		 * For silly syzbot cases that deliberately overflow by huge
+		 * amounts, check if we need to resched and drop and
+		 * reacquire the locks if so. Nothing real would ever hit this.
+		 * Ideally we'd have a non-posting unlock for this, but hard
+		 * to care for a non-real case.
+		 */
+		if (need_resched()) {
+			io_commit_cqring(ctx);
+			spin_unlock(&ctx->completion_lock);
+			io_cqring_ev_posted(ctx);
+			mutex_unlock(&ctx->uring_lock);
+			cond_resched();
+			mutex_lock(&ctx->uring_lock);
+			spin_lock(&ctx->completion_lock);
+		}
 	}
 
 	all_flushed = list_empty(&ctx->cq_overflow_list);
@@ -1706,12 +1725,9 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 	bool ret = true;
 
 	if (test_bit(0, &ctx->check_cq_overflow)) {
-		/* iopoll syncs against uring_lock, not completion_lock */
-		if (ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_lock(&ctx->uring_lock);
+		mutex_lock(&ctx->uring_lock);
 		ret = __io_cqring_overflow_flush(ctx, false);
-		if (ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_unlock(&ctx->uring_lock);
+		mutex_unlock(&ctx->uring_lock);
 	}
 
 	return ret;
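
Not part of the patch: a minimal, standalone userspace C sketch of the pattern the second hunk introduces, i.e. draining a very long list under two locks and periodically dropping both locks in reverse acquisition order, yielding, then reacquiring before continuing. All names here (big_lock, list_lock, drain_overflow, the 1024 batch size) are illustrative stand-ins, not io_uring code; big_lock plays the role of ctx->uring_lock and list_lock the role of ctx->completion_lock.

/*
 * Userspace sketch only. The batch counter is an arbitrary stand-in for
 * need_resched(), and sched_yield() stands in for cond_resched().
 */
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *overflow_head;

static void drain_overflow(void)
{
	unsigned long drained = 0;

	pthread_mutex_lock(&big_lock);
	pthread_mutex_lock(&list_lock);

	while (overflow_head) {
		struct node *n = overflow_head;

		overflow_head = n->next;
		free(n);

		/*
		 * Periodically drop both locks in reverse acquisition
		 * order, yield the CPU, then reacquire and continue,
		 * mirroring the need_resched()/cond_resched() break the
		 * patch inserts into the overflow flush loop.
		 */
		if (++drained % 1024 == 0) {
			pthread_mutex_unlock(&list_lock);
			pthread_mutex_unlock(&big_lock);
			sched_yield();
			pthread_mutex_lock(&big_lock);
			pthread_mutex_lock(&list_lock);
		}
	}

	pthread_mutex_unlock(&list_lock);
	pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	/* Build a deliberately long list, then drain it. */
	for (int i = 0; i < 1 << 20; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = overflow_head;
		overflow_head = n;
	}
	drain_overflow();
	return 0;
}

One difference from the sketch that the kernel code has to care about: before dropping ctx->completion_lock, the patch calls io_commit_cqring() and io_cqring_ev_posted() so that CQEs already moved to the ring are published to waiters rather than sitting unposted while the flusher sleeps, which is what the "non-posting unlock" remark in the comment alludes to.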