Merge tag 'io_uring-6.2-2023-01-27' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:
 "Two small fixes for this release:

   - Sanitize how async prep is done for drain requests, so we ensure
     that it always gets done (Dylan); a toy sketch of the new ordering
     follows the io_uring.c hunks below

   - A ring provided buffer recycling fix for multishot receive (me);
     a toy sketch of the cache-and-restore pattern follows the net.c
     diff"

* tag 'io_uring-6.2-2023-01-27' of git://git.kernel.dk/linux:
  io_uring: always prep_async for drain requests
  io_uring/net: cache provided buffer group value for multishot receives
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 0a4efad..db623b3 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1765,17 +1765,12 @@
 	}
 	spin_unlock(&ctx->completion_lock);
 
-	ret = io_req_prep_async(req);
-	if (ret) {
-fail:
-		io_req_defer_failed(req, ret);
-		return;
-	}
 	io_prep_async_link(req);
 	de = kmalloc(sizeof(*de), GFP_KERNEL);
 	if (!de) {
 		ret = -ENOMEM;
-		goto fail;
+		io_req_defer_failed(req, ret);
+		return;
 	}
 
 	spin_lock(&ctx->completion_lock);
@@ -2048,13 +2043,16 @@
 		req->flags &= ~REQ_F_HARDLINK;
 		req->flags |= REQ_F_LINK;
 		io_req_defer_failed(req, req->cqe.res);
-	} else if (unlikely(req->ctx->drain_active)) {
-		io_drain_req(req);
 	} else {
 		int ret = io_req_prep_async(req);
 
-		if (unlikely(ret))
+		if (unlikely(ret)) {
 			io_req_defer_failed(req, ret);
+			return;
+		}
+
+		if (unlikely(req->ctx->drain_active))
+			io_drain_req(req);
 		else
 			io_queue_iowq(req, NULL);
 	}
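
To make the ordering change concrete, here is a minimal userspace
sketch of the flow in the second hunk (io_queue_sqe_fallback()). It is
a toy model, not kernel code: fake_req, prep_async, drain_req,
queue_iowq and queue_fallback are all invented names standing in for
the real symbols noted in the comments.

#include <stdio.h>
#include <stdbool.h>

struct fake_req {
	bool drain;	/* stands in for ctx->drain_active applying here */
	bool prepped;	/* stands in for the async prep having been done */
};

/* stands in for io_req_prep_async(); must run before any deferred issue */
static int prep_async(struct fake_req *req)
{
	req->prepped = true;
	return 0;
}

/* stands in for io_drain_req(): park the request behind outstanding ones */
static void drain_req(struct fake_req *req)
{
	printf("drained, prepped=%d\n", req->prepped);
}

/* stands in for io_queue_iowq(): punt the request to the worker pool */
static void queue_iowq(struct fake_req *req)
{
	printf("punted to io-wq, prepped=%d\n", req->prepped);
}

/*
 * Mirrors the fixed ordering above: prep runs unconditionally first,
 * so a drained request can no longer be issued later without its
 * async state set up.
 */
static void queue_fallback(struct fake_req *req)
{
	if (prep_async(req))
		return;		/* io_req_defer_failed() in the real code */

	if (req->drain)
		drain_req(req);
	else
		queue_iowq(req);
}

int main(void)
{
	struct fake_req a = { .drain = true };
	struct fake_req b = { .drain = false };

	queue_fallback(&a);	/* drained, prepped=1 */
	queue_fallback(&b);	/* punted to io-wq, prepped=1 */
	return 0;
}

Before the fix, the drain branch was taken before prep, so the first
call above could reach drain_req() with prepped=0.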
diff --git a/io_uring/net.c b/io_uring/net.c
index fbc34a7..90326b2 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -62,6 +62,7 @@
 	u16				flags;
 	/* initialised and used only by !msg send variants */
 	u16				addr_len;
+	u16				buf_group;
 	void __user			*addr;
 	/* used only for send zerocopy */
 	struct io_kiocb 		*notif;
@@ -580,6 +581,15 @@
 		if (req->opcode == IORING_OP_RECV && sr->len)
 			return -EINVAL;
 		req->flags |= REQ_F_APOLL_MULTISHOT;
+		/*
+		 * Store the buffer group for this multishot receive separately,
+		 * as if we end up doing an io-wq based issue that selects a
+		 * buffer, it has to be committed immediately and that will
+		 * clear ->buf_list. This means we lose the link to the buffer
+		 * list, and the eventual buffer put on completion then cannot
+		 * restore it.
+		 */
+		sr->buf_group = req->buf_index;
 	}
 
 #ifdef CONFIG_COMPAT
@@ -596,6 +606,7 @@
 
 	sr->done_io = 0;
 	sr->len = 0; /* get from the provided buffer */
+	req->buf_index = sr->buf_group;
 }
 
 /*
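
To make the net.c change concrete, a minimal userspace sketch of the
cache-and-restore pattern follows. It is a toy model, not kernel code:
fake_req, fake_sr, recv_prep, issue_commits_buffer and recv_prep_retry
are invented names. In the real code the capture happens at prep time
(sr->buf_group = req->buf_index above) and the restore in the
multishot retry prep (req->buf_index = sr->buf_group).

#include <stdio.h>

struct fake_req {
	unsigned short buf_index;	/* stands in for req->buf_index */
};

struct fake_sr {
	unsigned short buf_group;	/* stands in for io_sr_msg::buf_group */
};

/* prep time: capture the buffer group the user asked for, once */
static void recv_prep(struct fake_sr *sr, struct fake_req *req)
{
	sr->buf_group = req->buf_index;
}

/*
 * io-wq based issue: a selected buffer is committed immediately,
 * which loses the group stored in buf_index (modelled as zeroing it).
 */
static void issue_commits_buffer(struct fake_req *req)
{
	req->buf_index = 0;
}

/* retry prep: restore the group so the next shot uses the same list */
static void recv_prep_retry(struct fake_sr *sr, struct fake_req *req)
{
	req->buf_index = sr->buf_group;
}

int main(void)
{
	struct fake_req req = { .buf_index = 7 };	/* user picked group 7 */
	struct fake_sr sr;

	recv_prep(&sr, &req);
	issue_commits_buffer(&req);
	printf("after commit: buf_index=%hu\n", req.buf_index);	/* 0, link lost */

	recv_prep_retry(&sr, &req);
	printf("after retry prep: buf_index=%hu\n", req.buf_index);	/* 7 again */
	return 0;
}

The design point, as the in-diff comment explains: the private copy in
the command struct survives the commit because nothing on the issue
path writes it, while req->buf_index gets reused for the selected
buffer and so cannot carry the group across shots.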