// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

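/*
 * nfs_pgio_get_mirror - look up a mirror in the pageio descriptor
 * @desc: pointer to descriptor
 * @idx: mirror index
 *
 * Defers to the pg_get_mirror callback when the pageio ops provide
 * one; otherwise the descriptor only has its single built-in mirror,
 * which is returned regardless of @idx.
 */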
static struct nfs_pgio_mirror *
nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_get_mirror)
		return desc->pg_ops->pg_get_mirror(desc, idx);
	return &desc->pg_mirrors[0];
}

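/*
 * nfs_pgio_current_mirror - return the currently selected mirror
 * @desc: pointer to descriptor
 *
 * Convenience wrapper that looks up the mirror addressed by the
 * descriptor's pg_mirror_idx.
 */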
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

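/*
 * nfs_pgio_set_current_mirror - select the mirror for subsequent I/O
 * @desc: pointer to descriptor
 * @idx: mirror index to select
 *
 * Defers to the pg_set_mirror callback when one is provided. In the
 * single-mirror case there is nothing to switch, so the current index
 * is returned unchanged.
 */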
static u32
nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_set_mirror)
		return desc->pg_ops->pg_set_mirror(desc, idx);
	return desc->pg_mirror_idx;
}

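/*
 * nfs_pgheader_init - initialise an I/O header from a pageio descriptor
 * @desc: pointer to descriptor
 * @hdr: I/O header to initialise
 * @release: function called when @hdr is no longer needed
 *
 * The header inherits the first request on the current mirror's page
 * list together with the inode, credential, direct I/O and completion
 * state recorded in @desc.
 */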
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = nfs_req_openctx(hdr->req)->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

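/*
 * nfs_set_pgio_error - record an I/O error in the header
 * @hdr: I/O header
 * @error: error code to record
 * @pos: file offset at which the error occurred
 *
 * Trims hdr->good_bytes back to the start of the failed range and
 * latches the first error seen; errors reported after the first one
 * only shorten good_bytes and do not overwrite hdr->error.
 */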
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	unsigned int new = pos - hdr->io_start;

	trace_nfs_pgio_error(hdr, error, pos);
	if (hdr->good_bytes > new) {
		hdr->good_bytes = new;
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
			hdr->error = error;
	}
}

static inline struct nfs_page *nfs_page_alloc(void)
{
	struct nfs_page *p =
		kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask());
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_count to use
 *
 * Returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}

/**
 * nfs_async_iocounter_wait - wait on an rpc_waitqueue for I/O to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_count to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);

/*
 * nfs_page_group_lock_head - lock the head of the page group
 * @req: any member of the page group
 *
 * Returns the locked head request, taking an extra reference on it
 * when @req is not itself the head, or an ERR_PTR on failure.
 */
struct nfs_page *
nfs_page_group_lock_head(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	while (!nfs_lock_request(head)) {
		int ret = nfs_wait_on_request(head);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	if (head != req)
		kref_get(&head->wb_kref);
	return head;
}

/*
 * nfs_unroll_locks - unlock all newly locked reqs in the page group
 * @head: head request of page group, must be holding head lock
 * @req: request that could not be locked; unwinding stops here
 *
 * This is a helper function for nfs_lock_and_join_requests: it
 * relinquishes the subrequest locks taken so far on this pass.
 */
static void
nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}

/*
 * nfs_page_group_lock_subreq - try to lock a subrequest
 * @head: head request of page group
 * @subreq: request to lock
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request and page group both locked.
 * On error, it returns with the page group unlocked.
 */
static int
nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
{
	int ret;

	if (!kref_get_unless_zero(&subreq->wb_kref))
		return 0;
	while (!nfs_lock_request(subreq)) {
		nfs_page_group_unlock(head);
		ret = nfs_wait_on_request(subreq);
		if (!ret)
			ret = nfs_page_group_lock(head);
		if (ret < 0) {
			nfs_unroll_locks(head, subreq);
			nfs_release_request(subreq);
			return ret;
		}
	}
	return 0;
}

/*
 * nfs_page_group_lock_subrequests - try to lock the subrequests
 * @head: head request of page group
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request locked.
 */
int nfs_page_group_lock_subrequests(struct nfs_page *head)
{
	struct nfs_page *subreq;
	int ret;

	ret = nfs_page_group_lock(head);
	if (ret < 0)
		return ret;
	/* lock each request in the page group */
	for (subreq = head->wb_this_page; subreq != head;
	     subreq = subreq->wb_this_page) {
		ret = nfs_page_group_lock_subreq(head, subreq);
		if (ret < 0)
			return ret;
	}
	nfs_page_group_unlock(head);
	return 0;
}

/*
 * nfs_page_set_headlock - set the request PG_HEADLOCK
 * @req: request that is to be locked
 *
 * this lock must be held when modifying req->wb_head
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_set_headlock(struct nfs_page *req)
{
	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_clear_headlock - clear the request PG_HEADLOCK
 * @req: request that is to be unlocked
 */
void
nfs_page_clear_headlock(struct nfs_page *req)
{
	clear_bit_unlock(PG_HEADLOCK, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req: request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	int ret;

	ret = nfs_page_set_headlock(req);
	if (ret || req->wb_head == req)
		return ret;
	return nfs_page_set_headlock(req->wb_head);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req: request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	if (req != req->wb_head)
		nfs_page_clear_headlock(req->wb_head);
	nfs_page_clear_headlock(req);
}

/*
 * nfs_page_group_sync_on_bit_locked - set @bit on @req; if the bit is
 * then set on every request in the page group, clear it on all of them
 * and return true
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 * return true if the bit is set for all requests in page group
 * @req: request in page group
 * @bit: PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req: a new nfs request
 * @prev: the previous request in page group, or NULL if @req is the first
 *	  or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;

	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = page_file_mapping(req->wb_page)->host;
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req: request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}

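/*
 * __nfs_create_request - allocate and initialise an nfs_page request
 * @l_ctx: lock context the request runs under
 * @page: page the request covers (may be NULL)
 * @pgbase: base offset of the request data within the page
 * @offset: offset of the request within the page
 * @count: number of bytes covered by the request
 *
 * Takes a reference on @l_ctx, bumps its io_count, and takes a
 * reference on @page when one is supplied.
 */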
static struct nfs_page *
__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
		     unsigned int pgbase, unsigned int offset,
		     unsigned int count)
{
	struct nfs_page *req;
	struct nfs_open_context *ctx = l_ctx->open_context;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->wb_lock_context = l_ctx;
	refcount_inc(&l_ctx->count);
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. */
	req->wb_page = page;
	if (page) {
		req->wb_index = page_index(page);
		get_page(page);
	}
	req->wb_offset = offset;
	req->wb_pgbase = pgbase;
	req->wb_bytes = count;
	kref_init(&req->wb_kref);
	req->wb_nio = 0;
	return req;
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = __nfs_create_request(l_ctx, page, offset, offset, count);
	if (!IS_ERR(ret))
		nfs_page_group_init(ret, NULL);
	nfs_put_lock_context(l_ctx);
	return ret;
}

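/*
 * nfs_create_subreq - create a locked subrequest spanning part of @req
 * @req: request to clone
 * @pgbase: base offset of the data within the page
 * @offset: offset of the subrequest within the page
 * @count: number of bytes covered
 *
 * The new request shares @req's page and lock context, inherits its
 * retry count (wb_nio), and is linked into the page group after the
 * group's current last member.
 */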
static struct nfs_page *
nfs_create_subreq(struct nfs_page *req,
		  unsigned int pgbase,
		  unsigned int offset,
		  unsigned int count)
{
	struct nfs_page *last;
	struct nfs_page *ret;

	ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
				   pgbase, offset, count);
	if (!IS_ERR(ret)) {
		/* find the last request */
		for (last = req->wb_head;
		     last->wb_this_page != req->wb_head;
		     last = last->wb_this_page)
			;

		nfs_lock_request(ret);
		ret->wb_index = req->wb_index;
		nfs_page_group_init(ret, last);
		ret->wb_nio = req->wb_nio;
	}
	return ret;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	clear_bit_unlock(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;
	struct nfs_open_context *ctx;

	if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			ctx = l_ctx->open_context;
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
}

/**
 * nfs_free_request - Free an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

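/*
 * nfs_release_request - drop a reference on an NFS read/write request
 * @req: request to release
 *
 * The page group is torn down via nfs_page_group_destroy once every
 * member has dropped its reference.
 */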
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

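/*
 * nfs_pgio_header_alloc - allocate a read or write header
 * @ops: read or write ops vector; its rw_alloc_header does the allocation
 *
 * Returns the new header with its page list and rw_ops initialised,
 * or NULL on allocation failure.
 */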
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 * @hdr: A header that has had nfs_generic_pgio called
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read or write
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase;
	hdr->args.pages = hdr->page_array.pagevec;
	hdr->args.count = count;
	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		fallthrough;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr = &hdr->fattr;
	hdr->res.count = 0;
	hdr->res.eof = 0;
	hdr->res.verf = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;

	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

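/*
 * nfs_initiate_pgio - set up and run the RPC task for a pageio header
 * @clnt: RPC client to use
 * @hdr: pageio header carrying the arguments and results
 * @cred: credential for the call
 * @rpc_ops: NFS protocol operations vector
 * @call_ops: RPC callbacks for the task
 * @how: write flags, passed through to rw_initiate
 * @flags: extra RPC task flags
 *
 * Returns 0 once the asynchronous task has been launched, or a
 * negative errno if it could not be created.
 */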
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};

	if (nfs_server_capable(hdr->inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;

	hdr->completion_ops->completion(hdr);
}

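/*
 * nfs_pageio_mirror_init - reset a pageio mirror to an empty state
 * @mirror: mirror to initialise
 * @bsize: I/O block size to record in pg_bsize
 */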
static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
	desc->pg_maxretrans = 0;
}

/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

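/*
 * A minimal sketch of the rw_done contract assumed by nfs_pgio_result()
 * above (demo_rw_done is hypothetical; the real implementations live in
 * the read and write paths): returning non-zero means the callback has
 * taken over, typically because it restarted the RPC, so the result
 * must not be processed further here.
 *
 *	static int demo_rw_done(struct rpc_task *task,
 *				struct nfs_pgio_header *hdr,
 *				struct inode *inode)
 *	{
 *		if (task->tk_status == -EJUKEBOX) {
 *			rpc_restart_call_prepare(task);
 *			return -EAGAIN;
 *		}
 *		return 0;
 *	}
 */
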
/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page *req;
	struct page **pages,
		    *last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	gfp_t gfp_flags = nfs_io_gfp_mask();

	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_move_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

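/*
 * Worked example for the page array sizing above, assuming 4K pages:
 * with mirror->pg_base = 512 and mirror->pg_count = 8192, the data
 * spans bytes 512..8703, i.e. DIV_ROUND_UP(512 + 8192, 4096) = 3
 * pages, so nfs_page_array_len() yields 3 and the inline page_array
 * suffices; only larger spans take the kcalloc() (and -ENOMEM) path.
 */
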
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;
	unsigned short task_flags = 0;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0) {
		if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
			task_flags = RPC_TASK_MOVEABLE;
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags,
					RPC_TASK_CRED_NOREF | task_flags);
	}
	return ret;
}

static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
			 unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	unsigned int i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask());
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		mirror_count = 1;
	}
	pgio->pg_mirror_count = mirror_count;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}

/**
 * nfs_coalesce_size - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Returns the size of the request that can be coalesced
 */
static unsigned int nfs_coalesce_size(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
			return 0;
		flctx = locks_inode_context(d_inode(nfs_req_openctx(req)->dentry));
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return 0;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return 0;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return 0;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
				return 0;
		}
	}
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

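/*
 * Worked example (illustrative, 4K pages): a request covering all of
 * page N coalesces with a following request covering all of page N+1,
 * since req_offset(req) == req_offset(prev) + prev->wb_bytes and prev
 * ends exactly on a page boundary.  Any gap or overlap between the two
 * makes nfs_coalesce_size() return 0, which the caller treats as
 * "cannot coalesce, flush the current batch first".
 */
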
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * If the request 'req' was successfully coalesced into the existing list
 * of pages 'desc', it returns the size of req.
 */
static unsigned int
nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
			  struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;
	unsigned int size;

	if (list_empty(&mirror->pg_list)) {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
		mirror->pg_count = 0;
		mirror->pg_recoalesce = 0;
	} else
		prev = nfs_list_entry(mirror->pg_list.prev);

	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
			desc->pg_error = -ETIMEDOUT;
		else
			desc->pg_error = -EIO;
		return 0;
	}

	size = nfs_coalesce_size(prev, req, desc);
	if (size < req->wb_bytes)
		return size;
	nfs_list_move_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return req->wb_bytes;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		if (list_empty(&mirror->pg_list))
			mirror->pg_bytes_written += mirror->pg_count;
	}
}

static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	LIST_HEAD(head);

	nfs_list_move_request(req, &head);
	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
}

Trond Myklebust | bcb71bb | 2007-04-02 18:48:28 -0400 | [diff] [blame] | 1148 | /** |
Trond Myklebust | 6453bcd | 2021-03-04 20:29:50 -0500 | [diff] [blame] | 1149 | * __nfs_pageio_add_request - Attempt to coalesce a request into a page list. |
Trond Myklebust | bcb71bb | 2007-04-02 18:48:28 -0400 | [diff] [blame] | 1150 | * @desc: destination io descriptor |
| 1151 | * @req: request |
| 1152 | * |
Weston Andros Adamson | 2bfc6e5 | 2014-05-15 11:56:45 -0400 | [diff] [blame] | 1153 | * This may split a request into subrequests which are all part of the |
Trond Myklebust | 44a65a0 | 2020-03-31 18:27:26 -0400 | [diff] [blame] | 1154 | * same page group. If so, it will submit @req as the last one, to ensure |
| 1155 | * the pointer to @req is still valid in case of failure. |
Weston Andros Adamson | 2bfc6e5 | 2014-05-15 11:56:45 -0400 | [diff] [blame] | 1156 | * |
Trond Myklebust | bcb71bb | 2007-04-02 18:48:28 -0400 | [diff] [blame] | 1157 | * Returns true if the request 'req' was successfully coalesced into the |
| 1158 | * existing list of pages 'desc'. |
| 1159 | */ |
Trond Myklebust | d9156f9 | 2011-07-12 13:42:02 -0400 | [diff] [blame] | 1160 | static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, |
Trond Myklebust | 8b09bee | 2007-04-02 18:48:28 -0400 | [diff] [blame] | 1161 | struct nfs_page *req) |
Trond Myklebust | bcb71bb | 2007-04-02 18:48:28 -0400 | [diff] [blame] | 1162 | { |
Peng Tao | 48d635f | 2014-11-10 08:35:35 +0800 | [diff] [blame] | 1163 | struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); |
Weston Andros Adamson | 2bfc6e5 | 2014-05-15 11:56:45 -0400 | [diff] [blame] | 1164 | struct nfs_page *subreq; |
Trond Myklebust | 44a65a0 | 2020-03-31 18:27:26 -0400 | [diff] [blame] | 1165 | unsigned int size, subreq_size; |
Weston Andros Adamson | 2bfc6e5 | 2014-05-15 11:56:45 -0400 | [diff] [blame] | 1166 | |
Trond Myklebust | 1344b7e | 2017-07-17 10:54:14 -0400 | [diff] [blame] | 1167 | nfs_page_group_lock(req); |
Weston Andros Adamson | 2bfc6e5 | 2014-05-15 11:56:45 -0400 | [diff] [blame] | 1168 | |
| 1169 | subreq = req; |
Trond Myklebust | 44a65a0 | 2020-03-31 18:27:26 -0400 | [diff] [blame] | 1170 | subreq_size = subreq->wb_bytes; |
| 1171 | for(;;) { |
| 1172 | size = nfs_pageio_do_add_request(desc, subreq); |
| 1173 | if (size == subreq_size) { |
| 1174 | /* We successfully submitted a request */ |
| 1175 | if (subreq == req) |
| 1176 | break; |
| 1177 | req->wb_pgbase += size; |
| 1178 | req->wb_bytes -= size; |
| 1179 | req->wb_offset += size; |
| 1180 | subreq_size = req->wb_bytes; |
| 1181 | subreq = req; |
| 1182 | continue; |
| 1183 | } |
| 1184 | if (WARN_ON_ONCE(subreq != req)) { |
| 1185 | nfs_page_group_unlock(req); |
| 1186 | nfs_pageio_cleanup_request(desc, subreq); |
| 1187 | subreq = req; |
| 1188 | subreq_size = req->wb_bytes; |
| 1189 | nfs_page_group_lock(req); |
| 1190 | } |
| 1191 | if (!size) { |
| 1192 | /* Can't coalesce any more, so do I/O */ |
Weston Andros Adamson | 2bfc6e5 | 2014-05-15 11:56:45 -0400 | [diff] [blame] | 1193 | nfs_page_group_unlock(req); |
| 1194 | desc->pg_moreio = 1; |
| 1195 | nfs_pageio_doio(desc); |
Trond Myklebust | f57dcf4 | 2019-02-13 09:21:38 -0500 | [diff] [blame] | 1196 | if (desc->pg_error < 0 || mirror->pg_recoalesce) |
Trond Myklebust | 44a65a0 | 2020-03-31 18:27:26 -0400 | [diff] [blame] | 1197 | return 0; |
Weston Andros Adamson | 2bfc6e5 | 2014-05-15 11:56:45 -0400 | [diff] [blame] | 1198 | /* retry add_request for this subreq */ |
Trond Myklebust | 1344b7e | 2017-07-17 10:54:14 -0400 | [diff] [blame] | 1199 | nfs_page_group_lock(req); |
Weston Andros Adamson | 2bfc6e5 | 2014-05-15 11:56:45 -0400 | [diff] [blame] | 1200 | continue; |
| 1201 | } |
Trond Myklebust | 44a65a0 | 2020-03-31 18:27:26 -0400 | [diff] [blame] | 1202 | subreq = nfs_create_subreq(req, req->wb_pgbase, |
| 1203 | req->wb_offset, size); |
| 1204 | if (IS_ERR(subreq)) |
| 1205 | goto err_ptr; |
| 1206 | subreq_size = size; |
| 1207 | } |
Weston Andros Adamson | 2bfc6e5 | 2014-05-15 11:56:45 -0400 | [diff] [blame] | 1208 | |
| 1209 | nfs_page_group_unlock(req); |
Trond Myklebust | bcb71bb | 2007-04-02 18:48:28 -0400 | [diff] [blame] | 1210 | return 1; |
Trond Myklebust | c110955 | 2014-05-29 11:38:15 -0400 | [diff] [blame] | 1211 | err_ptr: |
| 1212 | desc->pg_error = PTR_ERR(subreq); |
| 1213 | nfs_page_group_unlock(req); |
| 1214 | return 0; |
Trond Myklebust | bcb71bb | 2007-04-02 18:48:28 -0400 | [diff] [blame] | 1215 | } |
| 1216 | |
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
					 struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
	u32 midx;
	struct nfs_pgio_mirror *mirror;

	if (!desc->pg_error)
		return;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
						       desc->pg_error);
	}
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	/* Create the mirror instances first, and fire them off */
	for (midx = 1; midx < desc->pg_mirror_count; midx++) {
		nfs_page_group_lock(req);

		dupreq = nfs_create_subreq(req,
					   pgbase, offset, bytes);

		nfs_page_group_unlock(req);
		if (IS_ERR(dupreq)) {
			desc->pg_error = PTR_ERR(dupreq);
			goto out_failed;
		}

		nfs_pgio_set_current_mirror(desc, midx);
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_cleanup_subreq;
	}

	nfs_pgio_set_current_mirror(desc, 0);
	if (!nfs_pageio_add_request_mirror(desc, req))
		goto out_failed;

	return 1;

out_cleanup_subreq:
	nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
	nfs_pageio_error_cleanup(desc);
	return 0;
}

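/*
 * Illustrative note: with a pNFS layout that reports two mirrors,
 * nfs_pageio_add_request() above first queues a duplicate of the
 * request on mirror 1 via nfs_create_subreq(), then queues the
 * original on mirror 0, so one logical write lands on every mirror's
 * pg_list before any I/O is fired.
 */
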
/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: mirror index to complete
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror;
	u32 restore_idx;

	restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
	mirror = nfs_pgio_current_mirror(desc);

	for (;;) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	nfs_pgio_set_current_mirror(desc, restore_idx);
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr: the pgio header to move requests from
 * @desc: the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(pages);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	list_splice_init(&hdr->pages, &pages);
	while (!list_empty(&pages)) {
		struct nfs_page *req = nfs_list_entry(pages.next);

		if (!nfs_pageio_add_request(desc, req))
			break;
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&pages)) {
		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
		hdr->completion_ops->error_cleanup(&pages, err);
		nfs_set_pgio_error(hdr, err, hdr->io_start);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_error < 0)
		nfs_pageio_error_cleanup(desc);
	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

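/*
 * Sketch of the overall descriptor lifecycle (helper names other than
 * the nfs_pageio_* calls are hypothetical):
 *
 *	struct nfs_pageio_descriptor pgio;
 *	struct nfs_page *req;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops,
 *			NFS_SERVER(inode)->rsize, 0);
 *	while ((req = next_request()) != NULL)
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;		(pg_error then holds the cause)
 *	nfs_pageio_complete(&pgio);
 */
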
/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			if (index != prev->wb_index + 1) {
				nfs_pageio_complete(desc);
				break;
			}
		}
	}
}

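/*
 * Usage sketch (hedged): a writeback loop walking pages in index order
 * would call this before taking each page lock, so that a queued batch
 * that is not contiguous with the new index is flushed first and
 * cannot deadlock against pages the descriptor already holds:
 *
 *	nfs_pageio_cond_complete(&pgio, page->index);
 *	lock_page(page);
 *	(build the nfs_page request and add it to &pgio)
 */
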
/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	nfs_pageio_complete(pgio);
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};