blob: b05717fe0d4e4f5b505f98e52e25273fe216b325 [file] [log] [blame]
Thomas Gleixnerb4d0d232019-05-20 19:08:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
David Howells14727282009-04-03 16:42:42 +01002/* NFS filesystem cache interface
3 *
4 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
David Howells14727282009-04-03 16:42:42 +01006 */
7
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/mm.h>
12#include <linux/nfs_fs.h>
13#include <linux/nfs_fs_sb.h>
14#include <linux/in6.h>
15#include <linux/seq_file.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/slab.h>
David Howells402cb8d2018-04-04 13:41:28 +010017#include <linux/iversion.h>
Dave Wysochanski000dbe02023-02-20 08:43:06 -050018#include <linux/xarray.h>
19#include <linux/fscache.h>
20#include <linux/netfs.h>
David Howells14727282009-04-03 16:42:42 +010021
22#include "internal.h"
David Howells545db452009-04-03 16:42:44 +010023#include "iostat.h"
David Howells14727282009-04-03 16:42:42 +010024#include "fscache.h"
Dave Wysochanskie3f0a7fe2022-03-01 14:37:26 -050025#include "nfstrace.h"
David Howells14727282009-04-03 16:42:42 +010026
#define NFS_MAX_KEY_LEN 1000

/*
 * Append ",<x in hex>" to the volume key under construction; a zero value
 * is abbreviated to a bare ','.  Returns false (leaving *_len untouched)
 * once the key has already grown beyond NFS_MAX_KEY_LEN; the caller's
 * buffer carries extra slack, so the append itself need not be bounded.
 */
static bool nfs_append_int(char *key, int *_len, unsigned long long x)
{
	int len = *_len;

	if (len > NFS_MAX_KEY_LEN)
		return false;

	if (x != 0)
		len += sprintf(key + len, ",%llx", x);
	else
		key[len++] = ',';

	*_len = len;
	return true;
}
David Howells402cb8d2018-04-04 13:41:28 +010039
/*
 * Generate the per-client part of the fscache volume key: the NFS protocol
 * version, minor version and the server's address family, followed by the
 * server's port and network address in a family-dependent form.
 */
Dave Wysochanskia6b5a282020-11-14 13:43:54 -050046static bool nfs_fscache_get_client_key(struct nfs_client *clp,
47 char *key, int *_len)
David Howells14727282009-04-03 16:42:42 +010048{
David Howells402cb8d2018-04-04 13:41:28 +010049 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
50 const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
David Howells402cb8d2018-04-04 13:41:28 +010051
Dave Wysochanskia6b5a282020-11-14 13:43:54 -050052 *_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
53 ",%u.%u,%x",
54 clp->rpc_ops->version,
55 clp->cl_minorversion,
56 clp->cl_addr.ss_family);
David Howells402cb8d2018-04-04 13:41:28 +010057
58 switch (clp->cl_addr.ss_family) {
59 case AF_INET:
Dave Wysochanskia6b5a282020-11-14 13:43:54 -050060 if (!nfs_append_int(key, _len, sin->sin_port) ||
61 !nfs_append_int(key, _len, sin->sin_addr.s_addr))
62 return false;
63 return true;
David Howells402cb8d2018-04-04 13:41:28 +010064
65 case AF_INET6:
Dave Wysochanskia6b5a282020-11-14 13:43:54 -050066 if (!nfs_append_int(key, _len, sin6->sin6_port) ||
67 !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
68 !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
69 !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
70 !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
71 return false;
72 return true;
David Howells402cb8d2018-04-04 13:41:28 +010073
74 default:
75 printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
76 clp->cl_addr.ss_family);
Dave Wysochanskia6b5a282020-11-14 13:43:54 -050077 return false;
David Howells402cb8d2018-04-04 13:41:28 +010078 }
David Howells14727282009-04-03 16:42:42 +010079}
80
/*
 * Get the cache volume cookie for an NFS superblock and store it in
 * nfss->fscache (left NULL if no cache volume could be obtained).
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct fscache_volume *vcookie;
	struct nfs_server *nfss = NFS_SB(sb);
	unsigned int len = 3;	/* the key starts with "nfs" */
	char *key;

	/* Stash the uniquifier so it can be inherited across automounts */
	if (uniq) {
		nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
		if (!nfss->fscache_uniq)
			return -ENOMEM;
	}

	/* The +24 bytes of slack absorb the one nfs_append_int() expansion
	 * (up to ",%llx") that may land just past the NFS_MAX_KEY_LEN check.
	 */
	key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	/* Build the volume key from the client identity, the fsid and the
	 * mount parameters.  If any append overflows the key, give up on
	 * caching for this superblock rather than failing the mount.
	 */
	memcpy(key, "nfs", 3);
	if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
	    !nfs_append_int(key, &len, nfss->fsid.major) ||
	    !nfs_append_int(key, &len, nfss->fsid.minor) ||
	    !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
	    !nfs_append_int(key, &len, nfss->flags) ||
	    !nfs_append_int(key, &len, nfss->rsize) ||
	    !nfs_append_int(key, &len, nfss->wsize) ||
	    !nfs_append_int(key, &len, nfss->acregmin) ||
	    !nfs_append_int(key, &len, nfss->acregmax) ||
	    !nfs_append_int(key, &len, nfss->acdirmin) ||
	    !nfs_append_int(key, &len, nfss->acdirmax) ||
	    !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
		goto out;

	/* Append the uniquifier, if any, with the same overflow policy */
	if (ulen > 0) {
		if (ulen > NFS_MAX_KEY_LEN - len)
			goto out;
		key[len++] = ',';
		memcpy(key + len, uniq, ulen);
		len += ulen;
	}
	key[len] = 0;

	/* create a cache index for looking up filehandles */
	vcookie = fscache_acquire_volume(key,
					 NULL, /* preferred_cache */
					 NULL, 0 /* coherency_data */);
	if (IS_ERR(vcookie)) {
		if (vcookie != ERR_PTR(-EBUSY)) {
			kfree(key);
			return PTR_ERR(vcookie);
		}
		/* A key collision isn't fatal; just run this mount uncached */
		pr_err("NFS: Cache volume key already in use (%s)\n", key);
		vcookie = NULL;
	}
	nfss->fscache = vcookie;

out:
	kfree(key);
	return 0;	/* key overflow merely disables caching, not the mount */
}
147
148/*
149 * release a per-superblock cookie
150 */
151void nfs_fscache_release_super_cookie(struct super_block *sb)
152{
153 struct nfs_server *nfss = NFS_SB(sb);
154
Dave Wysochanskia6b5a282020-11-14 13:43:54 -0500155 fscache_relinquish_volume(nfss->fscache, NULL, false);
David Howells08734042009-04-03 16:42:42 +0100156 nfss->fscache = NULL;
Dave Wysochanskia6b5a282020-11-14 13:43:54 -0500157 kfree(nfss->fscache_uniq);
Dave Wysochanski50eaa652020-04-16 06:06:08 -0400158}
159
David Howellsef79c092009-04-03 16:42:43 +0100160/*
161 * Initialise the per-inode cache cookie pointer for an NFS inode.
162 */
David Howellsf1fe29b2013-09-27 11:20:03 +0100163void nfs_fscache_init_inode(struct inode *inode)
David Howellsef79c092009-04-03 16:42:43 +0100164{
David Howells402cb8d2018-04-04 13:41:28 +0100165 struct nfs_fscache_inode_auxdata auxdata;
Trond Myklebustdea1bb32019-08-03 13:39:24 -0400166 struct nfs_server *nfss = NFS_SERVER(inode);
David Howellsef79c092009-04-03 16:42:43 +0100167 struct nfs_inode *nfsi = NFS_I(inode);
168
Dave Wysochanski88a4d7b2023-02-20 08:43:05 -0500169 netfs_inode(inode)->cache = NULL;
Trond Myklebustdea1bb32019-08-03 13:39:24 -0400170 if (!(nfss->fscache && S_ISREG(inode->i_mode)))
David Howellsef79c092009-04-03 16:42:43 +0100171 return;
David Howells402cb8d2018-04-04 13:41:28 +0100172
Dave Wysochanski45f3a702022-03-01 14:37:24 -0500173 nfs_fscache_update_auxdata(&auxdata, inode);
David Howells402cb8d2018-04-04 13:41:28 +0100174
Dave Wysochanski88a4d7b2023-02-20 08:43:05 -0500175 netfs_inode(inode)->cache = fscache_acquire_cookie(
176 nfss->fscache,
Dave Wysochanskia6b5a282020-11-14 13:43:54 -0500177 0,
178 nfsi->fh.data, /* index_key */
179 nfsi->fh.size,
180 &auxdata, /* aux_data */
181 sizeof(auxdata),
Dave Wysochanski45f3a702022-03-01 14:37:24 -0500182 i_size_read(inode));
David Howellsb4fa9662023-06-28 11:48:52 +0100183
184 if (netfs_inode(inode)->cache)
185 mapping_set_release_always(inode->i_mapping);
David Howellsef79c092009-04-03 16:42:43 +0100186}
187
188/*
189 * Release a per-inode cookie.
190 */
David Howellsf1fe29b2013-09-27 11:20:03 +0100191void nfs_fscache_clear_inode(struct inode *inode)
David Howellsef79c092009-04-03 16:42:43 +0100192{
Dave Wysochanski88a4d7b2023-02-20 08:43:05 -0500193 fscache_relinquish_cookie(netfs_i_cookie(netfs_inode(inode)), false);
194 netfs_inode(inode)->cache = NULL;
David Howellsef79c092009-04-03 16:42:43 +0100195}
196
David Howellsef79c092009-04-03 16:42:43 +0100197/*
David Howellsf1fe29b2013-09-27 11:20:03 +0100198 * Enable or disable caching for a file that is being opened as appropriate.
199 * The cookie is allocated when the inode is initialised, but is not enabled at
200 * that time. Enablement is deferred to file-open time to avoid stat() and
201 * access() thrashing the cache.
202 *
203 * For now, with NFS, only regular files that are open read-only will be able
204 * to use the cache.
205 *
206 * We enable the cache for an inode if we open it read-only and it isn't
207 * currently open for writing. We disable the cache if the inode is open
208 * write-only.
209 *
210 * The caller uses the file struct to pin i_writecount on the inode before
211 * calling us when a file is opened for writing, so we can make use of that.
212 *
213 * Note that this may be invoked multiple times in parallel by parallel
214 * nfs_open() functions.
David Howellsef79c092009-04-03 16:42:43 +0100215 */
David Howellsf1fe29b2013-09-27 11:20:03 +0100216void nfs_fscache_open_file(struct inode *inode, struct file *filp)
David Howellsef79c092009-04-03 16:42:43 +0100217{
David Howells402cb8d2018-04-04 13:41:28 +0100218 struct nfs_fscache_inode_auxdata auxdata;
Dave Wysochanski88a4d7b2023-02-20 08:43:05 -0500219 struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
Dave Wysochanskia6b5a282020-11-14 13:43:54 -0500220 bool open_for_write = inode_is_open_for_write(inode);
David Howellsef79c092009-04-03 16:42:43 +0100221
David Howellsf1fe29b2013-09-27 11:20:03 +0100222 if (!fscache_cookie_valid(cookie))
223 return;
David Howellsef79c092009-04-03 16:42:43 +0100224
Dave Wysochanskia6b5a282020-11-14 13:43:54 -0500225 fscache_use_cookie(cookie, open_for_write);
226 if (open_for_write) {
Dave Wysochanski45f3a702022-03-01 14:37:24 -0500227 nfs_fscache_update_auxdata(&auxdata, inode);
Dave Wysochanskia6b5a282020-11-14 13:43:54 -0500228 fscache_invalidate(cookie, &auxdata, i_size_read(inode),
229 FSCACHE_INVAL_DIO_WRITE);
David Howellsef79c092009-04-03 16:42:43 +0100230 }
231}
David Howellsf1fe29b2013-09-27 11:20:03 +0100232EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
David Howells545db452009-04-03 16:42:44 +0100233
Dave Wysochanskia6b5a282020-11-14 13:43:54 -0500234void nfs_fscache_release_file(struct inode *inode, struct file *filp)
David Howells545db452009-04-03 16:42:44 +0100235{
Dave Wysochanskia6b5a282020-11-14 13:43:54 -0500236 struct nfs_fscache_inode_auxdata auxdata;
Dave Wysochanski88a4d7b2023-02-20 08:43:05 -0500237 struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
Dave Wysochanski9c4a5c72022-05-04 09:21:06 -0400238 loff_t i_size = i_size_read(inode);
David Howells545db452009-04-03 16:42:44 +0100239
Dave Wysochanski9c4a5c72022-05-04 09:21:06 -0400240 nfs_fscache_update_auxdata(&auxdata, inode);
241 fscache_unuse_cookie(cookie, &auxdata, &i_size);
David Howells9a9fc1c2009-04-03 16:42:44 +0100242}
243
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500244int nfs_netfs_read_folio(struct file *file, struct folio *folio)
David Howells16f2f4e2021-08-27 15:19:34 +0100245{
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500246 if (!netfs_inode(folio_inode(folio))->cache)
247 return -ENOBUFS;
David Howells16f2f4e2021-08-27 15:19:34 +0100248
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500249 return netfs_read_folio(file, folio);
David Howells16f2f4e2021-08-27 15:19:34 +0100250}
251
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500252int nfs_netfs_readahead(struct readahead_control *ractl)
David Howells16f2f4e2021-08-27 15:19:34 +0100253{
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500254 struct inode *inode = ractl->mapping->host;
David Howells16f2f4e2021-08-27 15:19:34 +0100255
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500256 if (!netfs_inode(inode)->cache)
257 return -ENOBUFS;
David Howells16f2f4e2021-08-27 15:19:34 +0100258
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500259 netfs_readahead(ractl);
260 return 0;
David Howells16f2f4e2021-08-27 15:19:34 +0100261}
262
Tom Rixc5733ae2023-04-20 21:01:32 -0400263static atomic_t nfs_netfs_debug_id;
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500264static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
David Howells9a9fc1c2009-04-03 16:42:44 +0100265{
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500266 rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
267 rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
David Howells16f2f4e2021-08-27 15:19:34 +0100268
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500269 return 0;
270}
271
272static void nfs_netfs_free_request(struct netfs_io_request *rreq)
273{
274 put_nfs_open_context(rreq->netfs_priv);
275}
276
277static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
278{
279 return fscache_begin_read_operation(&rreq->cache_resources,
280 netfs_i_cookie(netfs_inode(rreq->inode)));
281}
282
283static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
284{
285 struct nfs_netfs_io_data *netfs;
286
287 netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
288 if (!netfs)
289 return NULL;
290 netfs->sreq = sreq;
291 refcount_set(&netfs->refcount, 1);
292 return netfs;
293}
294
295static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
296{
297 size_t rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;
298
299 sreq->len = min(sreq->len, rsize);
300 return true;
301}
302
/*
 * Issue the NFS RPCs for one netfs read subrequest: walk the pages covering
 * [start, last] in the mapping and feed each into an NFS pageio descriptor.
 * Completion is reported asynchronously via pgio.pg_netfs (see
 * nfs_netfs_read_completion()) once the final reference is dropped.
 */
static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = sreq->rreq->inode;
	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
	struct page *page;
	int err;
	pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
	pgoff_t last = ((sreq->start + sreq->len -
			 sreq->transferred - 1) >> PAGE_SHIFT);
	XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	netfs = nfs_netfs_alloc(sreq);
	if (!netfs)
		return netfs_subreq_terminated(sreq, -ENOMEM, false);

	pgio.pg_netfs = netfs; /* used in completion */

	/* The xa lock is dropped around each folio because the add may
	 * sleep; xas_pause() keeps the walk restartable across the unlock.
	 */
	xas_lock(&xas);
	xas_for_each(&xas, page, last) {
		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
		xas_pause(&xas);
		xas_unlock(&xas);
		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
		if (err < 0) {
			/* Record the error and stop; completion sees it */
			netfs->error = err;
			goto out;
		}
		xas_lock(&xas);
	}
	xas_unlock(&xas);
out:
	/* Flush any batched-up pages, then drop our ref on the tracking data */
	nfs_pageio_complete_read(&pgio);
	nfs_netfs_put(netfs);
}
342
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500343void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
David Howells9a9fc1c2009-04-03 16:42:44 +0100344{
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500345 struct nfs_netfs_io_data *netfs = hdr->netfs;
David Howells9a9fc1c2009-04-03 16:42:44 +0100346
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500347 if (!netfs)
348 return;
David Howells7f8e05f2009-04-03 16:42:45 +0100349
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500350 nfs_netfs_get(netfs);
David Howells7f8e05f2009-04-03 16:42:45 +0100351}
Dave Wysochanski000dbe02023-02-20 08:43:06 -0500352
353int nfs_netfs_folio_unlock(struct folio *folio)
354{
355 struct inode *inode = folio_file_mapping(folio)->host;
356
357 /*
358 * If fscache is enabled, netfs will unlock pages.
359 */
360 if (netfs_inode(inode)->cache)
361 return 0;
362
363 return 1;
364}
365
/*
 * Fold the outcome of one completed NFS pgio header into the netfs
 * subrequest tracking data: propagate EOF, the error code or the byte
 * count, then drop the reference (presumably the one taken in
 * nfs_netfs_initiate_read() — confirm against the pgio call chain).
 */
void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data *netfs = hdr->netfs;
	struct netfs_io_subrequest *sreq;

	if (!netfs)
		return;

	sreq = netfs->sreq;
	/* A short read at EOF means netfs should zero-fill the tail */
	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);

	if (hdr->error)
		netfs->error = hdr->error;
	else
		atomic64_add(hdr->res.count, &netfs->transferred);

	nfs_netfs_put(netfs);
	hdr->netfs = NULL;
}
386
/*
 * Hooks by which the generic netfs library drives NFS reads: request
 * setup/teardown, cache attachment, RPC submission and size clamping.
 */
const struct netfs_request_ops nfs_netfs_ops = {
	.init_request		= nfs_netfs_init_request,
	.free_request		= nfs_netfs_free_request,
	.begin_cache_operation	= nfs_netfs_begin_cache_operation,
	.issue_read		= nfs_netfs_issue_read,
	.clamp_length		= nfs_netfs_clamp_length
};