/*
 * V9FS cache definitions.
 *
 * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN 11

struct fscache_netfs v9fs_cache_netfs = {
	.name		= "9p",
	.version	= 0,
};

/**
 * v9fs_random_cachetag - Generate a random tag to be associated
 *			  with a new cache session.
 *
 * The value of jiffies is used as a fairly random cache tag.
 */

static
int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};

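/**
 * v9fs_cache_session_get_cookie - acquire an fscache cookie for a session
 * @v9ses: session to acquire a cookie for
 *
 * Generates a random cache tag if none was given at mount time, then
 * acquires a session index cookie from the 9p netfs primary index.
 * On failure v9ses->fscache is left NULL.
 */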
void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag) {
		if (v9fs_random_cachetag(v9ses) < 0) {
			v9ses->fscache = NULL;
			return;
		}
	}

	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses->cachetag,
						strlen(v9ses->cachetag),
						NULL, 0,
						v9ses, 0, true);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

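/**
 * v9fs_cache_session_put_cookie - relinquish a session's fscache cookie
 * @v9ses: session whose cookie is being dropped
 *
 * Relinquishes the session index cookie without retiring it and clears
 * v9ses->fscache.
 */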
void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, NULL, false);
	v9ses->fscache = NULL;
}

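/*
 * Compare the auxiliary data held by the cache (the remote qid version)
 * against the inode's current qid version; a mismatch marks the cached
 * object obsolete so the stale copy is not reused.
 */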
static enum
fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
					    const void *buffer,
					    uint16_t buflen,
					    loff_t object_size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= v9fs_cache_inode_check_aux,
};

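/**
 * v9fs_cache_inode_get_cookie - acquire an fscache cookie for an inode
 * @inode: inode to acquire a cookie for
 *
 * Only regular files are cached.  The qid path is used as the index key
 * and the qid version as the auxiliary (coherency) data.
 */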
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

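/**
 * v9fs_cache_inode_put_cookie - relinquish an inode's fscache cookie
 * @inode: inode whose cookie is being dropped
 *
 * Relinquishes the cookie, passing the current qid version as a final
 * auxiliary data update, without retiring the cached data.
 */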
void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, &v9inode->qid.version,
				  false);
	v9inode->fscache = NULL;
}

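/**
 * v9fs_cache_inode_flush_cookie - retire an inode's fscache cookie
 * @inode: inode whose cached data is being discarded
 *
 * Relinquishes the cookie with retire set, discarding the data held in
 * the cache for this inode, and clears v9inode->fscache.
 */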
void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, NULL, true);
	v9inode->fscache = NULL;
}

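/**
 * v9fs_cache_inode_set_cookie - adjust an inode's cookie at file open
 * @inode: inode being opened
 * @filp: file being opened
 *
 * A writable open flushes the cookie (discarding cached data); a read-only
 * open (re)acquires one.  Serialized by v9inode->fscache_lock.
 */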
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;

	mutex_lock(&v9inode->fscache_lock);

	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	mutex_unlock(&v9inode->fscache_lock);
}

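/**
 * v9fs_cache_inode_reset_cookie - retire and reacquire an inode's cookie
 * @inode: inode whose cached data is no longer valid
 *
 * Retires the old cookie, discarding any cached data, then acquires a
 * fresh cookie keyed on the same qid.  Serialized by v9inode->fscache_lock.
 */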
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

	old = v9inode->fscache;

	mutex_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, NULL, true);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	mutex_unlock(&v9inode->fscache_lock);
}

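/*
 * Ask fscache whether a page that it has pinned may be released; returns
 * true if the VM may now free the page.
 */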
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

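/*
 * A page is being invalidated: wait for any write to the cache to finish,
 * then uncache the page.
 */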
void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

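/*
 * Completion handler for a read from the cache: mark the page up to date
 * on success and unlock it in all cases.
 */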
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/**
 * __v9fs_readpage_from_fscache - read a page from cache
 *
 * Returns 0 if the page is in cache and a BIO is submitted,
 * 1 if the page is not in cache, and -error otherwise.
 */

int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 *
 * Returns 0 if the pages are in cache and a BIO is submitted,
 * 1 if the pages are not in cache, and -error otherwise.
 */

int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 *
 */

void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page,
				 i_size_read(&v9inode->vfs_inode), GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}