/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * We use page->private to hold the amount of the page that we've written to,
 * splitting the field into two parts.  However, we need to represent a range
 * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
 */
#if PAGE_SIZE > 32768
#define AFS_PRIV_MAX	0xffffffff
#define AFS_PRIV_SHIFT	32
#else
#define AFS_PRIV_MAX	0xffff
#define AFS_PRIV_SHIFT	16
#endif
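
/*
 * For illustration: on a 4K-page system, a page dirtied from byte 0x100 up
 * to (but not including) byte 0x800 would be recorded as
 *
 *	priv = (0x800UL << AFS_PRIV_SHIFT) | 0x100;
 *
 * and decoded with f = priv & AFS_PRIV_MAX and t = priv >> AFS_PRIV_SHIFT.
 * Note that the "to" value can be PAGE_SIZE itself, which is why a 16-bit
 * half-field cannot accommodate 64K pages.
 */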

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	atomic_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (to < f || from > t)
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}
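
	/* Worked example (illustrative): an existing dirty record of
	 * 0x200..0x800 merged with a new write of 0x700..0xa00 widens the
	 * record to 0x200..0xa00; a new write at 0xc00..0xd00, by contrast,
	 * neither overlaps nor abuts the existing range and takes the
	 * flush_conflicting_write path below instead.
	 */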

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				return ret;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	unlock_page(page);
	put_page(page);

	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv, 0);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv, 0);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = vnode->cb_break + vnode->cb_s_break;
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
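	/* (A candidate page can only be accreted if its dirty region starts
	 * at offset 0, and accretion only continues past a page whose dirty
	 * region extends all the way to PAGE_SIZE; this keeps the byte range
	 * handed to the server contiguous.)
	 */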
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;

	WARN_ON(offset == to);

	if (start >= final_page || to < PAGE_SIZE)
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			if (to != PAGE_SIZE)
				break;
			page = pages[loop];
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0) {
				unlock_page(page);
				break;
			}
			to = t;

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
				       1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		if (page->index > end) {
			*_next = index;
			put_page(page);
			_leave(" = 0 [%lx]", *_next);
			return 0;
		}

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv, 0);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
	_enter("");

	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	return vfs_fsync(file, 0);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  This takes and releases
 * vnode->wb_lock itself, so the caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}