// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

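/*
 * Note: AFS keeps its dirty-range bookkeeping in page->private rather than
 * in buffer heads, so dirtying can be delegated wholesale to
 * __set_page_dirty_nobuffers(), which just marks the page and tags it in
 * the page cache without touching any buffer state.
 */
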
/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

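/*
 * A note on the dirty-range bookkeeping used below: afs_page_dirty(),
 * afs_page_dirty_from() and afs_page_dirty_to() are helpers from internal.h
 * that pack a single dirtied byte range into page->private.  A minimal
 * sketch of the presumed usage (the actual masks and shift counts live in
 * internal.h and may differ):
 *
 *	priv = afs_page_dirty(from, to);	-- encode the range [from, to)
 *	from = afs_page_dirty_from(priv);	-- first byte dirtied
 *	to   = afs_page_dirty_to(priv);		-- byte after the last one dirtied
 *
 * Only one contiguous range can be recorded per page, which is why a write
 * that neither overlaps nor abuts the recorded range forces a flush in
 * afs_write_begin() below.
 */
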
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE - 1 > __AFS_PAGE_PRIV_MASK && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

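/*
 * For context, ->write_begin() and ->write_end() are called in a loop by
 * generic_perform_write() for each chunk of a buffered write; roughly the
 * following, simplified from mm/filemap.c of this era:
 *
 *	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 *				    &page, &fsdata);
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *				  page, fsdata);
 *
 * which is why afs_write_end() below must cope with a short copy
 * (copied < len) from a faulting user buffer.
 */
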
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned int f, from = pos & (PAGE_SIZE - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
				     page->index, priv);
	} else {
		priv = afs_page_dirty(from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
				     page->index, priv);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

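/*
 * Note that afs_write_end() above deliberately rechecks i_size under
 * vnode->cb_lock before raising it: the unlocked first comparison is only an
 * optimisation, and a racing writer may already have extended the file past
 * maybe_i_size between the two reads.
 */
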
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = (unsigned long)detach_page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

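/*
 * The three helpers above share one batching idiom: walk the page range in
 * chunks of at most PAGEVEC_SIZE pages via find_get_pages_contig(), process
 * each batch, then drop the batch's page references with __pagevec_release()
 * before advancing to the next chunk.
 */
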
/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

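/*
 * afs_get_writeback_key() acts as a cursor over vnode->wb_keys.  A sketch of
 * the intended calling pattern, with do_store() standing in for issuing the
 * actual StoreData RPC (see afs_store_data() below for the real retry loop):
 *
 *	struct afs_wb_key *wbk = NULL;
 *	int ret = afs_get_writeback_key(vnode, &wbk);
 *
 *	while (ret == 0 && (ret = do_store(wbk->key)) == -EKEYEXPIRED)
 *		ret = afs_get_writeback_key(vnode, &wbk);
 *	afs_put_wb_key(wbk);
 *
 * Each successful call takes a reference on the key record it returns and
 * drops the reference on the one passed in.
 */
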
static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
		atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
				(op->store.first * PAGE_SIZE + op->store.first_offset),
				&afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

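/*
 * Broadly, afs_store_data() below drives the StoreData RPC through the
 * generic afs_operation machinery: afs_begin_vnode_operation() and
 * afs_wait_for_operation() pick a server, dispatch ->issue_afs_rpc or
 * ->issue_yfs_rpc depending on what the server speaks, and invoke ->success
 * once the reply has been unmarshalled.
 */
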
/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to, bool laundering)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	int ret;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.mapping = mapping;
	op->store.first = first;
	op->store.last = last;
	op->store.first_offset = offset;
	op->store.last_to = to;
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked
 * dirty pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	loff_t i_size, end;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = afs_page_dirty_from(priv);
	to = afs_page_dirty_to(priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	end = (loff_t)last * PAGE_SIZE + to;
	i_size = i_size_read(&vnode->vfs_inode);

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
	if (end > i_size)
		to = i_size & ~PAGE_MASK;

	ret = afs_store_data(mapping, first, last, offset, to, false);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

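/*
 * A worked example with hypothetical numbers: if the primary page is index 3,
 * dirty from byte 0x200 to PAGE_SIZE, and pages 4-6 are fully dirty, the
 * gathering loop above collects pages 3-6 into a single run, so one
 * FS.StoreData RPC covers bytes 3 * PAGE_SIZE + 0x200 through
 * 6 * PAGE_SIZE + PAGE_SIZE, with the final "to" clipped back if that would
 * run past i_size.
 */
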
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

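/*
 * Note that afs_writepage() above returns 0 even when the store failed:
 * afs_write_back_from_locked_page() has already recorded the error against
 * the mapping with mapping_set_error(), so it surfaces at fsync() or close()
 * time rather than being handed back to the flusher thread.
 */
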
/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = afs_page_dirty(0, PAGE_SIZE);
	priv = afs_page_dirty_mmapped(priv);
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	if (PagePrivate(vmf->page))
		set_page_private(vmf->page, priv);
	else
		attach_page_private(vmf->page, (void *)priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

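/*
 * Note that a shared-mmap fault gives no indication of which bytes will
 * actually be modified, so afs_page_mkwrite() above records the whole page
 * (0 to PAGE_SIZE) as dirty and additionally flags the range with
 * afs_page_dirty_mmapped(), presumably so that later merging and writeback
 * decisions can tell mmap-dirtied pages apart from write()-dirtied ones.
 */
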
/*
 * Prune the keys cached for writeback.  The wb_lock is taken here, so the
 * caller must not hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t, true);
	}

	priv = (unsigned long)detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}