/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

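	/* A write that starts at or beyond the current EOF needs no data from
	 * the server; just clear the part of the page being prepared.
	 */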
	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
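	/* The dirty region is kept in page->private with the start offset in
	 * the bits below AFS_PRIV_SHIFT and the end offset in the bits above
	 * it.
	 */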

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

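	/* If this write extends the file, update i_size; recheck it under the
	 * wb_lock so that a concurrent extension isn't undone.
	 */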
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

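	/* Clear each page's dirty-region record and end writeback on it, a
	 * pagevec's worth at a time.
	 */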
	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_status_cb *scb;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
	if (!scb)
		return -ENOMEM;

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	kfree(scb);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

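	/* Perform the store as a single vnode operation, rotating to another
	 * fileserver if the current one fails or is unreachable.
	 */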
	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
		afs_dataversion_t data_version = vnode->status.data_version + 1;

		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break,
					&data_version, scb);
		if (fc.ac.error == 0)
			afs_pages_written_back(vnode, first, last);
		ret = afs_end_vnode_operation(&fc);
	}

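	/* On success, account the bytes stored; on a permission or key error,
	 * drop this key and retry with the next one on the vnode's list.
	 */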
	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

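	/* Extend the batch by up to ARRAY_SIZE(pages) pages at a time, capping
	 * the whole run at 65536 pages.
	 */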
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

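	/* Write back as large a run of contiguous dirty pages as possible,
	 * starting with this one; failures are recorded against the mapping
	 * rather than being returned to the caller.
	 */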
	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

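	/* For cyclic writeback, resume from the remembered index and wrap
	 * round to the start of the file if there is still write quota left.
	 */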
	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

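	/* Record that the entire page may need to be written back. */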
	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  The caller must hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

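	/* If the page is still dirty, write it back to the server before it is
	 * invalidated.
	 */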
	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, t, f);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}