// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

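/*
 * Dirty-range bookkeeping: the write paths below record how much of a page
 * has been altered by packing a half-open byte range [from, to) into
 * page->private:
 *
 *	priv = ((unsigned long)to << AFS_PRIV_SHIFT) | from;
 *	from = priv & AFS_PRIV_MAX;
 *	to   = priv >> AFS_PRIV_SHIFT;
 *
 * As an illustration, with the common 4KiB PAGE_SIZE, a write covering
 * bytes 0x100-0x2ff of a page is recorded as from = 0x100, to = 0x300.
 * The BUILD_BUG_ON() in afs_write_begin() checks that page->private is
 * wide enough to hold both halves.
 */
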
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

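/*
 * Note on the i_size update in afs_write_end() below: i_size is sampled
 * once without the lock and, only if this write might extend the file, is
 * rechecked and updated under vnode->cb_lock.  This check-lock-recheck
 * pattern keeps the seqlock off the common non-extending path while still
 * serialising concurrent extenders.
 */
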
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned int f, from = pos & (PAGE_SIZE - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = (unsigned long)t << AFS_PRIV_SHIFT;
		priv |= f;
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
				     page->index, priv);
	} else {
		f = from;
		t = to;
		priv = (unsigned long)t << AFS_PRIV_SHIFT;
		priv |= f;
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
				     page->index, priv);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

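/*
 * The next three helpers (afs_kill_pages(), afs_redirty_pages() and
 * afs_pages_written_back()) all walk a page range [first, last] in batches
 * of up to PAGEVEC_SIZE pages looked up with find_get_pages_contig(),
 * apply a per-page action and then release the batch.  Only the action
 * differs: discard the pages, redirty them, or mark them clean.
 */
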
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = (unsigned long)detach_page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  The keys used to author writes are
 * cached on the vnode.  *_wbk holds the last writeback key tried, or NULL;
 * if it's set, the search resumes from the key after it.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

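/*
 * RPC completion hooks for the store operation.  afs_store_data_success()
 * runs once the server has replied: it commits the returned status and, on
 * success, marks the stored pages clean via afs_pages_written_back() -
 * unless this is a laundering write, in which case afs_launder_page()
 * cleans up its single page itself - and accounts the stored bytes.
 */
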
403static void afs_store_data_success(struct afs_operation *op)
404{
405 struct afs_vnode *vnode = op->file[0].vnode;
406
David Howellsda8d0752020-06-13 19:34:59 +0100407 op->ctime = op->file[0].scb.status.mtime_client;
David Howellse49c7b22020-04-10 20:51:51 +0100408 afs_vnode_commit_status(op, &op->file[0]);
409 if (op->error == 0) {
David Howellsd383e342020-10-22 14:40:31 +0100410 if (!op->store.laundering)
411 afs_pages_written_back(vnode, op->store.first, op->store.last);
David Howellse49c7b22020-04-10 20:51:51 +0100412 afs_stat_v(vnode, n_stores);
413 atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
414 (op->store.first * PAGE_SIZE + op->store.first_offset),
415 &afs_v2net(vnode)->n_store_bytes);
416 }
417}
418
static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

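/*
 * Permission-related failures from the server are retried with the next
 * cached writeback key: afs_store_data() below reissues the operation with
 * each key in turn until one is accepted or the supply of keys runs out.
 */
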
/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to, bool laundering)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	int ret;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.mapping = mapping;
	op->store.first = first;
	op->store.last = last;
	op->store.first_offset = offset;
	op->store.last_to = to;
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

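/*
 * Note on the page gathering in afs_write_back_from_locked_page(): starting
 * from the locked primary page, the loop below extends the store forwards
 * over consecutive pages, but only while each candidate is immediately
 * lockable, dirty and not already under writeback, and only while its dirty
 * region keeps the stored byte range contiguous.  If AFS_VNODE_NEW_CONTENT
 * is set (the file is being filled locally), whole pages may be taken and
 * the gaps between writes merged.  A single store is capped at 65536 pages.
 */
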
/*
 * Synchronously write back the locked page and any subsequent non-locked
 * dirty pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	loff_t i_size, end;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	end = (loff_t)last * PAGE_SIZE + to;
	i_size = i_size_read(&vnode->vfs_inode);

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
	if (end > i_size)
		to = i_size & ~PAGE_MASK;

	ret = afs_store_data(mapping, first, last, offset, to, false);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

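/*
 * Note that afs_writepage() below deliberately returns 0 even if the store
 * failed: by that point afs_write_back_from_locked_page() has already
 * redirtied or killed the pages and recorded the error against the mapping
 * with mapping_set_error(), so the failure is reported through the mapping
 * rather than the return value.
 */
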
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

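/*
 * Note on the range selection in afs_writepages() below: for cyclic
 * writeback the scan resumes at mapping->writeback_index and, if quota
 * remains, wraps around to cover the start of the file, much as the
 * generic write_cache_pages() helper behaves for other filesystems.
 */
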
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

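/*
 * Note that afs_page_mkwrite() below records the whole page ([0, PAGE_SIZE))
 * as dirty in page->private: once userspace can write to the mapping there
 * is no way to tell which bytes of the page will actually change.
 */
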
/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	if (PagePrivate(vmf->page))
		set_page_private(vmf->page, priv);
	else
		attach_page_private(vmf->page, (void *)priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself,
 * so the caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

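/*
 * Laundering is the synchronous variant of the store path: the page is
 * written back with the laundering flag set so that the success handler
 * skips afs_pages_written_back(), leaving afs_launder_page() to detach
 * page->private and deal with the cache state itself.
 */
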
/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		/* Pass the dirty region as (offset, to) - the original code
		 * passed (t, f), which inverted the range. */
		ret = afs_store_data(mapping, page->index, page->index, f, t, true);
	}

	priv = (unsigned long)detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}