// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

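	/* A write that lies entirely beyond the current EOF has nothing to
	 * fetch from the server: just clear the part of the page being
	 * prepared.
	 */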
	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
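	/* The dirty region is encoded in page->private: the "from" byte
	 * offset in the bottom bits and the "to" byte offset in the bits
	 * above AFS_PRIV_SHIFT.
	 */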
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	if (PagePrivate(page))
		set_page_private(page, priv);
	else
		attach_page_private(page, (void *)priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

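	/* If this write extended the file, update i_size now; recheck under
	 * the callback lock as a competing writer may have got in first.
	 */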
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

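	/* Work through the range in batches of up to PAGEVEC_SIZE pages,
	 * marking each one errored and evicting it from the pagecache.
	 */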
	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

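		/* Strip the dirty-region bookkeeping from each page now that
		 * the server has the data.
		 */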
		for (loop = 0; loop < count; loop++) {
			priv = (unsigned long)detach_page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

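	/* Walk the rest of the list looking for the first cached key that
	 * still passes validation.
	 */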
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
		atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
				(op->store.first * PAGE_SIZE + op->store.first_offset),
				&afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to, bool laundering)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	int ret;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.mapping = mapping;
	op->store.first = first;
	op->store.last = last;
	op->store.first_offset = offset;
	op->store.last_to = to;
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

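	/* A security error may mean the key we used has expired or been
	 * revoked, so on such an error, cycle through the other keys cached
	 * on the vnode and retry.
	 */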
	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	loff_t i_size, end;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

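	/* Extend the store forwards a batch of pages at a time, stopping once
	 * we've gathered 65536 pages or hit a page we can't use.
	 */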
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	end = (loff_t)last * PAGE_SIZE + to;
	i_size = i_size_read(&vnode->vfs_inode);

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
	if (end > i_size)
		to = i_size & ~PAGE_MASK;

	ret = afs_store_data(mapping, first, last, offset, to, false);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

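		/* Leave a page already under writeback to the flusher unless
		 * this is a data-integrity sync, in which case we must wait
		 * for it to finish.
		 */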
		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

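	/* In cyclic mode, sweep from the last writeback position to the end
	 * of the file, then wrap round to cover the start.
	 */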
	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	if (PagePrivate(vmf->page))
		set_page_private(vmf->page, priv);
	else
		attach_page_private(vmf->page, (void *)priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  vnode->wb_lock is taken here to
 * protect the list of cached keys.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
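	/* If the page is dirty, write the region recorded in page->private
	 * (defaulting to the whole page) back to the server before we let
	 * the page go.
	 */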
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, t, f, true);
	}

	priv = (unsigned long)detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}