/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/blk_types.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>

struct address_space;
struct fiemap_extent_info;
struct inode;
struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
struct page;
struct vm_area_struct;
struct vm_fault;

/*
 * Types of block ranges for iomap mappings:
 */
#define IOMAP_HOLE	0x01	/* no blocks allocated, need allocation */
#define IOMAP_DELALLOC	0x02	/* delayed allocation blocks */
#define IOMAP_MAPPED	0x03	/* blocks allocated at @addr */
#define IOMAP_UNWRITTEN	0x04	/* blocks allocated at @addr in unwritten state */
#define IOMAP_INLINE	0x05	/* data inline in the inode */

/*
 * Flags for all iomap mappings:
 *
 * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
 * written data and requires fdatasync to commit it to persistent storage.
 * This needs to take into account metadata changes that *may* be made at I/O
 * completion, such as file size updates from direct I/O.
 */
#define IOMAP_F_NEW		0x01	/* blocks have been newly allocated */
#define IOMAP_F_DIRTY		0x02	/* uncommitted metadata */
#define IOMAP_F_BUFFER_HEAD	0x04	/* file system requires buffer heads */
#define IOMAP_F_SIZE_CHANGED	0x08	/* file size has changed */
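
/*
 * For example, a filesystem's ->iomap_begin might set IOMAP_F_DIRTY when the
 * inode still has metadata waiting to be committed (a minimal sketch;
 * foofs_inode_has_pending_metadata() is a hypothetical helper):
 *
 *	if (foofs_inode_has_pending_metadata(inode))
 *		iomap->flags |= IOMAP_F_DIRTY;
 */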

/*
 * Flags that only need to be reported for IOMAP_REPORT requests:
 */
#define IOMAP_F_MERGED		0x10	/* contains multiple blocks/extents */
#define IOMAP_F_SHARED		0x20	/* block shared with another file */

/*
 * Flags from 0x1000 up are for file system specific usage:
 */
#define IOMAP_F_PRIVATE		0x1000


/*
 * Magic value for addr:
 */
#define IOMAP_NULL_ADDR -1ULL	/* addr is not valid */

struct iomap_page_ops;

struct iomap {
	u64			addr;	/* disk offset of mapping, bytes */
	loff_t			offset;	/* file offset of mapping, bytes */
	u64			length;	/* length of mapping, bytes */
	u16			type;	/* type of mapping */
	u16			flags;	/* flags for mapping */
	struct block_device	*bdev;	/* block device for I/O */
	struct dax_device	*dax_dev; /* dax_dev for dax operations */
	void			*inline_data;
	void			*private; /* filesystem private */
	const struct iomap_page_ops *page_ops;
};

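/*
 * Convert a byte position in the file to a 512-byte sector on the block
 * device: the offset of @pos within the mapping is added to the mapping's
 * disk address before shifting down to sector granularity.
 */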
static inline sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

/*
 * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
 * and page_done will be called for each page written to.  This only applies
 * to buffered writes as unbuffered writes will not typically have pages
 * associated with them.
 *
 * When page_prepare succeeds, page_done will always be called to do any
 * cleanup work necessary.  In that page_done call, @page will be NULL if the
 * associated page could not be obtained.
 */
struct iomap_page_ops {
	int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
			struct iomap *iomap);
	void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
			struct page *page, struct iomap *iomap);
};
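
/*
 * Example: a filesystem that must hold a lock or transaction across each
 * page write can acquire it in page_prepare and release it in page_done.
 * A minimal sketch (all foofs_* names are hypothetical):
 *
 *	static int foofs_page_prepare(struct inode *inode, loff_t pos,
 *			unsigned len, struct iomap *iomap)
 *	{
 *		return foofs_start_transaction(inode, pos, len);
 *	}
 *
 *	static void foofs_page_done(struct inode *inode, loff_t pos,
 *			unsigned copied, struct page *page, struct iomap *iomap)
 *	{
 *		foofs_end_transaction(inode);
 *	}
 *
 *	static const struct iomap_page_ops foofs_page_ops = {
 *		.page_prepare	= foofs_page_prepare,
 *		.page_done	= foofs_page_done,
 *	};
 */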

/*
 * Flags for iomap_begin / iomap_end.  No flag implies a read.
 */
#define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
#define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT		(1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT		(1 << 4) /* direct I/O */
#define IOMAP_NOWAIT		(1 << 5) /* do not block */

struct iomap_ops {
	/*
	 * Return the existing mapping at pos, or reserve space starting at
	 * pos for up to length, as long as we can do it as a single mapping.
	 * The actual length is returned in iomap->length.
	 */
	int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
			unsigned flags, struct iomap *iomap);

	/*
	 * Commit and/or unreserve space previously allocated using
	 * iomap_begin.  Written indicates the length of the successful
	 * write operation which needs to be committed, while the rest
	 * needs to be unreserved.  Written might be zero if no data was
	 * written.
	 */
	int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
			ssize_t written, unsigned flags, struct iomap *iomap);
};
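
/*
 * Example: a minimal ->iomap_begin that maps every request to a hole (a
 * sketch only; a real implementation would consult the filesystem's block
 * mapping and honor @flags such as IOMAP_WRITE):
 *
 *	static int foofs_iomap_begin(struct inode *inode, loff_t pos,
 *			loff_t length, unsigned flags, struct iomap *iomap)
 *	{
 *		iomap->addr = IOMAP_NULL_ADDR;
 *		iomap->offset = pos;
 *		iomap->length = length;
 *		iomap->type = IOMAP_HOLE;
 *		iomap->flags = 0;
 *		iomap->bdev = inode->i_sb->s_bdev;
 *		return 0;
 *	}
 *
 *	static const struct iomap_ops foofs_iomap_ops = {
 *		.iomap_begin	= foofs_iomap_begin,
 *	};
 */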

/*
 * Main iomap iterator function.
 */
typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
		void *data, struct iomap *iomap);

loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, const struct iomap_ops *ops, void *data,
		iomap_actor_t actor);

/*
 * Structure allocated for each page when block size < PAGE_SIZE to track
 * sub-page uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_count;
	atomic_t		write_count;
	DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
};

static inline struct iomap_page *to_iomap_page(struct page *page)
{
	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	return NULL;
}

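/*
 * The uptodate bitmap tracks one bit per 512-byte sector of the page.  A
 * rough sketch of testing whether the block covering @pos is uptodate (the
 * real accounting lives in fs/iomap/buffered-io.c):
 *
 *	struct iomap_page *iop = to_iomap_page(page);
 *	unsigned int block = offset_in_page(pos) >> inode->i_blkbits;
 *
 *	if (iop && test_bit(block, iop->uptodate))
 *		...block is uptodate...
 */
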
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
		const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops);
int iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops);
int iomap_set_page_dirty(struct page *page);
int iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count);
int iomap_releasepage(struct page *page, gfp_t gfp_mask);
void iomap_invalidatepage(struct page *page, unsigned int offset,
		unsigned int len);
#ifdef CONFIG_MIGRATION
int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode);
#else
#define iomap_migrate_page NULL
#endif
int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
		bool *did_zero, const struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
		const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		loff_t start, loff_t len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops);

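/*
 * These helpers are typically wired up in a filesystem's file and
 * address_space operations.  For instance (a sketch, reusing the
 * hypothetical foofs_iomap_ops from above):
 *
 *	static int foofs_readpage(struct file *file, struct page *page)
 *	{
 *		return iomap_readpage(page, &foofs_iomap_ops);
 *	}
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.readpage		= foofs_readpage,
 *		.set_page_dirty		= iomap_set_page_dirty,
 *		.releasepage		= iomap_releasepage,
 *		.invalidatepage		= iomap_invalidatepage,
 *		.migratepage		= iomap_migrate_page,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *	};
 */
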
/*
 * Structure for writeback I/O completions.
 */
struct iomap_ioend {
	struct list_head	io_list;	/* next ioend in chain */
	u16			io_type;
	u16			io_flags;	/* IOMAP_F_* */
	struct inode		*io_inode;	/* file being written to */
	size_t			io_size;	/* size of the extent */
	loff_t			io_offset;	/* offset in the file */
	void			*io_private;	/* file system private data */
	struct bio		*io_bio;	/* bio being built */
	struct bio		io_inline_bio;	/* MUST BE LAST! */
};

struct iomap_writeback_ops {
	/*
	 * Required, maps the blocks so that writeback can be performed on
	 * the range starting at offset.
	 */
	int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
				loff_t offset);

	/*
	 * Optional, allows the file system to perform actions just before
	 * submitting the bio and/or override the bio end_io handler for
	 * complex operations like copy on write extent manipulation or
	 * unwritten extent conversions.
	 */
	int (*prepare_ioend)(struct iomap_ioend *ioend, int status);

	/*
	 * Optional, allows the file system to discard state on a page where
	 * we failed to submit any I/O.
	 */
	void (*discard_page)(struct page *page);
};

struct iomap_writepage_ctx {
	struct iomap		iomap;
	struct iomap_ioend	*ioend;
	const struct iomap_writeback_ops *ops;
};

void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
		struct list_head *more_ioends,
		void (*merge_private)(struct iomap_ioend *ioend,
				struct iomap_ioend *next));
void iomap_sort_ioends(struct list_head *ioend_list);
int iomap_writepage(struct page *page, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops);
int iomap_writepages(struct address_space *mapping,
		struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops);

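/*
 * Example: a filesystem's ->writepages would allocate a writepage context on
 * the stack and hand it to iomap_writepages() (a sketch; foofs_map_blocks
 * is a hypothetical callback that fills in wpc->iomap for the offset):
 *
 *	static const struct iomap_writeback_ops foofs_writeback_ops = {
 *		.map_blocks	= foofs_map_blocks,
 *	};
 *
 *	static int foofs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&foofs_writeback_ops);
 *	}
 */
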
/*
 * Flags for direct I/O ->end_io:
 */
#define IOMAP_DIO_UNWRITTEN	(1 << 0)	/* covers unwritten extent(s) */
#define IOMAP_DIO_COW		(1 << 1)	/* covers COW extent(s) */

struct iomap_dio_ops {
	int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
		      unsigned flags);
};

ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		bool wait_for_completion);
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);

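/*
 * Example: an ->end_io callback that converts unwritten extents once a
 * direct write completes, and a call into iomap_dio_rw() (a sketch; the
 * foofs_* helpers are hypothetical):
 *
 *	static int foofs_dio_end_io(struct kiocb *iocb, ssize_t size,
 *			int error, unsigned flags)
 *	{
 *		if (!error && (flags & IOMAP_DIO_UNWRITTEN))
 *			return foofs_convert_unwritten(iocb, size);
 *		return error;
 *	}
 *
 *	static const struct iomap_dio_ops foofs_dio_ops = {
 *		.end_io		= foofs_dio_end_io,
 *	};
 *
 *	ret = iomap_dio_rw(iocb, iter, &foofs_iomap_ops, &foofs_dio_ops,
 *			is_sync_kiocb(iocb));
 */
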
#ifdef CONFIG_SWAP
struct file;
struct swap_info_struct;

int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops);
#else
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops)	(-EIO)
#endif /* CONFIG_SWAP */

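/*
 * iomap_swapfile_activate() is meant to back the ->swap_activate
 * address_space operation (a sketch; foofs_iomap_ops as above):
 *
 *	static int foofs_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&foofs_iomap_ops);
 *	}
 */
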
#endif /* LINUX_IOMAP_H */