/******************************************************************************
 * grant_table.h
 *
 * Two sets of functionality:
 * 1. Granting foreign access to our memory reservation.
 * 2. Accessing others' memory reservations via grant references.
 * (i.e., mechanisms for both sender and recipient of grant references)
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __ASM_GNTTAB_H__
#define __ASM_GNTTAB_H__

#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/page.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/kernel.h>

/*
 * Technically there's no reliably invalid grant reference or grant handle,
 * so pick the value that is the most unlikely one to be observed valid.
 */
#define INVALID_GRANT_REF          ((grant_ref_t)-1)
#define INVALID_GRANT_HANDLE       ((grant_handle_t)-1)

#define GNTTAB_RESERVED_XENSTORE 1

/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4

struct gnttab_free_callback {
        struct gnttab_free_callback *next;
        void (*fn)(void *);
        void *arg;
        u16 count;
};

struct gntab_unmap_queue_data;

typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);

struct gntab_unmap_queue_data
{
        struct delayed_work gnttab_work;
        void *data;
        gnttab_unmap_refs_done done;
        struct gnttab_unmap_grant_ref *unmap_ops;
        struct gnttab_unmap_grant_ref *kunmap_ops;
        struct page **pages;
        unsigned int count;
        unsigned int age;
};

int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
                                int readonly);

/*
 * End access through the given grant reference, iff the grant entry is no
 * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
 * use.
 */
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);

/*
 * Eventually end access through the given grant reference, and once that
 * access has been ended, free the given page too. Access will be ended
 * immediately iff the grant entry is not in use, otherwise it will happen
 * some time later. page may be 0, in which case no freeing will occur.
 * Note that the granted page might still be accessed (read or write) by the
 * other side after gnttab_end_foreign_access() returns, so even if page was
 * specified as 0 it is not allowed to just reuse the page for other
 * purposes immediately.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
                               unsigned long page);

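/*
 * Usage sketch (illustrative only, not part of this header): a frontend-
 * style driver granting one freshly allocated page to a backend domain and
 * later revoking the grant. "otherend_id" is an assumption of this example
 * and stands for the backend domain id, typically obtained from xenbus.
 *
 *      unsigned long vaddr = __get_free_page(GFP_KERNEL);
 *      int ref;
 *
 *      if (!vaddr)
 *              return -ENOMEM;
 *      ref = gnttab_grant_foreign_access(otherend_id, virt_to_gfn(vaddr), 0);
 *      if (ref < 0) {
 *              free_page(vaddr);
 *              return ref;
 *      }
 *      ... advertise "ref" to the other end and perform I/O ...
 *
 *      gnttab_end_foreign_access(ref, 0, vaddr);    (also frees the page)
 */
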
/*
 * End access through the given grant reference, iff the grant entry is
 * no longer in use. If access is successfully ended, the grant reference
 * is also deallocated.
 * Return 1 if the grant entry was freed, 0 if it is still in use.
 */
int gnttab_try_end_foreign_access(grant_ref_t ref);

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);

int gnttab_query_foreign_access(grant_ref_t ref);

/*
 * operations on reserved batches of grant references
 */
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);

void gnttab_free_grant_reference(grant_ref_t ref);

void gnttab_free_grant_references(grant_ref_t head);

int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);

int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);

void gnttab_release_grant_reference(grant_ref_t *private_head,
                                    grant_ref_t release);

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
                                  void (*fn)(void *), void *arg, u16 count);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);

void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
                                     unsigned long frame, int readonly);

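/*
 * Usage sketch (illustrative only): callers needing many grants at once
 * typically reserve a batch first and then claim individual references
 * from it. "nr", "pages" and "otherend_id" are assumptions of this example,
 * provided by the caller.
 *
 *      grant_ref_t gref_head;
 *      int i, ref;
 *
 *      if (gnttab_alloc_grant_references(nr, &gref_head) < 0)
 *              return -ENOSPC;
 *      for (i = 0; i < nr; i++) {
 *              ref = gnttab_claim_grant_reference(&gref_head);
 *              BUG_ON(ref < 0);    (cannot fail, the batch was reserved)
 *              gnttab_grant_foreign_access_ref(ref, otherend_id,
 *                                              xen_page_to_gfn(pages[i]), 0);
 *      }
 *      ...
 *      gnttab_free_grant_references(gref_head);    (releases unclaimed refs)
 */
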
/* Give access to the first 4K of the page */
static inline void gnttab_page_grant_foreign_access_ref_one(
        grant_ref_t ref, domid_t domid,
        struct page *page, int readonly)
{
        gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
                                        readonly);
}

void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
                                       unsigned long pfn);

static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
                  uint32_t flags, grant_ref_t ref, domid_t domid)
{
        if (flags & GNTMAP_contains_pte)
                map->host_addr = addr;
        else if (xen_feature(XENFEAT_auto_translated_physmap))
                map->host_addr = __pa(addr);
        else
                map->host_addr = addr;

        map->flags = flags;
        map->ref = ref;
        map->dom = domid;
        map->status = 1; /* arbitrary positive value */
}

static inline void
gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
                    uint32_t flags, grant_handle_t handle)
{
        if (flags & GNTMAP_contains_pte)
                unmap->host_addr = addr;
        else if (xen_feature(XENFEAT_auto_translated_physmap))
                unmap->host_addr = __pa(addr);
        else
                unmap->host_addr = addr;

        unmap->handle = handle;
        unmap->dev_bus_addr = 0;
}

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
                           void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
                           grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

struct grant_frames {
        xen_pfn_t *pfn;
        unsigned int count;
        void *vaddr;
};
extern struct grant_frames xen_auto_xlat_grant_frames;
unsigned int gnttab_max_grant_frames(void);
int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
void gnttab_free_auto_xlat_frames(void);

#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))

int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);

struct gnttab_page_cache {
        spinlock_t lock;
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
        struct page *pages;
#else
        struct list_head pages;
#endif
        unsigned int num_pages;
};

void gnttab_page_cache_init(struct gnttab_page_cache *cache);
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
                           unsigned int num);
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
                              unsigned int num);

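/*
 * Usage sketch (illustrative only): backends keep a per-device cache of
 * pages used for grant mappings so that map/unmap cycles do not have to go
 * through the page allocator every time. "cache" is an assumption of this
 * example and is expected to have been set up once with
 * gnttab_page_cache_init().
 *
 *      struct page *page;
 *
 *      if (gnttab_page_cache_get(&cache, &page)) {
 *              if (gnttab_alloc_pages(1, &page))
 *                      return -ENOMEM;
 *      }
 *      ... use the page as the target of a grant mapping ...
 *      gnttab_page_cache_put(&cache, &page, 1);
 */
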
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args {
        /* Device for which DMA memory will be/was allocated. */
        struct device *dev;
        /* If set, the DMA buffer is coherent; otherwise it is write-combine. */
        bool coherent;

        int nr_pages;
        struct page **pages;
        xen_pfn_t *frames;
        void *vaddr;
        dma_addr_t dev_bus_addr;
};

int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
#endif

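/*
 * Usage sketch (illustrative only, CONFIG_XEN_GRANT_DMA_ALLOC): "dev",
 * "nr_pages" and the caller-allocated "pages" and "frames" arrays are
 * assumptions of this example; gnttab_dma_alloc_pages() fills them in
 * along with vaddr and dev_bus_addr.
 *
 *      struct gnttab_dma_alloc_args args = {
 *              .dev      = dev,
 *              .coherent = true,
 *              .nr_pages = nr_pages,
 *              .pages    = pages,
 *              .frames   = frames,
 *      };
 *
 *      if (gnttab_dma_alloc_pages(&args))
 *              return -ENOMEM;
 *      ... grant args.pages / use args.vaddr and args.dev_bus_addr ...
 *      gnttab_dma_free_pages(&args);
 */
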
int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct gnttab_unmap_grant_ref *kunmap_ops,
                      struct page **pages, unsigned int count);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);

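/*
 * Usage sketch (illustrative only): mapping a single foreign grant into the
 * kernel with gnttab_set_map_op()/gnttab_map_refs() and unmapping it again.
 * "gref" and "otherend_id" are assumptions of this example, communicated by
 * the granting domain; NULL kmap_ops/kunmap_ops is what most in-kernel users
 * pass when no separate kernel PTE handling is needed.
 *
 *      struct gnttab_map_grant_ref map;
 *      struct gnttab_unmap_grant_ref unmap;
 *      struct page *page;
 *      void *vaddr;
 *
 *      if (gnttab_alloc_pages(1, &page))
 *              return -ENOMEM;
 *      vaddr = pfn_to_kaddr(page_to_pfn(page));
 *      gnttab_set_map_op(&map, (unsigned long)vaddr, GNTMAP_host_map,
 *                        gref, otherend_id);
 *      if (gnttab_map_refs(&map, NULL, &page, 1) || map.status != GNTST_okay)
 *              ... handle the error ...
 *      ... the foreign page is now accessible through vaddr ...
 *
 *      gnttab_set_unmap_op(&unmap, (unsigned long)vaddr, GNTMAP_host_map,
 *                          map.handle);
 *      gnttab_unmap_refs(&unmap, NULL, &page, 1);
 *      gnttab_free_pages(1, &page);
 */
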
/* Perform a batch of grant map/copy operations. Retry every batch slot
 * for which the hypervisor returns GNTST_eagain. This is typically due
 * to paged out target frames.
 *
 * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds.
 *
 * The return value in each and every status field of the batch is
 * guaranteed not to be GNTST_eagain.
 */
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);

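/*
 * Usage sketch (illustrative only): copying "len" bytes (at most one grant's
 * worth) from a local buffer "src" into a grant "gref" offered by domain
 * "otherend_id"; all four names are assumptions of this example.
 *
 *      struct gnttab_copy op = {
 *              .flags         = GNTCOPY_dest_gref,
 *              .len           = len,
 *              .source.domid  = DOMID_SELF,
 *              .source.u.gmfn = virt_to_gfn(src),
 *              .source.offset = xen_offset_in_page(src),
 *              .dest.domid    = otherend_id,
 *              .dest.u.ref    = gref,
 *              .dest.offset   = 0,
 *      };
 *
 *      gnttab_batch_copy(&op, 1);
 *      if (op.status != GNTST_okay)
 *              ... handle the error ...
 */
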
struct xen_page_foreign {
        domid_t domid;
        grant_ref_t gref;
};

static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
{
        if (!PageForeign(page))
                return NULL;
#if BITS_PER_LONG < 64
        return (struct xen_page_foreign *)page->private;
#else
        BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
        return (struct xen_page_foreign *)&page->private;
#endif
}

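/*
 * Usage sketch (illustrative only): pages that currently back a foreign
 * grant mapping set up via gnttab_map_refs() carry the granting domain and
 * grant reference, which callers can query:
 *
 *      struct xen_page_foreign *foreign = xen_page_foreign(page);
 *
 *      if (foreign)
 *              pr_debug("foreign page: dom%d gref %u\n",
 *                       foreign->domid, foreign->gref);
 */
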
/* Split a Linux page into chunks of the size of a grant and call fn on each
 * chunk.
 *
 * Parameters of fn:
 *	gfn: guest frame number
 *	offset: offset in the grant
 *	len: length of the data in the grant
 *	data: internal information
 */
typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
                               unsigned int len, void *data);

void gnttab_foreach_grant_in_range(struct page *page,
                                   unsigned int offset,
                                   unsigned int len,
                                   xen_grant_fn_t fn,
                                   void *data);

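/*
 * Usage sketch (illustrative only): the callback name and the request
 * structure below are made-up; a typical user walks a (possibly larger than
 * 4K) Linux page and emits one grant per XEN_PAGE_SIZE chunk.
 *
 *      static void setup_one_grant(unsigned long gfn, unsigned int offset,
 *                                  unsigned int len, void *data)
 *      {
 *              struct my_request *req = data;
 *
 *              ... grant access to gfn and record offset/len in req ...
 *      }
 *
 *      gnttab_foreach_grant_in_range(page, offset, len, setup_one_grant, req);
 */
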
/* Helper to call fn only on the first "grant chunk" */
static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
                                        unsigned len, xen_grant_fn_t fn,
                                        void *data)
{
        /* The first request is limited to the size of one grant */
        len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
                    len);

        gnttab_foreach_grant_in_range(page, offset, len, fn, data);
}

/* Get @nr_grefs grants from an array of pages and call fn for each grant */
void gnttab_foreach_grant(struct page **pages,
                          unsigned int nr_grefs,
                          xen_grant_fn_t fn,
                          void *data);

/* Get the number of grants in a specified region
 *
 * start: offset from the beginning of the first page
 * len: total length of data (can cross multiple pages)
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
                                              unsigned int len)
{
        return XEN_PFN_UP(xen_offset_in_page(start) + len);
}

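/*
 * For example, with XEN_PAGE_SIZE of 4096, a buffer starting 100 bytes into
 * a page and 8000 bytes long needs XEN_PFN_UP(100 + 8000) = 2 grants.
 */
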
#endif /* __ASM_GNTTAB_H__ */