/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>

struct cma;

struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
			enum dma_data_direction dir, gfp_t gfp,
			unsigned long attrs);
	void (*free_noncontiguous)(struct device *dev, size_t size,
			struct sg_table *sgt, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
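
/*
 * Illustrative sketch, not part of this header: an architecture or IOMMU
 * implementation typically fills in only the callbacks it provides and
 * leaves the rest NULL. The foo_*() functions below are hypothetical.
 *
 *	static const struct dma_map_ops foo_dma_ops = {
 *		.alloc		= foo_alloc,
 *		.free		= foo_free,
 *		.map_page	= foo_map_page,
 *		.unmap_page	= foo_unmap_page,
 *		.map_sg		= foo_map_sg,
 *		.unmap_sg	= foo_unmap_sg,
 *		.dma_supported	= foo_dma_supported,
 *	};
 */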

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
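
/*
 * Illustrative sketch (hypothetical bus code): set_dma_ops() is how bus or
 * IOMMU setup code installs per-device ops; get_dma_ops() then returns
 * those, falling back to the architecture default when none were set.
 *
 *	static void foo_bus_configure_dma(struct device *dev)
 *	{
 *		if (foo_bus_device_behind_iommu(dev))
 *			set_dma_ops(dev, &foo_iommu_dma_ops);
 *	}
 */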

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
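
/*
 * Illustrative sketch (hypothetical helper): callers of the contiguous API
 * typically try the device's CMA area first and fall back to the normal
 * page allocator, which is also what the !CONFIG_DMA_CMA stubs above
 * assume. Freeing goes through dma_free_contiguous() either way.
 *
 *	static struct page *foo_alloc_dma_pages(struct device *dev,
 *			size_t size, gfp_t gfp)
 *	{
 *		struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *		if (!page)
 *			page = alloc_pages(gfp, get_order(size));
 *		return page;
 *	}
 */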

#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void) { }
#endif /* CONFIG_DMA_PERNUMA_CMA */

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_DMA_DECLARE_COHERENT */
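
/*
 * Illustrative sketch: allocation paths are expected to consult a
 * per-device coherent area first. A non-zero return from
 * dma_alloc_from_dev_coherent() means the request was handled by that
 * area (*ret may still be NULL on failure) and no other allocator should
 * be tried. foo_dma_alloc() and foo_fallback_alloc() are hypothetical.
 *
 *	static void *foo_dma_alloc(struct device *dev, size_t size,
 *			dma_addr_t *dma_handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
 *			return ret;
 *		return foo_fallback_alloc(dev, size, dma_handle, gfp);
 *	}
 */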

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * Users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmapping and freeing easier we stash away the page
 * array as well (except for the fallback case). This can go away at any
 * time, e.g. when a vmap variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)
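
/*
 * Illustrative sketch (hypothetical callbacks): an ->alloc_noncontiguous
 * implementation allocates a dma_sgt_handle and returns &sh->sgt;
 * ->free_noncontiguous then recovers the handle to release the stashed
 * page array. foo_release_pages() is hypothetical.
 *
 *	static void foo_free_noncontiguous(struct device *dev, size_t size,
 *			struct sg_table *sgt, enum dma_data_direction dir)
 *	{
 *		struct dma_sgt_handle *sh = sgt_handle(sgt);
 *
 *		foo_release_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 *		sg_free_table(&sh->sgt);
 *		kfree(sh);
 *	}
 */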

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);
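/*
 * Illustrative sketch: platform glue can describe a constant CPU-to-bus
 * address offset for dma-direct, e.g. RAM visible to the CPU at
 * 0x80000000 but to the device at bus address 0 (all values made up).
 *
 *	ret = dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_1G);
 */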

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
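
/*
 * Illustrative sketch: a custom ->mmap implementation would use
 * dma_pgprot() to pick up the non-coherent page protection before
 * remapping, roughly as dma_common_mmap() does (error handling omitted,
 * foo_pfn() is hypothetical).
 *
 *	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 *	remap_pfn_range(vma, vma->vm_start, foo_pfn(cpu_addr),
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */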

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
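
/*
 * Illustrative sketch (hypothetical op): for non-coherent devices, a
 * sync_single_for_device callback boils down to the arch hook above; the
 * translation from dma_addr_t back to a physical address (foo_to_phys())
 * is implementation specific.
 *
 *	static void foo_sync_single_for_device(struct device *dev,
 *			dma_addr_t addr, size_t size, enum dma_data_direction dir)
 *	{
 *		if (!dev_is_dma_coherent(dev))
 *			arch_sync_dma_for_device(foo_to_phys(dev, addr), size, dir);
 *	}
 */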

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif /* CONFIG_ARCH_HAS_DMA_MAP_DIRECT */

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
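
/*
 * Illustrative sketch: firmware glue (e.g. OF or ACPI configuration code)
 * calls arch_setup_dma_ops() once it has discovered a device's DMA window,
 * IOMMU and coherence, and arch_teardown_dma_ops() when the device goes
 * away. The variables below stand in for whatever that code discovered.
 *
 *	arch_setup_dma_ops(dev, dma_base, size, iommu_ops, coherent);
 *	...
 *	arch_teardown_dma_ops(dev);
 */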

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

#endif /* _LINUX_DMA_MAP_OPS_H */