#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
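
/*
 * A hypothetical sketch (not part of this file): a machine class with a
 * fixed bus offset could provide the hooks above from its mach/memory.h.
 * "EXAMPLE_BUS_OFFSET" is an illustrative name, not a real platform macro:
 *
 *	#define __arch_pfn_to_dma(dev, pfn)	\
 *		((dma_addr_t)__pfn_to_phys(pfn) - EXAMPLE_BUS_OFFSET)
 *	#define __arch_dma_to_pfn(dev, addr)	\
 *		__phys_to_pfn((addr) + EXAMPLE_BUS_OFFSET)
 */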

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}
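
/*
 * A minimal usage sketch of the ownership model above, as seen from a
 * hypothetical driver ("dev", "buf" and "len" are illustrative); the
 * block is guarded by "#if 0" so it is never compiled:
 */
#if 0
static void example_tx(struct device *dev, void *buf, size_t len)
{
	/* CPU -> device: caches are cleaned, the device now owns "buf" */
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the DMA engine with "dma" and wait for completion ... */

	/* device -> CPU: ownership (and cache visibility) returns */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}
#endif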

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */

#ifdef CONFIG_DMABOUNCE
/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_cpu_to_dev(cpu_addr, size, dir);
	return virt_to_dma(dev, cpu_addr);
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_single(dev, cpu_addr, size, dir);
	debug_dma_map_page(dev, virt_to_page(cpu_addr),
			(unsigned long)cpu_addr & ~PAGE_MASK, size,
			dir, addr, true);

	return addr;
}
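
/*
 * A hedged usage sketch (hypothetical driver code): the returned handle
 * should be checked with dma_mapping_error() before use. Guarded out so
 * it is never compiled; all names are illustrative.
 */
#if 0
	dma_addr_t dma = dma_map_single(dev, cpu_addr, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* ... transfer runs; the device owns the buffer until unmap ... */
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
#endif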

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
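
/*
 * Sketch (hypothetical, never compiled): mapping a sub-range of a page,
 * e.g. for a driver that receives data into part of a page; "page",
 * "offset" and "len" are illustrative.
 */
#if 0
	dma_addr_t dma = dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
	/* ... device writes into the mapped range ... */
	dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);
#endif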

static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}
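
/*
 * Sketch (hypothetical, never compiled): letting the CPU examine a
 * still-mapped streaming buffer. Ownership must be handed back to the
 * device before it may touch the buffer again; "dma", "off" and "len"
 * are illustrative.
 */
#if 0
	dma_sync_single_range_for_cpu(dev, dma, off, len, DMA_FROM_DEVICE);
	/* the CPU may now read the synced range, e.g. to inspect a header */
	dma_sync_single_range_for_device(dev, dma, off, len, DMA_FROM_DEVICE);
#endif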

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}