--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ map_single() @@
 	/*
 	 * We don't need to sync the DMA buffer since
 	 * it was allocated via the coherent allocators.
 	 */
-	dma_cache_maint(ptr, size, dir);
+	__dma_single_cpu_to_dev(ptr, size, dir);
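The hunk above swaps the open-coded dma_cache_maint() call for __dma_single_cpu_to_dev(), the helper that hands buffer ownership from the CPU to the device. As a rough sketch of what that transfer involves on a non-coherent ARM core (reconstructed from memory of the same era's arch/arm/mm/dma-mapping.c, so treat the exact body as an assumption, not a quote from the tree):

/*
 * Minimal sketch, assuming ~2.6.34-era ARM helpers: CPU-to-device
 * ownership transfer does direction-dependent inner-cache maintenance,
 * then the matching outer (L2) cache operation.
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long paddr = __pa(kaddr);

	/* inner cache: clean for TO_DEVICE, invalidate for FROM_DEVICE */
	dmac_map_area(kaddr, size, dir);

	/* outer cache follows the same direction rule */
	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

Centralising the direction logic in one helper is what lets dmabounce stop caring about cache details here.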
@@ unmap_single() @@
 			memcpy(ptr, buf->safe, size);

 			/*
-			 * DMA buffers must have the same cache properties
-			 * as if they were really used for DMA - which means
-			 * data must be written back to RAM.  Note that
-			 * we don't use dmac_flush_range() here for the
-			 * bidirectional case because we know the cache
-			 * lines will be coherent with the data written.
+			 * Since we may have written to a page cache page,
+			 * we need to ensure that the data will be coherent
+			 * with user mappings.
 			 */
-			dmac_clean_range(ptr, ptr + size);
-			outer_clean_range(__pa(ptr), __pa(ptr) + size);
+			__cpuc_flush_dcache_area(ptr, size);
 		}
 		free_safe_buffer(dev->archdata.dmabounce, buf);
-	}
+	} else {
+		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
+	}
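Two things change above: the copy-back path now does a full clean-plus-invalidate via __cpuc_flush_dcache_area(), because the destination may be a page cache page with live user mappings, and non-bounced buffers get an explicit ownership hand-back to the CPU. A sketch of what that device-to-CPU hand-back plausibly does (assumed shape, mirroring the helper in the first hunk; not the exact kernel body):

/*
 * Sketch, assuming the same era's helpers: invalidate the outer (L2)
 * cache first so no stale lines mask the DMA'd data, then let the
 * inner cache do its direction-dependent maintenance.
 */
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
		enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}
	dmac_unmap_area(kaddr, size, dir);
}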
@@ dma_map_single() -> __dma_map_single() @@
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, ptr, size, dir);

 	return map_single(dev, ptr, size, dir);
 }
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_map_single);
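Renaming the implementation to __dma_map_single() frees the public name, so a single wrapper in arch/arm/include/asm/dma-mapping.h can serve both configurations. A simplified sketch of that arrangement (the #ifdef layout and inline bodies here are assumptions based on this patch's direction, not quoted from the header):

/*
 * Sketch of the intended call structure: dmabounce supplies the
 * out-of-line __dma_map_single() above, while non-dmabounce kernels
 * get an inline that only performs the ownership transfer.
 */
#ifdef CONFIG_DMABOUNCE
extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
#else
static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_cpu_to_dev(cpu_addr, size, dir);
	return virt_to_dma(dev, cpu_addr);
}
#endif

/* the one public entry point, shared by both configurations */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	return __dma_map_single(dev, cpu_addr, size, dir);
}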
@@ new function: __dma_unmap_single() @@
+/*
+ * see if a mapped address was really a "safe" buffer and if so, copy
+ * the data from the safe buffer back to the unsafe buffer and free up
+ * the safe buffer.  (basically return things back to the way they
+ * should be)
+ */
+void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
+{
+	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, dir);
+
+	unmap_single(dev, dma_addr, size, dir);
+}
+EXPORT_SYMBOL(__dma_unmap_single);
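The unmap side mirrors this: when dmabounce is compiled out there is no safe buffer to copy back, so the fallback can collapse to the same ownership hand-back seen in unmap_single()'s new else branch above. A plausible non-dmabounce counterpart (sketch, assumed):

/*
 * Assumed inline fallback: no bounce buffer exists, so unmapping is
 * just handing ownership of the original buffer back to the CPU.
 */
static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}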
@@ dma_map_page() -> __dma_map_page() @@
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);

 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(dma_map_page);
+EXPORT_SYMBOL(__dma_map_page);
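The page-based variant still funnels into map_single() via page_address(page) + offset, which is why dmabounce can only bounce lowmem pages. For comparison, a sketch of what a non-dmabounce __dma_map_page() would look like (assumed; __dma_page_cpu_to_dev() and page_to_dma() are era-specific helpers, so verify against the actual header):

/*
 * Assumed inline fallback: working on struct page + offset means no
 * page_address() is needed, so highmem pages can be mapped directly.
 */
static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return page_to_dma(dev, page) + offset;
}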
@@ dma_unmap_single() -> __dma_unmap_page() @@
  * see if a mapped address was really a "safe" buffer and if so, copy
  * the data from the safe buffer back to the unsafe buffer and free up
  * the safe buffer.  (basically return things back to the way they
  * should be)
  */
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);

 	unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(__dma_unmap_page);
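None of the double-underscore symbols are driver API; drivers keep calling the public dma_* wrappers and never see which implementation sits behind them. A hypothetical usage sketch (xmit_one() and its arguments are illustrative, not from this patch):

#include <linux/dma-mapping.h>

/*
 * Driver's-eye view: map, transfer, unmap.  Whether __dma_map_single()
 * comes from dmabounce or the inline fallback is invisible here.
 */
static int xmit_one(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the transfer using 'handle', wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}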
@@ unchanged context: dmabounce_sync_for_cpu() @@
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)