/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer_dev(struct device *dev, dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
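
/*
 * find_safe_buffer() here, and alloc_safe_buffer()/free_safe_buffer()
 * below, are the safe-buffer pool helpers defined earlier in this file
 * (an assumption based on the usual dmabounce.c layout).
 */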
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}
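
/*
 * Worked example (illustrative numbers, not from this file): with a
 * 24-bit mask *dev->dma_mask == 0x00ffffff, limit = (mask + 1) & ~mask
 * = 0x01000000, so any mapping bigger than 16MiB fails with -E2BIG.
 * A 0x2000-byte buffer at dma_addr 0x00fff000 ends at 0x01000fff, and
 * (0x00fff000 | 0x01000fff) & ~0x00ffffff is non-zero, so the buffer
 * straddles the mask boundary and must be bounced.  For a full 32-bit
 * mask, mask + 1 wraps to zero, so no size limit is enforced.
 */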
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return ~0;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}
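
/*
 * ~0 doubles as the DMA error cookie: alloc_safe_buffer() failure here
 * and the failure paths in __dma_map_page() below all return it, and
 * (assuming the ARM dma_mapping_error() of this vintage, which tests
 * for ~0) find_safe_buffer_dev() screens it out again at unmap time.
 */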
static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}
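
/*
 * The copy-back above runs only for DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL: for DMA_TO_DEVICE the data was already copied
 * into the safe buffer by map_single(), so nothing needs to move.
 */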
/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return ~0;

	if (ret == 0) {
		__dma_page_cpu_to_dev(page, offset, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(__dma_map_page);
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
			dma_addr & ~PAGE_MASK, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);
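
/*
 * Driver-side round trip (illustrative sketch, not part of this file;
 * 'dev', 'buf' and 'len' are hypothetical).  On this era of ARM
 * kernels the generic calls below end up in __dma_map_page() and
 * __dma_unmap_page() above whenever dmabounce is registered:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...let the device DMA into 'dma'...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */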