--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -15,16 +15,16 @@
 #include <linux/scatterlist.h>
 #include <linux/string.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>

 #include <asm/cache.h>
 #include <asm/io.h>

 #include <dma-coherence.h>

-static inline unsigned long dma_addr_to_virt(struct device *dev,
+static inline struct page *dma_addr_to_page(struct device *dev,
         dma_addr_t dma_addr)
 {
-        unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);
-
-        return (unsigned long)phys_to_virt(addr);
+        return pfn_to_page(
+                plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
 }
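phys_to_virt() is only defined for lowmem, so the old helper could not describe a highmem page at all; returning the struct page instead defers any temporary mapping to the sync path. The same pfn/offset split recurs in the hunks below. A minimal userspace sketch of that arithmetic, with PAGE_SHIFT as a stand-in for the kernel constant and a printout in place of pfn_to_page():

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                  /* stand-in: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            uint64_t phys = 0x12345678;    /* hypothetical physical address */

            /* pfn_to_page(phys >> PAGE_SHIFT) selects the page; the low
             * bits that survive & ~PAGE_MASK are the offset inside it. */
            printf("pfn    = %llu\n", (unsigned long long)(phys >> PAGE_SHIFT));
            printf("offset = %llu\n", (unsigned long long)(phys & ~PAGE_MASK));
            return 0;
    }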
@@ -148,32 +148,69 @@
         free_pages(addr, get_order(size));
 }

-static inline void __dma_sync(unsigned long addr, size_t size,
+static inline void __dma_sync_virtual(void *addr, size_t size,
         enum dma_data_direction direction)
 {
         switch (direction) {
         case DMA_TO_DEVICE:
-                dma_cache_wback(addr, size);
+                dma_cache_wback((unsigned long)addr, size);
                 break;

         case DMA_FROM_DEVICE:
-                dma_cache_inv(addr, size);
+                dma_cache_inv((unsigned long)addr, size);
                 break;

         case DMA_BIDIRECTIONAL:
-                dma_cache_wback_inv(addr, size);
+                dma_cache_wback_inv((unsigned long)addr, size);
                 break;

         default:
                 BUG();
         }
 }

+/*
+ * A single sg entry may refer to multiple physically contiguous
+ * pages. But we still need to process highmem pages individually.
+ * If highmem is not configured then the bulk of this loop gets
+ * optimized out.
+ */
+static inline void __dma_sync(struct page *page,
+        unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+        size_t left = size;
+
+        do {
+                size_t len = left;
+
+                if (PageHighMem(page)) {
+                        void *addr;
+
+                        if (offset + len > PAGE_SIZE) {
+                                if (offset >= PAGE_SIZE) {
+                                        page += offset >> PAGE_SHIFT;
+                                        offset &= ~PAGE_MASK;
+                                }
+                                len = PAGE_SIZE - offset;
+                        }
+
+                        addr = kmap_atomic(page);
+                        __dma_sync_virtual(addr + offset, len, direction);
+                        kunmap_atomic(addr);
+                } else
+                        __dma_sync_virtual(page_address(page) + offset,
+                                           len, direction);
+                offset = 0;
+                page++;
+                left -= len;
+        } while (left);
+}
+
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
         size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
         if (cpu_is_noncoherent_r10000(dev))
-                __dma_sync(dma_addr_to_virt(dev, dma_addr), size,
-                           direction);
+                __dma_sync(dma_addr_to_page(dev, dma_addr),
+                           dma_addr & ~PAGE_MASK, size, direction);

         plat_unmap_dma_mem(dev, dma_addr, size, direction);
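The new __dma_sync() above is the core of the patch: a region described as (page, offset, size) is pushed through the cache one page at a time, so each highmem page can get its own short-lived kmap_atomic() mapping while lowmem pages go straight through page_address(). The chunking arithmetic can be exercised on its own; this is a userspace model of that loop, with page numbers in place of struct page pointers and printf in place of the cache sync:

    #include <stdio.h>

    #define PAGE_SHIFT 12                  /* stand-in for the kernel constant */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Mirrors the loop in the hunk: clamp each chunk at a page boundary,
     * then advance page by page until the region is exhausted. */
    static void walk(unsigned long page, unsigned long offset, size_t size)
    {
            size_t left = size;

            do {
                    size_t len = left;

                    if (offset + len > PAGE_SIZE) {
                            if (offset >= PAGE_SIZE) {
                                    page += offset >> PAGE_SHIFT;
                                    offset &= ~PAGE_MASK;
                            }
                            len = PAGE_SIZE - offset;
                    }
                    printf("sync page %lu, offset %lu, len %zu\n",
                           page, offset, len);
                    offset = 0;
                    page++;
                    left -= len;
            } while (left);
    }

    int main(void)
    {
            walk(0, 3000, 10000);          /* a region spanning four pages */
            return 0;
    }

Run it and the region splits into chunks of 1096, 4096, 4096 and 712 bytes, which sum back to the original 10000.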
@@ -187,9 +224,7 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
         for (i = 0; i < nents; i++, sg++) {
-                unsigned long addr;
-
-                addr = (unsigned long) sg_virt(sg);
-                if (!plat_device_is_coherent(dev) && addr)
-                        __dma_sync(addr, sg->length, direction);
-                sg->dma_address = plat_map_dma_mem(dev,
-                                (void *)addr, sg->length);
+                if (!plat_device_is_coherent(dev))
+                        __dma_sync(sg_page(sg), sg->offset, sg->length,
+                                   direction);
+                sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
+                                  sg->offset;
         }
@@ -201,12 +236,8 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
         unsigned long offset, size_t size, enum dma_data_direction direction,
         struct dma_attrs *attrs)
 {
-        unsigned long addr;
-
-        addr = (unsigned long) page_address(page) + offset;
-
         if (!plat_device_is_coherent(dev))
-                __dma_sync(addr, size, direction);
+                __dma_sync(page, offset, size, direction);

-        return plat_map_dma_mem(dev, (void *)addr, size);
+        return plat_map_dma_mem_page(dev, page) + offset;
 }
@@ -214,17 +245,14 @@
 static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
         int nhwentries, enum dma_data_direction direction,
         struct dma_attrs *attrs)
 {
-        unsigned long addr;
         int i;

         for (i = 0; i < nhwentries; i++, sg++) {
                 if (!plat_device_is_coherent(dev) &&
-                    direction != DMA_TO_DEVICE) {
-                        addr = (unsigned long) sg_virt(sg);
-                        if (addr)
-                                __dma_sync(addr, sg->length, direction);
-                }
+                    direction != DMA_TO_DEVICE)
+                        __dma_sync(sg_page(sg), sg->offset, sg->length,
+                                   direction);
                 plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
         }
 }
@@ -232,10 +260,7 @@
 static void mips_dma_sync_single_for_cpu(struct device *dev,
         dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-        if (cpu_is_noncoherent_r10000(dev)) {
-                unsigned long addr;
-
-                addr = dma_addr_to_virt(dev, dma_handle);
-                __dma_sync(addr, size, direction);
-        }
+        if (cpu_is_noncoherent_r10000(dev))
+                __dma_sync(dma_addr_to_page(dev, dma_handle),
+                           dma_handle & ~PAGE_MASK, size, direction);
 }
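mips_dma_sync_single_for_cpu() above (like its _for_device counterpart below) recovers the page and the offset from the bus address in one step each: dma_addr_to_page() keeps the high bits, and dma_handle & ~PAGE_MASK keeps the low bits, because ~PAGE_MASK is simply PAGE_SIZE - 1. A standalone check of that identity, again assuming 4 KiB pages:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                  /* stand-in: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            /* hypothetical handle: page frame 5, byte 123 within it */
            uint64_t dma_handle = (5UL << PAGE_SHIFT) + 123;

            assert((dma_handle >> PAGE_SHIFT) == 5);     /* the page   */
            assert((dma_handle & ~PAGE_MASK) == 123);    /* the offset */

            /* page and offset recombine losslessly into the handle */
            assert(((dma_handle >> PAGE_SHIFT) << PAGE_SHIFT) +
                   (dma_handle & ~PAGE_MASK) == dma_handle);
            return 0;
    }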
@@ -243,11 +268,8 @@
 static void mips_dma_sync_single_for_device(struct device *dev,
         dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
         plat_extra_sync_for_device(dev);
-        if (!plat_device_is_coherent(dev)) {
-                unsigned long addr;
-
-                addr = dma_addr_to_virt(dev, dma_handle);
-                __dma_sync(addr, size, direction);
-        }
+        if (!plat_device_is_coherent(dev))
+                __dma_sync(dma_addr_to_page(dev, dma_handle),
+                           dma_handle & ~PAGE_MASK, size, direction);
 }
@@ -255,12 +277,12 @@
 static void mips_dma_sync_sg_for_cpu(struct device *dev,
         struct scatterlist *sg, int nelems, enum dma_data_direction direction)
 {
         int i;

         /* Make sure that gcc doesn't leave the empty loop body.  */
         for (i = 0; i < nelems; i++, sg++) {
                 if (cpu_is_noncoherent_r10000(dev))
-                        __dma_sync((unsigned long)page_address(sg_page(sg)),
-                                   sg->length, direction);
+                        __dma_sync(sg_page(sg), sg->offset, sg->length,
+                                   direction);
         }
 }
@@ -273,7 +295,7 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
         /* Make sure that gcc doesn't leave the empty loop body.  */
         for (i = 0; i < nelems; i++, sg++) {
                 if (!plat_device_is_coherent(dev))
-                        __dma_sync((unsigned long)page_address(sg_page(sg)),
-                                   sg->length, direction);
+                        __dma_sync(sg_page(sg), sg->offset, sg->length,
+                                   direction);
         }
 }
@@ -296,6 +318,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
         plat_extra_sync_for_device(dev);
         if (!plat_device_is_coherent(dev))
-                __dma_sync((unsigned long)vaddr, size, direction);
+                __dma_sync_virtual(vaddr, size, direction);
 }

 EXPORT_SYMBOL(dma_cache_sync);
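dma_cache_sync() is handed a kernel virtual address rather than a dma_addr_t, so it skips the page walk and calls the renamed __dma_sync_virtual() directly. A userspace model of the direction dispatch that routine performs, with the MIPS cache primitives stubbed out as prints (the stub names mirror dma_cache_wback()/dma_cache_inv()/dma_cache_wback_inv()):

    #include <stdio.h>
    #include <stddef.h>

    enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

    /* stubs standing in for the MIPS cache-maintenance macros */
    static void wback(unsigned long a, size_t s)     { printf("writeback  %#lx+%zu\n", a, s); }
    static void inv(unsigned long a, size_t s)       { printf("invalidate %#lx+%zu\n", a, s); }
    static void wback_inv(unsigned long a, size_t s) { printf("wback+inv  %#lx+%zu\n", a, s); }

    static void sync_virtual(void *addr, size_t size, enum dma_data_direction dir)
    {
            switch (dir) {
            case DMA_TO_DEVICE:      /* CPU wrote: push dirty lines to memory */
                    wback((unsigned long)addr, size);
                    break;
            case DMA_FROM_DEVICE:    /* device wrote: drop stale CPU lines */
                    inv((unsigned long)addr, size);
                    break;
            case DMA_BIDIRECTIONAL:  /* either side may have written */
                    wback_inv((unsigned long)addr, size);
                    break;
            }
    }

    int main(void)
    {
            char buf[64];

            sync_virtual(buf, sizeof(buf), DMA_TO_DEVICE);
            return 0;
    }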