/*****************************************************************************
* Copyright 2004 - 2008 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/****************************************************************************/
/**
*   @brief  Implements the DMA interface.
*/
/****************************************************************************/
23
/* ---- Include Files ---------------------------------------------------- */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/irqreturn.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include <mach/timer.h>

#include <linux/pfn.h>
#include <linux/atomic.h>

/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
/* especially since dc4 doesn't use kmalloc'd memory. */

#define ALLOW_MAP_OF_KMALLOC_MEMORY 0
47
/* ---- Public Variables ------------------------------------------------- */

/* ---- Private Constants and Types -------------------------------------- */

#define MAKE_HANDLE(controllerIdx, channelIdx)	(((controllerIdx) << 4) | (channelIdx))

#define CONTROLLER_FROM_HANDLE(handle)	(((handle) >> 4) & 0x0f)
#define CHANNEL_FROM_HANDLE(handle)	((handle) & 0x0f)

#define DMA_MAP_DEBUG	0

#if DMA_MAP_DEBUG
#	define DMA_MAP_PRINT(fmt, args...)	printk("%s: " fmt, __func__, ## args)
#else
#	define DMA_MAP_PRINT(fmt, args...)
#endif
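/*
 * Illustrative sketch, not part of the original driver: shows how a
 * DMA_Handle_t round-trips through MAKE_HANDLE and the two *_FROM_HANDLE
 * macros.  The controller index lives in the upper nibble and the channel
 * index in the lower nibble; the indices used here are arbitrary.
 */
static void dma_example_handle_packing(void)
{
	DMA_Handle_t handle = MAKE_HANDLE(1, 3);	/* encodes as 0x13 */

	printk(KERN_INFO "controller: %d channel: %d\n",
	       CONTROLLER_FROM_HANDLE(handle), CHANNEL_FROM_HANDLE(handle));
}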
64
/* ---- Private Variables ------------------------------------------------ */

static DMA_Global_t gDMA;
static struct proc_dir_entry *gDmaDir;

static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);

#include "dma_device.c"
76
/* ---- Private Function Prototypes -------------------------------------- */

/* ---- Functions  ------------------------------------------------------- */

/****************************************************************************/
/**
*   Displays information for /proc/dma/mem-type
*/
/****************************************************************************/
86
static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
87
int count, int *eof, void *data)
91
len += sprintf(buf + len, "dma_map_mem statistics\n");
93
sprintf(buf + len, "coherent: %d\n",
94
atomic_read(&gDmaStatMemTypeCoherent));
96
sprintf(buf + len, "kmalloc: %d\n",
97
atomic_read(&gDmaStatMemTypeKmalloc));
99
sprintf(buf + len, "vmalloc: %d\n",
100
atomic_read(&gDmaStatMemTypeVmalloc));
102
sprintf(buf + len, "user: %d\n",
103
atomic_read(&gDmaStatMemTypeUser));
108
/****************************************************************************/
110
* Displays information for /proc/dma/channels
112
/****************************************************************************/
114
static int dma_proc_read_channels(char *buf, char **start, off_t offset,
115
int count, int *eof, void *data)
119
int limit = count - 200;
121
DMA_Channel_t *channel;
123
if (down_interruptible(&gDMA.lock) < 0) {
127
for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
129
for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
136
&gDMA.controller[controllerIdx].channel[channelIdx];
139
sprintf(buf + len, "%d:%d ", controllerIdx,
142
if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
145
sprintf(buf + len, "Dedicated for %s ",
146
DMA_gDeviceAttribute[channel->
149
len += sprintf(buf + len, "Shared ");
152
if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) != 0) {
153
len += sprintf(buf + len, "No ISR ");
156
if ((channel->flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) {
157
len += sprintf(buf + len, "Fifo: 128 ");
159
len += sprintf(buf + len, "Fifo: 64 ");
162
if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
164
sprintf(buf + len, "InUse by %s",
165
DMA_gDeviceAttribute[channel->
167
#if (DMA_DEBUG_TRACK_RESERVATION)
169
sprintf(buf + len, " (%s:%d)",
174
len += sprintf(buf + len, "Avail ");
177
if (channel->lastDevType != DMA_DEVICE_NONE) {
179
sprintf(buf + len, "Last use: %s ",
180
DMA_gDeviceAttribute[channel->
185
len += sprintf(buf + len, "\n");
194
/****************************************************************************/
196
* Displays information for /proc/dma/devices
198
/****************************************************************************/
200
static int dma_proc_read_devices(char *buf, char **start, off_t offset,
201
int count, int *eof, void *data)
203
int limit = count - 200;
207
if (down_interruptible(&gDMA.lock) < 0) {
211
for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
212
DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];
214
if (devAttr->name == NULL) {
222
len += sprintf(buf + len, "%-12s ", devAttr->name);
224
if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
226
sprintf(buf + len, "Dedicated %d:%d ",
227
devAttr->dedicatedController,
228
devAttr->dedicatedChannel);
230
len += sprintf(buf + len, "Shared DMA:");
231
if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA0) != 0) {
232
len += sprintf(buf + len, "0");
234
if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA1) != 0) {
235
len += sprintf(buf + len, "1");
237
len += sprintf(buf + len, " ");
239
if ((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) {
240
len += sprintf(buf + len, "NoISR ");
242
if ((devAttr->flags & DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) != 0) {
243
len += sprintf(buf + len, "Allow-128 ");
248
"Xfer #: %Lu Ticks: %Lu Bytes: %Lu DescLen: %u\n",
249
devAttr->numTransfers, devAttr->transferTicks,
250
devAttr->transferBytes,
251
devAttr->ring.bytesAllocated);
261
/****************************************************************************/
263
* Determines if a DMA_Device_t is "valid".
266
* TRUE - dma device is valid
267
* FALSE - dma device isn't valid
269
/****************************************************************************/
271
static inline int IsDeviceValid(DMA_Device_t device)
273
return (device >= 0) && (device < DMA_NUM_DEVICE_ENTRIES);
276
/****************************************************************************/
278
* Translates a DMA handle into a pointer to a channel.
281
* non-NULL - pointer to DMA_Channel_t
282
* NULL - DMA Handle was invalid
284
/****************************************************************************/
286
static inline DMA_Channel_t *HandleToChannel(DMA_Handle_t handle)
291
controllerIdx = CONTROLLER_FROM_HANDLE(handle);
292
channelIdx = CHANNEL_FROM_HANDLE(handle);
294
	if ((controllerIdx >= DMA_NUM_CONTROLLERS)
	    || (channelIdx >= DMA_NUM_CHANNELS)) {
298
return &gDMA.controller[controllerIdx].channel[channelIdx];
301
/****************************************************************************/
303
* Interrupt handler which is called to process DMA interrupts.
305
/****************************************************************************/
307
static irqreturn_t dma_interrupt_handler(int irq, void *dev_id)
309
DMA_Channel_t *channel;
310
DMA_DeviceAttribute_t *devAttr;
313
channel = (DMA_Channel_t *) dev_id;
315
/* Figure out why we were called, and knock down the interrupt */
317
irqStatus = dmacHw_getInterruptStatus(channel->dmacHwHandle);
318
dmacHw_clearInterrupt(channel->dmacHwHandle);
320
	if ((channel->devType < 0)
	    || (channel->devType >= DMA_NUM_DEVICE_ENTRIES)) {
322
printk(KERN_ERR "dma_interrupt_handler: Invalid devType: %d\n",
326
devAttr = &DMA_gDeviceAttribute[channel->devType];
330
if ((irqStatus & dmacHw_INTERRUPT_STATUS_TRANS) != 0) {
331
devAttr->transferTicks +=
332
(timer_get_tick_count() - devAttr->transferStartTime);
335
if ((irqStatus & dmacHw_INTERRUPT_STATUS_ERROR) != 0) {
337
"dma_interrupt_handler: devType :%d DMA error (%s)\n",
338
channel->devType, devAttr->name);
340
devAttr->numTransfers++;
341
devAttr->transferBytes += devAttr->numBytes;
344
/* Call any installed handler */
346
if (devAttr->devHandler != NULL) {
347
devAttr->devHandler(channel->devType, irqStatus,
354
/****************************************************************************/
356
* Allocates memory to hold a descriptor ring. The descriptor ring then
357
* needs to be populated by making one or more calls to
358
* dma_add_descriptors.
360
* The returned descriptor ring will be automatically initialized.
363
* 0 Descriptor ring was allocated successfully
364
* -EINVAL Invalid parameters passed in
365
* -ENOMEM Unable to allocate memory for the desired number of descriptors.
367
/****************************************************************************/
369
int dma_alloc_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to populate */
370
int numDescriptors /* Number of descriptors that need to be allocated. */
372
size_t bytesToAlloc = dmacHw_descriptorLen(numDescriptors);
374
if ((ring == NULL) || (numDescriptors <= 0)) {
379
ring->descriptorsAllocated = 0;
380
ring->bytesAllocated = 0;
382
ring->virtAddr = dma_alloc_writecombine(NULL,
386
if (ring->virtAddr == NULL) {
390
ring->bytesAllocated = bytesToAlloc;
391
ring->descriptorsAllocated = numDescriptors;
393
return dma_init_descriptor_ring(ring, numDescriptors);
396
EXPORT_SYMBOL(dma_alloc_descriptor_ring);
398
/****************************************************************************/
400
* Releases the memory which was previously allocated for a descriptor ring.
402
/****************************************************************************/
404
void dma_free_descriptor_ring(DMA_DescriptorRing_t *ring /* Descriptor to release */
406
if (ring->virtAddr != NULL) {
407
dma_free_writecombine(NULL,
408
ring->bytesAllocated,
409
ring->virtAddr, ring->physAddr);
412
ring->bytesAllocated = 0;
413
ring->descriptorsAllocated = 0;
414
ring->virtAddr = NULL;
418
EXPORT_SYMBOL(dma_free_descriptor_ring);
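/*
 * Illustrative sketch, not part of the original driver: a descriptor ring is
 * allocated once for a worst-case descriptor count, may later be
 * re-initialized (see dma_init_descriptor_ring below) for a smaller count
 * without reallocating, and is eventually released.  The count of 8 is an
 * arbitrary example value.
 */
static int dma_example_ring_lifecycle(void)
{
	DMA_DescriptorRing_t ring;
	int rc;

	rc = dma_alloc_descriptor_ring(&ring, 8);
	if (rc < 0)
		return rc;

	/* ... populate the ring with dma_add_descriptors() and use it ... */

	dma_free_descriptor_ring(&ring);
	return 0;
}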
420
/****************************************************************************/
422
* Initializes a descriptor ring, so that descriptors can be added to it.
423
* Once a descriptor ring has been allocated, it may be reinitialized for
424
* use with additional/different regions of memory.
426
* Note that if 7 descriptors are allocated, it's perfectly acceptable to
427
* initialize the ring with a smaller number of descriptors. The amount
428
* of memory allocated for the descriptor ring will not be reduced, and
429
* the descriptor ring may be reinitialized later
432
* 0 Descriptor ring was initialized successfully
433
* -ENOMEM The descriptor which was passed in has insufficient space
434
* to hold the desired number of descriptors.
436
/****************************************************************************/
438
int dma_init_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to initialize */
439
int numDescriptors /* Number of descriptors to initialize. */
441
if (ring->virtAddr == NULL) {
444
if (dmacHw_initDescriptor(ring->virtAddr,
446
ring->bytesAllocated, numDescriptors) < 0) {
448
"dma_init_descriptor_ring: dmacHw_initDescriptor failed\n");
455
EXPORT_SYMBOL(dma_init_descriptor_ring);
457
/****************************************************************************/
459
* Determines the number of descriptors which would be required for a
460
* transfer of the indicated memory region.
462
* This function also needs to know which DMA device this transfer will
463
* be destined for, so that the appropriate DMA configuration can be retrieved.
464
* DMA parameters such as transfer width, and whether this is a memory-to-memory
465
* or memory-to-peripheral, etc can all affect the actual number of descriptors
469
* > 0 Returns the number of descriptors required for the indicated transfer
470
* -ENODEV - Device handed in is invalid.
471
* -EINVAL Invalid parameters
472
* -ENOMEM Memory exhausted
474
/****************************************************************************/
476
int dma_calculate_descriptor_count(DMA_Device_t device, /* DMA Device that this will be associated with */
477
dma_addr_t srcData, /* Place to get data to write to device */
478
dma_addr_t dstData, /* Pointer to device data address */
479
size_t numBytes /* Number of bytes to transfer to the device */
482
DMA_DeviceAttribute_t *devAttr;
484
if (!IsDeviceValid(device)) {
487
devAttr = &DMA_gDeviceAttribute[device];
489
numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
493
if (numDescriptors < 0) {
495
"dma_calculate_descriptor_count: dmacHw_calculateDescriptorCount failed\n");
499
return numDescriptors;
502
EXPORT_SYMBOL(dma_calculate_descriptor_count);
504
/****************************************************************************/
506
* Adds a region of memory to the descriptor ring. Note that it may take
507
* multiple descriptors for each region of memory. It is the caller's
508
* responsibility to allocate a sufficiently large descriptor ring.
511
* 0 Descriptors were added successfully
512
* -ENODEV Device handed in is invalid.
513
* -EINVAL Invalid parameters
514
* -ENOMEM Memory exhausted
516
/****************************************************************************/
518
int dma_add_descriptors(DMA_DescriptorRing_t *ring, /* Descriptor ring to add descriptors to */
519
DMA_Device_t device, /* DMA Device that descriptors are for */
520
dma_addr_t srcData, /* Place to get data (memory or device) */
521
dma_addr_t dstData, /* Place to put data (memory or device) */
522
size_t numBytes /* Number of bytes to transfer to the device */
525
DMA_DeviceAttribute_t *devAttr;
527
if (!IsDeviceValid(device)) {
530
devAttr = &DMA_gDeviceAttribute[device];
532
rc = dmacHw_setDataDescriptor(&devAttr->config,
535
(void *)dstData, numBytes);
538
"dma_add_descriptors: dmacHw_setDataDescriptor failed with code: %d\n",
546
EXPORT_SYMBOL(dma_add_descriptors);
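/*
 * Illustrative sketch, not part of the original driver: size a ring with
 * dma_calculate_descriptor_count() and then populate it with
 * dma_add_descriptors().  The device and the source/destination physical
 * addresses are supplied by the caller.
 */
static int dma_example_build_ring(DMA_Device_t dev, DMA_DescriptorRing_t *ring,
				  dma_addr_t srcPhys, dma_addr_t dstPhys,
				  size_t numBytes)
{
	int numDescriptors;
	int rc;

	numDescriptors = dma_calculate_descriptor_count(dev, srcPhys, dstPhys,
							numBytes);
	if (numDescriptors < 0)
		return numDescriptors;

	rc = dma_alloc_descriptor_ring(ring, numDescriptors);
	if (rc < 0)
		return rc;

	rc = dma_add_descriptors(ring, dev, srcPhys, dstPhys, numBytes);
	if (rc < 0)
		dma_free_descriptor_ring(ring);

	return rc;
}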
548
/****************************************************************************/
550
* Sets the descriptor ring associated with a device.
552
* Once set, the descriptor ring will be associated with the device, even
553
* across channel request/free calls. Passing in a NULL descriptor ring
554
* will release any descriptor ring currently associated with the device.
556
* Note: If you call dma_transfer, or one of the other dma_alloc_ functions
557
* the descriptor ring may be released and reallocated.
559
* Note: This function will release the descriptor memory for any current
560
* descriptor ring associated with this device.
563
* 0 Descriptors were added successfully
564
* -ENODEV Device handed in is invalid.
566
/****************************************************************************/
568
int dma_set_device_descriptor_ring(DMA_Device_t device, /* Device to update the descriptor ring for. */
569
DMA_DescriptorRing_t *ring /* Descriptor ring to add descriptors to */
571
DMA_DeviceAttribute_t *devAttr;
573
if (!IsDeviceValid(device)) {
576
devAttr = &DMA_gDeviceAttribute[device];
578
/* Free the previously allocated descriptor ring */
580
dma_free_descriptor_ring(&devAttr->ring);
583
/* Copy in the new one */
585
devAttr->ring = *ring;
588
/* Set things up so that if dma_transfer is called then this descriptor */
589
/* ring will get freed. */
591
devAttr->prevSrcData = 0;
592
devAttr->prevDstData = 0;
593
devAttr->prevNumBytes = 0;
598
EXPORT_SYMBOL(dma_set_device_descriptor_ring);
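/*
 * Illustrative sketch, not part of the original driver: a caller that builds
 * its descriptor ring up front (for example with dma_example_build_ring()
 * above) can attach it to the device so that later transfers reuse it rather
 * than reallocating.  Passing a NULL ring would release any ring currently
 * associated with the device.
 */
static int dma_example_prebuild_and_attach(DMA_Device_t dev,
					   dma_addr_t srcPhys,
					   dma_addr_t dstPhys, size_t numBytes)
{
	DMA_DescriptorRing_t ring;
	int rc;

	rc = dma_example_build_ring(dev, &ring, srcPhys, dstPhys, numBytes);
	if (rc < 0)
		return rc;

	/* The device takes ownership of the ring memory from here on. */
	return dma_set_device_descriptor_ring(dev, &ring);
}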
600
/****************************************************************************/
602
* Retrieves the descriptor ring associated with a device.
605
* 0 Descriptors were added successfully
606
* -ENODEV Device handed in is invalid.
608
/****************************************************************************/
610
int dma_get_device_descriptor_ring(DMA_Device_t device, /* Device to retrieve the descriptor ring for. */
611
DMA_DescriptorRing_t *ring /* Place to store retrieved ring */
613
DMA_DeviceAttribute_t *devAttr;
615
memset(ring, 0, sizeof(*ring));
617
if (!IsDeviceValid(device)) {
620
devAttr = &DMA_gDeviceAttribute[device];
622
*ring = devAttr->ring;
627
EXPORT_SYMBOL(dma_get_device_descriptor_ring);
629
/****************************************************************************/
631
* Configures a DMA channel.
634
* >= 0 - Initialization was successful.
636
* -EBUSY - Device is currently being used.
637
* -ENODEV - Device handed in is invalid.
639
/****************************************************************************/
641
static int ConfigChannel(DMA_Handle_t handle)
643
DMA_Channel_t *channel;
644
DMA_DeviceAttribute_t *devAttr;
647
channel = HandleToChannel(handle);
648
if (channel == NULL) {
651
devAttr = &DMA_gDeviceAttribute[channel->devType];
652
controllerIdx = CONTROLLER_FROM_HANDLE(handle);
654
if ((devAttr->flags & DMA_DEVICE_FLAG_PORT_PER_DMAC) != 0) {
655
if (devAttr->config.transferType ==
656
dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL) {
657
devAttr->config.dstPeripheralPort =
658
devAttr->dmacPort[controllerIdx];
659
} else if (devAttr->config.transferType ==
660
dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM) {
661
devAttr->config.srcPeripheralPort =
662
devAttr->dmacPort[controllerIdx];
666
if (dmacHw_configChannel(channel->dmacHwHandle, &devAttr->config) != 0) {
667
printk(KERN_ERR "ConfigChannel: dmacHw_configChannel failed\n");
674
/****************************************************************************/
676
* Initializes all of the data structures associated with the DMA.
678
* >= 0 - Initialization was successful.
680
* -EBUSY - Device is currently being used.
681
* -ENODEV - Device handed in is invalid.
683
/****************************************************************************/
691
DMA_Channel_t *channel;
692
DMA_Handle_t dedicatedHandle;
694
memset(&gDMA, 0, sizeof(gDMA));
696
sema_init(&gDMA.lock, 0);
697
init_waitqueue_head(&gDMA.freeChannelQ);
699
/* Initialize the Hardware */
703
/* Start off by marking all of the DMA channels as shared. */
705
for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
707
for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
710
&gDMA.controller[controllerIdx].channel[channelIdx];
713
channel->devType = DMA_DEVICE_NONE;
714
channel->lastDevType = DMA_DEVICE_NONE;
716
#if (DMA_DEBUG_TRACK_RESERVATION)
717
channel->fileName = "";
718
channel->lineNum = 0;
721
channel->dmacHwHandle =
722
dmacHw_getChannelHandle(dmacHw_MAKE_CHANNEL_ID
725
dmacHw_initChannel(channel->dmacHwHandle);
729
/* Record any special attributes that channels may have */
731
gDMA.controller[0].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
732
gDMA.controller[0].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
733
gDMA.controller[1].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
734
gDMA.controller[1].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
736
/* Now walk through and record the dedicated channels. */
738
for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
739
DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];
741
if (((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0)
742
&& ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0)) {
744
"DMA Device: %s Can only request NO_ISR for dedicated devices\n",
750
if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
751
/* This is a dedicated device. Mark the channel as being reserved. */
753
if (devAttr->dedicatedController >= DMA_NUM_CONTROLLERS) {
755
"DMA Device: %s DMA Controller %d is out of range\n",
757
devAttr->dedicatedController);
762
if (devAttr->dedicatedChannel >= DMA_NUM_CHANNELS) {
764
"DMA Device: %s DMA Channel %d is out of range\n",
766
devAttr->dedicatedChannel);
772
MAKE_HANDLE(devAttr->dedicatedController,
773
devAttr->dedicatedChannel);
774
channel = HandleToChannel(dedicatedHandle);
776
if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
779
("DMA Device: %s attempting to use same DMA Controller:Channel (%d:%d) as %s\n",
781
devAttr->dedicatedController,
782
devAttr->dedicatedChannel,
783
DMA_gDeviceAttribute[channel->devType].
789
channel->flags |= DMA_CHANNEL_FLAG_IS_DEDICATED;
790
channel->devType = devIdx;
792
if (devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) {
793
channel->flags |= DMA_CHANNEL_FLAG_NO_ISR;
796
/* For dedicated channels, we can go ahead and configure the DMA channel now */
799
ConfigChannel(dedicatedHandle);
803
/* Go through and register the interrupt handlers */
805
for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
807
for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
810
&gDMA.controller[controllerIdx].channel[channelIdx];
812
if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) == 0) {
813
snprintf(channel->name, sizeof(channel->name),
814
"dma %d:%d %s", controllerIdx,
817
DMA_DEVICE_NONE ? "" :
818
DMA_gDeviceAttribute[channel->devType].
822
request_irq(IRQ_DMA0C0 +
826
dma_interrupt_handler,
827
IRQF_DISABLED, channel->name,
831
"request_irq for IRQ_DMA%dC%d failed\n",
832
controllerIdx, channelIdx);
838
/* Create /proc/dma/channels and /proc/dma/devices */
840
gDmaDir = proc_mkdir("dma", NULL);
842
if (gDmaDir == NULL) {
843
printk(KERN_ERR "Unable to create /proc/dma\n");
845
create_proc_read_entry("channels", 0, gDmaDir,
846
dma_proc_read_channels, NULL);
847
create_proc_read_entry("devices", 0, gDmaDir,
848
dma_proc_read_devices, NULL);
849
create_proc_read_entry("mem-type", 0, gDmaDir,
850
dma_proc_read_mem_type, NULL);
860
/****************************************************************************/
862
* Reserves a channel for use with @a dev. If the device is setup to use
863
* a shared channel, then this function will block until a free channel
867
* >= 0 - A valid DMA Handle.
868
* -EBUSY - Device is currently being used.
869
* -ENODEV - Device handed in is invalid.
871
/****************************************************************************/
873
#if (DMA_DEBUG_TRACK_RESERVATION)
874
DMA_Handle_t dma_request_channel_dbg
875
(DMA_Device_t dev, const char *fileName, int lineNum)
877
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
881
DMA_DeviceAttribute_t *devAttr;
882
DMA_Channel_t *channel;
887
if (down_interruptible(&gDMA.lock) < 0) {
891
if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) {
895
devAttr = &DMA_gDeviceAttribute[dev];
897
#if (DMA_DEBUG_TRACK_RESERVATION)
901
s = strrchr(fileName, '/');
907
if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) {
908
/* This device has already been requested and not been freed */
910
printk(KERN_ERR "%s: device %s is already requested\n",
911
__func__, devAttr->name);
916
if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
917
/* This device has a dedicated channel. */
920
&gDMA.controller[devAttr->dedicatedController].
921
channel[devAttr->dedicatedChannel];
922
if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
927
channel->flags |= DMA_CHANNEL_FLAG_IN_USE;
928
devAttr->flags |= DMA_DEVICE_FLAG_IN_USE;
930
#if (DMA_DEBUG_TRACK_RESERVATION)
931
channel->fileName = fileName;
932
channel->lineNum = lineNum;
935
MAKE_HANDLE(devAttr->dedicatedController,
936
devAttr->dedicatedChannel);
940
/* This device needs to use one of the shared channels. */
942
handle = DMA_INVALID_HANDLE;
943
while (handle == DMA_INVALID_HANDLE) {
944
/* Scan through the shared channels and see if one is available */
946
for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS;
948
/* Check to see if we should try on controller 1 first. */
950
controllerIdx = controllerIdx2;
952
flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) {
953
controllerIdx = 1 - controllerIdx;
956
/* See if the device is available on the controller being tested */
959
flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx))
962
channelIdx < DMA_NUM_CHANNELS;
965
&gDMA.controller[controllerIdx].
970
DMA_CHANNEL_FLAG_IS_DEDICATED) ==
974
flags & DMA_CHANNEL_FLAG_IN_USE)
978
DMA_CHANNEL_FLAG_LARGE_FIFO)
983
DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO)
985
/* This channel is a large fifo - don't tie it up */
986
/* with devices that we don't want using it. */
992
DMA_CHANNEL_FLAG_IN_USE;
993
channel->devType = dev;
995
DMA_DEVICE_FLAG_IN_USE;
997
#if (DMA_DEBUG_TRACK_RESERVATION)
998
channel->fileName = fileName;
999
channel->lineNum = lineNum;
1002
MAKE_HANDLE(controllerIdx,
1005
/* Now that we've reserved the channel - we can go ahead and configure it */
1007
if (ConfigChannel(handle) != 0) {
1010
"dma_request_channel: ConfigChannel failed\n");
1018
/* No channels are currently available. Let's wait for one to free up. */
1023
prepare_to_wait(&gDMA.freeChannelQ, &wait,
1024
TASK_INTERRUPTIBLE);
1027
finish_wait(&gDMA.freeChannelQ, &wait);
1029
if (signal_pending(current)) {
1030
/* We don't currently hold gDMA.lock, so we return directly */
1032
return -ERESTARTSYS;
1036
if (down_interruptible(&gDMA.lock)) {
1037
return -ERESTARTSYS;
1047
/* Create both _dbg and non _dbg functions for modules. */
1049
#if (DMA_DEBUG_TRACK_RESERVATION)
1050
#undef dma_request_channel
1051
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
1053
return dma_request_channel_dbg(dev, __FILE__, __LINE__);
1056
EXPORT_SYMBOL(dma_request_channel_dbg);
1058
EXPORT_SYMBOL(dma_request_channel);
1060
/****************************************************************************/
1062
* Frees a previously allocated DMA Handle.
1064
/****************************************************************************/
1066
int dma_free_channel(DMA_Handle_t handle /* DMA handle. */
1069
DMA_Channel_t *channel;
1070
DMA_DeviceAttribute_t *devAttr;
1072
if (down_interruptible(&gDMA.lock) < 0) {
1073
return -ERESTARTSYS;
1076
channel = HandleToChannel(handle);
1077
if (channel == NULL) {
1082
devAttr = &DMA_gDeviceAttribute[channel->devType];
1084
if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) == 0) {
1085
channel->lastDevType = channel->devType;
1086
channel->devType = DMA_DEVICE_NONE;
1088
channel->flags &= ~DMA_CHANNEL_FLAG_IN_USE;
1089
devAttr->flags &= ~DMA_DEVICE_FLAG_IN_USE;
1094
wake_up_interruptible(&gDMA.freeChannelQ);
1099
EXPORT_SYMBOL(dma_free_channel);
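/*
 * Illustrative sketch, not part of the original driver: the usual channel
 * reservation pairing.  For devices configured to use a shared channel,
 * dma_request_channel() may sleep until a channel frees up, so this must not
 * be called from atomic context.  The device is supplied by the caller.
 */
static int dma_example_reserve_channel(DMA_Device_t dev)
{
	DMA_Handle_t handle;

	handle = dma_request_channel(dev);
	if (handle < 0)
		return handle;

	/* ... perform one or more transfers on the handle ... */

	return dma_free_channel(handle);
}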
1101
/****************************************************************************/
1103
* Determines if a given device has been configured as using a shared
1107
* 0 Device uses a dedicated channel
1108
* > zero Device uses a shared channel
1111
/****************************************************************************/
1113
int dma_device_is_channel_shared(DMA_Device_t device /* Device to check. */
1115
DMA_DeviceAttribute_t *devAttr;
1117
if (!IsDeviceValid(device)) {
1120
devAttr = &DMA_gDeviceAttribute[device];
1122
return ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0);
1125
EXPORT_SYMBOL(dma_device_is_channel_shared);
1127
/****************************************************************************/
1129
* Allocates buffers for the descriptors. This is normally done automatically
1130
* but needs to be done explicitly when initiating a dma from interrupt
1134
* 0 Descriptors were allocated successfully
1135
* -EINVAL Invalid device type for this kind of transfer
1136
* (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
1137
* -ENOMEM Memory exhausted
1139
/****************************************************************************/
1141
int dma_alloc_descriptors(DMA_Handle_t handle, /* DMA Handle */
1142
dmacHw_TRANSFER_TYPE_e transferType, /* Type of transfer being performed */
1143
dma_addr_t srcData, /* Place to get data to write to device */
1144
dma_addr_t dstData, /* Pointer to device data address */
1145
size_t numBytes /* Number of bytes to transfer to the device */
1147
DMA_Channel_t *channel;
1148
DMA_DeviceAttribute_t *devAttr;
1150
size_t ringBytesRequired;
1153
channel = HandleToChannel(handle);
1154
if (channel == NULL) {
1158
devAttr = &DMA_gDeviceAttribute[channel->devType];
1160
if (devAttr->config.transferType != transferType) {
1164
/* Figure out how many descriptors we need. */
1166
/* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
1167
/* srcData, dstData, numBytes); */
1169
numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
1173
if (numDescriptors < 0) {
1174
printk(KERN_ERR "%s: dmacHw_calculateDescriptorCount failed\n",
1179
/* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
1182
ringBytesRequired = dmacHw_descriptorLen(numDescriptors);
1184
/* printk("ringBytesRequired: %d\n", ringBytesRequired); */
1186
if (ringBytesRequired > devAttr->ring.bytesAllocated) {
1187
/* Make sure that this code path is never taken from interrupt context. */
1188
/* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
1189
/* allocation needs to have already been done. */
1193
/* Free the old descriptor ring and allocate a new one. */
1195
dma_free_descriptor_ring(&devAttr->ring);
1197
/* And allocate a new one. */
1200
dma_alloc_descriptor_ring(&devAttr->ring,
1204
"%s: dma_alloc_descriptor_ring(%d) failed\n",
1205
__func__, numDescriptors);
1208
/* Setup the descriptor for this transfer */
1210
if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
1211
devAttr->ring.physAddr,
1212
devAttr->ring.bytesAllocated,
1213
numDescriptors) < 0) {
1214
printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n",
1219
/* We've already got enough ring buffer allocated. All we need to do is reset */
1220
/* any control information, just in case the previous DMA was stopped. */
1222
dmacHw_resetDescriptorControl(devAttr->ring.virtAddr);
1225
/* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
1226
/* as last time, then we don't need to call setDataDescriptor again. */
1228
if (dmacHw_setDataDescriptor(&devAttr->config,
1229
devAttr->ring.virtAddr,
1231
(void *)dstData, numBytes) < 0) {
1232
printk(KERN_ERR "%s: dmacHw_setDataDescriptor failed\n",
1237
/* Remember the critical information for this transfer so that we can eliminate */
1238
/* another call to dma_alloc_descriptors if the caller reuses the same buffers */
1240
devAttr->prevSrcData = srcData;
1241
devAttr->prevDstData = dstData;
1242
devAttr->prevNumBytes = numBytes;
1247
EXPORT_SYMBOL(dma_alloc_descriptors);
1249
/****************************************************************************/
1251
* Allocates and sets up descriptors for a double buffered circular buffer.
1253
* This is primarily intended to be used for things like the ingress samples
1254
* from a microphone.
1257
* > 0 Number of descriptors actually allocated.
1258
* -EINVAL Invalid device type for this kind of transfer
1259
* (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
1260
* -ENOMEM Memory exhausted
1262
/****************************************************************************/
1264
int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */
1265
dma_addr_t srcData, /* Physical address of source data */
1266
dma_addr_t dstData1, /* Physical address of first destination buffer */
1267
dma_addr_t dstData2, /* Physical address of second destination buffer */
1268
size_t numBytes /* Number of bytes in each destination buffer */
1270
DMA_Channel_t *channel;
1271
DMA_DeviceAttribute_t *devAttr;
1272
int numDst1Descriptors;
1273
int numDst2Descriptors;
1275
size_t ringBytesRequired;
1278
channel = HandleToChannel(handle);
1279
if (channel == NULL) {
1283
devAttr = &DMA_gDeviceAttribute[channel->devType];
1285
/* Figure out how many descriptors we need. */
1287
/* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
1288
/* srcData, dstData, numBytes); */
1290
numDst1Descriptors =
1291
dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
1292
(void *)dstData1, numBytes);
1293
if (numDst1Descriptors < 0) {
1296
numDst2Descriptors =
1297
dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
1298
(void *)dstData2, numBytes);
1299
if (numDst2Descriptors < 0) {
1302
numDescriptors = numDst1Descriptors + numDst2Descriptors;
1303
/* printk("numDescriptors: %d\n", numDescriptors); */
1305
/* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
1308
ringBytesRequired = dmacHw_descriptorLen(numDescriptors);
1310
/* printk("ringBytesRequired: %d\n", ringBytesRequired); */
1312
if (ringBytesRequired > devAttr->ring.bytesAllocated) {
1313
/* Make sure that this code path is never taken from interrupt context. */
1314
/* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
1315
/* allocation needs to have already been done. */
1319
/* Free the old descriptor ring and allocate a new one. */
1321
dma_free_descriptor_ring(&devAttr->ring);
1323
/* And allocate a new one. */
1326
dma_alloc_descriptor_ring(&devAttr->ring,
1330
"%s: dma_alloc_descriptor_ring(%d) failed\n",
1331
__func__, ringBytesRequired);
1336
/* Setup the descriptor for this transfer. Since this function is used with */
1337
/* CONTINUOUS DMA operations, we need to reinitialize every time, otherwise */
1338
/* setDataDescriptor will keep trying to append onto the end. */
1340
if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
1341
devAttr->ring.physAddr,
1342
devAttr->ring.bytesAllocated,
1343
numDescriptors) < 0) {
1344
printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", __func__);
1348
/* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
1349
/* as last time, then we don't need to call setDataDescriptor again. */
1351
if (dmacHw_setDataDescriptor(&devAttr->config,
1352
devAttr->ring.virtAddr,
1354
(void *)dstData1, numBytes) < 0) {
1355
printk(KERN_ERR "%s: dmacHw_setDataDescriptor 1 failed\n",
1359
if (dmacHw_setDataDescriptor(&devAttr->config,
1360
devAttr->ring.virtAddr,
1362
(void *)dstData2, numBytes) < 0) {
1363
printk(KERN_ERR "%s: dmacHw_setDataDescriptor 2 failed\n",
1368
/* You should use dma_start_transfer rather than dma_transfer_xxx so we don't */
1369
/* try to make the 'prev' variables right. */
1371
devAttr->prevSrcData = 0;
1372
devAttr->prevDstData = 0;
1373
devAttr->prevNumBytes = 0;
1375
return numDescriptors;
1378
EXPORT_SYMBOL(dma_alloc_double_dst_descriptors);
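/*
 * Illustrative sketch, not part of the original driver: setting up a
 * double-buffered (ping/pong) capture, e.g. microphone ingress.  Both
 * destination halves are described up front; the transfer is then kicked off
 * with dma_start_transfer() (defined below) and the installed device handler
 * decides which half to drain as each completes.  Buffer addresses and the
 * per-buffer size come from the caller.
 */
static int dma_example_setup_double_buffer(DMA_Handle_t handle,
					   dma_addr_t devPhys,
					   dma_addr_t buf1Phys,
					   dma_addr_t buf2Phys,
					   size_t bytesPerBuffer)
{
	int numDescriptors;

	numDescriptors = dma_alloc_double_dst_descriptors(handle, devPhys,
							   buf1Phys, buf2Phys,
							   bytesPerBuffer);
	if (numDescriptors < 0)
		return numDescriptors;

	return 0;
}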
1380
/****************************************************************************/
1382
* Initiates a transfer when the descriptors have already been setup.
1384
* This is a special case, and normally, the dma_transfer_xxx functions should
1388
* 0 Transfer was started successfully
1389
* -ENODEV Invalid handle
1391
/****************************************************************************/
1393
int dma_start_transfer(DMA_Handle_t handle)
1395
DMA_Channel_t *channel;
1396
DMA_DeviceAttribute_t *devAttr;
1398
channel = HandleToChannel(handle);
1399
if (channel == NULL) {
1402
devAttr = &DMA_gDeviceAttribute[channel->devType];
1404
dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
1405
devAttr->ring.virtAddr);
1407
/* Since we got this far, everything went successfully */
1412
EXPORT_SYMBOL(dma_start_transfer);
1414
/****************************************************************************/
1416
* Stops a previously started DMA transfer.
1419
* 0 Transfer was stopped successfully
1420
* -ENODEV Invalid handle
1422
/****************************************************************************/
1424
int dma_stop_transfer(DMA_Handle_t handle)
1426
DMA_Channel_t *channel;
1428
channel = HandleToChannel(handle);
1429
if (channel == NULL) {
1433
dmacHw_stopTransfer(channel->dmacHwHandle);
1438
EXPORT_SYMBOL(dma_stop_transfer);
1440
/****************************************************************************/
1442
* Waits for a DMA to complete by polling. This function is only intended
1443
* to be used for testing. Interrupts should be used for most DMA operations.
1445
/****************************************************************************/
1447
int dma_wait_transfer_done(DMA_Handle_t handle)
1449
DMA_Channel_t *channel;
1450
dmacHw_TRANSFER_STATUS_e status;
1452
channel = HandleToChannel(handle);
1453
if (channel == NULL) {
1458
dmacHw_transferCompleted(channel->dmacHwHandle)) ==
1459
dmacHw_TRANSFER_STATUS_BUSY) {
1463
if (status == dmacHw_TRANSFER_STATUS_ERROR) {
1464
printk(KERN_ERR "%s: DMA transfer failed\n", __func__);
1470
EXPORT_SYMBOL(dma_wait_transfer_done);
1472
/****************************************************************************/
1474
* Initiates a DMA, allocating the descriptors as required.
1477
* 0 Transfer was started successfully
1478
* -EINVAL Invalid device type for this kind of transfer
1479
* (i.e. the device is _DEV_TO_MEM and not _MEM_TO_DEV)
1481
/****************************************************************************/
1483
int dma_transfer(DMA_Handle_t handle, /* DMA Handle */
1484
dmacHw_TRANSFER_TYPE_e transferType, /* Type of transfer being performed */
1485
dma_addr_t srcData, /* Place to get data to write to device */
1486
dma_addr_t dstData, /* Pointer to device data address */
1487
size_t numBytes /* Number of bytes to transfer to the device */
1489
DMA_Channel_t *channel;
1490
DMA_DeviceAttribute_t *devAttr;
1493
channel = HandleToChannel(handle);
1494
if (channel == NULL) {
1498
devAttr = &DMA_gDeviceAttribute[channel->devType];
1500
if (devAttr->config.transferType != transferType) {
1504
/* We keep track of the information about the previous request for this */
1505
/* device, and if the attributes match, then we can use the descriptors we setup */
1506
/* the last time, and not have to reinitialize everything. */
1510
dma_alloc_descriptors(handle, transferType, srcData,
1517
/* And kick off the transfer */
1519
devAttr->numBytes = numBytes;
1520
devAttr->transferStartTime = timer_get_tick_count();
1522
dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
1523
devAttr->ring.virtAddr);
1525
/* Since we got this far, everything went successfully */
1530
EXPORT_SYMBOL(dma_transfer);
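/*
 * Illustrative sketch, not part of the original driver: a simple blocking
 * memory-to-peripheral transfer using the polled completion helper.  In real
 * use a completion callback installed via dma_set_device_handler() (below) is
 * preferred over polling.  The device and physical addresses come from the
 * caller.
 */
static int dma_example_mem_to_dev(DMA_Device_t dev, dma_addr_t srcPhys,
				  dma_addr_t devPhys, size_t numBytes)
{
	DMA_Handle_t handle;
	int rc;

	handle = dma_request_channel(dev);
	if (handle < 0)
		return handle;

	rc = dma_transfer(handle, dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL,
			  srcPhys, devPhys, numBytes);
	if (rc == 0)
		rc = dma_wait_transfer_done(handle);

	dma_free_channel(handle);
	return rc;
}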
1532
/****************************************************************************/
1534
* Set the callback function which will be called when a transfer completes.
1535
* If a NULL callback function is set, then no callback will occur.
1537
* @note @a devHandler will be called from IRQ context.
1541
* -ENODEV - Device handed in is invalid.
1543
/****************************************************************************/
1545
int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for. */
1546
DMA_DeviceHandler_t devHandler, /* Function to call when the DMA completes */
1547
void *userData /* Pointer which will be passed to devHandler. */
1549
DMA_DeviceAttribute_t *devAttr;
1550
unsigned long flags;
1552
if (!IsDeviceValid(dev)) {
1555
devAttr = &DMA_gDeviceAttribute[dev];
1557
local_irq_save(flags);
1559
devAttr->userData = userData;
1560
devAttr->devHandler = devHandler;
1562
local_irq_restore(flags);
1567
EXPORT_SYMBOL(dma_set_device_handler);
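/*
 * Illustrative sketch, not part of the original driver: installing a
 * completion callback.  The handler runs in IRQ context, so it only does
 * minimal work (here it just sets a flag that the submitter polls).  The
 * handler prototype is assumed to match the DMA_DeviceHandler_t typedef from
 * the platform DMA header, i.e. (device, interrupt status, user data), which
 * is how dma_interrupt_handler() above invokes it.
 */
static void dma_example_done_handler(DMA_Device_t dev, int reason,
				     void *userData)
{
	atomic_t *done = userData;

	(void)dev;
	(void)reason;
	atomic_set(done, 1);	/* polled by the submitting thread */
}

static int dma_example_install_handler(DMA_Device_t dev, atomic_t *done)
{
	atomic_set(done, 0);
	return dma_set_device_handler(dev, dma_example_done_handler, done);
}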
1569
/****************************************************************************/
1571
* Initializes a memory mapping structure
1573
/****************************************************************************/
1575
int dma_init_mem_map(DMA_MemMap_t *memMap)
1577
memset(memMap, 0, sizeof(*memMap));
1579
sema_init(&memMap->lock, 1);
1584
EXPORT_SYMBOL(dma_init_mem_map);
1586
/****************************************************************************/
1588
* Releases any memory currently being held by a memory mapping structure.
1590
/****************************************************************************/
1592
int dma_term_mem_map(DMA_MemMap_t *memMap)
1594
down(&memMap->lock); /* Just being paranoid */
1596
/* Free up any allocated memory */
1599
memset(memMap, 0, sizeof(*memMap));
1604
EXPORT_SYMBOL(dma_term_mem_map);
1606
/****************************************************************************/
1608
* Looks at a memory address and categorizes it.
1610
* @return One of the values from the DMA_MemType_t enumeration.
1612
/****************************************************************************/
1614
DMA_MemType_t dma_mem_type(void *addr)
1616
unsigned long addrVal = (unsigned long)addr;
1618
if (addrVal >= VMALLOC_END) {
1619
/* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
1621
/* dma_alloc_xxx pages are physically and virtually contiguous */
1623
return DMA_MEM_TYPE_DMA;
1626
/* Technically, we could add one more classification. Addresses between VMALLOC_END */
1627
/* and the beginning of the DMA virtual address could be considered to be I/O space. */
1628
/* Right now, nobody cares about this particular classification, so we ignore it. */
1630
if (is_vmalloc_addr(addr)) {
1631
/* Address comes from the vmalloc'd region. Pages are virtually */
1632
/* contiguous but NOT physically contiguous */
1634
return DMA_MEM_TYPE_VMALLOC;
1637
if (addrVal >= PAGE_OFFSET) {
1638
/* PAGE_OFFSET is typically 0xC0000000 */
1640
/* kmalloc'd pages are physically contiguous */
1642
return DMA_MEM_TYPE_KMALLOC;
1645
return DMA_MEM_TYPE_USER;
1648
EXPORT_SYMBOL(dma_mem_type);
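/*
 * Illustrative sketch, not part of the original driver: how the classifier is
 * typically used before deciding whether a buffer can be handed to
 * dma_map_mem().  The buffer pointer comes from the caller.
 */
static void dma_example_classify(void *addr)
{
	switch (dma_mem_type(addr)) {
	case DMA_MEM_TYPE_DMA:
		printk(KERN_INFO "%p: dma_alloc_xxx memory\n", addr);
		break;
	case DMA_MEM_TYPE_VMALLOC:
		printk(KERN_INFO "%p: vmalloc memory (not DMA-able here)\n", addr);
		break;
	case DMA_MEM_TYPE_KMALLOC:
		printk(KERN_INFO "%p: kmalloc memory\n", addr);
		break;
	case DMA_MEM_TYPE_USER:
		printk(KERN_INFO "%p: user-mode memory\n", addr);
		break;
	default:
		printk(KERN_INFO "%p: unrecognized memory type\n", addr);
		break;
	}
}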
1650
/****************************************************************************/
1652
* Looks at a memory address and determines if we support DMA'ing to/from
1653
* that type of memory.
1656
* return value != 0 means dma supported
1657
* return value == 0 means dma not supported
1659
/****************************************************************************/
1661
int dma_mem_supports_dma(void *addr)
1663
DMA_MemType_t memType = dma_mem_type(addr);
1665
return (memType == DMA_MEM_TYPE_DMA)
1666
#if ALLOW_MAP_OF_KMALLOC_MEMORY
1667
|| (memType == DMA_MEM_TYPE_KMALLOC)
1669
|| (memType == DMA_MEM_TYPE_USER);
1672
EXPORT_SYMBOL(dma_mem_supports_dma);
1674
/****************************************************************************/
1676
* Maps in a memory region such that it can be used for performing a DMA.
1680
/****************************************************************************/
1682
int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
1683
enum dma_data_direction dir /* Direction that the mapping will be going */
1687
down(&memMap->lock);
1689
DMA_MAP_PRINT("memMap: %p\n", memMap);
1691
if (memMap->inUse) {
1692
printk(KERN_ERR "%s: memory map %p is already being used\n",
1700
memMap->numRegionsUsed = 0;
1706
DMA_MAP_PRINT("returning %d", rc);
1713
EXPORT_SYMBOL(dma_map_start);
1715
/****************************************************************************/
1717
* Adds a segment of memory to a memory map. Each segment is both
1718
* physically and virtually contiguous.
1720
* @return 0 on success, error code otherwise.
1722
/****************************************************************************/
1724
static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */
1725
DMA_Region_t *region, /* Region that the segment belongs to */
1726
void *virtAddr, /* Virtual address of the segment being added */
1727
dma_addr_t physAddr, /* Physical address of the segment being added */
1728
size_t numBytes /* Number of bytes of the segment being added */
1730
DMA_Segment_t *segment;
1732
DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
1733
physAddr, numBytes);
1737
if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
1738
|| (((unsigned long)virtAddr + numBytes)) >
1739
((unsigned long)region->virtAddr + region->numBytes)) {
1741
"%s: virtAddr %p is outside region @ %p len: %d\n",
1742
__func__, virtAddr, region->virtAddr, region->numBytes);
1746
if (region->numSegmentsUsed > 0) {
1747
/* Check to see if this segment is physically contiguous with the previous one */
1749
segment = ®ion->segment[region->numSegmentsUsed - 1];
1751
if ((segment->physAddr + segment->numBytes) == physAddr) {
1752
/* It is - just add on to the end */
1754
DMA_MAP_PRINT("appending %d bytes to last segment\n",
1757
segment->numBytes += numBytes;
1763
/* Reallocate to hold more segments, if required. */
1765
if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
1766
DMA_Segment_t *newSegment;
1768
region->numSegmentsAllocated * sizeof(*newSegment);
1769
int newAlloc = region->numSegmentsAllocated + 4;
1770
size_t newSize = newAlloc * sizeof(*newSegment);
1772
newSegment = kmalloc(newSize, GFP_KERNEL);
1773
if (newSegment == NULL) {
1776
memcpy(newSegment, region->segment, oldSize);
1777
memset(&((uint8_t *) newSegment)[oldSize], 0,
1779
kfree(region->segment);
1781
region->numSegmentsAllocated = newAlloc;
1782
region->segment = newSegment;
1785
segment = ®ion->segment[region->numSegmentsUsed];
1786
region->numSegmentsUsed++;
1788
segment->virtAddr = virtAddr;
1789
segment->physAddr = physAddr;
1790
segment->numBytes = numBytes;
1792
DMA_MAP_PRINT("returning success\n");
1797
/****************************************************************************/
1799
* Adds a region of memory to a memory map. Each region is virtually
1800
* contiguous, but not necessarily physically contiguous.
1802
* @return 0 on success, error code otherwise.
1804
/****************************************************************************/
1806
int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
1807
void *mem, /* Virtual address that we want to get a map of */
1808
size_t numBytes /* Number of bytes being mapped */
1810
unsigned long addr = (unsigned long)mem;
1811
unsigned int offset;
1813
DMA_Region_t *region;
1814
dma_addr_t physAddr;
1816
down(&memMap->lock);
1818
DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
1820
if (!memMap->inUse) {
1821
printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
1827
/* Reallocate to hold more regions. */
1829
if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
1830
DMA_Region_t *newRegion;
1832
memMap->numRegionsAllocated * sizeof(*newRegion);
1833
int newAlloc = memMap->numRegionsAllocated + 4;
1834
size_t newSize = newAlloc * sizeof(*newRegion);
1836
newRegion = kmalloc(newSize, GFP_KERNEL);
1837
if (newRegion == NULL) {
1841
memcpy(newRegion, memMap->region, oldSize);
1842
memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
1844
kfree(memMap->region);
1846
memMap->numRegionsAllocated = newAlloc;
1847
memMap->region = newRegion;
1850
region = &memMap->region[memMap->numRegionsUsed];
1851
memMap->numRegionsUsed++;
1853
offset = addr & ~PAGE_MASK;
1855
region->memType = dma_mem_type(mem);
1856
region->virtAddr = mem;
1857
region->numBytes = numBytes;
1858
region->numSegmentsUsed = 0;
1859
region->numLockedPages = 0;
1860
region->lockedPages = NULL;
1862
switch (region->memType) {
1863
case DMA_MEM_TYPE_VMALLOC:
1865
atomic_inc(&gDmaStatMemTypeVmalloc);
1867
/* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
1869
/* vmalloc'd pages are not physically contiguous */
1875
case DMA_MEM_TYPE_KMALLOC:
1877
atomic_inc(&gDmaStatMemTypeKmalloc);
1879
/* kmalloc'd pages are physically contiguous, so they'll have exactly */
1882
#if ALLOW_MAP_OF_KMALLOC_MEMORY
1884
dma_map_single(NULL, mem, numBytes, memMap->dir);
1885
rc = dma_map_add_segment(memMap, region, mem, physAddr,
1893
case DMA_MEM_TYPE_DMA:
1895
/* dma_alloc_xxx pages are physically contiguous */
1897
atomic_inc(&gDmaStatMemTypeCoherent);
1899
physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
1901
dma_sync_single_for_cpu(NULL, physAddr, numBytes,
1903
rc = dma_map_add_segment(memMap, region, mem, physAddr,
1908
case DMA_MEM_TYPE_USER:
1910
size_t firstPageOffset;
1911
size_t firstPageSize;
1912
struct page **pages;
1913
struct task_struct *userTask;
1915
atomic_inc(&gDmaStatMemTypeUser);
1918
/* If the pages are user pages, then the dma_mem_map_set_user_task function */
1919
/* must have been previously called. */
1921
if (memMap->userTask == NULL) {
1923
"%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
1928
/* User pages need to be locked. */
1931
(unsigned long)region->virtAddr & (PAGE_SIZE - 1);
1932
firstPageSize = PAGE_SIZE - firstPageOffset;
1934
region->numLockedPages = (firstPageOffset
1935
+ region->numBytes +
1936
PAGE_SIZE - 1) / PAGE_SIZE;
1938
kmalloc(region->numLockedPages *
1939
sizeof(struct page *), GFP_KERNEL);
1941
if (pages == NULL) {
1942
region->numLockedPages = 0;
1946
userTask = memMap->userTask;
1948
down_read(&userTask->mm->mmap_sem);
1949
rc = get_user_pages(userTask, /* task */
1950
userTask->mm, /* mm */
1951
(unsigned long)region->virtAddr, /* start */
1952
region->numLockedPages, /* len */
1953
memMap->dir == DMA_FROM_DEVICE, /* write */
1955
pages, /* pages (array of pointers to page) */
1957
up_read(&userTask->mm->mmap_sem);
1959
if (rc != region->numLockedPages) {
1961
region->numLockedPages = 0;
1967
uint8_t *virtAddr = region->virtAddr;
1968
size_t bytesRemaining;
1971
rc = 0; /* Since get_user_pages returns +ve number */
1973
region->lockedPages = pages;
1975
/* We've locked the user pages. Now we need to walk them and figure */
1976
/* out the physical addresses. */
1978
/* The first page may be partial */
1980
dma_map_add_segment(memMap,
1983
PFN_PHYS(page_to_pfn
1988
virtAddr += firstPageSize;
1990
region->numBytes - firstPageSize;
1993
pageIdx < region->numLockedPages;
1995
size_t bytesThisPage =
1997
PAGE_SIZE ? PAGE_SIZE :
2001
("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n",
2002
pageIdx, pages[pageIdx],
2003
page_to_pfn(pages[pageIdx]),
2004
PFN_PHYS(page_to_pfn
2007
dma_map_add_segment(memMap,
2010
PFN_PHYS(page_to_pfn
2015
virtAddr += bytesThisPage;
2016
bytesRemaining -= bytesThisPage;
2021
"%s: User mode pages are not yet supported\n",
2024
/* user pages are not physically contiguous */
2033
printk(KERN_ERR "%s: Unsupported memory type: %d\n",
2034
__func__, region->memType);
2042
memMap->numRegionsUsed--;
2047
DMA_MAP_PRINT("returning %d\n", rc);
2054
EXPORT_SYMBOL(dma_map_add_segment);
2056
/****************************************************************************/
2058
* Maps in a memory region such that it can be used for performing a DMA.
2060
* @return 0 on success, error code otherwise.
2062
/****************************************************************************/
2064
int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
2065
void *mem, /* Virtual address that we want to get a map of */
2066
size_t numBytes, /* Number of bytes being mapped */
2067
enum dma_data_direction dir /* Direction that the mapping will be going */
2071
rc = dma_map_start(memMap, dir);
2073
rc = dma_map_add_region(memMap, mem, numBytes);
2075
/* Since the add fails, this function will fail, and the caller won't */
2076
/* call unmap, so we need to do it here. */
2078
dma_unmap(memMap, 0);
2085
EXPORT_SYMBOL(dma_map_mem);
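/*
 * Illustrative sketch, not part of the original driver: the one-shot map
 * helper wraps dma_map_start() + dma_map_add_region(), so a caller with a
 * single virtually contiguous buffer can simply map it, run the DMA and then
 * unmap.  The memMap is assumed to have been prepared once with
 * dma_init_mem_map().
 */
static int dma_example_map_buffer(DMA_MemMap_t *memMap, void *buf,
				  size_t numBytes)
{
	int rc;

	rc = dma_map_mem(memMap, buf, numBytes, DMA_TO_DEVICE);
	if (rc < 0)
		return rc;

	/* ... build the descriptor ring and run the transfer ... */

	return dma_unmap(memMap, 0);	/* buffer was only read by the device */
}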
2087
/****************************************************************************/
2089
* Setup a descriptor ring for a given memory map.
2091
* It is assumed that the descriptor ring has already been initialized, and
2092
* this routine will only reallocate a new descriptor ring if the existing
2095
* @return 0 on success, error code otherwise.
2097
/****************************************************************************/
2099
int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
2100
DMA_MemMap_t *memMap, /* Memory map that will be used */
2101
dma_addr_t devPhysAddr /* Physical address of device */
2105
DMA_DeviceAttribute_t *devAttr;
2106
DMA_Region_t *region;
2107
DMA_Segment_t *segment;
2108
dma_addr_t srcPhysAddr;
2109
dma_addr_t dstPhysAddr;
2113
devAttr = &DMA_gDeviceAttribute[dev];
2115
down(&memMap->lock);
2117
/* Figure out how many descriptors we need */
2120
for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2121
region = &memMap->region[regionIdx];
2123
for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2125
segment = ®ion->segment[segmentIdx];
2127
if (memMap->dir == DMA_TO_DEVICE) {
2128
srcPhysAddr = segment->physAddr;
2129
dstPhysAddr = devPhysAddr;
2131
srcPhysAddr = devPhysAddr;
2132
dstPhysAddr = segment->physAddr;
2136
dma_calculate_descriptor_count(dev, srcPhysAddr,
2142
"%s: dma_calculate_descriptor_count failed: %d\n",
2146
numDescriptors += rc;
2150
/* Adjust the size of the ring, if it isn't big enough */
2152
if (numDescriptors > devAttr->ring.descriptorsAllocated) {
2153
dma_free_descriptor_ring(&devAttr->ring);
2155
dma_alloc_descriptor_ring(&devAttr->ring,
2159
"%s: dma_alloc_descriptor_ring failed: %d\n",
2165
dma_init_descriptor_ring(&devAttr->ring,
2169
"%s: dma_init_descriptor_ring failed: %d\n",
2175
/* Populate the descriptors */
2177
for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2178
region = &memMap->region[regionIdx];
2180
for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2182
segment = ®ion->segment[segmentIdx];
2184
if (memMap->dir == DMA_TO_DEVICE) {
2185
srcPhysAddr = segment->physAddr;
2186
dstPhysAddr = devPhysAddr;
2188
srcPhysAddr = devPhysAddr;
2189
dstPhysAddr = segment->physAddr;
2193
dma_add_descriptors(&devAttr->ring, dev,
2194
srcPhysAddr, dstPhysAddr,
2198
"%s: dma_add_descriptors failed: %d\n",
2213
EXPORT_SYMBOL(dma_map_create_descriptor_ring);
2215
/****************************************************************************/
2217
* Maps in a memory region such that it can be used for performing a DMA.
2221
/****************************************************************************/
2223
int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2224
int dirtied /* non-zero if any of the pages were modified */
2230
DMA_Region_t *region;
2231
DMA_Segment_t *segment;
2233
down(&memMap->lock);
2235
for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2236
region = &memMap->region[regionIdx];
2238
for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2240
segment = ®ion->segment[segmentIdx];
2242
switch (region->memType) {
2243
case DMA_MEM_TYPE_VMALLOC:
2246
"%s: vmalloc'd pages are not yet supported\n",
2252
case DMA_MEM_TYPE_KMALLOC:
2254
#if ALLOW_MAP_OF_KMALLOC_MEMORY
2255
dma_unmap_single(NULL,
2263
case DMA_MEM_TYPE_DMA:
2265
dma_sync_single_for_cpu(NULL,
2274
case DMA_MEM_TYPE_USER:
2276
/* Nothing to do here. */
2284
"%s: Unsupported memory type: %d\n",
2285
__func__, region->memType);
2291
segment->virtAddr = NULL;
2292
segment->physAddr = 0;
2293
segment->numBytes = 0;
2296
if (region->numLockedPages > 0) {
2299
/* Some user pages were locked. We need to go and unlock them now. */
2301
for (pageIdx = 0; pageIdx < region->numLockedPages;
2304
region->lockedPages[pageIdx];
2306
if (memMap->dir == DMA_FROM_DEVICE) {
2309
page_cache_release(page);
2311
kfree(region->lockedPages);
2312
region->numLockedPages = 0;
2313
region->lockedPages = NULL;
2316
region->memType = DMA_MEM_TYPE_NONE;
2317
region->virtAddr = NULL;
2318
region->numBytes = 0;
2319
region->numSegmentsUsed = 0;
2321
memMap->userTask = NULL;
2322
memMap->numRegionsUsed = 0;
2331
EXPORT_SYMBOL(dma_unmap);
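/*
 * Illustrative sketch, not part of the original driver: an end-to-end
 * scatter-gather style transfer built on the memory-map helpers.  The buffer
 * may be kmalloc'd, dma_alloc'd or (with dma_mem_map_set_user_task()) user
 * memory; dma_map_create_descriptor_ring() turns the resulting segments into
 * descriptors attached to the device.  Device, handle and addresses come from
 * the caller.
 */
static int dma_example_mapped_transfer(DMA_Device_t dev, DMA_Handle_t handle,
					DMA_MemMap_t *memMap, void *buf,
					size_t numBytes, dma_addr_t devPhys)
{
	int rc;

	rc = dma_map_mem(memMap, buf, numBytes, DMA_TO_DEVICE);
	if (rc < 0)
		return rc;

	rc = dma_map_create_descriptor_ring(dev, memMap, devPhys);
	if (rc == 0)
		rc = dma_start_transfer(handle);

	/* ... wait for completion via the installed device handler ... */

	dma_unmap(memMap, 0);
	return rc;
}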