~ubuntu-branches/debian/wheezy/linux-2.6/wheezy

Viewing changes to drivers/staging/iio/ring_sw.c

  • Committer: Bazaar Package Importer
  • Author(s): Ben Hutchings, Aurelien Jarno, Martin Michlmayr
  • Date: 2011-04-06 13:53:30 UTC
  • mfrom: (43.1.5 sid)
  • Revision ID: james.westby@ubuntu.com-20110406135330-wjufxhd0tvn3zx4z
Tags: 2.6.38-3
[ Ben Hutchings ]
* [ppc64] Add to linux-tools package architectures (Closes: #620124)
* [amd64] Save cr4 to mmu_cr4_features at boot time (Closes: #620284)
* appletalk: Fix bugs introduced when removing use of BKL
* ALSA: Fix yet another race in disconnection
* cciss: Fix lost command issue
* ath9k: Fix kernel panic in AR2427
* ses: Avoid kernel panic when lun 0 is not mapped
* PCI/ACPI: Report ASPM support to BIOS if not disabled from command line

[ Aurelien Jarno ]
* rtlwifi: fix build when PCI is not enabled.

[ Martin Michlmayr ]
* rtlwifi: Eliminate udelay calls with too large values (Closes: #620204)
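The rtlwifi fix above works around a hard limit: on some architectures (notably ARM), udelay() with a constant argument above a couple of milliseconds fails at link time with __bad_udelay, and long busy-waits belong in the millisecond helpers anyway. A minimal before/after sketch with hypothetical delay values (the actual rtlwifi call sites may differ):

        /* Hypothetical call site: a 50 ms delay must not use udelay().
         * On ARM, a constant argument above ~2000 us triggers the
         * __bad_udelay link error. */
        udelay(50000);  /* before: too large for udelay() */
        mdelay(50);     /* after: same delay in ms; msleep(50) if sleeping is allowed */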

--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -7,29 +7,33 @@
  * the Free Software Foundation.
  */
 
+#include <linux/slab.h>
 #include <linux/kernel.h>
-#include <linux/device.h>
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/workqueue.h>
 #include "ring_sw.h"
+#include "trigger.h"
 
-static inline int __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
-                                            int bytes_per_datum, int length)
+static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
+                                                int bytes_per_datum, int length)
 {
         if ((length == 0) || (bytes_per_datum == 0))
                 return -EINVAL;
-
-        __iio_init_ring_buffer(&ring->buf, bytes_per_datum, length);
-        ring->use_lock = __SPIN_LOCK_UNLOCKED((ring)->use_lock);
-        ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
-        ring->read_p = 0;
-        ring->write_p = 0;
-        ring->last_written_p = 0;
-        ring->half_p = 0;
+        __iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
+        ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
+        ring->read_p = NULL;
+        ring->write_p = NULL;
+        ring->last_written_p = NULL;
+        ring->half_p = NULL;
         return ring->data ? 0 : -ENOMEM;
 }
 
+static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
+{
+        spin_lock_init(&ring->use_lock);
+}
+
 static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
 {
         kfree(ring->data);
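A note on the hunk above: the old __iio_init_sw_ring_buffer() both initialised the spinlock and allocated storage, so re-running it on a buffer update reassigned a lock that was held at the time. The new code splits one-time initialisation (__iio_init_sw_ring_buffer) from the repeatable allocation (__iio_allocate_sw_ring_buffer). A minimal sketch of the resulting call pattern, using a hypothetical setup helper:

        /* Hypothetical helper: init once, then allocate; the allocation
         * may later be freed and redone by iio_request_update_sw_rb(). */
        static int example_ring_setup(struct iio_sw_ring_buffer *ring)
        {
                __iio_init_sw_ring_buffer(ring);        /* once: spin_lock_init() */
                return __iio_allocate_sw_ring_buffer(ring, 4 /* bytes per datum */,
                                                     128 /* length */);
        }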
@@ -59,25 +63,24 @@
  * in the device driver */
 /* Lock always held if their is a chance this may be called */
 /* Only one of these per ring may run concurrently - enforced by drivers */
-int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
-                         unsigned char *data,
-                         s64 timestamp)
+static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
+                                unsigned char *data, s64 timestamp)
 {
         int ret = 0;
         int code;
         unsigned char *temp_ptr, *change_test_ptr;
 
         /* initial store */
-        if (unlikely(ring->write_p == 0)) {
+        if (unlikely(ring->write_p == NULL)) {
                 ring->write_p = ring->data;
                 /* Doesn't actually matter if this is out of the set
                  * as long as the read pointer is valid before this
                  * passes it - guaranteed as set later in this function.
                  */
-                ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
+                ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
         }
         /* Copy data to where ever the current write pointer says */
-        memcpy(ring->write_p, data, ring->buf.bpd);
+        memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
         barrier();
         /* Update the pointer used to get most recent value.
          * Always valid as either points to latest or second latest value.
@@ -88,9 +91,9 @@
         /* temp_ptr used to ensure we never have an invalid pointer
          * it may be slightly lagging, but never invalid
          */
-        temp_ptr = ring->write_p + ring->buf.bpd;
+        temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
         /* End of ring, back to the beginning */
-        if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
+        if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
                 temp_ptr = ring->data;
         /* Update the write pointer
          * always valid as long as this is the only function able to write.
@@ -99,7 +102,7 @@
          */
         ring->write_p = temp_ptr;
 
-        if (ring->read_p == 0)
+        if (ring->read_p == NULL)
                 ring->read_p = ring->data;
         /* Buffer full - move the read pointer and create / escalate
          * ring event */
@@ -109,9 +112,9 @@
          */
         else if (ring->write_p == ring->read_p) {
                 change_test_ptr = ring->read_p;
-                temp_ptr = change_test_ptr + ring->buf.bpd;
+                temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
                 if (temp_ptr
-                    == ring->data + ring->buf.length*ring->buf.bpd) {
+                    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
                         temp_ptr = ring->data;
                 }
                 /* We are moving pointer on one because the ring is full.  Any
@@ -123,8 +126,7 @@
                 spin_lock(&ring->buf.shared_ev_pointer.lock);
 
                 ret = iio_push_or_escallate_ring_event(&ring->buf,
-                                                       IIO_EVENT_CODE_RING_100_FULL,
-                                                       timestamp);
+                               IIO_EVENT_CODE_RING_100_FULL, timestamp);
                 spin_unlock(&ring->buf.shared_ev_pointer.lock);
                 if (ret)
                         goto error_ret;
@@ -133,8 +135,8 @@
         /* There are definite 'issues' with this and chances of
          * simultaneous read */
         /* Also need to use loop count to ensure this only happens once */
-        ring->half_p += ring->buf.bpd;
-        if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
+        ring->half_p += ring->buf.bytes_per_datum;
+        if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
                 ring->half_p = ring->data;
         if (ring->half_p == ring->read_p) {
                 spin_lock(&ring->buf.shared_ev_pointer.lock);
@@ -162,15 +164,15 @@
          *  read something that is not a whole number of bpds.
          * Return an error.
          */
-        if (count % ring->buf.bpd) {
+        if (count % ring->buf.bytes_per_datum) {
                 ret = -EINVAL;
                 printk(KERN_INFO "Ring buffer read request not whole number of"
-                       "samples: Request bytes %zd, Current bpd %d\n",
-                       count, ring->buf.bpd);
+                       "samples: Request bytes %zd, Current bytes per datum %d\n",
+                       count, ring->buf.bytes_per_datum);
                 goto error_ret;
         }
         /* Limit size to whole of ring buffer */
-        bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);
+        bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length), count);
 
         *data = kmalloc(bytes_to_rip, GFP_KERNEL);
         if (*data == NULL) {
@@ -180,7 +182,7 @@
 
         /* build local copy */
         initial_read_p = ring->read_p;
-        if (unlikely(initial_read_p == 0)) { /* No data here as yet */
+        if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
                 ret = 0;
                 goto error_free_data_cpy;
         }
@@ -212,7 +214,7 @@
         } else {
                 /* going through 'end' of ring buffer */
                 max_copied = ring->data
-                        + ring->buf.length*ring->buf.bpd - initial_read_p;
+                        + ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
                 memcpy(*data, initial_read_p, max_copied);
                 /* possible we are done if we align precisely with end */
                 if (max_copied == bytes_to_rip)
@@ -238,7 +240,7 @@
         if (initial_read_p <= current_read_p)
                 *dead_offset = current_read_p - initial_read_p;
         else
-                *dead_offset = ring->buf.length*ring->buf.bpd
+                *dead_offset = ring->buf.length*ring->buf.bytes_per_datum
                         - (initial_read_p - current_read_p);
 
         /* possible issue if the initial write has been lapped or indeed
@@ -278,8 +280,8 @@
 }
 EXPORT_SYMBOL(iio_store_to_sw_rb);
 
-int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
-                               unsigned char *data)
+static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
+                                      unsigned char *data)
 {
         unsigned char *last_written_p_copy;
 
@@ -289,11 +291,11 @@
         last_written_p_copy = ring->last_written_p;
         barrier(); /*unnessecary? */
         /* Check there is anything here */
-        if (last_written_p_copy == 0)
+        if (last_written_p_copy == NULL)
                 return -EAGAIN;
-        memcpy(data, last_written_p_copy, ring->buf.bpd);
+        memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);
 
-        if (unlikely(ring->last_written_p >= last_written_p_copy))
+        if (unlikely(ring->last_written_p != last_written_p_copy))
                 goto again;
 
         iio_unmark_sw_rb_in_use(&ring->buf);
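The functional change in the hunk above is the comparison: the reader snapshots last_written_p, copies one datum, and must retry if the writer moved the pointer in the meantime. The old '>=' test did not correspond to "the pointer moved" (in particular it ignored a writer that wrapped back to a lower address); '!=' retries exactly when the snapshot went stale, whichever direction the pointer moved. A compressed sketch of the retry pattern (the driver itself uses a goto-based loop):

        /* Snapshot-and-retry read: trust the copy only if the writer
         * pointer did not move at all while memcpy() ran. */
        unsigned char *snap;
        do {
                snap = ring->last_written_p;
                barrier();
                memcpy(data, snap, ring->buf.bytes_per_datum);
                barrier();
        } while (ring->last_written_p != snap);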
@@ -320,30 +322,31 @@
                 goto error_ret;
         }
         __iio_free_sw_ring_buffer(ring);
-        ret = __iio_init_sw_ring_buffer(ring, ring->buf.bpd, ring->buf.length);
+        ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
+                                            ring->buf.length);
 error_ret:
         spin_unlock(&ring->use_lock);
         return ret;
 }
 EXPORT_SYMBOL(iio_request_update_sw_rb);
 
-int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
+int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r)
 {
         struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
-        return ring->buf.bpd;
+        return ring->buf.bytes_per_datum;
 }
-EXPORT_SYMBOL(iio_get_bpd_sw_rb);
+EXPORT_SYMBOL(iio_get_bytes_per_datum_sw_rb);
 
-int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
+int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd)
 {
-        if (r->bpd != bpd) {
-                r->bpd = bpd;
+        if (r->bytes_per_datum != bpd) {
+                r->bytes_per_datum = bpd;
                 if (r->access.mark_param_change)
                         r->access.mark_param_change(r);
         }
         return 0;
 }
-EXPORT_SYMBOL(iio_set_bpd_sw_rb);
+EXPORT_SYMBOL(iio_set_bytes_per_datum_sw_rb);
 
 int iio_get_length_sw_rb(struct iio_ring_buffer *r)
 {
@@ -377,14 +380,14 @@
 }
 
 static IIO_RING_ENABLE_ATTR;
-static IIO_RING_BPS_ATTR;
+static IIO_RING_BYTES_PER_DATUM_ATTR;
 static IIO_RING_LENGTH_ATTR;
 
 /* Standard set of ring buffer attributes */
 static struct attribute *iio_ring_attributes[] = {
         &dev_attr_length.attr,
-        &dev_attr_bps.attr,
-        &dev_attr_ring_enable.attr,
+        &dev_attr_bytes_per_datum.attr,
+        &dev_attr_enable.attr,
         NULL,
 };
 
@@ -409,14 +412,14 @@
 
         ring = kzalloc(sizeof *ring, GFP_KERNEL);
         if (!ring)
-                return 0;
+                return NULL;
         buf = &ring->buf;
-
         iio_ring_buffer_init(buf, indio_dev);
+        __iio_init_sw_ring_buffer(ring);
         buf->dev.type = &iio_sw_ring_type;
         device_initialize(&buf->dev);
         buf->dev.parent = &indio_dev->dev;
-        buf->dev.class = &iio_class;
+        buf->dev.bus = &iio_bus_type;
         dev_set_drvdata(&buf->dev, (void *)buf);
 
         return buf;
@@ -429,5 +432,74 @@
                 iio_put_ring_buffer(r);
 }
 EXPORT_SYMBOL(iio_sw_rb_free);
+
+int iio_sw_ring_preenable(struct iio_dev *indio_dev)
+{
+        struct iio_ring_buffer *ring = indio_dev->ring;
+        size_t size;
+        dev_dbg(&indio_dev->dev, "%s\n", __func__);
+        /* Check if there are any scan elements enabled, if not fail*/
+        if (!(ring->scan_count || ring->scan_timestamp))
+                return -EINVAL;
+        if (ring->scan_timestamp)
+                if (ring->scan_count)
+                        /* Timestamp (aligned to s64) and data */
+                        size = (((ring->scan_count * ring->bpe)
+                                        + sizeof(s64) - 1)
+                                & ~(sizeof(s64) - 1))
+                                + sizeof(s64);
+                else /* Timestamp only  */
+                        size = sizeof(s64);
+        else /* Data only */
+                size = ring->scan_count * ring->bpe;
+        ring->access.set_bytes_per_datum(ring, size);
+
+        return 0;
+}
+EXPORT_SYMBOL(iio_sw_ring_preenable);
+
+void iio_sw_trigger_bh_to_ring(struct work_struct *work_s)
+{
+        struct iio_sw_ring_helper_state *st
+                = container_of(work_s, struct iio_sw_ring_helper_state,
+                        work_trigger_to_ring);
+        struct iio_ring_buffer *ring = st->indio_dev->ring;
+        int len = 0;
+        size_t datasize = ring->access.get_bytes_per_datum(ring);
+        char *data = kmalloc(datasize, GFP_KERNEL);
+
+        if (data == NULL) {
+                dev_err(st->indio_dev->dev.parent,
+                        "memory alloc failed in ring bh");
+                return;
+        }
+
+        if (ring->scan_count)
+                len = st->get_ring_element(st, data);
+
+          /* Guaranteed to be aligned with 8 byte boundary */
+        if (ring->scan_timestamp)
+                *(s64 *)(((phys_addr_t)data + len
+                                + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
+                        = st->last_timestamp;
+        ring->access.store_to(ring,
+                        (u8 *)data,
+                        st->last_timestamp);
+
+        iio_trigger_notify_done(st->indio_dev->trig);
+        kfree(data);
+
+        return;
+}
+EXPORT_SYMBOL(iio_sw_trigger_bh_to_ring);
+
+void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time)
+{       struct iio_sw_ring_helper_state *h
+                = iio_dev_get_devdata(indio_dev);
+        h->last_timestamp = time;
+        schedule_work(&h->work_trigger_to_ring);
+}
+EXPORT_SYMBOL(iio_sw_poll_func_th);
+
 MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
 MODULE_LICENSE("GPL");
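A closing note on the new iio_sw_ring_preenable() above: when both sample data and a timestamp are enabled, the datum size is the sample payload rounded up to an s64 boundary, plus the s64 timestamp itself. A worked example, assuming three enabled channels of 2 bytes each (scan_count = 3, bpe = 2):

        /* payload:   3 * 2 = 6 bytes
         * round up:  (6 + sizeof(s64) - 1) & ~(sizeof(s64) - 1) = 8
         * timestamp: 8 + sizeof(s64) = 16 bytes per datum */
        size = ((3 * 2 + sizeof(s64) - 1) & ~(sizeof(s64) - 1)) + sizeof(s64);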