/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
22
#include <linux/init.h>
23
#include <linux/list.h>
24
#include <linux/module.h>
25
#include <linux/kernel.h>
26
#include <linux/bitmap.h>
27
#include <linux/usb.h>
28
#include <linux/i2c.h>
30
#include <linux/mutex.h>
31
#include <linux/slab.h>
33
#include <media/v4l2-common.h>
34
#include <media/v4l2-ioctl.h>
35
#include <media/v4l2-chip-ident.h>
36
#include <media/msp3400.h>
37
#include <media/tuner.h>
40
#include "cx231xx-vbi.h"
42
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
44
char *errmsg = "Unknown";
48
errmsg = "unlinked synchronuously";
51
errmsg = "unlinked asynchronuously";
54
errmsg = "Buffer error (overrun)";
57
errmsg = "Stalled (device not responding)";
60
errmsg = "Babble (bad cable?)";
63
errmsg = "Bit-stuff error (bad cable?)";
66
errmsg = "CRC/Timeout (could be anything)";
69
errmsg = "Device does not respond";
73
cx231xx_err(DRIVER_NAME "URB status %d [%s].\n", status,
76
cx231xx_err(DRIVER_NAME "URB packet %d, status %d [%s].\n",
77
packet, status, errmsg);
82
* Controls the isoc copy of each urb packet
84
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
86
struct cx231xx_buffer *buf;
87
struct cx231xx_dmaqueue *dma_q = urb->context;
89
unsigned char *p_buffer;
90
u32 bytes_parsed = 0, buffer_size = 0;
96
if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
99
if (urb->status < 0) {
100
print_err_status(dev, -1, urb->status);
101
if (urb->status == -ENOENT)
105
buf = dev->vbi_mode.bulk_ctl.buf;
107
/* get buffer pointer and length */
108
p_buffer = urb->transfer_buffer;
109
buffer_size = urb->actual_length;
111
if (buffer_size > 0) {
114
if (dma_q->is_partial_line) {
115
/* Handle the case where we were working on a partial
117
sav_eav = dma_q->last_sav;
119
/* Check for a SAV/EAV overlapping the
122
sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
128
/* Get the first line if we have some portion of an SAV/EAV from
129
the last buffer or a partial line */
131
bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
132
sav_eav, /* SAV/EAV */
133
p_buffer + bytes_parsed, /* p_buffer */
134
buffer_size - bytes_parsed); /* buffer size */
137
/* Now parse data that is completely in this buffer */
138
dma_q->is_partial_line = 0;
140
while (bytes_parsed < buffer_size) {
143
sav_eav = cx231xx_find_next_SAV_EAV(
144
p_buffer + bytes_parsed, /* p_buffer */
145
buffer_size - bytes_parsed, /* buffer size */
146
&bytes_used); /* bytes used to get SAV/EAV */
148
bytes_parsed += bytes_used;
151
if (sav_eav && (bytes_parsed < buffer_size)) {
152
bytes_parsed += cx231xx_get_vbi_line(dev,
153
dma_q, sav_eav, /* SAV/EAV */
154
p_buffer+bytes_parsed, /* p_buffer */
155
buffer_size-bytes_parsed);/*buf size*/
159
/* Save the last four bytes of the buffer so we can
160
check the buffer boundary condition next time */
161
memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
168
/* ------------------------------------------------------------------
170
------------------------------------------------------------------*/
173
vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
176
struct cx231xx_fh *fh = vq->priv_data;
177
struct cx231xx *dev = fh->dev;
180
height = ((dev->norm & V4L2_STD_625_50) ?
181
PAL_VBI_LINES : NTSC_VBI_LINES);
183
*size = (dev->width * height * 2 * 2);
185
*count = CX231XX_DEF_VBI_BUF;
187
if (*count < CX231XX_MIN_BUF)
188
*count = CX231XX_MIN_BUF;
193
/* This is called *without* dev->slock held; please keep it that way */
194
static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
196
struct cx231xx_fh *fh = vq->priv_data;
197
struct cx231xx *dev = fh->dev;
198
unsigned long flags = 0;
202
/* We used to wait for the buffer to finish here, but this didn't work
203
because, as we were keeping the state as VIDEOBUF_QUEUED,
204
videobuf_queue_cancel marked it as finished for us.
205
(Also, it could wedge forever if the hardware was misconfigured.)
207
This should be safe; by the time we get here, the buffer isn't
208
queued anymore. If we ever start marking the buffers as
209
VIDEOBUF_ACTIVE, it won't be, though.
211
spin_lock_irqsave(&dev->vbi_mode.slock, flags);
212
if (dev->vbi_mode.bulk_ctl.buf == buf)
213
dev->vbi_mode.bulk_ctl.buf = NULL;
214
spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
216
videobuf_vmalloc_free(&buf->vb);
217
buf->vb.state = VIDEOBUF_NEEDS_INIT;
221
vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
222
enum v4l2_field field)
224
struct cx231xx_fh *fh = vq->priv_data;
225
struct cx231xx_buffer *buf =
226
container_of(vb, struct cx231xx_buffer, vb);
227
struct cx231xx *dev = fh->dev;
228
int rc = 0, urb_init = 0;
231
height = ((dev->norm & V4L2_STD_625_50) ?
232
PAL_VBI_LINES : NTSC_VBI_LINES);
233
buf->vb.size = ((dev->width << 1) * height * 2);
235
if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
238
buf->vb.width = dev->width;
239
buf->vb.height = height;
240
buf->vb.field = field;
241
buf->vb.field = V4L2_FIELD_SEQ_TB;
243
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
244
rc = videobuf_iolock(vq, &buf->vb, NULL);
249
if (!dev->vbi_mode.bulk_ctl.num_bufs)
253
rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
254
CX231XX_NUM_VBI_BUFS,
255
dev->vbi_mode.alt_max_pkt_size[0],
256
cx231xx_isoc_vbi_copy);
261
buf->vb.state = VIDEOBUF_PREPARED;
265
free_buffer(vq, buf);
270
vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
272
struct cx231xx_buffer *buf =
273
container_of(vb, struct cx231xx_buffer, vb);
274
struct cx231xx_fh *fh = vq->priv_data;
275
struct cx231xx *dev = fh->dev;
276
struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
278
buf->vb.state = VIDEOBUF_QUEUED;
279
list_add_tail(&buf->vb.queue, &vidq->active);
283
static void vbi_buffer_release(struct videobuf_queue *vq,
284
struct videobuf_buffer *vb)
286
struct cx231xx_buffer *buf =
287
container_of(vb, struct cx231xx_buffer, vb);
290
free_buffer(vq, buf);
293
struct videobuf_queue_ops cx231xx_vbi_qops = {
294
.buf_setup = vbi_buffer_setup,
295
.buf_prepare = vbi_buffer_prepare,
296
.buf_queue = vbi_buffer_queue,
297
.buf_release = vbi_buffer_release,
300
/* ------------------------------------------------------------------
302
------------------------------------------------------------------*/
305
* IRQ callback, called by URB callback
307
static void cx231xx_irq_vbi_callback(struct urb *urb)
309
struct cx231xx_dmaqueue *dma_q = urb->context;
310
struct cx231xx_video_mode *vmode =
311
container_of(dma_q, struct cx231xx_video_mode, vidq);
312
struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
315
switch (urb->status) {
316
case 0: /* success */
317
case -ETIMEDOUT: /* NAK */
319
case -ECONNRESET: /* kill */
324
cx231xx_err(DRIVER_NAME "urb completition error %d.\n",
329
/* Copy data from URB */
330
spin_lock(&dev->vbi_mode.slock);
331
rc = dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
332
spin_unlock(&dev->vbi_mode.slock);
337
urb->status = usb_submit_urb(urb, GFP_ATOMIC);
339
cx231xx_err(DRIVER_NAME "urb resubmit failed (error=%i)\n",
345
* Stop and Deallocate URBs
347
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
352
cx231xx_info(DRIVER_NAME "cx231xx: called cx231xx_uninit_vbi_isoc\n");
354
dev->vbi_mode.bulk_ctl.nfields = -1;
355
for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
356
urb = dev->vbi_mode.bulk_ctl.urb[i];
358
if (!irqs_disabled())
363
if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
365
kfree(dev->vbi_mode.bulk_ctl.
367
dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
371
dev->vbi_mode.bulk_ctl.urb[i] = NULL;
373
dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
376
kfree(dev->vbi_mode.bulk_ctl.urb);
377
kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);
379
dev->vbi_mode.bulk_ctl.urb = NULL;
380
dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
381
dev->vbi_mode.bulk_ctl.num_bufs = 0;
383
cx231xx_capture_start(dev, 0, Vbi);
385
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);
388
* Allocate URBs and start IRQ
390
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
391
int num_bufs, int max_pkt_size,
392
int (*bulk_copy) (struct cx231xx *dev,
395
struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
401
cx231xx_info(DRIVER_NAME "cx231xx: called cx231xx_prepare_isoc\n");
403
/* De-allocates all pending stuff */
404
cx231xx_uninit_vbi_isoc(dev);
406
/* clear if any halt */
407
usb_clear_halt(dev->udev,
408
usb_rcvbulkpipe(dev->udev,
409
dev->vbi_mode.end_point_addr));
411
dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
412
dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
414
dma_q->is_partial_line = 0;
416
dma_q->current_field = -1;
417
dma_q->bytes_left_in_line = dev->width << 1;
418
dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
419
PAL_VBI_LINES : NTSC_VBI_LINES);
420
dma_q->lines_completed = 0;
421
for (i = 0; i < 8; i++)
422
dma_q->partial_buf[i] = 0;
424
dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
426
if (!dev->vbi_mode.bulk_ctl.urb) {
427
cx231xx_errdev("cannot alloc memory for usb buffers\n");
431
dev->vbi_mode.bulk_ctl.transfer_buffer =
432
kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
433
if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
434
cx231xx_errdev("cannot allocate memory for usbtransfer\n");
435
kfree(dev->vbi_mode.bulk_ctl.urb);
439
dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
440
dev->vbi_mode.bulk_ctl.buf = NULL;
442
sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;
444
/* allocate urbs and transfer buffers */
445
for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
447
urb = usb_alloc_urb(0, GFP_KERNEL);
449
cx231xx_err(DRIVER_NAME
450
": cannot alloc bulk_ctl.urb %i\n", i);
451
cx231xx_uninit_vbi_isoc(dev);
454
dev->vbi_mode.bulk_ctl.urb[i] = urb;
455
urb->transfer_flags = 0;
457
dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
458
kzalloc(sb_size, GFP_KERNEL);
459
if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
460
cx231xx_err(DRIVER_NAME
461
": unable to allocate %i bytes for transfer"
462
" buffer %i%s\n", sb_size, i,
463
in_interrupt() ? " while in int" : "");
464
cx231xx_uninit_vbi_isoc(dev);
468
pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
469
usb_fill_bulk_urb(urb, dev->udev, pipe,
470
dev->vbi_mode.bulk_ctl.transfer_buffer[i],
471
sb_size, cx231xx_irq_vbi_callback, dma_q);
474
init_waitqueue_head(&dma_q->wq);
476
/* submit urbs and enables IRQ */
477
for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
478
rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
480
cx231xx_err(DRIVER_NAME
481
": submit of urb %i failed (error=%i)\n", i,
483
cx231xx_uninit_vbi_isoc(dev);
488
cx231xx_capture_start(dev, 1, Vbi);
492
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);
494
/* Translate the SAV/EAV code into a field number and copy the line.
 * Returns the number of bytes consumed (0 if the code is not a VBI SAV). */
u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

	switch (sav_eav) {
	case SAV_VBI_FIELD1:
		current_field = 1;
		break;

	case SAV_VBI_FIELD2:
		current_field = 2;
		break;
	default:
		break;
	}

	if (current_field < 0)
		return bytes_copied;

	dma_q->last_sav = sav_eav;

	bytes_copied =
	    cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
				  current_field);

	return bytes_copied;
}
526
* Announces that a buffer were filled and request the next
528
static inline void vbi_buffer_filled(struct cx231xx *dev,
529
struct cx231xx_dmaqueue *dma_q,
530
struct cx231xx_buffer *buf)
532
/* Advice that buffer was filled */
533
/* cx231xx_info(DRIVER_NAME "[%p/%d] wakeup\n", buf, buf->vb.i); */
535
buf->vb.state = VIDEOBUF_DONE;
536
buf->vb.field_count++;
537
do_gettimeofday(&buf->vb.ts);
539
dev->vbi_mode.bulk_ctl.buf = NULL;
541
list_del(&buf->vb.queue);
542
wake_up(&buf->vb.done);
545
u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
546
u8 *p_line, u32 length, int field_number)
549
struct cx231xx_buffer *buf;
550
u32 _line_size = dev->width * 2;
552
if (dma_q->current_field == -1) {
553
/* Just starting up */
554
cx231xx_reset_vbi_buffer(dev, dma_q);
557
if (dma_q->current_field != field_number)
558
dma_q->lines_completed = 0;
560
/* get the buffer pointer */
561
buf = dev->vbi_mode.bulk_ctl.buf;
563
/* Remember the field number for next time */
564
dma_q->current_field = field_number;
566
bytes_to_copy = dma_q->bytes_left_in_line;
567
if (bytes_to_copy > length)
568
bytes_to_copy = length;
570
if (dma_q->lines_completed >= dma_q->lines_per_field) {
571
dma_q->bytes_left_in_line -= bytes_to_copy;
572
dma_q->is_partial_line =
573
(dma_q->bytes_left_in_line == 0) ? 0 : 1;
577
dma_q->is_partial_line = 1;
579
/* If we don't have a buffer, just return the number of bytes we would
580
have copied if we had a buffer. */
582
dma_q->bytes_left_in_line -= bytes_to_copy;
583
dma_q->is_partial_line =
584
(dma_q->bytes_left_in_line == 0) ? 0 : 1;
585
return bytes_to_copy;
588
/* copy the data to video buffer */
589
cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);
591
dma_q->pos += bytes_to_copy;
592
dma_q->bytes_left_in_line -= bytes_to_copy;
594
if (dma_q->bytes_left_in_line == 0) {
596
dma_q->bytes_left_in_line = _line_size;
597
dma_q->lines_completed++;
598
dma_q->is_partial_line = 0;
600
if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
602
vbi_buffer_filled(dev, dma_q, buf);
605
dma_q->lines_completed = 0;
606
cx231xx_reset_vbi_buffer(dev, dma_q);
610
return bytes_to_copy;
614
* video-buf generic routine to get the next available buffer
616
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
617
struct cx231xx_buffer **buf)
619
struct cx231xx_video_mode *vmode =
620
container_of(dma_q, struct cx231xx_video_mode, vidq);
621
struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
624
if (list_empty(&dma_q->active)) {
625
cx231xx_err(DRIVER_NAME ": No active queue to serve\n");
626
dev->vbi_mode.bulk_ctl.buf = NULL;
631
/* Get the next buffer */
632
*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
634
/* Cleans up buffer - Useful for testing for frame/URB loss */
635
outp = videobuf_to_vmalloc(&(*buf)->vb);
636
memset(outp, 0, (*buf)->vb.size);
638
dev->vbi_mode.bulk_ctl.buf = *buf;
643
void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
644
struct cx231xx_dmaqueue *dma_q)
646
struct cx231xx_buffer *buf;
648
buf = dev->vbi_mode.bulk_ctl.buf;
651
/* first try to get the buffer */
652
get_next_vbi_buf(dma_q, &buf);
655
dma_q->current_field = -1;
658
dma_q->bytes_left_in_line = dev->width << 1;
659
dma_q->lines_completed = 0;
662
int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
663
u8 *p_buffer, u32 bytes_to_copy)
665
u8 *p_out_buffer = NULL;
666
u32 current_line_bytes_copied = 0;
667
struct cx231xx_buffer *buf;
668
u32 _line_size = dev->width << 1;
672
buf = dev->vbi_mode.bulk_ctl.buf;
677
p_out_buffer = videobuf_to_vmalloc(&buf->vb);
679
if (dma_q->bytes_left_in_line != _line_size) {
680
current_line_bytes_copied =
681
_line_size - dma_q->bytes_left_in_line;
684
offset = (dma_q->lines_completed * _line_size) +
685
current_line_bytes_copied;
687
if (dma_q->current_field == 2) {
688
/* Populate the second half of the frame */
689
offset += (dev->width * 2 * dma_q->lines_per_field);
692
/* prepare destination address */
693
startwrite = p_out_buffer + offset;
695
lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
696
bytes_to_copy : dma_q->bytes_left_in_line;
698
memcpy(startwrite, p_buffer, lencopy);
703
u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
704
struct cx231xx_dmaqueue *dma_q)
708
height = ((dev->norm & V4L2_STD_625_50) ?
709
PAL_VBI_LINES : NTSC_VBI_LINES);
710
if (dma_q->lines_completed == height && dma_q->current_field == 2)