/*
 * TI OMAP3 ISP - Statistics core
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc
 *
 * Contacts: David Cohen <dacohen@gmail.com>
 *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define IS_COHERENT_BUF(stat)	((stat)->dma_ch >= 0)

/*
 * MAGIC_SIZE must always be the greatest common divisor of
 * AEWB_PACKET_SIZE and AF_PAXEL_SIZE.
 */
#define MAGIC_NUM		0x55

/* HACK: AF module seems to be writing one more paxel data than it should. */
#define AF_EXTRA_DATA		OMAP3ISP_AF_PAXEL_SIZE

/*
 * HACK: H3A modules go to an invalid state after an SBL overflow. It makes
 * the next buffer start to be written at the same point where the overflow
 * occurred instead of at the configured address. The only known way to make
 * the module go back to a valid state is having a valid buffer processing.
 * Of course this requires at least a doubled buffer size to avoid an access
 * to an invalid memory region. But it does not fix everything: more than one
 * consecutive SBL overflow may happen, and in that case it is unpredictable
 * how many buffers the allocated memory should fit. For that case, a recover
 * configuration was created. It produces the minimum buffer size for each H3A
 * module and decreases the chance of further SBL overflows. This recover
 * state is enabled every time an SBL overflow occurs. As the output buffer
 * size isn't big, it's possible to reserve extra space able to fit many
 * recover buffers, making it extremely unlikely to access an invalid memory
 * region.
 */
#define NUM_H3A_RECOVER_BUFS	10

/*
 * HACK: Because of HW issues the generic layer sometimes needs to have
 * different behaviour for different statistic modules.
 */
#define IS_H3A_AF(stat)		((stat) == &(stat)->isp->isp_af)
#define IS_H3A_AEWB(stat)	((stat) == &(stat)->isp->isp_aewb)
#define IS_H3A(stat)		(IS_H3A_AF(stat) || IS_H3A_AEWB(stat))

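/*
 * __isp_stat_buf_sync_magic - Sync only the magic word regions of a buffer.
 *
 * Syncs the MAGIC_SIZE bytes at the beginning and at the end of the
 * statistics buffer for the given DMA direction, using the dma_sync
 * callback provided by the caller.
 */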
static void __isp_stat_buf_sync_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf,
				      u32 buf_size, enum dma_data_direction dir,
				      void (*dma_sync)(struct device *,
						       dma_addr_t, unsigned long,
						       size_t,
						       enum dma_data_direction))
{
	struct device *dev = stat->isp->dev;
	struct page *pg;
	dma_addr_t dma_addr;
	u32 offset;

	/* Initial magic words */
	pg = vmalloc_to_page(buf->virt_addr);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);

	/* Final magic words */
	pg = vmalloc_to_page(buf->virt_addr + buf_size);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
	dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
}

static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
					       struct ispstat_buffer *buf,
					       u32 buf_size,
					       enum dma_data_direction dir)
{
	if (IS_COHERENT_BUF(stat))
		return;

	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
				  dma_sync_single_range_for_device);
}

static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
					    struct ispstat_buffer *buf,
					    u32 buf_size,
					    enum dma_data_direction dir)
{
	if (IS_COHERENT_BUF(stat))
		return;

	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
				  dma_sync_single_range_for_cpu);
}

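/*
 * isp_stat_buf_check_magic - Verify that the hardware has overwritten the
 * initial magic words and left the trailing ones untouched, i.e. that the
 * buffer has been completely written without being overrun.
 */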
static int isp_stat_buf_check_magic(struct ispstat *stat,
				    struct ispstat_buffer *buf)
{
	const u32 buf_size = IS_H3A_AF(stat) ?
			     buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
	u8 *w;
	u8 *end;
	int ret = -EINVAL;

	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);

	/* Checking initial magic numbers. They shouldn't be here anymore. */
	for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
		if (likely(*w != MAGIC_NUM))
			ret = 0;

	if (ret) {
		dev_dbg(stat->isp->dev,
			"%s: beginning magic check does not match.\n",
			stat->subdev.name);
		return ret;
	}

	/* Checking magic numbers at the end. They must be still here. */
	for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
	     w < end; w++) {
		if (unlikely(*w != MAGIC_NUM)) {
			dev_dbg(stat->isp->dev,
				"%s: ending magic check does not match.\n",
				stat->subdev.name);
			return -EINVAL;
		}
	}

	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
					   DMA_FROM_DEVICE);

	return 0;
}

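/*
 * isp_stat_buf_insert_magic - Write the magic words at both ends of the
 * buffer before handing it over to the hardware.
 */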
static void isp_stat_buf_insert_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	const u32 buf_size = IS_H3A_AF(stat) ?
			     stat->buf_size + AF_EXTRA_DATA : stat->buf_size;

	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);

	/*
	 * Inserting MAGIC_NUM at the beginning and end of the buffer.
	 * buf->buf_size is set only after the buffer is queued. For now the
	 * right buf_size for the current configuration is pointed to by
	 * stat->buf_size.
	 */
	memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
	memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);

	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
					   DMA_BIDIRECTIONAL);
}

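/*
 * isp_stat_buf_sync_for_device - Sync the whole statistics buffer so that the
 * device sees a consistent view. No-op for DMA-coherent buffers.
 */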
static void isp_stat_buf_sync_for_device(struct ispstat *stat,
					 struct ispstat_buffer *buf)
{
	if (IS_COHERENT_BUF(stat))
		return;

	dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl,
			       buf->iovm->sgt->nents, DMA_FROM_DEVICE);
}

static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	if (IS_COHERENT_BUF(stat))
		return;

	dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl,
			    buf->iovm->sgt->nents, DMA_FROM_DEVICE);
}

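/* Mark all statistics buffers as empty so they can be reused from scratch. */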
static void isp_stat_buf_clear(struct ispstat *stat)
{
	int i;

	for (i = 0; i < STAT_MAX_BUFS; i++)
		stat->buf[i].empty = 1;
}

static struct ispstat_buffer *
__isp_stat_buf_find(struct ispstat *stat, int look_empty)
{
	struct ispstat_buffer *found = NULL;
	int i;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *curr = &stat->buf[i];

		/*
		 * Don't select the buffer which is being copied to
		 * userspace or used by the module.
		 */
		if (curr == stat->locked_buf || curr == stat->active_buf)
			continue;

		/* Don't select uninitialised buffers if it's not required */
		if (!look_empty && curr->empty)
			continue;

		/* Pick uninitialised buffer over anything else if look_empty */
		if (curr->empty) {
			found = curr;
			break;
		}

		/* Choose the oldest buffer */
		if (!found ||
		    (s32)curr->frame_number - (s32)found->frame_number < 0)
			found = curr;
	}

	return found;
}

static inline struct ispstat_buffer *
isp_stat_buf_find_oldest(struct ispstat *stat)
{
	return __isp_stat_buf_find(stat, 0);
}

static inline struct ispstat_buffer *
isp_stat_buf_find_oldest_or_empty(struct ispstat *stat)
{
	return __isp_stat_buf_find(stat, 1);
}

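/*
 * isp_stat_buf_queue - Timestamp the active buffer, validate its magic words
 * and queue it as the newest statistics buffer available to userspace.
 */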
static int isp_stat_buf_queue(struct ispstat *stat)
{
	if (!stat->active_buf)
		return STAT_NO_BUF;

	do_gettimeofday(&stat->active_buf->ts);

	stat->active_buf->buf_size = stat->buf_size;
	if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
		dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
			stat->subdev.name);
		return STAT_NO_BUF;
	}
	stat->active_buf->config_counter = stat->config_counter;
	stat->active_buf->frame_number = stat->frame_number;
	stat->active_buf->empty = 0;
	stat->active_buf = NULL;

	return STAT_BUF_DONE;
}

/* Get next free buffer to write the statistics to and mark it active. */
static void isp_stat_buf_next(struct ispstat *stat)
{
	if (unlikely(stat->active_buf))
		/* Overwriting unused active buffer */
		dev_dbg(stat->isp->dev,
			"%s: new buffer requested without queuing active one.\n",
			stat->subdev.name);
	else
		stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
}

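/* Release the buffer that was locked for the userspace copy and sync it back for device access. */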
static void isp_stat_buf_release(struct ispstat *stat)
{
	unsigned long flags;

	isp_stat_buf_sync_for_device(stat, stat->locked_buf);
	spin_lock_irqsave(&stat->isp->stat_lock, flags);
	stat->locked_buf = NULL;
	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
}

/* Get buffer to userspace. */
static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
					       struct omap3isp_stat_data *data)
{
	int rval = 0;
	unsigned long flags;
	struct ispstat_buffer *buf;

	spin_lock_irqsave(&stat->isp->stat_lock, flags);

	while (1) {
		buf = isp_stat_buf_find_oldest(stat);
		if (!buf) {
			spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
			dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n",
				stat->subdev.name);
			return ERR_PTR(-EBUSY);
		}
		if (isp_stat_buf_check_magic(stat, buf)) {
			dev_dbg(stat->isp->dev,
				"%s: current buffer has corrupted data.\n",
				stat->subdev.name);
			/* Mark empty because it doesn't have valid data. */
			buf->empty = 1;
		} else {
			/* Buffer isn't corrupted. */
			break;
		}
	}

	stat->locked_buf = buf;

	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

	if (buf->buf_size > data->buf_size) {
		dev_warn(stat->isp->dev,
			 "%s: userspace's buffer size is not enough.\n",
			 stat->subdev.name);
		isp_stat_buf_release(stat);
		return ERR_PTR(-EINVAL);
	}

	isp_stat_buf_sync_for_cpu(stat, buf);

	rval = copy_to_user(data->buf, buf->virt_addr, buf->buf_size);

	if (rval) {
		dev_info(stat->isp->dev,
			 "%s: failed copying %d bytes of stat data\n",
			 stat->subdev.name, rval);
		buf = ERR_PTR(-EFAULT);
		isp_stat_buf_release(stat);
	}

	return buf;
}

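/* Free all statistics buffers, through the IOMMU or the DMA API as appropriate. */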
static void isp_stat_bufs_free(struct ispstat *stat)
{
	struct isp_device *isp = stat->isp;
	int i;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];

		if (!IS_COHERENT_BUF(stat)) {
			if (IS_ERR_OR_NULL((void *)buf->iommu_addr))
				continue;
			if (buf->iovm)
				dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
					     buf->iovm->sgt->nents,
					     DMA_FROM_DEVICE);
			iommu_vfree(isp->iommu, buf->iommu_addr);
		} else {
			if (!buf->virt_addr)
				continue;
			dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
					  buf->virt_addr, buf->dma_addr);
		}
		buf->virt_addr = NULL;
	}

	dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
		stat->subdev.name);

	stat->buf_alloc_size = 0;
	stat->active_buf = NULL;
}

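/* Allocate statistics buffers mapped through the ISP IOMMU (non-coherent case). */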
static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
{
	struct isp_device *isp = stat->isp;
	int i;

	stat->buf_alloc_size = size;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];
		struct iovm_struct *iovm;

		WARN_ON(buf->dma_addr);
		buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size,
						IOMMU_FLAG);
		if (IS_ERR((void *)buf->iommu_addr)) {
			dev_err(stat->isp->dev,
				"%s: Can't acquire memory for buffer %d\n",
				stat->subdev.name, i);
			isp_stat_bufs_free(stat);
			return -ENOMEM;
		}

		iovm = find_iovm_area(isp->iommu, buf->iommu_addr);
		if (!iovm ||
		    !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
				DMA_FROM_DEVICE)) {
			isp_stat_bufs_free(stat);
			return -ENOMEM;
		}
		buf->iovm = iovm;

		buf->virt_addr = da_to_va(stat->isp->iommu,
					  (u32)buf->iommu_addr);
		buf->empty = 1;
		dev_dbg(stat->isp->dev,
			"%s: buffer[%d] allocated. iommu_addr=0x%08lx virt_addr=0x%08lx\n",
			stat->subdev.name, i, buf->iommu_addr,
			(unsigned long)buf->virt_addr);
	}

	return 0;
}

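/* Allocate DMA-coherent statistics buffers (used when a DMA channel is available). */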
static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
{
	int i;

	stat->buf_alloc_size = size;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];

		WARN_ON(buf->iommu_addr);
		buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
					&buf->dma_addr, GFP_KERNEL | GFP_DMA);

		if (!buf->virt_addr || !buf->dma_addr) {
			dev_info(stat->isp->dev,
				 "%s: Can't acquire memory for DMA buffer %d\n",
				 stat->subdev.name, i);
			isp_stat_bufs_free(stat);
			return -ENOMEM;
		}
		buf->empty = 1;

		dev_dbg(stat->isp->dev,
			"%s: buffer[%d] allocated. dma_addr=0x%08lx virt_addr=0x%08lx\n",
			stat->subdev.name, i, (unsigned long)buf->dma_addr,
			(unsigned long)buf->virt_addr);
	}

	return 0;
}

static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
{
	unsigned long flags;

	spin_lock_irqsave(&stat->isp->stat_lock, flags);

	BUG_ON(stat->locked_buf != NULL);

	/* Are the old buffers big enough? */
	if (stat->buf_alloc_size >= size) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return 0;
	}

	if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
		dev_info(stat->isp->dev,
			 "%s: trying to allocate memory when busy\n",
			 stat->subdev.name);
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return -EBUSY;
	}

	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

	isp_stat_bufs_free(stat);

	if (IS_COHERENT_BUF(stat))
		return isp_stat_bufs_alloc_dma(stat, size);
	else
		return isp_stat_bufs_alloc_iommu(stat, size);
}

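/* Queue a V4L2 event to notify userspace that new statistics (or a buffer error) are available. */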
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
	struct video_device *vdev = &stat->subdev.devnode;
	struct v4l2_event event;
	struct omap3isp_stat_event_status *status = (void *)event.u.data;

	memset(&event, 0, sizeof(event));
	if (!err) {
		status->frame_number = stat->frame_number;
		status->config_counter = stat->config_counter;
	} else {
		status->buf_err = 1;
	}
	event.type = stat->event_type;
	v4l2_event_queue(vdev, &event);
}

/*
 * omap3isp_stat_request_statistics - Request statistics.
 * @data: Pointer to return statistics data.
 *
 * Returns 0 if successful.
 */
int omap3isp_stat_request_statistics(struct ispstat *stat,
				     struct omap3isp_stat_data *data)
{
	struct ispstat_buffer *buf;

	if (stat->state != ISPSTAT_ENABLED) {
		dev_dbg(stat->isp->dev, "%s: engine not enabled.\n",
			stat->subdev.name);
		return -EINVAL;
	}

	mutex_lock(&stat->ioctl_lock);
	buf = isp_stat_buf_get(stat, data);
	if (IS_ERR(buf)) {
		mutex_unlock(&stat->ioctl_lock);
		return PTR_ERR(buf);
	}

	data->ts.tv_sec = buf->ts.tv_sec;
	data->ts.tv_usec = buf->ts.tv_usec;
	data->config_counter = buf->config_counter;
	data->frame_number = buf->frame_number;
	data->buf_size = buf->buf_size;

	buf->empty = 1;
	isp_stat_buf_release(stat);
	mutex_unlock(&stat->ioctl_lock);

	return 0;
}

/*
 * omap3isp_stat_config - Receives new statistic engine configuration.
 * @new_conf: Pointer to config structure.
 *
 * Returns 0 if successful, -EINVAL if new_conf pointer is NULL, -ENOMEM if
 * it was unable to allocate memory for the buffer, or other errors if
 * parameters are invalid.
 */
int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
{
	int ret;
	unsigned long irqflags;
	struct ispstat_generic_config *user_cfg = new_conf;
	u32 buf_size = user_cfg->buf_size;

	if (!new_conf) {
		dev_dbg(stat->isp->dev, "%s: configuration is NULL\n",
			stat->subdev.name);
		return -EINVAL;
	}

	mutex_lock(&stat->ioctl_lock);

	dev_dbg(stat->isp->dev,
		"%s: configuring module with buffer size=0x%08lx\n",
		stat->subdev.name, (unsigned long)buf_size);

	ret = stat->ops->validate_params(stat, new_conf);
	if (ret) {
		mutex_unlock(&stat->ioctl_lock);
		dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n",
			stat->subdev.name);
		return ret;
	}

	if (buf_size != user_cfg->buf_size)
		dev_dbg(stat->isp->dev,
			"%s: driver has corrected buffer size request to 0x%08lx\n",
			stat->subdev.name, (unsigned long)user_cfg->buf_size);

	/*
	 * Hack: H3A modules may need a doubled buffer size to avoid access
	 * to an invalid memory address after an SBL overflow.
	 * The buffer size is always PAGE_ALIGNED.
	 * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be
	 * inserted at the end for data integrity check purposes.
	 * Hack 3: AF module writes one paxel data more than it should, so
	 * the buffer allocation must consider it to avoid invalid memory
	 * access.
	 * Hack 4: H3A needs to allocate extra space for the recover state.
	 */
	if (IS_H3A(stat)) {
		buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE;
		if (IS_H3A_AF(stat))
			/*
			 * Adding one extra paxel data size for each recover
			 * buffer + 2 regular ones.
			 */
			buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2);
		if (stat->recover_priv) {
			struct ispstat_generic_config *recover_cfg =
				stat->recover_priv;

			buf_size += recover_cfg->buf_size *
				    NUM_H3A_RECOVER_BUFS;
		}
		buf_size = PAGE_ALIGN(buf_size);
	} else { /* Histogram */
		buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE);
	}

	ret = isp_stat_bufs_alloc(stat, buf_size);
	if (ret) {
		mutex_unlock(&stat->ioctl_lock);
		return ret;
	}

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	stat->ops->set_params(stat, new_conf);
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);

	/*
	 * Returning the right future config_counter for this setup, so
	 * userspace can *know* when it has been applied.
	 */
	user_cfg->config_counter = stat->config_counter + stat->inc_config;

	/* Module has a valid configuration. */
	stat->configured = 1;
	dev_dbg(stat->isp->dev, "%s: module has been successfully configured.\n",
		stat->subdev.name);

	mutex_unlock(&stat->ioctl_lock);

	return 0;
}

/*
 * isp_stat_buf_process - Process statistic buffers.
 * @buf_state: points out if the buffer is ready to be processed. It's
 *	       necessary because histogram needs to copy the data from
 *	       internal memory before being able to process the buffer.
 */
static int isp_stat_buf_process(struct ispstat *stat, int buf_state)
{
	int ret = STAT_NO_BUF;

	if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
	    buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
		ret = isp_stat_buf_queue(stat);
		isp_stat_buf_next(stat);
	}

	return ret;
}

int omap3isp_stat_pcr_busy(struct ispstat *stat)
{
	return stat->ops->busy(stat);
}

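/* The module is busy if the hardware engine, buffer processing or the state machine is active. */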
int omap3isp_stat_busy(struct ispstat *stat)
{
	return omap3isp_stat_pcr_busy(stat) | stat->buf_processing |
		(stat->state != ISPSTAT_DISABLED);
}

/*
 * isp_stat_pcr_enable - Disables/Enables statistic engines.
 * @pcr_enable: 0/1 - Disables/Enables the engine.
 *
 * Must be called from the ISP driver when the module is idle and synchronized
 * with CCDC.
 */
static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
{
	if ((stat->state != ISPSTAT_ENABLING &&
	     stat->state != ISPSTAT_ENABLED) && pcr_enable)
		/* Userspace has disabled the module. Aborting. */
		return;

	stat->ops->enable(stat, pcr_enable);
	if (stat->state == ISPSTAT_DISABLING && !pcr_enable)
		stat->state = ISPSTAT_DISABLED;
	else if (stat->state == ISPSTAT_ENABLING && pcr_enable)
		stat->state = ISPSTAT_ENABLED;
}

void omap3isp_stat_suspend(struct ispstat *stat)
{
	unsigned long flags;

	spin_lock_irqsave(&stat->isp->stat_lock, flags);

	if (stat->state != ISPSTAT_DISABLED)
		stat->ops->enable(stat, 0);
	if (stat->state == ISPSTAT_ENABLED)
		stat->state = ISPSTAT_SUSPENDED;

	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
}

void omap3isp_stat_resume(struct ispstat *stat)
{
	/* Module will be re-enabled with its pipeline */
	if (stat->state == ISPSTAT_SUSPENDED)
		stat->state = ISPSTAT_ENABLING;
}

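/*
 * isp_stat_try_enable - Enable the engine if userspace has requested it and
 * the module is configured, idle and has buffers allocated.
 */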
static void isp_stat_try_enable(struct ispstat *stat)
{
	unsigned long irqflags;

	if (stat->priv == NULL)
		/* driver wasn't initialised */
		return;

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing &&
	    stat->buf_alloc_size) {
		/*
		 * Userspace requested to enable the engine but it wasn't yet.
		 * Let's do that now.
		 */
		stat->update = 1;
		isp_stat_buf_next(stat);
		stat->ops->setup_regs(stat, stat->priv);
		isp_stat_buf_insert_magic(stat, stat->active_buf);

		/*
		 * The H3A module has some hw issues which force the driver to
		 * ignore the next buffers even if it was disabled in the
		 * meantime. On the other hand, Histogram shouldn't ignore
		 * buffers anymore if it's being enabled.
		 */
		if (!IS_H3A(stat))
			atomic_set(&stat->buf_err, 0);

		isp_stat_pcr_enable(stat, 1);
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		dev_dbg(stat->isp->dev, "%s: module is enabled.\n",
			stat->subdev.name);
	} else {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	}
}

void omap3isp_stat_isr_frame_sync(struct ispstat *stat)
{
	isp_stat_try_enable(stat);
}

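/* Called by the ISP core when an SBL (shared buffer logic) overflow hits an H3A module. */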
void omap3isp_stat_sbl_overflow(struct ispstat *stat)
{
	unsigned long irqflags;

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	/*
	 * Due to an H3A hw issue which prevents the next buffer from starting
	 * at the correct memory address, 2 buffers must be ignored.
	 */
	atomic_set(&stat->buf_err, 2);

	/*
	 * If more than one SBL overflow happens in a row, the H3A module may
	 * access an invalid memory region.
	 * stat->sbl_ovl_recover is set to tell the driver to temporarily use
	 * a soft configuration which helps to avoid consecutive overflows.
	 */
	if (stat->recover_priv)
		stat->sbl_ovl_recover = 1;
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
}

/*
 * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible
 * @enable: 0/1 - Disables/Enables the engine.
 *
 * Client should configure all the module registers before this.
 * This function can be called from a userspace request.
 */
int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
{
	unsigned long irqflags;

	dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n",
		stat->subdev.name, enable ? "enable" : "disable");

	/* Prevent enabling while configuring */
	mutex_lock(&stat->ioctl_lock);

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);

	if (!stat->configured && enable) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		mutex_unlock(&stat->ioctl_lock);
		dev_dbg(stat->isp->dev,
			"%s: cannot enable module as it has never been successfully configured so far.\n",
			stat->subdev.name);
		return -EINVAL;
	}

	if (enable) {
		if (stat->state == ISPSTAT_DISABLING)
			/* Previous disabling request wasn't done yet */
			stat->state = ISPSTAT_ENABLED;
		else if (stat->state == ISPSTAT_DISABLED)
			/* Module is now being enabled */
			stat->state = ISPSTAT_ENABLING;
	} else {
		if (stat->state == ISPSTAT_ENABLING) {
			/* Previous enabling request wasn't done yet */
			stat->state = ISPSTAT_DISABLED;
		} else if (stat->state == ISPSTAT_ENABLED) {
			/* Module is now being disabled */
			stat->state = ISPSTAT_DISABLING;
			isp_stat_buf_clear(stat);
		}
	}

	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	mutex_unlock(&stat->ioctl_lock);

	return 0;
}

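/* V4L2 subdev video operation: start or stop the statistics engine along with the pipeline. */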
int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct ispstat *stat = v4l2_get_subdevdata(subdev);

	if (enable) {
		/*
		 * Only set the enable PCR bit if the module was previously
		 * enabled through ioctl.
		 */
		isp_stat_try_enable(stat);
	} else {
		unsigned long flags;

		/* Disable PCR bit and config enable field */
		omap3isp_stat_enable(stat, 0);
		spin_lock_irqsave(&stat->isp->stat_lock, flags);
		stat->ops->enable(stat, 0);
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

		/*
		 * If the module isn't busy, a new interrupt may or may not
		 * come to set the state to DISABLED. As Histogram needs to
		 * read its internal memory to clear it, let the interrupt
		 * handler be responsible for changing the state to DISABLED.
		 * If the last interrupt is coming, it's still safe as the
		 * handler will ignore the second time when the state is
		 * already set to DISABLED. It's necessary to synchronize
		 * Histogram with streamoff, since the module may be
		 * considered idle before the last SDMA transfer starts if we
		 * returned here.
		 */
		if (!omap3isp_stat_pcr_busy(stat))
			omap3isp_stat_isr(stat);

		dev_dbg(stat->isp->dev, "%s: module is being disabled\n",
			stat->subdev.name);
	}

	return 0;
}

/*
 * __stat_isr - Interrupt handler for statistic drivers
 */
static void __stat_isr(struct ispstat *stat, int from_dma)
{
	int ret = STAT_BUF_DONE;
	int buf_processing;
	unsigned long irqflags;
	struct isp_pipeline *pipe;

	/*
	 * stat->buf_processing must be set before disabling the module. It's
	 * necessary to avoid reporting the buffers as not busy too early, in
	 * case SDMA is going to be used.
	 */
	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	if (stat->state == ISPSTAT_DISABLED) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		return;
	}
	buf_processing = stat->buf_processing;
	stat->buf_processing = 1;
	stat->ops->enable(stat, 0);

	if (buf_processing && !from_dma) {
		if (stat->state == ISPSTAT_ENABLED) {
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			dev_err(stat->isp->dev,
				"%s: interrupt occurred when module was still processing a buffer.\n",
				stat->subdev.name);
			ret = STAT_NO_BUF;
			goto out;
		} else {
			/*
			 * Interrupt handler was called from streamoff when
			 * the module wasn't busy anymore to ensure it is being
			 * disabled after processing the last buffer. If such
			 * buffer processing has already started, there is no
			 * need to do anything.
			 */
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			return;
		}
	}
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);

	/* If it's busy we can't process this buffer anymore */
	if (!omap3isp_stat_pcr_busy(stat)) {
		if (!from_dma && stat->ops->buf_process)
			/* Module still needs to copy data to buffer. */
			ret = stat->ops->buf_process(stat);
		if (ret == STAT_BUF_WAITING_DMA)
			/* Buffer is not ready yet */
			return;

		spin_lock_irqsave(&stat->isp->stat_lock, irqflags);

		/*
		 * Histogram needs to read its internal memory to clear it
		 * before being disabled. For that reason, the common statistic
		 * layer can only return after calling the stat's buf_process()
		 * operator.
		 */
		if (stat->state == ISPSTAT_DISABLING) {
			stat->state = ISPSTAT_DISABLED;
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			stat->buf_processing = 0;
			return;
		}
		pipe = to_isp_pipeline(&stat->subdev.entity);
		stat->frame_number = atomic_read(&pipe->frame_number);

		/*
		 * Before this point, 'ret' stores the buffer's status if it's
		 * ready to be processed. Afterwards, it holds the status if
		 * it was processed successfully.
		 */
		ret = isp_stat_buf_process(stat, ret);

		if (likely(!stat->sbl_ovl_recover)) {
			stat->ops->setup_regs(stat, stat->priv);
		} else {
			/*
			 * Using the recover config to increase the chance of
			 * having a good buffer processing and making the H3A
			 * module go back to a valid state.
			 */
			stat->ops->setup_regs(stat, stat->recover_priv);
			stat->sbl_ovl_recover = 0;

			/*
			 * Set 'update' in case the module needs to use the
			 * regular configuration after the next buffer.
			 */
			stat->update = 1;
		}

		isp_stat_buf_insert_magic(stat, stat->active_buf);

		/*
		 * Hack: H3A modules may access an invalid memory address or
		 * send corrupted data to userspace if more than one SBL
		 * overflow happens in a row without re-writing the buffer's
		 * start memory address in the meantime. Such a situation is
		 * avoided if the module is not immediately re-enabled when
		 * the ISR misses the timing to process the buffer and to set
		 * up the registers. Because of that, pcr_enable(1) was moved
		 * inside this 'if' block. But the next interrupt will still
		 * happen, as during pcr_enable(0) the module was busy.
		 */
		isp_stat_pcr_enable(stat, 1);
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	} else {
		/*
		 * If an SBL overflow occurs and the H3A driver misses the
		 * timing to process the buffer, stat->buf_err is set and won't
		 * be cleared now, so the next buffer will be correctly
		 * ignored. It's necessary due to a hw issue which makes the
		 * next H3A buffer start from the memory address where the
		 * previous one stopped, instead of where it was configured to.
		 * Do not "stat->buf_err = 0" here.
		 */

		if (stat->ops->buf_process)
			/*
			 * The driver may need to erase the current data prior
			 * to processing a new buffer. If it misses the timing,
			 * the next buffer might be wrong and so should be
			 * ignored. It happens only for Histogram.
			 */
			atomic_set(&stat->buf_err, 1);

		ret = STAT_NO_BUF;
		dev_dbg(stat->isp->dev,
			"%s: cannot process buffer, device is busy.\n",
			stat->subdev.name);
	}

out:
	stat->buf_processing = 0;
	isp_stat_queue_event(stat, ret != STAT_BUF_DONE);
}

void omap3isp_stat_isr(struct ispstat *stat)
{
	__stat_isr(stat, 0);
}

void omap3isp_stat_dma_isr(struct ispstat *stat)
{
	__stat_isr(stat, 1);
}

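/* Initialize the statistics module's V4L2 subdev and its single sink media pad. */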
static int isp_stat_init_entities(struct ispstat *stat, const char *name,
				  const struct v4l2_subdev_ops *sd_ops)
{
	struct v4l2_subdev *subdev = &stat->subdev;
	struct media_entity *me = &subdev->entity;

	v4l2_subdev_init(subdev, sd_ops);
	snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
	subdev->grp_id = 1 << 16;	/* group ID for isp subdevs */
	subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
	subdev->nevents = STAT_NEVENTS;
	v4l2_set_subdevdata(subdev, stat);

	stat->pad.flags = MEDIA_PAD_FL_SINK;
	me->ops = NULL;

	return media_entity_init(me, 1, &stat->pad, 0);
}

int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
				  struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	struct ispstat *stat = v4l2_get_subdevdata(subdev);

	if (sub->type != stat->event_type)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub);
}

int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
				    struct v4l2_fh *fh,
				    struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}

void omap3isp_stat_unregister_entities(struct ispstat *stat)
{
	media_entity_cleanup(&stat->subdev.entity);
	v4l2_device_unregister_subdev(&stat->subdev);
}

int omap3isp_stat_register_entities(struct ispstat *stat,
				    struct v4l2_device *vdev)
{
	return v4l2_device_register_subdev(vdev, &stat->subdev);
}

int omap3isp_stat_init(struct ispstat *stat, const char *name,
		       const struct v4l2_subdev_ops *sd_ops)
{
	stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
	if (!stat->buf)
		return -ENOMEM;
	isp_stat_buf_clear(stat);
	mutex_init(&stat->ioctl_lock);
	atomic_set(&stat->buf_err, 0);

	return isp_stat_init_entities(stat, name, sd_ops);
}

void omap3isp_stat_free(struct ispstat *stat)
{
	isp_stat_bufs_free(stat);
	kfree(stat->buf);
}