Lines Matching +full:buffer +full:- +full:enable

1 // SPDX-License-Identifier: GPL-2.0-only
5 * TI OMAP3 ISP - Statistics core
15 #include <linux/dma-mapping.h>
22 #define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch != NULL)
36 * the next buffer to start being written at the same point where the overflow
38 * go back to a valid state is to complete a valid buffer processing cycle. Of course it
39 * requires at least a doubled buffer size to avoid an access to invalid memory
43 * configuration was created. It produces the minimum buffer size for each H3A
45 * will be enabled every time an SBL overflow occurs. As the output buffer size
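
A minimal standalone sketch of the worst case described above (the constants are illustrative, not the driver's): even if an SBL overflow makes the engine restart its write at the very end of the configured area, a doubled allocation still contains the whole stray frame.

#include <assert.h>
#include <stddef.h>

#define CFG_BUF_SIZE 4096u               /* size requested by userspace (illustrative) */
#define ALLOC_SIZE   (2u * CFG_BUF_SIZE) /* doubled allocation, as described above     */

int main(void)
{
        /* Worst case: the write restarts at the last byte of the configured
         * area and still produces a full frame of CFG_BUF_SIZE bytes. */
        size_t restart_offset = CFG_BUF_SIZE - 1;

        assert(restart_offset + CFG_BUF_SIZE <= ALLOC_SIZE);
        return 0;
}
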
56 #define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af)
57 #define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb)
68 dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir); in __isp_stat_buf_sync_magic()
69 dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK), in __isp_stat_buf_sync_magic()
101 buf->buf_size + AF_EXTRA_DATA : buf->buf_size; in isp_stat_buf_check_magic()
104 int ret = -EINVAL; in isp_stat_buf_check_magic()
109 for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++) in isp_stat_buf_check_magic()
114 dev_dbg(stat->isp->dev, in isp_stat_buf_check_magic()
116 stat->subdev.name); in isp_stat_buf_check_magic()
121 for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE; in isp_stat_buf_check_magic()
124 dev_dbg(stat->isp->dev, in isp_stat_buf_check_magic()
126 stat->subdev.name); in isp_stat_buf_check_magic()
127 return -EINVAL; in isp_stat_buf_check_magic()
141 stat->buf_size + AF_EXTRA_DATA : stat->buf_size; in isp_stat_buf_insert_magic()
146 * Inserting MAGIC_NUM at the beginning and end of the buffer. in isp_stat_buf_insert_magic()
147 * buf->buf_size is set only after the buffer is queued. For now the in isp_stat_buf_insert_magic()
149 * stat->buf_size. in isp_stat_buf_insert_magic()
151 memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE); in isp_stat_buf_insert_magic()
152 memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE); in isp_stat_buf_insert_magic()
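
As a self-contained illustration of the guard-byte scheme set up by the two memset() calls above (simplified: the real isp_stat_buf_check_magic() also inspects the head of the buffer), the tail guard sits just past the payload area and must still be intact after the frame, otherwise the engine overran its buffer. The constants and function names below are illustrative, not taken from the driver.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define DEMO_MAGIC_SIZE 16
#define DEMO_MAGIC_NUM  0x55

/* Write the tail guard just past the payload area before arming the engine. */
static void demo_insert_tail_magic(uint8_t *virt, size_t buf_size)
{
        memset(virt + buf_size, DEMO_MAGIC_NUM, DEMO_MAGIC_SIZE);
}

/* After the frame: 0 if the guard is intact, -1 if the engine wrote past buf_size. */
static int demo_check_tail_magic(const uint8_t *virt, size_t buf_size)
{
        size_t i;

        for (i = 0; i < DEMO_MAGIC_SIZE; i++)
                if (virt[buf_size + i] != DEMO_MAGIC_NUM)
                        return -1;
        return 0;
}
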
164 dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl, in isp_stat_buf_sync_for_device()
165 buf->sgt.nents, DMA_FROM_DEVICE); in isp_stat_buf_sync_for_device()
174 dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl, in isp_stat_buf_sync_for_cpu()
175 buf->sgt.nents, DMA_FROM_DEVICE); in isp_stat_buf_sync_for_cpu()
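
The two wrappers above follow the standard streaming-DMA ownership hand-off: the CPU may only read the statistics between a sync-for-cpu and a sync-for-device call. A minimal kernel-style sketch of that pattern (illustrative only, not a complete driver; the helper name is made up):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void stat_read_example(struct device *dev, struct sg_table *sgt)
{
        /* Give the CPU a coherent view of what the engine wrote. */
        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);

        /* ... parse or copy the statistics here ... */

        /* Hand the pages back before the engine writes the next frame. */
        dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
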
183 stat->buf[i].empty = 1; in isp_stat_buf_clear()
193 struct ispstat_buffer *curr = &stat->buf[i]; in __isp_stat_buf_find()
196 * Don't select the buffer which is being copied to in __isp_stat_buf_find()
199 if (curr == stat->locked_buf || curr == stat->active_buf) in __isp_stat_buf_find()
203 if (!look_empty && curr->empty) in __isp_stat_buf_find()
206 /* Pick uninitialised buffer over anything else if look_empty */ in __isp_stat_buf_find()
207 if (curr->empty) { in __isp_stat_buf_find()
212 /* Choose the oldest buffer */ in __isp_stat_buf_find()
214 (s32)curr->frame_number - (s32)found->frame_number < 0) in __isp_stat_buf_find()
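
The signed subtraction above is the usual wraparound-safe way to order sequence counters: a plain '<' on the u32 frame numbers would pick the wrong buffer right after the counter wraps. A standalone example of the same idea (names are illustrative; the driver casts the operands rather than the difference, which yields the same ordering):

#include <assert.h>
#include <stdint.h>

/* Nonzero if sequence number 'a' is older than 'b', even across a u32 wrap. */
static int seq_older(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

int main(void)
{
        assert(seq_older(5, 10));          /* plain case                  */
        assert(seq_older(0xfffffffeu, 3)); /* 3 was produced after a wrap */
        assert(!seq_older(3, 0xfffffffeu));
        return 0;
}
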
235 if (!stat->active_buf) in isp_stat_buf_queue()
238 ktime_get_ts64(&stat->active_buf->ts); in isp_stat_buf_queue()
240 stat->active_buf->buf_size = stat->buf_size; in isp_stat_buf_queue()
241 if (isp_stat_buf_check_magic(stat, stat->active_buf)) { in isp_stat_buf_queue()
242 dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n", in isp_stat_buf_queue()
243 stat->subdev.name); in isp_stat_buf_queue()
246 stat->active_buf->config_counter = stat->config_counter; in isp_stat_buf_queue()
247 stat->active_buf->frame_number = stat->frame_number; in isp_stat_buf_queue()
248 stat->active_buf->empty = 0; in isp_stat_buf_queue()
249 stat->active_buf = NULL; in isp_stat_buf_queue()
254 /* Get next free buffer to write the statistics to and mark it active. */
257 if (unlikely(stat->active_buf)) in isp_stat_buf_next()
258 /* Overwriting unused active buffer */ in isp_stat_buf_next()
259 dev_dbg(stat->isp->dev, in isp_stat_buf_next()
260 "%s: new buffer requested without queuing active one.\n", in isp_stat_buf_next()
261 stat->subdev.name); in isp_stat_buf_next()
263 stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat); in isp_stat_buf_next()
270 isp_stat_buf_sync_for_device(stat, stat->locked_buf); in isp_stat_buf_release()
271 spin_lock_irqsave(&stat->isp->stat_lock, flags); in isp_stat_buf_release()
272 stat->locked_buf = NULL; in isp_stat_buf_release()
273 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_buf_release()
276 /* Get a buffer and copy its contents to userspace. */
284 spin_lock_irqsave(&stat->isp->stat_lock, flags); in isp_stat_buf_get()
289 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_buf_get()
290 dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n", in isp_stat_buf_get()
291 stat->subdev.name); in isp_stat_buf_get()
292 return ERR_PTR(-EBUSY); in isp_stat_buf_get()
295 dev_dbg(stat->isp->dev, in isp_stat_buf_get()
296 "%s: current buffer has corrupted data\n.", in isp_stat_buf_get()
297 stat->subdev.name); in isp_stat_buf_get()
299 buf->empty = 1; in isp_stat_buf_get()
301 /* Buffer isn't corrupted. */ in isp_stat_buf_get()
306 stat->locked_buf = buf; in isp_stat_buf_get()
308 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_buf_get()
310 if (buf->buf_size > data->buf_size) { in isp_stat_buf_get()
311 dev_warn(stat->isp->dev, in isp_stat_buf_get()
312 "%s: userspace's buffer size is not enough.\n", in isp_stat_buf_get()
313 stat->subdev.name); in isp_stat_buf_get()
315 return ERR_PTR(-EINVAL); in isp_stat_buf_get()
320 rval = copy_to_user(data->buf, in isp_stat_buf_get()
321 buf->virt_addr, in isp_stat_buf_get()
322 buf->buf_size); in isp_stat_buf_get()
325 dev_info(stat->isp->dev, in isp_stat_buf_get()
327 stat->subdev.name, rval); in isp_stat_buf_get()
328 buf = ERR_PTR(-EFAULT); in isp_stat_buf_get()
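
For reference, copy_to_user() returns the number of bytes it could not copy rather than an errno; the driver keeps that count for the message above and then maps any non-zero value to -EFAULT. When the count itself is not needed, the same pattern is usually condensed to the following (requires <linux/uaccess.h>; user_ptr, kernel_buf and len are placeholders):

        if (copy_to_user(user_ptr, kernel_buf, len))
                return -EFAULT;
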
338 ? NULL : stat->isp->dev; in isp_stat_bufs_free()
342 struct ispstat_buffer *buf = &stat->buf[i]; in isp_stat_bufs_free()
344 if (!buf->virt_addr) in isp_stat_bufs_free()
347 sg_free_table(&buf->sgt); in isp_stat_bufs_free()
349 dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr, in isp_stat_bufs_free()
350 buf->dma_addr); in isp_stat_bufs_free()
352 buf->dma_addr = 0; in isp_stat_bufs_free()
353 buf->virt_addr = NULL; in isp_stat_bufs_free()
354 buf->empty = 1; in isp_stat_bufs_free()
357 dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n", in isp_stat_bufs_free()
358 stat->subdev.name); in isp_stat_bufs_free()
360 stat->buf_alloc_size = 0; in isp_stat_bufs_free()
361 stat->active_buf = NULL; in isp_stat_bufs_free()
370 buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr, in isp_stat_bufs_alloc_one()
372 if (!buf->virt_addr) in isp_stat_bufs_alloc_one()
373 return -ENOMEM; in isp_stat_bufs_alloc_one()
375 ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr, in isp_stat_bufs_alloc_one()
378 dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr); in isp_stat_bufs_alloc_one()
379 buf->virt_addr = NULL; in isp_stat_bufs_alloc_one()
380 buf->dma_addr = 0; in isp_stat_bufs_alloc_one()
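
Assembled from the fragments above, the per-buffer allocation is the usual coherent-allocation-plus-scatterlist pattern. A hedged reconstruction of how they fit together (the signature, the GFP flag and the exact return path are assumptions, not shown in the matches):

static int isp_stat_bufs_alloc_one(struct device *dev,
                                   struct ispstat_buffer *buf,
                                   unsigned int size)
{
        int ret;

        buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
                                            GFP_KERNEL);
        if (!buf->virt_addr)
                return -ENOMEM;

        ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
                              size);
        if (ret < 0) {
                /* No sg table could be built: unwind the coherent allocation. */
                dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
                buf->virt_addr = NULL;
                buf->dma_addr = 0;
        }

        return ret;
}
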
406 ? NULL : stat->isp->dev; in isp_stat_bufs_alloc()
410 spin_lock_irqsave(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
412 BUG_ON(stat->locked_buf != NULL); in isp_stat_bufs_alloc()
415 if (stat->buf_alloc_size >= size) { in isp_stat_bufs_alloc()
416 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
420 if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) { in isp_stat_bufs_alloc()
421 dev_info(stat->isp->dev, in isp_stat_bufs_alloc()
423 stat->subdev.name); in isp_stat_bufs_alloc()
424 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
425 return -EBUSY; in isp_stat_bufs_alloc()
428 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
432 stat->buf_alloc_size = size; in isp_stat_bufs_alloc()
435 struct ispstat_buffer *buf = &stat->buf[i]; in isp_stat_bufs_alloc()
440 dev_err(stat->isp->dev, in isp_stat_bufs_alloc()
441 "%s: Failed to allocate DMA buffer %u\n", in isp_stat_bufs_alloc()
442 stat->subdev.name, i); in isp_stat_bufs_alloc()
447 buf->empty = 1; in isp_stat_bufs_alloc()
449 dev_dbg(stat->isp->dev, in isp_stat_bufs_alloc()
450 "%s: buffer[%u] allocated. dma=%pad virt=%p", in isp_stat_bufs_alloc()
451 stat->subdev.name, i, &buf->dma_addr, buf->virt_addr); in isp_stat_bufs_alloc()
459 struct video_device *vdev = stat->subdev.devnode; in isp_stat_queue_event()
465 status->frame_number = stat->frame_number; in isp_stat_queue_event()
466 status->config_counter = stat->config_counter; in isp_stat_queue_event()
468 status->buf_err = 1; in isp_stat_queue_event()
470 event.type = stat->event_type; in isp_stat_queue_event()
476 * omap3isp_stat_request_statistics - Request statistics.
486 if (stat->state != ISPSTAT_ENABLED) { in omap3isp_stat_request_statistics()
487 dev_dbg(stat->isp->dev, "%s: engine not enabled.\n", in omap3isp_stat_request_statistics()
488 stat->subdev.name); in omap3isp_stat_request_statistics()
489 return -EINVAL; in omap3isp_stat_request_statistics()
492 mutex_lock(&stat->ioctl_lock); in omap3isp_stat_request_statistics()
495 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_request_statistics()
499 data->ts.tv_sec = buf->ts.tv_sec; in omap3isp_stat_request_statistics()
500 data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC; in omap3isp_stat_request_statistics()
501 data->config_counter = buf->config_counter; in omap3isp_stat_request_statistics()
502 data->frame_number = buf->frame_number; in omap3isp_stat_request_statistics()
503 data->buf_size = buf->buf_size; in omap3isp_stat_request_statistics()
505 buf->empty = 1; in omap3isp_stat_request_statistics()
507 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_request_statistics()
522 data->ts.tv_sec = data64.ts.tv_sec; in omap3isp_stat_request_statistics_time32()
523 data->ts.tv_usec = data64.ts.tv_usec; in omap3isp_stat_request_statistics_time32()
524 data->buf = (uintptr_t)data64.buf; in omap3isp_stat_request_statistics_time32()
525 memcpy(&data->frame, &data64.frame, sizeof(data->frame)); in omap3isp_stat_request_statistics_time32()
531 * omap3isp_stat_config - Receives new statistic engine configuration.
534 * Returns 0 if successful, -EINVAL if new_conf pointer is NULL, -ENOMEM if
535 * it was unable to allocate memory for the buffer, or other errors if parameters
543 u32 buf_size = user_cfg->buf_size; in omap3isp_stat_config()
545 mutex_lock(&stat->ioctl_lock); in omap3isp_stat_config()
547 dev_dbg(stat->isp->dev, in omap3isp_stat_config()
548 "%s: configuring module with buffer size=0x%08lx\n", in omap3isp_stat_config()
549 stat->subdev.name, (unsigned long)buf_size); in omap3isp_stat_config()
551 ret = stat->ops->validate_params(stat, new_conf); in omap3isp_stat_config()
553 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_config()
554 dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n", in omap3isp_stat_config()
555 stat->subdev.name); in omap3isp_stat_config()
559 if (buf_size != user_cfg->buf_size) in omap3isp_stat_config()
560 dev_dbg(stat->isp->dev, in omap3isp_stat_config()
561 "%s: driver has corrected buffer size request to 0x%08lx\n", in omap3isp_stat_config()
562 stat->subdev.name, in omap3isp_stat_config()
563 (unsigned long)user_cfg->buf_size); in omap3isp_stat_config()
566 * Hack: H3A modules may need a doubled buffer size to avoid access in omap3isp_stat_config()
568 * The buffer size is always PAGE_ALIGNED. in omap3isp_stat_config()
572 * the buffer allocation must consider it to avoid invalid memory in omap3isp_stat_config()
577 buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE; in omap3isp_stat_config()
581 * buffer + 2 regular ones. in omap3isp_stat_config()
584 if (stat->recover_priv) { in omap3isp_stat_config()
586 stat->recover_priv; in omap3isp_stat_config()
587 buf_size += recover_cfg->buf_size * in omap3isp_stat_config()
592 buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE); in omap3isp_stat_config()
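
Putting the "hack" comments and the two branches above together, the allocation size works out roughly as follows; a standalone paraphrase with illustrative stand-ins for the constants not shown in the matches (MAGIC_SIZE, AF_EXTRA_DATA, NUM_H3A_RECOVER_BUFS, PAGE_ALIGN):

#include <stddef.h>

/* Illustrative stand-ins; the driver's real values live in ispstat.c/ispstat.h. */
#define DEMO_PAGE_SIZE        4096u
#define DEMO_PAGE_ALIGN(x)    (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))
#define DEMO_MAGIC_SIZE       16u
#define DEMO_AF_EXTRA_DATA    48u   /* one extra AF paxel, size assumed */
#define DEMO_NUM_RECOVER_BUFS 10u

/* Rough paraphrase of the sizing in omap3isp_stat_config(): H3A engines get a
 * doubled buffer plus guard bytes, AF slack and room for recover buffers;
 * other engines just get buf_size plus the guard, page aligned either way. */
static unsigned int demo_stat_alloc_size(unsigned int buf_size, int is_h3a,
                                         int is_af, unsigned int recover_size)
{
        unsigned int size;

        if (is_h3a) {
                size = buf_size * 2 + DEMO_MAGIC_SIZE;
                if (is_af)
                        size += DEMO_AF_EXTRA_DATA * 3;
                if (recover_size)
                        size += recover_size * DEMO_NUM_RECOVER_BUFS;
        } else {
                size = buf_size + DEMO_MAGIC_SIZE;
        }
        return DEMO_PAGE_ALIGN(size);
}
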
597 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_config()
601 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in omap3isp_stat_config()
602 stat->ops->set_params(stat, new_conf); in omap3isp_stat_config()
603 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_config()
609 user_cfg->config_counter = stat->config_counter + stat->inc_config; in omap3isp_stat_config()
612 stat->configured = 1; in omap3isp_stat_config()
613 dev_dbg(stat->isp->dev, in omap3isp_stat_config()
615 stat->subdev.name); in omap3isp_stat_config()
617 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_config()
623 * isp_stat_buf_process - Process statistic buffers.
624 * @buf_state: indicates whether the buffer is ready to be processed. It's necessary
626 * before being able to process the buffer.
632 if (!atomic_add_unless(&stat->buf_err, -1, 0) && in isp_stat_buf_process()
633 buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) { in isp_stat_buf_process()
643 return stat->ops->busy(stat); in omap3isp_stat_pcr_busy()
648 return omap3isp_stat_pcr_busy(stat) | stat->buf_processing | in omap3isp_stat_busy()
649 (stat->state != ISPSTAT_DISABLED); in omap3isp_stat_busy()
653 * isp_stat_pcr_enable - Disables/Enables statistic engines.
654 * @pcr_enable: 0/1 - Disables/Enables the engine.
661 if ((stat->state != ISPSTAT_ENABLING && in isp_stat_pcr_enable()
662 stat->state != ISPSTAT_ENABLED) && pcr_enable) in isp_stat_pcr_enable()
666 stat->ops->enable(stat, pcr_enable); in isp_stat_pcr_enable()
667 if (stat->state == ISPSTAT_DISABLING && !pcr_enable) in isp_stat_pcr_enable()
668 stat->state = ISPSTAT_DISABLED; in isp_stat_pcr_enable()
669 else if (stat->state == ISPSTAT_ENABLING && pcr_enable) in isp_stat_pcr_enable()
670 stat->state = ISPSTAT_ENABLED; in isp_stat_pcr_enable()
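
The lines above implement a small state machine: a request to turn the engine on is honoured only while the module is ENABLING or ENABLED, and the state only advances once the hardware bit has actually been toggled. A standalone paraphrase (the enum values are assumed from ispstat.h; the helper is made up, and in the driver stat->ops->enable() is called between the early return and the state update):

enum demo_ispstat_state {
        ISPSTAT_DISABLED = 0,
        ISPSTAT_DISABLING,
        ISPSTAT_ENABLED,
        ISPSTAT_ENABLING,
        ISPSTAT_SUSPENDED,
};

/* Mirrors the visible state handling of isp_stat_pcr_enable(). */
static enum demo_ispstat_state demo_pcr_transition(enum demo_ispstat_state state,
                                                   int pcr_enable)
{
        /* An enable request is ignored unless userspace asked for it first. */
        if (pcr_enable && state != ISPSTAT_ENABLING && state != ISPSTAT_ENABLED)
                return state;

        if (!pcr_enable && state == ISPSTAT_DISABLING)
                return ISPSTAT_DISABLED;
        if (pcr_enable && state == ISPSTAT_ENABLING)
                return ISPSTAT_ENABLED;

        return state;
}
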
677 spin_lock_irqsave(&stat->isp->stat_lock, flags); in omap3isp_stat_suspend()
679 if (stat->state != ISPSTAT_DISABLED) in omap3isp_stat_suspend()
680 stat->ops->enable(stat, 0); in omap3isp_stat_suspend()
681 if (stat->state == ISPSTAT_ENABLED) in omap3isp_stat_suspend()
682 stat->state = ISPSTAT_SUSPENDED; in omap3isp_stat_suspend()
684 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in omap3isp_stat_suspend()
689 /* Module will be re-enabled with its pipeline */ in omap3isp_stat_resume()
690 if (stat->state == ISPSTAT_SUSPENDED) in omap3isp_stat_resume()
691 stat->state = ISPSTAT_ENABLING; in omap3isp_stat_resume()
698 if (stat->priv == NULL) in isp_stat_try_enable()
702 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in isp_stat_try_enable()
703 if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing && in isp_stat_try_enable()
704 stat->buf_alloc_size) { in isp_stat_try_enable()
706 * Userspace requested to enable the engine, but it wasn't enabled yet. in isp_stat_try_enable()
709 stat->update = 1; in isp_stat_try_enable()
711 stat->ops->setup_regs(stat, stat->priv); in isp_stat_try_enable()
712 isp_stat_buf_insert_magic(stat, stat->active_buf); in isp_stat_try_enable()
721 atomic_set(&stat->buf_err, 0); in isp_stat_try_enable()
724 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in isp_stat_try_enable()
725 dev_dbg(stat->isp->dev, "%s: module is enabled.\n", in isp_stat_try_enable()
726 stat->subdev.name); in isp_stat_try_enable()
728 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in isp_stat_try_enable()
741 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in omap3isp_stat_sbl_overflow()
743 * Due to an H3A hw issue which prevents the next buffer from starting at in omap3isp_stat_sbl_overflow()
746 atomic_set(&stat->buf_err, 2); in omap3isp_stat_sbl_overflow()
751 * stat->sbl_ovl_recover is set to tell the driver to temporarily use in omap3isp_stat_sbl_overflow()
754 if (stat->recover_priv) in omap3isp_stat_sbl_overflow()
755 stat->sbl_ovl_recover = 1; in omap3isp_stat_sbl_overflow()
756 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_sbl_overflow()
760 * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible
761 * @enable: 0/1 - Disables/Enables the engine.
766 int omap3isp_stat_enable(struct ispstat *stat, u8 enable) in omap3isp_stat_enable() argument
770 dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n", in omap3isp_stat_enable()
771 stat->subdev.name, enable ? "enable" : "disable"); in omap3isp_stat_enable()
774 mutex_lock(&stat->ioctl_lock); in omap3isp_stat_enable()
776 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in omap3isp_stat_enable()
778 if (!stat->configured && enable) { in omap3isp_stat_enable()
779 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_enable()
780 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_enable()
781 dev_dbg(stat->isp->dev, in omap3isp_stat_enable()
782 "%s: cannot enable module as it's never been successfully configured so far.\n", in omap3isp_stat_enable()
783 stat->subdev.name); in omap3isp_stat_enable()
784 return -EINVAL; in omap3isp_stat_enable()
787 if (enable) { in omap3isp_stat_enable()
788 if (stat->state == ISPSTAT_DISABLING) in omap3isp_stat_enable()
790 stat->state = ISPSTAT_ENABLED; in omap3isp_stat_enable()
791 else if (stat->state == ISPSTAT_DISABLED) in omap3isp_stat_enable()
793 stat->state = ISPSTAT_ENABLING; in omap3isp_stat_enable()
795 if (stat->state == ISPSTAT_ENABLING) { in omap3isp_stat_enable()
797 stat->state = ISPSTAT_DISABLED; in omap3isp_stat_enable()
798 } else if (stat->state == ISPSTAT_ENABLED) { in omap3isp_stat_enable()
800 stat->state = ISPSTAT_DISABLING; in omap3isp_stat_enable()
805 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_enable()
806 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_enable()
811 int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable) in omap3isp_stat_s_stream() argument
815 if (enable) { in omap3isp_stat_s_stream()
817 * Only set enable PCR bit if the module was previously in omap3isp_stat_s_stream()
823 /* Disable PCR bit and config enable field */ in omap3isp_stat_s_stream()
825 spin_lock_irqsave(&stat->isp->stat_lock, flags); in omap3isp_stat_s_stream()
826 stat->ops->enable(stat, 0); in omap3isp_stat_s_stream()
827 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in omap3isp_stat_s_stream()
843 dev_dbg(stat->isp->dev, "%s: module is being disabled\n", in omap3isp_stat_s_stream()
844 stat->subdev.name); in omap3isp_stat_s_stream()
851 * __stat_isr - Interrupt handler for statistic drivers
861 * stat->buf_processing must be set before disabling the module. It's in __stat_isr()
865 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in __stat_isr()
866 if (stat->state == ISPSTAT_DISABLED) { in __stat_isr()
867 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
870 buf_processing = stat->buf_processing; in __stat_isr()
871 stat->buf_processing = 1; in __stat_isr()
872 stat->ops->enable(stat, 0); in __stat_isr()
875 if (stat->state == ISPSTAT_ENABLED) { in __stat_isr()
876 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
877 dev_err(stat->isp->dev, in __stat_isr()
878 "%s: interrupt occurred when module was still processing a buffer.\n", in __stat_isr()
879 stat->subdev.name); in __stat_isr()
886 * disabled after processing the last buffer. If such buffer in __stat_isr()
890 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
894 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
896 /* If it's busy we can't process this buffer anymore */ in __stat_isr()
898 if (!from_dma && stat->ops->buf_process) in __stat_isr()
899 /* Module still needs to copy data to the buffer. */ in __stat_isr()
900 ret = stat->ops->buf_process(stat); in __stat_isr()
902 /* Buffer is not ready yet */ in __stat_isr()
905 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in __stat_isr()
912 if (stat->state == ISPSTAT_DISABLING) { in __stat_isr()
913 stat->state = ISPSTAT_DISABLED; in __stat_isr()
914 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
915 stat->buf_processing = 0; in __stat_isr()
918 pipe = to_isp_pipeline(&stat->subdev.entity); in __stat_isr()
919 stat->frame_number = atomic_read(&pipe->frame_number); in __stat_isr()
922 * Before this point, 'ret' stores the buffer's status if it's in __stat_isr()
928 if (likely(!stat->sbl_ovl_recover)) { in __stat_isr()
929 stat->ops->setup_regs(stat, stat->priv); in __stat_isr()
933 * a good buffer processing and make the H3A module in __stat_isr()
936 stat->update = 1; in __stat_isr()
937 stat->ops->setup_regs(stat, stat->recover_priv); in __stat_isr()
938 stat->sbl_ovl_recover = 0; in __stat_isr()
942 * regular configuration after next buffer. in __stat_isr()
944 stat->update = 1; in __stat_isr()
947 isp_stat_buf_insert_magic(stat, stat->active_buf); in __stat_isr()
952 * happens in a row without re-writing its buffer's start memory in __stat_isr()
954 * module is not immediately re-enabled when the ISR misses the in __stat_isr()
955 * timing to process the buffer and to setup the registers. in __stat_isr()
961 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
965 * to process the buffer, stat->buf_err is set and won't be in __stat_isr()
966 * cleared now. So the next buffer will be correctly ignored. in __stat_isr()
968 * buffer to start from the memory address where the previous in __stat_isr()
970 * Do not "stat->buf_err = 0" here. in __stat_isr()
973 if (stat->ops->buf_process) in __stat_isr()
976 * process a new buffer. If it misses the timing, the in __stat_isr()
977 * next buffer might be wrong, so it should be ignored. in __stat_isr()
980 atomic_set(&stat->buf_err, 1); in __stat_isr()
983 dev_dbg(stat->isp->dev, in __stat_isr()
984 "%s: cannot process buffer, device is busy.\n", in __stat_isr()
985 stat->subdev.name); in __stat_isr()
989 stat->buf_processing = 0; in __stat_isr()
1009 if (sub->type != stat->event_type) in omap3isp_stat_subscribe_event()
1010 return -EINVAL; in omap3isp_stat_subscribe_event()
1024 v4l2_device_unregister_subdev(&stat->subdev); in omap3isp_stat_unregister_entities()
1030 stat->subdev.dev = vdev->mdev->dev; in omap3isp_stat_register_entities()
1032 return v4l2_device_register_subdev(vdev, &stat->subdev); in omap3isp_stat_register_entities()
1038 struct v4l2_subdev *subdev = &stat->subdev; in isp_stat_init_entities()
1039 struct media_entity *me = &subdev->entity; in isp_stat_init_entities()
1042 snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name); in isp_stat_init_entities()
1043 subdev->grp_id = BIT(16); /* group ID for isp subdevs */ in isp_stat_init_entities()
1044 subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE; in isp_stat_init_entities()
1047 stat->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; in isp_stat_init_entities()
1048 me->ops = NULL; in isp_stat_init_entities()
1050 return media_entity_pads_init(me, 1, &stat->pad); in isp_stat_init_entities()
1058 stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL); in omap3isp_stat_init()
1059 if (!stat->buf) in omap3isp_stat_init()
1060 return -ENOMEM; in omap3isp_stat_init()
1063 mutex_init(&stat->ioctl_lock); in omap3isp_stat_init()
1064 atomic_set(&stat->buf_err, 0); in omap3isp_stat_init()
1068 mutex_destroy(&stat->ioctl_lock); in omap3isp_stat_init()
1069 kfree(stat->buf); in omap3isp_stat_init()
1077 media_entity_cleanup(&stat->subdev.entity); in omap3isp_stat_cleanup()
1078 mutex_destroy(&stat->ioctl_lock); in omap3isp_stat_cleanup()
1080 kfree(stat->buf); in omap3isp_stat_cleanup()
1081 kfree(stat->priv); in omap3isp_stat_cleanup()
1082 kfree(stat->recover_priv); in omap3isp_stat_cleanup()